/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight > 0)
 *                                  condition in ip_vs_dest_set_max.
 *
 */

/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns > n.weight AND
 *                   there is a node m with m.conns < m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *               if |serverSet[dest_ip]| > 1 AND
 *                   now - serverSet[dest_ip].lastMod > T then
 *                   m <- {most-conn node in serverSet[dest_ip]};
 *                   remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *               serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 *
 */
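/*
 * In the code below, serverSet[dest_ip] is a struct ip_vs_dest_set
 * (looked up via ip_vs_lblcr_get()), the "weighted least-conn node"
 * is computed by __ip_vs_lblcr_schedule(), and the least/most loaded
 * members of a set are found by ip_vs_dest_set_min() and
 * ip_vs_dest_set_max().
 */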
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>


/*
 * Intervals for garbage collection of stale IPVS lblcr entries,
 * used when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

/*
 * Full expiration check.
 * When there has been no partial expiration check (garbage collection)
 * for half an hour, do a full expiration check to collect stale
 * entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;


/*
 * for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)


/*
 * IPVS destination set structure and operations
 */
struct ip_vs_dest_set_elem {
        struct list_head        list;   /* list link */
        struct ip_vs_dest       *dest;  /* destination server */
};

struct ip_vs_dest_set {
        atomic_t                size;      /* set size */
        unsigned long           lastmod;   /* last modified time */
        struct list_head        list;      /* destination list */
        rwlock_t                lock;      /* lock for this list */
};
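/*
 * Locking and refcount discipline, as implemented below: callers hold
 * set->lock for writing around ip_vs_dest_set_insert()/_erase(), while
 * ip_vs_dest_set_eraseall() takes it itself.  Every element in the set
 * holds a reference on its dest (dest->refcnt), taken at insert time
 * and dropped when the element is erased, so a dest cannot go away
 * while a set still points at it.
 */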
static struct ip_vs_dest_set_elem *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_set_elem *e;

        list_for_each_entry(e, &set->list, list) {
                if (e->dest == dest)
                        /* already exists */
                        return NULL;
        }

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL) {
                pr_err("%s(): no memory\n", __func__);
                return NULL;
        }

        atomic_inc(&dest->refcnt);
        e->dest = dest;

        list_add(&e->list, &set->list);
        atomic_inc(&set->size);

        set->lastmod = jiffies;
        return e;
}

static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_set_elem *e;

        list_for_each_entry(e, &set->list, list) {
                if (e->dest == dest) {
                        /* HIT */
                        atomic_dec(&set->size);
                        set->lastmod = jiffies;
                        atomic_dec(&e->dest->refcnt);
                        list_del(&e->list);
                        kfree(e);
                        break;
                }
        }
}

static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
        struct ip_vs_dest_set_elem *e, *ep;

        write_lock(&set->lock);
        list_for_each_entry_safe(e, ep, &set->list, list) {
                /*
                 * We don't kfree dest because it is referred to either
                 * by its service or by the trash dest list.
                 */
                atomic_dec(&e->dest->refcnt);
                list_del(&e->list);
                kfree(e);
        }
        write_unlock(&set->lock);
}

/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_set_elem *e;
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server, whose weight > 0 */
        list_for_each_entry(e, &set->list, list) {
                least = e->dest;
                if (least->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if ((atomic_read(&least->weight) > 0)
                    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted least load */
  nextstage:
        list_for_each_entry(e, &set->list, list) {
                dest = e->dest;
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if ((loh * atomic_read(&dest->weight) >
                     doh * atomic_read(&least->weight))
                    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "%s(): server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      __func__,
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);
        return least;
}


/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_set_elem *e;
        struct ip_vs_dest *dest, *most;
        int moh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server, whose weight > 0 */
        list_for_each_entry(e, &set->list, list) {
                most = e->dest;
                if (atomic_read(&most->weight) > 0) {
                        moh = atomic_read(&most->activeconns) * 50
                                + atomic_read(&most->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted most load */
  nextstage:
        list_for_each_entry(e, &set->list, list) {
                dest = e->dest;
                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
                if ((moh * atomic_read(&dest->weight) <
                     doh * atomic_read(&most->weight))
                    && (atomic_read(&dest->weight) > 0)) {
                        most = dest;
                        moh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "%s(): server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      __func__,
                      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
                      atomic_read(&most->activeconns),
                      atomic_read(&most->refcnt),
                      atomic_read(&most->weight), moh);
        return most;
}
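/*
 * Worked example of the cross-multiplied comparison above (hypothetical
 * numbers): with loh = 100, least->weight = 2, doh = 60, dest->weight = 1,
 * the test loh*dw > doh*lw reads 100*1 > 60*2, which is false, so least
 * is kept -- matching the division-free load comparison 100/2 = 50
 * against 60/1 = 60.  ip_vs_dest_set_min() picks the set member new
 * connections go to; ip_vs_dest_set_max() picks the member to drop when
 * the set has been stable for longer than the lblcr expiration.
 */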
/*
 * IPVS lblcr entry represents an association between destination
 * IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
        struct list_head        list;
        int                     af;        /* address family */
        union nf_inet_addr      addr;      /* destination IP address */
        struct ip_vs_dest_set   set;       /* destination server set */
        unsigned long           lastuse;   /* last used time */
};


/*
 * IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
        struct list_head        bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash buckets */
        atomic_t                entries;         /* number of entries */
        int                     max_size;        /* maximum number of entries */
        struct timer_list       periodic_timer;  /* collect stale entries */
        int                     rover;           /* rover for expire check */
        int                     counter;         /* counter for no expire */
};


/*
 * IPVS LBLCR sysctl table
 */
static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblcr_expiration",
                .data           = &sysctl_ip_vs_lblcr_expiration,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static struct ctl_table_header *sysctl_header;

static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
        list_del(&en->list);
        ip_vs_dest_set_eraseall(&en->set);
        kfree(en);
}
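/*
 * The hash below is Knuth's multiplicative method: 2654435761 is the
 * golden-ratio constant (roughly 2^32 * (sqrt(5)-1)/2, i.e. 0x9e3779b1),
 * which spreads consecutive addresses well across the table; the result
 * is masked down to the low IP_VS_LBLCR_TAB_BITS bits.
 */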
/*
 * Returns the hash value for an IPVS LBLCR entry.
 */
static inline unsigned
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
        __be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
#endif
        return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}


/*
 * Hash an entry in the ip_vs_lblcr_table.
 */
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
        unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr);

        list_add(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
}


/*
 * Get the ip_vs_lblcr_entry associated with the supplied parameters.
 * Called under read lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
                const union nf_inet_addr *addr)
{
        unsigned hash = ip_vs_lblcr_hashkey(af, addr);
        struct ip_vs_lblcr_entry *en;

        list_for_each_entry(en, &tbl->bucket[hash], list)
                if (ip_vs_addr_equal(af, &en->addr, addr))
                        return en;

        return NULL;
}


/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a
 * destination IP address to a set of destination servers.
 * Called under write lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
                struct ip_vs_dest *dest)
{
        struct ip_vs_lblcr_entry *en;

        en = ip_vs_lblcr_get(dest->af, tbl, daddr);
        if (!en) {
                en = kmalloc(sizeof(*en), GFP_ATOMIC);
                if (!en) {
                        pr_err("%s(): no memory\n", __func__);
                        return NULL;
                }

                en->af = dest->af;
                ip_vs_addr_copy(dest->af, &en->addr, daddr);
                en->lastuse = jiffies;

                /* initialize its dest set */
                atomic_set(&(en->set.size), 0);
                INIT_LIST_HEAD(&en->set.list);
                rwlock_init(&en->set.lock);

                ip_vs_lblcr_hash(tbl, en);
        }

        write_lock(&en->set.lock);
        ip_vs_dest_set_insert(&en->set, dest);
        write_unlock(&en->set.lock);

        return en;
}


/*
 * Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
{
        int i;
        struct ip_vs_lblcr_entry *en, *nxt;

        /* No locking required, only called during cleanup. */
        for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
                        ip_vs_lblcr_free(en);
                }
        }
}


static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
                                       now))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
}
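/*
 * Timer-driven garbage collection (below): the handler runs every
 * CHECK_EXPIRE_INTERVAL; every COUNT_FOR_FULL_EXPIRATION rounds (30 at
 * a 60s interval, i.e. half an hour) it falls back to the full check
 * above.  Otherwise, once the table exceeds max_size, it frees roughly
 * 4/3 of the excess entries older than ENTRY_TIMEOUT, capped at
 * max_size/2, scanning the buckets round-robin from tbl->rover.
 */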
static void ip_vs_lblcr_check_expire(unsigned long data)
{
        struct ip_vs_service *svc = (struct ip_vs_service *) data;
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblcr_full_check(svc);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;

        for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                write_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

  out:
        mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}

static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblcr_table *tbl;

        /*
         * Allocate the ip_vs_lblcr_table for this service
         */
        tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
        if (tbl == NULL) {
                pr_err("%s(): no memory\n", __func__);
                return -ENOMEM;
        }
        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
                  "current service\n", sizeof(*tbl));

        /*
         * Initialize the hash buckets
         */
        for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                INIT_LIST_HEAD(&tbl->bucket[i]);
        }
        tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;

        /*
         * Hook periodic timer for garbage collection
         */
        setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
                    (unsigned long)svc);
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

        return 0;
}


static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* got to clean up table entries here */
        ip_vs_lblcr_flush(tbl);

        /* release the table itself */
        kfree(tbl);
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
                  sizeof(*tbl));

        return 0;
}


static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We think the overhead of processing active connections is fifty
         * times higher than that of inactive connections on average. (This
         * fifty times might not be accurate; we will change it later.) We
         * use the following formula to estimate the overhead:
         *              dest->activeconns*50 + dest->inactconns
         * and the load:
         *              (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         *              h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * A server with weight=0 is quiesced and will not receive any
         * new connection.
         */
        list_for_each_entry(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         * Find the destination with the least load.
         */
  nextstage:
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);

        return least;
}
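/*
 * The overload test below implements the pseudocode condition from the
 * header: n is overloaded when n.conns > n.weight and there is some
 * node m with m.conns < m.weight/2 (using activeconns as the
 * connection count).
 */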
/*
 * Returns true if this destination server is overloaded and there is
 * a less loaded server available.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}


/*
 * Locality-Based (weighted) Least-Connection with Replication scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        struct ip_vs_iphdr iph;
        struct ip_vs_dest *dest = NULL;
        struct ip_vs_lblcr_entry *en;

        ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);

        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

        /* First look in our cache */
        read_lock(&svc->sched_lock);
        en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
        if (en) {
                /* We only hold a read lock, but this is atomic */
                en->lastuse = jiffies;

                /* Get the least loaded destination */
                read_lock(&en->set.lock);
                dest = ip_vs_dest_set_min(&en->set);
                read_unlock(&en->set.lock);

                /* More than one destination + enough time passed by, cleanup */
                if (atomic_read(&en->set.size) > 1 &&
                    time_after(jiffies, en->set.lastmod +
                               sysctl_ip_vs_lblcr_expiration)) {
                        struct ip_vs_dest *m;

                        write_lock(&en->set.lock);
                        m = ip_vs_dest_set_max(&en->set);
                        if (m)
                                ip_vs_dest_set_erase(&en->set, m);
                        write_unlock(&en->set.lock);
                }

                /* If the destination is not overloaded, use it */
                if (dest && !is_overloaded(dest, svc)) {
                        read_unlock(&svc->sched_lock);
                        goto out;
                }

                /* The cache entry is invalid, time to schedule */
                dest = __ip_vs_lblcr_schedule(svc);
                if (!dest) {
                        IP_VS_ERR_RL("LBLCR: no destination available\n");
                        read_unlock(&svc->sched_lock);
                        return NULL;
                }

                /* Update our cache entry */
                write_lock(&en->set.lock);
                ip_vs_dest_set_insert(&en->set, dest);
                write_unlock(&en->set.lock);
        }
        read_unlock(&svc->sched_lock);

        if (dest)
                goto out;

        /* No cache entry, time to schedule */
        dest = __ip_vs_lblcr_schedule(svc);
        if (!dest) {
                IP_VS_DBG(1, "no destination available\n");
                return NULL;
        }

        /* If we fail to create a cache entry, we'll just use the valid dest */
        write_lock(&svc->sched_lock);
        ip_vs_lblcr_new(tbl, &iph.daddr, dest);
        write_unlock(&svc->sched_lock);

out:
        IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
                      IP_VS_DBG_ADDR(svc->af, &iph.daddr),
                      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

        return dest;
}


/*
 * IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
        .name =                 "lblcr",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .n_list =               LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
        .init_service =         ip_vs_lblcr_init_svc,
        .done_service =         ip_vs_lblcr_done_svc,
        .schedule =             ip_vs_lblcr_schedule,
};
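/*
 * Usage sketch (an assumption, not part of this file): once the module
 * is loaded, the scheduler is selected per virtual service with
 * ipvsadm, e.g. "ipvsadm -A -t 192.168.0.1:80 -s lblcr" for a
 * hypothetical service, and the entry expiration can be tuned through
 * the "lblcr_expiration" sysctl registered below under net_vs_ctl_path.
 */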
static int __init ip_vs_lblcr_init(void)
{
        int ret;

        sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
        ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        if (ret)
                unregister_sysctl_table(sysctl_header);
        return ret;
}


static void __exit ip_vs_lblcr_cleanup(void)
{
        unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}


module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");