/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

#ifdef __ia64__
/* Workaround for a bug in the HP chipset that would cause a kernel
   panic when DMA resources are exhausted. */
int dma_map_sg_hp_wa = 0;
#endif

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void             *data;
};

static LIST_HEAD(device_list);
static LIST_HEAD(client_list);
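/*
 * Illustrative sketch (compiled out): the shape of an ib_client whose
 * per-device contexts the lists above track.  All example_* names are
 * hypothetical and not part of this file; registration itself happens
 * through ib_register_client(), defined later in this file.
 */
#if 0
static void example_add(struct ib_device *device);
static void example_remove(struct ib_device *device);

static struct ib_client example_client = {
	.name   = "example",
	.add    = example_add,		/* called once per registered device */
	.remove = example_remove	/* called at device/client unregister */
};
#endif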
/*
 * device_mutex protects access to both device_list and client_list.
 * There's no real point to using multiple locks or something fancier
 * like an rwsem: we always access both lists, and we're always
 * modifying one list or the other.  In any case this is not a
 * hot path so there's no point in trying to optimize.
 */
static DEFINE_MUTEX(device_mutex);

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((u_char *) device + mandatory_table[i].offset)) {
			printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
			       device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}

static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}

static int rdma_start_port(struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static int rdma_end_port(struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
		0 : device->phys_port_cnt;
}

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	BUG_ON(size < sizeof (struct ib_device));

	return kzalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(ib_alloc_device);
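/*
 * Illustrative sketch (compiled out) of how a low-level driver would
 * typically use ib_alloc_device(): the driver-private structure embeds
 * struct ib_device as its first member, and a printf-style name
 * template such as "exhca%d" is expanded by alloc_name() above at
 * registration time.  struct example_hca_dev and "exhca%d" are
 * hypothetical names, not part of this file.
 */
#if 0
struct example_hca_dev {
	struct ib_device ib_dev;	/* must be the first member */
	/* ... driver-private state ... */
};

static struct example_hca_dev *example_probe_one(void)
{
	struct example_hca_dev *hca;

	hca = (struct example_hca_dev *) ib_alloc_device(sizeof *hca);
	if (!hca)
		return NULL;

	strlcpy(hca->ib_dev.name, "exhca%d", IB_DEVICE_NAME_MAX);
	return hca;
}
#endif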
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	if (device->reg_state == IB_DEV_UNINITIALIZED) {
		kfree(device->port_immutable);
		kfree(device);
		return;
	}

	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);

	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
		       device->name, client->name);
		return -ENOMEM;
	}

	context->client = client;
	context->data   = NULL;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
		       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/*
	 * device->port_immutable is indexed directly by the port number
	 * to make access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1-based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}
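/*
 * Illustrative sketch (compiled out) of the get_port_immutable method
 * that read_port_immutable() above invokes for every port.  A driver
 * typically fills the table from its ib_query_port() results.  The
 * function name example_port_immutable() is hypothetical; the constants
 * RDMA_CORE_PORT_IBA_IB (<rdma/ib_verbs.h>) and IB_MGMT_MAD_SIZE
 * (<rdma/ib_mad.h>) are assumed available in this tree.
 */
#if 0
static int example_port_immutable(struct ib_device *ibdev, u8 port_num,
				  struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len   = attr.pkey_tbl_len;
	immutable->gid_tbl_len    = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	immutable->max_mad_size   = IB_MGMT_MAD_SIZE;

	return 0;
}
#endif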
/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added.  @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	INIT_LIST_HEAD(&device->event_handler_list);
	INIT_LIST_HEAD(&device->client_data_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	device->ib_uverbs_xrcd_table = RB_ROOT;
	mutex_init(&device->xrcd_table_mutex);

	ret = read_port_immutable(device);
	if (ret) {
		printk(KERN_WARNING "Couldn't create per-port immutable data for %s\n",
		       device->name);
		goto out;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		printk(KERN_WARNING "Couldn't register device %s with driver model\n",
		       device->name);
		kfree(device->port_immutable);
		goto out;
	}

	list_add_tail(&device->core_list, &device_list);

	device->reg_state = IB_DEV_REGISTERED;

	{
		struct ib_client *client;

		list_for_each_entry(client, &client_list, list)
			if (client->add && !add_client_context(device, client))
				client->add(device);
	}

out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);

/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client *client;
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	list_for_each_entry_reverse(client, &client_list, list)
		if (client->remove)
			client->remove(device);

	list_del(&device->core_list);

	mutex_unlock(&device_mutex);

	ib_device_unregister_sysfs(device);

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);
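/*
 * Illustrative sketch (compiled out) of a driver's registration and
 * teardown flow around ib_register_device()/ib_unregister_device(),
 * continuing the hypothetical struct example_hca_dev from the earlier
 * sketch.  All example_* names are hypothetical.
 */
#if 0
static int example_register(struct example_hca_dev *hca)
{
	/*
	 * The mandatory methods (query_device, create_qp, ...) must be
	 * set before this call, or ib_device_check_mandatory() will
	 * reject the device with -EINVAL.
	 */
	return ib_register_device(&hca->ib_dev, NULL);
}

static void example_unregister(struct example_hca_dev *hca)
{
	ib_unregister_device(&hca->ib_dev);
	ib_dealloc_device(&hca->ib_dev);
}
#endif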
/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_add_tail(&client->list, &client_list);
	list_for_each_entry(device, &device_list, core_list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list) {
		if (client->remove)
			client->remove(device);

		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				list_del(&context->list);
				kfree(context);
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
	}
	list_del(&client->list);

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	printk(KERN_WARNING "No client context found for %s/%s\n",
	       device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
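/*
 * Illustrative sketch (compiled out), filling in the add/remove
 * callbacks of the hypothetical example_client from the sketch near
 * the top of this file: add allocates per-device state and publishes
 * it with ib_set_client_data() (the context slot itself was created by
 * add_client_context() with data == NULL before add is called); remove
 * looks the state up again and frees it.  struct example_state is
 * hypothetical.
 */
#if 0
struct example_state {
	struct ib_device *device;
	/* ... per-device client state ... */
};

static void example_add(struct ib_device *device)
{
	struct example_state *st;

	st = kzalloc(sizeof *st, GFP_KERNEL);
	if (!st)
		return;

	st->device = device;
	ib_set_client_data(device, &example_client, st);
}

static void example_remove(struct ib_device *device)
{
	kfree(ib_get_client_data(device, &example_client));
}
#endif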
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
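/*
 * Illustrative sketch (compiled out) of an asynchronous event
 * consumer.  The handler runs under event_handler_lock, possibly in
 * interrupt context, so it must not sleep.  example_event()/example_eh/
 * example_watch_events() are hypothetical names; INIT_IB_EVENT_HANDLER()
 * is assumed to come from <rdma/ib_verbs.h> in this tree.
 */
#if 0
static void example_event(struct ib_event_handler *handler,
			  struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ACTIVE)
		printk(KERN_INFO "port %d of %s is active\n",
		       event->element.port_num, event->device->name);
}

static struct ib_event_handler example_eh;

static void example_watch_events(struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(&example_eh, device, example_event);
	ib_register_event_handler(&example_eh);
}
#endif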
/**
 * ib_query_device - Query IB device attributes
 * @device:Device to query
 * @device_attr:Device attributes
 *
 * ib_query_device() returns the attributes of a device through the
 * @device_attr pointer.
 */
int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr)
{
	return device->query_device(device, device_attr);
}
EXPORT_SYMBOL(ib_query_device);

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);

/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid)
{
	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
				   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
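/*
 * Illustrative sketch (compiled out): looking up the table index of
 * the default P_Key (0xffff) on a port, as a consumer of
 * ib_find_pkey() (defined below) would.  example_default_pkey_index()
 * is a hypothetical name.
 */
#if 0
static int example_default_pkey_index(struct ib_device *device,
				      u8 port_num, u16 *index)
{
	/*
	 * ib_find_pkey() compares only the low 15 bits, so this also
	 * matches the limited-membership form 0x7fff.
	 */
	return ib_find_pkey(device, port_num, 0xffff, index);
}
#endif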
/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;

		/*
		 * The top bit of a P_Key is the membership bit, so
		 * compare only the low 15 bits.
		 */
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			*index = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);

static int __init ib_core_init(void)
{
	int ret;

#ifdef __ia64__
	if (ia64_platform_is("hpzx1"))
		dma_map_sg_hp_wa = 1;
#endif

	ret = ib_sysfs_setup();
	if (ret)
		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");

	ret = ib_cache_setup();
	if (ret) {
		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
		ib_sysfs_cleanup();
	}

	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	ib_sysfs_cleanup();
	/* Make sure that any pending umem accounting work is done. */
	flush_scheduled_work();
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);

#undef MODULE_VERSION
#include <sys/module.h>
static int
ibcore_evhand(module_t mod, int event, void *arg)
{
	return (0);
}

static moduledata_t ibcore_mod = {
	.name = "ibcore",
	.evhand = ibcore_evhand,
};

MODULE_VERSION(ibcore, 1);
DECLARE_MODULE(ibcore, ibcore_mod, SI_SUB_SMP, SI_ORDER_ANY);
MODULE_DEPEND(ibcore, toecore, 1, 1, 1);
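/*
 * Illustrative sketch (compiled out): a FreeBSD module event handler
 * that dispatches on the event type, for comparison with the no-op
 * ibcore_evhand() above, which accepts every event unconditionally.
 * example_evhand() is a hypothetical name.
 */
#if 0
static int
example_evhand(module_t mod, int event, void *arg)
{
	switch (event) {
	case MOD_LOAD:
	case MOD_UNLOAD:
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}
#endif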