ib_user_mad.c revision 331772
/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2008 Cisco. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD: stable/11/sys/ofed/drivers/infiniband/core/ib_user_mad.c 331772 2018-03-30 18:17:33Z hselasky $
 */

#define pr_fmt(fmt) "user_mad: " fmt

#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UMAD_MAX_PORTS  = 64,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0
};

/*
 * Our lifetime rules for these structs are the following: each time a
 * device special file is opened, we take a reference on the
 * ib_umad_port's struct ib_umad_device.  We drop these
 * references in the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we drop the module's reference.
 */

struct ib_umad_port {
	struct cdev           cdev;
	struct device        *dev;

	struct cdev           sm_cdev;
	struct device        *sm_dev;
	struct semaphore      sm_sem;

	struct mutex          file_mutex;
	struct list_head      file_list;

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;
	int                    dev_num;
	u8                     port_num;
};

struct ib_umad_device {
	struct kobject      kobj;
	struct ib_umad_port port[0];
};

struct ib_umad_file {
	struct mutex         mutex;
	struct ib_umad_port *port;
	struct list_head     recv_list;
	struct list_head     send_list;
	struct list_head     port_list;
	spinlock_t           send_lock;
	wait_queue_head_t    recv_wait;
	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
	int                  agents_dead;
	u8                   use_pkey_index;
	u8                   already_used;
};

struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct ib_mad_recv_wc  *recv_wc;
	struct list_head        list;
	int                     length;
	struct ib_user_mad      mad;
};

static struct class *umad_class;

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

static DEFINE_SPINLOCK(port_lock);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);

static void ib_umad_release_dev(struct kobject *kobj)
{
	struct ib_umad_device *dev =
		container_of(kobj, struct ib_umad_device, kobj);

	kfree(dev);
}

static struct kobj_type ib_umad_dev_ktype = {
	.release = ib_umad_release_dev,
};

static int hdr_size(struct ib_umad_file *file)
{
	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
				      sizeof (struct ib_user_mad_hdr_old);
}

/* caller must hold file->mutex */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}
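
/*
 * Queue a received or timed-out MAD on the owning file's receive list.
 * hdr.id is set to the slot of the matching agent so userspace can tell
 * which registered agent the packet belongs to; returns nonzero if the
 * agent is no longer registered, in which case the caller must free
 * the packet itself.
 */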
static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	mutex_lock(&file->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			list_add_tail(&packet->list, &file->recv_list);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	mutex_unlock(&file->mutex);

	return ret;
}

static void dequeue_send(struct ib_umad_file *file,
			 struct ib_umad_packet *packet)
{
	spin_lock_irq(&file->send_lock);
	list_del(&packet->list);
	spin_unlock_irq(&file->send_lock);
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	dequeue_send(file, packet);
	ib_destroy_ah(packet->msg->ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		packet->length = IB_MGMT_MAD_HDR;
		packet->mad.hdr.status = ETIMEDOUT;
		if (!queue_packet(file, agent, packet))
			return;
	}
	kfree(packet);
}

static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto err1;

	packet = kzalloc(sizeof *packet, GFP_KERNEL);
	if (!packet)
		goto err1;

	packet->length = mad_recv_wc->mad_len;
	packet->recv_wc = mad_recv_wc;

	packet->mad.hdr.status     = 0;
	packet->mad.hdr.length     = hdr_size(file) + mad_recv_wc->mad_len;
	packet->mad.hdr.qpn        = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid        = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl         = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits  = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		struct ib_ah_attr ah_attr;

		ib_init_ah_from_wc(agent->device, agent->port_num,
				   mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
				   &ah_attr);

		packet->mad.hdr.gid_index = ah_attr.grh.sgid_index;
		packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit;
		packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
		memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
		packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label);
	}

	if (queue_packet(file, agent, packet))
		goto err2;
	return;

err2:
	kfree(packet);
err1:
	ib_free_recv_mad(mad_recv_wc);
}
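
/*
 * Copy a received MAD out to userspace: first the ib_user_mad header,
 * then the first (or only) MAD segment, and for multi-segment RMPP
 * messages the remaining segments gathered from the receive buffer
 * list.
 */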
static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;
	size_t seg_size;

	recv_buf = &packet->recv_wc->recv_buf;
	seg_size = packet->recv_wc->mad_seg_size;

	/* We need enough room to copy the first (or only) MAD segment. */
	if ((packet->length <= seg_size &&
	     count < hdr_size(file) + packet->length) ||
	    (packet->length > seg_size &&
	     count < hdr_size(file) + seg_size))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);
	seg_payload = min_t(int, packet->length, seg_size);
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

	if (seg_payload < packet->length) {
		/*
		 * Multipacket RMPP MAD message. Copy remainder of message.
		 * Note that last segment may have a shorter payload.
		 */
		if (count < hdr_size(file) + packet->length) {
			/*
			 * The buffer is too small, return the first RMPP segment,
			 * which includes the RMPP message length.
			 */
			return -ENOSPC;
		}
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = seg_size - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			recv_buf = container_of(recv_buf->list.next,
						struct ib_mad_recv_buf, list);
			seg_payload = min(left, max_seg_payload);
			if (copy_to_user(buf, (char *)recv_buf->mad + offset,
					 seg_payload))
				return -EFAULT;
		}
	}
	return hdr_size(file) + packet->length;
}

static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	ssize_t size = hdr_size(file) + packet->length;

	if (count < size)
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);

	if (copy_to_user(buf, packet->mad.data, packet->length))
		return -EFAULT;

	return size;
}

static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < hdr_size(file))
		return -EINVAL;

	mutex_lock(&file->mutex);

	while (list_empty(&file->recv_list)) {
		mutex_unlock(&file->mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mutex);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	mutex_unlock(&file->mutex);

	if (packet->recv_wc)
		ret = copy_recv_mad(file, buf, packet, count);
	else
		ret = copy_send_mad(file, buf, packet, count);

	if (ret < 0) {
		/* Requeue packet */
		mutex_lock(&file->mutex);
		list_add(&packet->list, &file->recv_list);
		mutex_unlock(&file->mutex);
	} else {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}
	return ret;
}
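
/*
 * Copy an outgoing RMPP MAD in from userspace.  ib_create_send_mad()
 * has already allocated the segment list, so the class-specific header
 * is copied first and the payload is then scattered one seg_size chunk
 * at a time via ib_get_rmpp_segment().
 */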
static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
	int left, seg;

	/* Copy class specific header */
	if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
	    copy_from_user((char *)msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
		return -EFAULT;

	/* All headers are in place.  Copy data segments. */
	for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
	     seg++, left -= msg->seg_size, buf += msg->seg_size) {
		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
				   min(left, msg->seg_size)))
			return -EFAULT;
	}
	return 0;
}

static int same_destination(struct ib_user_mad_hdr *hdr1,
			    struct ib_user_mad_hdr *hdr2)
{
	if (!hdr1->grh_present && !hdr2->grh_present)
		return (hdr1->lid == hdr2->lid);

	if (hdr1->grh_present && hdr2->grh_present)
		return !memcmp(hdr1->gid, hdr2->gid, 16);

	return 0;
}

static int is_duplicate(struct ib_umad_file *file,
			struct ib_umad_packet *packet)
{
	struct ib_umad_packet *sent_packet;
	struct ib_mad_hdr *sent_hdr, *hdr;

	hdr = (struct ib_mad_hdr *) packet->mad.data;
	list_for_each_entry(sent_packet, &file->send_list, list) {
		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;

		if ((hdr->tid != sent_hdr->tid) ||
		    (hdr->mgmt_class != sent_hdr->mgmt_class))
			continue;

		/*
		 * No need to be overly clever here.  If two new operations have
		 * the same TID, reject the second as a duplicate.  This is more
		 * restrictive than required by the spec.
		 */
		if (!ib_response_mad(hdr)) {
			if (!ib_response_mad(sent_hdr))
				return 1;
			continue;
		} else if (!ib_response_mad(sent_hdr))
			continue;

		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
			return 1;
	}

	return 0;
}
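
/*
 * Send path: parse the ib_user_mad header written by userspace, build
 * an address handle from it, allocate a send buffer with
 * ib_create_send_mad(), copy in the MAD (plain or RMPP), rewrite the
 * high 32 bits of the TID on request MADs so responses can be routed
 * back to this agent, and post the send.  Sends that is_duplicate()
 * flags as colliding with an outstanding request are rejected.
 */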
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;
	u8 base_version;

	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	buf += hdr_size(file);

	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	mutex_lock(&file->mutex);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.sgid_index    = packet->mad.hdr.gid_index;
		ah_attr.grh.flow_label    = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit     = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);

	if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
	    && ib_mad_kernel_rmpp_agent(agent)) {
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
						IB_MGMT_RMPP_FLAG_ACTIVE;
	} else {
		copy_offset = IB_MGMT_MAD_HDR;
		rmpp_active = 0;
	}

	base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version;
	data_len = count - hdr_size(file) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL,
					 base_version);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah         = ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries    = packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD header.  Any RMPP header is already in place. */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

	if (!rmpp_active) {
		if (copy_from_user((char *)packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		ret = copy_rmpp_mad(packet->msg, buf);
		if (ret)
			goto err_msg;
	}

	/*
	 * Set the high-order part of the transaction ID to make MADs from
	 * different agents unique, and allow routing responses back to the
	 * original requestor.
	 */
	if (!ib_response_mad(packet->msg->mad)) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
		rmpp_mad->mad_hdr.tid = *tid;
	}

	if (!ib_mad_kernel_rmpp_agent(agent)
	    && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
	    && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
		spin_lock_irq(&file->send_lock);
		list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
	} else {
		spin_lock_irq(&file->send_lock);
		ret = is_duplicate(file, packet);
		if (!ret)
			list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
		if (ret) {
			ret = -EINVAL;
			goto err_msg;
		}
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_send;

	mutex_unlock(&file->mutex);
	return count;

err_send:
	dequeue_send(file, packet);
err_msg:
	ib_free_send_mad(packet->msg);
err_ah:
	ib_destroy_ah(ah);
err_up:
	mutex_unlock(&file->mutex);
err:
	kfree(packet);
	return ret;
}

static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
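
/*
 * IB_USER_MAD_REGISTER_AGENT: bind a free slot in file->agent[] to a
 * MAD agent on this port.  When compat_method_mask is set, the
 * method_mask supplied by a 32-bit process is repacked into the
 * kernel's native unsigned long bitmap before registering.
 */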
static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
			     int compat_method_mask)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent: invalid device\n");
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent: invalid QPN %d specified\n",
			   ureq.qpn);
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	dev_notice(file->port->dev,
		   "ib_umad_reg_agent: Max Agents (%u) reached\n",
		   IB_UMAD_MAX_AGENTS);
	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		memset(&req, 0, sizeof(req));
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.oui, ureq.oui, sizeof req.oui);

		if (compat_method_mask) {
			u32 *umm = (u32 *) ureq.method_mask;
			int i;

			for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
				req.method_mask[i] =
					umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
		} else
			memcpy(req.method_mask, ureq.method_mask,
			       sizeof req.method_mask);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file, 0);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) ((char *)arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		if (!file->use_pkey_index) {
			dev_warn(file->port->dev,
				"process %s did not enable P_Key index support.\n",
				current->comm);
			dev_warn(file->port->dev,
				"  Documentation/infiniband/user_mad.txt has info on the new ABI.\n");
		}
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}
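
/*
 * IB_USER_MAD_REGISTER_AGENT2: extended registration taking a numeric
 * 24-bit OUI, a fixed-layout method_mask and a flags word.  Unknown
 * flags are rejected and the supported set is written back to
 * userspace; registering through this ioctl implies P_Key index
 * support.
 */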
static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
{
	struct ib_user_mad_reg_req2 ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent2: invalid device\n");
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof(ureq))) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent2: invalid QPN %d specified\n",
			   ureq.qpn);
		ret = -EINVAL;
		goto out;
	}

	if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
		const u32 flags = IB_USER_MAD_REG_FLAGS_CAP;

		dev_notice(file->port->dev,
			   "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n",
			   ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
		ret = -EINVAL;

		if (put_user(flags,
			     (u32 __user *) ((char *)arg + offsetof(struct
					     ib_user_mad_reg_req2, flags))))
			ret = -EFAULT;

		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	dev_notice(file->port->dev,
		   "ib_umad_reg_agent2: Max Agents (%u) reached\n",
		   IB_UMAD_MAX_AGENTS);
	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		memset(&req, 0, sizeof(req));
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		if (ureq.oui & 0xff000000) {
			dev_notice(file->port->dev,
				   "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n",
				   ureq.oui);
			ret = -EINVAL;
			goto out;
		}
		req.oui[2] =  ureq.oui & 0x0000ff;
		req.oui[1] = (ureq.oui & 0x00ff00) >> 8;
		req.oui[0] = (ureq.oui & 0xff0000) >> 16;
		memcpy(req.method_mask, ureq.method_mask,
		       sizeof(req.method_mask));
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file,
				      ureq.flags);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *)((char *)arg +
				    offsetof(struct ib_user_mad_reg_req2, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		file->use_pkey_index = 1;
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, arg))
		return -EFAULT;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	mutex_unlock(&file->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

static long ib_umad_enable_pkey(struct ib_umad_file *file)
{
	int ret = 0;

	mutex_lock(&file->mutex);
	if (file->already_used)
		ret = -EINVAL;
	else
		file->use_pkey_index = 1;
	mutex_unlock(&file->mutex);

	return ret;
}
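
/*
 * ioctl dispatch for the umad device.  The compat variant below only
 * differs in decoding the userspace pointer with compat_ptr() and in
 * requesting 32-bit method_mask conversion for REGISTER_AGENT.
 */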
static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, (void __user *) arg);
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg));
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

/*
 * ib_umad_open() does not need the BKL:
 *
 *  - the ib_umad_port structures are properly reference counted, and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - the ioctl method does not affect any global state outside of the
 *    file structure being operated on;
 */
static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret = -ENXIO;

	port = container_of(inode->i_cdev->si_drv1, struct ib_umad_port, cdev);

	mutex_lock(&port->file_mutex);

	if (!port->ib_dev)
		goto out;

	ret = -ENOMEM;
	file = kzalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		goto out;

	mutex_init(&file->mutex);
	spin_lock_init(&file->send_lock);
	INIT_LIST_HEAD(&file->recv_list);
	INIT_LIST_HEAD(&file->send_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

	ret = nonseekable_open(inode, filp);
	if (ret) {
		list_del(&file->port_list);
		kfree(file);
		goto out;
	}

	kobject_get(&port->umad_dev->kobj);

out:
	mutex_unlock(&port->file_mutex);
	return ret;
}

static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_device *dev = file->port->umad_dev;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}

	list_del(&file->port_list);

	mutex_unlock(&file->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	mutex_unlock(&file->port->file_mutex);

	kfree(file);
	kobject_put(&dev->kobj);

	return 0;
}

static const struct file_operations umad_fops = {
	.owner          = THIS_MODULE,
	.read           = ib_umad_read,
	.write          = ib_umad_write,
	.poll           = ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ib_umad_compat_ioctl,
#endif
	.open           = ib_umad_open,
	.release        = ib_umad_close,
	.llseek         = no_llseek,
};
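
/*
 * The issm device: opening it sets the IB_PORT_SM capability bit on
 * the port, and closing it clears the bit again.  port->sm_sem ensures
 * that only one process can claim the SM role on a port at a time.
 */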
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	port = container_of(inode->i_cdev->si_drv1, struct ib_umad_port, sm_cdev);

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret)
		goto err_up_sem;

	filp->private_data = port;

	ret = nonseekable_open(inode, filp);
	if (ret)
		goto err_clr_sm_cap;

	kobject_get(&port->umad_dev->kobj);

	return 0;

err_clr_sm_cap:
	swap(props.set_port_cap_mask, props.clr_port_cap_mask);
	ib_modify_port(port->ib_dev, port->port_num, 0, &props);

err_up_sem:
	up(&port->sm_sem);

fail:
	return ret;
}

static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	mutex_lock(&port->file_mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	mutex_unlock(&port->file_mutex);

	up(&port->sm_sem);

	kobject_put(&port->umad_dev->kobj);

	return ret;
}

static const struct file_operations umad_sm_fops = {
	.owner   = THIS_MODULE,
	.open    = ib_umad_sm_open,
	.release = ib_umad_sm_close,
	.llseek  = no_llseek,
};

static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%d\n", port->port_num);
}
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_MAD_ABI_VERSION));

static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
static int find_overflow_devnum(struct ib_device *device)
{
	int ret;

	if (!overflow_maj) {
		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
					  "infiniband_mad");
		if (ret) {
			dev_err(&device->dev,
				"couldn't register dynamic device number\n");
			return ret;
		}
	}

	ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
	if (ret >= IB_UMAD_MAX_PORTS)
		return -1;

	return ret;
}
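
/*
 * Create the umad%d and issm%d character devices for one port, using
 * the static major for the first IB_UMAD_MAX_PORTS ports and a
 * dynamically allocated major for any ports beyond that.
 */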
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_device *umad_dev,
			     struct ib_umad_port *port)
{
	int devnum;
	dev_t base;

	spin_lock(&port_lock);
	devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
	if (devnum >= IB_UMAD_MAX_PORTS) {
		spin_unlock(&port_lock);
		devnum = find_overflow_devnum(device);
		if (devnum < 0)
			return -1;

		spin_lock(&port_lock);
		port->dev_num = devnum + IB_UMAD_MAX_PORTS;
		base = devnum + overflow_maj;
		set_bit(devnum, overflow_map);
	} else {
		port->dev_num = devnum;
		base = devnum + base_dev;
		set_bit(devnum, dev_map);
	}
	spin_unlock(&port_lock);

	port->ib_dev   = device;
	port->port_num = port_num;
	sema_init(&port->sm_sem, 1);
	mutex_init(&port->file_mutex);
	INIT_LIST_HEAD(&port->file_list);

	cdev_init(&port->cdev, &umad_fops);
	port->cdev.owner = THIS_MODULE;
	port->cdev.kobj.parent = &umad_dev->kobj;
	kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
	if (cdev_add(&port->cdev, base, 1))
		goto err_cdev;

	port->dev = device_create(umad_class, device->dma_device,
				  port->cdev.dev, port,
				  "umad%d", port->dev_num);
	if (IS_ERR(port->dev))
		goto err_cdev;

	if (device_create_file(port->dev, &dev_attr_ibdev))
		goto err_dev;
	if (device_create_file(port->dev, &dev_attr_port))
		goto err_dev;

	base += IB_UMAD_MAX_PORTS;
	cdev_init(&port->sm_cdev, &umad_sm_fops);
	port->sm_cdev.owner = THIS_MODULE;
	port->sm_cdev.kobj.parent = &umad_dev->kobj;
	kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
	if (cdev_add(&port->sm_cdev, base, 1))
		goto err_sm_cdev;

	port->sm_dev = device_create(umad_class, device->dma_device,
				     port->sm_cdev.dev, port,
				     "issm%d", port->dev_num);
	if (IS_ERR(port->sm_dev))
		goto err_sm_cdev;

	if (device_create_file(port->sm_dev, &dev_attr_ibdev))
		goto err_sm_dev;
	if (device_create_file(port->sm_dev, &dev_attr_port))
		goto err_sm_dev;

	return 0;

err_sm_dev:
	device_destroy(umad_class, port->sm_cdev.dev);

err_sm_cdev:
	cdev_del(&port->sm_cdev);

err_dev:
	device_destroy(umad_class, port->cdev.dev);

err_cdev:
	cdev_del(&port->cdev);
	if (port->dev_num < IB_UMAD_MAX_PORTS)
		clear_bit(devnum, dev_map);
	else
		clear_bit(devnum, overflow_map);

	return -1;
}

static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int id;

	dev_set_drvdata(port->dev,    NULL);
	dev_set_drvdata(port->sm_dev, NULL);

	device_destroy(umad_class, port->cdev.dev);
	device_destroy(umad_class, port->sm_cdev.dev);

	cdev_del(&port->cdev);
	cdev_del(&port->sm_cdev);

	mutex_lock(&port->file_mutex);

	port->ib_dev = NULL;

	list_for_each_entry(file, &port->file_list, port_list) {
		mutex_lock(&file->mutex);
		file->agents_dead = 1;
		mutex_unlock(&file->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);
	}

	mutex_unlock(&port->file_mutex);

	if (port->dev_num < IB_UMAD_MAX_PORTS)
		clear_bit(port->dev_num, dev_map);
	else
		clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
}

static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);

	for (i = s; i <= e; ++i) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, umad_dev,
				      &umad_dev->port[i - s]))
			goto err;

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		ib_umad_kill_port(&umad_dev->port[i - s]);
	}
free:
	kobject_put(&umad_dev->kobj);
}

static void ib_umad_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_umad_device *umad_dev = client_data;
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) {
		if (rdma_cap_ib_mad(device, i + rdma_start_port(device)))
			ib_umad_kill_port(&umad_dev->port[i]);
	}

	kobject_put(&umad_dev->kobj);
}
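
/* Place the device nodes under the infiniband/ subdirectory of /dev. */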
static char *umad_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		pr_err("couldn't register device number\n");
		goto out;
	}

	umad_class = class_create(THIS_MODULE, "infiniband_mad");
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		pr_err("couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	umad_class->devnode = umad_devnode;

	ret = class_create_file(umad_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		pr_err("couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}

static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_destroy(umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
	if (overflow_maj)
		unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
}

module_init_order(ib_umad_init, SI_ORDER_THIRD);
module_exit(ib_umad_cleanup);