ib_uverbs_cmd.c revision 331769
1/* 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005 PathScale, Inc. All rights reserved. 5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 
 */

#define LINUXKPI_PARAM_PREFIX ibcore_

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rbtree.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

#include <sys/priv.h>

/*
 * Per-object-type lock-class tag.  Only the name is carried here; it is
 * passed to init_uobj() so each uobject type's rwsem can be identified.
 */
struct uverbs_lock_class {
	char name[16];
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" };
static struct uverbs_lock_class rwq_ind_table_lock_class = { .name = "IND_TBL-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr write operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.  For read operations, rcu_read_lock() suffices,
 *   but similarly the kref reference must be grabbed before the
 *   rcu_read_unlock().
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */

/*
 * Initialize a freshly allocated uobject.  The object starts with a
 * single kref, an unlocked rwsem, and live = 0 so lookups fail until
 * the creator publishes it.  The lock class 'c' only tags the rwsem.
 */
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	uobj->live = 0;
}

/* kref release callback: free the uobject after an RCU grace period. */
static void release_uobj(struct kref *kref)
{
	kfree_rcu(container_of(kref, struct ib_uobject, ref), rcu);
}

/* Drop one reference; frees the uobject (via RCU) on the last put. */
static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

/* Release a read-locked uobject: unlock for read, then drop the ref. */
static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

/* Release a write-locked uobject: unlock for write, then drop the ref. */
static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

/*
 * Allocate an idr id for 'uobj' under ib_uverbs_idr_lock and store it
 * in uobj->id.  Returns 0 on success or the negative idr_alloc() error.
 */
static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

/* Remove the uobject's id from the idr under ib_uverbs_idr_lock. */
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

/*
 * Look up 'id' in 'idr' under RCU and take a kref on the uobject, but
 * only if it belongs to the given ucontext; otherwise return NULL.
 */
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	rcu_read_lock();
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	rcu_read_unlock();

	return uobj;
}

/*
 * Look up a uobject and acquire its rwsem for reading.  Fails (NULL)
 * if the object is gone or no longer live.  'nested' selects
 * down_read_nested() for the one-level nesting case (e.g. srq->cq).
 */
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

/*
 * Look up a uobject and acquire its rwsem for writing (for destroy /
 * modify paths).  Fails (NULL) if the object is gone or not live.
 */
static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

/* Read-lock a uobject and return its wrapped verbs object (or NULL). */
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ?
 uobj->object : NULL;
}

/* Typed wrappers over idr_read_obj()/put_uobj_read() for each uobject idr. */

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

/* 'nested' is forwarded so a CQ can be read-locked under another uobject. */
static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_wq *idr_read_wq(int wq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_wq_idr, wq_handle, context, 0);
}

static void put_wq_read(struct ib_wq *wq)
{
	put_uobj_read(wq->uobject);
}

static struct ib_rwq_ind_table *idr_read_rwq_indirection_table(int ind_table_handle,
							       struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_rwq_ind_tbl_idr, ind_table_handle, context, 0);
}

static void put_rwq_indirection_table_read(struct ib_rwq_ind_table *ind_table)
{
	put_uobj_read(ind_table->uobject);
}

/* Write-lock a QP uobject and return the QP (NULL if gone / not live). */
static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ?
 uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

/*
 * Read-lock an XRCD uobject; the locked uobject is also returned via
 * '*uobj' because XRCD callers need it for refcounting in
 * ib_uxrcd_object.
 */
static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}

/*
 * GET_CONTEXT command: allocate the device's ucontext for this file and
 * an async event fd for it.  Fails with -EINVAL if the file already has
 * a ucontext.  Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata udata;
	struct ib_ucontext *ucontext;
	struct file *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	/* Only one ucontext per uverbs file. */
	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ib_dev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->wq_list);
	INIT_LIST_HEAD(&ucontext->rwq_ind_tbl_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	/* Pin the owning process's group-leader pid for later MR accounting. */
	rcu_read_lock();
	ucontext->tgid = get_pid(task_pid_group_leader(current));
	rcu_read_unlock();
	ucontext->closing = 0;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	ucontext->umem_tree = RB_ROOT;
	init_rwsem(&ucontext->umem_rwsem);
	ucontext->odp_mrs_count = 0;
	INIT_LIST_HEAD(&ucontext->no_private_counters);

	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
		ucontext->invalidate_range = NULL;

#endif

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->ucontext = ucontext;

	/* Publish the async fd only after everything else succeeded. */
	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	ib_uverbs_free_async_event_file(file);
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	put_pid(ucontext->tgid);
	ib_dev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

/*
 * Fill a query_device response from the device's cached attributes.
 * Note device_cap_flags is truncated to the response's 32-bit field.
 */
static void copy_query_dev_fields(struct ib_uverbs_file *file,
				  struct ib_device *ib_dev,
				  struct ib_uverbs_query_device_resp *resp,
				  struct ib_device_attr *attr)
{
	resp->fw_ver = attr->fw_ver;
	resp->node_guid = ib_dev->node_guid;
	resp->sys_image_guid = attr->sys_image_guid;
	resp->max_mr_size = attr->max_mr_size;
	resp->page_size_cap = attr->page_size_cap;
	resp->vendor_id = attr->vendor_id;
	resp->vendor_part_id = attr->vendor_part_id;
	resp->hw_ver =
 attr->hw_ver;
	resp->max_qp = attr->max_qp;
	resp->max_qp_wr = attr->max_qp_wr;
	resp->device_cap_flags = (u32)(attr->device_cap_flags);
	resp->max_sge = attr->max_sge;
	resp->max_sge_rd = attr->max_sge_rd;
	resp->max_cq = attr->max_cq;
	resp->max_cqe = attr->max_cqe;
	resp->max_mr = attr->max_mr;
	resp->max_pd = attr->max_pd;
	resp->max_qp_rd_atom = attr->max_qp_rd_atom;
	resp->max_ee_rd_atom = attr->max_ee_rd_atom;
	resp->max_res_rd_atom = attr->max_res_rd_atom;
	resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
	resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
	resp->atomic_cap = attr->atomic_cap;
	resp->max_ee = attr->max_ee;
	resp->max_rdd = attr->max_rdd;
	resp->max_mw = attr->max_mw;
	resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
	resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
	resp->max_mcast_grp = attr->max_mcast_grp;
	resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
	resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
	resp->max_ah = attr->max_ah;
	resp->max_fmr = attr->max_fmr;
	resp->max_map_per_fmr = attr->max_map_per_fmr;
	resp->max_srq = attr->max_srq;
	resp->max_srq_wr = attr->max_srq_wr;
	resp->max_srq_sge = attr->max_srq_sge;
	resp->max_pkeys = attr->max_pkeys;
	resp->local_ca_ack_delay = attr->local_ca_ack_delay;
	resp->phys_port_cnt = ib_dev->phys_port_cnt;
}

/*
 * QUERY_DEVICE command: report the cached device attributes (ib_dev->attrs)
 * to userspace.  Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device cmd;
	struct ib_uverbs_query_device_resp resp;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);
	copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * QUERY_PORT command: query the given port's attributes from the device
 * and copy them to userspace.  Returns in_len on success.
 */
ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state	     = attr.state;
	resp.max_mtu	     = attr.max_mtu;
	resp.active_mtu	     = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz	     = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid	     = attr.lid;
	resp.sm_lid	     = attr.sm_lid;
	resp.lmc	     = attr.lmc;
	resp.max_vl_num	     = attr.max_vl_num;
	resp.sm_sl	     = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state	     = attr.phys_state;
	resp.link_layer	     = rdma_port_get_link_layer(ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * ALLOC_PD command: allocate a protection domain, register it in the PD
 * idr and on the ucontext's pd_list, and return its handle to userspace.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	/* Hold the write lock until the PD is fully published (live = 1). */
	down_write(&uobj->mutex);

	pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = ib_dev;
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

/*
 * DEALLOC_PD command: destroy a PD that has no users (usecnt == 0),
 * remove it from the idr and the ucontext list, and drop the uobject.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	int ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	pd = uobj->object;

	/* A PD still referenced by MRs/QPs/etc. cannot be destroyed. */
	if (atomic_read(&pd->usecnt)) {
		ret = -EBUSY;
		goto err_put;
	}

	ret = pd->device->dealloc_pd(uobj->object);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	if (ret)
		goto err_put;

	/* Mark dead before dropping the write lock so lookups fail. */
	uobj->live = 0;
	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;

err_put:
	put_uobj_write(uobj);
	return ret;
}

/*
 * Per-device mapping from backing inode to shared XRCD, kept in an
 * rbtree keyed by inode pointer (protected by the device's
 * xrcd_tree_mutex — see callers).
 */
struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

/*
 * Insert an inode -> xrcd mapping.  Takes an inode reference (igrab)
 * on success.  Returns 0, -ENOMEM, or -EEXIST if the inode is already
 * mapped.
 */
static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

/* Find the table entry for 'inode', or NULL if none. */
static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

/* Return the XRCD mapped to 'inode', or NULL if not present. */
static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

/*
 * Remove the inode's mapping (if any), dropping the inode reference
 * taken by xrcd_table_insert().
 */
static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;
	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

/*
 * OPEN_XRCD command: open (or create) an XRC domain.  If cmd.fd names a
 * file, its inode keys the per-device table so all processes opening the
 * same file share one XRCD; cmd.fd == -1 creates an anonymous XRCD.
 * All table manipulation happens under the device's xrcd_tree_mutex.
 * Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd cmd;
	struct ib_uverbs_open_xrcd_resp resp;
	struct ib_udata udata;
	struct ib_uxrcd_object *obj;
	struct ib_xrcd *xrcd = NULL;
	struct fd f = {NULL};
	struct inode *inode = NULL;
	int ret = 0;
	int new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = f.file->f_dentry->d_inode;
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor.  Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		/* O_EXCL demands that the XRCD did not already exist. */
		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode   = inode;
		xrcd->device  = ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

/*
 * CLOSE_XRCD command: drop this process's handle on an XRCD.  The XRCD
 * itself is only deallocated when it has no inode sharing it or the
 * shared use count drops to zero; -EBUSY if target QPs still reference
 * it.  Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject *uobj;
	struct ib_xrcd *xrcd = NULL;
	struct inode *inode = NULL;
	struct ib_uxrcd_object *obj;
	int live;
	int ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	/* Deallocation failed: restore the shared use count. */
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}

/*
 * Final-cleanup path (non-command): drop one shared use of 'xrcd' and
 * deallocate it, removing its inode mapping, when the last use is gone.
 */
void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

/*
 * REG_MR command: register a user memory region on a PD and return its
 * lkey/rkey/handle.  Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 struct ib_device *ib_dev,
			 const char
 __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata udata;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mr *mr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	/* start and hca_va must share the same offset within a page. */
	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	/* On-demand paging requires explicit device support. */
	if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			ret = -EINVAL;
			goto err_put;
		}
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

/*
 * REREG_MR command: re-register an existing MR — change its translation
 * (start/length/hca_va), access flags, and/or PD as selected by
 * cmd.flags.  Returns in_len on success, negative errno on failure.
 */
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata udata;
	struct ib_pd *pd = NULL;
	struct ib_mr *mr;
	struct ib_pd *old_pd;
	int ret;
	struct ib_uobject *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);

	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			/* Move the MR's PD use count to the new PD. */
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:

	put_uobj_write(mr->uobject);

	return ret;
}

/*
 * DEREG_MR command: deregister an MR, remove it from the idr and the
 * ucontext's mr_list, and drop its uobject.  Returns in_len on success.
 */
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr *mr;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * ALLOC_MW command: allocate a memory window of cmd.mw_type on a PD and
 * return its rkey/handle.  Returns in_len on success, negative errno on
 * failure.
 */
ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject *uobj;
	struct ib_pd *pd;
	struct ib_mw *mw;
	struct ib_udata udata;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	/* Unlike most commands, in_len here still includes the cmd header. */
	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof(resp));

	mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	uverbs_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

/*
 * DEALLOC_MW command: destroy a memory window, remove it from the idr
 * and the ucontext's mw_list, and drop its uobject.  Returns in_len on
 * success, negative errno on failure.
 */
ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw *mw;
	struct ib_uobject *uobj;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = uverbs_dealloc_mw(mw);
	if
(!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL handler: create a completion
 * event channel and return its file descriptor to userspace.
 *
 * The fd is reserved first so its number can be reported in the response
 * before the struct file is actually installed; fd_install() happens last,
 * after the copy to userspace has succeeded, so no half-built fd is ever
 * visible to the process.
 */
ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel      cmd;
	struct ib_uverbs_create_comp_channel_resp resp;
	struct file                              *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, ib_dev, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}

/*
 * Common CQ-creation core shared by the legacy and extended create-CQ
 * commands.  The caller supplies cmd_sz (how much of the extended command
 * struct userspace actually sent, gating access to newer fields such as
 * cmd->flags) and a callback that writes the response in the correct ABI
 * shape.  Returns the new ib_ucq_object or an ERR_PTR.
 */
static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw,
				       struct ib_uverbs_ex_create_cq *cmd,
				       size_t cmd_sz,
				       int (*cb)(struct ib_uverbs_file *file,
						 struct ib_ucq_object *obj,
						 struct ib_uverbs_ex_create_cq_resp *resp,
						 struct ib_udata *udata,
						 void *context),
				       void *context)
{
	struct ib_ucq_object              *obj;
	struct ib_uverbs_event_file       *ev_file = NULL;
	struct ib_cq                      *cq;
	int                                ret;
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr             attr = {};

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_uobj(&obj->uobject, cmd->user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	/* comp_channel < 0 means "no completion channel" in this ABI. */
	if (cmd->comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file           = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	attr.cqe = cmd->cqe;
	attr.comp_vector = cmd->comp_vector;

	/* Only trust cmd->flags if userspace's command was new enough to carry it. */
	if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
		attr.flags = cmd->flags;

	cq = ib_dev->create_cq(ib_dev, &attr,
			       file->ucontext, uhw);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.base.cq_handle = obj->uobject.id;
	resp.base.cqe       = cq->cqe;

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	/* Caller-specific response copy (legacy vs. extended layout). */
	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return obj;

err_cb:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);

	return ERR_PTR(ret);
}

/* Response writer for the legacy create-CQ command: base struct only. */
static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
				  struct ib_ucq_object *obj,
				  struct ib_uverbs_ex_create_cq_resp *resp,
				  struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

/*
 * IB_USER_VERBS_CMD_CREATE_CQ handler: translate the legacy command into
 * the extended form and delegate to create_cq().
 */
ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_ex_create_cq   cmd_ex;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 ucore;
	struct ib_udata                 uhw;
	struct ib_ucq_object           *obj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));

	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.cqe = cmd.cqe;
	cmd_ex.comp_vector = cmd.comp_vector;
	cmd_ex.comp_channel = cmd.comp_channel;

	/* cmd_sz covers only the legacy fields, so create_cq ignores ex-only ones. */
	obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), comp_channel) +
			sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
			NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return in_len;
}

/* Response writer for the extended create-CQ command: full resp struct. */
static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
				     struct ib_ucq_object *obj,
				     struct ib_uverbs_ex_create_cq_resp *resp,
				     struct ib_udata *ucore, void *context)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

/*
 * IB_USER_VERBS_EX_CMD_CREATE_CQ handler: validate the extended command
 * (no unknown comp_mask bits, reserved must be zero) and delegate to
 * create_cq().
 */
int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_uverbs_ex_create_cq      cmd;
	struct ib_ucq_object              *obj;
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	/* No comp_mask extensions are defined; reject anything set. */
	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_cq_cb, NULL);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return 0;
}

/*
 * IB_USER_VERBS_CMD_RESIZE_CQ handler: resize a CQ via the device's
 * resize_cq method.  Note that only resp.cqe is copied back to userspace
 * (sizeof resp.cqe, not sizeof resp) — the response struct's remaining
 * bytes are never written.
 */
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq      cmd;
	struct ib_uverbs_resize_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_cq                   *cq;
	int                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}

/*
 * Convert a kernel ib_wc into the userspace ABI struct ib_uverbs_wc and
 * copy it out.  Every field is written explicitly (reserved zeroed), so no
 * kernel stack bytes leak to userspace.
 */
static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id          = wc->wr_id;
	tmp.status         = wc->status;
	tmp.opcode         = wc->opcode;
	tmp.vendor_err     = wc->vendor_err;
	tmp.byte_len       = wc->byte_len;
	tmp.ex.imm_data    = (__u32 __force) wc->ex.imm_data;
	tmp.qp_num         = wc->qp->qp_num;
	tmp.src_qp         = wc->src_qp;
	tmp.wc_flags       = wc->wc_flags;
	tmp.pkey_index     = wc->pkey_index;
	tmp.slid           = wc->slid;
	tmp.sl             = wc->sl;
	tmp.dlid_path_bits = wc->dlid_path_bits;
	tmp.port_num       = wc->port_num;
	tmp.reserved       = 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

/*
 * IB_USER_VERBS_CMD_POLL_CQ handler: poll up to cmd.ne completions one at
 * a time, streaming each WC to userspace after the response header slot,
 * then write the header (with the final count) last.
 */
ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq      cmd;
	struct ib_uverbs_poll_cq_resp resp;
	u8 __user                    *header_ptr;
	u8 __user                    *data_ptr;
	struct ib_cq                 *cq;
	struct ib_wc                  wc;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t
ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
			struct ib_device *ib_dev,
			const char __user *buf, int in_len,
			int out_len)
{
	/*
	 * IB_USER_VERBS_CMD_REQ_NOTIFY_CQ handler: arm the CQ for either
	 * solicited-only or next-completion notification.
	 */
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

/*
 * IB_USER_VERBS_CMD_DESTROY_CQ handler: destroy a CQ, release its pending
 * events, and report how many completion/async events were delivered so
 * userspace can drain its event queue accounting.
 */
ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject               *uobj;
	struct ib_cq                    *cq;
	struct ib_ucq_object            *obj;
	struct ib_uverbs_event_file     *ev_file;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	/* Event counters are read before the final reference is dropped. */
	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * Common QP-creation core shared by the legacy and extended create-QP
 * commands.  cmd_sz tells how much of the extended command userspace
 * actually sent, which gates access to newer fields (rwq_ind_tbl_handle,
 * reserved1, create_flags); bytes past the known struct must be zero.
 * XRC_TGT QPs take an XRCD instead of a PD and go through ib_create_qp();
 * all other types call the device's create_qp directly and initialize the
 * kernel-side QP fields and refcounts here.  Returns 0 or a negative errno.
 */
static int create_qp(struct ib_uverbs_file *file,
		     struct ib_udata *ucore,
		     struct ib_udata *uhw,
		     struct ib_uverbs_ex_create_qp *cmd,
		     size_t cmd_sz,
		     int (*cb)(struct ib_uverbs_file *file,
			       struct ib_uverbs_ex_create_qp_resp *resp,
			       struct ib_udata *udata),
		     void *context)
{
	struct ib_uqp_object              *obj;
	struct ib_device                  *device;
	struct ib_pd                      *pd = NULL;
	struct ib_xrcd                    *xrcd = NULL;
	struct ib_uobject                 *uninitialized_var(xrcd_uobj);
	struct ib_cq                      *scq = NULL, *rcq = NULL;
	struct ib_srq                     *srq = NULL;
	struct ib_qp                      *qp;
	char                              *buf;
	struct ib_qp_init_attr             attr = {};
	struct ib_uverbs_ex_create_qp_resp resp;
	int                                ret;
	struct ib_rwq_ind_table *ind_tbl = NULL;
	bool has_sq = true;

	/* Raw packet QPs give direct wire access; require privilege. */
	if (cmd->qp_type == IB_QPT_RAW_PACKET && priv_check(curthread, PRIV_NET_RAW) != 0)
		return -EPERM;

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext,
		  &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);
	if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
		      sizeof(cmd->rwq_ind_tbl_handle) &&
		(cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
		ind_tbl = idr_read_rwq_indirection_table(cmd->rwq_ind_tbl_handle,
							 file->ucontext);
		if (!ind_tbl) {
			ret = -EINVAL;
			goto err_put;
		}

		attr.rwq_ind_tbl = ind_tbl;
	}

	if ((cmd_sz >= offsetof(typeof(*cmd), reserved1) +
		       sizeof(cmd->reserved1)) && cmd->reserved1) {
		ret = -EOPNOTSUPP;
		goto err_put;
	}

	/* An indirection table replaces the receive side entirely. */
	if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
		ret = -EINVAL;
		goto err_put;
	}

	if (ind_tbl && !cmd->max_send_wr)
		has_sq = false;

	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		/* For XRC_TGT, pd_handle carries the XRCD handle instead. */
		xrcd = idr_read_xrcd(cmd->pd_handle, file->ucontext,
				     &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd->qp_type == IB_QPT_XRC_INI) {
			cmd->max_recv_wr = 0;
			cmd->max_recv_sge = 0;
		} else {
			if (cmd->is_srq) {
				srq = idr_read_srq(cmd->srq_handle,
						   file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (!ind_tbl) {
				if (cmd->recv_cq_handle != cmd->send_cq_handle) {
					rcq = idr_read_cq(cmd->recv_cq_handle,
							  file->ucontext, 0);
					if (!rcq) {
						ret = -EINVAL;
						goto err_put;
					}
				}
			}
		}

		if (has_sq)
			scq = idr_read_cq(cmd->send_cq_handle, file->ucontext, !!rcq);
		/* Same CQ for send and receive when no distinct rcq was given. */
		if (!ind_tbl)
			rcq = rcq ?: scq;
		pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
		if (!pd || (!scq && has_sq)) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd          = xrcd;
	attr.sq_sig_type   = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
					       IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd->qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd->max_send_wr;
	attr.cap.max_recv_wr     = cmd->max_recv_wr;
	attr.cap.max_send_sge    = cmd->max_send_sge;
	attr.cap.max_recv_sge    = cmd->max_recv_sge;
	attr.cap.max_inline_data = cmd->max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
		      sizeof(cmd->create_flags))
		attr.create_flags = cmd->create_flags;

	if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
				  IB_QP_CREATE_CROSS_CHANNEL |
				  IB_QP_CREATE_MANAGED_SEND |
				  IB_QP_CREATE_MANAGED_RECV |
				  IB_QP_CREATE_SCATTER_FCS)) {
		ret = -EINVAL;
		goto err_put;
	}

	/* Any trailing bytes beyond the known command must all be zero. */
	buf = (char *)cmd + sizeof(*cmd);
	if (cmd_sz > sizeof(*cmd))
		if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
					     cmd_sz - sizeof(*cmd) - 1))) {
			ret = -EINVAL;
			goto err_put;
		}

	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, uhw);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp       = qp;
		qp->device        = device;
		qp->pd            = pd;
		qp->send_cq       = attr.send_cq;
		qp->recv_cq       = attr.recv_cq;
		qp->srq           = attr.srq;
		qp->rwq_ind_tbl   = ind_tbl;
		qp->event_handler = attr.event_handler;
		qp->qp_context    = attr.qp_context;
		qp->qp_type       = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.base.qpn             = qp->qp_num;
	resp.base.qp_handle       = obj->uevent.uobject.id;
	resp.base.max_recv_sge    = attr.cap.max_recv_sge;
	resp.base.max_send_sge    = attr.cap.max_send_sge;
	resp.base.max_recv_wr     = attr.cap.max_recv_wr;
	resp.base.max_send_wr     = attr.cap.max_send_wr;
	resp.base.max_inline_data = attr.cap.max_inline_data;

	resp.response_length = offsetof(typeof(resp), response_length) +
			       sizeof(resp.response_length);

	ret = cb(file, &resp, ucore);
	if (ret)
		goto err_cb;

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	/* Drop the temporary lookup references; usecnt holds the long-term pins. */
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);
	if (ind_tbl)
		put_rwq_indirection_table_read(ind_tbl);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;
err_cb:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);
	if (ind_tbl)
		put_rwq_indirection_table_read(ind_tbl);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

/* Response writer for the legacy create-QP command: base struct only. */
static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
				  struct ib_uverbs_ex_create_qp_resp *resp,
				  struct ib_udata
*ucore)
{
	if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
		return -EFAULT;

	return 0;
}

/*
 * IB_USER_VERBS_CMD_CREATE_QP handler: translate the legacy command into
 * the extended form and delegate to create_qp().
 */
ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp    cmd;
	struct ib_uverbs_ex_create_qp cmd_ex;
	struct ib_udata               ucore;
	struct ib_udata               uhw;
	ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
	int                           err;

	if (out_len < resp_size)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
		   resp_size);
	INIT_UDATA(&uhw, buf + sizeof(cmd),
		   (unsigned long)cmd.response + resp_size,
		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - resp_size);

	memset(&cmd_ex, 0, sizeof(cmd_ex));
	cmd_ex.user_handle = cmd.user_handle;
	cmd_ex.pd_handle = cmd.pd_handle;
	cmd_ex.send_cq_handle = cmd.send_cq_handle;
	cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
	cmd_ex.srq_handle = cmd.srq_handle;
	cmd_ex.max_send_wr = cmd.max_send_wr;
	cmd_ex.max_recv_wr = cmd.max_recv_wr;
	cmd_ex.max_send_sge = cmd.max_send_sge;
	cmd_ex.max_recv_sge = cmd.max_recv_sge;
	cmd_ex.max_inline_data = cmd.max_inline_data;
	cmd_ex.sq_sig_all = cmd.sq_sig_all;
	cmd_ex.qp_type = cmd.qp_type;
	cmd_ex.is_srq = cmd.is_srq;

	/* cmd_sz covers only the legacy fields; ex-only fields are ignored. */
	err = create_qp(file, &ucore, &uhw, &cmd_ex,
			offsetof(typeof(cmd_ex), is_srq) +
			sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
			NULL);

	if (err)
		return err;

	return in_len;
}

/* Response writer for the extended create-QP command: full resp struct. */
static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
				     struct ib_uverbs_ex_create_qp_resp *resp,
				     struct ib_udata *ucore)
{
	if (ib_copy_to_udata(ucore, resp, resp->response_length))
		return -EFAULT;

	return 0;
}

/*
 * IB_USER_VERBS_EX_CMD_CREATE_QP handler: validate the extended command
 * (supported comp_mask bits only, reserved must be zero) and delegate to
 * create_qp().
 */
int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_qp_resp resp;
	struct ib_uverbs_ex_create_qp cmd = {0};
	int err;

	if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
			    sizeof(cmd.comp_mask)))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (err)
		return err;

	if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	if (ucore->outlen < (offsetof(typeof(resp), response_length) +
			     sizeof(resp.response_length)))
		return -ENOSPC;

	err = create_qp(file, ucore, uhw, &cmd,
			min(ucore->inlen, sizeof(cmd)),
			ib_uverbs_ex_create_qp_cb, NULL);

	if (err)
		return err;

	return 0;
}

/*
 * IB_USER_VERBS_CMD_OPEN_QP handler: open a shareable (XRC) QP that lives
 * in an XRCD.  Note cmd.pd_handle carries the XRCD handle for this command.
 */
ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  struct ib_device *ib_dev,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd                 *xrcd;
	struct ib_uobject              *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret =
-EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	/* Pin the XRCD for the lifetime of the opened QP. */
	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

/*
 * IB_USER_VERBS_CMD_QUERY_QP handler: query the QP's attributes and
 * init attributes and marshal them field by field into the response ABI
 * struct.  attr/init_attr are heap-allocated because together they are
 * large for the kernel stack.
 */
ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                  *qp;
	struct ib_qp_attr             *attr;
	struct ib_qp_init_attr        *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state           = attr->qp_state;
	resp.cur_qp_state       = attr->cur_qp_state;
	resp.path_mtu           = attr->path_mtu;
	resp.path_mig_state     = attr->path_mig_state;
	resp.qkey               = attr->qkey;
	resp.rq_psn             = attr->rq_psn;
	resp.sq_psn             = attr->sq_psn;
	resp.dest_qp_num        = attr->dest_qp_num;
	resp.qp_access_flags    = attr->qp_access_flags;
	resp.pkey_index         = attr->pkey_index;
	resp.alt_pkey_index     = attr->alt_pkey_index;
	resp.sq_draining        = attr->sq_draining;
	resp.max_rd_atomic      = attr->max_rd_atomic;
	resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	resp.min_rnr_timer      = attr->min_rnr_timer;
	resp.port_num           = attr->port_num;
	resp.timeout            = attr->timeout;
	resp.retry_cnt          = attr->retry_cnt;
	resp.rnr_retry          = attr->rnr_retry;
	resp.alt_port_num       = attr->alt_port_num;
	resp.alt_timeout        = attr->alt_timeout;

	/* Primary path address vector. */
	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label    = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index    = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit     = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid          = attr->ah_attr.dlid;
	resp.dest.sl            = attr->ah_attr.sl;
	resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
	resp.dest.static_rate   = attr->ah_attr.static_rate;
	resp.dest.is_global     = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num      = attr->ah_attr.port_num;

	/* Alternate path address vector. */
	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	resp.max_send_wr     = init_attr->cap.max_send_wr;
	resp.max_recv_wr     = init_attr->cap.max_recv_wr;
	resp.max_send_sge    = init_attr->cap.max_send_sge;
	resp.max_recv_sge    = init_attr->cap.max_recv_sge;
	resp.max_inline_data = init_attr->cap.max_inline_data;
	resp.sq_sig_all      = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ?
ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

/*
 * IB_USER_VERBS_CMD_MODIFY_QP handler: copy the userspace attribute set
 * into a kernel ib_qp_attr (including both address vectors) and apply it.
 * Real QPs go through the device's modify_qp with udata; shared (XRC)
 * QPs whose real_qp differs use ib_modify_qp() on the kernel side only.
 */
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state            = cmd.qp_state;
	attr->cur_qp_state        = cmd.cur_qp_state;
	attr->path_mtu            = cmd.path_mtu;
	attr->path_mig_state      = cmd.path_mig_state;
	attr->qkey                = cmd.qkey;
	attr->rq_psn              = cmd.rq_psn;
	attr->sq_psn              = cmd.sq_psn;
	attr->dest_qp_num         = cmd.dest_qp_num;
	attr->qp_access_flags     = cmd.qp_access_flags;
	attr->pkey_index          = cmd.pkey_index;
	attr->alt_pkey_index      = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic       = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer       = cmd.min_rnr_timer;
	attr->port_num            = cmd.port_num;
	attr->timeout             = cmd.timeout;
	attr->retry_cnt           = cmd.retry_cnt;
	attr->rnr_retry           = cmd.rnr_retry;
	attr->alt_port_num        = cmd.alt_port_num;
	attr->alt_timeout         = cmd.alt_timeout;

	/* Primary path address vector. */
	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label    = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index    = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit     = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
	attr->ah_attr.dlid              = cmd.dest.dlid;
	attr->ah_attr.sl                = cmd.dest.sl;
	attr->ah_attr.src_path_bits     = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate       = cmd.dest.static_rate;
	attr->ah_attr.ah_flags          = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num          = cmd.dest.port_num;

	/* Alternate path address vector. */
	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}

/*
 * IB_USER_VERBS_CMD_DESTROY_QP handler: destroy a QP.  Refused with
 * -EBUSY while multicast attachments remain; on success drops the XRCD
 * pin (if any), unlinks the uobject, releases pending async events, and
 * reports the event count back to userspace.
 */
ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject               *uobj;
	struct ib_qp                    *qp;
	struct ib_uqp_object            *obj;
	int                              ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}
/*
 * Allocate a kernel work request of wr_size bytes followed (after alignment
 * to the size of an ib_sge) by room for num_sge scatter/gather entries, so
 * the WR and its sg_list live in one allocation.
 */
static void *alloc_wr(size_t wr_size, __u32 num_sge)
{
	return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
		       num_sge * sizeof (struct ib_sge), GFP_KERNEL);
};

/*
 * Post send work requests to a QP.  The user buffer holds the command header
 * followed by wr_count user-format WRs of wqe_size bytes each, followed by
 * sge_count scatter/gather entries.  Each user WR is converted to the
 * opcode-specific kernel WR type before being handed to the driver.
 * On success returns in_len; resp.bad_wr reports the index (1-based count)
 * of the WR the driver rejected, if any.
 */
ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	ssize_t                         ret = -EINVAL;
	size_t                          next_size;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* The input must be big enough to hold all WRs and all SGEs. */
	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	/* Scratch buffer: each user WR is copied in here one at a time. */
	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		/* Running total of SGEs must stay within what was supplied. */
		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		if (is_ud) {
			struct ib_ud_wr *ud;

			/* UD QPs only support SEND opcodes. */
			if (user_wr->opcode != IB_WR_SEND &&
			    user_wr->opcode != IB_WR_SEND_WITH_IMM) {
				ret = -EINVAL;
				goto out_put;
			}

			next_size = sizeof(*ud);
			ud = alloc_wr(next_size, user_wr->num_sge);
			if (!ud) {
				ret = -ENOMEM;
				goto out_put;
			}

			/* The AH reference is dropped in the unwind loop at
			 * out_put below. */
			ud->ah = idr_read_ah(user_wr->wr.ud.ah, file->ucontext);
			if (!ud->ah) {
				kfree(ud);
				ret = -EINVAL;
				goto out_put;
			}
			ud->remote_qpn = user_wr->wr.ud.remote_qpn;
			ud->remote_qkey = user_wr->wr.ud.remote_qkey;

			next = &ud->wr;
		} else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
			   user_wr->opcode == IB_WR_RDMA_WRITE ||
			   user_wr->opcode == IB_WR_RDMA_READ) {
			struct ib_rdma_wr *rdma;

			next_size = sizeof(*rdma);
			rdma = alloc_wr(next_size, user_wr->num_sge);
			if (!rdma) {
				ret = -ENOMEM;
				goto out_put;
			}

			rdma->remote_addr = user_wr->wr.rdma.remote_addr;
			rdma->rkey = user_wr->wr.rdma.rkey;

			next = &rdma->wr;
		} else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			   user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			struct ib_atomic_wr *atomic;

			next_size = sizeof(*atomic);
			atomic = alloc_wr(next_size, user_wr->num_sge);
			if (!atomic) {
				ret = -ENOMEM;
				goto out_put;
			}

			atomic->remote_addr = user_wr->wr.atomic.remote_addr;
			atomic->compare_add = user_wr->wr.atomic.compare_add;
			atomic->swap = user_wr->wr.atomic.swap;
			atomic->rkey = user_wr->wr.atomic.rkey;

			next = &atomic->wr;
		} else if (user_wr->opcode == IB_WR_SEND ||
			   user_wr->opcode == IB_WR_SEND_WITH_IMM ||
			   user_wr->opcode == IB_WR_SEND_WITH_INV) {
			/* Plain sends need no opcode-specific payload. */
			next_size = sizeof(*next);
			next = alloc_wr(next_size, user_wr->num_sge);
			if (!next) {
				ret = -ENOMEM;
				goto out_put;
			}
		} else {
			ret = -EINVAL;
			goto out_put;
		}

		if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
		    user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
			next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
			next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
		}

		/* Append to the singly linked WR chain rooted at wr. */
		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (next->num_sge) {
			/* sg_list lives in the same allocation, right after
			 * the (aligned) WR struct — see alloc_wr(). */
			next->sg_list = (void *)((char *)next +
					ALIGN(next_size, sizeof(struct ib_sge)));
			if (copy_from_user(next->sg_list,
					   (const char *)buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		/* Count how far we got before the WR the driver rejected. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	/* Free the WR chain; for UD WRs also drop the AH references. */
	while (wr) {
		if (is_ud && ud_wr(wr)->ah)
			put_ah_read(ud_wr(wr)->ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}

/*
 * Convert a user-space array of receive WRs (wr_count entries of wqe_size
 * bytes, followed by sge_count SGEs) into a kernel-space linked list of
 * struct ib_recv_wr.  Returns the list head or an ERR_PTR on failure;
 * shared by post_recv and post_srq_recv.
 */
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	/* Scratch buffer reused for each user WR. */
	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		/* WR and its sg_list share one allocation. */
		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *)((char *)next +
				ALIGN(sizeof *next, sizeof (struct ib_sge)));
			if (copy_from_user(next->sg_list,
					   (const char *)buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	/* Unwind the partially built chain. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

/*
 * Post receive work requests to a QP; the caller's WRs are unmarshalled by
 * ib_uverbs_unmarshall_recv() above.  Returns in_len on success.
 */
ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		/* Report how many WRs were consumed up to the bad one. */
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	/* Free the unmarshalled WR chain in every case. */
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

/*
 * Post receive work requests to an SRQ.  Same structure as
 * ib_uverbs_post_recv() but targets a shared receive queue.
 */
ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				struct ib_device *ib_dev,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

/*
 * Create an address handle on the given PD from the user-supplied address
 * attributes and return its handle.  Returns in_len on success.
 */
ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	/* The uobject is created write-locked and only published (live = 1)
	 * once fully initialized. */
	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid	       = cmd.attr.dlid;
	attr.sl		       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ?
				 IB_AH_GRH : 0;
	attr.port_num	       = cmd.attr.port_num;
	/* GRH fields are copied unconditionally; ah_flags above decides
	 * whether the GRH is actually used. */
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memset(&attr.dmac, 0, sizeof(attr.dmac));
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	/* Publish the object only after everything succeeded. */
	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

/*
 * Destroy the AH identified by cmd.ah_handle.  Returns in_len on success.
 */
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah		   *ah;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	/* Keep the uobject intact if the hardware destroy failed. */
	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

/*
 * Attach a QP to a multicast group (gid, mlid).  The attachment is recorded
 * on the QP's uobject so destroy_qp can refuse while attachments remain.
 * Attaching an already attached (gid, mlid) pair is a successful no-op.
 */
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	/* Already attached?  Then succeed without a second attach. */
	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

/*
 * Detach a QP from a multicast group and drop the matching bookkeeping
 * entry from the uobject's mcast_list.
 */
ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       struct ib_device *ib_dev,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
{
	/* Returns user space filter size, includes padding */
	/* (The user spec holds a value block and a mask block of equal
	 * size after the header, hence the division by two.) */
	return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}

static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
				u16 ib_real_filter_sz)
{
	/*
	 * User space filter structures must be 64 bit aligned, otherwise this
	 * may pass, but we won't handle additional new attributes.
	 */

	/* A user filter longer than the kernel's must be all-zero in the
	 * excess tail; otherwise it requests attributes we cannot honor. */
	if (kern_filter_size > ib_real_filter_sz) {
		if (memchr_inv((char *)kern_spec_filter +
			       ib_real_filter_sz, 0,
			       kern_filter_size - ib_real_filter_sz))
			return -EINVAL;
		return ib_real_filter_sz;
	}
	return kern_filter_size;
}

/*
 * Translate one user-space flow spec (header + value block + mask block)
 * into the corresponding kernel union ib_flow_spec.  Returns 0 or -EINVAL
 * for unknown types, bad sizes, or out-of-range fields.
 */
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	ssize_t actual_filter_sz;
	ssize_t kern_filter_sz;
	ssize_t ib_filter_sz;
	void *kern_spec_mask;
	void *kern_spec_val;

	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
	/* User flow spec size must be aligned to 4 bytes */
	if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
		return -EINVAL;

	/* Value block follows the header; mask block follows the values. */
	kern_spec_val = (char *)kern_spec +
		sizeof(struct ib_uverbs_flow_spec_hdr);
	kern_spec_mask = (char *)kern_spec_val + kern_filter_sz;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_eth);
		memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
		memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
		break;
	case IB_FLOW_SPEC_IPV6:
		ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
		memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);

		/* The IPv6 flow label is only 20 bits wide. */
		if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
		    (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
			return -EINVAL;
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
		actual_filter_sz = spec_filter_size(kern_spec_mask,
						    kern_filter_sz,
						    ib_filter_sz);
		if (actual_filter_sz <= 0)
			return -EINVAL;
		ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
		memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
		memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Extended command: create a work queue (WQ) on the given PD/CQ and return
 * its handle and actual capacities.  Unknown trailing input must be zero.
 */
int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_wq	  cmd = {};
	struct ib_uverbs_ex_create_wq_resp resp = {};
	struct ib_uwq_object           *obj;
	int err = 0;
	struct ib_cq *cq;
	struct ib_pd *pd;
	struct ib_wq *wq;
	struct ib_wq_init_attr wq_init_attr = {};
	size_t required_cmd_sz;
	size_t required_resp_len;

	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	/* Input longer than we understand is only OK if the tail is zero. */
	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	err =
ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); 3218 if (err) 3219 return err; 3220 3221 if (cmd.comp_mask) 3222 return -EOPNOTSUPP; 3223 3224 obj = kmalloc(sizeof(*obj), GFP_KERNEL); 3225 if (!obj) 3226 return -ENOMEM; 3227 3228 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, 3229 &wq_lock_class); 3230 down_write(&obj->uevent.uobject.mutex); 3231 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 3232 if (!pd) { 3233 err = -EINVAL; 3234 goto err_uobj; 3235 } 3236 3237 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); 3238 if (!cq) { 3239 err = -EINVAL; 3240 goto err_put_pd; 3241 } 3242 3243 wq_init_attr.cq = cq; 3244 wq_init_attr.max_sge = cmd.max_sge; 3245 wq_init_attr.max_wr = cmd.max_wr; 3246 wq_init_attr.wq_context = file; 3247 wq_init_attr.wq_type = cmd.wq_type; 3248 wq_init_attr.event_handler = ib_uverbs_wq_event_handler; 3249 obj->uevent.events_reported = 0; 3250 INIT_LIST_HEAD(&obj->uevent.event_list); 3251 wq = pd->device->create_wq(pd, &wq_init_attr, uhw); 3252 if (IS_ERR(wq)) { 3253 err = PTR_ERR(wq); 3254 goto err_put_cq; 3255 } 3256 3257 wq->uobject = &obj->uevent.uobject; 3258 obj->uevent.uobject.object = wq; 3259 wq->wq_type = wq_init_attr.wq_type; 3260 wq->cq = cq; 3261 wq->pd = pd; 3262 wq->device = pd->device; 3263 wq->wq_context = wq_init_attr.wq_context; 3264 atomic_set(&wq->usecnt, 0); 3265 atomic_inc(&pd->usecnt); 3266 atomic_inc(&cq->usecnt); 3267 wq->uobject = &obj->uevent.uobject; 3268 obj->uevent.uobject.object = wq; 3269 err = idr_add_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject); 3270 if (err) 3271 goto destroy_wq; 3272 3273 memset(&resp, 0, sizeof(resp)); 3274 resp.wq_handle = obj->uevent.uobject.id; 3275 resp.max_sge = wq_init_attr.max_sge; 3276 resp.max_wr = wq_init_attr.max_wr; 3277 resp.wqn = wq->wq_num; 3278 resp.response_length = required_resp_len; 3279 err = ib_copy_to_udata(ucore, 3280 &resp, resp.response_length); 3281 if (err) 3282 goto err_copy; 3283 3284 put_pd_read(pd); 3285 
	put_cq_read(cq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->wq_list);
	mutex_unlock(&file->mutex);

	/* Publish the WQ uobject only after everything succeeded. */
	obj->uevent.uobject.live = 1;
	up_write(&obj->uevent.uobject.mutex);
	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject);
destroy_wq:
	ib_destroy_wq(wq);
err_put_cq:
	put_cq_read(cq);
err_put_pd:
	put_pd_read(pd);
err_uobj:
	put_uobj_write(&obj->uevent.uobject);

	return err;
}

/*
 * Extended command: destroy a WQ and report the async events delivered on
 * it.  Unknown trailing input must be zero.
 */
int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    struct ib_udata *ucore,
			    struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_wq	cmd = {};
	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
	struct ib_wq			*wq;
	struct ib_uobject		*uobj;
	struct ib_uwq_object		*obj;
	size_t required_cmd_sz;
	size_t required_resp_len;
	int				ret;

	required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
	required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	resp.response_length = required_resp_len;
	uobj = idr_write_uobj(&ib_uverbs_wq_idr, cmd.wq_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;

	wq = uobj->object;
	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
	ret = ib_destroy_wq(wq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);
	/* Keep the uobject intact if the hardware destroy failed. */
	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_wq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);
	resp.events_reported = obj->uevent.events_reported;
	put_uobj(uobj);

	ret = ib_copy_to_udata(ucore, &resp, resp.response_length);
	if (ret)
		return ret;

	return 0;
}

/*
 * Extended command: modify a WQ's state.  Only the state/current-state
 * attribute bits are supported.
 */
int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   struct ib_udata *ucore,
			   struct ib_udata *uhw)
{
	struct ib_uverbs_ex_modify_wq cmd = {};
	struct ib_wq *wq;
	struct ib_wq_attr wq_attr = {};
	size_t required_cmd_sz;
	int ret;

	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (!cmd.attr_mask)
		return -EINVAL;

	/* Reject any attribute bits beyond the two we support. */
	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE))
		return -EINVAL;

	wq = idr_read_wq(cmd.wq_handle, file->ucontext);
	if (!wq)
		return -EINVAL;

	wq_attr.curr_wq_state = cmd.curr_wq_state;
	wq_attr.wq_state = cmd.wq_state;
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
	put_wq_read(wq);
	return ret;
}

/*
 * Extended command: create a receive WQ indirection table from an array of
 * 2^log_ind_tbl_size WQ handles supplied after the command header.
 */
int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
				      struct ib_device *ib_dev,
				      struct ib_udata *ucore,
				      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
	struct ib_uverbs_ex_create_rwq_ind_table_resp  resp = {};
	struct ib_uobject		  *uobj;
	int err = 0;
	struct ib_rwq_ind_table_init_attr init_attr = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_wq	**wqs = NULL;
	u32 *wqs_handles = NULL;
	struct ib_wq	*wq = NULL;
	int i, j, num_read_wqs;
	u32 num_wq_handles;
	u32 expected_in_size;
	size_t required_cmd_sz_header;
	size_t required_resp_len;

	required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
	required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);

	if (ucore->inlen < required_cmd_sz_header)
		return -EINVAL;

	if (ucore->outlen < required_resp_len)
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
	if (err)
		return err;

	/* Advance the input cursor past the header; the rest of the input
	 * is the array of WQ handles. */
	ucore->inbuf = (const char *)ucore->inbuf + required_cmd_sz_header;
	ucore->inlen -= required_cmd_sz_header;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
		return -EINVAL;

	num_wq_handles = 1 << cmd.log_ind_tbl_size;
	expected_in_size = num_wq_handles * sizeof(__u32);
	if (num_wq_handles == 1)
		/* input size for wq handles is u64 aligned */
		expected_in_size += sizeof(__u32);

	if (ucore->inlen < expected_in_size)
		return -EINVAL;

	/* Extra input beyond the handle array must be zero. */
	if (ucore->inlen > expected_in_size &&
	    !ib_is_udata_cleared(ucore, expected_in_size,
				 ucore->inlen - expected_in_size))
		return -EOPNOTSUPP;

	wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
			      GFP_KERNEL);
	if (!wqs_handles)
		return -ENOMEM;

	err = ib_copy_from_udata(wqs_handles, ucore,
				 num_wq_handles * sizeof(__u32));
	if (err)
		goto err_free;

	wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
	if (!wqs) {
		err = -ENOMEM;
		goto err_free;
	}

	/* Look up and hold every WQ; num_read_wqs tracks how many we hold
	 * so the error path can release exactly those. */
	for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
			num_read_wqs++) {
		wq = idr_read_wq(wqs_handles[num_read_wqs], file->ucontext);
		if (!wq) {
			err = -EINVAL;
			goto put_wqs;
		}

		wqs[num_read_wqs] = wq;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto put_wqs;
	}

	init_uobj(uobj, 0, file->ucontext, &rwq_ind_table_lock_class);
	down_write(&uobj->mutex);
	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
		err = PTR_ERR(rwq_ind_tbl);
		goto err_uobj;
	}

	/* The table takes ownership of the wqs array (freed on destroy). */
	rwq_ind_tbl->ind_tbl = wqs;
	rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
	rwq_ind_tbl->uobject = uobj;
	uobj->object = rwq_ind_tbl;
	rwq_ind_tbl->device = ib_dev;
	atomic_set(&rwq_ind_tbl->usecnt, 0);

	for (i = 0; i < num_wq_handles; i++)
		atomic_inc(&wqs[i]->usecnt);

	err = idr_add_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
	if (err)
		goto destroy_ind_tbl;

	resp.ind_tbl_handle = uobj->id;
	resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
	resp.response_length = required_resp_len;

	err = ib_copy_to_udata(ucore,
			       &resp, resp.response_length);
	if (err)
		goto err_copy;

	kfree(wqs_handles);

	for (j = 0; j < num_read_wqs; j++)
		put_wq_read(wqs[j]);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rwq_ind_tbl_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);
destroy_ind_tbl:
	ib_destroy_rwq_ind_table(rwq_ind_tbl);
err_uobj:
	put_uobj_write(uobj);
put_wqs:
	for (j = 0; j < num_read_wqs; j++)
		put_wq_read(wqs[j]);
err_free:
	kfree(wqs_handles);
	kfree(wqs);
	return err;
}

/*
 * Extended command: destroy an RWQ indirection table and free the WQ
 * pointer array the create path handed over to it.
 */
int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
				       struct ib_device *ib_dev,
				       struct ib_udata *ucore,
				       struct ib_udata *uhw)
{
	struct ib_uverbs_ex_destroy_rwq_ind_table	cmd = {};
	struct ib_rwq_ind_table *rwq_ind_tbl;
	struct ib_uobject		*uobj;
	int			ret;
	struct ib_wq	**ind_tbl;
	size_t required_cmd_sz;

	required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);

	if (ucore->inlen < required_cmd_sz)
		return -EINVAL;

	if (ucore->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(ucore, sizeof(cmd),
				 ucore->inlen - sizeof(cmd)))
		return -EOPNOTSUPP;

	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EOPNOTSUPP;

	uobj = idr_write_uobj(&ib_uverbs_rwq_ind_tbl_idr, cmd.ind_tbl_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	rwq_ind_tbl = uobj->object;
	/* Save the array pointer before the table object is destroyed. */
	ind_tbl = rwq_ind_tbl->ind_tbl;

	ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	/* Keep the uobject intact if the hardware destroy failed. */
	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_rwq_ind_tbl_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	kfree(ind_tbl);
	return ret;
}

/*
 * Extended command: create a flow steering rule on a QP from a user flow
 * attribute followed by its list of flow specs.  Requires PRIV_NET_RAW.
 */
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		  *uobj;
	struct ib_flow			  *flow_id;
	struct ib_uverbs_flow_attr	  *kern_flow_attr;
	struct ib_flow_attr		  *flow_attr;
	struct ib_qp			  *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore,
				 sizeof(cmd));
	if (err)
		return err;

	/* Advance the input cursor; the flow specs follow the header. */
	ucore->inbuf = (const char *)ucore->inbuf + sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	/* Raw flow steering is a privileged operation. */
	if (priv_check(curthread, PRIV_NET_RAW) != 0)
		return -EPERM;

	if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
		return -EINVAL;

	/* DONT_TRAP makes no sense on the catch-all default rules. */
	if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
	     (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		/* Copy the variable-length spec list in after the header. */
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
			    sizeof(union ib_flow_spec), GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	/* Walk the user specs, converting each to a kernel spec; the loop
	 * also verifies that the declared sizes exactly consume
	 * cmd.flow_attr.size (checked after the loop). */
	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec = (char *)kern_spec + ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec = (char *)ib_spec + ((union ib_flow_spec *)ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;
err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
3782 ib_destroy_flow(flow_id); 3783err_free: 3784 kfree(flow_attr); 3785err_put: 3786 put_qp_read(qp); 3787err_uobj: 3788 put_uobj_write(uobj); 3789err_free_attr: 3790 if (cmd.flow_attr.num_of_specs) 3791 kfree(kern_flow_attr); 3792 return err; 3793} 3794 3795int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, 3796 struct ib_device *ib_dev, 3797 struct ib_udata *ucore, 3798 struct ib_udata *uhw) 3799{ 3800 struct ib_uverbs_destroy_flow cmd; 3801 struct ib_flow *flow_id; 3802 struct ib_uobject *uobj; 3803 int ret; 3804 3805 if (ucore->inlen < sizeof(cmd)) 3806 return -EINVAL; 3807 3808 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3809 if (ret) 3810 return ret; 3811 3812 if (cmd.comp_mask) 3813 return -EINVAL; 3814 3815 uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle, 3816 file->ucontext); 3817 if (!uobj) 3818 return -EINVAL; 3819 flow_id = uobj->object; 3820 3821 ret = ib_destroy_flow(flow_id); 3822 if (!ret) 3823 uobj->live = 0; 3824 3825 put_uobj_write(uobj); 3826 3827 idr_remove_uobj(&ib_uverbs_rule_idr, uobj); 3828 3829 mutex_lock(&file->mutex); 3830 list_del(&uobj->list); 3831 mutex_unlock(&file->mutex); 3832 3833 put_uobj(uobj); 3834 3835 return ret; 3836} 3837 3838static int __uverbs_create_xsrq(struct ib_uverbs_file *file, 3839 struct ib_device *ib_dev, 3840 struct ib_uverbs_create_xsrq *cmd, 3841 struct ib_udata *udata) 3842{ 3843 struct ib_uverbs_create_srq_resp resp; 3844 struct ib_usrq_object *obj; 3845 struct ib_pd *pd; 3846 struct ib_srq *srq; 3847 struct ib_uobject *uninitialized_var(xrcd_uobj); 3848 struct ib_srq_init_attr attr; 3849 int ret; 3850 3851 obj = kmalloc(sizeof *obj, GFP_KERNEL); 3852 if (!obj) 3853 return -ENOMEM; 3854 3855 init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class); 3856 down_write(&obj->uevent.uobject.mutex); 3857 3858 if (cmd->srq_type == IB_SRQT_XRC) { 3859 attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj); 3860 if (!attr.ext.xrc.xrcd) { 
			ret = -EINVAL;
			goto err;
		}

		/* Pin the XRCD for the lifetime of this SRQ. */
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type      = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		/* The SRQ now holds its own usecnt on the CQ and XRCD. */
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	/* Drop the lookup references; the usecnt refs taken above persist. */
	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	/* Publish the object only after it is fully linked. */
	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

/*
 * Legacy create-SRQ command: translate the basic command into the
 * extended xsrq form and share __uverbs_create_xsrq().
 */
ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

/* Extended create-SRQ command (supports XRC SRQs). */
ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
		   out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

/*
 * Modify an SRQ's attributes (max_wr / srq_limit, selected by
 * cmd.attr_mask) via the driver's modify_srq method.
 */
ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     struct ib_device *ib_dev,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	/* No response buffer for this command; output side is empty. */
	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}

/* Query an SRQ's current attributes and copy them back to userspace. */
ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    struct ib_device *ib_dev,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

/*
 * Destroy an SRQ: tear down the HW object first; only on success is the
 * uobject unpublished and unlinked.  For XRC SRQs the XRCD reference
 * taken at create time is dropped here.
 */
ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq               	 *srq;
	struct ib_uevent_object        	 *obj;
	int                         	  ret = -EINVAL;
	struct ib_usrq_object		 *us;
	enum ib_srq_type		  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	/* Cache the type: srq is freed by ib_destroy_srq() on success. */
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}
	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	/* Flush any async events still queued for this SRQ. */
	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}

/*
 * Extended query-device command.  The response is built incrementally:
 * each optional trailing field is filled in and counted into
 * resp.response_length only if the user's output buffer is large enough
 * to hold it, so old userspace receives exactly the prefix it asked for.
 */
int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
			      struct ib_device *ib_dev,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_ex_query_device_resp resp = { {0} };
	struct ib_uverbs_ex_query_device  cmd;
	struct ib_device_attr attr = {0};
	int err;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	if (cmd.comp_mask)
		return -EINVAL;

	if (cmd.reserved)
		return -EINVAL;

	/* The base (non-extended) portion of the response is mandatory. */
	resp.response_length = offsetof(typeof(resp), odp_caps);

	if (ucore->outlen < resp.response_length)
		return -ENOSPC;

	err = ib_dev->query_device(ib_dev, &attr, uhw);
	if (err)
		return err;

	copy_query_dev_fields(file, ib_dev, &resp.base, &attr);

	if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
		goto end;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	resp.odp_caps.general_caps = attr.odp_caps.general_caps;
	resp.odp_caps.per_transport_caps.rc_odp_caps =
		attr.odp_caps.per_transport_caps.rc_odp_caps;
	resp.odp_caps.per_transport_caps.uc_odp_caps =
		attr.odp_caps.per_transport_caps.uc_odp_caps;
	resp.odp_caps.per_transport_caps.ud_odp_caps =
		attr.odp_caps.per_transport_caps.ud_odp_caps;
#endif
	/* Counted even without ODP support: the field is reported as zero. */
	resp.response_length += sizeof(resp.odp_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
		goto end;

	resp.timestamp_mask = attr.timestamp_mask;
	resp.response_length += sizeof(resp.timestamp_mask);

	if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
		goto end;

	resp.hca_core_clock = attr.hca_core_clock;
	resp.response_length += sizeof(resp.hca_core_clock);

	if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
		goto end;

	resp.device_cap_flags_ex = attr.device_cap_flags;
	resp.response_length += sizeof(resp.device_cap_flags_ex);

	if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
		goto end;

	resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
	resp.rss_caps.max_rwq_indirection_tables =
		attr.rss_caps.max_rwq_indirection_tables;
	resp.rss_caps.max_rwq_indirection_table_size =
		attr.rss_caps.max_rwq_indirection_table_size;

	resp.response_length += sizeof(resp.rss_caps);

	if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
		goto end;

	resp.max_wq_type_rq = attr.max_wq_type_rq;
	resp.response_length += sizeof(resp.max_wq_type_rq);
end:
	err = ib_copy_to_udata(ucore, &resp, resp.response_length);
	return err;
}