1219820Sjeff/* 2219820Sjeff * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3219820Sjeff * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4219820Sjeff * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 5219820Sjeff * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 6219820Sjeff * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 7219820Sjeff * 8219820Sjeff * This software is available to you under a choice of one of two 9219820Sjeff * licenses. You may choose to be licensed under the terms of the GNU 10219820Sjeff * General Public License (GPL) Version 2, available from the file 11219820Sjeff * COPYING in the main directory of this source tree, or the 12219820Sjeff * OpenIB.org BSD license below: 13219820Sjeff * 14219820Sjeff * Redistribution and use in source and binary forms, with or 15219820Sjeff * without modification, are permitted provided that the following 16219820Sjeff * conditions are met: 17219820Sjeff * 18219820Sjeff * - Redistributions of source code must retain the above 19219820Sjeff * copyright notice, this list of conditions and the following 20219820Sjeff * disclaimer. 21219820Sjeff * 22219820Sjeff * - Redistributions in binary form must reproduce the above 23219820Sjeff * copyright notice, this list of conditions and the following 24219820Sjeff * disclaimer in the documentation and/or other materials 25219820Sjeff * provided with the distribution. 26219820Sjeff * 27219820Sjeff * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 28219820Sjeff * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 29219820Sjeff * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 30219820Sjeff * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 31219820Sjeff * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 32219820Sjeff * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 33219820Sjeff * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 34219820Sjeff * SOFTWARE. 35219820Sjeff */ 36219820Sjeff 37219820Sjeff#include <rdma/ib_smi.h> 38219820Sjeff#include <rdma/ib_umem.h> 39219820Sjeff#include <rdma/ib_user_verbs.h> 40219820Sjeff 41219820Sjeff#include <linux/sched.h> 42219820Sjeff#include <linux/mm.h> 43219820Sjeff 44219820Sjeff#include "mthca_dev.h" 45219820Sjeff#include "mthca_cmd.h" 46219820Sjeff#include "mthca_user.h" 47219820Sjeff#include "mthca_memfree.h" 48219820Sjeff 49219820Sjeffstatic void init_query_mad(struct ib_smp *mad) 50219820Sjeff{ 51219820Sjeff mad->base_version = 1; 52219820Sjeff mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 53219820Sjeff mad->class_version = 1; 54219820Sjeff mad->method = IB_MGMT_METHOD_GET; 55219820Sjeff} 56219820Sjeff 57219820Sjeffstatic int mthca_query_device(struct ib_device *ibdev, 58219820Sjeff struct ib_device_attr *props) 59219820Sjeff{ 60219820Sjeff struct ib_smp *in_mad = NULL; 61219820Sjeff struct ib_smp *out_mad = NULL; 62219820Sjeff int err = -ENOMEM; 63219820Sjeff struct mthca_dev *mdev = to_mdev(ibdev); 64219820Sjeff 65219820Sjeff u8 status; 66219820Sjeff 67219820Sjeff in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 68219820Sjeff out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 69219820Sjeff if (!in_mad || !out_mad) 70219820Sjeff goto out; 71219820Sjeff 72219820Sjeff memset(props, 0, sizeof *props); 73219820Sjeff 74219820Sjeff props->fw_ver = mdev->fw_ver; 75219820Sjeff 76219820Sjeff init_query_mad(in_mad); 77219820Sjeff in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 78219820Sjeff 79219820Sjeff err = mthca_MAD_IFC(mdev, 1, 1, 80219820Sjeff 1, NULL, NULL, in_mad, out_mad, 81219820Sjeff &status); 82219820Sjeff if (err) 83219820Sjeff goto out; 84219820Sjeff if 
(status) { 85219820Sjeff err = -EINVAL; 86219820Sjeff goto out; 87219820Sjeff } 88219820Sjeff 89219820Sjeff props->device_cap_flags = mdev->device_cap_flags; 90219820Sjeff props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 91219820Sjeff 0xffffff; 92219820Sjeff props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); 93219820Sjeff props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); 94219820Sjeff memcpy(&props->sys_image_guid, out_mad->data + 4, 8); 95219820Sjeff 96219820Sjeff props->max_mr_size = ~0ull; 97219820Sjeff props->page_size_cap = mdev->limits.page_size_cap; 98219820Sjeff props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps; 99219820Sjeff props->max_qp_wr = mdev->limits.max_wqes; 100219820Sjeff props->max_sge = mdev->limits.max_sg; 101219820Sjeff props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs; 102219820Sjeff props->max_cqe = mdev->limits.max_cqes; 103219820Sjeff props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws; 104219820Sjeff props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds; 105219820Sjeff props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift; 106219820Sjeff props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma; 107219820Sjeff props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; 108219820Sjeff props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs; 109219820Sjeff props->max_srq_wr = mdev->limits.max_srq_wqes; 110219820Sjeff props->max_srq_sge = mdev->limits.max_srq_sge; 111219820Sjeff props->local_ca_ack_delay = mdev->limits.local_ca_ack_delay; 112219820Sjeff props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ? 
113219820Sjeff IB_ATOMIC_HCA : IB_ATOMIC_NONE; 114219820Sjeff props->max_pkeys = mdev->limits.pkey_table_len; 115219820Sjeff props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms; 116219820Sjeff props->max_mcast_qp_attach = MTHCA_QP_PER_MGM; 117219820Sjeff props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 118219820Sjeff props->max_mcast_grp; 119219820Sjeff /* 120219820Sjeff * If Sinai memory key optimization is being used, then only 121219820Sjeff * the 8-bit key portion will change. For other HCAs, the 122219820Sjeff * unused index bits will also be used for FMR remapping. 123219820Sjeff */ 124219820Sjeff if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT) 125219820Sjeff props->max_map_per_fmr = 255; 126219820Sjeff else 127219820Sjeff props->max_map_per_fmr = 128219820Sjeff (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1; 129219820Sjeff 130219820Sjeff err = 0; 131219820Sjeff out: 132219820Sjeff kfree(in_mad); 133219820Sjeff kfree(out_mad); 134219820Sjeff return err; 135219820Sjeff} 136219820Sjeff 137219820Sjeffstatic int mthca_query_port(struct ib_device *ibdev, 138219820Sjeff u8 port, struct ib_port_attr *props) 139219820Sjeff{ 140219820Sjeff struct ib_smp *in_mad = NULL; 141219820Sjeff struct ib_smp *out_mad = NULL; 142219820Sjeff int err = -ENOMEM; 143219820Sjeff u8 status; 144219820Sjeff 145219820Sjeff in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 146219820Sjeff out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 147219820Sjeff if (!in_mad || !out_mad) 148219820Sjeff goto out; 149219820Sjeff 150219820Sjeff memset(props, 0, sizeof *props); 151219820Sjeff 152219820Sjeff init_query_mad(in_mad); 153219820Sjeff in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 154219820Sjeff in_mad->attr_mod = cpu_to_be32(port); 155219820Sjeff 156219820Sjeff err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 157219820Sjeff port, NULL, NULL, in_mad, out_mad, 158219820Sjeff &status); 159219820Sjeff if (err) 160219820Sjeff goto out; 161219820Sjeff if (status) { 162219820Sjeff err = 
-EINVAL; 163219820Sjeff goto out; 164219820Sjeff } 165219820Sjeff 166219820Sjeff props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); 167219820Sjeff props->lmc = out_mad->data[34] & 0x7; 168219820Sjeff props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); 169219820Sjeff props->sm_sl = out_mad->data[36] & 0xf; 170219820Sjeff props->state = out_mad->data[32] & 0xf; 171219820Sjeff props->phys_state = out_mad->data[33] >> 4; 172219820Sjeff props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20)); 173219820Sjeff props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; 174219820Sjeff props->max_msg_sz = 0x80000000; 175219820Sjeff props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len; 176219820Sjeff props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); 177219820Sjeff props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); 178219820Sjeff props->active_width = out_mad->data[31] & 0xf; 179219820Sjeff props->active_speed = out_mad->data[35] >> 4; 180219820Sjeff props->max_mtu = out_mad->data[41] & 0xf; 181219820Sjeff props->active_mtu = out_mad->data[36] >> 4; 182219820Sjeff props->subnet_timeout = out_mad->data[51] & 0x1f; 183219820Sjeff props->max_vl_num = out_mad->data[37] >> 4; 184219820Sjeff props->init_type_reply = out_mad->data[41] >> 4; 185219820Sjeff 186219820Sjeff out: 187219820Sjeff kfree(in_mad); 188219820Sjeff kfree(out_mad); 189219820Sjeff return err; 190219820Sjeff} 191219820Sjeff 192219820Sjeffstatic int mthca_modify_device(struct ib_device *ibdev, 193219820Sjeff int mask, 194219820Sjeff struct ib_device_modify *props) 195219820Sjeff{ 196219820Sjeff if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) 197219820Sjeff return -EOPNOTSUPP; 198219820Sjeff 199219820Sjeff if (mask & IB_DEVICE_MODIFY_NODE_DESC) { 200219820Sjeff if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) 201219820Sjeff return -ERESTARTSYS; 202219820Sjeff memcpy(ibdev->node_desc, props->node_desc, 64); 203219820Sjeff 
mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); 204219820Sjeff } 205219820Sjeff 206219820Sjeff return 0; 207219820Sjeff} 208219820Sjeff 209219820Sjeffstatic int mthca_modify_port(struct ib_device *ibdev, 210219820Sjeff u8 port, int port_modify_mask, 211219820Sjeff struct ib_port_modify *props) 212219820Sjeff{ 213219820Sjeff struct mthca_set_ib_param set_ib; 214219820Sjeff struct ib_port_attr attr; 215219820Sjeff int err; 216219820Sjeff u8 status; 217219820Sjeff 218219820Sjeff if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) 219219820Sjeff return -ERESTARTSYS; 220219820Sjeff 221219820Sjeff err = mthca_query_port(ibdev, port, &attr); 222219820Sjeff if (err) 223219820Sjeff goto out; 224219820Sjeff 225219820Sjeff set_ib.set_si_guid = 0; 226219820Sjeff set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR); 227219820Sjeff 228219820Sjeff set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & 229219820Sjeff ~props->clr_port_cap_mask; 230219820Sjeff 231219820Sjeff err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status); 232219820Sjeff if (err) 233219820Sjeff goto out; 234219820Sjeff if (status) { 235219820Sjeff err = -EINVAL; 236219820Sjeff goto out; 237219820Sjeff } 238219820Sjeff 239219820Sjeffout: 240219820Sjeff mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); 241219820Sjeff return err; 242219820Sjeff} 243219820Sjeff 244219820Sjeffstatic int mthca_query_pkey(struct ib_device *ibdev, 245219820Sjeff u8 port, u16 index, u16 *pkey) 246219820Sjeff{ 247219820Sjeff struct ib_smp *in_mad = NULL; 248219820Sjeff struct ib_smp *out_mad = NULL; 249219820Sjeff int err = -ENOMEM; 250219820Sjeff u8 status; 251219820Sjeff 252219820Sjeff in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 253219820Sjeff out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 254219820Sjeff if (!in_mad || !out_mad) 255219820Sjeff goto out; 256219820Sjeff 257219820Sjeff init_query_mad(in_mad); 258219820Sjeff in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; 259219820Sjeff 
in_mad->attr_mod = cpu_to_be32(index / 32); 260219820Sjeff 261219820Sjeff err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 262219820Sjeff port, NULL, NULL, in_mad, out_mad, 263219820Sjeff &status); 264219820Sjeff if (err) 265219820Sjeff goto out; 266219820Sjeff if (status) { 267219820Sjeff err = -EINVAL; 268219820Sjeff goto out; 269219820Sjeff } 270219820Sjeff 271219820Sjeff *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); 272219820Sjeff 273219820Sjeff out: 274219820Sjeff kfree(in_mad); 275219820Sjeff kfree(out_mad); 276219820Sjeff return err; 277219820Sjeff} 278219820Sjeff 279219820Sjeffstatic int mthca_query_gid(struct ib_device *ibdev, u8 port, 280219820Sjeff int index, union ib_gid *gid) 281219820Sjeff{ 282219820Sjeff struct ib_smp *in_mad = NULL; 283219820Sjeff struct ib_smp *out_mad = NULL; 284219820Sjeff int err = -ENOMEM; 285219820Sjeff u8 status; 286219820Sjeff 287219820Sjeff in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 288219820Sjeff out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 289219820Sjeff if (!in_mad || !out_mad) 290219820Sjeff goto out; 291219820Sjeff 292219820Sjeff init_query_mad(in_mad); 293219820Sjeff in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; 294219820Sjeff in_mad->attr_mod = cpu_to_be32(port); 295219820Sjeff 296219820Sjeff err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 297219820Sjeff port, NULL, NULL, in_mad, out_mad, 298219820Sjeff &status); 299219820Sjeff if (err) 300219820Sjeff goto out; 301219820Sjeff if (status) { 302219820Sjeff err = -EINVAL; 303219820Sjeff goto out; 304219820Sjeff } 305219820Sjeff 306219820Sjeff memcpy(gid->raw, out_mad->data + 8, 8); 307219820Sjeff 308219820Sjeff init_query_mad(in_mad); 309219820Sjeff in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; 310219820Sjeff in_mad->attr_mod = cpu_to_be32(index / 8); 311219820Sjeff 312219820Sjeff err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 313219820Sjeff port, NULL, NULL, in_mad, out_mad, 314219820Sjeff &status); 315219820Sjeff if (err) 316219820Sjeff goto out; 317219820Sjeff if (status) { 
318219820Sjeff err = -EINVAL; 319219820Sjeff goto out; 320219820Sjeff } 321219820Sjeff 322219820Sjeff memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); 323219820Sjeff 324219820Sjeff out: 325219820Sjeff kfree(in_mad); 326219820Sjeff kfree(out_mad); 327219820Sjeff return err; 328219820Sjeff} 329219820Sjeff 330219820Sjeffstatic struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev, 331219820Sjeff struct ib_udata *udata) 332219820Sjeff{ 333219820Sjeff struct mthca_alloc_ucontext_resp uresp; 334219820Sjeff struct mthca_ucontext *context; 335219820Sjeff int err; 336219820Sjeff 337219820Sjeff if (!(to_mdev(ibdev)->active)) 338219820Sjeff return ERR_PTR(-EAGAIN); 339219820Sjeff 340219820Sjeff memset(&uresp, 0, sizeof uresp); 341219820Sjeff 342219820Sjeff uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; 343219820Sjeff if (mthca_is_memfree(to_mdev(ibdev))) 344219820Sjeff uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size; 345219820Sjeff else 346219820Sjeff uresp.uarc_size = 0; 347219820Sjeff 348219820Sjeff context = kmalloc(sizeof *context, GFP_KERNEL); 349219820Sjeff if (!context) 350219820Sjeff return ERR_PTR(-ENOMEM); 351219820Sjeff 352219820Sjeff err = mthca_uar_alloc(to_mdev(ibdev), &context->uar); 353219820Sjeff if (err) { 354219820Sjeff kfree(context); 355219820Sjeff return ERR_PTR(err); 356219820Sjeff } 357219820Sjeff 358219820Sjeff context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev)); 359219820Sjeff if (IS_ERR(context->db_tab)) { 360219820Sjeff err = PTR_ERR(context->db_tab); 361219820Sjeff mthca_uar_free(to_mdev(ibdev), &context->uar); 362219820Sjeff kfree(context); 363219820Sjeff return ERR_PTR(err); 364219820Sjeff } 365219820Sjeff 366219820Sjeff if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) { 367219820Sjeff mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab); 368219820Sjeff mthca_uar_free(to_mdev(ibdev), &context->uar); 369219820Sjeff kfree(context); 370219820Sjeff return ERR_PTR(-EFAULT); 371219820Sjeff } 
372219820Sjeff 373219820Sjeff context->reg_mr_warned = 0; 374219820Sjeff 375219820Sjeff return &context->ibucontext; 376219820Sjeff} 377219820Sjeff 378219820Sjeffstatic int mthca_dealloc_ucontext(struct ib_ucontext *context) 379219820Sjeff{ 380219820Sjeff mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar, 381219820Sjeff to_mucontext(context)->db_tab); 382219820Sjeff mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar); 383219820Sjeff kfree(to_mucontext(context)); 384219820Sjeff 385219820Sjeff return 0; 386219820Sjeff} 387219820Sjeff 388219820Sjeffstatic int mthca_mmap_uar(struct ib_ucontext *context, 389219820Sjeff struct vm_area_struct *vma) 390219820Sjeff{ 391219820Sjeff if (vma->vm_end - vma->vm_start != PAGE_SIZE) 392219820Sjeff return -EINVAL; 393219820Sjeff 394219820Sjeff vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 395219820Sjeff 396219820Sjeff if (io_remap_pfn_range(vma, vma->vm_start, 397219820Sjeff to_mucontext(context)->uar.pfn, 398219820Sjeff PAGE_SIZE, vma->vm_page_prot)) 399219820Sjeff return -EAGAIN; 400219820Sjeff 401219820Sjeff return 0; 402219820Sjeff} 403219820Sjeff 404219820Sjeffstatic struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev, 405219820Sjeff struct ib_ucontext *context, 406219820Sjeff struct ib_udata *udata) 407219820Sjeff{ 408219820Sjeff struct mthca_pd *pd; 409219820Sjeff int err; 410219820Sjeff 411219820Sjeff pd = kmalloc(sizeof *pd, GFP_KERNEL); 412219820Sjeff if (!pd) 413219820Sjeff return ERR_PTR(-ENOMEM); 414219820Sjeff 415219820Sjeff err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); 416219820Sjeff if (err) { 417219820Sjeff kfree(pd); 418219820Sjeff return ERR_PTR(err); 419219820Sjeff } 420219820Sjeff 421219820Sjeff if (context) { 422219820Sjeff if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) { 423219820Sjeff mthca_pd_free(to_mdev(ibdev), pd); 424219820Sjeff kfree(pd); 425219820Sjeff return ERR_PTR(-EFAULT); 426219820Sjeff } 427219820Sjeff } 428219820Sjeff 
429219820Sjeff return &pd->ibpd; 430219820Sjeff} 431219820Sjeff 432219820Sjeffstatic int mthca_dealloc_pd(struct ib_pd *pd) 433219820Sjeff{ 434219820Sjeff mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); 435219820Sjeff kfree(pd); 436219820Sjeff 437219820Sjeff return 0; 438219820Sjeff} 439219820Sjeff 440219820Sjeffstatic struct ib_ah *mthca_ah_create(struct ib_pd *pd, 441219820Sjeff struct ib_ah_attr *ah_attr) 442219820Sjeff{ 443219820Sjeff int err; 444219820Sjeff struct mthca_ah *ah; 445219820Sjeff 446219820Sjeff ah = kmalloc(sizeof *ah, GFP_ATOMIC); 447219820Sjeff if (!ah) 448219820Sjeff return ERR_PTR(-ENOMEM); 449219820Sjeff 450219820Sjeff err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah); 451219820Sjeff if (err) { 452219820Sjeff kfree(ah); 453219820Sjeff return ERR_PTR(err); 454219820Sjeff } 455219820Sjeff 456219820Sjeff return &ah->ibah; 457219820Sjeff} 458219820Sjeff 459219820Sjeffstatic int mthca_ah_destroy(struct ib_ah *ah) 460219820Sjeff{ 461219820Sjeff mthca_destroy_ah(to_mdev(ah->device), to_mah(ah)); 462219820Sjeff kfree(ah); 463219820Sjeff 464219820Sjeff return 0; 465219820Sjeff} 466219820Sjeff 467219820Sjeffstatic struct ib_srq *mthca_create_srq(struct ib_pd *pd, 468219820Sjeff struct ib_srq_init_attr *init_attr, 469219820Sjeff struct ib_udata *udata) 470219820Sjeff{ 471219820Sjeff struct mthca_create_srq ucmd; 472219820Sjeff struct mthca_ucontext *context = NULL; 473219820Sjeff struct mthca_srq *srq; 474219820Sjeff int err; 475219820Sjeff 476219820Sjeff srq = kmalloc(sizeof *srq, GFP_KERNEL); 477219820Sjeff if (!srq) 478219820Sjeff return ERR_PTR(-ENOMEM); 479219820Sjeff 480219820Sjeff if (pd->uobject) { 481219820Sjeff context = to_mucontext(pd->uobject->context); 482219820Sjeff 483219820Sjeff if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { 484219820Sjeff err = -EFAULT; 485219820Sjeff goto err_free; 486219820Sjeff } 487219820Sjeff 488219820Sjeff err = mthca_map_user_db(to_mdev(pd->device), &context->uar, 489219820Sjeff 
context->db_tab, ucmd.db_index, 490219820Sjeff ucmd.db_page); 491219820Sjeff 492219820Sjeff if (err) 493219820Sjeff goto err_free; 494219820Sjeff 495219820Sjeff srq->mr.ibmr.lkey = ucmd.lkey; 496219820Sjeff srq->db_index = ucmd.db_index; 497219820Sjeff } 498219820Sjeff 499219820Sjeff err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), 500219820Sjeff &init_attr->attr, srq); 501219820Sjeff 502219820Sjeff if (err && pd->uobject) 503219820Sjeff mthca_unmap_user_db(to_mdev(pd->device), &context->uar, 504219820Sjeff context->db_tab, ucmd.db_index); 505219820Sjeff 506219820Sjeff if (err) 507219820Sjeff goto err_free; 508219820Sjeff 509219820Sjeff if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) { 510219820Sjeff mthca_free_srq(to_mdev(pd->device), srq); 511219820Sjeff err = -EFAULT; 512219820Sjeff goto err_free; 513219820Sjeff } 514219820Sjeff 515219820Sjeff return &srq->ibsrq; 516219820Sjeff 517219820Sjefferr_free: 518219820Sjeff kfree(srq); 519219820Sjeff 520219820Sjeff return ERR_PTR(err); 521219820Sjeff} 522219820Sjeff 523219820Sjeffstatic int mthca_destroy_srq(struct ib_srq *srq) 524219820Sjeff{ 525219820Sjeff struct mthca_ucontext *context; 526219820Sjeff 527219820Sjeff if (srq->uobject) { 528219820Sjeff context = to_mucontext(srq->uobject->context); 529219820Sjeff 530219820Sjeff mthca_unmap_user_db(to_mdev(srq->device), &context->uar, 531219820Sjeff context->db_tab, to_msrq(srq)->db_index); 532219820Sjeff } 533219820Sjeff 534219820Sjeff mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); 535219820Sjeff kfree(srq); 536219820Sjeff 537219820Sjeff return 0; 538219820Sjeff} 539219820Sjeff 540219820Sjeffstatic struct ib_qp *mthca_create_qp(struct ib_pd *pd, 541219820Sjeff struct ib_qp_init_attr *init_attr, 542219820Sjeff struct ib_udata *udata) 543219820Sjeff{ 544219820Sjeff struct mthca_create_qp ucmd; 545219820Sjeff struct mthca_qp *qp; 546219820Sjeff int err; 547219820Sjeff 548219820Sjeff if (init_attr->create_flags) 549219820Sjeff return 
ERR_PTR(-EINVAL); 550219820Sjeff 551219820Sjeff switch (init_attr->qp_type) { 552219820Sjeff case IB_QPT_RC: 553219820Sjeff case IB_QPT_UC: 554219820Sjeff case IB_QPT_UD: 555219820Sjeff { 556219820Sjeff struct mthca_ucontext *context; 557219820Sjeff 558219820Sjeff qp = kmalloc(sizeof *qp, GFP_KERNEL); 559219820Sjeff if (!qp) 560219820Sjeff return ERR_PTR(-ENOMEM); 561219820Sjeff 562219820Sjeff if (pd->uobject) { 563219820Sjeff context = to_mucontext(pd->uobject->context); 564219820Sjeff 565219820Sjeff if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { 566219820Sjeff kfree(qp); 567219820Sjeff return ERR_PTR(-EFAULT); 568219820Sjeff } 569219820Sjeff 570219820Sjeff err = mthca_map_user_db(to_mdev(pd->device), &context->uar, 571219820Sjeff context->db_tab, 572219820Sjeff ucmd.sq_db_index, ucmd.sq_db_page); 573219820Sjeff if (err) { 574219820Sjeff kfree(qp); 575219820Sjeff return ERR_PTR(err); 576219820Sjeff } 577219820Sjeff 578219820Sjeff err = mthca_map_user_db(to_mdev(pd->device), &context->uar, 579219820Sjeff context->db_tab, 580219820Sjeff ucmd.rq_db_index, ucmd.rq_db_page); 581219820Sjeff if (err) { 582219820Sjeff mthca_unmap_user_db(to_mdev(pd->device), 583219820Sjeff &context->uar, 584219820Sjeff context->db_tab, 585219820Sjeff ucmd.sq_db_index); 586219820Sjeff kfree(qp); 587219820Sjeff return ERR_PTR(err); 588219820Sjeff } 589219820Sjeff 590219820Sjeff qp->mr.ibmr.lkey = ucmd.lkey; 591219820Sjeff qp->sq.db_index = ucmd.sq_db_index; 592219820Sjeff qp->rq.db_index = ucmd.rq_db_index; 593219820Sjeff } 594219820Sjeff 595219820Sjeff err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd), 596219820Sjeff to_mcq(init_attr->send_cq), 597219820Sjeff to_mcq(init_attr->recv_cq), 598219820Sjeff init_attr->qp_type, init_attr->sq_sig_type, 599219820Sjeff &init_attr->cap, qp); 600219820Sjeff 601219820Sjeff if (err && pd->uobject) { 602219820Sjeff context = to_mucontext(pd->uobject->context); 603219820Sjeff 604219820Sjeff mthca_unmap_user_db(to_mdev(pd->device), 605219820Sjeff 
&context->uar, 606219820Sjeff context->db_tab, 607219820Sjeff ucmd.sq_db_index); 608219820Sjeff mthca_unmap_user_db(to_mdev(pd->device), 609219820Sjeff &context->uar, 610219820Sjeff context->db_tab, 611219820Sjeff ucmd.rq_db_index); 612219820Sjeff } 613219820Sjeff 614219820Sjeff qp->ibqp.qp_num = qp->qpn; 615219820Sjeff break; 616219820Sjeff } 617219820Sjeff case IB_QPT_SMI: 618219820Sjeff case IB_QPT_GSI: 619219820Sjeff { 620219820Sjeff /* Don't allow userspace to create special QPs */ 621219820Sjeff if (pd->uobject) 622219820Sjeff return ERR_PTR(-EINVAL); 623219820Sjeff 624219820Sjeff qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); 625219820Sjeff if (!qp) 626219820Sjeff return ERR_PTR(-ENOMEM); 627219820Sjeff 628219820Sjeff qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1; 629219820Sjeff 630219820Sjeff err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd), 631219820Sjeff to_mcq(init_attr->send_cq), 632219820Sjeff to_mcq(init_attr->recv_cq), 633219820Sjeff init_attr->sq_sig_type, &init_attr->cap, 634219820Sjeff qp->ibqp.qp_num, init_attr->port_num, 635219820Sjeff to_msqp(qp)); 636219820Sjeff break; 637219820Sjeff } 638219820Sjeff default: 639219820Sjeff /* Don't support raw QPs */ 640219820Sjeff return ERR_PTR(-ENOSYS); 641219820Sjeff } 642219820Sjeff 643219820Sjeff if (err) { 644219820Sjeff kfree(qp); 645219820Sjeff return ERR_PTR(err); 646219820Sjeff } 647219820Sjeff 648219820Sjeff init_attr->cap.max_send_wr = qp->sq.max; 649219820Sjeff init_attr->cap.max_recv_wr = qp->rq.max; 650219820Sjeff init_attr->cap.max_send_sge = qp->sq.max_gs; 651219820Sjeff init_attr->cap.max_recv_sge = qp->rq.max_gs; 652219820Sjeff init_attr->cap.max_inline_data = qp->max_inline_data; 653219820Sjeff 654219820Sjeff return &qp->ibqp; 655219820Sjeff} 656219820Sjeff 657219820Sjeffstatic int mthca_destroy_qp(struct ib_qp *qp) 658219820Sjeff{ 659219820Sjeff if (qp->uobject) { 660219820Sjeff mthca_unmap_user_db(to_mdev(qp->device), 661219820Sjeff 
&to_mucontext(qp->uobject->context)->uar, 662219820Sjeff to_mucontext(qp->uobject->context)->db_tab, 663219820Sjeff to_mqp(qp)->sq.db_index); 664219820Sjeff mthca_unmap_user_db(to_mdev(qp->device), 665219820Sjeff &to_mucontext(qp->uobject->context)->uar, 666219820Sjeff to_mucontext(qp->uobject->context)->db_tab, 667219820Sjeff to_mqp(qp)->rq.db_index); 668219820Sjeff } 669219820Sjeff mthca_free_qp(to_mdev(qp->device), to_mqp(qp)); 670219820Sjeff kfree(qp); 671219820Sjeff return 0; 672219820Sjeff} 673219820Sjeff 674219820Sjeffstatic struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, 675219820Sjeff int comp_vector, 676219820Sjeff struct ib_ucontext *context, 677219820Sjeff struct ib_udata *udata) 678219820Sjeff{ 679219820Sjeff struct mthca_create_cq ucmd; 680219820Sjeff struct mthca_cq *cq; 681219820Sjeff int nent; 682219820Sjeff int err; 683219820Sjeff 684219820Sjeff if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes) 685219820Sjeff return ERR_PTR(-EINVAL); 686219820Sjeff 687219820Sjeff if (context) { 688219820Sjeff if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) 689219820Sjeff return ERR_PTR(-EFAULT); 690219820Sjeff 691219820Sjeff err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, 692219820Sjeff to_mucontext(context)->db_tab, 693219820Sjeff ucmd.set_db_index, ucmd.set_db_page); 694219820Sjeff if (err) 695219820Sjeff return ERR_PTR(err); 696219820Sjeff 697219820Sjeff err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, 698219820Sjeff to_mucontext(context)->db_tab, 699219820Sjeff ucmd.arm_db_index, ucmd.arm_db_page); 700219820Sjeff if (err) 701219820Sjeff goto err_unmap_set; 702219820Sjeff } 703219820Sjeff 704219820Sjeff cq = kmalloc(sizeof *cq, GFP_KERNEL); 705219820Sjeff if (!cq) { 706219820Sjeff err = -ENOMEM; 707219820Sjeff goto err_unmap_arm; 708219820Sjeff } 709219820Sjeff 710219820Sjeff if (context) { 711219820Sjeff cq->buf.mr.ibmr.lkey = ucmd.lkey; 712219820Sjeff cq->set_ci_db_index = 
ucmd.set_db_index; 713219820Sjeff cq->arm_db_index = ucmd.arm_db_index; 714219820Sjeff } 715219820Sjeff 716219820Sjeff for (nent = 1; nent <= entries; nent <<= 1) 717219820Sjeff ; /* nothing */ 718219820Sjeff 719219820Sjeff err = mthca_init_cq(to_mdev(ibdev), nent, 720219820Sjeff context ? to_mucontext(context) : NULL, 721219820Sjeff context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num, 722219820Sjeff cq); 723219820Sjeff if (err) 724219820Sjeff goto err_free; 725219820Sjeff 726219820Sjeff if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { 727219820Sjeff mthca_free_cq(to_mdev(ibdev), cq); 728219820Sjeff goto err_free; 729219820Sjeff } 730219820Sjeff 731219820Sjeff cq->resize_buf = NULL; 732219820Sjeff 733219820Sjeff return &cq->ibcq; 734219820Sjeff 735219820Sjefferr_free: 736219820Sjeff kfree(cq); 737219820Sjeff 738219820Sjefferr_unmap_arm: 739219820Sjeff if (context) 740219820Sjeff mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, 741219820Sjeff to_mucontext(context)->db_tab, ucmd.arm_db_index); 742219820Sjeff 743219820Sjefferr_unmap_set: 744219820Sjeff if (context) 745219820Sjeff mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, 746219820Sjeff to_mucontext(context)->db_tab, ucmd.set_db_index); 747219820Sjeff 748219820Sjeff return ERR_PTR(err); 749219820Sjeff} 750219820Sjeff 751219820Sjeffstatic int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq, 752219820Sjeff int entries) 753219820Sjeff{ 754219820Sjeff int ret; 755219820Sjeff 756219820Sjeff spin_lock_irq(&cq->lock); 757219820Sjeff if (cq->resize_buf) { 758219820Sjeff ret = -EBUSY; 759219820Sjeff goto unlock; 760219820Sjeff } 761219820Sjeff 762219820Sjeff cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC); 763219820Sjeff if (!cq->resize_buf) { 764219820Sjeff ret = -ENOMEM; 765219820Sjeff goto unlock; 766219820Sjeff } 767219820Sjeff 768219820Sjeff cq->resize_buf->state = CQ_RESIZE_ALLOC; 769219820Sjeff 770219820Sjeff ret = 0; 
/*
 * NOTE(review): the lines below are the tail of a function whose opening is
 * above this chunk — presumably mthca_alloc_resize_buf() (it is called with
 * exactly this (dev, cq, entries) contract from mthca_resize_cq below);
 * confirm against the preceding part of the file.
 */
unlock:
	spin_unlock_irq(&cq->lock);

	if (ret)
		return ret;

	/* Allocate the new CQ buffer outside the lock; on failure, undo the
	 * resize_buf reservation under the lock so pollers never see a
	 * half-initialized resize in progress. */
	ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (ret) {
		spin_lock_irq(&cq->lock);
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);
		return ret;
	}

	/* CQE count is stored as (entries - 1), matching ibcq->cqe usage below. */
	cq->resize_buf->cqe = entries - 1;

	spin_lock_irq(&cq->lock);
	cq->resize_buf->state = CQ_RESIZE_READY;
	spin_unlock_irq(&cq->lock);

	return 0;
}

/*
 * Resize a completion queue.
 *
 * For kernel CQs a staging buffer is allocated here; for user CQs the new
 * buffer's lkey is supplied by userspace via udata.  The RESIZE_CQ firmware
 * command is issued, and on success the kernel-CQ path swaps buffers under
 * cq->lock (copying over outstanding CQEs if the poller has not already done
 * so, i.e. state is still CQ_RESIZE_READY) and frees whichever buffer is now
 * unused.  Serialized against concurrent resizes by cq->mutex.
 *
 * Returns 0 on success or a negative errno.
 */
static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_resize_cq ucmd;
	u32 lkey;
	u8 status;
	int ret;

	if (entries < 1 || entries > dev->limits.max_cqes)
		return -EINVAL;

	mutex_lock(&cq->mutex);

	/* Hardware needs a power-of-two buffer; +1 reserves the slot that
	 * distinguishes full from empty. */
	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		/* Already the requested size — nothing to do. */
		ret = 0;
		goto out;
	}

	if (cq->is_kernel) {
		ret = mthca_alloc_resize_buf(dev, cq, entries);
		if (ret)
			goto out;
		lkey = cq->resize_buf->buf.mr.ibmr.lkey;
	} else {
		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			ret = -EFAULT;
			goto out;
		}
		lkey = ucmd.lkey;
	}

	ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status);
	/* A nonzero firmware status is mapped to -EINVAL. */
	if (status)
		ret = -EINVAL;

	if (ret) {
		/* Command failed: tear down the kernel staging buffer, if any.
		 * resize_buf is only cleared under cq->lock so the poll path
		 * sees a consistent pointer. */
		if (cq->resize_buf) {
			mthca_free_cq_buf(dev, &cq->resize_buf->buf,
					  cq->resize_buf->cqe);
			kfree(cq->resize_buf);
			spin_lock_irq(&cq->lock);
			cq->resize_buf = NULL;
			spin_unlock_irq(&cq->lock);
		}
		goto out;
	}

	if (cq->is_kernel) {
		struct mthca_cq_buf tbuf;
		int tcqe;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf->state == CQ_RESIZE_READY) {
			/* Poller has not swapped yet: migrate unpolled CQEs
			 * and install the new buffer; the old one is freed
			 * below, outside the lock. */
			mthca_cq_resize_copy_cqes(cq);
			tbuf = cq->buf;
			tcqe = cq->ibcq.cqe;
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;
		} else {
			/* Poller already completed the swap: the resize_buf
			 * now holds the old buffer, which we free instead. */
			tbuf = cq->resize_buf->buf;
			tcqe = cq->resize_buf->cqe;
		}

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		spin_unlock_irq(&cq->lock);

		mthca_free_cq_buf(dev, &tbuf, tcqe);
	} else
		ibcq->cqe = entries - 1;

out:
	mutex_unlock(&cq->mutex);

	return ret;
}

/*
 * Destroy a CQ.  For userspace CQs, first unmap the two doorbell records
 * (arm and set-CI) from the user context's doorbell table.
 */
static int mthca_destroy_cq(struct ib_cq *cq)
{
	if (cq->uobject) {
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->arm_db_index);
		mthca_unmap_user_db(to_mdev(cq->device),
				    &to_mucontext(cq->uobject->context)->uar,
				    to_mucontext(cq->uobject->context)->db_tab,
				    to_mcq(cq)->set_ci_db_index);
	}
	mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
	kfree(cq);

	return 0;
}

/*
 * Translate IB verbs access flags into MPT (memory protection table) flags.
 * Local read access is always granted.
 */
static inline u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
	       MTHCA_MPT_FLAG_LOCAL_READ;
}

/*
 * Allocate a DMA MR: a no-translation memory region covering all of memory,
 * scoped to the given PD.  Returns the new ib_mr or an ERR_PTR.
 */
static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mthca_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mthca_mr_alloc_notrans(to_mdev(pd->device),
				     to_mpd(pd)->pd_num,
				     convert_access(acc), mr);

	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	mr->umem = NULL;

	return &mr->ibmr;
}

/*
 * Register an MR over a caller-supplied list of physical buffers.
 *
 * The page shift is derived from the common low-order alignment of all
 * buffer boundaries and the iova; buffer 0 is then realigned down to that
 * page size.  A flat page list is built and handed to mthca_mr_alloc_phys().
 * Returns the new ib_mr or an ERR_PTR.
 */
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
				       struct ib_phys_buf *buffer_list,
				       int num_phys_buf,
				       int acc,
				       u64 *iova_start)
{
	struct mthca_mr *mr;
	u64 *page_list;
	u64 total_size;
	unsigned long mask;
	int shift;
	int npages;
	int err;
	int i, j, n;

	/* Accumulate every interior boundary (each buffer's start except the
	 * first, each buffer's end except the last) plus the iova offset;
	 * any bit set below PAGE_SHIFT means the layout is not page-aligned. */
	mask = buffer_list[0].addr ^ *iova_start;
	total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0)
			mask |= buffer_list[i].addr;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;

		total_size += buffer_list[i].size;
	}

	if (mask & ~PAGE_MASK)
		return ERR_PTR(-EINVAL);

	/* Largest page size every boundary is aligned to, capped at 2 GB.
	 * NOTE(review): `1 << 31` is a signed shift to INT_MIN
	 * (implementation-defined once converted); `1UL << 31` would express
	 * the cap without relying on that — confirm before changing, since
	 * only the low bit position matters to __ffs here. */
	shift = __ffs(mask | 1 << 31);

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
	buffer_list[0].addr &= ~0ull << shift;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

	/* NOTE(review): with zero pages this returns an mr that was never
	 * passed to mthca_mr_alloc_phys(), so mr->umem and the rest of *mr
	 * are uninitialized — verify callers can never hit this with
	 * num_phys_buf > 0 and all sizes 0. */
	if (!npages)
		return &mr->ibmr;

	page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
	if (!page_list) {
		kfree(mr);
		return ERR_PTR(-ENOMEM);
	}

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
		     ++j)
			page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

	mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
		  "in PD %x; shift %d, npages %d.\n",
		  (unsigned long long) buffer_list[0].addr,
		  (unsigned long long) *iova_start,
		  to_mpd(pd)->pd_num,
		  shift, npages);

	err = mthca_mr_alloc_phys(to_mdev(pd->device),
				  to_mpd(pd)->pd_num,
				  page_list, shift, npages,
				  *iova_start, total_size,
				  convert_access(acc), mr);

	if (err) {
		kfree(page_list);
		kfree(mr);
		return ERR_PTR(err);
	}

	kfree(page_list);
	mr->umem = NULL;

	return &mr->ibmr;
}

/*
 * Register a userspace memory region.
 *
 * Pins the user pages via ib_umem_get(), allocates an MTT sized for the
 * total SG entries, writes the DMA addresses into the MTT in batches
 * (bounded by both the device's write_mtt limit and one page of u64s),
 * then allocates the MR itself.  Returns the new ib_mr or an ERR_PTR.
 */
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				       u64 virt, int acc, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(pd->device);
	struct ib_umem_chunk *chunk;
	struct mthca_mr *mr;
	struct mthca_reg_mr ucmd;
	u64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	int write_mtt_size;

	/* Old userspace libraries don't pass MR attributes; warn once per
	 * user context and fall back to no attributes. */
	if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
		if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
			mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
				   curproc->p_comm);
			mthca_warn(dev, "  Update libmthca to fix this.\n");
		}
		++to_mucontext(pd->uobject->context)->reg_mr_warned;
		ucmd.mr_attrs = 0;
	} else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return ERR_PTR(-EFAULT);

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
			       ucmd.mr_attrs & MTHCA_MR_DMASYNC);

	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err;
	}

	shift = ffs(mr->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mr->umem->chunk_list, list)
		n += chunk->nents;

	mr->mtt = mthca_alloc_mtt(dev, n);
	if (IS_ERR(mr->mtt)) {
		err = PTR_ERR(mr->mtt);
		goto err_umem;
	}

	/* One page of scratch space for batching MTT writes. */
	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_mtt;
	}

	i = n = 0;	/* i: entries batched so far; n: MTT offset written */

	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

	list_for_each_entry(chunk, &mr->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
					mr->umem->page_size * k;
				/*
				 * Be friendly to write_mtt and pass it chunks
				 * of appropriate size.
				 */
				if (i == write_mtt_size) {
					err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
					if (err)
						goto mtt_done;
					n += i;
					i = 0;
				}
			}
		}

	/* Flush any partial final batch. */
	if (i)
		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_mtt;

	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
			     convert_access(acc), mr);

	if (err)
		goto err_mtt;

	return &mr->ibmr;

err_mtt:
	mthca_free_mtt(dev, mr->mtt);

err_umem:
	ib_umem_release(mr->umem);

err:
	kfree(mr);
	return ERR_PTR(err);
}

/*
 * Deregister an MR, releasing its pinned umem (if it was a user MR).
 */
static int mthca_dereg_mr(struct ib_mr *mr)
{
	struct mthca_mr *mmr = to_mmr(mr);

	mthca_free_mr(to_mdev(mr->device), mmr);
	if (mmr->umem)
		ib_umem_release(mmr->umem);
	kfree(mmr);

	return 0;
}

/*
 * Allocate a fast memory region (FMR) on the given PD.
 * Returns the new ib_fmr or an ERR_PTR.
 */
static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
				      struct ib_fmr_attr *fmr_attr)
{
	struct mthca_fmr *fmr;
	int err;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
			      convert_access(mr_access_flags), fmr);

	if (err) {
		kfree(fmr);
		return ERR_PTR(err);
	}

	return &fmr->ibmr;
}

/* Free an FMR.  The fmr struct is only freed if the hardware free succeeds. */
static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
	struct mthca_fmr *mfmr = to_mfmr(fmr);
	int err;

	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
	if (err)
		return err;

	kfree(mfmr);
	return 0;
}

/*
 * Unmap a list of FMRs.  All FMRs on the list must belong to the same
 * device; the per-generation unmap is followed by a single SYNC_TPT
 * firmware command to flush the translation tables.
 */
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;
	int err;
	u8 status;
	struct mthca_dev *mdev = NULL;

	list_for_each_entry(fmr, fmr_list, list) {
		if (mdev && to_mdev(fmr->device) != mdev)
			return -EINVAL;
		mdev = to_mdev(fmr->device);
	}

	if (!mdev)
		return 0;	/* empty list */

	if (mthca_is_memfree(mdev)) {
		list_for_each_entry(fmr, fmr_list, list)
			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

		/* Ensure all unmap writes are visible before SYNC_TPT. */
		wmb();
	} else
		list_for_each_entry(fmr, fmr_list, list)
			mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

	err = mthca_SYNC_TPT(mdev, &status);
	if (err)
		return err;
	if (status)
		return -EINVAL;
	return 0;
}

/* sysfs: hardware revision id, in hex. */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	return sprintf(buf, "%x\n", dev->rev_id);
}

/* sysfs: firmware version as major.minor.sub, unpacked from the 64-bit fw_ver. */
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
		       (int) (dev->fw_ver >> 16) & 0xffff,
		       (int) dev->fw_ver & 0xffff);
}

/* sysfs: HCA model name, keyed off the PCI device id. */
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	switch (dev->pdev->device) {
	case PCI_DEVICE_ID_MELLANOX_TAVOR:
		return sprintf(buf, "MT23108\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
		return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
	case PCI_DEVICE_ID_MELLANOX_ARBEL:
		return sprintf(buf, "MT25208\n");
	case PCI_DEVICE_ID_MELLANOX_SINAI:
	case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
		return sprintf(buf, "MT25204\n");
	default:
		return sprintf(buf, "unknown\n");
	}
}

/* sysfs: board id string, at most MTHCA_BOARD_ID_LEN characters. */
static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mthca_dev *dev =
		container_of(device, struct mthca_dev, ib_dev.dev);
	return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}

/* Read-only sysfs attributes, registered in mthca_register_device(). */
static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct device_attribute *mthca_dev_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

/*
 * Query node description, node GUID, and (for mem-free HCAs) the revision
 * id via MAD IFC, populating dev->ib_dev.  Returns 0 or a negative errno
 * (-EINVAL when the firmware reports a nonzero MAD status).
 */
static int mthca_init_node_data(struct mthca_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u8 status;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	/* Reuse the same MAD for the NODE_INFO query. */
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mthca_MAD_IFC(dev, 1, 1,
			    1, NULL, NULL, in_mad, out_mad,
			    &status);
	if (err)
		goto out;
	if (status) {
		err = -EINVAL;
		goto out;
	}

	if (mthca_is_memfree(dev))
		dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

/*
 * Fill in the ib_device method table and register the device with the IB
 * core, then create the sysfs attribute files and start catastrophic-error
 * polling.  SRQ and FMR methods are installed only when the corresponding
 * device flags are set; Arbel (mem-free) vs. Tavor variants are selected
 * per hardware generation.  Returns 0 or a negative errno; on a sysfs
 * failure the device is unregistered again before returning.
 */
int mthca_register_device(struct mthca_dev *dev)
{
	int ret;
	int i;

	ret = mthca_init_node_data(dev);
	if (ret)
		return ret;

	strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner = THIS_MODULE;

	dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dma_device = &dev->pdev->dev;
	dev->ib_dev.query_device = mthca_query_device;
	dev->ib_dev.query_port = mthca_query_port;
	dev->ib_dev.modify_device = mthca_modify_device;
	dev->ib_dev.modify_port = mthca_modify_port;
	dev->ib_dev.query_pkey = mthca_query_pkey;
	dev->ib_dev.query_gid = mthca_query_gid;
	dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext;
	dev->ib_dev.mmap = mthca_mmap_uar;
	dev->ib_dev.alloc_pd = mthca_alloc_pd;
	dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
	dev->ib_dev.create_ah = mthca_ah_create;
	dev->ib_dev.query_ah = mthca_ah_query;
	dev->ib_dev.destroy_ah = mthca_ah_destroy;

	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
		dev->ib_dev.create_srq = mthca_create_srq;
		dev->ib_dev.modify_srq = mthca_modify_srq;
		dev->ib_dev.query_srq = mthca_query_srq;
		dev->ib_dev.destroy_srq = mthca_destroy_srq;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)	|
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

		if (mthca_is_memfree(dev))
			dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
		else
			dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
	}

	dev->ib_dev.create_qp = mthca_create_qp;
	dev->ib_dev.modify_qp = mthca_modify_qp;
	dev->ib_dev.query_qp = mthca_query_qp;
	dev->ib_dev.destroy_qp = mthca_destroy_qp;
	dev->ib_dev.create_cq = mthca_create_cq;
	dev->ib_dev.resize_cq = mthca_resize_cq;
	dev->ib_dev.destroy_cq = mthca_destroy_cq;
	dev->ib_dev.poll_cq = mthca_poll_cq;
	dev->ib_dev.get_dma_mr = mthca_get_dma_mr;
	dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr;
	dev->ib_dev.reg_user_mr = mthca_reg_user_mr;
	dev->ib_dev.dereg_mr = mthca_dereg_mr;

	if (dev->mthca_flags & MTHCA_FLAG_FMR) {
		dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
		dev->ib_dev.unmap_fmr = mthca_unmap_fmr;
		dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
		if (mthca_is_memfree(dev))
			dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
		else
			dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
	}

	dev->ib_dev.attach_mcast = mthca_multicast_attach;
	dev->ib_dev.detach_mcast = mthca_multicast_detach;
	dev->ib_dev.process_mad = mthca_process_mad;

	if (mthca_is_memfree(dev)) {
		dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
		dev->ib_dev.post_send = mthca_arbel_post_send;
		dev->ib_dev.post_recv = mthca_arbel_post_receive;
	} else {
		dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
		dev->ib_dev.post_send = mthca_tavor_post_send;
		dev->ib_dev.post_recv = mthca_tavor_post_receive;
	}

	mutex_init(&dev->cap_mask_mutex);

	ret = ib_register_device(&dev->ib_dev);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
		ret = device_create_file(&dev->ib_dev.dev,
					 mthca_dev_attributes[i]);
		if (ret) {
			/* Roll back the registration on sysfs failure. */
			ib_unregister_device(&dev->ib_dev);
			return ret;
		}
	}

	mthca_start_catas_poll(dev);

	return 0;
}

/*
 * Tear down the IB registration established by mthca_register_device().
 * The catastrophic-error poller is stopped first so it cannot run against
 * a device that is mid-unregistration.
 */
void mthca_unregister_device(struct mthca_dev *dev)
{
	mthca_stop_catas_poll(dev);
	ib_unregister_device(&dev->ib_dev);
}