/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
32219820Sjeff */ 33219820Sjeff 34219820Sjeff#include <linux/mlx4/qp.h> 35219820Sjeff#include <linux/mlx4/srq.h> 36255932Salfred#include <linux/slab.h> 37219820Sjeff 38219820Sjeff#include "mlx4_ib.h" 39219820Sjeff#include "user.h" 40219820Sjeff 41219820Sjeffstatic void *get_wqe(struct mlx4_ib_srq *srq, int n) 42219820Sjeff{ 43219820Sjeff return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); 44219820Sjeff} 45219820Sjeff 46219820Sjeffstatic void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type) 47219820Sjeff{ 48219820Sjeff struct ib_event event; 49219820Sjeff struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; 50219820Sjeff 51219820Sjeff if (ibsrq->event_handler) { 52219820Sjeff event.device = ibsrq->device; 53219820Sjeff event.element.srq = ibsrq; 54219820Sjeff switch (type) { 55219820Sjeff case MLX4_EVENT_TYPE_SRQ_LIMIT: 56219820Sjeff event.event = IB_EVENT_SRQ_LIMIT_REACHED; 57219820Sjeff break; 58219820Sjeff case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: 59219820Sjeff event.event = IB_EVENT_SRQ_ERR; 60219820Sjeff break; 61219820Sjeff default: 62255932Salfred pr_warn("Unexpected event type %d " 63219820Sjeff "on SRQ %06x\n", type, srq->srqn); 64219820Sjeff return; 65219820Sjeff } 66219820Sjeff 67219820Sjeff ibsrq->event_handler(&event, ibsrq->srq_context); 68219820Sjeff } 69219820Sjeff} 70219820Sjeff 71255932Salfredstruct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, 72255932Salfred struct ib_srq_init_attr *init_attr, 73255932Salfred struct ib_udata *udata) 74219820Sjeff{ 75219820Sjeff struct mlx4_ib_dev *dev = to_mdev(pd->device); 76219820Sjeff struct mlx4_ib_srq *srq; 77219820Sjeff struct mlx4_wqe_srq_next_seg *next; 78255932Salfred struct mlx4_wqe_data_seg *scatter; 79255932Salfred u32 cqn; 80255932Salfred u16 xrcdn; 81219820Sjeff int desc_size; 82219820Sjeff int buf_size; 83219820Sjeff int err; 84219820Sjeff int i; 85219820Sjeff 86219820Sjeff /* Sanity check SRQ size before proceeding */ 87219820Sjeff if (init_attr->attr.max_wr >= 
dev->dev->caps.max_srq_wqes || 88255932Salfred init_attr->attr.max_sge > dev->dev->caps.max_srq_sge) 89219820Sjeff return ERR_PTR(-EINVAL); 90219820Sjeff 91255932Salfred srq = kmalloc(sizeof *srq, GFP_KERNEL); 92219820Sjeff if (!srq) 93219820Sjeff return ERR_PTR(-ENOMEM); 94219820Sjeff 95219820Sjeff mutex_init(&srq->mutex); 96219820Sjeff spin_lock_init(&srq->lock); 97219820Sjeff srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); 98219820Sjeff srq->msrq.max_gs = init_attr->attr.max_sge; 99219820Sjeff 100219820Sjeff desc_size = max(32UL, 101219820Sjeff roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) + 102219820Sjeff srq->msrq.max_gs * 103219820Sjeff sizeof (struct mlx4_wqe_data_seg))); 104219820Sjeff srq->msrq.wqe_shift = ilog2(desc_size); 105219820Sjeff 106219820Sjeff buf_size = srq->msrq.max * desc_size; 107219820Sjeff 108219820Sjeff if (pd->uobject) { 109219820Sjeff struct mlx4_ib_create_srq ucmd; 110219820Sjeff 111219820Sjeff if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) { 112219820Sjeff err = -EFAULT; 113219820Sjeff goto err_srq; 114219820Sjeff } 115219820Sjeff 116219820Sjeff srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, 117219820Sjeff buf_size, 0, 0); 118219820Sjeff if (IS_ERR(srq->umem)) { 119219820Sjeff err = PTR_ERR(srq->umem); 120219820Sjeff goto err_srq; 121219820Sjeff } 122219820Sjeff 123219820Sjeff err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem), 124219820Sjeff ilog2(srq->umem->page_size), &srq->mtt); 125219820Sjeff if (err) 126219820Sjeff goto err_buf; 127219820Sjeff 128219820Sjeff err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); 129219820Sjeff if (err) 130219820Sjeff goto err_mtt; 131219820Sjeff 132219820Sjeff err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context), 133219820Sjeff ucmd.db_addr, &srq->db); 134219820Sjeff if (err) 135219820Sjeff goto err_mtt; 136219820Sjeff } else { 137219820Sjeff err = mlx4_db_alloc(dev->dev, &srq->db, 0); 138219820Sjeff if (err) 139219820Sjeff goto 
err_srq; 140219820Sjeff 141219820Sjeff *srq->db.db = 0; 142219820Sjeff 143219820Sjeff if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) { 144219820Sjeff err = -ENOMEM; 145219820Sjeff goto err_db; 146219820Sjeff } 147219820Sjeff 148219820Sjeff srq->head = 0; 149219820Sjeff srq->tail = srq->msrq.max - 1; 150219820Sjeff srq->wqe_ctr = 0; 151219820Sjeff 152219820Sjeff for (i = 0; i < srq->msrq.max; ++i) { 153219820Sjeff next = get_wqe(srq, i); 154219820Sjeff next->next_wqe_index = 155219820Sjeff cpu_to_be16((i + 1) & (srq->msrq.max - 1)); 156219820Sjeff 157219820Sjeff for (scatter = (void *) (next + 1); 158219820Sjeff (void *) scatter < (void *) next + desc_size; 159219820Sjeff ++scatter) 160219820Sjeff scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY); 161219820Sjeff } 162219820Sjeff 163219820Sjeff err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift, 164219820Sjeff &srq->mtt); 165219820Sjeff if (err) 166219820Sjeff goto err_buf; 167219820Sjeff 168219820Sjeff err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf); 169219820Sjeff if (err) 170219820Sjeff goto err_mtt; 171219820Sjeff 172219820Sjeff srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL); 173219820Sjeff if (!srq->wrid) { 174219820Sjeff err = -ENOMEM; 175219820Sjeff goto err_mtt; 176219820Sjeff } 177219820Sjeff } 178219820Sjeff 179255932Salfred cqn = (init_attr->srq_type == IB_SRQT_XRC) ? 180255932Salfred to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0; 181255932Salfred xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ? 
182255932Salfred to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn : 183219820Sjeff (u16) dev->dev->caps.reserved_xrcds; 184219820Sjeff err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt, 185219820Sjeff srq->db.dma, &srq->msrq); 186219820Sjeff if (err) 187219820Sjeff goto err_wrid; 188219820Sjeff 189219820Sjeff srq->msrq.event = mlx4_ib_srq_event; 190255932Salfred srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; 191219820Sjeff 192255932Salfred if (pd->uobject) 193219820Sjeff if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) { 194219820Sjeff err = -EFAULT; 195219820Sjeff goto err_wrid; 196219820Sjeff } 197219820Sjeff 198219820Sjeff init_attr->attr.max_wr = srq->msrq.max - 1; 199219820Sjeff 200219820Sjeff return &srq->ibsrq; 201219820Sjeff 202219820Sjefferr_wrid: 203219820Sjeff if (pd->uobject) 204219820Sjeff mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); 205219820Sjeff else 206219820Sjeff kfree(srq->wrid); 207219820Sjeff 208219820Sjefferr_mtt: 209219820Sjeff mlx4_mtt_cleanup(dev->dev, &srq->mtt); 210219820Sjeff 211219820Sjefferr_buf: 212219820Sjeff if (pd->uobject) 213219820Sjeff ib_umem_release(srq->umem); 214219820Sjeff else 215219820Sjeff mlx4_buf_free(dev->dev, buf_size, &srq->buf); 216219820Sjeff 217219820Sjefferr_db: 218219820Sjeff if (!pd->uobject) 219219820Sjeff mlx4_db_free(dev->dev, &srq->db); 220219820Sjeff 221219820Sjefferr_srq: 222219820Sjeff kfree(srq); 223219820Sjeff 224219820Sjeff return ERR_PTR(err); 225219820Sjeff} 226219820Sjeff 227219820Sjeffint mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, 228219820Sjeff enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) 229219820Sjeff{ 230219820Sjeff struct mlx4_ib_dev *dev = to_mdev(ibsrq->device); 231219820Sjeff struct mlx4_ib_srq *srq = to_msrq(ibsrq); 232219820Sjeff int ret; 233219820Sjeff 234219820Sjeff /* We don't support resizing SRQs (yet?) 
*/ 235255932Salfred if (attr_mask & IB_SRQ_MAX_WR) 236219820Sjeff return -EINVAL; 237219820Sjeff 238219820Sjeff if (attr_mask & IB_SRQ_LIMIT) { 239255932Salfred if (attr->srq_limit >= srq->msrq.max) 240219820Sjeff return -EINVAL; 241219820Sjeff 242219820Sjeff mutex_lock(&srq->mutex); 243219820Sjeff ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit); 244219820Sjeff mutex_unlock(&srq->mutex); 245219820Sjeff 246219820Sjeff if (ret) 247219820Sjeff return ret; 248219820Sjeff } 249219820Sjeff 250219820Sjeff return 0; 251219820Sjeff} 252219820Sjeff 253219820Sjeffint mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) 254219820Sjeff{ 255219820Sjeff struct mlx4_ib_dev *dev = to_mdev(ibsrq->device); 256219820Sjeff struct mlx4_ib_srq *srq = to_msrq(ibsrq); 257219820Sjeff int ret; 258219820Sjeff int limit_watermark; 259219820Sjeff 260219820Sjeff ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark); 261219820Sjeff if (ret) 262219820Sjeff return ret; 263219820Sjeff 264219820Sjeff srq_attr->srq_limit = limit_watermark; 265219820Sjeff srq_attr->max_wr = srq->msrq.max - 1; 266219820Sjeff srq_attr->max_sge = srq->msrq.max_gs; 267219820Sjeff 268219820Sjeff return 0; 269219820Sjeff} 270219820Sjeff 271219820Sjeffint mlx4_ib_destroy_srq(struct ib_srq *srq) 272219820Sjeff{ 273219820Sjeff struct mlx4_ib_dev *dev = to_mdev(srq->device); 274219820Sjeff struct mlx4_ib_srq *msrq = to_msrq(srq); 275219820Sjeff 276219820Sjeff mlx4_srq_free(dev->dev, &msrq->msrq); 277219820Sjeff mlx4_mtt_cleanup(dev->dev, &msrq->mtt); 278219820Sjeff 279219820Sjeff if (srq->uobject) { 280219820Sjeff mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); 281219820Sjeff ib_umem_release(msrq->umem); 282219820Sjeff } else { 283219820Sjeff kfree(msrq->wrid); 284219820Sjeff mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, 285219820Sjeff &msrq->buf); 286219820Sjeff mlx4_db_free(dev->dev, &msrq->db); 287219820Sjeff } 288219820Sjeff 289219820Sjeff 
kfree(msrq); 290219820Sjeff 291219820Sjeff return 0; 292219820Sjeff} 293219820Sjeff 294219820Sjeffvoid mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index) 295219820Sjeff{ 296219820Sjeff struct mlx4_wqe_srq_next_seg *next; 297219820Sjeff 298219820Sjeff /* always called with interrupts disabled. */ 299219820Sjeff spin_lock(&srq->lock); 300219820Sjeff 301219820Sjeff next = get_wqe(srq, srq->tail); 302219820Sjeff next->next_wqe_index = cpu_to_be16(wqe_index); 303219820Sjeff srq->tail = wqe_index; 304219820Sjeff 305219820Sjeff spin_unlock(&srq->lock); 306219820Sjeff} 307219820Sjeff 308219820Sjeffint mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 309219820Sjeff struct ib_recv_wr **bad_wr) 310219820Sjeff{ 311219820Sjeff struct mlx4_ib_srq *srq = to_msrq(ibsrq); 312219820Sjeff struct mlx4_wqe_srq_next_seg *next; 313219820Sjeff struct mlx4_wqe_data_seg *scat; 314219820Sjeff unsigned long flags; 315219820Sjeff int err = 0; 316219820Sjeff int nreq; 317219820Sjeff int i; 318219820Sjeff 319219820Sjeff spin_lock_irqsave(&srq->lock, flags); 320219820Sjeff 321219820Sjeff for (nreq = 0; wr; ++nreq, wr = wr->next) { 322219820Sjeff if (unlikely(wr->num_sge > srq->msrq.max_gs)) { 323219820Sjeff err = -EINVAL; 324219820Sjeff *bad_wr = wr; 325219820Sjeff break; 326219820Sjeff } 327219820Sjeff 328219820Sjeff if (unlikely(srq->head == srq->tail)) { 329219820Sjeff err = -ENOMEM; 330219820Sjeff *bad_wr = wr; 331219820Sjeff break; 332219820Sjeff } 333219820Sjeff 334219820Sjeff srq->wrid[srq->head] = wr->wr_id; 335219820Sjeff 336219820Sjeff next = get_wqe(srq, srq->head); 337219820Sjeff srq->head = be16_to_cpu(next->next_wqe_index); 338219820Sjeff scat = (struct mlx4_wqe_data_seg *) (next + 1); 339219820Sjeff 340219820Sjeff for (i = 0; i < wr->num_sge; ++i) { 341219820Sjeff scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); 342219820Sjeff scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); 343219820Sjeff scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); 
344219820Sjeff } 345219820Sjeff 346219820Sjeff if (i < srq->msrq.max_gs) { 347219820Sjeff scat[i].byte_count = 0; 348219820Sjeff scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY); 349219820Sjeff scat[i].addr = 0; 350219820Sjeff } 351219820Sjeff } 352219820Sjeff 353219820Sjeff if (likely(nreq)) { 354219820Sjeff srq->wqe_ctr += nreq; 355219820Sjeff 356219820Sjeff /* 357219820Sjeff * Make sure that descriptors are written before 358219820Sjeff * doorbell record. 359219820Sjeff */ 360219820Sjeff wmb(); 361219820Sjeff 362219820Sjeff *srq->db.db = cpu_to_be32(srq->wqe_ctr); 363219820Sjeff } 364219820Sjeff 365219820Sjeff spin_unlock_irqrestore(&srq->lock, flags); 366219820Sjeff 367219820Sjeff return err; 368219820Sjeff} 369