/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hardirq.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

enum {
	MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
	MTHCA_CQ_ENTRY_SIZE = 0x20
};

enum {
	MTHCA_ATOMIC_BYTE_LEN = 8
};
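
/*
 * Each CQE is MTHCA_CQ_ENTRY_SIZE (32) bytes, so with 4 KB pages up
 * to 512 entries fit under MTHCA_MAX_DIRECT_CQ_SIZE and can use a
 * single physically contiguous ("direct") buffer; larger CQs fall
 * back to the page list handled by get_cqe_from_buf() below.
 */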

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 error_eqn;	/* Tavor only */
	__be32 comp_eqn;
	__be32 pd;
	__be32 lkey;
	__be32 last_notified_index;
	__be32 solicit_producer_index;
	__be32 consumer_index;
	__be32 producer_index;
	__be32 cqn;
	__be32 ci_db;		/* Arbel only */
	__be32 state_db;	/* Arbel only */
	u32    reserved;
} __attribute__((packed));

#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)

enum {
	MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};

enum {
	SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
	SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
	SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
	SYNDROME_LOCAL_PROT_ERR          = 0x04,
	SYNDROME_WR_FLUSH_ERR            = 0x05,
	SYNDROME_MW_BIND_ERR             = 0x06,
	SYNDROME_BAD_RESP_ERR            = 0x10,
	SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
	SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
	SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
	SYNDROME_REMOTE_OP_ERR           = 0x14,
	SYNDROME_RETRY_EXC_ERR           = 0x15,
	SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
	SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
	SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
	SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
	SYNDROME_INVAL_EECN_ERR          = 0x23,
	SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};

struct mthca_cqe {
	__be32 my_qpn;
	__be32 my_ee;
	__be32 rqpn;
	u8     sl_ipok;
	u8     g_mlpath;
	__be16 rlid;
	__be32 imm_etype_pkey_eec;
	__be32 byte_cnt;
	__be32 wqe;
	u8     opcode;
	u8     is_send;
	u8     reserved;
	u8     owner;
};

struct mthca_err_cqe {
	__be32 my_qpn;
	u32    reserved1[3];
	u8     syndrome;
	u8     vendor_err;
	__be16 db_cnt;
	u32    reserved2;
	__be32 wqe;
	u8     opcode;
	u8     reserved3[2];
	u8     owner;
};

#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)
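
/*
 * CQE ownership is tracked in the high bit of the last byte of each
 * entry: hardware owns a CQE until it writes a completion into it.
 * cqe_sw() below returns NULL for entries still owned by hardware.
 * CQ sizes are powers of two and cq->ibcq.cqe holds the size minus
 * one, so it doubles as the index mask used by next_cqe_sw().
 */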

static inline struct mthca_cqe *get_cqe_from_buf(struct mthca_cq_buf *buf,
						 int entry)
{
	if (buf->is_direct)
		return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
	else
		return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}

static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
	return get_cqe_from_buf(&cq->buf, entry);
}

static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
{
	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}

static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
	return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
}

static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}

static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
	__be32 *cqe = cqe_ptr;

	(void) cqe;	/* avoid warning if mthca_dbg compiled away... */
	mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
		  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
		  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}
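
/*
 * The two HCA generations learn about consumed CQEs differently:
 * mem-free (Arbel) hardware reads the consumer index from a doorbell
 * record in host memory, while Tavor needs an explicit INC_CI
 * doorbell saying how many entries were just consumed.
 */
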
/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
				     int incr)
{
	if (mthca_is_memfree(dev)) {
		*cq->set_ci_db = cpu_to_be32(cq->cons_index);
		wmb();
	} else {
		mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,
			      dev->kar + MTHCA_CQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of CQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}
}

void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
{
	struct mthca_cq *cq;

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

	if (!cq) {
		mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
		    enum ib_event_type event_type)
{
	struct mthca_cq *cq;
	struct ib_event event;

	spin_lock(&dev->cq_table.lock);

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
	if (cq)
		++cq->refcount;

	spin_unlock(&dev->cq_table.lock);

	if (!cq) {
		mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.cq = &cq->ibcq;
	if (cq->ibcq.event_handler)
		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);

	spin_lock(&dev->cq_table.lock);
	if (!--cq->refcount)
		wake_up(&cq->wait);
	spin_unlock(&dev->cq_table.lock);
}

static inline int is_recv_cqe(struct mthca_cqe *cqe)
{
	if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
	    MTHCA_ERROR_CQE_OPCODE_MASK)
		return !(cqe->opcode & 0x01);
	else
		return !(cqe->is_send & 0x80);
}
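
/*
 * Remove all CQEs belonging to the QP being reset, compacting the
 * remaining entries so that no stale completion is ever returned to
 * the consumer.  Receive CQEs that came from an SRQ also have their
 * SRQ WQEs freed here.
 */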
void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
		    struct mthca_srq *srq)
{
	struct mthca_cqe *cqe;
	u32 prod_index;
	int i, nfreed = 0;

	spin_lock_irq(&cq->lock);

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index;
	     cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
	     ++prod_index)
		if (prod_index == cq->cons_index + cq->ibcq.cqe)
			break;

	if (0)
		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
			  qpn, cq->cqn, cq->cons_index, prod_index);

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if (cqe->my_qpn == cpu_to_be32(qpn)) {
			if (srq && is_recv_cqe(cqe))
				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
			++nfreed;
		} else if (nfreed)
			memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
			       cqe, MTHCA_CQ_ENTRY_SIZE);
	}

	if (nfreed) {
		for (i = 0; i < nfreed; ++i)
			set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
		wmb();
		cq->cons_index += nfreed;
		update_cons_index(dev, cq, nfreed);
	}

	spin_unlock_irq(&cq->lock);
}
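
/*
 * Copy any not-yet-polled CQEs from the old buffer into the resize
 * buffer, so they are not lost when the buffers are swapped after the
 * RESIZE_CQ firmware command completes.
 */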
void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
{
	int i;

	/*
	 * In Tavor mode, the hardware keeps the consumer and producer
	 * indices mod the CQ size.  Since we might be making the CQ
	 * bigger, we need to deal with the case where the producer
	 * index wrapped around before the CQ was resized.
	 */
	if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&
	    cq->ibcq.cqe < cq->resize_buf->cqe) {
		cq->cons_index &= cq->ibcq.cqe;
		if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
			cq->cons_index -= cq->ibcq.cqe + 1;
	}

	for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
					i & cq->resize_buf->cqe),
		       get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
}

int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)
{
	int ret;
	int i;

	ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
			      MTHCA_MAX_DIRECT_CQ_SIZE,
			      &buf->queue, &buf->is_direct,
			      &dev->driver_pd, 1, &buf->mr);
	if (ret)
		return ret;

	for (i = 0; i < nent; ++i)
		set_cqe_hw(get_cqe_from_buf(buf, i));

	return 0;
}

void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)
{
	mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
		       buf->is_direct, &buf->mr);
}
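
/*
 * Translate a hardware error CQE into an ib_wc status.  On Tavor, a
 * single error CQE can stand for several WQEs (tracked by db_cnt), in
 * which case the CQE is recycled as a flush error for the remaining
 * WQEs instead of being freed.
 */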
static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
			     struct mthca_qp *qp, int wqe_index, int is_send,
			     struct mthca_err_cqe *cqe,
			     struct ib_wc *entry, int *free_cqe)
{
	int dbd;
	__be32 new_wqe;

	if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
		mthca_dbg(dev, "local QP operation err "
			  "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
			  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
			  cq->cqn, cq->cons_index);
		dump_cqe(dev, cqe);
	}

	/*
	 * For completions in error, only work request ID, status, vendor error
	 * (and freed resource count for RD) have to be set.
	 */
	switch (cqe->syndrome) {
	case SYNDROME_LOCAL_LENGTH_ERR:
		entry->status = IB_WC_LOC_LEN_ERR;
		break;
	case SYNDROME_LOCAL_QP_OP_ERR:
		entry->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case SYNDROME_LOCAL_EEC_OP_ERR:
		entry->status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case SYNDROME_LOCAL_PROT_ERR:
		entry->status = IB_WC_LOC_PROT_ERR;
		break;
	case SYNDROME_WR_FLUSH_ERR:
		entry->status = IB_WC_WR_FLUSH_ERR;
		break;
	case SYNDROME_MW_BIND_ERR:
		entry->status = IB_WC_MW_BIND_ERR;
		break;
	case SYNDROME_BAD_RESP_ERR:
		entry->status = IB_WC_BAD_RESP_ERR;
		break;
	case SYNDROME_LOCAL_ACCESS_ERR:
		entry->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_REQ_ERR:
		entry->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ACCESS_ERR:
		entry->status = IB_WC_REM_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_OP_ERR:
		entry->status = IB_WC_REM_OP_ERR;
		break;
	case SYNDROME_RETRY_EXC_ERR:
		entry->status = IB_WC_RETRY_EXC_ERR;
		break;
	case SYNDROME_RNR_RETRY_EXC_ERR:
		entry->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case SYNDROME_LOCAL_RDD_VIOL_ERR:
		entry->status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
		entry->status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ABORTED_ERR:
		entry->status = IB_WC_REM_ABORT_ERR;
		break;
	case SYNDROME_INVAL_EECN_ERR:
		entry->status = IB_WC_INV_EECN_ERR;
		break;
	case SYNDROME_INVAL_EEC_STATE_ERR:
		entry->status = IB_WC_INV_EEC_STATE_ERR;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
		break;
	}

	entry->vendor_err = cqe->vendor_err;

	/*
	 * Mem-free HCAs always generate one CQE per WQE, even in the
	 * error case, so we don't have to check the doorbell count, etc.
	 */
	if (mthca_is_memfree(dev))
		return;

	mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);

	/*
	 * If we're at the end of the WQE chain, or we've used up our
	 * doorbell count, free the CQE.  Otherwise just update it for
	 * the next poll operation.
	 */
	if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
		return;

	be16_add_cpu(&cqe->db_cnt, -dbd);
	cqe->wqe      = new_wqe;
	cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

	*free_cqe = 0;
}
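
/*
 * Poll a single CQE.  Returns 0 on success, -EAGAIN if the CQ is
 * empty, or -EINVAL for a CQE referring to an unknown QP.  *cur_qp
 * caches the QP of the previous CQE so that the QP table lookup is
 * skipped when consecutive completions come from the same QP.
 */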
static inline int mthca_poll_one(struct mthca_dev *dev,
				 struct mthca_cq *cq,
				 struct mthca_qp **cur_qp,
				 int *freed,
				 struct ib_wc *entry)
{
	struct mthca_wq *wq;
	struct mthca_cqe *cqe;
	int wqe_index;
	int is_error;
	int is_send;
	int free_cqe = 1;
	int err = 0;
	u16 checksum;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	if (0) {
		mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
			  be32_to_cpu(cqe->wqe));
		dump_cqe(dev, cqe);
	}

	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
		MTHCA_ERROR_CQE_OPCODE_MASK;
	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}
	}

	entry->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else if ((*cur_qp)->ibqp.srq) {
		struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
		u32 wqe = be32_to_cpu(cqe->wqe);
		wq = NULL;
		wqe_index = wqe >> srq->wqe_shift;
		entry->wr_id = srq->wrid[wqe_index];
		mthca_free_srq_wqe(srq, wqe);
	} else {
		s32 wqe;
		wq = &(*cur_qp)->rq;
		wqe = be32_to_cpu(cqe->wqe);
		wqe_index = wqe >> wq->wqe_shift;
		/*
		 * WQE addr == base - 1 might be reported in receive completion
		 * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
		 * Arbel FW 5.1.400.  This bug should be fixed in later FW revs.
		 */
		if (unlikely(wqe_index < 0))
			wqe_index = wq->max - 1;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}
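
	/*
	 * A completion covers its WQE and every older WQE on the same
	 * work queue, so advance the tail up to the completed index;
	 * the wq->max term handles index wraparound.
	 */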
	if (wq) {
		if (wq->last_comp < wqe_index)
			wq->tail += wqe_index - wq->last_comp;
		else
			wq->tail += wqe_index + wq->max - wq->last_comp;

		wq->last_comp = wqe_index;
	}

	if (is_error) {
		handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				 (struct mthca_err_cqe *) cqe,
				 entry, &free_cqe);
		goto out;
	}

	if (is_send) {
		entry->wc_flags = 0;
		switch (cqe->opcode) {
		case MTHCA_OPCODE_RDMA_WRITE:
			entry->opcode = IB_WC_RDMA_WRITE;
			break;
		case MTHCA_OPCODE_RDMA_WRITE_IMM:
			entry->opcode = IB_WC_RDMA_WRITE;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_SEND:
			entry->opcode = IB_WC_SEND;
			break;
		case MTHCA_OPCODE_SEND_IMM:
			entry->opcode = IB_WC_SEND;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_RDMA_READ:
			entry->opcode = IB_WC_RDMA_READ;
			entry->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_CS:
			entry->opcode = IB_WC_COMP_SWAP;
			entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
			break;
		case MTHCA_OPCODE_ATOMIC_FA:
			entry->opcode = IB_WC_FETCH_ADD;
			entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
			break;
		case MTHCA_OPCODE_BIND_MW:
			entry->opcode = IB_WC_BIND_MW;
			break;
		default:
			entry->opcode = MTHCA_OPCODE_INVALID;
			break;
		}
	} else {
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->ex.imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->ex.imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode = IB_WC_RECV;
			break;
		}
		entry->slid = be16_to_cpu(cqe->rlid);
		entry->sl = cqe->sl_ipok >> 4;
		entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = cqe->g_mlpath & 0x7f;
		entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0;
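
		/*
		 * The hardware-computed checksum is scattered over the
		 * rqpn and my_ee fields; the packet checks out when the
		 * IPok bit is set and the reassembled checksum is 0xffff.
		 */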
		checksum = (be32_to_cpu(cqe->rqpn) >> 24) |
			   ((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);
		entry->csum_ok = (cqe->sl_ipok & 1 && checksum == 0xffff);
	}

	entry->status = IB_WC_SUCCESS;

 out:
	if (likely(free_cqe)) {
		set_cqe_hw(cqe);
		++(*freed);
		++cq->cons_index;
	}

	return err;
}

int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	npolled = 0;
repoll:
	while (npolled < num_entries) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
		++npolled;
	}

	if (freed) {
		wmb();
		update_cons_index(dev, cq, freed);
	}

	/*
	 * If a CQ resize is in progress and we discovered that the
	 * old buffer is empty, then peek in the new buffer, and if
	 * it's not empty, switch to the new buffer and continue
	 * polling there.
	 */
	if (unlikely(err == -EAGAIN && cq->resize_buf &&
		     cq->resize_buf->state == CQ_RESIZE_READY)) {
		/*
		 * In Tavor mode, the hardware keeps the producer
		 * index modulo the CQ size.  Since we might be making
		 * the CQ bigger, we need to mask our consumer index
		 * using the size of the old CQ buffer before looking
		 * in the new CQ buffer.
		 */
		if (!mthca_is_memfree(dev))
			cq->cons_index &= cq->ibcq.cqe;

		if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf,
					    cq->cons_index & cq->resize_buf->cqe))) {
			struct mthca_cq_buf tbuf;
			int tcqe;

			tbuf = cq->buf;
			tcqe = cq->ibcq.cqe;
			cq->buf = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			cq->resize_buf->buf = tbuf;
			cq->resize_buf->cqe = tcqe;
			cq->resize_buf->state = CQ_RESIZE_SWAPPED;

			goto repoll;
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return err == 0 || err == -EAGAIN ? npolled : err;
}
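
/*
 * Arm a Tavor CQ with a single MMIO doorbell, requesting an event on
 * the next solicited completion or on any completion, depending on
 * the flags.
 */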
731219820Sjeff MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : 732219820Sjeff MTHCA_TAVOR_CQ_DB_REQ_NOT) | 733219820Sjeff to_mcq(cq)->cqn; 734219820Sjeff 735219820Sjeff mthca_write64(dbhi, 0xffffffff, to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL, 736219820Sjeff MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock)); 737219820Sjeff 738219820Sjeff return 0; 739219820Sjeff} 740219820Sjeff 741219820Sjeffint mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) 742219820Sjeff{ 743219820Sjeff struct mthca_cq *cq = to_mcq(ibcq); 744219820Sjeff __be32 db_rec[2]; 745219820Sjeff u32 dbhi; 746219820Sjeff u32 sn = cq->arm_sn & 3; 747219820Sjeff 748219820Sjeff db_rec[0] = cpu_to_be32(cq->cons_index); 749219820Sjeff db_rec[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) | 750219820Sjeff ((flags & IB_CQ_SOLICITED_MASK) == 751219820Sjeff IB_CQ_SOLICITED ? 1 : 2)); 752219820Sjeff 753219820Sjeff mthca_write_db_rec(db_rec, cq->arm_db); 754219820Sjeff 755219820Sjeff /* 756219820Sjeff * Make sure that the doorbell record in host memory is 757219820Sjeff * written before ringing the doorbell via PCI MMIO. 758219820Sjeff */ 759219820Sjeff wmb(); 760219820Sjeff 761219820Sjeff dbhi = (sn << 28) | 762219820Sjeff ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 763219820Sjeff MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL : 764219820Sjeff MTHCA_ARBEL_CQ_DB_REQ_NOT) | cq->cqn; 765219820Sjeff 766219820Sjeff mthca_write64(dbhi, cq->cons_index, 767219820Sjeff to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL, 768219820Sjeff MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock)); 769219820Sjeff 770219820Sjeff return 0; 771219820Sjeff} 772219820Sjeff 773219820Sjeffint mthca_init_cq(struct mthca_dev *dev, int nent, 774219820Sjeff struct mthca_ucontext *ctx, u32 pdn, 775219820Sjeff struct mthca_cq *cq) 776219820Sjeff{ 777219820Sjeff struct mthca_mailbox *mailbox; 778219820Sjeff struct mthca_cq_context *cq_context; 779219820Sjeff int err = -ENOMEM; 780219820Sjeff u8 status; 781219820Sjeff 782219820Sjeff cq->ibcq.cqe = nent - 1; 783219820Sjeff cq->is_kernel = !ctx; 784219820Sjeff 785219820Sjeff cq->cqn = mthca_alloc(&dev->cq_table.alloc); 786219820Sjeff if (cq->cqn == -1) 787219820Sjeff return -ENOMEM; 788219820Sjeff 789219820Sjeff if (mthca_is_memfree(dev)) { 790219820Sjeff err = mthca_table_get(dev, dev->cq_table.table, cq->cqn); 791219820Sjeff if (err) 792219820Sjeff goto err_out; 793219820Sjeff 794219820Sjeff if (cq->is_kernel) { 795219820Sjeff cq->arm_sn = 1; 796219820Sjeff 797219820Sjeff err = -ENOMEM; 798219820Sjeff 799219820Sjeff cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, 800219820Sjeff cq->cqn, &cq->set_ci_db); 801219820Sjeff if (cq->set_ci_db_index < 0) 802219820Sjeff goto err_out_icm; 803219820Sjeff 804219820Sjeff cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM, 805219820Sjeff cq->cqn, &cq->arm_db); 806219820Sjeff if (cq->arm_db_index < 0) 807219820Sjeff goto err_out_ci; 808219820Sjeff } 809219820Sjeff } 810219820Sjeff 811219820Sjeff mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 812219820Sjeff if (IS_ERR(mailbox)) 813219820Sjeff goto err_out_arm; 814219820Sjeff 815219820Sjeff cq_context = mailbox->buf; 816219820Sjeff 817219820Sjeff if (cq->is_kernel) { 818219820Sjeff err = mthca_alloc_cq_buf(dev, &cq->buf, nent); 819219820Sjeff if (err) 820219820Sjeff goto err_out_mailbox; 821219820Sjeff } 822219820Sjeff 823219820Sjeff spin_lock_init(&cq->lock); 824219820Sjeff cq->refcount = 1; 825219820Sjeff init_waitqueue_head(&cq->wait); 826219820Sjeff mutex_init(&cq->mutex); 
	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
	if (ctx)
		cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
	else
		cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(pdn);
	cq_context->lkey            = cpu_to_be32(cq->buf.mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	if (mthca_is_memfree(dev)) {
		cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	if (status) {
		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);

	cq->cons_index = 0;

	mthca_free_mailbox(dev, mailbox);

	return 0;

err_out_free_mr:
	if (cq->is_kernel)
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_arm:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
	mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

	return err;
}

static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
{
	int c;

	spin_lock_irq(&dev->cq_table.lock);
	c = cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

	return c;
}
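
/*
 * Tear-down order matters here: the CQ is returned to software
 * ownership (HW2SW_CQ), removed from the lookup table, and interrupts
 * are synchronized so that no handler can still hold a pointer to it;
 * only then are the buffer and doorbell records freed.
 */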
void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

	if (0) {
		__be32 *ctx = mailbox->buf;
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index,
		       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	--cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
		synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
	else
		synchronize_irq(dev->pdev->irq);

	wait_event(cq->wait, !get_cq_refcount(dev, cq));

	if (cq->is_kernel) {
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
		if (mthca_is_memfree(dev)) {
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
		}
	}

	mthca_table_put(dev, dev->cq_table.table, cq->cqn);
	mthca_free(&dev->cq_table.alloc, cq->cqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}

void mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}