Lines Matching refs:cqe
in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/infiniband/hw/mthca/

174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
184 static inline void set_cqe_hw(struct mthca_cqe *cqe)
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
191 __be32 *cqe = cqe_ptr;
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
197 be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
274 static inline int is_recv_cqe(struct mthca_cqe *cqe)
276 if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
278 return !(cqe->opcode & 0x01);
280 return !(cqe->is_send & 0x80);
286 struct mthca_cqe *cqe;
300 cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
302 if (prod_index == cq->cons_index + cq->ibcq.cqe)
314 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
315 if (cqe->my_qpn == cpu_to_be32(qpn)) {
316 if (srq && is_recv_cqe(cqe))
317 mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
320 memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
321 cqe, MTHCA_CQ_ENTRY_SIZE);
326 set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
346 cq->ibcq.cqe < cq->resize_buf->cqe) {
347 cq->cons_index &= cq->ibcq.cqe;
348 if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
349 cq->cons_index -= cq->ibcq.cqe + 1;
352 for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
354 i & cq->resize_buf->cqe),
355 get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
376 void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)
378 mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
384 struct mthca_err_cqe *cqe,
390 if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
393 be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
395 dump_cqe(dev, cqe);
402 switch (cqe->syndrome) {
462 entry->vendor_err = cqe->vendor_err;
478 if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
481 cqe->db_cnt = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
482 cqe->wqe = new_wqe;
483 cqe->syndrome = SYNDROME_WR_FLUSH_ERR;
495 struct mthca_cqe *cqe;
502 cqe = next_cqe_sw(cq);
503 if (!cqe)
514 cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
515 be32_to_cpu(cqe->wqe));
516 dump_cqe(dev, cqe);
519 is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
521 is_send = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;
523 if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
530 be32_to_cpu(cqe->my_qpn) &
534 be32_to_cpu(cqe->my_qpn) & 0xffffff);
544 wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
550 u32 wqe = be32_to_cpu(cqe->wqe);
558 wqe = be32_to_cpu(cqe->wqe);
581 (struct mthca_err_cqe *) cqe,
588 switch (cqe->opcode) {
605 entry->byte_len = be32_to_cpu(cqe->byte_cnt);
623 entry->byte_len = be32_to_cpu(cqe->byte_cnt);
624 switch (cqe->opcode & 0x1f) {
628 entry->imm_data = cqe->imm_etype_pkey_eec;
634 entry->imm_data = cqe->imm_etype_pkey_eec;
642 entry->slid = be16_to_cpu(cqe->rlid);
643 entry->sl = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
644 entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff;
645 entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
646 entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
647 entry->wc_flags |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
655 set_cqe_hw(cqe);
707 cq->cons_index &= cq->ibcq.cqe;
710 cq->cons_index & cq->resize_buf->cqe))) {
715 tcqe = cq->ibcq.cqe;
717 cq->ibcq.cqe = cq->resize_buf->cqe;
720 cq->resize_buf->cqe = tcqe;
796 cq->ibcq.cqe = nent - 1;
892 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
969 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
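
The pattern running through most of these matches is the CQE ownership handshake: the MTHCA_CQ_ENTRY_OWNER_HW bit (0x80) in each entry's owner byte marks hardware ownership, cqe_sw() (line 174) yields an entry only when that bit is clear, next_cqe_sw() (line 181) peeks at the slot the consumer index points to, and set_cqe_hw() (line 184) hands a consumed slot back to the hardware. The free-running cons_index is masked with cq->ibcq.cqe, which is set to nent - 1 (line 796), so the ring size must be a power of two. Below is a minimal, self-contained sketch of this protocol; the entry layout is simplified and the "hardware" side is a software stand-in, not the driver's real structures.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MTHCA_CQ_ENTRY_OWNER_HW 0x80    /* set => hardware owns the entry */

    struct cqe {
        uint8_t  owner;
        uint32_t payload;       /* stand-in for the rest of the 32-byte entry */
    };

    struct cq {
        struct cqe *queue;
        uint32_t    cons_index; /* free-running consumer index */
        uint32_t    mask;       /* nent - 1; nent is a power of two */
    };

    static struct cqe *get_cqe(struct cq *cq, uint32_t index)
    {
        return &cq->queue[index & cq->mask];
    }

    /* Return the entry only if software owns it (HW bit clear), as on line 176. */
    static struct cqe *cqe_sw(struct cqe *cqe)
    {
        return (cqe->owner & MTHCA_CQ_ENTRY_OWNER_HW) ? NULL : cqe;
    }

    /* Peek at the slot the consumer index points to, as on line 181. */
    static struct cqe *next_cqe_sw(struct cq *cq)
    {
        return cqe_sw(get_cqe(cq, cq->cons_index));
    }

    /* Hand a consumed slot back to hardware, as on line 186. */
    static void set_cqe_hw(struct cqe *cqe)
    {
        cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
    }

    int main(void)
    {
        uint32_t nent = 8, i;
        struct cq cq = {
            .queue      = calloc(nent, sizeof(struct cqe)),
            .cons_index = 0,
            .mask       = nent - 1,
        };
        struct cqe *cqe;

        for (i = 0; i < nent; ++i)          /* all slots start HW-owned */
            set_cqe_hw(&cq.queue[i]);

        for (i = 0; i < 3; ++i) {           /* "hardware" posts 3 completions */
            cq.queue[i].payload = 100 + i;
            cq.queue[i].owner   = 0;
        }

        while ((cqe = next_cqe_sw(&cq))) {  /* poll until a HW-owned slot */
            printf("CQE at %u: payload %u\n", cq.cons_index, cqe->payload);
            set_cqe_hw(cqe);                /* return the slot to hardware */
            ++cq.cons_index;                /* mask on access wraps it */
        }

        free(cq.queue);
        return 0;
    }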
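
Lines 286-326 (mthca_cq_clean) show how completions for a QP being destroyed are removed: the driver finds the producer index by scanning forward while entries are software-owned (lines 300-302), then sweeps backwards to the consumer index, counting entries that match the dead QPN and copying each survivor nfreed slots upward (lines 314-321), so the discarded slots end up contiguous at the consumer end, where they are handed back to hardware and cons_index is advanced (line 326). Here is a sketch of just the compaction step, using plain integers in place of 32-byte CQEs; the ring contents and the QPN value are made up for illustration.

    #include <stdio.h>

    #define NENT     8      /* ring size, power of two */
    #define DEAD_QPN 7      /* QP being destroyed (value is hypothetical) */

    int main(void)
    {
        /* QPN of each pending entry between cons_index and prod_index */
        int      ring[NENT] = { 1, 7, 2, 7, 3 };
        unsigned cons_index = 0, prod_index = 5, nfreed = 0, i;
        int      p;

        /* Sweep backwards, shifting each survivor nfreed slots upward,
         * mirroring lines 314-321. */
        for (p = (int)prod_index - 1; p >= (int)cons_index; --p) {
            if (ring[p & (NENT - 1)] == DEAD_QPN)
                ++nfreed;
            else if (nfreed)
                ring[(p + nfreed) & (NENT - 1)] = ring[p & (NENT - 1)];
        }

        /* The nfreed stale slots at the consumer end would be handed
         * back to hardware (line 326); here we just skip past them. */
        cons_index += nfreed;

        printf("freed %u; survivors:", nfreed);
        for (i = cons_index; i < prod_index; ++i)
            printf(" %d", ring[i & (NENT - 1)]);
        printf("\n");   /* prints: freed 2; survivors: 1 2 3 */
        return 0;
    }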
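
Lines 346-355 handle the copy step of a CQ resize: after correcting cons_index for a possible wraparound relative to the old ring (lines 347-349), every remaining software-owned entry at slot i & cq->ibcq.cqe in the old buffer is copied to slot i & cq->resize_buf->cqe in the new one, so entries keep their position modulo the new size; lines 707-720 then swap the buffers and their size fields. A sketch of the two-mask copy follows; the ring sizes, consumer index, and entry count are arbitrary, and the fixed count stands in for the driver's "while software-owned" loop condition.

    #include <stdio.h>

    int main(void)
    {
        int      old_ring[8] = { 0 }, new_ring[16] = { 0 };
        unsigned old_mask = 8 - 1, new_mask = 16 - 1;
        unsigned cons_index = 6, pending = 4, i;

        /* Four pending entries, wrapping around the old ring. */
        for (i = cons_index; i < cons_index + pending; ++i)
            old_ring[i & old_mask] = 100 + i;

        /* Copy each entry to the slot with the same index modulo the
         * new size, mirroring lines 352-355. */
        for (i = cons_index; i < cons_index + pending; ++i)
            new_ring[i & new_mask] = old_ring[i & old_mask];

        for (i = cons_index; i < cons_index + pending; ++i)
            printf("index %u: %d\n", i & new_mask, new_ring[i & new_mask]);
        return 0;
    }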