Lines matching refs: qp

196 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
198 return qp->qpn >= dev->qp_table.sqp_start &&
199 qp->qpn <= dev->qp_table.sqp_start + 3;
202 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
204 return qp->qpn >= dev->qp_table.sqp_start &&
205 qp->qpn <= dev->qp_table.sqp_start + 1;
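
The two range checks above pick the four special QPNs out of a contiguous block starting at sqp_start: the first two are the per-port QP0s (as is_qp0 implies) and the remaining two the per-port QP1s. A minimal stand-alone sketch of the same checks; the fake_* types and the sqp_start value are hypothetical stand-ins for the driver structures:

        #include <stdio.h>

        struct fake_qp_table { unsigned sqp_start; };
        struct fake_dev      { struct fake_qp_table qp_table; };
        struct fake_qp       { unsigned qpn; };

        static int is_sqp(struct fake_dev *dev, struct fake_qp *qp)
        {
                return qp->qpn >= dev->qp_table.sqp_start &&
                       qp->qpn <= dev->qp_table.sqp_start + 3;
        }

        static int is_qp0(struct fake_dev *dev, struct fake_qp *qp)
        {
                return qp->qpn >= dev->qp_table.sqp_start &&
                       qp->qpn <= dev->qp_table.sqp_start + 1;
        }

        int main(void)
        {
                struct fake_dev dev = { .qp_table = { .sqp_start = 8 } };
                unsigned n;

                for (n = 8; n < 12; ++n) {
                        struct fake_qp qp = { .qpn = n };
                        printf("qpn %u: special=%d qp0=%d\n", n,
                               is_sqp(&dev, &qp), is_qp0(&dev, &qp));
                }
                return 0;
        }
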
208 static void *get_recv_wqe(struct mthca_qp *qp, int n)
210 if (qp->is_direct)
211 return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
213 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
214 ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
217 static void *get_send_wqe(struct mthca_qp *qp, int n)
219 if (qp->is_direct)
220 return qp->queue.direct.buf + qp->send_wqe_offset +
221 (n << qp->sq.wqe_shift);
223 return qp->queue.page_list[(qp->send_wqe_offset +
224 (n << qp->sq.wqe_shift)) >>
226 ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
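
get_recv_wqe() and get_send_wqe() resolve a WQE index to an address either directly (one contiguous buffer) or through a page list, splitting the byte offset into a page index and an in-page offset. A small user-space sketch of that arithmetic with made-up values; locate_send_wqe() is not a driver function:

        #include <stdio.h>

        #define PAGE_SHIFT 12
        #define PAGE_SIZE  (1UL << PAGE_SHIFT)

        /* Hypothetical helper: where WQE n of a paged send ring would live. */
        static void locate_send_wqe(unsigned long send_wqe_offset,
                                    unsigned wqe_shift, unsigned n)
        {
                unsigned long byte_off = send_wqe_offset +
                                         ((unsigned long) n << wqe_shift);

                printf("send wqe %u -> page %lu, offset 0x%lx\n",
                       n, byte_off >> PAGE_SHIFT, byte_off & (PAGE_SIZE - 1));
        }

        int main(void)
        {
                unsigned n;

                /* Example: the receive ring fills the first 0x1000 bytes, so
                 * the 128-byte send WQEs start at send_wqe_offset = 0x1000. */
                for (n = 30; n < 34; ++n)
                        locate_send_wqe(0x1000, 7, n);
                return 0;
        }
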
241 struct mthca_qp *qp;
245 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
246 if (qp)
247 ++qp->refcount;
250 if (!qp) {
257 qp->port = qp->alt_port;
261 event.element.qp = &qp->ibqp;
262 if (qp->ibqp.event_handler)
263 qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
266 if (!--qp->refcount)
267 wake_up(&qp->wait);
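
The event path looks the QP up by masking the QPN with num_qps - 1 and takes a reference so the QP cannot be freed while the handler runs. A stand-alone sketch of that masked lookup, assuming a power-of-two table size; all names here (fake_qp, lookup_qp, NUM_QPS) are invented for illustration and the locking/wakeup machinery is omitted:

        #include <stdio.h>

        #define NUM_QPS 16                      /* must be a power of two for the mask */

        struct fake_qp { unsigned qpn; int refcount; };

        static struct fake_qp *qp_table[NUM_QPS];

        static struct fake_qp *lookup_qp(unsigned qpn)
        {
                return qp_table[qpn & (NUM_QPS - 1)];
        }

        int main(void)
        {
                static struct fake_qp my_qp = { .qpn = 0x400021, .refcount = 1 };
                struct fake_qp *qp;

                qp_table[my_qp.qpn & (NUM_QPS - 1)] = &my_qp;

                qp = lookup_qp(0x400021);
                if (qp)
                        ++qp->refcount;         /* hold the QP while dispatching the event */

                /* ... event handler would run here ... */

                if (!--qp->refcount)
                        printf("last reference dropped, destroyer may proceed\n");
                printf("qpn %#x refcount %d\n", qp->qpn, qp->refcount);
                return 0;
        }
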
328 static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
338 dest_rd_atomic = qp->resp_depth;
343 access_flags = qp->atomic_rd_en;
434 struct mthca_qp *qp = to_mqp(ibqp);
441 mutex_lock(&qp->mutex);
443 if (qp->state == IB_QPS_RESET) {
454 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
464 qp->state = to_ib_qp_state(mthca_state);
465 qp_attr->qp_state = qp->state;
476 if (qp->transport == RC || qp->transport == UC) {
489 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
505 qp_attr->cap.max_send_wr = qp->sq.max;
506 qp_attr->cap.max_recv_wr = qp->rq.max;
507 qp_attr->cap.max_send_sge = qp->sq.max_gs;
508 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
509 qp_attr->cap.max_inline_data = qp->max_inline_data;
512 qp_init_attr->sq_sig_type = qp->sq_policy;
518 mutex_unlock(&qp->mutex);
563 struct mthca_qp *qp = to_mqp(ibqp);
582 (to_mthca_st(qp->transport) << 16));
603 if (qp->transport == MLX || qp->transport == UD)
615 if (qp->rq.max)
616 qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
617 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
619 if (qp->sq.max)
620 qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
621 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
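
rq_size_stride and sq_size_stride each pack two things into one byte: log2 of the queue depth shifted into the upper bits and the WQE stride exponent minus 4 in the low three bits. A sketch of that packing with a hypothetical helper; the exact hardware field widths are assumed from the expressions above:

        #include <stdio.h>

        static unsigned ilog2_u(unsigned v)
        {
                unsigned r = 0;

                while (v >>= 1)
                        ++r;
                return r;
        }

        /* Hypothetical helper packing depth and stride the way the listing suggests. */
        static unsigned char pack_size_stride(unsigned max_wqes, unsigned wqe_shift)
        {
                unsigned char b = 0;

                if (max_wqes)
                        b = ilog2_u(max_wqes) << 3;     /* log2(depth) in the upper bits */
                b |= wqe_shift - 4;                     /* stride = 16 << (wqe_shift - 4) bytes */
                return b;
        }

        int main(void)
        {
                /* e.g. 256 receive WQEs of 64 bytes each -> 0x42 */
                printf("rq_size_stride = 0x%02x\n", pack_size_stride(256, 6));
                /* e.g. 128 send WQEs of 128 bytes each -> 0x3b */
                printf("sq_size_stride = 0x%02x\n", pack_size_stride(128, 7));
                return 0;
        }
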
626 if (qp->ibqp.uobject)
630 qp_context->local_qpn = cpu_to_be32(qp->qpn);
635 if (qp->transport == MLX)
637 cpu_to_be32(qp->port << 24);
661 attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
711 qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
715 if (qp->sq_policy == IB_SIGNAL_ALL_WR)
738 qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
739 qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index);
751 qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask);
771 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
777 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);
793 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
801 qp->state = new_state;
803 qp->atomic_rd_en = attr->qp_access_flags;
805 qp->resp_depth = attr->max_dest_rd_atomic;
807 qp->port = attr->port_num;
809 qp->alt_port = attr->alt_port_num;
811 if (is_sqp(dev, qp))
812 store_attrs(qp->sqp, attr, attr_mask);
818 if (is_qp0(dev, qp)) {
821 init_port(dev, qp->port);
827 mthca_CLOSE_IB(dev, qp->port);
834 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
835 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
836 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
837 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
838 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);
840 mthca_wq_reset(&qp->sq);
841 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
843 mthca_wq_reset(&qp->rq);
844 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
847 *qp->sq.db = 0;
848 *qp->rq.db = 0;
862 struct mthca_qp *qp = to_mqp(ibqp);
869 mutex_lock(&qp->mutex);
873 spin_lock_irq(&qp->sq.lock);
874 spin_lock(&qp->rq.lock);
875 cur_state = qp->state;
876 spin_unlock(&qp->rq.lock);
877 spin_unlock_irq(&qp->sq.lock);
886 qp->transport, cur_state, new_state,
927 mutex_unlock(&qp->mutex);
931 static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
939 switch (qp->transport) {
967 struct mthca_qp *qp)
969 int max_data_size = mthca_max_data_size(dev, qp,
971 1 << qp->sq.wqe_shift));
973 qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);
975 qp->sq.max_gs = min_t(int, dev->limits.max_sg,
977 qp->rq.max_gs = min_t(int, dev->limits.max_sg,
978 (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
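
mthca_adjust_qp_caps() derives the usable scatter/gather count from whatever space a WQE has left after its control segment. A sketch of the receive-side calculation, assuming the usual 16-byte next and data segment sizes; the example device limit is made up:

        #include <stdio.h>

        #define NEXT_SEG_SIZE 16        /* control ("next") segment, assumed 16 bytes */
        #define DATA_SEG_SIZE 16        /* scatter/gather entry, assumed 16 bytes */

        static int min_i(int a, int b) { return a < b ? a : b; }

        int main(void)
        {
                int max_desc_sz  = 1008;        /* example device descriptor limit */
                int rq_wqe_shift = 6;           /* 64-byte receive WQEs */
                int max_gs = (min_i(max_desc_sz, 1 << rq_wqe_shift) - NEXT_SEG_SIZE)
                             / DATA_SEG_SIZE;

                printf("rq.max_gs = %d\n", max_gs);     /* 3 for this example */
                return 0;
        }
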
984 * Allocate and register buffer for WQEs. qp->rq.max, sq.max,
992 struct mthca_qp *qp,
999 qp->rq.max_gs * sizeof (struct mthca_data_seg);
1004 for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
1005 qp->rq.wqe_shift++)
1008 size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
1009 switch (qp->transport) {
1048 for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
1049 qp->sq.wqe_shift++)
1052 qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
1053 1 << qp->sq.wqe_shift);
1063 size = PAGE_ALIGN(qp->send_wqe_offset +
1064 (qp->sq.max << qp->sq.wqe_shift));
1066 qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64),
1068 if (!qp->wrid)
1072 &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
1079 kfree(qp->wrid);
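
mthca_alloc_wqe_buf() rounds each WQE size up to a power of two (the wqe_shift loops), places the send ring after the receive ring at an offset aligned to the send WQE size, and page-aligns the whole allocation. A stand-alone sketch of that sizing; the descriptor sizes and ring depths are purely illustrative:

        #include <stdio.h>

        #define PAGE_SIZE       4096UL
        #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long) (a) - 1))

        /* Smallest power of two >= size, with a 64-byte floor (shift starts at 6). */
        static unsigned shift_for(unsigned size)
        {
                unsigned shift;

                for (shift = 6; (1u << shift) < size; ++shift)
                        ;
                return shift;
        }

        int main(void)
        {
                unsigned rq_max = 256, sq_max = 128;    /* example ring depths */
                unsigned rq_shift = shift_for(48);      /* 48-byte recv descriptor */
                unsigned sq_shift = shift_for(100);     /* 100-byte send descriptor */
                unsigned long send_wqe_offset =
                        ALIGN_UP((unsigned long) rq_max << rq_shift, 1u << sq_shift);
                unsigned long total =
                        ALIGN_UP(send_wqe_offset +
                                 ((unsigned long) sq_max << sq_shift), PAGE_SIZE);

                printf("rq_shift=%u sq_shift=%u send_wqe_offset=0x%lx total=0x%lx\n",
                       rq_shift, sq_shift, send_wqe_offset, total);
                return 0;
        }
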
1084 struct mthca_qp *qp)
1086 mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
1087 (qp->sq.max << qp->sq.wqe_shift)),
1088 &qp->queue, qp->is_direct, &qp->mr);
1089 kfree(qp->wrid);
1093 struct mthca_qp *qp)
1098 ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
1102 ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
1107 qp->qpn << dev->qp_table.rdb_shift);
1116 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
1119 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
1125 struct mthca_qp *qp)
1128 qp->qpn << dev->qp_table.rdb_shift);
1129 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
1130 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
1134 struct mthca_qp *qp)
1137 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
1138 qp->qpn, &qp->rq.db);
1139 if (qp->rq.db_index < 0)
1142 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
1143 qp->qpn, &qp->sq.db);
1144 if (qp->sq.db_index < 0) {
1145 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
1154 struct mthca_qp *qp)
1157 mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
1158 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
1167 struct mthca_qp *qp,
1174 qp->refcount = 1;
1175 init_waitqueue_head(&qp->wait);
1176 mutex_init(&qp->mutex);
1177 qp->state = IB_QPS_RESET;
1178 qp->atomic_rd_en = 0;
1179 qp->resp_depth = 0;
1180 qp->sq_policy = send_policy;
1181 mthca_wq_reset(&qp->sq);
1182 mthca_wq_reset(&qp->rq);
1184 spin_lock_init(&qp->sq.lock);
1185 spin_lock_init(&qp->rq.lock);
1187 ret = mthca_map_memfree(dev, qp);
1191 ret = mthca_alloc_wqe_buf(dev, pd, qp, udata);
1193 mthca_unmap_memfree(dev, qp);
1197 mthca_adjust_qp_caps(dev, pd, qp);
1207 ret = mthca_alloc_memfree(dev, qp);
1209 mthca_free_wqe_buf(dev, qp);
1210 mthca_unmap_memfree(dev, qp);
1217 qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
1219 for (i = 0; i < qp->rq.max; ++i) {
1220 next = get_recv_wqe(qp, i);
1221 next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
1222 qp->rq.wqe_shift);
1226 (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
1231 for (i = 0; i < qp->sq.max; ++i) {
1232 next = get_send_wqe(qp, i);
1233 next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
1234 qp->sq.wqe_shift) +
1235 qp->send_wqe_offset);
1238 for (i = 0; i < qp->rq.max; ++i) {
1239 next = get_recv_wqe(qp, i);
1240 next->nda_op = htonl((((i + 1) % qp->rq.max) <<
1241 qp->rq.wqe_shift) | 1);
1246 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
1247 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
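
The initialisation loops above seed each WQE's nda_op with the ring offset of the following WQE so the HCA can chain descriptors: the mem-free path masks with max - 1 (power-of-two rings) and adds send_wqe_offset on the send side, while the Tavor path uses a modulo and sets the low DBD bit. A sketch of the mem-free variant; htonl() stands in for cpu_to_be32() and the ring sizes are examples:

        #include <stdio.h>
        #include <arpa/inet.h>          /* htonl(), standing in for cpu_to_be32() */

        int main(void)
        {
                unsigned rq_max = 8, rq_shift = 6;      /* 8 x 64-byte receive WQEs */
                unsigned sq_max = 8, sq_shift = 7;      /* 8 x 128-byte send WQEs */
                unsigned long send_wqe_offset = (unsigned long) rq_max << rq_shift;
                unsigned i;

                for (i = 0; i < rq_max; ++i) {
                        unsigned nda = ((i + 1) & (rq_max - 1)) << rq_shift;
                        printf("rq wqe %u: nda_op = %#010x\n", i, (unsigned) htonl(nda));
                }
                for (i = 0; i < sq_max; ++i) {
                        unsigned nda = ((((i + 1) & (sq_max - 1)) << sq_shift) +
                                        (unsigned) send_wqe_offset);
                        printf("sq wqe %u: nda_op = %#010x\n", i, (unsigned) htonl(nda));
                }
                return 0;
        }
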
1253 struct mthca_pd *pd, struct mthca_qp *qp)
1255 int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);
1269 if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
1273 qp->rq.max = cap->max_recv_wr ?
1275 qp->sq.max = cap->max_send_wr ?
1278 qp->rq.max = cap->max_recv_wr;
1279 qp->sq.max = cap->max_send_wr;
1282 qp->rq.max_gs = cap->max_recv_sge;
1283 qp->sq.max_gs = max_t(int, cap->max_send_sge,
1298 struct mthca_qp *qp,
1304 case IB_QPT_RC: qp->transport = RC; break;
1305 case IB_QPT_UC: qp->transport = UC; break;
1306 case IB_QPT_UD: qp->transport = UD; break;
1310 err = mthca_set_qp_size(dev, cap, pd, qp);
1314 qp->qpn = mthca_alloc(&dev->qp_table.alloc);
1315 if (qp->qpn == -1)
1319 qp->port = 0;
1322 send_policy, qp, udata);
1324 mthca_free(&dev->qp_table.alloc, qp->qpn);
1329 mthca_array_set(&dev->qp_table.qp,
1330 qp->qpn & (dev->limits.num_qps - 1), qp);
1374 struct mthca_qp *qp,
1380 qp->transport = MLX;
1381 err = mthca_set_qp_size(dev, cap, pd, qp);
1385 qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE;
1386 qp->sqp->header_buf =
1387 dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
1388 &qp->sqp->header_dma, GFP_KERNEL);
1389 if (!qp->sqp->header_buf)
1393 if (mthca_array_get(&dev->qp_table.qp, mqpn))
1396 mthca_array_set(&dev->qp_table.qp, mqpn, qp);
1402 qp->port = port;
1403 qp->qpn = mqpn;
1404 qp->transport = MLX;
1407 send_policy, qp, udata);
1423 mthca_array_clear(&dev->qp_table.qp, mqpn);
1429 dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
1430 qp->sqp->header_buf, qp->sqp->header_dma);
1434 static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
1439 c = qp->refcount;
1446 struct mthca_qp *qp)
1451 send_cq = to_mcq(qp->ibqp.send_cq);
1452 recv_cq = to_mcq(qp->ibqp.recv_cq);
1461 mthca_array_clear(&dev->qp_table.qp,
1462 qp->qpn & (dev->limits.num_qps - 1));
1463 --qp->refcount;
1468 wait_event(qp->wait, !get_qp_refcount(dev, qp));
1470 if (qp->state != IB_QPS_RESET)
1471 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
1479 if (!qp->ibqp.uobject) {
1480 mthca_cq_clean(dev, recv_cq, qp->qpn,
1481 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1483 mthca_cq_clean(dev, send_cq, qp->qpn, NULL);
1485 mthca_free_memfree(dev, qp);
1486 mthca_free_wqe_buf(dev, qp);
1489 mthca_unmap_memfree(dev, qp);
1491 if (is_sqp(dev, qp)) {
1492 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
1493 dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
1494 qp->sqp->header_buf, qp->sqp->header_dma);
1496 mthca_free(&dev->qp_table.alloc, qp->qpn);
1500 static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind,
1505 struct mthca_sqp *sqp = qp->sqp;
1518 mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
1539 sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : 0;
1543 if (!qp->ibqp.qp_num)
1544 ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index,
1547 ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index,
1554 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
1561 data->lkey = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey);
1629 struct mthca_qp *qp = to_mqp(ibqp);
1649 spin_lock_irqsave(&qp->sq.lock, flags);
1653 ind = qp->sq.next_ind;
1656 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1658 " %d max, %d nreq)\n", qp->qpn,
1659 qp->sq.head, qp->sq.tail,
1660 qp->sq.max, nreq);
1666 wqe = get_send_wqe(qp, ind);
1667 prev_wqe = qp->sq.last;
1668 qp->sq.last = wqe;
1685 switch (qp->transport) {
1741 dev, qp, ind, ud_wr(wr),
1752 if (wr->num_sge > qp->sq.max_gs) {
1766 if (qp->transport == MLX) {
1774 qp->wrid[ind + qp->rq.max] = wr->wr_id;
1784 cpu_to_be32(((ind << qp->sq.wqe_shift) +
1785 qp->send_wqe_offset) |
1801 if (unlikely(ind >= qp->sq.max))
1802 ind -= qp->sq.max;
1809 mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
1810 qp->send_wqe_offset) | f0 | op0,
1811 (qp->qpn << 8) | size0,
1816 qp->sq.next_ind = ind;
1817 qp->sq.head += nreq;
1819 spin_unlock_irqrestore(&qp->sq.lock, flags);
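
The Tavor send path rings a doorbell built from two 32-bit words: the ring offset of the first new WQE or'ed with the fence and opcode bits, and the QPN shifted left by 8 with the first WQE's size in the low byte. A sketch of how those words are assembled per the expressions above; byte-swapping and the mthca_write64() MMIO access are omitted, and the example opcode/size values are illustrative:

        #include <stdio.h>
        #include <stdint.h>

        /* Hypothetical helper; not a driver function. */
        static void build_send_doorbell(uint32_t db[2],
                                        uint32_t next_ind, uint32_t sq_shift,
                                        uint32_t send_wqe_offset,
                                        uint32_t f0, uint32_t op0,
                                        uint32_t qpn, uint32_t size0)
        {
                db[0] = ((next_ind << sq_shift) + send_wqe_offset) | f0 | op0;
                db[1] = (qpn << 8) | size0;     /* low byte: first WQE size in 16-byte units */
        }

        int main(void)
        {
                uint32_t db[2];

                /* example values only: WQE index 3, 128-byte send WQEs, opcode 0x0a */
                build_send_doorbell(db, 3, 7, 0x1000, 0, 0x0a, 0x48, 4);
                printf("doorbell words: %#010x %#010x\n",
                       (unsigned) db[0], (unsigned) db[1]);
                return 0;
        }
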
1827 struct mthca_qp *qp = to_mqp(ibqp);
1845 spin_lock_irqsave(&qp->rq.lock, flags);
1849 ind = qp->rq.next_ind;
1852 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1854 " %d max, %d nreq)\n", qp->qpn,
1855 qp->rq.head, qp->rq.tail,
1856 qp->rq.max, nreq);
1862 wqe = get_recv_wqe(qp, ind);
1863 prev_wqe = qp->rq.last;
1864 qp->rq.last = wqe;
1873 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
1885 qp->wrid[ind] = wr->wr_id;
1894 if (unlikely(ind >= qp->rq.max))
1895 ind -= qp->rq.max;
1903 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
1904 qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL,
1907 qp->rq.next_ind = ind;
1908 qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
1916 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
1917 qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
1921 qp->rq.next_ind = ind;
1922 qp->rq.head += nreq;
1924 spin_unlock_irqrestore(&qp->rq.lock, flags);
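
Because the Tavor receive doorbell carries the request count in a single byte, the posting loop above flushes a doorbell every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs and once more for the remainder. A sketch of that batching; the constant name and value (256, matching the one-byte count) are stand-ins here:

        #include <stdio.h>

        #define MAX_WQES_PER_RECV_DB 256        /* one-byte count in the doorbell word */

        int main(void)
        {
                unsigned posted = 600, nreq = 0, doorbells = 0, i;

                for (i = 0; i < posted; ++i) {
                        if (++nreq == MAX_WQES_PER_RECV_DB) {
                                ++doorbells;    /* flush a full batch mid-loop */
                                nreq = 0;
                        }
                }
                if (nreq)
                        ++doorbells;            /* final partial batch after the loop */

                printf("%u doorbells for %u posted WQEs\n", doorbells, posted);
                return 0;
        }
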
1932 struct mthca_qp *qp = to_mqp(ibqp);
1953 spin_lock_irqsave(&qp->sq.lock, flags);
1957 ind = qp->sq.head & (qp->sq.max - 1);
1964 ((qp->sq.head & 0xffff) << 8) | f0 | op0;
1966 qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
1973 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
1981 mthca_write64(dbhi, (qp->qpn << 8) | size0,
1986 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1988 " %d max, %d nreq)\n", qp->qpn,
1989 qp->sq.head, qp->sq.tail,
1990 qp->sq.max, nreq);
1996 wqe = get_send_wqe(qp, ind);
1997 prev_wqe = qp->sq.last;
1998 qp->sq.last = wqe;
2015 switch (qp->transport) {
2071 dev, qp, ind, ud_wr(wr),
2082 if (wr->num_sge > qp->sq.max_gs) {
2096 if (qp->transport == MLX) {
2104 qp->wrid[ind + qp->rq.max] = wr->wr_id;
2114 cpu_to_be32(((ind << qp->sq.wqe_shift) +
2115 qp->send_wqe_offset) |
2131 if (unlikely(ind >= qp->sq.max))
2132 ind -= qp->sq.max;
2137 dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;
2139 qp->sq.head += nreq;
2146 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
2154 mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
2158 spin_unlock_irqrestore(&qp->sq.lock, flags);
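
In the mem-free (Arbel) send path the software doorbell record in host memory is updated before the MMIO doorbell is written, with a write barrier in between, so the HCA never observes a doorbell ahead of the recorded head. A user-space sketch of that ordering; the variables are stand-ins, __sync_synchronize() stands in for wmb(), and cpu_to_be32() plus the real register layout are omitted:

        #include <stdio.h>
        #include <stdint.h>

        static volatile uint32_t doorbell_record;       /* stands in for *qp->sq.db */
        static volatile uint32_t mmio_doorbell[2];      /* stands in for dev->kar + MTHCA_SEND_DOORBELL */

        static void ring_send_doorbell(uint32_t sq_head, uint32_t dbhi, uint32_t dblo)
        {
                doorbell_record = sq_head & 0xffff;     /* update the record first */
                __sync_synchronize();                   /* record visible before the MMIO write */
                mmio_doorbell[0] = dbhi;
                mmio_doorbell[1] = dblo;
        }

        int main(void)
        {
                /* example: 1 new request, SQ head 17, opcode/size values made up */
                ring_send_doorbell(17, (1u << 24) | (17u << 8) | 0x0a, (0x48u << 8) | 4);
                printf("record=%u db=%#x/%#x\n",
                       (unsigned) doorbell_record,
                       (unsigned) mmio_doorbell[0], (unsigned) mmio_doorbell[1]);
                return 0;
        }
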
2166 struct mthca_qp *qp = to_mqp(ibqp);
2174 spin_lock_irqsave(&qp->rq.lock, flags);
2178 ind = qp->rq.head & (qp->rq.max - 1);
2181 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2183 " %d max, %d nreq)\n", qp->qpn,
2184 qp->rq.head, qp->rq.tail,
2185 qp->rq.max, nreq);
2191 wqe = get_recv_wqe(qp, ind);
2197 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2208 if (i < qp->rq.max_gs)
2211 qp->wrid[ind] = wr->wr_id;
2214 if (unlikely(ind >= qp->rq.max))
2215 ind -= qp->rq.max;
2219 qp->rq.head += nreq;
2226 *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
2229 spin_unlock_irqrestore(&qp->rq.lock, flags);
2233 void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2242 if (qp->ibqp.srq && !is_send) {
2248 next = get_send_wqe(qp, index);
2250 next = get_recv_wqe(qp, index);
2280 err = mthca_array_init(&dev->qp_table.qp,
2302 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2315 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);