Lines Matching refs:qhp in /freebsd-13-stable/contrib/ofed/libcxgb4/

308 struct c4iw_qp *qhp;
314 qhp = calloc(1, sizeof *qhp);
315 if (!qhp)
318 ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd,
330 qhp->wq.qid_mask = resp.qid_mask;
331 qhp->rhp = dev;
332 qhp->wq.sq.qid = resp.sqid;
333 qhp->wq.sq.size = resp.sq_size;
334 qhp->wq.sq.memsize = resp.sq_memsize;
335 qhp->wq.sq.flags = 0;
336 qhp->wq.rq.msn = 1;
337 qhp->wq.rq.qid = resp.rqid;
338 qhp->wq.rq.size = resp.rq_size;
339 qhp->wq.rq.memsize = resp.rq_memsize;
340 pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);
347 qhp->wq.sq.udb = dbva;
348 qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize,
351 if (qhp->wq.sq.queue == MAP_FAILED)
358 qhp->wq.rq.udb = dbva;
359 qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize,
362 if (qhp->wq.rq.queue == MAP_FAILED)
365 qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
366 if (!qhp->wq.sq.sw_sq)
369 qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
370 if (!qhp->wq.rq.sw_rq)
376 qhp->wq.sq.udb, qhp->wq.sq.queue,
377 qhp->wq.sq.size, qhp->wq.sq.memsize,
378 qhp->wq.rq.udb, qhp->wq.rq.queue,
379 qhp->wq.rq.size, qhp->wq.rq.memsize);
381 qhp->sq_sig_all = attr->sq_sig_all;
384 dev->qpid2ptr[qhp->wq.sq.qid] = qhp;
387 return &qhp->ibv_qp;
389 free(qhp->wq.sq.sw_sq);
391 munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
393 munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
395 munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
397 munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
399 (void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
401 free(qhp);
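
The first create_qp variant matched above (lines 308-401) follows the usual allocate, map, and unwind-on-error shape: calloc the qhp, issue ibv_cmd_create_qp, mmap the doorbell pages and queue memory, calloc the software shadow rings, and on any failure release what was acquired so far in reverse order before returning. Below is a minimal, self-contained sketch of that shape, not the libcxgb4 source: fake_qp, create_fake_qp and the *_SIZE/*_MEMSIZE constants are illustrative placeholders, and anonymous mappings stand in for the device mappings.

/* Minimal sketch of the allocate / map / unwind-on-error pattern. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define SQ_MEMSIZE 4096   /* stands in for resp.sq_memsize */
#define RQ_MEMSIZE 4096   /* stands in for resp.rq_memsize */
#define SQ_SIZE    64     /* stands in for resp.sq_size */
#define RQ_SIZE    64     /* stands in for resp.rq_size */

struct fake_qp {
	void *sq_queue;   /* stands in for qhp->wq.sq.queue */
	void *rq_queue;   /* stands in for qhp->wq.rq.queue */
	void *sw_sq;      /* stands in for qhp->wq.sq.sw_sq */
	void *sw_rq;      /* stands in for qhp->wq.rq.sw_rq */
};

static struct fake_qp *create_fake_qp(void)
{
	struct fake_qp *qhp;

	qhp = calloc(1, sizeof *qhp);
	if (!qhp)
		return NULL;

	/* Anonymous mappings stand in for the mmap()s of the HW queues. */
	qhp->sq_queue = mmap(NULL, SQ_MEMSIZE, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (qhp->sq_queue == MAP_FAILED)
		goto err1;
	qhp->rq_queue = mmap(NULL, RQ_MEMSIZE, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (qhp->rq_queue == MAP_FAILED)
		goto err2;
	qhp->sw_sq = calloc(SQ_SIZE, sizeof(uint64_t));
	if (!qhp->sw_sq)
		goto err3;
	qhp->sw_rq = calloc(RQ_SIZE, sizeof(uint64_t));
	if (!qhp->sw_rq)
		goto err4;
	return qhp;

	/* Unwind strictly in reverse order of acquisition. */
err4:
	free(qhp->sw_sq);
err3:
	munmap(qhp->rq_queue, RQ_MEMSIZE);
err2:
	munmap(qhp->sq_queue, SQ_MEMSIZE);
err1:
	free(qhp);
	return NULL;
}

int main(void)
{
	struct fake_qp *qhp = create_fake_qp();

	printf("create_fake_qp %s\n", qhp ? "succeeded" : "failed");
	return 0;
}
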
411 struct c4iw_qp *qhp;
418 qhp = calloc(1, sizeof *qhp);
419 if (!qhp)
422 ret = ibv_cmd_create_qp(pd, &qhp->ibv_qp, attr, &cmd,
434 qhp->wq.qid_mask = resp.qid_mask;
435 qhp->rhp = dev;
436 qhp->wq.sq.qid = resp.sqid;
437 qhp->wq.sq.size = resp.sq_size;
438 qhp->wq.sq.memsize = resp.sq_memsize;
439 qhp->wq.sq.flags = resp.flags & C4IW_QPF_ONCHIP ? T4_SQ_ONCHIP : 0;
440 qhp->wq.sq.flush_cidx = -1;
441 qhp->wq.rq.msn = 1;
442 qhp->wq.rq.qid = resp.rqid;
443 qhp->wq.rq.size = resp.rq_size;
444 qhp->wq.rq.memsize = resp.rq_memsize;
446 sizeof *qhp->wq.sq.queue + 16*sizeof(__be64) ) {
451 pthread_spin_init(&qhp->lock, PTHREAD_PROCESS_PRIVATE);
457 qhp->wq.sq.udb = dbva;
458 if (!dev_is_t4(qhp->rhp)) {
459 unsigned long segment_offset = 128 * (qhp->wq.sq.qid &
460 qhp->wq.qid_mask);
463 qhp->wq.sq.udb += segment_offset / 4;
464 qhp->wq.sq.wc_reg_available = 1;
466 qhp->wq.sq.bar2_qid = qhp->wq.sq.qid & qhp->wq.qid_mask;
467 qhp->wq.sq.udb += 2;
470 qhp->wq.sq.queue = mmap(NULL, qhp->wq.sq.memsize,
473 if (qhp->wq.sq.queue == MAP_FAILED)
480 qhp->wq.rq.udb = dbva;
481 if (!dev_is_t4(qhp->rhp)) {
482 unsigned long segment_offset = 128 * (qhp->wq.rq.qid &
483 qhp->wq.qid_mask);
486 qhp->wq.rq.udb += segment_offset / 4;
487 qhp->wq.rq.wc_reg_available = 1;
489 qhp->wq.rq.bar2_qid = qhp->wq.rq.qid & qhp->wq.qid_mask;
490 qhp->wq.rq.udb += 2;
492 qhp->wq.rq.queue = mmap(NULL, qhp->wq.rq.memsize,
495 if (qhp->wq.rq.queue == MAP_FAILED)
498 qhp->wq.sq.sw_sq = calloc(qhp->wq.sq.size, sizeof (struct t4_swsqe));
499 if (!qhp->wq.sq.sw_sq)
502 qhp->wq.rq.sw_rq = calloc(qhp->wq.rq.size, sizeof (uint64_t));
503 if (!qhp->wq.rq.sw_rq)
506 if (t4_sq_onchip(&qhp->wq)) {
507 qhp->wq.sq.ma_sync = mmap(NULL, c4iw_page_size, PROT_WRITE,
510 if (qhp->wq.sq.ma_sync == MAP_FAILED)
512 qhp->wq.sq.ma_sync += (A_PCIE_MA_SYNC & (c4iw_page_size - 1));
516 qhp->wq.db_offp = &ctx->status_page->db_off;
518 qhp->wq.db_offp =
519 &qhp->wq.rq.queue[qhp->wq.rq.size].status.db_off;
525 qhp->wq.sq.udb, qhp->wq.sq.queue,
526 qhp->wq.sq.size, qhp->wq.sq.memsize,
527 qhp->wq.rq.udb, qhp->wq.rq.queue,
528 qhp->wq.rq.size, qhp->wq.rq.memsize);
530 qhp->sq_sig_all = attr->sq_sig_all;
533 dev->qpid2ptr[qhp->wq.sq.qid] = qhp;
536 return &qhp->ibv_qp;
538 free(qhp->wq.rq.sw_rq);
540 free(qhp->wq.sq.sw_sq);
542 munmap((void *)qhp->wq.rq.queue, qhp->wq.rq.memsize);
544 munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
546 munmap((void *)qhp->wq.sq.queue, qhp->wq.sq.memsize);
548 munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
550 (void)ibv_cmd_destroy_qp(&qhp->ibv_qp);
552 free(qhp);
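
In the second create_qp variant, the matches around lines 459-467 and 482-490 show how the user doorbell pointer is adjusted in the !dev_is_t4() branch: a 128-byte segment is selected by (qid & qid_mask), and udb is advanced by segment_offset / 4 and then by two more. Reading udb as a pointer to 32-bit words is an assumption taken from that division by 4; the sketch below, with the illustrative names udb_for_qid and fake_bar2_page, only mirrors the arithmetic and makes no claim about which hardware register the final pointer reaches.

/* Hedged sketch of the doorbell pointer arithmetic from the matched lines. */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t *udb_for_qid(volatile uint32_t *udb_page,
				      uint32_t qid, uint32_t qid_mask)
{
	/* Each queue appears to own a 128-byte segment in the mapped page. */
	unsigned long segment_offset = 128 * (qid & qid_mask);

	/* Advance in 32-bit words (hence / 4), then two words further,
	 * as the matched lines do. */
	return udb_page + segment_offset / 4 + 2;
}

int main(void)
{
	static uint32_t fake_bar2_page[4096 / sizeof(uint32_t)];
	volatile uint32_t *db = udb_for_qid(fake_bar2_page, 0x123, 0x1f);

	printf("doorbell at byte offset %ld\n",
	       (long)((volatile char *)db - (volatile char *)fake_bar2_page));
	return 0;
}
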
567 static void reset_qp(struct c4iw_qp *qhp)
569 PDBG("%s enter qp %p\n", __func__, qhp);
570 qhp->wq.sq.cidx = 0;
571 qhp->wq.sq.wq_pidx = qhp->wq.sq.pidx = qhp->wq.sq.in_use = 0;
572 qhp->wq.rq.cidx = qhp->wq.rq.pidx = qhp->wq.rq.in_use = 0;
573 qhp->wq.sq.oldest_read = NULL;
574 memset(qhp->wq.sq.queue, 0, qhp->wq.sq.memsize);
575 if (t4_sq_onchip(&qhp->wq))
577 memset(qhp->wq.rq.queue, 0, qhp->wq.rq.memsize);
584 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
588 pthread_spin_lock(&qhp->lock);
589 if (t4_wq_in_error(&qhp->wq))
590 c4iw_flush_qp(qhp);
593 reset_qp(qhp);
594 pthread_spin_unlock(&qhp->lock);
601 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
605 pthread_spin_lock(&qhp->lock);
606 c4iw_flush_qp(qhp);
607 pthread_spin_unlock(&qhp->lock);
613 if (t4_sq_onchip(&qhp->wq)) {
614 qhp->wq.sq.ma_sync -= (A_PCIE_MA_SYNC & (c4iw_page_size - 1));
615 munmap((void *)qhp->wq.sq.ma_sync, c4iw_page_size);
617 munmap(MASKED(qhp->wq.sq.udb), c4iw_page_size);
618 munmap(MASKED(qhp->wq.rq.udb), c4iw_page_size);
619 munmap(qhp->wq.sq.queue, qhp->wq.sq.memsize);
620 munmap(qhp->wq.rq.queue, qhp->wq.rq.memsize);
623 dev->qpid2ptr[qhp->wq.sq.qid] = NULL;
626 free(qhp->wq.rq.sw_rq);
627 free(qhp->wq.sq.sw_sq);
628 free(qhp);
636 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
639 pthread_spin_lock(&qhp->lock);
640 if (t4_wq_in_error(&qhp->wq))
641 c4iw_flush_qp(qhp);
643 pthread_spin_unlock(&qhp->lock);
660 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
663 pthread_spin_lock(&qhp->lock);
664 if (t4_wq_in_error(&qhp->wq))
665 c4iw_flush_qp(qhp);
667 pthread_spin_unlock(&qhp->lock);
674 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
677 pthread_spin_lock(&qhp->lock);
678 if (t4_wq_in_error(&qhp->wq))
679 c4iw_flush_qp(qhp);
681 pthread_spin_unlock(&qhp->lock);
697 struct c4iw_qp *qhp = to_c4iw_qp(event->element.qp);
698 pthread_spin_lock(&qhp->lock);
699 c4iw_flush_qp(qhp);
700 pthread_spin_unlock(&qhp->lock);
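
The functions matched from line 584 onward repeat one pattern: take the per-QP spinlock, flush the work queue if it has entered the error state, then drop the lock. A small stand-alone sketch of that pattern follows; fake_qp, wq_in_error and flush_qp are illustrative stand-ins, not the c4iw_* symbols.

/* Sketch of the spinlock-guarded flush-on-error pattern.
 * Build with: cc -pthread flush_sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_qp {
	pthread_spinlock_t lock;
	bool in_error;
};

static bool wq_in_error(struct fake_qp *qhp) { return qhp->in_error; }

static void flush_qp(struct fake_qp *qhp)
{
	/* The real driver would complete outstanding WRs with flush status. */
	printf("flushing qp %p\n", (void *)qhp);
	qhp->in_error = false;
}

static void post_work(struct fake_qp *qhp)
{
	pthread_spin_lock(&qhp->lock);
	if (wq_in_error(qhp))
		flush_qp(qhp);
	/* ... build and ring the work request here ... */
	pthread_spin_unlock(&qhp->lock);
}

int main(void)
{
	struct fake_qp qhp = { .in_error = true };

	pthread_spin_init(&qhp.lock, PTHREAD_PROCESS_PRIVATE);
	post_work(&qhp);
	pthread_spin_destroy(&qhp.lock);
	return 0;
}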