Lines matching refs:wq (inline helpers for struct t4_wq, the cxgb4 send/receive work queue, from the provider header t4.h; spans whose lines carry no wq reference are elided and marked with /* ... */ comments)

static inline void t4_ma_sync(struct t4_wq *wq, int page_size)
{
	wc_wmb();
	/* Poke the mapped MA (memory arbiter) sync page with a single
	 * volatile store, fenced by write-combining barriers. */
	*((volatile u32 *)wq->sq.ma_sync) = 1;
	wc_wmb();
}
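The volatile cast is what guarantees exactly one store reaches the mapped page. A toy standalone model of the same pattern, assuming a compiler barrier in place of the real wc_wmb() (which on x86 is typically an sfence); the names here are illustrative, not the driver's:

#include <stdint.h>

#define compiler_barrier() __asm__ __volatile__("" ::: "memory")

static void ma_sync(void *ma_sync_page)
{
	compiler_barrier();
	/* volatile forces the compiler to emit this store as written */
	*(volatile uint32_t *)ma_sync_page = 1;
	compiler_barrier();
}

int main(void)
{
	static uint32_t fake_page;	/* stands in for the mmap'ed sync page */

	ma_sync(&fake_page);
	return 0;
}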
static inline int t4_rqes_posted(struct t4_wq *wq)
{
	return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
	return wq->rq.in_use == 0;
}

static inline int t4_rq_full(struct t4_wq *wq)
{
	return wq->rq.in_use == (wq->rq.size - 1);
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
	return wq->rq.size - 1 - wq->rq.in_use;
}
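Note that "full" is size - 1, not size: the occupancy helpers always hold one ring entry in reserve, a common ring-buffer convention. A minimal standalone model of this accounting (types and names are illustrative, not the driver's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct mini_rq {
	uint32_t size;		/* number of ring entries */
	uint32_t in_use;	/* entries currently posted */
};

static int mini_rq_full(const struct mini_rq *rq)
{
	/* One entry is always held back, so "full" is size - 1. */
	return rq->in_use == rq->size - 1;
}

static uint32_t mini_rq_avail(const struct mini_rq *rq)
{
	return rq->size - 1 - rq->in_use;
}

int main(void)
{
	struct mini_rq rq = { .size = 8, .in_use = 0 };

	while (!mini_rq_full(&rq))
		rq.in_use++;
	assert(rq.in_use == 7 && mini_rq_avail(&rq) == 0);
	printf("posted %u of %u slots\n", rq.in_use, rq.size);
	return 0;
}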
static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
	wq->rq.in_use++;
	if (++wq->rq.pidx == wq->rq.size)
		wq->rq.pidx = 0;
	/* Advance the hardware producer index by however many
	 * T4_EQ_ENTRY_SIZE-byte slots the WQE occupies; len16 counts
	 * 16-byte units. */
	wq->rq.wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
	if (!wq->error)
		wq->rq.queue[wq->rq.size].status.host_pidx = wq->rq.pidx;
}
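The unit conversion is the subtle part: len16 is the WQE length in 16-byte units, while wq_pidx advances in equeue slots. A standalone sketch of the same arithmetic, assuming the usual values T4_EQ_ENTRY_SIZE = 64 and T4_RQ_NUM_SLOTS = 2 (check your header):

#include <stdint.h>
#include <stdio.h>

#define T4_EQ_ENTRY_SIZE 64	/* assumed; the header defines the real value */
#define T4_RQ_NUM_SLOTS  2	/* assumed: max RQ WQE spans 2 slots */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* A WQE of len16 = 5 is 80 bytes and therefore spans two
	 * 64-byte slots, so wq_pidx must advance by 2, not 1. */
	uint8_t len16 = 5;
	unsigned wq_pidx = 0, size = 8;

	wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
	if (wq_pidx >= size * T4_RQ_NUM_SLOTS)
		wq_pidx %= size * T4_RQ_NUM_SLOTS;
	printf("wq_pidx advanced to %u\n", wq_pidx);	/* prints 2 */
	return 0;
}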
static inline void t4_rq_consume(struct t4_wq *wq)
{
	wq->rq.in_use--;
	wq->rq.msn++;	/* bump the expected message sequence number */
	if (++wq->rq.cidx == wq->rq.size)
		wq->rq.cidx = 0;
	assert((wq->rq.cidx != wq->rq.pidx) || wq->rq.in_use == 0);
	if (!wq->error)
		wq->rq.queue[wq->rq.size].status.host_cidx = wq->rq.cidx;
}
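Producer and consumer indices wrap independently of in_use; the assert above catches the one inconsistent state, namely equal indices while entries are still outstanding. A self-contained demo of the wrap behaviour, again with stand-in types rather than the driver's:

#include <assert.h>
#include <stdint.h>

struct mini_ring {
	uint16_t size, pidx, cidx, in_use;
};

static void produce(struct mini_ring *r)
{
	r->in_use++;
	if (++r->pidx == r->size)
		r->pidx = 0;
}

static void consume(struct mini_ring *r)
{
	r->in_use--;
	if (++r->cidx == r->size)
		r->cidx = 0;
	/* Same invariant as t4_rq_consume(): equal indices are only
	 * legal once the ring has drained completely. */
	assert(r->cidx != r->pidx || r->in_use == 0);
}

int main(void)
{
	struct mini_ring r = { .size = 4 };
	int i;

	for (i = 0; i < 10; i++) {	/* crosses the wrap twice */
		produce(&r);
		consume(&r);
	}
	assert(r.in_use == 0 && r.pidx == r.cidx);
	return 0;
}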
static inline int t4_sq_empty(struct t4_wq *wq)
{
	return wq->sq.in_use == 0;
}

static inline int t4_sq_full(struct t4_wq *wq)
{
	return wq->sq.in_use == (wq->sq.size - 1);
}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{
	return wq->sq.size - 1 - wq->sq.in_use;
}

static inline int t4_sq_onchip(struct t4_wq *wq)
{
	return wq->sq.flags & T4_SQ_ONCHIP;
}
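T4_SQ_ONCHIP is a bit in sq.flags marking send queues allocated in on-chip adapter memory rather than host DMA memory; later doorbell code branches on it. A trivial sketch of the flag test, with an assumed bit value (the header defines the real one):

#include <stdint.h>
#include <stdio.h>

#define T4_SQ_ONCHIP (1 << 0)	/* assumed encoding, for illustration only */

struct mini_sq {
	uint16_t flags;
};

static int mini_sq_onchip(const struct mini_sq *sq)
{
	/* Non-zero when the SQ lives in adapter (on-chip) memory. */
	return sq->flags & T4_SQ_ONCHIP;
}

int main(void)
{
	struct mini_sq sq = { .flags = T4_SQ_ONCHIP };

	printf("onchip: %d\n", !!mini_sq_onchip(&sq));
	return 0;
}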
static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
	wq->sq.in_use++;
	if (++wq->sq.pidx == wq->sq.size)
		wq->sq.pidx = 0;
	wq->sq.wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
	if (!wq->error)
		wq->sq.queue[wq->sq.size].status.host_pidx = wq->sq.pidx;
}
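Both produce paths end by writing the new host index into queue[size], the entry one past the ring's last WQE slot, which serves as a software status page shared with the kernel driver. A sketch of that layout, with illustrative stand-in types only:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-ins for the driver's WQE and status types. */
struct mini_status {
	volatile uint16_t host_pidx, host_cidx;
	volatile uint8_t  qp_err;
};

union mini_wqe {
	struct mini_status status;
	uint8_t raw[64];
};

int main(void)
{
	uint16_t size = 8;
	/* Allocate size + 1 entries: the extra one holds the status. */
	union mini_wqe *queue = calloc(size + 1, sizeof(*queue));

	if (!queue)
		return 1;
	queue[size].status.host_pidx = 3;	/* what t4_sq_produce() does */
	free(queue);
	return 0;
}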
static inline void t4_sq_consume(struct t4_wq *wq)
{
	assert(wq->sq.in_use >= 1);
	/* If the flush marker points at the entry being retired, clear it. */
	if (wq->sq.cidx == wq->sq.flush_cidx)
		wq->sq.flush_cidx = -1;
	wq->sq.in_use--;
	if (++wq->sq.cidx == wq->sq.size)
		wq->sq.cidx = 0;
	assert((wq->sq.cidx != wq->sq.pidx) || wq->sq.in_use == 0);
	if (!wq->error)
		wq->sq.queue[wq->sq.size].status.host_cidx = wq->sq.cidx;
}
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5, u8 len16,
				 union t4_wr *wqe)
{
	wc_wmb();
	if (t5) {
		if (t5_en_wc && inc == 1 && wq->sq.wc_reg_available) {
			PDBG("%s: WC wq->sq.pidx = %d; len16=%d\n",
			     __func__, wq->sq.pidx, len16);
			copy_wqe_to_udb(wq->sq.udb + 14, wqe);
		} else {
			PDBG("%s: DB wq->sq.pidx = %d; len16=%d\n",
			     __func__, wq->sq.pidx, len16);
			writel(V_QID(wq->sq.bar2_qid) | V_PIDX_T5(inc), wq->sq.udb);
		}
		wc_wmb();
		return;
	}
	if (t4_sq_onchip(wq)) {
		/* ... declaration of i and flush loop elided by the search ... */
		*(volatile u32 *)&wq->sq.queue[wq->sq.size].flits[2+i] = i;
	}
	if (t4_sq_onchip(wq)) {
		/* ... elided ... */
		*(u32 *)&wq->sq.queue[wq->sq.size].flits[2] = i;
	}
	writel(V_QID(wq->sq.qid & wq->qid_mask) | V_PIDX(inc), wq->sq.udb);
}
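On T5, a single-WQE post (inc == 1) with a write-combining register available copies the whole WQE through the BAR2 WC window; otherwise the doorbell packs the queue id and the pidx increment into one 32-bit MMIO store. A standalone sketch of that packing, with assumed field shifts (the provider's real V_QID()/V_PIDX() macros come from the adapter register definitions):

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout for illustration only. */
#define S_QID	15
#define V_QID(x)	((uint32_t)(x) << S_QID)
#define V_PIDX(x)	((uint32_t)(x) << 0)

int main(void)
{
	uint32_t qid = 42, inc = 1;
	uint32_t db = V_QID(qid) | V_PIDX(inc);

	/* In the driver this value goes to the user doorbell page:
	 *     writel(db, wq->sq.udb); */
	printf("doorbell word: 0x%08x\n", db);
	return 0;
}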
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, u8 len16,
				 union t4_recv_wr *wqe)
{
	wc_wmb();
	if (t5) {
		/* note: the WC-capability flag is tracked on the SQ side */
		if (t5_en_wc && inc == 1 && wq->sq.wc_reg_available) {
			PDBG("%s: WC wq->rq.pidx = %d; len16=%d\n",
			     __func__, wq->rq.pidx, len16);
			copy_wqe_to_udb(wq->rq.udb + 14, wqe);
		} else {
			PDBG("%s: DB wq->rq.pidx = %d; len16=%d\n",
			     __func__, wq->rq.pidx, len16);
			writel(V_QID(wq->rq.bar2_qid) | V_PIDX_T5(inc), wq->rq.udb);
		}
		wc_wmb();
		return;
	}
	writel(V_QID(wq->rq.qid & wq->qid_mask) | V_PIDX(inc), wq->rq.udb);
}
static inline int t4_wq_in_error(struct t4_wq *wq)
{
	return wq->error || wq->rq.queue[wq->rq.size].status.qp_err;
}

static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.qp_err = 1;
}
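qp_err lives in the RQ's status entry (one past the end of the ring), so either side that shares the queue memory can flag the QP as dead and the other sees it on its next check. An illustrative model of that handshake, with stand-in names:

#include <stdint.h>

struct mini_status {
	volatile uint8_t qp_err;
};

struct mini_wq {
	int error;			/* software-detected error */
	struct mini_status *status;	/* models &rq.queue[rq.size].status */
};

static int mini_wq_in_error(struct mini_wq *wq)
{
	return wq->error || wq->status->qp_err;
}

static void mini_set_wq_in_error(struct mini_wq *wq)
{
	wq->status->qp_err = 1;
}

int main(void)
{
	struct mini_status st = { 0 };
	struct mini_wq wq = { .error = 0, .status = &st };

	mini_set_wq_in_error(&wq);
	return !mini_wq_in_error(&wq);	/* exits 0: the error is visible */
}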
static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
	/* ... comment block elided by the search ... */
	return !*wq->db_offp;
}
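db_offp evidently points at a word shared with the kernel that is non-zero while doorbells must be suppressed, as used by cxgb4's doorbell-drop recovery. A toy model of that gate (the variable below stands in for *wq->db_offp):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t db_off;	/* stands in for *wq->db_offp */

static int db_enabled(void)
{
	return !db_off;
}

int main(void)
{
	db_off = 1;	/* kernel enters doorbell recovery */
	printf("enabled: %d\n", db_enabled());	/* 0 */
	db_off = 0;	/* recovery done */
	printf("enabled: %d\n", db_enabled());	/* 1 */
	return 0;
}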