Searched refs:rq_depth (Results 1 - 25 of 29) sorted by relevance


/linux-master/block/
blk-wbt.c
93 struct rq_depth rq_depth; member in struct:rq_wb
306 struct rq_depth *rqd = &rwb->rq_depth;
359 struct rq_depth *rqd = &rwb->rq_depth;
369 } else if (rwb->rq_depth.max_depth <= 2) {
370 rwb->wb_normal = rwb->rq_depth.max_depth;
373 rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
374 rwb->wb_background = (rwb->rq_depth
[all...]
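
Note on the blk-wbt.c matches: wbt derives its two writeback throttle tiers from rq_depth.max_depth. A minimal standalone C sketch of that tiering follows; the max_depth <= 2 case and the half-depth value come from the matches above, while the quarter-depth background value is an assumption (that line is truncated in the listing).

/* Hedged sketch of the blk-wbt tier split, not the verbatim
 * kernel function. */
static void calc_wb_limits_sketch(unsigned int max_depth,
                                  unsigned int *wb_normal,
                                  unsigned int *wb_background)
{
    if (max_depth <= 2) {
        /* tiny queues: normal writeback may use the whole depth */
        *wb_normal = max_depth;
        *wb_background = 1;
    } else {
        *wb_normal = (max_depth + 1) / 2;     /* ~half the depth */
        *wb_background = (max_depth + 3) / 4; /* assumed: ~a quarter */
    }
}

For max_depth = 64 this yields wb_normal = 32 and wb_background = 16 under the assumed quarter rule.
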
blk-rq-qos.h
51 struct rq_depth { struct
99 bool rq_depth_scale_up(struct rq_depth *rqd);
100 bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
101 bool rq_depth_calc_max_depth(struct rq_depth *rqd);
blk-rq-qos.c
110 bool rq_depth_calc_max_depth(struct rq_depth *rqd)
158 bool rq_depth_scale_up(struct rq_depth *rqd)
177 bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
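
Note on the blk-rq-qos.h/.c matches: struct rq_depth plus the scale_up/scale_down/calc_max_depth prototypes form a small shared QoS API. The sketch below guesses at the surrounding fields and call pattern; only max_depth is actually visible in the matches, so treat every other field name as an assumption.

#include <stdbool.h>

/* Assumed layout; only max_depth appears in the matches above. */
struct rq_depth_sketch {
    unsigned int max_depth;     /* current inflight limit */
    int scale_step;             /* >0 = throttled, <0 = boosted */
    bool scaled_max;            /* hit the ceiling while scaling up */
    unsigned int queue_depth;   /* device queue depth */
    unsigned int default_depth; /* fallback when queue_depth unknown */
};

/* Plausible caller pattern implied by the three prototypes:
 * latency verdicts nudge scale_step, then max_depth is recomputed. */
static void rq_depth_adjust_sketch(struct rq_depth_sketch *rqd, bool too_slow)
{
    if (too_slow)
        rqd->scale_step++;  /* roughly what rq_depth_scale_down() does */
    else
        rqd->scale_step--;  /* roughly what rq_depth_scale_up() does */
    /* rq_depth_calc_max_depth() would rederive max_depth here */
}
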
/linux-master/drivers/net/ethernet/fungible/funcore/
fun_queue.c
175 for (i = 0; i < funq->rq_depth; i++) {
192 for (i = 0; i < funq->rq_depth; i++) {
209 funq->rq_tail = funq->rq_depth - 1;
222 if (++funq->rq_buf_idx == funq->rq_depth)
344 funq->rq_depth;
375 funq->rqes = fun_alloc_ring_mem(funq->fdev->dev, funq->rq_depth,
396 fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes),
419 if (req->rq_depth) {
458 if (req->rq_depth) {
460 funq->rq_depth
[all...]
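
Note on the fun_queue.c matches: rq_depth drives a classic ring discipline there, with allocation sized by depth, the tail initialized to rq_depth - 1, and a buffer index that wraps at rq_depth. A standalone sketch with hypothetical names:

struct funq_sketch {
    unsigned int rq_depth;   /* number of RQ entries */
    unsigned int rq_buf_idx; /* next buffer slot to refill */
    unsigned int rq_tail;    /* last posted descriptor */
};

static void funq_reset_sketch(struct funq_sketch *q, unsigned int depth)
{
    q->rq_depth = depth;
    q->rq_buf_idx = 0;
    q->rq_tail = depth - 1; /* as in rq_tail = rq_depth - 1 above */
}

static void funq_advance_buf_sketch(struct funq_sketch *q)
{
    /* wraparound exactly as in the match:
     * if (++funq->rq_buf_idx == funq->rq_depth) ... */
    if (++q->rq_buf_idx == q->rq_depth)
        q->rq_buf_idx = 0;
}
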
fun_dev.h
94 u16 rq_depth; member in struct:fun_dev_params
fun_queue.h
47 u32 rq_depth; member in struct:fun_queue
124 u32 rq_depth; member in struct:fun_queue_alloc_req
fun_dev.c
232 .rq_depth = areq->rq_depth,
280 if (areq->rq_depth) {
582 if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)
/linux-master/net/9p/
trans_rdma.c
58 * @rq_depth: The depth of the Receive Queue.
85 int rq_depth; member in struct:p9_trans_rdma
119 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
126 int rq_depth; member in struct:p9_rdma_opts
158 if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)
159 seq_printf(m, ",rq=%u", rdma->rq_depth);
183 opts->rq_depth = P9_RDMA_RQ_DEPTH;
220 opts->rq_depth = option;
233 opts->rq_depth = max(opts->rq_depth, opt
[all...]
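
Note on the trans_rdma.c matches: rq_depth is a 9p mount option (rq=) defaulting to P9_RDMA_RQ_DEPTH, and the @rq_depth comment requires it to be at least the SQ depth, which the truncated max() match appears to enforce. A hedged sketch (the default value below is a placeholder, not the kernel's constant):

#define P9_RDMA_RQ_DEPTH_SKETCH 32 /* assumed stand-in default */

struct p9_rdma_opts_sketch {
    int sq_depth;
    int rq_depth;
};

static void p9_resolve_rq_depth_sketch(struct p9_rdma_opts_sketch *opts,
                                       int rq_option, int rq_option_set)
{
    /* rq=<n> overrides the default, as in the option parsing above */
    opts->rq_depth = rq_option_set ? rq_option : P9_RDMA_RQ_DEPTH_SKETCH;
    /* RQ must be >= SQ depth; likely the truncated max() match */
    if (opts->rq_depth < opts->sq_depth)
        opts->rq_depth = opts->sq_depth;
}
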
/linux-master/net/sunrpc/xprtrdma/
svc_rdma_transport.c
378 unsigned int ctxts, rq_depth; local
417 rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests +
419 if (rq_depth > dev->attrs.max_qp_wr) {
420 rq_depth = dev->attrs.max_qp_wr;
422 newxprt->sc_max_requests = rq_depth - 2;
432 newxprt->sc_sq_depth = rq_depth + ctxts;
447 ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
457 qp_attr.cap.max_recv_wr = rq_depth;
469 newxprt->sc_sq_depth, rq_depth);
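
Note on the svc_rdma_transport.c matches: rq_depth is summed from the request budgets, clamped to the device's max_qp_wr, and on a clamp sc_max_requests is rederived as rq_depth - 2. The third addend of the sum is truncated in the listing, so it is left as a parameter in this sketch:

static unsigned int svc_rdma_rq_depth_sketch(unsigned int max_requests,
                                             unsigned int max_bc_requests,
                                             unsigned int extra,
                                             unsigned int dev_max_qp_wr,
                                             unsigned int *max_requests_out)
{
    unsigned int rq_depth = max_requests + max_bc_requests + extra;

    *max_requests_out = max_requests;
    if (rq_depth > dev_max_qp_wr) {
        /* device cannot post that many recv WRs: shrink, then
         * rederive the request budget from the clamped depth */
        rq_depth = dev_max_qp_wr;
        *max_requests_out = rq_depth - 2; /* as in the match above */
    }
    return rq_depth; /* CQ and QP recv caps are then sized from this */
}
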
/linux-master/drivers/net/ethernet/fungible/funeth/
funeth.h
76 unsigned int rq_depth; member in struct:fun_qset
116 unsigned int rq_depth; member in struct:funeth_priv
funeth_ethtool.c
566 ring->rx_pending = fp->rq_depth;
594 fp->rq_depth == ring->rx_pending)
600 .rq_depth = ring->rx_pending,
610 fp->rq_depth = ring->rx_pending;
611 fp->cq_depth = 2 * fp->rq_depth;
funeth_main.c
509 qset->rq_depth, qset->rxq_start, qset->state);
842 .rq_depth = fp->rq_depth,
1643 .rq_depth = fp->rq_depth,
1783 fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth);
2003 .rq_depth = ADMIN_RQ_DEPTH,
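
Note on the funeth matches: ethtool's rx_pending maps one-to-one onto rq_depth, and the completion queue is sized at twice the receive queue (cq_depth = 2 * rq_depth). A minimal sketch with hypothetical names:

struct funeth_depths_sketch {
    unsigned int rq_depth;
    unsigned int cq_depth;
};

static int funeth_set_rx_pending_sketch(struct funeth_depths_sketch *fp,
                                        unsigned int rx_pending)
{
    if (fp->rq_depth == rx_pending)
        return 0; /* unchanged: the driver skips the rebuild, as above */
    fp->rq_depth = rx_pending;
    fp->cq_depth = 2 * fp->rq_depth; /* matches cq_depth = 2 * rq_depth */
    return 1; /* caller would rebuild the rings */
}
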
/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_hw_io.h
74 u16 rq_depth; member in struct:hinic_func_to_io
hinic_dev.h
100 u16 rq_depth; member in struct:hinic_dev
hinic_hw_dev.c
265 * @rq_depth: rq depth
271 unsigned int rq_depth)
285 hw_ioctxt.rq_depth = ilog2(rq_depth);
434 * @rq_depth: the receive queue depth
438 int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth) argument
462 func_to_io->rq_depth = rq_depth;
489 err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);
270 set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int sq_depth, unsigned int rq_depth) argument
hinic_hw_dev.h
286 u16 rq_depth; member in struct:hinic_cmd_hw_ioctxt
627 int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);
hinic_hw_mbox.c
1289 ((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \
1290 (hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH && \
1300 if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&
hinic_ethtool.c
557 ring->rx_pending = nic_dev->rq_depth;
600 new_rq_depth == nic_dev->rq_depth)
605 nic_dev->sq_depth, nic_dev->rq_depth,
609 nic_dev->rq_depth = new_rq_depth;
hinic_hw_io.c
293 func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);
hinic_port.h
317 u32 rq_depth; member in struct:hinic_rq_num
hinic_port.c
491 rq_num.rq_depth = ilog2(nic_dev->rq_depth);
hinic_main.c
423 nic_dev->rq_depth);
1213 nic_dev->rq_depth = HINIC_RQ_DEPTH;
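
Note on the hinic matches: depths are range-checked against HINIC_QUEUE_MIN_DEPTH/HINIC_QUEUE_MAX_DEPTH and handed to hardware as a log2 exponent (hw_ioctxt.rq_depth = ilog2(rq_depth)). The sketch below uses placeholder bounds and a portable ilog2; the power-of-two check is an inference, since ilog2() would silently truncate a non-power-of-two depth.

#include <stdint.h>

#define QUEUE_MIN_DEPTH_SKETCH 64U   /* placeholder bound */
#define QUEUE_MAX_DEPTH_SKETCH 4096U /* placeholder bound */

static int hinic_encode_rq_depth_sketch(uint16_t rq_depth, uint8_t *log_depth)
{
    uint16_t d = rq_depth;
    uint8_t exp = 0;

    if (rq_depth < QUEUE_MIN_DEPTH_SKETCH ||
        rq_depth > QUEUE_MAX_DEPTH_SKETCH ||
        (rq_depth & (rq_depth - 1)) != 0) /* power of two? (inferred) */
        return -1;

    while (d >>= 1)  /* portable stand-in for the kernel's ilog2() */
        exp++;
    *log_depth = exp; /* hw_ioctxt.rq_depth = ilog2(rq_depth) above */
    return 0;
}
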
/linux-master/drivers/infiniband/hw/irdma/
user.h
302 u32 *rq_depth, u8 *rq_shift);
379 u32 rq_depth; member in struct:irdma_qp_uk_init_info
verbs.c
637 ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
645 (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
647 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
677 status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
688 kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
699 size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
717 ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
719 info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
721 ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
724 iwqp->max_recv_wr = (ukinfo->rq_depth
[all...]
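
Note on the irdma matches: rq_depth counts raw WQE slots while rq_size = rq_depth >> rq_shift is the user-visible capacity, and the shadow area is placed immediately after the rq_depth slots. A sketch of that layout arithmetic (the WQE size constant stands in for IRDMA_QP_WQE_MIN_SIZE; its value here is assumed):

#include <stdint.h>

#define QP_WQE_MIN_SIZE_SKETCH 32U /* assumed WQE stride in bytes */

struct irdma_rq_layout_sketch {
    uint32_t rq_depth;    /* total WQE slots */
    uint8_t  rq_shift;    /* log2 of slots consumed per request */
    uint32_t rq_size;     /* user-visible request capacity */
    uint64_t shadow_off;  /* byte offset of the shadow area */
};

static void irdma_compute_rq_layout_sketch(struct irdma_rq_layout_sketch *l)
{
    /* rq_size = rq_depth >> rq_shift, as in the verbs.c matches */
    l->rq_size = l->rq_depth >> l->rq_shift;
    /* shadow area follows the last WQE: rq[rq_depth].elem above */
    l->shadow_off = (uint64_t)l->rq_depth * QP_WQE_MIN_SIZE_SKETCH;
}
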
/linux-master/drivers/infiniband/hw/efa/
efa_com_cmd.h
26 u32 rq_depth; member in struct:efa_com_create_qp_params

Completed in 274 milliseconds
