Searched refs:q_depth (Results 1 - 25 of 33) sorted by relevance


/linux-master/drivers/net/ethernet/huawei/hinic/
hinic_hw_wq.h
30 u16 q_depth; member in struct:hinic_wq
80 u16 q_depth, u16 max_wqe_size);
91 u16 wqebb_size, u32 wq_page_size, u16 q_depth,
hinic_hw_wq.c
34 #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
498 * @q_depth: number of wqebbs in WQ
504 u16 wqebb_size, u32 wq_page_size, u16 q_depth,
523 if (q_depth & (q_depth - 1)) {
524 dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
547 wq->q_depth = q_depth;
564 atomic_set(&wq->delta, q_depth);
565 wq->mask = q_depth
503 hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, u16 wqebb_size, u32 wq_page_size, u16 q_depth, u16 max_wqe_size) argument
599 hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, struct hinic_wq *wq, struct hinic_hwif *hwif, int cmdq_blocks, u16 wqebb_size, u32 wq_page_size, u16 q_depth, u16 max_wqe_size) argument
[all...]
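The hinic_hw_wq.c lines above reject any q_depth that is not a power of 2 and then derive wq->mask from it, the usual trick that lets a free-running index be wrapped into the ring with a single AND. A minimal standalone sketch of that pattern (struct and function names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint16_t q_depth;	/* number of entries, must be a power of 2 */
	uint16_t mask;		/* q_depth - 1, wraps free-running indices */
};

static bool ring_init(struct ring *r, uint16_t q_depth)
{
	/* same test as the driver: a power of 2 has exactly one bit set */
	if (!q_depth || (q_depth & (q_depth - 1)))
		return false;
	r->q_depth = q_depth;
	r->mask = q_depth - 1;
	return true;
}

/* wrap a free-running index into the ring with a single AND */
static uint16_t ring_slot(const struct ring *r, uint16_t idx)
{
	return idx & r->mask;
}

int main(void)
{
	struct ring r;

	if (!ring_init(&r, 256))
		return 1;
	/* index 300 lands on slot 44 (300 & 255) */
	printf("slot for index 300: %u\n", (unsigned)ring_slot(&r, 300));
	return 0;
}
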
hinic_hw_qp.c
224 skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);
252 skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
324 cqe_size = wq->q_depth * sizeof(*rq->cqe);
329 cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
334 for (i = 0; i < wq->q_depth; i++) {
367 for (i = 0; i < wq->q_depth; i++)
hinic_hw_cmdq.c
363 if (next_prod_idx >= wq->q_depth) {
365 next_prod_idx -= wq->q_depth;
442 if (next_prod_idx >= wq->q_depth) {
444 next_prod_idx -= wq->q_depth;
749 cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
754 wq->q_depth));
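The cmdq lines wrap the producer index the other common way: add the number of entries consumed and subtract q_depth once it runs past the end, which also works when a command spans more than one WQEBB. A small standalone sketch of that advance-and-wrap step (not the driver code):

#include <stdint.h>
#include <stdio.h>

/*
 * Advance a producer index by num_wqebbs entries in a queue of q_depth
 * entries, wrapping by subtraction as in the cmdq lines above.
 * Assumes num_wqebbs <= q_depth.
 */
static uint16_t advance_prod_idx(uint16_t prod_idx, uint16_t num_wqebbs,
				 uint16_t q_depth)
{
	uint16_t next_prod_idx = prod_idx + num_wqebbs;

	if (next_prod_idx >= q_depth)
		next_prod_idx -= q_depth;
	return next_prod_idx;
}

int main(void)
{
	/* depth 64: moving 3 entries from index 62 wraps to index 1 */
	printf("%u\n", (unsigned)advance_prod_idx(62, 3, 64));
	return 0;
}
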
/linux-master/drivers/net/ethernet/microsoft/mana/
hw_channel.c
350 static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth, argument
363 eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
367 cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
390 comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
398 hwc_cq->queue_depth = q_depth;
411 static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth, argument
425 dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
429 dma_buf->num_reqs = q_depth;
431 buf_size = PAGE_ALIGN(q_depth * max_msg_size);
443 for (i = 0; i < q_depth;
482 mana_hwc_create_wq(struct hw_channel_context *hwc, enum gdma_queue_type q_type, u16 q_depth, u32 max_msg_size, struct hwc_cq *hwc_cq, struct hwc_wq **hwc_wq_ptr) argument
586 mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth, u32 max_req_msg_size, u32 max_resp_msg_size) argument
616 mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth, u32 *max_req_msg_size, u32 *max_resp_msg_size) argument
658 mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth, u32 max_req_msg_size, u32 max_resp_msg_size) argument
[all...]
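The mana hw_channel lines size the event and completion queues as q_depth times the per-entry size, rounded up to a power of two, and allocate q_depth per-request buffers alongside. A hedged sketch of that sizing arithmetic; the entry sizes and the roundup helper below are userspace stand-ins, not the kernel definitions:

#include <stddef.h>
#include <stdio.h>

#define EQE_SIZE 16u	/* illustrative entry sizes, not taken from the driver */
#define CQE_SIZE 64u

/* userspace stand-in for the kernel's roundup_pow_of_two() */
static size_t roundup_pow2(size_t n)
{
	size_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int q_depth = 100;
	size_t eq_size = roundup_pow2(EQE_SIZE * q_depth);
	size_t cq_size = roundup_pow2(CQE_SIZE * q_depth);

	/* 100 * 16 = 1600 -> 2048; 100 * 64 = 6400 -> 8192 */
	printf("eq_size=%zu cq_size=%zu\n", eq_size, cq_size);
	return 0;
}
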
/linux-master/drivers/net/ethernet/amazon/ena/
ena_eth_com.h
82 return io_sq->q_depth - 1 - cnt;
196 if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
207 masked_head = io_cq->head & (io_cq->q_depth - 1);
225 if (unlikely(*req_id >= io_cq->q_depth)) {
ena_eth_com.c
15 head_masked = io_cq->head & (io_cq->q_depth - 1);
40 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
55 dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
83 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
217 if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
226 idx &= (io_cq->q_depth - 1);
254 head_masked = io_cq->head & (io_cq->q_depth - 1);
538 u16 q_depth = io_cq->q_depth; local
566 if (unlikely(ena_buf[i].req_id >= q_depth))
[all...]
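The ena lines mask free-running head/tail values with q_depth - 1 to find the current slot and report free space as q_depth - 1 - cnt, holding one entry back. A minimal sketch of that bookkeeping (the struct is illustrative, not ena's):

#include <stdint.h>
#include <stdio.h>

struct io_ring {
	uint16_t q_depth;	/* power of 2 */
	uint16_t head;		/* free-running consumer index */
	uint16_t tail;		/* free-running producer index */
};

/* entries currently queued; free-running counters make this a plain subtract */
static uint16_t ring_used(const struct io_ring *r)
{
	return r->tail - r->head;
}

/* matches the q_depth - 1 - cnt accounting in the snippet above */
static uint16_t ring_free(const struct io_ring *r)
{
	return r->q_depth - 1 - ring_used(r);
}

/* slot of the next descriptor to write */
static uint16_t ring_tail_masked(const struct io_ring *r)
{
	return r->tail & (r->q_depth - 1);
}

int main(void)
{
	struct io_ring r = { .q_depth = 128, .head = 250, .tail = 260 };

	printf("used=%u free=%u slot=%u\n", (unsigned)ring_used(&r),
	       (unsigned)ring_free(&r), (unsigned)ring_tail_masked(&r));
	return 0;
}
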
ena_com.c
91 u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
113 u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
135 ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
144 aenq->head = aenq->q_depth;
154 aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
180 if (unlikely(command_id >= admin_queue->q_depth)) {
183 command_id, admin_queue->q_depth);
216 queue_size_mask = admin_queue->q_depth - 1;
222 if (cnt >= admin_queue->q_depth) {
266 size_t size = admin_queue->q_depth * sizeo
[all...]
ena_com.h
127 u16 q_depth; member in struct:ena_com_io_cq
169 u16 q_depth; member in struct:ena_com_io_sq
218 u16 q_depth; member in struct:ena_com_admin_queue
248 u16 q_depth; member in struct:ena_com_aenq
/linux-master/drivers/net/ethernet/brocade/bna/
bnad.c
78 for (i = 0; i < ccb->q_depth; i++) {
91 u32 q_depth, u32 index)
114 BNA_QE_INDX_INC(index, q_depth);
126 BNA_QE_INDX_INC(index, q_depth);
143 for (i = 0; i < tcb->q_depth; i++) {
147 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
162 u32 wis, unmap_wis, hw_cons, cons, q_depth; local
174 q_depth = tcb->q_depth;
176 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
89 bnad_tx_buff_unmap(struct bnad *bnad, struct bnad_tx_unmap *unmap_q, u32 q_depth, u32 index) argument
341 u32 alloced, prod, q_depth; local
415 u32 alloced, prod, q_depth, buff_sz; local
2914 u32 prod, q_depth, vect_id; local
[all...]
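The bnad lines walk unmap arrays with BNA_QE_INDX_INC and measure completed work items with BNA_Q_INDEX_CHANGE; the macro bodies are not part of these results. A plausible mask-based reading of them, assuming q_depth is a power of 2, is sketched below with renamed macros (an assumption for illustration, not the driver's definitions):

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed shapes of the bna index helpers, renamed and for illustration
 * only: advance an index with wrap, and measure how far a hardware
 * consumer pointer moved past the software one.  Both assume q_depth
 * is a power of 2.
 */
#define QE_INDX_INC(_idx, _q_depth) \
	((_idx) = ((_idx) + 1) & ((_q_depth) - 1))
#define Q_INDEX_CHANGE(_old, _new, _q_depth) \
	(((_new) - (_old)) & ((_q_depth) - 1))

int main(void)
{
	uint32_t q_depth = 512, idx = 510;
	uint32_t cons = 500, hw_cons = 20;

	QE_INDX_INC(idx, q_depth);	/* 511 */
	QE_INDX_INC(idx, q_depth);	/* wraps to 0 */
	printf("idx=%u completed=%u\n", idx,
	       Q_INDEX_CHANGE(cons, hw_cons, q_depth));	/* 32 entries done */
	return 0;
}
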
bna_types.h
428 u32 q_depth; member in struct:bna_tcb
559 u32 q_depth; member in struct:bna_rcb
575 int q_depth; member in struct:bna_rxq
623 u32 q_depth; member in struct:bna_ccb
bfa_msgq.c
516 msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
518 msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);
bfi.h
413 u16 q_depth; /* Total num of entries in the queue */ member in struct:bfi_msgq
/linux-master/drivers/net/ethernet/marvell/octeon_ep/
octep_ctrl_mbox.c
228 u32 pi, ci, r_sz, buf_sz, q_depth; local
240 q_depth = octep_ctrl_mbox_circq_depth(pi, ci, q->sz);
241 if (q_depth < mbox_hdr_sz) {
/linux-master/drivers/net/ethernet/fungible/funcore/
fun_dev.h
69 unsigned int q_depth; /* max queue depth supported by device */ member in struct:fun_dev
fun_queue.c
89 if (sq_depth > fdev->q_depth)
138 if (cq_depth > fdev->q_depth)
/linux-master/drivers/crypto/hisilicon/sec2/
sec_crypto.c
313 u16 q_depth = res->depth; local
316 res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
321 for (i = 1; i < q_depth; i++) {
338 u16 q_depth = res->depth; local
341 res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
346 for (i = 1; i < q_depth; i++) {
363 u16 q_depth = res->depth; local
366 res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
371 for (i = 1; i < q_depth; i++) {
400 u16 q_depth local
490 u16 q_depth = qp_ctx->qp->sq_depth; local
[all...]
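The sec2 lines allocate one DMA-coherent region large enough for q_depth IVs and then, starting at i = 1, point each request's slot at an offset inside it. A hedged userspace sketch of that carve-up, with plain calloc standing in for dma_alloc_coherent and an illustrative IV size:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define IV_SIZE 16u	/* illustrative per-request IV size */

struct req_res {
	uint8_t *c_ivin;	/* this request's slice of the shared buffer */
};

int main(void)
{
	uint16_t q_depth = 8;
	struct req_res *res;
	uint8_t *base;
	uint16_t i;

	/* one contiguous region for all q_depth IVs (dma_alloc_coherent in the driver) */
	base = calloc(q_depth, IV_SIZE);
	res = calloc(q_depth, sizeof(*res));
	if (!base || !res)
		return 1;

	/* entry 0 points at the start; entries 1..q_depth-1 are offsets into it */
	res[0].c_ivin = base;
	for (i = 1; i < q_depth; i++)
		res[i].c_ivin = res[i - 1].c_ivin + IV_SIZE;

	printf("entry 5 offset: %td\n", res[5].c_ivin - base);	/* 80 */
	free(res);
	free(base);
	return 0;
}
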
/linux-master/drivers/nvme/host/
pci.c
35 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
36 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
130 u32 q_depth; member in struct:nvme_dev
199 u32 q_depth; member in struct:nvme_queue
476 if (next_tail == nvmeq->q_depth)
493 if (++nvmeq->sq_tail == nvmeq->q_depth)
1049 if (tmp == nvmeq->q_depth) {
1171 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1200 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1479 int q_depth local
[all...]
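The nvme lines compute the queue footprint from q_depth, wrap the submission tail back to 0 when it reaches q_depth, and program create_cq/create_sq with q_depth - 1 because NVMe encodes queue sizes zero-based. A small standalone sketch of the wrap and the zero-based size (field names illustrative):

#include <stdint.h>
#include <stdio.h>

struct sketch_queue {
	uint32_t q_depth;
	uint32_t sq_tail;
};

/* advance the submission tail one entry, wrapping at q_depth */
static void sq_advance_tail(struct sketch_queue *q)
{
	if (++q->sq_tail == q->q_depth)
		q->sq_tail = 0;
}

/* NVMe queue sizes are encoded zero-based, hence the q_depth - 1 */
static uint16_t zero_based_qsize(uint32_t q_depth)
{
	return (uint16_t)(q_depth - 1);
}

int main(void)
{
	struct sketch_queue q = { .q_depth = 1024, .sq_tail = 1023 };

	sq_advance_tail(&q);
	printf("tail=%u qsize=%u\n", q.sq_tail,
	       (unsigned)zero_based_qsize(q.q_depth));
	return 0;
}
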
/linux-master/drivers/crypto/hisilicon/zip/
zip_crypto.c
443 u16 q_depth = ctx->qp_ctx[0].qp->sq_depth; local
449 req_q->size = q_depth;
495 u16 q_depth = ctx->qp_ctx[0].qp->sq_depth; local
503 tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
/linux-master/drivers/scsi/bfa/
bfi.h
538 #define BFI_MSGQ_FULL(_q) (((_q->pi + 1) % _q->q_depth) == _q->ci)
540 #define BFI_MSGQ_UPDATE_CI(_q) (_q->ci = (_q->ci + 1) % _q->q_depth)
541 #define BFI_MSGQ_UPDATE_PI(_q) (_q->pi = (_q->pi + 1) % _q->q_depth)
543 /* q_depth must be power of 2 */
544 #define BFI_MSGQ_FREE_CNT(_q) ((_q->ci - _q->pi - 1) & (_q->q_depth - 1))
585 u16 q_depth; /* Total num of entries in the queue */ member in struct:bfi_msgq_s
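The bfi.h message-queue macros above mix modulo arithmetic for the full check and index updates with a mask for the free count, which is why the comment insists q_depth must be a power of 2. A standalone adaptation with renamed macros and a stand-in struct shows how they interact on a small queue:

#include <stdint.h>
#include <stdio.h>

/* stand-in with only the fields the macros touch */
struct msgq {
	uint16_t pi;		/* producer index */
	uint16_t ci;		/* consumer index */
	uint16_t q_depth;	/* must be a power of 2 for the FREE_CNT mask */
};

#define MSGQ_FULL(_q)		((((_q)->pi + 1) % (_q)->q_depth) == (_q)->ci)
#define MSGQ_UPDATE_PI(_q)	((_q)->pi = ((_q)->pi + 1) % (_q)->q_depth)
#define MSGQ_FREE_CNT(_q)	(((_q)->ci - (_q)->pi - 1) & ((_q)->q_depth - 1))

int main(void)
{
	struct msgq q = { .pi = 0, .ci = 0, .q_depth = 8 };

	/* produce until the queue reports full: 7 entries fit, 1 slot stays free */
	while (!MSGQ_FULL(&q))
		MSGQ_UPDATE_PI(&q);

	printf("pi=%u free=%u\n", (unsigned)q.pi, (unsigned)MSGQ_FREE_CNT(&q));
	return 0;
}
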
bfa_fcpim.h
119 u16 q_depth; member in struct:bfa_fcpim_s
/linux-master/drivers/block/
ublk_drv.c
133 int q_depth; member in struct:ublk_queue
649 return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
1205 for (i = 0; i < ubq->q_depth; i++) {
1213 if (nr_inflight == ubq->q_depth) {
1369 for (i = 0; i < ubq->q_depth; i++) {
1455 if (WARN_ON_ONCE(pdu->tag >= ubq->q_depth))
1479 return ubq->nr_io_ready == ubq->q_depth;
1486 for (i = 0; i < ubq->q_depth; i++)
1687 if (tag >= ubq->q_depth)
1904 if (tag >= ubq->q_depth)
[all...]
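The ublk lines size the shared io-descriptor area as q_depth descriptors rounded up to whole pages. A hedged sketch of that sizing; the page size and the descriptor size below are illustrative stand-ins, not the kernel's values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u	/* illustrative; the kernel uses the arch page size */

/* round n up to the next multiple of align (align is a power of 2) */
static uint32_t round_up_pow2(uint32_t n, uint32_t align)
{
	return (n + align - 1) & ~(align - 1);
}

int main(void)
{
	uint32_t q_depth = 128;
	uint32_t desc_size = 24;	/* illustrative per-io descriptor size */

	/* q_depth descriptors, padded out to whole pages as in the ublk lines */
	printf("%u\n", round_up_pow2(q_depth * desc_size, PAGE_SZ));	/* 4096 */
	return 0;
}
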
/linux-master/drivers/scsi/mpi3mr/
mpi3mr_os.c
964 * @q_depth: Queue depth
971 int q_depth)
978 q_depth = 1;
979 if (q_depth > shost->can_queue)
980 q_depth = shost->can_queue;
981 else if (!q_depth)
982 q_depth = MPI3MR_DEFAULT_SDEV_QD;
983 retval = scsi_change_queue_depth(sdev, q_depth);
1009 mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
1118 tgtdev->q_depth
970 mpi3mr_change_queue_depth(struct scsi_device *sdev, int q_depth) argument
1856 u16 *q_depth = (u16 *)data; local
[all...]
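The mpi3mr lines clamp the requested per-device depth: forced to 1 in one branch (the guarding condition is not visible in these results; untagged devices is an assumption), capped at the host's can_queue, and replaced with a default when zero. A tiny standalone version of that clamp, with an illustrative default value:

#include <stdio.h>

#define DEFAULT_SDEV_QD	32	/* illustrative default, not the driver's constant */

/* clamp a requested queue depth against the host limit, as in the lines above */
static int clamp_queue_depth(int q_depth, int can_queue, int tagged_supported)
{
	if (!tagged_supported)
		q_depth = 1;
	if (q_depth > can_queue)
		q_depth = can_queue;
	else if (!q_depth)
		q_depth = DEFAULT_SDEV_QD;
	return q_depth;
}

int main(void)
{
	printf("%d %d %d\n",
	       clamp_queue_depth(1024, 256, 1),	/* capped to can_queue */
	       clamp_queue_depth(0, 256, 1),	/* falls back to the default */
	       clamp_queue_depth(64, 256, 0));	/* untagged device -> 1 */
	return 0;
}
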
/linux-master/net/mac80211/
debugfs_sta.c
258 u32 q_depth[IEEE80211_NUM_ACS]; local
271 q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending);
277 q_depth[0], q_depth[1], q_depth[2], q_depth[3],
/linux-master/drivers/crypto/hisilicon/
debugfs.c
272 u32 *e_id, u32 *q_id, u16 q_depth)
298 if (ret || *e_id >= q_depth) {
299 dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1);
271 q_dump_param_parse(struct hisi_qm *qm, char *s, u32 *e_id, u32 *q_id, u16 q_depth) argument

Completed in 252 milliseconds
