Searched refs:queue_depth (Results 1 - 25 of 102) sorted by relevance

1 2 3 4 5

/linux-master/drivers/target/iscsi/
iscsi_target_device.c
37 * struct se_node_acl->queue_depth. The value in struct se_node_acl->queue_depth
41 sess->cmdsn_window = se_nacl->queue_depth;
42 atomic_add(se_nacl->queue_depth - 1, &sess->max_cmd_sn);
iscsi_target_nego.c
1118 u32 payload_length, queue_depth = 0; local
1316 queue_depth = se_nacl->queue_depth;
1323 * in per-cpu-ida tag allocation logic + small queue_depth.
1326 tag_num = max_t(u32, ISCSIT_MIN_TAGS, queue_depth);
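
A minimal standalone sketch (not kernel code; the minimum-tags value and helper names are assumptions) of how the iSCSI target hits above derive both the CmdSN window and a floor-clamped tag pool from the ACL's queue_depth:

/* Illustrative sketch: queue_depth drives the CmdSN window (MaxCmdSN runs
 * queue_depth - 1 ahead of ExpCmdSN) and a tag pool that is never sized
 * below a floor, so a small queue_depth cannot starve tag allocation. */
#include <stdio.h>

#define EXAMPLE_MIN_TAGS 16u	/* assumed stand-in for ISCSIT_MIN_TAGS */

static unsigned int cmdsn_window(unsigned int queue_depth)
{
	/* up to queue_depth commands may be outstanding at once */
	return queue_depth;
}

static unsigned int tag_pool_size(unsigned int queue_depth)
{
	return queue_depth > EXAMPLE_MIN_TAGS ? queue_depth : EXAMPLE_MIN_TAGS;
}

int main(void)
{
	unsigned int qd = 4;

	printf("window=%u tags=%u\n", cmdsn_window(qd), tag_pool_size(qd));
	return 0;
}
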
/linux-master/drivers/s390/block/
dasd_genhd.c
23 static unsigned int queue_depth = 32; variable
26 module_param(queue_depth, uint, 0444);
27 MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
59 block->tag_set.queue_depth = queue_depth;
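
A minimal kernel-style sketch of the pattern in the dasd_genhd.c hits above: a module parameter supplies the default blk-mq queue depth for new devices. Only queue_depth mirrors the driver; example_init_tag_set() is a hypothetical helper, not a DASD API.

#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/numa.h>

static unsigned int queue_depth = 32;
module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new devices");

static int example_init_tag_set(struct blk_mq_tag_set *set,
				const struct blk_mq_ops *ops)
{
	set->ops = ops;
	set->nr_hw_queues = 1;
	set->queue_depth = queue_depth;	/* default from the module parameter */
	set->numa_node = NUMA_NO_NODE;
	return blk_mq_alloc_tag_set(set);
}
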
/linux-master/drivers/target/
target_core_tpg.c
158 struct se_node_acl *acl, u32 queue_depth)
160 acl->queue_depth = queue_depth;
162 if (!acl->queue_depth) {
166 acl->queue_depth = 1;
174 u32 queue_depth; local
191 queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
193 queue_depth = 1;
194 target_set_nacl_queue_depth(tpg, acl, queue_depth);
218 acl->queue_depth,
157 target_set_nacl_queue_depth(struct se_portal_group *tpg, struct se_node_acl *acl, u32 queue_depth) argument
379 core_tpg_set_initiator_node_queue_depth( struct se_node_acl *acl, u32 queue_depth) argument
[all...]
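
A small userspace model (function names are illustrative) of the default-depth selection visible in the target_core_tpg.c hits above: use the fabric's default depth when a callback provides one, otherwise fall back to 1, and never accept 0.

#include <stdio.h>

typedef unsigned int u32;

/* stand-in for tpg->se_tpg_tfo->tpg_get_default_depth(tpg) */
typedef u32 (*get_default_depth_fn)(void);

static u32 pick_queue_depth(get_default_depth_fn get_default_depth)
{
	u32 queue_depth = get_default_depth ? get_default_depth() : 1;

	/* a depth of 0 would allow no commands at all; force at least 1 */
	return queue_depth ? queue_depth : 1;
}

static u32 fabric_default(void) { return 32; }

int main(void)
{
	printf("%u %u\n", pick_queue_depth(fabric_default),
	       pick_queue_depth(NULL));
	return 0;
}
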
/linux-master/arch/um/drivers/
vector_kern.c
250 int queue_depth; local
258 qi->queue_depth -= advance;
264 if (qi->queue_depth == 0) {
268 queue_depth = qi->queue_depth;
270 return queue_depth;
280 int queue_depth; local
286 qi->queue_depth += advance;
287 queue_depth = qi->queue_depth;
336 int queue_depth; local
411 int result = 0, send_len, queue_depth = qi->max_depth; local
1051 int queue_depth = 0; local
[all...]
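
A simplified userspace model of the accounting in the vector_kern.c hits above: queue_depth counts the entries between the consumer (head) and producer (tail) indices of a fixed-size ring, and the indices are reset when the ring drains. Field names mirror struct vector_queue; locking and I/O are omitted.

#include <assert.h>

struct ring {
	int queue_depth, head, tail, max_depth;
};

static void ring_produce(struct ring *q, int advance)
{
	assert(q->queue_depth + advance <= q->max_depth);
	q->tail = (q->tail + advance) % q->max_depth;
	q->queue_depth += advance;
}

static void ring_consume(struct ring *q, int advance)
{
	assert(advance <= q->queue_depth);
	q->head = (q->head + advance) % q->max_depth;
	q->queue_depth -= advance;
	if (q->queue_depth == 0) {
		/* ring is empty; restart both indices at the base */
		q->head = 0;
		q->tail = 0;
	}
}

int main(void)
{
	struct ring q = { .max_depth = 64 };

	ring_produce(&q, 3);
	ring_consume(&q, 3);	/* queue_depth back to 0, indices reset */
	return q.queue_depth;
}
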
vector_kern.h
47 int queue_depth, head, tail, max_depth, max_iov_frags; member in struct:vector_queue
/linux-master/include/net/mana/
hw_channel.h
139 u16 queue_depth; member in struct:hwc_cq
153 u16 queue_depth; member in struct:hwc_wq
/linux-master/drivers/block/rnbd/
rnbd-srv.h
25 int queue_depth; member in struct:rnbd_srv_session
rnbd-clt.h
86 size_t queue_depth; member in struct:rnbd_clt_session
rnbd-srv-trace.h
29 __entry->qdepth = srv->queue_depth;
/linux-master/drivers/infiniband/ulp/rtrs/
rtrs-clt.c
71 size_t max_depth = clt->queue_depth;
499 if (WARN_ON(msg_id >= clt_path->queue_depth))
553 if (WARN_ON(buf_id >= clt_path->queue_depth))
725 q_size = clt_path->queue_depth;
1328 for (i = 0; i < clt_path->queue_depth; ++i) {
1354 for (i = 0; i < clt_path->queue_depth; ++i) {
1370 clt_path->reqs = kcalloc(clt_path->queue_depth,
1376 for (i = 0; i < clt_path->queue_depth; ++i) {
1416 clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
1421 clt->permits = kcalloc(clt->queue_depth, permit_siz
1830 u16 version, queue_depth; local
[all...]
rtrs.h
111 u32 queue_depth; member in struct:rtrs_attrs
rtrs-srv.h
64 * queue_depth of memory region to invalidate each memory region.
110 size_t queue_depth; member in struct:rtrs_srv_sess
rtrs-srv.c
109 for (i = 0; i < srv->queue_depth; i++)
138 srv_path->ops_ids = kcalloc(srv->queue_depth,
144 for (i = 0; i < srv->queue_depth; ++i) {
577 * Here we map queue_depth chunks to MR. Firstly we have to
585 mrs_num = srv->queue_depth;
589 mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
590 chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
607 srv->queue_depth - chunks);
655 chunk_bits = ilog2(srv->queue_depth - 1) + 1;
989 q_size = srv->queue_depth;
[all...]
rtrs-clt.h
144 size_t queue_depth; member in struct:rtrs_clt_path
173 size_t queue_depth; member in struct:rtrs_clt_sess
rtrs-pri.h
30 * since queue_depth in rtrs_msg_conn_rsp is defined as le16.
209 * @queue_depth: max inflight messages (queue-depth) in this session
219 __le16 queue_depth; member in struct:rtrs_msg_conn_rsp
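
A kernel-style sketch of the rtrs-clt.c pattern above: the server-advertised queue_depth (carried as a le16 in rtrs_msg_conn_rsp) bounds both a permit bitmap and a per-permit array on the client. The structure and function here are illustrative stand-ins, not the real rtrs API.

#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct example_clt {
	size_t queue_depth;
	unsigned long *permits_map;	/* one bit per in-flight request */
	void *permits;			/* queue_depth permit slots */
};

static int example_alloc_permits(struct example_clt *clt, size_t permit_size)
{
	clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
	if (!clt->permits_map)
		return -ENOMEM;

	clt->permits = kcalloc(clt->queue_depth, permit_size, GFP_KERNEL);
	if (!clt->permits) {
		bitmap_free(clt->permits_map);
		clt->permits_map = NULL;
		return -ENOMEM;
	}
	return 0;
}
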
/linux-master/drivers/ata/
libata-sata.c
1048 * @queue_depth: new queue depth
1055 int queue_depth)
1064 if (!dev || queue_depth < 1 || queue_depth == sdev->queue_depth) {
1066 return sdev->queue_depth;
1075 if (queue_depth > max_queue_depth) {
1084 if (queue_depth == 1 || !ata_ncq_supported(dev)) {
1086 queue_depth = 1;
1093 if (queue_depth
1054 ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, int queue_depth) argument
1115 ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) argument
[all...]
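
A standalone sketch of the checks in ata_change_queue_depth() above: reject a depth below 1 or an unchanged depth, clamp the request to the device maximum, and force a depth of 1 when the device cannot queue (no NCQ). The maximum used here is an assumed constant; in the driver it comes from the device.

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_MAX_QUEUE_DEPTH 32	/* assumed device maximum */

static int change_queue_depth(int current_depth, int requested,
			      bool ncq_supported)
{
	if (requested < 1 || requested == current_depth)
		return current_depth;		/* nothing to do */

	if (requested > EXAMPLE_MAX_QUEUE_DEPTH)
		requested = EXAMPLE_MAX_QUEUE_DEPTH;

	if (requested == 1 || !ncq_supported)
		return 1;			/* no command queueing */

	return requested;
}

int main(void)
{
	printf("%d\n", change_queue_depth(31, 64, true));	/* clamped to 32 */
	printf("%d\n", change_queue_depth(31, 8, false));	/* forced to 1 */
	return 0;
}
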
/linux-master/block/
blk-rq-qos.c
122 if (rqd->queue_depth == 1) {
138 rqd->queue_depth);
142 unsigned int maxd = 3 * rqd->queue_depth / 4;
blk-mq.c
3590 unsigned int queue_depth, struct request *flush_rq)
3601 for (i = 0; i < queue_depth; i++)
3626 set->queue_depth, flush_rq);
3817 set->queue_depth);
4276 q->nr_requests = set->queue_depth;
4309 set->queue_depth);
4336 * may reduce the depth asked for, if memory is tight. set->queue_depth
4344 depth = set->queue_depth;
4350 set->queue_depth >>= 1;
4351 if (set->queue_depth < se
3589 blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, unsigned int queue_depth, struct request *flush_rq) argument
4545 blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, const struct blk_mq_ops *ops, unsigned int queue_depth, unsigned int set_flags) argument
[all...]
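
A toy sketch of the fallback visible in the blk-mq.c hits above: if the request maps cannot be allocated at the requested depth, halve set->queue_depth and retry until allocation succeeds or the depth falls below a floor. try_alloc_rq_maps() and MIN_DEPTH are illustrative stand-ins, not blk-mq APIs.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MIN_DEPTH 4	/* assumed floor */

/* toy allocator that "fails" above a depth of 64 to model memory pressure */
static bool try_alloc_rq_maps(unsigned int depth)
{
	return depth <= 64;
}

static int alloc_with_fallback(unsigned int *queue_depth)
{
	while (*queue_depth >= MIN_DEPTH) {
		if (try_alloc_rq_maps(*queue_depth))
			return 0;	/* allocated at this depth */
		*queue_depth >>= 1;	/* memory tight: halve and retry */
	}
	return -ENOMEM;
}

int main(void)
{
	unsigned int depth = 256;

	if (!alloc_with_fallback(&depth))
		printf("settled on queue_depth %u\n", depth);	/* 64 */
	return 0;
}
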
blk-rq-qos.h
57 unsigned int queue_depth; member in struct:rq_depth
/linux-master/drivers/scsi/snic/
snic_main.c
91 if (qsz < sdev->queue_depth)
93 else if (qsz > sdev->queue_depth)
96 atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);
100 return sdev->queue_depth;
/linux-master/include/target/
target_core_base.h
579 u32 queue_depth; member in struct:se_node_acl
723 u32 queue_depth; member in struct:se_dev_attrib
815 u32 queue_depth; member in struct:se_device
/linux-master/drivers/scsi/
scsi.c
222 sdev->queue_depth = depth;
229 sbitmap_resize(&sdev->budget_map, sdev->queue_depth);
231 return sdev->queue_depth;
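
A kernel-style sketch of the scsi.c pattern above: when a device's queue depth changes, the per-device budget sbitmap is resized to match so that at most queue_depth requests can obtain a budget. The struct below is a stripped-down illustration, not struct scsi_device.

#include <linux/sbitmap.h>

struct example_sdev {
	int queue_depth;
	struct sbitmap budget_map;	/* one budget token per queued request */
};

static int example_change_queue_depth(struct example_sdev *sdev, int depth)
{
	sdev->queue_depth = depth;
	sbitmap_resize(&sdev->budget_map, sdev->queue_depth);
	return sdev->queue_depth;
}
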
/linux-master/drivers/mmc/core/
queue.c
436 mq->tag_set.queue_depth =
439 mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
/linux-master/drivers/net/ethernet/microsoft/mana/
hw_channel.c
318 comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
319 WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);
398 hwc_cq->queue_depth = q_depth;
512 hwc_wq->queue_depth = q_depth;

Completed in 416 milliseconds

1 2 3 4 5