Searched refs:tag_set (Results 1 - 25 of 71) sorted by path


/linux-master/arch/um/drivers/
ubd_kern.c 161 struct blk_mq_tag_set tag_set; member in struct:ubd
799 blk_mq_free_tag_set(&ubd_dev->tag_set);
879 ubd_dev->tag_set.ops = &ubd_mq_ops;
880 ubd_dev->tag_set.queue_depth = 64;
881 ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
882 ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
883 ubd_dev->tag_set.driver_data = ubd_dev;
884 ubd_dev->tag_set.nr_hw_queues = 1;
886 err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
890 disk = blk_mq_alloc_disk(&ubd_dev->tag_set,
[all...]
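
The ubd hits above trace the usual driver-side life cycle of an embedded tag set: fill in struct blk_mq_tag_set, call blk_mq_alloc_tag_set(), hand the set to blk_mq_alloc_disk(), and free it with blk_mq_free_tag_set() on teardown. The following is a minimal sketch of that pattern, not code from ubd_kern.c; my_dev, my_mq_ops and my_queue_rq are hypothetical names.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/* Hypothetical driver state; the tag set is embedded as in ubd/loop/rbd. */
struct my_dev {
        struct blk_mq_tag_set tag_set;
        struct gendisk *disk;
};

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
{
        /* A real driver would start and service bd->rq here. */
        return BLK_STS_IOERR;
}

static const struct blk_mq_ops my_mq_ops = {
        .queue_rq = my_queue_rq,
};

static int my_dev_add(struct my_dev *dev)
{
        int err;

        dev->tag_set.ops = &my_mq_ops;
        dev->tag_set.queue_depth = 64;
        dev->tag_set.numa_node = NUMA_NO_NODE;
        dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        dev->tag_set.driver_data = dev;
        dev->tag_set.nr_hw_queues = 1;

        err = blk_mq_alloc_tag_set(&dev->tag_set);
        if (err)
                return err;

        /* NULL queue_limits and dev as queuedata, as in several hits above. */
        dev->disk = blk_mq_alloc_disk(&dev->tag_set, NULL, dev);
        if (IS_ERR(dev->disk)) {
                blk_mq_free_tag_set(&dev->tag_set);
                return PTR_ERR(dev->disk);
        }
        return 0;
}
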
/linux-master/block/
blk-mq-debugfs.c 368 blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
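
blk_mq_tagset_busy_iter() walks every request currently allocated from a tag set; debugfs uses it here to dump busy requests, and nbd uses it further down to cancel them. A hedged sketch of a simple caller that just counts in-flight requests (my_count_rq and my_count_inflight are illustrative names):

#include <linux/blk-mq.h>

/* Invoked once per in-flight request; returning true keeps iterating. */
static bool my_count_rq(struct request *rq, void *priv)
{
        unsigned int *count = priv;

        (*count)++;
        return true;
}

static unsigned int my_count_inflight(struct blk_mq_tag_set *set)
{
        unsigned int count = 0;

        blk_mq_tagset_busy_iter(set, my_count_rq, &count);
        return count;
}
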
blk-mq-sched.c 384 if (blk_mq_is_shared_tags(q->tag_set->flags)) {
389 hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
423 struct blk_mq_tag_set *set = queue->tag_set;
443 unsigned int flags = q->tag_set->flags;
454 q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
512 if (blk_mq_is_shared_tags(q->tag_set->flags)) {
513 blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
518 blk_mq_free_rqs(q->tag_set,
blk-mq-tag.c 271 struct blk_mq_tag_set *set = q->tag_set;
492 * Note: if @q->tag_set is shared with other request queues then @fn will be
507 if (blk_mq_is_shared_tags(q->tag_set->flags)) {
508 struct blk_mq_tags *tags = q->tag_set->shared_tags;
615 struct blk_mq_tag_set *set = hctx->queue->tag_set;
663 q->nr_requests - q->tag_set->reserved_tags);
blk-mq.c 215 * @set: tag_set to wait on
218 * been started on or more of the request_queues of the tag_set. This
245 blk_mq_wait_quiesce_done(q->tag_set);
1654 blk_mq_wait_quiesce_done(q->tag_set);
3760 struct blk_mq_tag_set *set = q->tag_set;
3847 struct blk_mq_tag_set *set = q->tag_set;
3979 struct blk_mq_tag_set *set = q->tag_set;
4233 struct blk_mq_tag_set *set = q->tag_set;
4266 q->tag_set = set;
4294 struct blk_mq_tag_set *set = q->tag_set;
[all...]
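
The blk-mq.c hits around line 215 document blk_mq_wait_quiesce_done(), which takes the tag_set rather than a single queue so that one wait covers every request_queue sharing that set. A hedged sketch of how a driver might bracket error recovery, assuming the tagset-wide helpers blk_mq_quiesce_tagset()/blk_mq_unquiesce_tagset() available in current kernels:

#include <linux/blk-mq.h>

/*
 * Stop dispatch on every request_queue sharing @set, recover, resume.
 * blk_mq_quiesce_tagset() quiesces each queue and then waits for
 * in-progress dispatch, so no extra blk_mq_wait_quiesce_done() is needed.
 */
static void my_recover(struct blk_mq_tag_set *set)
{
        blk_mq_quiesce_tagset(set);

        /* ...reset hardware, re-establish the transport, etc... */

        blk_mq_unquiesce_tagset(set);
}
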
blk-mq.h 82 return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
452 if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
453 struct blk_mq_tag_set *__tag_set = (q)->tag_set; \
bsg-lib.c 22 struct blk_mq_tag_set tag_set; member in struct:bsg_set
279 container_of(q->tag_set, struct bsg_set, tag_set);
324 container_of(q->tag_set, struct bsg_set, tag_set);
329 blk_mq_free_tag_set(&bset->tag_set);
338 container_of(rq->q->tag_set, struct bsg_set, tag_set);
376 set = &bset->tag_set;
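
bsg-lib embeds its tag_set in struct bsg_set and then recovers the driver context from a queue or request with container_of() on q->tag_set, as the hits at lines 279, 324 and 338 show. A small sketch of that recovery idiom with a hypothetical my_ctx in place of bsg_set:

#include <linux/blk-mq.h>
#include <linux/container_of.h>

struct my_ctx {
        struct blk_mq_tag_set tag_set;  /* embedded, as in struct bsg_set */
        /* ...driver-private fields... */
};

/* Get back to the owning context from a request, as the line 338 hit does. */
static struct my_ctx *my_ctx_from_rq(struct request *rq)
{
        return container_of(rq->q->tag_set, struct my_ctx, tag_set);
}
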
elevator.c 561 (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
572 if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
576 !blk_mq_is_shared_tags(q->tag_set->flags))
708 q->nr_requests = q->tag_set->queue_depth;
/linux-master/drivers/block/
amiflop.c 205 struct blk_mq_tag_set tag_set; member in struct:amiga_floppy_struct
1782 disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
1812 memset(&unit[drive].tag_set, 0, sizeof(unit[drive].tag_set));
1813 unit[drive].tag_set.ops = &amiflop_mq_ops;
1814 unit[drive].tag_set.nr_hw_queues = 1;
1815 unit[drive].tag_set.nr_maps = 1;
1816 unit[drive].tag_set.queue_depth = 2;
1817 unit[drive].tag_set.numa_node = NUMA_NO_NODE;
1818 unit[drive].tag_set
[all...]
ataflop.c 305 struct blk_mq_tag_set tag_set; member in struct:atari_floppy_struct
1997 disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL, NULL);
2051 blk_mq_free_tag_set(&unit[i].tag_set);
2069 blk_mq_free_tag_set(&fs->tag_set);
2082 memset(&unit[i].tag_set, 0, sizeof(unit[i].tag_set));
2083 unit[i].tag_set.ops = &ataflop_mq_ops;
2084 unit[i].tag_set.nr_hw_queues = 1;
2085 unit[i].tag_set.nr_maps = 1;
2086 unit[i].tag_set
[all...]
loop.c 75 struct blk_mq_tag_set tag_set; member in struct:loop_device
2019 lo->tag_set.ops = &loop_mq_ops;
2020 lo->tag_set.nr_hw_queues = 1;
2021 lo->tag_set.queue_depth = hw_queue_depth;
2022 lo->tag_set.numa_node = NUMA_NO_NODE;
2023 lo->tag_set.cmd_size = sizeof(struct loop_cmd);
2024 lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING |
2026 lo->tag_set.driver_data = lo;
2028 err = blk_mq_alloc_tag_set(&lo->tag_set);
2032 disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set,
[all...]
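
loop sets tag_set.cmd_size = sizeof(struct loop_cmd), which tells blk-mq to allocate that many extra bytes behind every request; the driver then reaches its per-request data with blk_mq_rq_to_pdu(). A hedged sketch of the same per-request-PDU pattern with a made-up my_cmd:

#include <linux/blk-mq.h>

struct my_cmd {
        int status;     /* hypothetical per-request driver state */
};

/* Before blk_mq_alloc_tag_set(): set->cmd_size = sizeof(struct my_cmd); */

static blk_status_t my_pdu_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct my_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

        cmd->status = 0;        /* the PDU lives right behind the request */
        blk_mq_start_request(bd->rq);
        /* ...submit the I/O here and complete the request later... */
        return BLK_STS_OK;
}
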
nbd.c 116 struct blk_mq_tag_set tag_set; member in struct:nbd_device
254 blk_mq_free_tag_set(&nbd->tag_set);
457 (config->num_connections == 1 && nbd->tag_set.timeout)) {
490 if (!nbd->tag_set.timeout) {
779 if (hwq < nbd->tag_set.nr_hw_queues)
780 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
946 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
1245 if (nbd->tag_set.timeout)
1246 sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
1374 nbd->tag_set
[all...]
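
nbd (and ublk below) map a completion arriving from the wire back to its request with blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], tag): the set keeps one struct blk_mq_tags per hardware queue, so the (hw queue, tag) pair identifies the request. A hedged lookup sketch; the bounds check mirrors the nbd hit at line 779:

#include <linux/blk-mq.h>

/* Resolve a (hardware queue index, tag) pair reported by the backend. */
static struct request *my_find_rq(struct blk_mq_tag_set *set,
                                  unsigned int hwq, unsigned int tag)
{
        if (hwq >= set->nr_hw_queues)
                return NULL;
        /* May still return NULL if no request currently owns this tag. */
        return blk_mq_tag_to_rq(set->tags[hwq], tag);
}
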
ps3disk.c 32 struct blk_mq_tag_set tag_set; member in struct:ps3disk_private
437 error = blk_mq_alloc_sq_tag_set(&priv->tag_set, &ps3disk_mq_ops, 1,
442 gendisk = blk_mq_alloc_disk(&priv->tag_set, &lim, dev);
479 blk_mq_free_tag_set(&priv->tag_set);
505 blk_mq_free_tag_set(&priv->tag_set);
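
ps3disk, sunvdc, swim and swim3 use blk_mq_alloc_sq_tag_set(), a convenience wrapper that fills in a single-hardware-queue tag set (ops, queue depth, flags) and calls blk_mq_alloc_tag_set() itself. A hedged sketch reusing the hypothetical my_dev and my_mq_ops from the first sketch above; the depth and flags here are illustrative:

#include <linux/blk-mq.h>
#include <linux/err.h>

static int my_sq_init(struct my_dev *dev)
{
        int err;

        /* One hw queue, shallow depth - the shape of the ps3disk/swim calls. */
        err = blk_mq_alloc_sq_tag_set(&dev->tag_set, &my_mq_ops, 2,
                                      BLK_MQ_F_SHOULD_MERGE);
        if (err)
                return err;

        dev->disk = blk_mq_alloc_disk(&dev->tag_set, NULL, dev);
        if (IS_ERR(dev->disk)) {
                blk_mq_free_tag_set(&dev->tag_set);
                return PTR_ERR(dev->disk);
        }
        return 0;
}
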
rbd.c 440 struct blk_mq_tag_set tag_set; member in struct:rbd_device
4818 blk_mq_free_tag_set(&rbd_dev->tag_set);
4965 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4966 rbd_dev->tag_set.ops = &rbd_mq_ops;
4967 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4968 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4969 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
4970 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
4971 rbd_dev->tag_set
[all...]
sunvdc.c 86 struct blk_mq_tag_set tag_set; member in struct:vdc_port
830 err = blk_mq_alloc_sq_tag_set(&port->tag_set, &vdc_mq_ops,
835 g = blk_mq_alloc_disk(&port->tag_set, &lim, port);
891 blk_mq_free_tag_set(&port->tag_set);
1076 blk_mq_free_tag_set(&port->tag_set);
swim.c 191 struct blk_mq_tag_set tag_set; member in struct:floppy_state
785 blk_mq_free_tag_set(&fs->tag_set);
817 err = blk_mq_alloc_sq_tag_set(&swd->unit[drive].tag_set,
823 blk_mq_alloc_disk(&swd->unit[drive].tag_set, NULL,
826 blk_mq_free_tag_set(&swd->unit[drive].tag_set);
swim3.c 206 struct blk_mq_tag_set tag_set; member in struct:floppy_state
1208 rc = blk_mq_alloc_sq_tag_set(&fs->tag_set, &swim3_mq_ops, 2,
1213 disk = blk_mq_alloc_disk(&fs->tag_set, NULL, fs);
1242 blk_mq_free_tag_set(&fs->tag_set);
ublk_drv.c 160 struct blk_mq_tag_set tag_set; member in struct:ublk_device
1349 req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
1379 rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
1517 blk_mq_tagset_busy_iter(&ub->tag_set,
1620 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
1742 req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
1794 req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
2077 blk_mq_free_tag_set(&ub->tag_set);
2131 ub->tag_set.ops = &ublk_mq_ops;
2132 ub->tag_set
[all...]
/linux-master/drivers/block/aoe/
aoe.h 174 struct blk_mq_tag_set tag_set; member in struct:aoedev
aoeblk.c 364 set = &d->tag_set;
aoedev.c 281 blk_mq_free_tag_set(&d->tag_set);
/linux-master/drivers/block/null_blk/
main.c 72 static struct blk_mq_tag_set tag_set; variable in typeref:struct:blk_mq_tag_set
390 set = dev->nullb->tag_set;
1678 if (nullb->tag_set == &nullb->__tag_set)
1679 blk_mq_free_tag_set(nullb->tag_set);
1744 if (tag_set.ops)
1747 tag_set.nr_hw_queues = g_submit_queues;
1748 tag_set.queue_depth = g_hw_queue_depth;
1749 tag_set.numa_node = g_home_node;
1750 tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
1752 tag_set
[all...]
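
null_blk keeps a module-wide static tag_set (main.c line 72) alongside a per-device __tag_set, and nullb->tag_set points at whichever one the device actually uses; the teardown hits at lines 1678-1679 free the set only when it is the private one. A hedged sketch of that pointer scheme with illustrative names (shared_tags stands in for null_blk's module parameter):

#include <linux/blk-mq.h>

static struct blk_mq_tag_set shared_set;        /* one set for all devices */

struct my_nullb_like {
        struct blk_mq_tag_set __tag_set;        /* private per-device set */
        struct blk_mq_tag_set *tag_set;         /* points at one of the two */
};

static void my_pick_set(struct my_nullb_like *dev, bool shared_tags)
{
        dev->tag_set = shared_tags ? &shared_set : &dev->__tag_set;
}

static void my_teardown(struct my_nullb_like *dev)
{
        /* The shared set outlives individual devices; free only our own. */
        if (dev->tag_set == &dev->__tag_set)
                blk_mq_free_tag_set(dev->tag_set);
}
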
null_blk.h 115 struct blk_mq_tag_set *tag_set; member in struct:nullb
/linux-master/drivers/block/rnbd/
rnbd-clt.c 737 if (sess->tag_set.tags)
738 blk_mq_free_tag_set(&sess->tag_set);
1206 struct blk_mq_tag_set *tag_set = &sess->tag_set; local
1208 memset(tag_set, 0, sizeof(*tag_set));
1209 tag_set->ops = &rnbd_mq_ops;
1210 tag_set->queue_depth = sess->queue_depth;
1211 tag_set->numa_node = NUMA_NO_NODE;
1212 tag_set
[all...]
rnbd-clt.h 89 struct blk_mq_tag_set tag_set; member in struct:rnbd_clt_session

Completed in 331 milliseconds
