Lines Matching defs:tags

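For orientation before the listing: every match below dereferences struct blk_mq_tags. The sketch here is reconstructed only from the fields visible in these matches (the real definition, in block/blk-mq-tag.h or block/blk-mq.h depending on kernel version, has further members and may differ in layout); static_rqs[] holds the preallocated request backing, while rqs[] is the tag-to-in-flight-request mapping that gets cleared before the request pool is freed.

	struct blk_mq_tags {
		unsigned int		nr_tags;		/* total tags; bound for the static_rqs[] walk in blk_mq_free_rqs() */
		unsigned int		nr_reserved_tags;	/* count of reserved tags; also the offset of the normal tag space */

		struct sbitmap_queue	bitmap_tags;		/* normal tag space */
		struct sbitmap_queue	breserved_tags;		/* reserved tag space */

		struct request		**rqs;			/* tag -> in-flight request (cleared in blk_mq_clear_rq_mapping()) */
		struct request		**static_rqs;		/* tag -> preallocated request backing */
		struct list_head	page_list;		/* pages backing the static_rqs[] allocations */

		spinlock_t		lock;			/* serializes updates to rqs[] */
	};
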
310 blk_mq_tag_wakeup_all(hctx->tags, true);
347 struct blk_mq_tags *tags, unsigned int tag)
352 struct request *rq = tags->static_rqs[tag];
407 struct blk_mq_tags *tags;
416 tags = blk_mq_tags_from_data(data);
421 prefetch(tags->static_rqs[tag]);
423 rq = blk_mq_rq_ctx_init(data, tags, tag);
452 * All requests use scheduler tags when an I/O scheduler is
717 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
1069 blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
1075 int tags[TAG_COMP_BATCH], nr_tags = 0;
1111 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1115 tags[nr_tags++] = rq->tag;
1119 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1262 rq->mq_hctx->tags->rqs[rq->tag] = rq;
1302 * Any request allocated from sched tags can't be issued to
1649 * Before walking tags, we must ensure any submit started
1754 struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1755 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1761 bt = &rq->mq_hctx->tags->breserved_tags;
1789 sbq = &hctx->tags->bitmap_tags;
1799 * Mark us waiting for a tag. For shared tags, this involves hooking us into
1800 * the tag wakeups. For non-shared tags, we can simply mark us needing a
1832 sbq = &hctx->tags->breserved_tags;
1834 sbq = &hctx->tags->bitmap_tags;
2093 /* For non-shared tags, the RESTART check will suffice */
3212 /* called before freeing request pool in @tags */
3214 struct blk_mq_tags *tags)
3220 * There is no need to clear mapping if driver tags is not initialized
3221 * or the mapping belongs to the driver tags.
3223 if (!drv_tags || drv_tags == tags)
3226 list_for_each_entry(page, &tags->page_list, lru) {
3252 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
3258 if (list_empty(&tags->page_list))
3264 drv_tags = set->tags[hctx_idx];
3266 if (tags->static_rqs && set->ops->exit_request) {
3269 for (i = 0; i < tags->nr_tags; i++) {
3270 struct request *rq = tags->static_rqs[i];
3275 tags->static_rqs[i] = NULL;
3279 blk_mq_clear_rq_mapping(drv_tags, tags);
3281 while (!list_empty(&tags->page_list)) {
3282 page = list_first_entry(&tags->page_list, struct page, lru);
3293 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3295 kfree(tags->rqs);
3296 tags->rqs = NULL;
3297 kfree(tags->static_rqs);
3298 tags->static_rqs = NULL;
3300 blk_mq_free_tags(tags);
3336 struct blk_mq_tags *tags;
3341 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
3343 if (!tags)
3346 tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3349 if (!tags->rqs)
3352 tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3355 if (!tags->static_rqs)
3358 return tags;
3361 kfree(tags->rqs);
3363 blk_mq_free_tags(tags);
3383 struct blk_mq_tags *tags,
3393 INIT_LIST_HEAD(&tags->page_list);
3428 list_add_tail(&page->lru, &tags->page_list);
3442 tags->static_rqs[i] = rq;
3444 tags->static_rqs[i] = NULL;
3455 blk_mq_free_rqs(set, tags, hctx_idx);
3476 struct blk_mq_tags *tags = hctx->sched_tags ?
3477 hctx->sched_tags : hctx->tags;
3482 blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3587 * tags->rqs[] for avoiding potential UAF.
3589 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3596 if (!tags)
3602 cmpxchg(&tags->rqs[i], flush_rq, NULL);
3610 spin_lock_irqsave(&tags->lock, flags);
3611 spin_unlock_irqrestore(&tags->lock, flags);
3625 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
3666 hctx->tags = set->tags[hctx_idx];
3791 struct blk_mq_tags *tags;
3794 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3795 if (!tags)
3798 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
3800 blk_mq_free_rq_map(tags);
3804 return tags;
3811 set->tags[hctx_idx] = set->shared_tags;
3816 set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
3819 return set->tags[hctx_idx];
3823 struct blk_mq_tags *tags,
3826 if (tags) {
3827 blk_mq_free_rqs(set, tags, hctx_idx);
3828 blk_mq_free_rq_map(tags);
3836 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
3838 set->tags[hctx_idx] = NULL;
3871 if (!set->tags[hctx_idx] &&
3874 * If tags initialization fail for some hctx,
3877 * is guaranteed to always have tags allocated
3922 hctx->tags = NULL;
3926 hctx->tags = set->tags[i];
3927 WARN_ON(!hctx->tags);
4291 /* tags can _not_ be used after returning from blk_mq_exit_queue */
4420 if (set->tags)
4421 memcpy(new_tags, set->tags, set->nr_hw_queues *
4422 sizeof(*set->tags));
4423 kfree(set->tags);
4424 set->tags = new_tags;
4478 * memory constrained environment. Limit us to 64 tags to prevent
4501 set->tags = kcalloc_node(set->nr_hw_queues,
4504 if (!set->tags)
4532 kfree(set->tags);
4533 set->tags = NULL;
4577 kfree(set->tags);
4578 set->tags = NULL;
4604 if (!hctx->tags)
4614 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4784 /* Free the excess tags when nr_hw_queues shrink. */