Lines Matching defs:pool

77 * The data_vio_pool maintains the pool of data_vios which a vdo uses to service incoming bios. For
83 * resources are available. The pool is also responsible for distributing resources to blocked
84 * threads and waking them. Finally, the pool attempts to batch the work of recycling data_vios by
86 * into the pool on a single cpu at a time.
88 * The pool contains two "limiters", one for tracking data_vios and one for tracking discard
89 * permits. The limiters also provide safe cross-thread access to pool statistics without the need
90 * to take the pool's lock. When a thread submits a bio to a vdo device, it will first attempt to
100 * state of the pool. If the pool is not currently processing released data_vios, the pool's
102 * hold the pool's lock, and also batches release work while avoiding starvation of the cpu
105 * Whenever the pool's completion is run on a cpu thread, it calls process_release_callback() which
106 * processes a batch of returned data_vios (currently at most 32) from the pool's funnel queue. For
110 * for a data_vio. If there are no discard waiters, the discard permit is returned to the pool.
113 * pool. Finally, if any waiting bios were launched, the threads which blocked trying to submit
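
The limiter behavior described in the comment above can be illustrated with a small userspace analogue. This is only a sketch of the pattern (a capped permit count, blocked acquirers, and batched wakeups on release); the names sketch_limiter, sketch_acquire, and sketch_release_batch are invented for illustration, and pthreads stand in for VDO's spinlock, wait-queue, and funnel-queue machinery.

#include <pthread.h>

struct sketch_limiter {
        pthread_mutex_t lock;   /* plays the role of the pool's spinlock */
        pthread_cond_t wakeup;  /* plays the role of blocked_threads */
        unsigned int busy;      /* permits currently held */
        unsigned int limit;     /* maximum concurrent permits */
};

/* Take one permit, blocking while the limiter is at its limit. */
static void sketch_acquire(struct sketch_limiter *limiter)
{
        pthread_mutex_lock(&limiter->lock);
        while (limiter->busy >= limiter->limit)
                pthread_cond_wait(&limiter->wakeup, &limiter->lock);
        limiter->busy++;
        pthread_mutex_unlock(&limiter->lock);
}

/* Return a batch of permits at once and wake every blocked acquirer. */
static void sketch_release_batch(struct sketch_limiter *limiter, unsigned int count)
{
        pthread_mutex_lock(&limiter->lock);
        limiter->busy -= count;
        pthread_cond_broadcast(&limiter->wakeup);
        pthread_mutex_unlock(&limiter->lock);
}
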
129 struct data_vio_pool *pool;
161 /* The administrative state of the pool */
163 /* Lock protecting the pool */
165 /* The main limiter controlling the total number of data_vios in the pool. */
173 /* The queue of data_vios waiting to be returned to the pool */
175 /* Whether the pool is processing releases, or is scheduled to do so */
177 /* The data_vios in the pool */
228 * or waiters while holding the pool's lock.
230 static bool check_for_drain_complete_locked(struct data_vio_pool *pool)
232 if (pool->limiter.busy > 0)
235 VDO_ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0),
238 return (bio_list_empty(&pool->limiter.new_waiters) &&
239 bio_list_empty(&pool->discard_limiter.new_waiters));
588 launch_bio(limiter->pool->completion.vdo, data_vio, bio);
610 static inline struct data_vio *get_available_data_vio(struct data_vio_pool *pool)
613 list_first_entry(&pool->available, struct data_vio, pool_entry);
621 assign_data_vio(limiter, get_available_data_vio(limiter->pool));
657 static void schedule_releases(struct data_vio_pool *pool)
661 if (atomic_cmpxchg(&pool->processing, false, true))
664 pool->completion.requeue = true;
665 vdo_launch_completion_with_priority(&pool->completion,
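
The atomic_cmpxchg() in schedule_releases() is a "schedule at most once" gate: only the caller that flips processing from false to true launches the pool's completion, so concurrent releases collapse into a single scheduled pass. Below is a generic C11 sketch of the same pattern; maybe_schedule() and worker_finished() are invented names, and the key detail is that the worker clears the flag and then re-checks for work that raced in, mirroring the fragments at source lines 746 and 750 later in this listing.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool processing;

/* Producer side: only the caller that wins the flag transition schedules work. */
static void maybe_schedule(void (*schedule_worker)(void))
{
        bool expected = false;

        if (atomic_compare_exchange_strong(&processing, &expected, true))
                schedule_worker();
}

/* Worker side: drop the flag first, then re-check so no release is missed. */
static void worker_finished(bool (*work_pending)(void), void (*schedule_worker)(void))
{
        atomic_store(&processing, false);
        if (work_pending())
                maybe_schedule(schedule_worker);
}
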
669 static void reuse_or_release_resources(struct data_vio_pool *pool,
674 if (bio_list_empty(&pool->discard_limiter.waiters)) {
676 pool->discard_limiter.release_count++;
678 assign_discard_permit(&pool->discard_limiter);
682 if (pool->limiter.arrival < pool->discard_limiter.arrival) {
683 assign_data_vio(&pool->limiter, data_vio);
684 } else if (pool->discard_limiter.arrival < U64_MAX) {
685 assign_data_vio(&pool->discard_limiter, data_vio);
688 pool->limiter.release_count++;
694 * @completion: The pool with data_vios to release.
698 struct data_vio_pool *pool = as_data_vio_pool(completion);
706 spin_lock(&pool->lock);
707 get_waiters(&pool->discard_limiter);
708 get_waiters(&pool->limiter);
709 spin_unlock(&pool->lock);
711 if (pool->limiter.arrival == U64_MAX) {
712 struct bio *bio = bio_list_peek(&pool->limiter.waiters);
715 pool->limiter.arrival = get_arrival_time(bio);
720 struct funnel_queue_entry *entry = vdo_funnel_queue_poll(pool->queue);
728 reuse_or_release_resources(pool, data_vio, &returned);
731 spin_lock(&pool->lock);
738 update_limiter(&pool->discard_limiter);
739 list_splice(&returned, &pool->available);
740 update_limiter(&pool->limiter);
741 to_wake = pool->limiter.wake_count;
742 pool->limiter.wake_count = 0;
743 discards_to_wake = pool->discard_limiter.wake_count;
744 pool->discard_limiter.wake_count = 0;
746 atomic_set(&pool->processing, false);
750 reschedule = !vdo_is_funnel_queue_empty(pool->queue);
752 vdo_is_state_draining(&pool->state) &&
753 check_for_drain_complete_locked(pool));
754 spin_unlock(&pool->lock);
757 wake_up_nr(&pool->limiter.blocked_threads, to_wake);
760 wake_up_nr(&pool->discard_limiter.blocked_threads, discards_to_wake);
763 schedule_releases(pool);
765 vdo_finish_draining(&pool->state);
768 static void initialize_limiter(struct limiter *limiter, struct data_vio_pool *pool,
771 limiter->pool = pool;
834 * make_data_vio_pool() - Initialize a data_vio pool.
835 * @vdo: The vdo to which the pool will belong.
836 * @pool_size: The number of data_vios in the pool.
838 * @pool: A pointer to hold the newly allocated pool.
844 struct data_vio_pool *pool;
848 __func__, &pool);
853 "discard limit does not exceed pool size");
854 initialize_limiter(&pool->discard_limiter, pool, assign_discard_permit,
856 pool->discard_limiter.permitted_waiters = &pool->permitted_discards;
857 initialize_limiter(&pool->limiter, pool, assign_data_vio_to_waiter, pool_size);
858 pool->limiter.permitted_waiters = &pool->limiter.waiters;
859 INIT_LIST_HEAD(&pool->available);
860 spin_lock_init(&pool->lock);
861 vdo_set_admin_state_code(&pool->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
862 vdo_initialize_completion(&pool->completion, vdo, VDO_DATA_VIO_POOL_COMPLETION);
863 vdo_prepare_completion(&pool->completion, process_release_callback,
867 result = vdo_make_funnel_queue(&pool->queue);
869 free_data_vio_pool(vdo_forget(pool));
874 struct data_vio *data_vio = &pool->data_vios[i];
879 free_data_vio_pool(pool);
883 list_add(&data_vio->pool_entry, &pool->available);
886 *pool_ptr = pool;
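
A construction/teardown sketch for the pool follows. The signature of make_data_vio_pool() is assumed from the doc comment and the *pool_ptr assignment above (roughly: the vdo, a pool size, a discard limit, and an out-pointer); the discard_limit parameter and the example counts are illustrative assumptions, not values taken from the source.

/* Sketch only: create a pool at device load and free it at teardown. */
static int sketch_setup_pool(struct vdo *vdo, struct data_vio_pool **pool_ptr)
{
        /* Assumed signature: (vdo, pool_size, discard_limit, pool_ptr). */
        return make_data_vio_pool(vdo, 2048, 1536, pool_ptr);
}

static void sketch_teardown_pool(struct data_vio_pool *pool)
{
        /* Every data_vio must already be back in the pool (see free_data_vio_pool below). */
        free_data_vio_pool(pool);
}
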
893 * All data_vios must be returned to the pool before calling this function.
895 void free_data_vio_pool(struct data_vio_pool *pool)
899 if (pool == NULL)
907 BUG_ON(atomic_read(&pool->processing));
909 spin_lock(&pool->lock);
910 VDO_ASSERT_LOG_ONLY((pool->limiter.busy == 0),
911 "data_vio pool must not have %u busy entries when being freed",
912 pool->limiter.busy);
913 VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) &&
914 bio_list_empty(&pool->limiter.new_waiters)),
915 "data_vio pool must not have threads waiting to read or write when being freed");
916 VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) &&
917 bio_list_empty(&pool->discard_limiter.new_waiters)),
918 "data_vio pool must not have threads waiting to discard when being freed");
919 spin_unlock(&pool->lock);
921 list_for_each_entry_safe(data_vio, tmp, &pool->available, pool_entry) {
926 vdo_free_funnel_queue(vdo_forget(pool->queue));
927 vdo_free(pool);
942 __releases(&limiter->pool->lock)
949 spin_unlock(&limiter->pool->lock);
955 * vdo_launch_bio() - Acquire a data_vio from the pool, assign the bio to it, and launch it.
959 void vdo_launch_bio(struct data_vio_pool *pool, struct bio *bio)
963 VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state),
967 spin_lock(&pool->lock);
969 !acquire_permit(&pool->discard_limiter)) {
970 wait_permit(&pool->discard_limiter, bio);
974 if (!acquire_permit(&pool->limiter)) {
975 wait_permit(&pool->limiter, bio);
979 data_vio = get_available_data_vio(pool);
980 spin_unlock(&pool->lock);
981 launch_bio(pool->completion.vdo, data_vio, bio);
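
When vdo_launch_bio() cannot acquire a permit, the wait_permit() fragments above show the pool lock being released while the submitter blocks; the wake_up_nr() calls in process_release_callback() wake those submitters once permits return. A generic sketch of that kind of kernel wait-queue block follows; it illustrates the pattern only, is not the VDO implementation, and sketch_wait_for_permit() is an invented name.

/* Block the caller until a permit holder wakes this wait queue. */
static void sketch_wait_for_permit(wait_queue_head_t *blocked_threads,
                                   spinlock_t *pool_lock)
        __releases(pool_lock)
{
        DEFINE_WAIT(waiter);

        /* Exclusive waiters pair naturally with wake_up_nr(): one wakeup per permit. */
        prepare_to_wait_exclusive(blocked_threads, &waiter, TASK_UNINTERRUPTIBLE);
        spin_unlock(pool_lock);
        io_schedule();
        finish_wait(blocked_threads, &waiter);
}
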
988 struct data_vio_pool *pool = container_of(state, struct data_vio_pool, state);
990 spin_lock(&pool->lock);
991 drained = check_for_drain_complete_locked(pool);
992 spin_unlock(&pool->lock);
1005 * drain_data_vio_pool() - Wait asynchronously for all data_vios to be returned to the pool.
1006 * @completion: The completion to notify when the pool has drained.
1008 void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
1011 vdo_start_draining(&pool->state, VDO_ADMIN_STATE_SUSPENDING, completion,
1016 * resume_data_vio_pool() - Resume a data_vio pool.
1017 * @completion: The completion to notify when the pool has resumed.
1019 void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
1022 vdo_continue_completion(completion, vdo_resume_if_quiescent(&pool->state));
1035 * dump_data_vio_pool() - Dump a data_vio pool to the log.
1038 void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios)
1047 if (pool == NULL)
1050 spin_lock(&pool->lock);
1051 dump_limiter("data_vios", &pool->limiter);
1052 dump_limiter("discard permits", &pool->discard_limiter);
1057 for (i = 0; i < pool->limiter.limit; i++) {
1058 struct data_vio *data_vio = &pool->data_vios[i];
1065 spin_unlock(&pool->lock);
1068 spin_lock(&pool->lock);
1073 spin_unlock(&pool->lock);
1076 data_vio_count_t get_data_vio_pool_active_discards(struct data_vio_pool *pool)
1078 return READ_ONCE(pool->discard_limiter.busy);
1081 data_vio_count_t get_data_vio_pool_discard_limit(struct data_vio_pool *pool)
1083 return READ_ONCE(pool->discard_limiter.limit);
1086 data_vio_count_t get_data_vio_pool_maximum_discards(struct data_vio_pool *pool)
1088 return READ_ONCE(pool->discard_limiter.max_busy);
1091 int set_data_vio_pool_discard_limit(struct data_vio_pool *pool, data_vio_count_t limit)
1093 if (get_data_vio_pool_request_limit(pool) < limit) {
1098 spin_lock(&pool->lock);
1099 pool->discard_limiter.limit = limit;
1100 spin_unlock(&pool->lock);
1105 data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool)
1107 return READ_ONCE(pool->limiter.busy);
1110 data_vio_count_t get_data_vio_pool_request_limit(struct data_vio_pool *pool)
1112 return READ_ONCE(pool->limiter.limit);
1115 data_vio_count_t get_data_vio_pool_maximum_requests(struct data_vio_pool *pool)
1117 return READ_ONCE(pool->limiter.max_busy);
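
The getters above read the limiter counters with READ_ONCE() so statistics can be sampled from any thread without taking the pool lock; the writers update the fields under the lock, as set_data_vio_pool_discard_limit() shows. A hypothetical caller, such as a logging helper, might use them like this; log_pool_usage() is an invented name, not part of the VDO sources.

/* Sketch: sample pool usage without touching the pool's spinlock. */
static void log_pool_usage(struct data_vio_pool *pool)
{
        pr_info("data_vios: %u busy of %u (peak %u); discards: %u busy of %u (peak %u)\n",
                get_data_vio_pool_active_requests(pool),
                get_data_vio_pool_request_limit(pool),
                get_data_vio_pool_maximum_requests(pool),
                get_data_vio_pool_active_discards(pool),
                get_data_vio_pool_discard_limit(pool),
                get_data_vio_pool_maximum_discards(pool));
}
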
1271 * pool.
1283 struct data_vio_pool *pool = completion->vdo->data_vio_pool;
1285 vdo_funnel_queue_put(pool->queue, &completion->work_queue_entry_link);
1286 schedule_releases(pool);