Lines Matching defs:bc

82 static void bkey_cached_free(struct btree_key_cache *bc,
85 struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
93 list_move_tail(&ck->list, &bc->freed_pcpu);
94 bc->nr_freed_pcpu++;
96 list_move_tail(&ck->list, &bc->freed_nonpcpu);
97 bc->nr_freed_nonpcpu++;
99 atomic_long_inc(&bc->nr_freed);
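
The lines above (82-99) are bkey_cached_free(). Note how line 85 recovers the enclosing bch_fs from the embedded btree_key_cache with container_of(); the same idiom recurs at lines 180, 929 and 1012. A minimal userspace sketch of the idiom, with hypothetical stand-in structs (the real layouts are much larger and live in the bcachefs headers):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Hypothetical mock-ups of the embedding relationship. */
struct btree_key_cache {
        long nr_keys;
};

struct bch_fs {
        int fs_id;
        struct btree_key_cache btree_key_cache;
};

int main(void)
{
        struct bch_fs fs = { .fs_id = 1 };
        struct btree_key_cache *bc = &fs.btree_key_cache;

        /* Recover the enclosing bch_fs from the embedded member,
         * as lines 85, 180, 929 and 1012 above do. */
        struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);

        printf("fs_id = %d\n", c->fs_id);
        return 0;
}
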
110 static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
115 bc->nr_freed_nonpcpu++;
117 list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) {
125 list_move(&ck->list, &bc->freed_nonpcpu);
129 static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
140 f = this_cpu_ptr(bc->pcpu_freed);
149 mutex_lock(&bc->lock);
151 f = this_cpu_ptr(bc->pcpu_freed);
156 __bkey_cached_move_to_freelist_ordered(bc, ck2);
160 __bkey_cached_move_to_freelist_ordered(bc, ck);
161 mutex_unlock(&bc->lock);
164 mutex_lock(&bc->lock);
165 list_move_tail(&ck->list, &bc->freed_nonpcpu);
166 bc->nr_freed_nonpcpu++;
167 mutex_unlock(&bc->lock);
170 mutex_lock(&bc->lock);
171 list_move_tail(&ck->list, &bc->freed_pcpu);
172 bc->nr_freed_pcpu++;
173 mutex_unlock(&bc->lock);
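
Lines 129-173 are bkey_cached_move_to_freelist(). Only lines mentioning bc are shown, so the branch structure is inferred, but the pattern is: push the freed entry onto a small per-CPU array when there is room (line 140); when the array is full, take bc->lock and spill entries from the top until half remain (lines 149-160). The re-fetch of this_cpu_ptr() at line 151 after taking the mutex is deliberate, since the task may have migrated CPUs while sleeping on the lock. A simplified userspace analogue of the push side, with hypothetical names and a plain singly-linked global list standing in for the ordered freed_nonpcpu list:

#include <pthread.h>

#define PCPU_MAX 16

struct obj {
        struct obj *next;
};

struct pcpu_freelist {
        unsigned nr;
        struct obj *objs[PCPU_MAX];
};

static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *freed_list;          /* stands in for freed_nonpcpu */
static unsigned long nr_freed;          /* cf. bc->nr_freed_nonpcpu    */

/* Caller passes its "per-CPU" buffer explicitly, in place of
 * this_cpu_ptr(bc->pcpu_freed). */
static void freelist_push(struct pcpu_freelist *f, struct obj *o)
{
        if (f->nr < PCPU_MAX) {
                f->objs[f->nr++] = o;   /* fast path, no lock taken */
                return;
        }

        /* Buffer full: spill from the top until half remains, then
         * put the new object on the global list too (cf. 149-160). */
        pthread_mutex_lock(&freelist_lock);
        while (f->nr > PCPU_MAX / 2) {
                struct obj *v = f->objs[--f->nr];

                v->next = freed_list;
                freed_list = v;
                nr_freed++;
        }
        o->next = freed_list;
        freed_list = o;
        nr_freed++;
        pthread_mutex_unlock(&freelist_lock);
}
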
177 static void bkey_cached_free_fast(struct btree_key_cache *bc,
180 struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
186 atomic_long_inc(&bc->nr_freed);
192 bkey_cached_move_to_freelist(bc, ck);
203 struct btree_key_cache *bc = &c->btree_key_cache;
213 f = this_cpu_ptr(bc->pcpu_freed);
219 mutex_lock(&bc->lock);
221 f = this_cpu_ptr(bc->pcpu_freed);
223 while (!list_empty(&bc->freed_nonpcpu) &&
225 ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
227 bc->nr_freed_nonpcpu--;
233 mutex_unlock(&bc->lock);
236 mutex_lock(&bc->lock);
237 if (!list_empty(&bc->freed_nonpcpu)) {
238 ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
240 bc->nr_freed_nonpcpu--;
242 mutex_unlock(&bc->lock);
245 mutex_lock(&bc->lock);
246 if (!list_empty(&bc->freed_pcpu)) {
247 ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list);
249 bc->nr_freed_pcpu--;
251 mutex_unlock(&bc->lock);
257 bkey_cached_move_to_freelist(bc, ck);
268 bkey_cached_move_to_freelist(bc, ck);
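
Lines 203-268 are the allocation-side mirror of the above: pop from the per-CPU buffer first, and when it runs dry, take bc->lock and refill up to half the buffer from the tail of freed_nonpcpu (lines 219-233), falling back to the bare freed_nonpcpu and freed_pcpu lists (lines 236-251). As far as the bc-only listing shows, lines 257 and 268 return the object to a freelist on failure paths. Continuing the userspace sketch above, the matching pop:

static struct obj *freelist_pop(struct pcpu_freelist *f)
{
        if (!f->nr) {
                /* Per-CPU buffer empty: refill up to half of it from
                 * the global list under the lock (cf. 219-233). */
                pthread_mutex_lock(&freelist_lock);
                while (f->nr < PCPU_MAX / 2 && freed_list) {
                        struct obj *o = freed_list;

                        freed_list = o->next;
                        nr_freed--;
                        f->objs[f->nr++] = o;
                }
                pthread_mutex_unlock(&freelist_lock);
        }

        return f->nr ? f->objs[--f->nr] : NULL;
}
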
325 struct btree_key_cache *bc = &c->btree_key_cache;
334 ck = bkey_cached_reuse(bc);
351 if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
361 bkey_cached_free_fast(bc, ck);
368 atomic_long_inc(&bc->nr_keys);
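
Lines 325-368 publish a reused or freshly allocated entry with rhashtable_lookup_insert_fast() (line 351), which fails with -EEXIST if another thread already inserted the same key; in that case line 361 sends the entry straight back to the freelist, and on success line 368 bumps bc->nr_keys. A sketch of that rhashtable call pattern with a hypothetical u64-keyed object (kernel-module context assumed; the real bch2_btree_key_cache_params are not shown in this listing):

#include <linux/rhashtable.h>
#include <linux/types.h>

struct cached_obj {
        struct rhash_head hash;
        u64 key;
};

static const struct rhashtable_params demo_params = {
        .head_offset    = offsetof(struct cached_obj, hash),
        .key_offset     = offsetof(struct cached_obj, key),
        .key_len        = sizeof(u64),
};

/* Returns 0 on success; -EEXIST means another thread won the race
 * and the caller should recycle obj, as line 361 above does. */
static int demo_insert(struct rhashtable *table, struct cached_obj *obj)
{
        return rhashtable_lookup_insert_fast(table, &obj->hash, demo_params);
}
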
830 struct btree_key_cache *bc = &c->btree_key_cache;
837 mutex_lock(&bc->lock);
845 list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
853 atomic_long_dec(&bc->nr_freed);
855 bc->nr_freed_nonpcpu--;
858 list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
866 atomic_long_dec(&bc->nr_freed);
868 bc->nr_freed_pcpu--;
872 tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
873 if (bc->shrink_iter >= tbl->size)
874 bc->shrink_iter = 0;
875 start = bc->shrink_iter;
880 pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));
883 next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
892 bkey_cached_evict(bc, ck);
893 bkey_cached_free(bc, ck);
903 bc->shrink_iter++;
904 if (bc->shrink_iter >= tbl->size)
905 bc->shrink_iter = 0;
906 } while (scanned < nr && bc->shrink_iter != start);
911 mutex_unlock(&bc->lock);
920 struct btree_key_cache *bc = &c->btree_key_cache;
921 long nr = atomic_long_read(&bc->nr_keys) -
922 atomic_long_read(&bc->nr_dirty);
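
Lines 830-911 and 920-922 are the two shrinker callbacks: the scan first reaps the freed lists, then walks the rhashtable buckets round-robin from bc->shrink_iter, evicting and freeing entries (lines 892-893) and remembering its position across calls (lines 903-906); the count reports only clean keys (nr_keys minus nr_dirty) as reclaimable. A sketch of how such callbacks plug into the shrinker_alloc()/shrinker_register() API that shrinker_free() at line 939 pairs with; the demo_* names are hypothetical, and the real registration presumably sits near line 1029 where bc->shrink is assigned:

#include <linux/shrinker.h>
#include <linux/errno.h>

static unsigned long demo_count(struct shrinker *shrink,
                                struct shrink_control *sc)
{
        struct btree_key_cache *bc = shrink->private_data;

        /* Only clean keys are reclaimable, as at lines 921-922. */
        long nr = atomic_long_read(&bc->nr_keys) -
                  atomic_long_read(&bc->nr_dirty);

        return nr > 0 ? nr : 0;
}

static unsigned long demo_scan(struct shrinker *shrink,
                               struct shrink_control *sc)
{
        /* Free up to sc->nr_to_scan clean entries here; the real
         * walk is the bucket loop at lines 872-906. */
        return 0;
}

static int demo_register(struct btree_key_cache *bc)
{
        struct shrinker *shrink = shrinker_alloc(0, "demo-key-cache");

        if (!shrink)
                return -ENOMEM;

        shrink->count_objects = demo_count;
        shrink->scan_objects  = demo_scan;
        shrink->private_data  = bc;
        bc->shrink = shrink;
        shrinker_register(shrink);
        return 0;
}
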
927 void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
929 struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
939 shrinker_free(bc->shrink);
941 mutex_lock(&bc->lock);
946 while (atomic_long_read(&bc->nr_keys)) {
948 tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
952 bkey_cached_evict(bc, ck);
961 per_cpu_ptr(bc->pcpu_freed, cpu);
970 BUG_ON(list_count_nodes(&bc->freed_pcpu) != bc->nr_freed_pcpu);
971 BUG_ON(list_count_nodes(&bc->freed_nonpcpu) != bc->nr_freed_nonpcpu);
973 list_splice(&bc->freed_pcpu, &items);
974 list_splice(&bc->freed_nonpcpu, &items);
976 mutex_unlock(&bc->lock);
987 if (atomic_long_read(&bc->nr_dirty) &&
991 atomic_long_read(&bc->nr_dirty));
993 if (atomic_long_read(&bc->nr_keys))
995 atomic_long_read(&bc->nr_keys));
997 if (bc->table_init_done)
998 rhashtable_destroy(&bc->table);
1000 free_percpu(bc->pcpu_freed);
1010 int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
1012 struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
1016 bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
1017 if (!bc->pcpu_freed)
1021 if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
1024 bc->table_init_done = true;
1029 bc->shrink = shrink;
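
Finally, lines 1010-1029 (init) and 927-1000 (exit) bracket the cache's lifetime: alloc_percpu() for the freelists, then rhashtable_init() guarded by a table_init_done flag so the exit path at lines 997-998 only destroys a table that was actually set up, with free_percpu() last. A compact skeleton of that lifecycle under the same assumptions as the sketches above (hypothetical demo_* names, kernel-module context):

#include <linux/percpu.h>
#include <linux/rhashtable.h>
#include <linux/errno.h>

struct demo_freelist {
        unsigned nr;
        void *objs[16];
};

struct demo_cache {
        struct rhashtable                table;
        bool                             table_init_done;
        struct demo_freelist __percpu   *pcpu_freed;
};

static int demo_cache_init(struct demo_cache *dc,
                           const struct rhashtable_params *params)
{
        dc->pcpu_freed = alloc_percpu(struct demo_freelist);
        if (!dc->pcpu_freed)
                return -ENOMEM;

        /* On failure, pcpu_freed is left for the exit path to free,
         * matching the unconditional free_percpu() at line 1000. */
        if (rhashtable_init(&dc->table, params))
                return -ENOMEM;

        dc->table_init_done = true;
        return 0;
}

static void demo_cache_exit(struct demo_cache *dc)
{
        /* Only tear down what init actually built (cf. 997-1000). */
        if (dc->table_init_done)
                rhashtable_destroy(&dc->table);
        free_percpu(dc->pcpu_freed);
}
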