Lines matching references to bc (struct dm_buffer_cache *) in drivers/md/dm-bufio.c

411 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
413 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
414 read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
416 down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
419 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
421 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
422 read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
424 up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
427 static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
429 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
430 write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
432 down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
435 static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
437 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
438 write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
440 up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
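These four helpers stripe the locking across bc->num_locks red-black trees: cache_index() hashes the block number to one tree, and the no_sleep_enabled static key selects a BH-disabling rwlock for clients that must not sleep, while the unlisted else branches take the rw_semaphore path. A minimal sketch of the indexing, assuming a power-of-two num_locks (the in-tree hash may mix the bits differently):

/*
 * Sketch only, not the in-tree implementation: cache_index() must map a
 * block deterministically to one of num_locks stripes, so lookups of the
 * same block always take the same tree and lock, while different blocks
 * usually spread across stripes and avoid contention.
 */
static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
	/* assumes num_locks is a power of two */
	return (unsigned int)((block * 2654435761ULL) >> 16) & (num_locks - 1);
}
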
538 static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
542 bc->num_locks = num_locks;
543 bc->no_sleep = no_sleep;
545 for (i = 0; i < bc->num_locks; i++) {
547 rwlock_init(&bc->trees[i].u.spinlock);
549 init_rwsem(&bc->trees[i].u.lock);
550 bc->trees[i].root = RB_ROOT;
553 lru_init(&bc->lru[LIST_CLEAN]);
554 lru_init(&bc->lru[LIST_DIRTY]);
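cache_init() fills in the structure those helpers dereference. A plausible reconstruction of that layout, consistent with the accesses above (field order and any name not shown in the listing, such as LIST_SIZE or buffer_tree, is an assumption):

/*
 * Reconstructed sketch of the cache: two LRUs (clean and dirty) plus a
 * flexible array of lock-striped red-black trees.  Each tree's lock is
 * either an rw_semaphore or, for no-sleep clients, an rwlock taken with
 * the _bh variants.
 */
struct buffer_tree {
	union {
		struct rw_semaphore lock;
		rwlock_t spinlock;
	} u;
	struct rb_root root;
};

struct dm_buffer_cache {
	struct lru lru[LIST_SIZE];	/* indexed by LIST_CLEAN / LIST_DIRTY */
	unsigned int num_locks;
	bool no_sleep;
	struct buffer_tree trees[];	/* one stripe per lock */
};
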
557 static void cache_destroy(struct dm_buffer_cache *bc)
561 for (i = 0; i < bc->num_locks; i++)
562 WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
564 lru_destroy(&bc->lru[LIST_CLEAN]);
565 lru_destroy(&bc->lru[LIST_DIRTY]);
573 static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
575 return bc->lru[list_mode].count;
578 static inline unsigned long cache_total(struct dm_buffer_cache *bc)
580 return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
615 static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
619 cache_read_lock(bc, block);
620 b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
625 cache_read_unlock(bc, block);
636 static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
640 cache_read_lock(bc, b->block);
643 cache_read_unlock(bc, b->block);
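cache_get() takes the stripe's read lock, looks the block up in that stripe's tree and, in the unlisted body, takes a hold reference before unlocking; cache_put() drops that reference under the same lock and its return value reports whether it was the last one. A hypothetical caller (the function name is invented for illustration):

/*
 * Usage sketch: check whether a block is cached, holding a reference
 * only for the duration of the inspection.  hold_count handling happens
 * inside cache_get()/cache_put(); the caller never touches the locks.
 */
static bool buffer_is_cached(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b = cache_get(bc, block);	/* NULL if absent */

	if (!b)
		return false;

	/* ... inspect or use *b while the hold reference pins it ... */

	cache_put(bc, b);	/* true here would mean the last reference dropped */
	return true;
}
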
680 static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
688 le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
694 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
699 static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
705 lh_init(&lh, bc, true);
706 b = __cache_evict(bc, list_mode, pred, context, &lh);
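__cache_evict() asks a caller-supplied predicate (wrapped by __evict_pred) whether each candidate may go, then unlinks the chosen buffer from both the LRU and its tree; cache_evict() brackets that with lh_init(&lh, bc, true), which takes the stripe locks for writing as the walk crosses stripes. A sketch of such a predicate, assuming the ER_EVICT/ER_DONT_EVICT result values and the (struct dm_buffer *, void *) predicate signature used by the surrounding code:

/*
 * Hypothetical eviction predicate: only surrender buffers that are clean
 * and not held.  hold_count and the enum values are assumptions about the
 * rest of the file, not shown in the listing above.
 */
static enum evict_result evict_if_clean_and_idle(struct dm_buffer *b, void *context)
{
	if (atomic_read(&b->hold_count))
		return ER_DONT_EVICT;

	return b->list_mode == LIST_CLEAN ? ER_EVICT : ER_DONT_EVICT;
}

/* e.g.: b = cache_evict(bc, LIST_CLEAN, evict_if_clean_and_idle, NULL); */
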
717 static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
719 cache_write_lock(bc, b->block);
721 lru_remove(&bc->lru[b->list_mode], &b->lru);
723 lru_insert(&bc->lru[b->list_mode], &b->lru);
725 cache_write_unlock(bc, b->block);
734 static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
742 le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
748 lru_insert(&bc->lru[b->list_mode], &b->lru);
752 static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
757 lh_init(&lh, bc, true);
758 __cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
780 static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
783 struct lru *lru = &bc->lru[list_mode];
808 static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
813 lh_init(&lh, bc, false);
814 __cache_iterate(bc, list_mode, fn, context, &lh);
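cache_iterate() performs the same stripe-by-stripe walk read-locked (lh_init(&lh, bc, false)) and hands every buffer on the chosen LRU to a visitor until the visitor asks to stop. A sketch of such a visitor, assuming IT_NEXT/IT_COMPLETE return values analogous to the eviction results:

/*
 * Hypothetical iteration callback: count buffers on the dirty LRU and
 * stop early once a limit is reached.  The callback signature and the
 * IT_* values are assumptions, not part of the listed lines.
 */
struct count_ctx {
	unsigned long seen;
	unsigned long limit;
};

static enum it_result count_dirty(struct dm_buffer *b, void *context)
{
	struct count_ctx *c = context;

	if (++c->seen >= c->limit)
		return IT_COMPLETE;	/* stop the walk */

	return IT_NEXT;			/* visit the next buffer */
}

/* e.g.: cache_iterate(bc, LIST_DIRTY, count_dirty, &ctx); */
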
851 static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
858 cache_write_lock(bc, b->block);
860 r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
862 lru_insert(&bc->lru[b->list_mode], &b->lru);
863 cache_write_unlock(bc, b->block);
876 static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
880 cache_write_lock(bc, b->block);
886 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
887 lru_remove(&bc->lru[b->list_mode], &b->lru);
890 cache_write_unlock(bc, b->block);
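cache_insert() links a buffer into its stripe's tree and onto the matching LRU under the write lock, returning false when a buffer for that block is already present; cache_remove() undoes both links. A hypothetical insert path built on invented helpers (new_buffer()/destroy_buffer() are not from the listing):

/*
 * Usage sketch: install a freshly allocated buffer for @block, backing
 * off if the block turns out to be cached already.
 */
static struct dm_buffer *install_new_buffer(struct dm_buffer_cache *bc,
					    sector_t block)
{
	struct dm_buffer *b = new_buffer();	/* invented helper */

	if (!b)
		return NULL;

	b->block = block;
	b->list_mode = LIST_CLEAN;

	if (!cache_insert(bc, b)) {
		/* the block is already owned by another buffer */
		destroy_buffer(b);		/* invented helper */
		return NULL;
	}

	return b;
}
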
922 static void __remove_range(struct dm_buffer_cache *bc,
943 lru_remove(&bc->lru[b->list_mode], &b->lru);
949 static void cache_remove_range(struct dm_buffer_cache *bc,
955 BUG_ON(bc->no_sleep);
956 for (i = 0; i < bc->num_locks; i++) {
957 down_write(&bc->trees[i].u.lock);
958 __remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
959 up_write(&bc->trees[i].u.lock);
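cache_remove_range() cannot know in advance which stripes a block range maps to, so it takes every tree's rw_semaphore in turn; the BUG_ON(bc->no_sleep) records that this path is incompatible with no-sleep clients, since down_write() may sleep. A hypothetical caller, with the predicate and release callbacks invented for illustration:

/*
 * Usage sketch: drop every idle buffer in [begin, end).  The predicate
 * decides per buffer, the release callback disposes of whatever
 * __remove_range() unlinks; both names and the callback types are
 * assumptions, not taken from the listed lines.
 */
static enum evict_result drop_if_idle(struct dm_buffer *b, void *context)
{
	return atomic_read(&b->hold_count) ? ER_DONT_EVICT : ER_EVICT;
}

static void release_buffer(struct dm_buffer *b)
{
	/* ... return the buffer and its data to the allocator ... */
}

/* e.g.: cache_remove_range(bc, begin, end, drop_if_idle, release_buffer); */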