Lines Matching defs:ca

358 	struct cache *ca = bio->bi_private;
361 bch_count_io_errors(ca, bio->bi_status, 0,
363 closure_put(&ca->set->sb_write);
376 struct cache *ca = c->cache;
377 struct bio *bio = &ca->sb_bio;
383 ca->sb.seq++;
385 if (ca->sb.version < version)
386 ca->sb.version = version;
388 bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
390 bio->bi_private = ca;
393 __write_super(&ca->sb, ca->sb_disk, bio);
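
Lines 383-386 show the superblock update discipline: seq is bumped on every write, but version only ever moves forward. A minimal userspace sketch of that monotonic-update pattern (the struct and values here are illustrative stand-ins, not the kernel's struct cache_sb):

#include <stdint.h>
#include <stdio.h>

struct sb_sketch {              /* trimmed stand-in for struct cache_sb */
	uint64_t seq;           /* bumped on every write (line 383) */
	uint64_t version;       /* only ever moves forward (lines 385-386) */
};

static void sb_prepare_write(struct sb_sketch *sb, uint64_t version)
{
	sb->seq++;
	if (sb->version < version)
		sb->version = version;  /* upgrade, never downgrade */
}

int main(void)
{
	struct sb_sketch sb = { .seq = 10, .version = 3 };

	sb_prepare_write(&sb, 5);   /* upgrade: version becomes 5 */
	sb_prepare_write(&sb, 4);   /* no-op for version; seq still bumps */
	printf("seq=%llu version=%llu\n",
	       (unsigned long long)sb.seq,
	       (unsigned long long)sb.version);
	return 0;
}
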
503 struct cache *ca = c->cache;
512 size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
518 atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
583 struct cache *ca = bio->bi_private;
585 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
586 bch_bbio_free(bio, ca->set);
587 closure_put(&ca->prio);
590 static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf)
592 struct closure *cl = &ca->prio;
593 struct bio *bio = bch_bbio_alloc(ca->set);
597 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
598 bio_set_dev(bio, ca->bdev);
599 bio->bi_iter.bi_size = meta_bucket_bytes(&ca->sb);
602 bio->bi_private = ca;
604 bch_bio_map(bio, ca->disk_buckets);
606 closure_bio_submit(ca->set, bio, &ca->prio);
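
At line 597 the target sector is just the bucket index scaled by the bucket size, which bcache keeps in 512-byte sectors. A quick sanity check of that arithmetic with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative values; bucket_size is in 512-byte sectors */
	uint64_t bucket_size = 1024;    /* 512 KiB buckets */
	uint64_t bucket = 37;

	uint64_t sector = bucket * bucket_size;        /* line 597 */
	printf("bucket %llu -> sector %llu (byte offset %llu)\n",
	       (unsigned long long)bucket,
	       (unsigned long long)sector,
	       (unsigned long long)sector * 512);
	return 0;
}
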
610 int bch_prio_write(struct cache *ca, bool wait)
617 fifo_used(&ca->free[RESERVE_PRIO]),
618 fifo_used(&ca->free[RESERVE_NONE]),
619 fifo_used(&ca->free_inc));
627 size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
628 fifo_used(&ca->free[RESERVE_NONE]);
629 if (prio_buckets(ca) > avail)
635 lockdep_assert_held(&ca->set->bucket_lock);
637 ca->disk_buckets->seq++;
639 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
640 &ca->meta_sectors_written);
642 for (i = prio_buckets(ca) - 1; i >= 0; --i) {
644 struct prio_set *p = ca->disk_buckets;
646 struct bucket_disk *end = d + prios_per_bucket(ca);
648 for (b = ca->buckets + i * prios_per_bucket(ca);
649 b < ca->buckets + ca->sb.nbuckets && d < end;
655 p->next_bucket = ca->prio_buckets[i + 1];
656 p->magic = pset_magic(&ca->sb);
657 p->csum = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8);
659 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
662 mutex_unlock(&ca->set->bucket_lock);
663 prio_io(ca, bucket, REQ_OP_WRITE);
664 mutex_lock(&ca->set->bucket_lock);
666 ca->prio_buckets[i] = bucket;
667 atomic_dec_bug(&ca->buckets[bucket].pin);
670 mutex_unlock(&ca->set->bucket_lock);
672 bch_journal_meta(ca->set, &cl);
675 mutex_lock(&ca->set->bucket_lock);
681 for (i = 0; i < prio_buckets(ca); i++) {
682 if (ca->prio_last_buckets[i])
683 __bch_bucket_free(ca,
684 &ca->buckets[ca->prio_last_buckets[i]]);
686 ca->prio_last_buckets[i] = ca->prio_buckets[i];
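
The loop at line 642 runs from the last prio bucket down to the first because each on-disk prio_set records the bucket number of the next one in the chain (line 655): bucket i can only be written once bucket i+1 has been allocated. A hypothetical userspace sketch of that back-to-front chaining:

#include <stdint.h>
#include <stdio.h>

#define NR_PRIO 4

int main(void)
{
	/* [NR_PRIO] stays 0 so the last prio_set's next_bucket terminates
	 * the chain, mirroring ca->prio_buckets[i + 1] at line 655. */
	uint64_t prio_buckets[NR_PRIO + 1] = { 0 };
	uint64_t next_free = 100;  /* stand-in for bch_bucket_alloc() */

	for (int i = NR_PRIO - 1; i >= 0; i--) {
		uint64_t next_bucket = prio_buckets[i + 1]; /* already known */
		prio_buckets[i] = next_free++;              /* allocate, then "write" */
		printf("prio_set %d @ bucket %llu, next_bucket=%llu\n",
		       i, (unsigned long long)prio_buckets[i],
		       (unsigned long long)next_bucket);
	}
	/* prio_buckets[0] is the chain head the journal will point at */
	return 0;
}
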
691 static int prio_read(struct cache *ca, uint64_t bucket)
693 struct prio_set *p = ca->disk_buckets;
694 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
699 for (b = ca->buckets;
700 b < ca->buckets + ca->sb.nbuckets;
703 ca->prio_buckets[bucket_nr] = bucket;
704 ca->prio_last_buckets[bucket_nr] = bucket;
707 prio_io(ca, bucket, REQ_OP_READ);
710 bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
715 if (p->magic != pset_magic(&ca->sb)) {
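
Lines 710 and 715 validate each prio_set read back from disk: the crc64 covers everything after the 8-byte csum field itself (matching how it was computed at line 657), and only then is the magic compared. A standalone sketch of that layout-dependent check, with a dummy checksum standing in for bch_crc64() and a trimmed stand-in for the real struct prio_set:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct pset {              /* trimmed stand-in; real layout lives in bcache.h */
	uint64_t csum;     /* crc of everything from magic to end of bucket */
	uint64_t magic;
	unsigned char data[48];
};

/* dummy rotate-XOR checksum; the kernel uses bch_crc64() here */
static uint64_t toy_csum(const void *buf, size_t len)
{
	const unsigned char *p = buf;
	uint64_t sum = 0;

	for (size_t i = 0; i < len; i++)
		sum = (sum << 1 | sum >> 63) ^ p[i];
	return sum;
}

static int pset_valid(const struct pset *p, uint64_t expect_magic)
{
	/* csum is the first 8 bytes, so the covered span is the rest */
	if (p->csum != toy_csum(&p->magic, sizeof(*p) - 8))
		return 0;                       /* line 710 */
	return p->magic == expect_magic;        /* line 715 */
}

int main(void)
{
	struct pset p = { .magic = 0xdeadbeef };

	p.csum = toy_csum(&p.magic, sizeof(p) - 8);
	printf("valid=%d\n", pset_valid(&p, 0xdeadbeef));
	return 0;
}
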
793 struct cache *ca = d->c->cache;
798 bd_unlink_disk_holder(ca->bdev, d->disk);
805 struct cache *ca = c->cache;
808 bd_link_disk_holder(ca->bdev, d->disk);
1676 struct cache *ca;
1688 ca = c->cache;
1689 if (ca) {
1690 ca->set = NULL;
1692 kobject_put(&ca->kobj);
1717 struct cache *ca = c->cache;
1743 if (ca->alloc_thread)
1744 kthread_stop(ca->alloc_thread);
1860 struct cache *ca = container_of(sb, struct cache, sb);
1884 c->cache = ca;
1975 struct cache *ca = c->cache;
1982 c->nbuckets = ca->sb.nbuckets;
2002 if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
2047 if (bch_cache_allocator_start(ca))
2070 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
2073 for (j = 0; j < ca->sb.keys; j++)
2074 ca->sb.d[j] = ca->sb.first_bucket + j;
2079 if (bch_cache_allocator_start(ca))
2083 bch_prio_write(ca, true);
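
Lines 2070-2074 run only for a brand-new cache: the superblock gets one journal bucket per 128 data buckets (nbuckets >> 7), clamped to a range this listing doesn't show, with the buckets laid out contiguously from first_bucket. A rough userspace rendering, with assumed clamp bounds:

#include <stdint.h>
#include <stdio.h>

static int clamp_int(int v, int lo, int hi)   /* stand-in for clamp_t(int, ...) */
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	uint64_t nbuckets = 1 << 20;       /* example: ~1M buckets */
	uint64_t first_bucket = 1;

	/* the bounds below are assumptions; the real limits are not
	 * visible in this listing */
	int keys = clamp_int((int)(nbuckets >> 7), 2, 256);

	for (int j = 0; j < 3; j++)        /* first few entries of sb.d[] */
		printf("sb.d[%d] = %llu\n", j,
		       (unsigned long long)(first_bucket + j));
	printf("sb.keys = %d\n", keys);
	return 0;
}
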
2147 static const char *register_cache_set(struct cache *ca)
2154 if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
2161 c = bch_cache_set_alloc(&ca->sb);
2177 sprintf(buf, "cache%i", ca->sb.nr_this_dev);
2178 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
2179 sysfs_create_link(&c->kobj, &ca->kobj, buf))
2182 kobject_get(&ca->kobj);
2183 ca->set = c;
2184 ca->set->cache = ca;
2198 /* When ca->kobj released */
2201 struct cache *ca = container_of(kobj, struct cache, kobj);
2204 if (ca->set) {
2205 BUG_ON(ca->set->cache != ca);
2206 ca->set->cache = NULL;
2209 free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
2210 kfree(ca->prio_buckets);
2211 vfree(ca->buckets);
2213 free_heap(&ca->heap);
2214 free_fifo(&ca->free_inc);
2217 free_fifo(&ca->free[i]);
2219 if (ca->sb_disk)
2220 put_page(virt_to_page(ca->sb_disk));
2222 if (ca->bdev_file)
2223 fput(ca->bdev_file);
2225 kfree(ca);
2229 static int cache_alloc(struct cache *ca)
2238 kobject_init(&ca->kobj, &bch_cache_ktype);
2240 bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
2243 * when ca->sb.njournal_buckets is not zero, journal exists,
2251 btree_buckets = ca->sb.njournal_buckets ?: 8;
2252 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
2255 err = "ca->sb.nbuckets is too small";
2259 if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
2261 err = "ca->free[RESERVE_BTREE] alloc failed";
2265 if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
2267 err = "ca->free[RESERVE_PRIO] alloc failed";
2271 if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
2272 err = "ca->free[RESERVE_MOVINGGC] alloc failed";
2276 if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
2277 err = "ca->free[RESERVE_NONE] alloc failed";
2281 if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
2282 err = "ca->free_inc alloc failed";
2286 if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
2287 err = "ca->heap alloc failed";
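
The sizes picked in lines 2251-2286 all derive from two inputs: the journal size (the btree reserve) and the bucket count (everything else). free is the bucket count rounded up to a power of two and divided by 1024, free_inc is four times that, and the heap eight times. A sketch of the arithmetic with example numbers:

#include <stdint.h>
#include <stdio.h>

static uint64_t roundup_pow_of_two(uint64_t n)  /* userspace stand-in */
{
	uint64_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint64_t nbuckets = 900000;        /* example cache, not a power of two */
	uint64_t njournal_buckets = 0;     /* 0: no journal yet, fall back to 8 */

	uint64_t btree_buckets = njournal_buckets ? njournal_buckets : 8;
	uint64_t free = roundup_pow_of_two(nbuckets) >> 10;  /* line 2252 */

	printf("RESERVE_BTREE          %llu\n", (unsigned long long)btree_buckets);
	printf("RESERVE_MOVINGGC/NONE  %llu\n", (unsigned long long)free);
	printf("free_inc (free << 2)   %llu\n", (unsigned long long)(free << 2));
	printf("heap     (free << 3)   %llu\n", (unsigned long long)(free << 3));
	return 0;
}
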
2291 ca->buckets = vzalloc(array_size(sizeof(struct bucket),
2292 ca->sb.nbuckets));
2293 if (!ca->buckets) {
2294 err = "ca->buckets alloc failed";
2298 ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
2299 prio_buckets(ca), 2),
2301 if (!ca->prio_buckets) {
2302 err = "ca->prio_buckets alloc failed";
2306 ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb);
2307 if (!ca->disk_buckets) {
2308 err = "ca->disk_buckets alloc failed";
2312 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
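
Line 2298 allocates prio_buckets with room for twice prio_buckets(ca) entries, and line 2312 points prio_last_buckets at the second half, so the copy at line 686 in bch_prio_write moves values between halves of one buffer rather than between separate allocations. In miniature:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

int main(void)
{
	size_t n = 8;   /* stand-in for prio_buckets(ca) */
	uint64_t *prio_buckets = calloc(2 * n, sizeof(uint64_t));
	uint64_t *prio_last_buckets;

	if (!prio_buckets)
		return 1;
	prio_last_buckets = prio_buckets + n;   /* line 2312: second half */

	prio_buckets[0] = 100;                  /* pretend a prio write landed here */
	for (size_t i = 0; i < n; i++)          /* line 686, per slot */
		prio_last_buckets[i] = prio_buckets[i];

	printf("last[0] = %llu\n", (unsigned long long)prio_last_buckets[0]);
	free(prio_buckets);
	return 0;
}
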
2314 for_each_bucket(b, ca)
2319 kfree(ca->prio_buckets);
2321 vfree(ca->buckets);
2323 free_heap(&ca->heap);
2325 free_fifo(&ca->free_inc);
2327 free_fifo(&ca->free[RESERVE_NONE]);
2329 free_fifo(&ca->free[RESERVE_MOVINGGC]);
2331 free_fifo(&ca->free[RESERVE_PRIO]);
2333 free_fifo(&ca->free[RESERVE_BTREE]);
2338 pr_notice("error %pg: %s\n", ca->bdev, err);
2344 struct cache *ca)
2349 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
2350 ca->bdev_file = bdev_file;
2351 ca->bdev = file_bdev(bdev_file);
2352 ca->sb_disk = sb_disk;
2355 ca->discard = CACHE_DISCARD(&ca->sb);
2357 ret = cache_alloc(ca);
2367 * If we failed here, it means ca->kobj is not initialized yet,
2376 if (kobject_add(&ca->kobj, bdev_kobj(file_bdev(bdev_file)), "bcache")) {
2384 err = register_cache_set(ca);
2392 pr_info("registered cache device %pg\n", file_bdev(ca->bdev_file));
2395 kobject_put(&ca->kobj);
2431 struct cache *ca = c->cache;
2433 if (ca->bdev->bd_dev == dev)