Lines matching refs: ca

570 struct bch_dev *ca = NULL;
584 ca = bch2_dev_iterate(c, ca, k.k->p.inode);
589 if (!ca) {
596 for (u64 b = max_t(u64, ca->mi.first_bucket, start);
597 b < min_t(u64, ca->mi.nbuckets, end);
599 *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
605 ca = bch2_dev_iterate(c, ca, k.k->p.inode);
610 if (!ca) {
616 *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
621 bch2_dev_put(ca);
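
The run above (source lines 570-621) rebuilds the in-memory bucket generation table from btree keys, holding a device reference via bch2_dev_iterate() and dropping it with bch2_dev_put() once the scan is done. A minimal userspace model of the generation-number idea itself; every name below is hypothetical, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    #define NBUCKETS 8

    /* One 8-bit generation per bucket, as in *bucket_gen(ca, b) above. */
    static uint8_t bucket_gens[NBUCKETS];

    struct ptr { uint64_t bucket; uint8_t gen; };

    /* A pointer is stale once the bucket's generation has moved past it. */
    static int ptr_stale(struct ptr p)
    {
        return bucket_gens[p.bucket] != p.gen;
    }

    int main(void)
    {
        struct ptr p = { .bucket = 3, .gen = bucket_gens[3] };
        printf("stale before reuse: %d\n", ptr_stale(p)); /* 0 */
        bucket_gens[3]++;   /* bucket invalidated and reused */
        printf("stale after reuse:  %d\n", ptr_stale(p)); /* 1 */
        return 0;
    }
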
632 struct bch_dev *ca,
679 if (ca->mi.freespace_initialized &&
746 struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
747 if (!ca)
775 ret = bch2_bucket_do_index(trans, ca, old, old_a, false) ?:
776 bch2_bucket_do_index(trans, ca, new.s_c, new_a, true);
795 new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a, ca);
864 *bucket_gen(ca, new.k->p.offset) = new_a->gen;
866 bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
884 should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
897 struct bucket *g = gc_bucket(ca, new.k->p.offset);
913 bch2_dev_put(ca);
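
Source lines 746-913 are the alloc-key trigger: take a reference on the bucket's device, remove the old key's index entry and insert the new one (the paired bch2_bucket_do_index() calls at 775-776), then refresh the cached generation (864) and device usage (866). A compilable sketch of just the index-maintenance step, with hypothetical names standing in for the freespace btree:

    #include <stdbool.h>
    #include <stdio.h>

    #define NBUCKETS 8

    /* Hypothetical stand-in for the freespace index: membership per bucket. */
    static bool in_freespace[NBUCKETS];

    static void do_index(int bucket, bool set)
    {
        in_freespace[bucket] = set;
    }

    /* Mirrors do_index(old, false) followed by do_index(new, true) above:
     * the index entry always tracks the key's current state. */
    static void alloc_key_trigger(int bucket, bool old_free, bool new_free)
    {
        if (old_free && !new_free)
            do_index(bucket, false);   /* bucket left the free state */
        if (!old_free && new_free)
            do_index(bucket, true);    /* bucket became free */
    }

    int main(void)
    {
        alloc_key_trigger(5, false, true);
        printf("bucket 5 in freespace: %d\n", in_freespace[5]); /* 1 */
        return 0;
    }
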
963 static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
965 if (*ca) {
966 if (bucket->offset < (*ca)->mi.first_bucket)
967 bucket->offset = (*ca)->mi.first_bucket;
969 if (bucket->offset < (*ca)->mi.nbuckets)
972 bch2_dev_put(*ca);
973 *ca = NULL;
979 *ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
980 if (*ca) {
981 *bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
982 bch2_dev_get(*ca);
986 return *ca != NULL;
990 struct bch_dev **ca, struct bkey *hole)
999 *ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);
1004 if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
1005 if (!next_bucket(c, ca, &hole_start))
1012 if (k.k->p.offset > (*ca)->mi.nbuckets)
1013 bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
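
next_bucket() (source lines 963-986) and its caller (990-1013) advance a (device, bucket) cursor: clamp the offset up to the device's first_bucket, stop if it is still below nbuckets, otherwise drop the device reference and restart at the next device; bch2_key_resize() then clamps a hole so it never extends past nbuckets (1012-1013). A self-contained model of that advance, with made-up devices:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct dev { uint64_t first_bucket, nbuckets; };

    /* Two made-up devices; the real code looks devices up by pos.inode. */
    static struct dev devs[] = { {16, 100}, {16, 50} };
    #define NDEVS 2

    /* Advance (dev, offset) to the next valid bucket, hopping to the next
     * device when offset runs past nbuckets; mirrors next_bucket() above. */
    static bool next_bucket(int *dev, uint64_t *offset)
    {
        while (*dev < NDEVS) {
            if (*offset < devs[*dev].first_bucket)
                *offset = devs[*dev].first_bucket;
            if (*offset < devs[*dev].nbuckets)
                return true;
            (*dev)++;
            *offset = 0;
        }
        return false;
    }

    int main(void)
    {
        int dev = 0;
        uint64_t offset = 100;   /* one past device 0's last bucket */
        if (next_bucket(&dev, &offset))
            printf("next bucket: dev %d, offset %llu\n",
                   dev, (unsigned long long)offset);  /* dev 1, offset 16 */
        return 0;
    }
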
1036 struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
1037 if (fsck_err_on(!ca,
1042 if (!ca)
1045 if (!ca->mi.freespace_initialized)
1148 bch2_dev_put(ca);
1155 struct bch_dev *ca,
1165 if (!ca->mi.freespace_initialized)
1352 struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
1353 if (!ca) {
1361 if (fsck_err_on(end <= ca->mi.first_bucket ||
1362 start >= ca->mi.nbuckets, c,
1370 for (b = start; b < ca->mi.first_bucket; b++)
1378 for (b = ca->mi.nbuckets; b < end; b++)
1398 bch2_dev_put(ca);
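
The fsck check at source lines 1352-1398 validates a bucket_gens key against its device: a key lying entirely outside [first_bucket, nbuckets) is deleted, and one straddling the boundary has its out-of-range generation slots zeroed (the two loops at 1370 and 1378). A compilable sketch of that boundary logic; the function name and layout are assumptions:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* The key covers buckets [start, end); gens[] has end - start entries. */
    static bool check_bucket_gens_key(uint8_t *gens, uint64_t start, uint64_t end,
                                      uint64_t first_bucket, uint64_t nbuckets)
    {
        if (end <= first_bucket || start >= nbuckets)
            return false;                 /* caller deletes the whole key */

        for (uint64_t b = start; b < first_bucket; b++)
            gens[b - start] = 0;          /* below the device's first bucket */
        for (uint64_t b = nbuckets; b < end; b++)
            gens[b - start] = 0;          /* past the device's last bucket */
        return true;
    }

    int main(void)
    {
        uint8_t gens[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        check_bucket_gens_key(gens, 0, 8, 2, 6); /* valid buckets: 2..5 */
        for (int i = 0; i < 8; i++)
            printf("%d ", gens[i]);       /* 0 0 3 4 5 6 0 0 */
        printf("\n");
        return 0;
    }
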
1407 struct bch_dev *ca = NULL;
1426 k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
1447 ret = bch2_check_alloc_hole_freespace(trans, ca,
1475 bch2_dev_put(ca);
1476 ca = NULL;
1647 struct bch_dev *ca;
1651 static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
1653 if (s->ca == ca)
1656 if (s->ca && s->need_journal_commit_this_dev >
1657 bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
1660 if (s->ca)
1661 percpu_ref_put(&s->ca->io_ref);
1662 s->ca = ca;
1680 struct bch_dev *ca = s->ca && s->ca->dev_idx == pos.inode
1681 ? s->ca
1683 if (!ca) {
1688 discard_buckets_next_dev(c, s, ca);
1755 ca->mi.discard && !c->opts.nochanges) {
1761 blkdev_issue_discard(ca->disk_sb.bdev,
1762 k.k->p.offset * ca->mi.bucket_size,
1763 ca->mi.bucket_size,
1856 struct bch_dev *ca;
1863 ca = bch2_dev_get_ioref(c, i->inode, WRITE);
1864 if (!ca) {
1879 if (ca->mi.discard && !c->opts.nochanges)
1880 blkdev_issue_discard(ca->disk_sb.bdev,
1881 bucket.offset * ca->mi.bucket_size,
1882 ca->mi.bucket_size,
1891 percpu_ref_put(&ca->io_ref);
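
Both discard paths above (source lines 1755-1763 and 1879-1882) translate a bucket index into a device range: start = bucket offset * bucket size, length = one bucket, both apparently in 512-byte sectors given how blkdev_issue_discard() is called, and both gated on ca->mi.discard and !c->opts.nochanges. A hedged userspace analogue using the real BLKDISCARD ioctl, which takes a {start, length} pair in bytes; discard_bucket() and its parameters are otherwise hypothetical:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    /* fd must be an open block device. */
    static int discard_bucket(int fd, uint64_t bucket, uint64_t bucket_size_sectors)
    {
        uint64_t range[2] = {
            (bucket * bucket_size_sectors) << 9,  /* start, in bytes */
            bucket_size_sectors << 9,             /* length, in bytes */
        };
        return ioctl(fd, BLKDISCARD, range);
    }

Running this against a live device destroys the bucket's contents, which is why the listing only issues it for empty buckets and behind the ca->mi.discard / nochanges checks.
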
1904 struct bch_dev *ca = bch2_dev_rcu(c, bucket.inode);
1905 bool dead = !ca || percpu_ref_is_dying(&ca->io_ref);
2006 for_each_member_device(c, ca) {
2008 should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
2011 lru_pos(ca->dev_idx, 0, 0),
2012 lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
2017 bch2_dev_put(ca);
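
Source lines 2006-2017 drive invalidation of cached buckets: for each member device, should_invalidate_buckets() derives a target from current usage, then the LRU is walked from lru_pos(dev, 0, 0) toward lru_pos(dev, U64_MAX, LRU_TIME_MAX) so the coldest buckets go first. A small model of that walk, assuming a pre-sorted LRU; all names are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    struct lru_entry { uint64_t time, bucket; };

    /* Walk the device's LRU oldest-first, invalidating until the target
     * (modelled here as a plain count) is met. */
    static void invalidate_buckets(const struct lru_entry *lru, int n,
                                   int should_invalidate)
    {
        for (int i = 0; i < n && should_invalidate > 0; i++, should_invalidate--)
            printf("invalidate bucket %llu (lru time %llu)\n",
                   (unsigned long long)lru[i].bucket,
                   (unsigned long long)lru[i].time);
    }

    int main(void)
    {
        /* Entries already sorted by time, as an LRU btree walk would yield. */
        struct lru_entry lru[] = { {10, 3}, {25, 7}, {90, 1} };
        invalidate_buckets(lru, 3, 2);   /* coldest two: buckets 3, then 7 */
        return 0;
    }
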
2033 int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
2040 struct bpos end = POS(ca->dev_idx, bucket_end);
2046 BUG_ON(bucket_end > ca->mi.nbuckets);
2049 POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
2052 * Scan the alloc btree for every bucket on @ca, and add buckets to the
2057 bch_info(ca, "%s: currently at %llu/%llu",
2058 __func__, iter.pos.offset, ca->mi.nbuckets);
2082 ret = bch2_bucket_do_index(trans, ca, k, a, true) ?:
2121 bch_err_msg(ca, ret, "initializing free space");
2126 m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
2143 for_each_member_device(c, ca) {
2144 if (ca->mi.freespace_initialized)
2152 ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
2154 bch2_dev_put(ca);
2212 for_each_online_member(c, ca) {
2213 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
2220 for_each_rw_member(c, ca) {
2240 dev_reserve += ca->nr_btree_reserve * 2;
2241 dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */
2247 dev_reserve *= ca->mi.bucket_size;
2249 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
2250 ca->mi.first_bucket);
2255 ca->mi.bucket_size);
2278 for_each_rw_member(c, ca)
2279 ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
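
The capacity pass (source lines 2212-2279) sizes per-device reserves before computing usable capacity: twice the btree-node reserve plus nbuckets >> 6 (1/64, roughly 1.6% of buckets) for copygc, then converted from buckets to sectors by multiplying with bucket_size. A worked example with made-up sizes; the real code adds further terms the listing doesn't show:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t nbuckets = 1ULL << 20;  /* made up: 1 Mi buckets */
        uint64_t bucket_size = 1024;     /* sectors, i.e. 512 KiB buckets */
        uint64_t nr_btree_reserve = 512; /* made up */

        uint64_t dev_reserve = 0;
        dev_reserve += nr_btree_reserve * 2;  /* btree node reserve */
        dev_reserve += nbuckets >> 6;         /* copygc reserve: 1/64 */
        dev_reserve *= bucket_size;           /* buckets -> sectors */

        /* 17408 buckets -> 17825792 sectors, i.e. 8.5 GiB held back */
        printf("reserve: %llu sectors\n", (unsigned long long)dev_reserve);
        return 0;
    }
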
2283 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
2293 ob->dev == ca->dev_idx)
2302 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
2309 clear_bit(ca->dev_idx, c->rw_devs[i].d);
2316 bch2_open_buckets_stop(c, ca, false);
2333 !bch2_dev_has_open_write_point(c, ca));
2337 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
2342 if (ca->mi.data_allowed & (1 << i))
2343 set_bit(ca->dev_idx, c->rw_devs[i].d);
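
Finally, bch2_dev_allocator_add()/bch2_dev_allocator_remove() (source lines 2302-2343) maintain one device bitmask per data type: adding a device sets its bit in rw_devs[i] for every type permitted by ca->mi.data_allowed; removing clears it everywhere, then stops the device's open buckets and waits until it has no open write point (2316, 2333). A bitmask model; the type count and names are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_DATA_TYPES 4   /* made up; the kernel has its own enum */

    /* One device bitmap per data type, as in c->rw_devs[i].d above. */
    static uint64_t rw_devs[NR_DATA_TYPES];

    static void allocator_add(int dev_idx, unsigned data_allowed)
    {
        for (int i = 0; i < NR_DATA_TYPES; i++)
            if (data_allowed & (1U << i))
                rw_devs[i] |= 1ULL << dev_idx;   /* set_bit() in the kernel */
    }

    static void allocator_remove(int dev_idx)
    {
        for (int i = 0; i < NR_DATA_TYPES; i++)
            rw_devs[i] &= ~(1ULL << dev_idx);    /* clear_bit() in the kernel */
    }

    int main(void)
    {
        allocator_add(2, 0x5);    /* device 2 allowed for data types 0 and 2 */
        allocator_remove(2);      /* going read-only: cleared everywhere */
        printf("type 0 bitmap: %llu\n", (unsigned long long)rw_devs[0]); /* 0 */
        return 0;
    }
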