Lines Matching refs:ca

66 for_each_member_device(c, ca) {
67 struct bch_dev_usage dev = bch2_dev_usage_read(ca);
71 ca->mi.bucket_size;
77 static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
84 ? ca->usage_gc
85 : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
88 void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
90 struct bch_fs *c = ca->fs;
95 memcpy(usage, ca->usage_base, u64s * sizeof(u64));
96 for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
97 acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
169 for_each_member_device_rcu(c, ca, NULL) {
172 acc_u64s_percpu((u64 *) ca->usage_base,
173 (u64 __percpu *) ca->usage[idx], u64s);
174 percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
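
The fragments at lines 77–97 and 169–174 outline a split-counter scheme: a plain base struct holds committed totals, gc keeps a private set, runtime updates land in per-cpu buffers indexed by the journal sequence, and a reader folds the buffers back in on demand. A minimal sketch of that scheme, assuming hypothetical my_dev/my_usage types and a power-of-two buffer count in place of JOURNAL_BUF_MASK; the real code uses dev_usage_ptr(), acc_u64s_percpu() and percpu_memset() as listed:

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical stand-ins for struct bch_dev / struct bch_dev_usage; only
 * the fields the listed lines touch are modelled. */
struct my_usage { u64 v[8]; };

struct my_dev {
        struct my_usage                 *usage_base;    /* committed totals */
        struct my_usage __percpu        *usage_gc;      /* gc's private set */
        struct my_usage __percpu        *usage[2];      /* per-journal-buf deltas */
};

/* Lines 84-85: gc accounts into its own set, everyone else into the buffer
 * selected by the journal sequence. */
static struct my_usage __percpu *usage_ptr(struct my_dev *d,
                                           u64 journal_seq, bool gc)
{
        return gc ? d->usage_gc
                  : d->usage[journal_seq & (ARRAY_SIZE(d->usage) - 1)];
}

/* Lines 95-97: a read is the committed base plus every per-cpu delta. */
static void my_usage_read(struct my_dev *d, struct my_usage *out)
{
        int cpu;

        memcpy(out, d->usage_base, sizeof(*out));

        for (unsigned i = 0; i < ARRAY_SIZE(d->usage); i++)
                for_each_possible_cpu(cpu) {
                        struct my_usage *p = per_cpu_ptr(d->usage[i], cpu);

                        for (unsigned j = 0; j < ARRAY_SIZE(out->v); j++)
                                out->v[j] += p->v[j];
                }
}

/* Lines 172-174: fold one delta buffer into the base, then clear it. */
static void my_usage_fold(struct my_dev *d, unsigned idx)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct my_usage *p = per_cpu_ptr(d->usage[idx], cpu);

                for (unsigned j = 0; j < ARRAY_SIZE(p->v); j++) {
                        d->usage_base->v[j] += p->v[j];
                        p->v[j] = 0;
                }
        }
}
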
270 void bch2_dev_usage_init(struct bch_dev *ca)
272 ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
288 void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
300 fs_usage->b.hidden -= ca->mi.bucket_size;
302 fs_usage->b.hidden += ca->mi.bucket_size;
304 u = dev_usage_ptr(ca, journal_seq, gc);
315 u->d[old->data_type].fragmented -= bch2_bucket_sectors_fragmented(ca, *old);
316 u->d[new->data_type].fragmented += bch2_bucket_sectors_fragmented(ca, *new);
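
Lines 300–316 show the update idiom: each affected counter loses the old bucket state's contribution and gains the new one's. A small sketch of that idiom with hypothetical types; the fragmentation formula below is an assumption, since the listing shows the bch2_bucket_sectors_fragmented() call but not its body:

#include <linux/types.h>

struct my_data_usage { u64 buckets, sectors, fragmented; };

struct my_bucket_state { unsigned data_type, dirty_sectors; };

static u64 my_fragmented(const struct my_bucket_state *b, unsigned bucket_size)
{
        /* assumed definition: the unused tail of a bucket that already
         * holds some data */
        return b->dirty_sectors ? bucket_size - b->dirty_sectors : 0;
}

/* d is indexed by data type; the real function updates its other counters
 * the same way (cf. lines 315-316). */
static void my_usage_update(struct my_data_usage *d,
                            const struct my_bucket_state *old,
                            const struct my_bucket_state *new,
                            unsigned bucket_size)
{
        d[old->data_type].fragmented -= my_fragmented(old, bucket_size);
        d[new->data_type].fragmented += my_fragmented(new, bucket_size);
}
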
483 struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
484 if (!ca) {
495 struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
502 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
519 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
541 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
552 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
566 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
604 bch2_dev_put(ca);
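
Lines 483–604 bracket the per-pointer work with a device reference: take it with bch2_dev_tryget(), skip the pointer if the device is gone, and drop it when done. A sketch of that bracket, assuming the in-tree fs/bcachefs declarations for struct bch_fs, struct bch_dev, struct bch_extent_ptr and the tryget/put helpers shown in the listing; process_ptr() is a hypothetical stand-in for the checks the listing elides:

/* assumes the fs/bcachefs headers that declare these types and helpers */

static int process_ptr(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
        /* stand-in for the checks at lines 495-566 */
        return 0;
}

static int check_one_ptr(struct bch_fs *c, const struct bch_extent_ptr *ptr)
{
        struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
        int ret;

        if (!ca)                /* device gone: the real code handles this
                                 * case at line 484 */
                return 0;

        ret = process_ptr(ca, ptr);

        bch2_dev_put(ca);       /* cf. line 604 */
        return ret;
}
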
632 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
633 struct bucket *g = PTR_GC_BUCKET(ca, ptr);
646 struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
647 struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
725 int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
733 size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
780 *bucket_gen(ca, bucket_nr),
963 static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
972 int ret = bch2_bucket_ref_update(trans, ca, k, ptr, sectors, ptr_data_type,
993 struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
994 if (unlikely(!ca)) {
1002 bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp);
1008 __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &a->v);
1013 ret = bch2_bucket_backpointer_mod(trans, ca, bucket, bp, k, insert);
1021 struct bucket *g = gc_bucket(ca, bucket.offset);
1024 ret = __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &new);
1027 bch2_dev_usage_update(c, ca, &old, &new, 0, true);
1033 bch2_dev_put(ca);
1280 struct bch_dev *ca, u64 b,
1289 bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b));
1317 static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
1324 struct bucket *g = gc_bucket(ca, b);
1338 if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
1340 ca->dev_idx, b, g->gen,
1353 bch2_dev_usage_update(c, ca, &old, &new, 0, true);
1359 struct bch_dev *ca, u64 b,
1370 if (b >= ca->mi.nbuckets)
1374 return bch2_mark_metadata_bucket(trans->c, ca, b, type, sectors, flags);
1377 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
1383 struct bch_dev *ca, u64 start, u64 end,
1388 u64 b = sector_to_bucket(ca, start);
1390 min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
1393 int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
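
Lines 1383–1393 split a sector range into per-bucket pieces before marking each piece. A sketch of that walk, assuming a fixed bucket_size in sectors so that sector_to_bucket() is a division and bucket_to_sector() a multiplication; mark_one() is a hypothetical callback standing in for bch2_trans_mark_metadata_bucket():

#include <linux/kernel.h>
#include <linux/types.h>

static int mark_sector_range(u64 start, u64 end, unsigned bucket_size,
                             int (*mark_one)(u64 bucket, unsigned sectors))
{
        while (start < end) {
                u64 b = start / bucket_size;            /* sector_to_bucket() */
                /* sectors of [start, end) that fall in bucket b, cf. line 1390 */
                unsigned sectors = min_t(u64, (b + 1) * bucket_size, end) - start;

                int ret = mark_one(b, sectors);
                if (ret)
                        return ret;

                start += sectors;
        }
        return 0;
}
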
1409 static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
1412 struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
1421 ret = bch2_trans_mark_metadata_sectors(trans, ca,
1428 ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
1436 ret = bch2_trans_mark_metadata_bucket(trans, ca,
1442 for (i = 0; i < ca->journal.nr; i++) {
1443 ret = bch2_trans_mark_metadata_bucket(trans, ca,
1444 ca->journal.buckets[i],
1445 BCH_DATA_journal, ca->mi.bucket_size, flags);
1453 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
1457 __bch2_trans_mark_dev_sb(trans, ca, flags));
1465 for_each_online_member(c, ca) {
1466 int ret = bch2_trans_mark_dev_sb(c, ca, flags);
1468 percpu_ref_put(&ca->io_ref);
1551 for_each_member_device(c, ca) {
1552 kvfree_rcu_mightsleep(ca->buckets_nouse);
1553 ca->buckets_nouse = NULL;
1559 for_each_member_device(c, ca) {
1560 BUG_ON(ca->buckets_nouse);
1562 ca->buckets_nouse = kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *
1565 if (!ca->buckets_nouse) {
1566 bch2_dev_put(ca);
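
Lines 1559–1566 allocate a per-device bucket bitmap. A sketch of that allocation, sized with BITS_TO_LONGS() as on line 1562; the GFP flags and zeroing are assumptions, since the listing cuts the kvmalloc() call off mid-argument:

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* One bit per bucket, rounded up to a whole number of longs. */
static unsigned long *alloc_bucket_bitmap(u64 nbuckets)
{
        return kvmalloc(BITS_TO_LONGS(nbuckets) * sizeof(unsigned long),
                        GFP_KERNEL | __GFP_ZERO);
}

Freeing the bitmap goes through kvfree() as on line 1636, or kvfree_rcu_mightsleep() as on line 1552 when lockless readers may still hold the pointer.
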
1582 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
1585 bool resize = ca->bucket_gens != NULL;
1588 BUG_ON(resize && ca->buckets_nouse);
1596 bucket_gens->first_bucket = ca->mi.first_bucket;
1601 down_write(&ca->bucket_lock);
1605 old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
1615 rcu_assign_pointer(ca->bucket_gens, bucket_gens);
1618 nbuckets = ca->mi.nbuckets;
1622 up_write(&ca->bucket_lock);
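
Lines 1582–1622 replace the bucket_gens array under the device's write lock and publish the new array with rcu_assign_pointer(), so lockless readers never see a torn pointer. A sketch of that resize under hypothetical names; the copy of the old contents and the deferred free are assumptions about the parts the listing does not show:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/overflow.h>
#include <linux/rcupdate.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_bucket_gens {
        u64     nbuckets;
        u8      b[];                    /* one generation byte per bucket */
};

struct my_gens_dev {
        struct rw_semaphore             bucket_lock;
        struct my_bucket_gens __rcu     *bucket_gens;
};

static int my_resize_gens(struct my_gens_dev *d, u64 nbuckets)
{
        struct my_bucket_gens *new, *old;

        new = kvzalloc(struct_size(new, b, nbuckets), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
        new->nbuckets = nbuckets;

        down_write(&d->bucket_lock);

        old = rcu_dereference_protected(d->bucket_gens,
                                        lockdep_is_held(&d->bucket_lock));
        if (old)
                memcpy(new->b, old->b, min(nbuckets, old->nbuckets));

        rcu_assign_pointer(d->bucket_gens, new);        /* cf. line 1615 */

        up_write(&d->bucket_lock);

        if (old)
                kvfree_rcu_mightsleep(old);     /* don't free under readers */
        return 0;
}
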
1634 void bch2_dev_buckets_free(struct bch_dev *ca)
1636 kvfree(ca->buckets_nouse);
1637 kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
1639 for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++)
1640 free_percpu(ca->usage[i]);
1641 kfree(ca->usage_base);
1644 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
1646 ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
1647 if (!ca->usage_base)
1650 for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++) {
1651 ca->usage[i] = alloc_percpu(struct bch_dev_usage);
1652 if (!ca->usage[i])
1656 return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
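
Lines 1634–1656 pair every allocation with a matching free: a kzalloc()'d base struct plus alloc_percpu() delta buffers, released with kfree() and free_percpu(). A sketch of that pairing, reusing the my_dev/my_usage layout from the sketch after line 174; the error unwinding is an assumption, the listing only shows the failing allocations themselves:

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/slab.h>

static void my_usage_free(struct my_dev *d)
{
        for (unsigned i = 0; i < ARRAY_SIZE(d->usage); i++)
                free_percpu(d->usage[i]);               /* cf. line 1640 */
        kfree(d->usage_base);                           /* cf. line 1641 */
}

static int my_usage_alloc(struct my_dev *d)
{
        d->usage_base = kzalloc(sizeof(*d->usage_base), GFP_KERNEL);
        if (!d->usage_base)
                return -ENOMEM;

        for (unsigned i = 0; i < ARRAY_SIZE(d->usage); i++) {
                d->usage[i] = alloc_percpu(struct my_usage);    /* cf. line 1651 */
                if (!d->usage[i]) {
                        my_usage_free(d);       /* NULL-safe partial unwind */
                        return -ENOMEM;
                }
        }
        return 0;
}
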