/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

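/*
 * Percentage-of-cache-in-use cutoffs for should_writeback() below:
 * ordinary writes are cached in writeback mode only while in_use stays
 * at or below the writeback cutoff, sync/metadata writes until it
 * exceeds the sync cutoff.  The _MAX values bound how far the runtime
 * tunables bch_cutoff_writeback{,_sync} may be raised.
 */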
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

#define CUTOFF_WRITEBACK_MAX		70
#define CUTOFF_WRITEBACK_SYNC_MAX	90

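/*
 * Limits on how many keys and how much data (in 512-byte sectors) the
 * writeback thread will write back in a single pass.
 */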
#define MAX_WRITEBACKS_IN_PASS  5
#define MAX_WRITESIZE_IN_PASS   5000	/* *512b */

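/*
 * How often the writeback rate is recomputed, in seconds: 5 by default,
 * raisable to at most 60 via the writeback_rate_update_seconds sysfs
 * attribute.
 */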
#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5

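/*
 * Cache utilization (percent) above which the writeback code wakes the
 * garbage collector to reclaim buckets.
 */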
#define BCH_AUTO_GC_DIRTY_THRESHOLD	50

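/*
 * Cache utilization thresholds (percent) at which the writeback rate is
 * scaled up progressively more aggressively, so that fragmented buckets
 * get reclaimed before the cache fills.
 */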
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW 50
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64

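/* Maximum number of parallel threads bch_sectors_dirty_init() may spawn */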
#define BCH_DIRTY_INIT_THRD_MAX	12
/*
 * 14 (16384ths) is chosen so that each backing device's share of the
 * writeback rate is a reasonable fraction of the total, and the
 * calculation does not overflow until individual backing devices reach
 * a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT   14

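/*
 * Shared state for the worker threads that bch_sectors_dirty_init()
 * uses to initialize the per-stripe dirty counters in parallel.
 */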
struct bch_dirty_init_state;
struct dirty_init_thrd_info {
	struct bch_dirty_init_state	*state;
	struct task_struct		*thread;
};

struct bch_dirty_init_state {
	struct cache_set		*c;
	struct bcache_device		*d;
	int				total_threads;
	int				key_idx;
	spinlock_t			idx_lock;
	atomic_t			started;
	atomic_t			enough;
	wait_queue_head_t		wait;
	struct dirty_init_thrd_info	infos[BCH_DIRTY_INIT_THRD_MAX];
};

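/* Total dirty sectors on a device: the sum of its per-stripe counters */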
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

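/*
 * Map a device offset (in sectors) to its stripe index; returns -EINVAL
 * if the offset lies beyond the last stripe.
 */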
static inline int offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);

	/* d->nr_stripes is in range [1, INT_MAX] */
	if (unlikely(offset >= d->nr_stripes)) {
		pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
			offset, d->nr_stripes);
		return -EINVAL;
	}

	/*
	 * Here offset is definitely smaller than INT_MAX,
	 * so returning it as int will never overflow.
	 */
	return offset;
}

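/*
 * Check whether any stripe overlapping the sector range
 * [offset, offset + nr_sectors) holds dirty data.
 */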
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{
	int stripe = offset_to_stripe(&dc->disk, offset);

	if (stripe < 0)
		return false;

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

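/*
 * Runtime-tunable cutoffs used by should_writeback(); they default to
 * CUTOFF_WRITEBACK and CUTOFF_WRITEBACK_SYNC.
 */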
extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;

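/*
 * Decide whether a write bio should be cached in writeback mode: never
 * while detaching, for discards, or once the cache is past the sync
 * cutoff; always for writes into already-dirty stripes when partial
 * stripe writes are expensive; otherwise only for sync/metadata/priority
 * requests, or while utilization is at or below the writeback cutoff.
 */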
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > bch_cutoff_writeback_sync)
		return false;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= bch_cutoff_writeback);
}

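/* Wake the writeback thread, if it was started successfully */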
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

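/*
 * The first writer to dirty the device marks the backing superblock
 * BDEV_STATE_DIRTY (so dirty data is detected after a crash) and kicks
 * the writeback thread; has_dirty makes later callers no-ops.
 */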
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

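/* Implemented in writeback.c */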
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif /* _BCACHE_WRITEBACK_H */