/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS		1024
#define DM_MAX_TARGETS			1048576
#define DM_MAX_TARGET_PARAMS		1024

struct dm_io;

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

/*
 * Mempools pre-allocated at table load time.
 */
struct dm_md_mempools {
	struct bio_set bs;	/* clone bios issued to targets */
	struct bio_set io_bs;	/* dm_io + first clone, one per original bio */
};
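
/*
 * Illustrative sketch (not the exact dm-table.c code): both bio_sets are
 * typically initialized with front padding so that each bio allocated from
 * them carries its dm_target_io/dm_io immediately in front of it.  The pool
 * sizes below are made up; the real ones derive from the reserved-IO module
 * parameters and the target's per_io_data_size:
 *
 *	int r = bioset_init(&pools->io_bs, 256,
 *			    DM_IO_BIO_OFFSET, BIOSET_NEED_BVECS);
 *	if (!r)
 *		r = bioset_init(&pools->bs, 256,
 *				DM_TARGET_IO_BIO_OFFSET, 0);
 */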

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/*
	 * The requeue work context is needed for cloning a new bio to
	 * represent the dm_io being requeued, since each dm_io may still
	 * point at the original bio from the FS.
	 */
	struct work_struct requeue_work;
	struct dm_io *requeue_list;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned int internal_suspend_count;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct dm_md_mempools *mempools;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};

/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9

void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);
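
/*
 * A minimal sketch of how these keys are meant to be used: hot-path code
 * tests a key before touching the corresponding feature state, so an
 * unconfigured feature costs only a patched-out branch.  Illustrative
 * only; see dm.c for the real call sites:
 *
 *	if (static_branch_unlikely(&stats_enabled) &&
 *	    unlikely(dm_stats_used(&md->stats)))
 *		dm_stats_account_io(...);
 */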

static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table (see the lookup sketch below dm_table_get_target()) */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	bool integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical device.  This
	 * should be a combination of BLK_OPEN_READ and BLK_OPEN_WRITE.
	 */
	blk_mode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;
	struct rw_semaphore devices_lock;

	/* events get handed up using this callback */
	void (*event_fn)(void *data);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};

static inline struct dm_target *dm_table_get_target(struct dm_table *t,
						    unsigned int index)
{
	BUG_ON(index >= t->num_targets);
	return t->targets + index;
}
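
/*
 * Sketch of how the btree fields above are consumed (illustrative; the
 * real walk is dm_table_find_target() in dm-table.c, and get_child(),
 * get_node() and KEYS_PER_NODE are helpers local to that file).  Each
 * level of index[] holds sorted "highest sector" keys; the walk descends
 * one level per iteration and ends on the leaf key covering 'sector':
 *
 *	for (l = 0, n = 0, k = 0; l < t->depth; l++) {
 *		n = get_child(n, k);
 *		node = get_node(t, l, n);
 *		for (k = 0; k < KEYS_PER_NODE; k++)
 *			if (node[k] >= sector)
 *				break;
 *	}
 *	ti = &t->targets[(KEYS_PER_NODE * n) + k];
 */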

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;
	blk_short_t flags;
	unsigned int target_bio_nr;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;
};
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
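
/*
 * Sketch of the pointer arithmetic these offsets enable (illustrative;
 * the real version is dm_per_bio_data() in dm.c): a target's private
 * per-bio data sits in the bio_set front padding, in front of the
 * dm_target_io (and, for the first clone, the enclosing dm_io):
 *
 *	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
 *		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
 *	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
 */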

/*
 * dm_target_io flags
 */
enum {
	DM_TIO_INSIDE_DM_IO,
	DM_TIO_IS_DUPLICATE_BIO
};

static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;
	blk_short_t flags;
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;
	struct mapped_device *md;

	/* These three fields describe the mapped part of the original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};
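
/*
 * Illustrative sketch of the container_of chain this layout supports
 * (dm.c has equivalent helpers): because 'clone' is the last member of
 * dm_target_io and 'tio' the last member of dm_io, a clone bio can be
 * walked back to its owning structures without storing extra pointers:
 *
 *	struct dm_target_io *tio = container_of(clone, struct dm_target_io, clone);
 *	struct dm_io *io = container_of(tio, struct dm_io, tio);
 *		// second step is valid only if DM_TIO_INSIDE_DM_IO is set
 */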

/*
 * dm_io flags
 */
enum {
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT,
	DM_IO_BLK_STAT
};

static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}

void dm_io_rewind(struct dm_io *io, struct bio_set *bs);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
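
/*
 * Sketch of the intended usage (illustrative; dm-sysfs.c has the real
 * release hook): dm_kobject_holder pairs a kobject with a completion so
 * that teardown can wait until sysfs has dropped its last reference:
 *
 *	static void dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 *
 * ...after which the exit path puts the kobject and waits on
 * md->kobj_holder.completion.
 */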

unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
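
/*
 * Illustrative usage (a sketch, not a specific in-tree call site; 'sz'
 * and 'count' are made-up locals): a message handler appending into
 * 'result' can call this after each write to detect that the buffer
 * filled up and output was truncated:
 *
 *	sz += scnprintf(result + sz, maxlen - sz, "%u ", count);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		break;	// stop emitting; output is truncated
 */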

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif /* DM_CORE_INTERNAL_H */