// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include <linux/cleanup.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

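/* Read the primary info block for this arena into 'super' */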
static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

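/* Read a lane's entire flog group (all four slots) from media */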
static int btt_log_group_read(struct arena_info *arena, u32 lane,
			struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

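/* Return the sequence number of flog slot 'log_idx' in CPU byte order */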
static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * It also updates the sequence number in this old entry to
 * make it the 'new' one if the mark_flag is set.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO The logic feels a bit kludge-y. make it better..
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
				"log corruption (%d): lane %d seq [%d, %d]\n",
				old_ent, lane, log.ent[arena->log_index[0]].seq,
				log.ent[arena->log_index[1]].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
		(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}

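/* Convert an internal (post-map) block number to a namespace byte offset */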
static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

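/*
 * If the free block for this lane has a known media error, zero it out so
 * that the underlying rw_bytes path clears the poison before the block is
 * handed out again on a subsequent write.
 */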
static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

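/*
 * Rebuild the in-memory freelist from the on-media flog, and finish any
 * map update that may have been interrupted by a power loss.
 */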
static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map_entry from btt_read_map is stripped of any flag bits,
		 * so use the stripped out versions from the log as well for
		 * testing whether recovery is needed. For restoration, use the
		 * 'raw' version of the log entries as that captured what we
		 * were going to write originally.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial_state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
		; /* known index possibilities */
	else {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}

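/* Allocate the read tracking table (one entry per lane) */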
static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

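/* Allocate and initialize the per-lane locks that serialize map updates */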
static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

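/*
 * Allocate an arena_info and, for a non-zero 'size', lay out the arena:
 * two info blocks, the data area, the map, and the log, with all offsets
 * expressed relative to the start of the raw BTT space.
 */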
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

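/*
 * Walk the raw namespace, validating the info block of each arena found,
 * and set up runtime state (freelist, RTT, map locks) for every valid arena.
 */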
static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	struct btt_sb *super __free(kfree) = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena)
			return -ENOMEM;

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
	return ret;
}

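/* Split the raw BTT capacity into as many maximally-sized arenas as fit */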
static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strscpy(super->signature, BTT_SIG, sizeof(super->signature));
	export_uuid(super->uuid, nd_btt->uuid);
	export_uuid(super->parent_uuid, parent_uuid);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

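/* Per-sector metadata bytes exposed through block integrity */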
static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

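/* Read 'len' bytes of data for internal block 'lba' into the page at 'off' */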
static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = bvec_kmap_local(&bv);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_local(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

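/*
 * Read path: for each sector, translate the external LBA to an arena and
 * pre-map LBA, look up the post-map block, and guard the data read with an
 * RTT entry so a concurrent writer cannot reuse the block mid-read.
 */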
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
						&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

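/*
 * Write path: write the data to the lane's free block first, then make it
 * visible by committing a flog entry and updating the map, giving single
 * sector power-fail atomicity.
 */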
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
			NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

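/* Route a single bio segment to the read or write path */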
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			enum req_op op, sector_t sector)
{
	int ret;

	if (!op_is_write(op)) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

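/*
 * Main bio entry point: iterate the bio segments, enforcing sector-size
 * alignment, and account the I/O.
 */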
static void btt_submit_bio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = bio->bi_bdev->bd_disk->private_data;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return;

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				  bio_op(bio), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
					"io error in %s sector %lld, len %d,\n",
					(op_is_write(bio_op(bio))) ? "WRITE" :
					"READ",
					(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	bio_endio(bio);
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		btt_submit_bio,
	.getgeo =		btt_getgeo,
};

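/*
 * Allocate the gendisk, set queue limits (and integrity metadata when the
 * lbasize leaves room for it), and register the BTT block device.
 */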
static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;
	struct queue_limits lim = {
		.logical_block_size	= btt->sector_size,
		.max_hw_sectors		= UINT_MAX,
	};
	int rc;

	btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(btt->btt_disk))
		return PTR_ERR(btt->btt_disk);

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);

	if (btt_meta_size(btt)) {
		rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
		if (rc)
			goto out_cleanup_disk;
	}

	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
	if (rc)
		goto out_cleanup_disk;

	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	nvdimm_check_and_set_ro(btt->btt_disk);

	return 0;

out_cleanup_disk:
	put_disk(btt->btt_disk);
	return rc;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	&struct nd_region for the REGION device
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
			    u32 lbasize, uuid_t *uuid,
			    struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t size, rawsize;
	int rc;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	size = nvdimm_namespace_capacity(ndns);
	rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
	if (rc)
		return rc;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = size - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
		       nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);