run.c revision 195c52bd
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: try to use extents tree (instead of array)
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/nls.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/* runs_tree is a continuous memory. Try to avoid big sizes. */
#define NTFS3_RUN_MAX_BYTES 0x10000

struct ntfs_run {
	CLST vcn; /* Virtual cluster number. */
	CLST len; /* Length in clusters. */
	CLST lcn; /* Logical cluster number. */
};

/*
 * run_lookup
 *
 * Look up the index of the MCB entry whose start is first <= vcn.
 * On success it returns true and sets the 'index' parameter to the
 * index of the entry found. If the entry is missing from the list,
 * 'index' is set to the insertion position for the entry in question.
 */
bool run_lookup(const struct runs_tree *run, CLST vcn, size_t *index)
{
	size_t min_idx, max_idx, mid_idx;
	struct ntfs_run *r;

	if (!run->count) {
		*index = 0;
		return false;
	}

	min_idx = 0;
	max_idx = run->count - 1;

	/* Check boundary cases specially, because they cover the most frequent requests. */
	r = run->runs;
	if (vcn < r->vcn) {
		*index = 0;
		return false;
	}

	if (vcn < r->vcn + r->len) {
		*index = 0;
		return true;
	}

	r += max_idx;
	if (vcn >= r->vcn + r->len) {
		*index = run->count;
		return false;
	}

	if (vcn >= r->vcn) {
		*index = max_idx;
		return true;
	}

	do {
		mid_idx = min_idx + ((max_idx - min_idx) >> 1);
		r = run->runs + mid_idx;

		if (vcn < r->vcn) {
			max_idx = mid_idx - 1;
			if (!mid_idx)
				break;
		} else if (vcn >= r->vcn + r->len) {
			min_idx = mid_idx + 1;
		} else {
			*index = mid_idx;
			return true;
		}
	} while (min_idx <= max_idx);

	*index = max_idx + 1;
	return false;
}
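
/*
 * Example (illustrative values, not from the driver): with two runs
 * { vcn=0, len=4 } and { vcn=8, len=4 }, run_lookup(run, 2, &i)
 * returns true with i == 0, while run_lookup(run, 5, &i) returns
 * false with i == 1, i.e. the insertion point between the two runs.
 */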

/*
 * run_consolidate
 *
 * Consolidate runs starting from a given one.
 */
static void run_consolidate(struct runs_tree *run, size_t index)
{
	size_t i;
	struct ntfs_run *r = run->runs + index;

	while (index + 1 < run->count) {
		/*
		 * Merge the current run with the next one
		 * if the start of the next run lies inside the one being tested.
		 */
		struct ntfs_run *n = r + 1;
		CLST end = r->vcn + r->len;
		CLST dl;

		/* Stop if there is a gap between the runs. */
		if (n->vcn > end)
			break;

		dl = end - n->vcn;

		/*
		 * If the range at 'index' overlaps with the next one,
		 * then either adjust its start position
		 * or (if it is completely covered) just remove it from the list.
		 */
		if (dl > 0) {
			if (n->len <= dl)
				goto remove_next_range;

			n->len -= dl;
			n->vcn += dl;
			if (n->lcn != SPARSE_LCN)
				n->lcn += dl;
			dl = 0;
		}

		/*
		 * If the sparse mode of the current and next runs
		 * does not match, they cannot be merged; move on.
		 */
		if ((n->lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) {
			index += 1;
			r = n;
			continue;
		}

		/*
		 * Stop if the first cluster of the next run does not
		 * immediately follow the last cluster of the current run.
		 */
		if (n->lcn != SPARSE_LCN && n->lcn != r->lcn + r->len)
			break;

		/*
		 * The next and current runs are contiguous.
		 * Merge them (eat/join).
		 */
		r->len += n->len - dl;

remove_next_range:
		i = run->count - (index + 1);
		if (i > 1)
			memmove(n, n + 1, sizeof(*n) * (i - 1));

		run->count -= 1;
	}
}
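
/*
 * Example (illustrative values): two adjacent runs
 * { vcn=0, lcn=16, len=4 } and { vcn=4, lcn=20, len=4 } are physically
 * contiguous, so run_consolidate() merges them into a single run
 * { vcn=0, lcn=16, len=8 } and shrinks the array by one entry.
 */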

/* Returns true if range [svcn - evcn] is mapped. */
bool run_is_mapped_full(const struct runs_tree *run, CLST svcn, CLST evcn)
{
	size_t i;
	const struct ntfs_run *r, *end;
	CLST next_vcn;

	if (!run_lookup(run, svcn, &i))
		return false;

	end = run->runs + run->count;
	r = run->runs + i;

	for (;;) {
		next_vcn = r->vcn + r->len;
		if (next_vcn > evcn)
			return true;

		if (++r >= end)
			return false;

		if (r->vcn != next_vcn)
			return false;
	}
}

bool run_lookup_entry(const struct runs_tree *run, CLST vcn, CLST *lcn,
		      CLST *len, size_t *index)
{
	size_t idx;
	CLST gap;
	struct ntfs_run *r;

	/* Fail immediately if the run was not touched yet. */
	if (!run->runs)
		return false;

	if (!run_lookup(run, vcn, &idx))
		return false;

	r = run->runs + idx;

	if (vcn >= r->vcn + r->len)
		return false;

	gap = vcn - r->vcn;
	if (r->len <= gap)
		return false;

	*lcn = r->lcn == SPARSE_LCN ? SPARSE_LCN : (r->lcn + gap);

	if (len)
		*len = r->len - gap;
	if (index)
		*index = idx;

	return true;
}
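
/*
 * Example (illustrative values): for a single run
 * { vcn=10, lcn=100, len=8 }, run_lookup_entry(run, 13, &lcn, &len, NULL)
 * returns true with lcn == 103 and len == 5, i.e. the mapping of vcn 13
 * and the number of clusters remaining in that run.
 */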

/*
 * run_truncate_head
 *
 * Decommit the range before vcn.
 */
void run_truncate_head(struct runs_tree *run, CLST vcn)
{
	size_t index;
	struct ntfs_run *r;

	if (run_lookup(run, vcn, &index)) {
		r = run->runs + index;

		if (vcn > r->vcn) {
			CLST dlen = vcn - r->vcn;

			r->vcn = vcn;
			r->len -= dlen;
			if (r->lcn != SPARSE_LCN)
				r->lcn += dlen;
		}

		if (!index)
			return;
	}
	r = run->runs;
	memmove(r, r + index, sizeof(*r) * (run->count - index));

	run->count -= index;

	if (!run->count) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/*
 * run_truncate
 *
 * Decommit the range after vcn.
 */
void run_truncate(struct runs_tree *run, CLST vcn)
{
	size_t index;

	/*
	 * If we hit a range, it has to be truncated.
	 * If the range being truncated becomes empty,
	 * it is removed entirely.
	 */
	if (run_lookup(run, vcn, &index)) {
		struct ntfs_run *r = run->runs + index;

		r->len = vcn - r->vcn;

		if (r->len > 0)
			index += 1;
	}

	/*
	 * At this point 'index' is set to the position that should be
	 * thrown away (including 'index' itself).
	 * Simple one - just set the new limit.
	 */
	run->count = index;

	/* Do not reallocate the array 'runs'. Only free it if possible. */
	if (!index) {
		kvfree(run->runs);
		run->runs = NULL;
		run->allocated = 0;
	}
}

/* Trim the head and the tail of the run table around vcn if necessary. */
void run_truncate_around(struct runs_tree *run, CLST vcn)
{
	run_truncate_head(run, vcn);

	if (run->count >= NTFS3_RUN_MAX_BYTES / sizeof(struct ntfs_run) / 2)
		run_truncate(run, (run->runs + (run->count >> 1))->vcn);
}

/*
 * run_add_entry
 *
 * Sets the location to a known state.
 * The run to be added may overlap with an existing location.
 * Returns false if out of memory.
 */
bool run_add_entry(struct runs_tree *run, CLST vcn, CLST lcn, CLST len,
		   bool is_mft)
{
	size_t used, index;
	struct ntfs_run *r;
	bool inrange;
	CLST tail_vcn = 0, tail_len = 0, tail_lcn = 0;
	bool should_add_tail = false;

	/*
	 * Lookup the insertion point.
	 *
	 * Execute bsearch for the entry containing
	 * the start position in question.
	 */
	inrange = run_lookup(run, vcn, &index);

	/*
	 * Shortcut here would be the case when the range was not found
	 * but the one being added continues the previous run.
	 * In this case the existing range can be used directly
	 * as the start point.
	 */
	if (!inrange && index > 0) {
		struct ntfs_run *t = run->runs + index - 1;

		if (t->vcn + t->len == vcn &&
		    (t->lcn == SPARSE_LCN) == (lcn == SPARSE_LCN) &&
		    (lcn == SPARSE_LCN || lcn == t->lcn + t->len)) {
			inrange = true;
			index -= 1;
		}
	}

	/*
	 * At this point 'index' either points to the range
	 * containing the start position or to the insertion position
	 * for a new range.
	 * So first let's check if the range being probed is already here.
	 */
	if (!inrange) {
requires_new_range:
		/*
		 * Range was not found.
		 * Insert at position 'index'.
		 */
		used = run->count * sizeof(struct ntfs_run);

		/*
		 * Check the allocated space.
		 * If it is not enough for one more entry,
		 * then it will be reallocated.
		 */
		if (run->allocated < used + sizeof(struct ntfs_run)) {
			size_t bytes;
			struct ntfs_run *new_ptr;

			/* Use a power of 2 for 'bytes'. */
			if (!used) {
				bytes = 64;
			} else if (used <= 16 * PAGE_SIZE) {
				if (is_power_of_2(run->allocated))
					bytes = run->allocated << 1;
				else
					bytes = (size_t)1
						<< (2 + blksize_bits(used));
			} else {
				bytes = run->allocated + (16 * PAGE_SIZE);
			}

			WARN_ON(!is_mft && bytes > NTFS3_RUN_MAX_BYTES);

			new_ptr = kvmalloc(bytes, GFP_KERNEL);

			if (!new_ptr)
				return false;

			r = new_ptr + index;
			memcpy(new_ptr, run->runs,
			       index * sizeof(struct ntfs_run));
			memcpy(r + 1, run->runs + index,
			       sizeof(struct ntfs_run) * (run->count - index));

			kvfree(run->runs);
			run->runs = new_ptr;
			run->allocated = bytes;

		} else {
			size_t i = run->count - index;

			r = run->runs + index;

			/* memmove appears to be a bottleneck here... */
			if (i > 0)
				memmove(r + 1, r, sizeof(struct ntfs_run) * i);
		}

		r->vcn = vcn;
		r->lcn = lcn;
		r->len = len;
		run->count += 1;
	} else {
		r = run->runs + index;

		/*
		 * If one of the ranges was not allocated,
		 * then the location just matched has to be split
		 * and the current one inserted.
		 * In the common case this requires the tail
		 * to be reinserted by a recursive call.
		 */
		if (((lcn == SPARSE_LCN) != (r->lcn == SPARSE_LCN)) ||
		    (lcn != SPARSE_LCN && lcn != r->lcn + (vcn - r->vcn))) {
			CLST to_eat = vcn - r->vcn;
			CLST Tovcn = to_eat + len;

			should_add_tail = Tovcn < r->len;

			if (should_add_tail) {
				tail_lcn = r->lcn == SPARSE_LCN
						   ? SPARSE_LCN
						   : (r->lcn + Tovcn);
				tail_vcn = r->vcn + Tovcn;
				tail_len = r->len - Tovcn;
			}

			if (to_eat > 0) {
				r->len = to_eat;
				inrange = false;
				index += 1;
				goto requires_new_range;
			}

			/* lcn should match the one being added. */
			r->lcn = lcn;
		}

		/*
		 * If the existing range fits, we are done.
		 * Otherwise extend the found one to cover the new range.
		 */
		if (r->vcn + r->len < vcn + len)
			r->len += len - ((r->vcn + r->len) - vcn);
	}

	/*
	 * And normalize it starting from the insertion point.
	 * It is possible that no insertion was needed, in which case
	 * the start point lies within the range of the entry
	 * that 'index' points to.
	 */
	if (inrange && index > 0)
		index -= 1;
	run_consolidate(run, index);
	run_consolidate(run, index + 1);

	/*
	 * A special case:
	 * an extra range has to be added as a tail.
	 */
	if (should_add_tail &&
	    !run_add_entry(run, tail_vcn, tail_lcn, tail_len, is_mft))
		return false;

	return true;
}
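
/*
 * Example (illustrative values): with an existing run
 * { vcn=0, lcn=16, len=8 }, adding { vcn=2, lcn=100, len=3 } splits it into
 * { vcn=0, lcn=16, len=2 }, { vcn=2, lcn=100, len=3 } and the reinserted
 * tail { vcn=5, lcn=21, len=3 }.
 */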

/* Helper for attr_collapse_range, which is a helper for fallocate(collapse_range). */
bool run_collapse_range(struct runs_tree *run, CLST vcn, CLST len)
{
	size_t index, eat;
	struct ntfs_run *r, *e, *eat_start, *eat_end;
	CLST end;

	if (WARN_ON(!run_lookup(run, vcn, &index)))
		return true; /* Should never be here. */

	e = run->runs + run->count;
	r = run->runs + index;
	end = vcn + len;

	if (vcn > r->vcn) {
		if (r->vcn + r->len <= end) {
			/* Collapse the tail of this run. */
			r->len = vcn - r->vcn;
		} else if (r->lcn == SPARSE_LCN) {
			/* Collapse a middle part of a sparse run. */
			r->len -= len;
		} else {
			/* Collapse a middle part of a normal run, split it. */
			if (!run_add_entry(run, vcn, SPARSE_LCN, len, false))
				return false;
			return run_collapse_range(run, vcn, len);
		}

		r += 1;
	}

	eat_start = r;
	eat_end = r;

	for (; r < e; r++) {
		CLST d;

		if (r->vcn >= end) {
			r->vcn -= len;
			continue;
		}

		if (r->vcn + r->len <= end) {
			/* Eat this run. */
			eat_end = r + 1;
			continue;
		}

		d = end - r->vcn;
		if (r->lcn != SPARSE_LCN)
			r->lcn += d;
		r->len -= d;
		r->vcn -= len - d;
	}

	eat = eat_end - eat_start;
	memmove(eat_start, eat_end, (e - eat_end) * sizeof(*r));
	run->count -= eat;

	return true;
}
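
/*
 * Example (illustrative values): for runs { vcn=0, lcn=16, len=4 } and
 * { vcn=4, lcn=32, len=4 }, run_collapse_range(run, 4, 4) eats the whole
 * second run, leaving only { vcn=0, lcn=16, len=4 }; runs located after
 * the collapsed range have their vcn shifted down by 'len'.
 */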

/*
 * run_get_entry
 *
 * Returns the index-th mapped region.
 */
bool run_get_entry(const struct runs_tree *run, size_t index, CLST *vcn,
		   CLST *lcn, CLST *len)
{
	const struct ntfs_run *r;

	if (index >= run->count)
		return false;

	r = run->runs + index;

	if (!r->len)
		return false;

	if (vcn)
		*vcn = r->vcn;
	if (lcn)
		*lcn = r->lcn;
	if (len)
		*len = r->len;
	return true;
}

/*
 * run_packed_size
 *
 * Calculates the size of a packed 64-bit integer.
 */
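/*
 * Example (illustrative values): run_packed_size(0x20) == 1,
 * run_packed_size(0x80) == 2 (a leading sign bit must stay clear for
 * positive values), run_packed_size(-1) == 1 (the single byte 0xFF)
 * and run_packed_size(-0x100) == 2.
 */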
#ifdef __BIG_ENDIAN
static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n + sizeof(n) - 1;

	if (n >= 0) {
		if (p[-7] || p[-6] || p[-5] || p[-4])
			p -= 4;
		if (p[-3] || p[-2])
			p -= 2;
		if (p[-1])
			p -= 1;
		if (p[0] & 0x80)
			p -= 1;
	} else {
		if (p[-7] != 0xff || p[-6] != 0xff || p[-5] != 0xff ||
		    p[-4] != 0xff)
			p -= 4;
		if (p[-3] != 0xff || p[-2] != 0xff)
			p -= 2;
		if (p[-1] != 0xff)
			p -= 1;
		if (!(p[0] & 0x80))
			p -= 1;
	}
	return (const u8 *)&n + sizeof(n) - p;
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		run_buf[7] = p[0];
		fallthrough;
	case 7:
		run_buf[6] = p[1];
		fallthrough;
	case 6:
		run_buf[5] = p[2];
		fallthrough;
	case 5:
		run_buf[4] = p[3];
		fallthrough;
	case 4:
		run_buf[3] = p[4];
		fallthrough;
	case 3:
		run_buf[2] = p[5];
		fallthrough;
	case 2:
		run_buf[1] = p[6];
		fallthrough;
	case 1:
		run_buf[0] = p[7];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	switch (size) {
	case 8:
		p[0] = run_buf[7];
		fallthrough;
	case 7:
		p[1] = run_buf[6];
		fallthrough;
	case 6:
		p[2] = run_buf[5];
		fallthrough;
	case 5:
		p[3] = run_buf[4];
		fallthrough;
	case 4:
		p[4] = run_buf[3];
		fallthrough;
	case 3:
		p[5] = run_buf[2];
		fallthrough;
	case 2:
		p[6] = run_buf[1];
		fallthrough;
	case 1:
		p[7] = run_buf[0];
	}
	return v;
}

#else

static inline int run_packed_size(const s64 n)
{
	const u8 *p = (const u8 *)&n;

	if (n >= 0) {
		if (p[7] || p[6] || p[5] || p[4])
			p += 4;
		if (p[3] || p[2])
			p += 2;
		if (p[1])
			p += 1;
		if (p[0] & 0x80)
			p += 1;
	} else {
		if (p[7] != 0xff || p[6] != 0xff || p[5] != 0xff ||
		    p[4] != 0xff)
			p += 4;
		if (p[3] != 0xff || p[2] != 0xff)
			p += 2;
		if (p[1] != 0xff)
			p += 1;
		if (!(p[0] & 0x80))
			p += 1;
	}

	return 1 + p - (const u8 *)&n;
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline void run_pack_s64(u8 *run_buf, u8 size, s64 v)
{
	const u8 *p = (u8 *)&v;

	/* memcpy(run_buf, &v, size); is it faster? */
	switch (size) {
	case 8:
		run_buf[7] = p[7];
		fallthrough;
	case 7:
		run_buf[6] = p[6];
		fallthrough;
	case 6:
		run_buf[5] = p[5];
		fallthrough;
	case 5:
		run_buf[4] = p[4];
		fallthrough;
	case 4:
		run_buf[3] = p[3];
		fallthrough;
	case 3:
		run_buf[2] = p[2];
		fallthrough;
	case 2:
		run_buf[1] = p[1];
		fallthrough;
	case 1:
		run_buf[0] = p[0];
	}
}

/* Fully trusted function. It does not check 'size' for errors. */
static inline s64 run_unpack_s64(const u8 *run_buf, u8 size, s64 v)
{
	u8 *p = (u8 *)&v;

	/* memcpy(&v, run_buf, size); is it faster? */
	switch (size) {
	case 8:
		p[7] = run_buf[7];
		fallthrough;
	case 7:
		p[6] = run_buf[6];
		fallthrough;
	case 6:
		p[5] = run_buf[5];
		fallthrough;
	case 5:
		p[4] = run_buf[4];
		fallthrough;
	case 4:
		p[3] = run_buf[3];
		fallthrough;
	case 3:
		p[2] = run_buf[2];
		fallthrough;
	case 2:
		p[1] = run_buf[1];
		fallthrough;
	case 1:
		p[0] = run_buf[0];
	}
	return v;
}
#endif

/*
 * run_pack
 *
 * Packs runs into the buffer.
 * packed_vcns - how many vcns we have packed.
 * packed_size - how many bytes of run_buf we have used.
 */
int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf,
	     u32 run_buf_size, CLST *packed_vcns)
{
	CLST next_vcn, vcn, lcn;
	CLST prev_lcn = 0;
	CLST evcn1 = svcn + len;
	int packed_size = 0;
	size_t i;
	bool ok;
	s64 dlcn;
	int offset_size, size_size, tmp;

	next_vcn = vcn = svcn;

	*packed_vcns = 0;

	if (!len)
		goto out;

	ok = run_lookup_entry(run, vcn, &lcn, &len, &i);

	if (!ok)
		goto error;

	if (next_vcn != vcn)
		goto error;

	for (;;) {
		next_vcn = vcn + len;
		if (next_vcn > evcn1)
			len = evcn1 - vcn;

		/* How many bytes are required to pack len. */
		size_size = run_packed_size(len);

		/* offset_size - how many bytes the packed dlcn takes. */
		if (lcn == SPARSE_LCN) {
			offset_size = 0;
			dlcn = 0;
		} else {
			/* NOTE: lcn can be less than prev_lcn! */
			dlcn = (s64)lcn - prev_lcn;
			offset_size = run_packed_size(dlcn);
			prev_lcn = lcn;
		}

		tmp = run_buf_size - packed_size - 2 - offset_size;
		if (tmp <= 0)
			goto out;

		/* Can we store this entire run? */
		if (tmp < size_size)
			goto out;

		if (run_buf) {
			/* Pack the run header. */
			run_buf[0] = ((u8)(size_size | (offset_size << 4)));
			run_buf += 1;

			/* Pack the length of the run. */
			run_pack_s64(run_buf, size_size, len);

			run_buf += size_size;
			/* Pack the offset from the previous lcn. */
			run_pack_s64(run_buf, offset_size, dlcn);
			run_buf += offset_size;
		}

		packed_size += 1 + offset_size + size_size;
		*packed_vcns += len;

		if (packed_size + 1 >= run_buf_size || next_vcn >= evcn1)
			goto out;

		ok = run_get_entry(run, ++i, &vcn, &lcn, &len);
		if (!ok)
			goto error;

		if (next_vcn != vcn)
			goto error;
	}

out:
	/* Store the terminating zero. */
	if (run_buf)
		run_buf[0] = 0;

	return packed_size + 1;

error:
	return -EOPNOTSUPP;
}
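
/*
 * Example (illustrative values): a single non-sparse run of 0x20 clusters
 * starting at lcn 0x100 (with prev_lcn == 0) packs as the mapping pair
 * bytes 21 20 00 01 followed by the terminating 00: header 0x21 means a
 * 1-byte length and a 2-byte lcn offset, both stored little endian.
 */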

/*
 * run_unpack
 *
 * Unpacks packed runs from "run_buf".
 * Returns an error if negative, or the number of bytes actually used.
 */
int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
	       CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
	       u32 run_buf_size)
{
	u64 prev_lcn, vcn64, lcn, next_vcn;
	const u8 *run_last, *run_0;
	bool is_mft = ino == MFT_REC_MFT;

	/* Check for empty. */
	if (evcn + 1 == svcn)
		return 0;

	if (evcn < svcn)
		return -EINVAL;

	run_0 = run_buf;
	run_last = run_buf + run_buf_size;
	prev_lcn = 0;
	vcn64 = svcn;

	/* Read all runs in the chain. */
	while (run_buf < run_last) {
		/* size_size - how many bytes the packed len takes. */
		u8 size_size = *run_buf & 0xF;
		/* offset_size - how many bytes the packed dlcn takes. */
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (!size_size)
			break;

		/*
		 * Unpack runs.
		 * NOTE: runs are stored in little-endian order.
		 * "len" is an unsigned value, "dlcn" is signed.
		 * A large positive number requires 5 bytes to store,
		 * e.g.: 05 FF 7E FF FF 00 00 00
		 */
		if (size_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		/* Skip size_size. */
		run_buf += size_size;

		if (!len)
			return -EINVAL;

		if (!offset_size)
			lcn = SPARSE_LCN64;
		else if (offset_size <= 8) {
			s64 dlcn;

			/* The initial value of dlcn is -1 or 0 (sign extension). */
			dlcn = (run_buf[offset_size - 1] & 0x80) ? (s64)-1 : 0;
			dlcn = run_unpack_s64(run_buf, offset_size, dlcn);
			/* Skip offset_size. */
			run_buf += offset_size;

			if (!dlcn)
				return -EINVAL;
			lcn = prev_lcn + dlcn;
			prev_lcn = lcn;
		} else
			return -EINVAL;

		next_vcn = vcn64 + len;
		/* Check the boundary. */
		if (next_vcn > evcn + 1)
			return -EINVAL;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (next_vcn > 0x100000000ull || (lcn + len) > 0x100000000ull) {
			ntfs_err(
				sbi->sb,
				"This driver is compiled without CONFIG_NTFS3_64BIT_CLUSTER (like windows driver).\n"
				"Volume contains 64 bits run: vcn %llx, lcn %llx, len %llx.\n"
				"Activate CONFIG_NTFS3_64BIT_CLUSTER to process this case",
				vcn64, lcn, len);
			return -EOPNOTSUPP;
		}
#endif
		if (lcn != SPARSE_LCN64 && lcn + len > sbi->used.bitmap.nbits) {
			/* The lcn range is out of the volume. */
			return -EINVAL;
		}

		if (!run)
			; /* Called from check_attr (fslog.c) to check the run. */
		else if (run == RUN_DEALLOCATE) {
			/* Called from ni_delete_all to free clusters without storing them in the run. */
			if (lcn != SPARSE_LCN64)
				mark_as_free_ex(sbi, lcn, len, true);
		} else if (vcn64 >= vcn) {
			if (!run_add_entry(run, vcn64, lcn, len, is_mft))
				return -ENOMEM;
		} else if (next_vcn > vcn) {
			u64 dlen = vcn - vcn64;

			if (!run_add_entry(run, vcn, lcn + dlen, len - dlen,
					   is_mft))
				return -ENOMEM;
		}

		vcn64 = next_vcn;
	}

	if (vcn64 != evcn + 1) {
		/* Unexpected length of unpacked runs. */
		return -EINVAL;
	}

	return run_buf - run_0;
}
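
/*
 * Example (illustrative values): the mapping pair bytes 11 10 F0 decode to
 * a run of 0x10 clusters whose lcn offset byte 0xF0 has the sign bit set,
 * so dlcn is sign-extended to -16 and the run starts at prev_lcn - 16.
 */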

#ifdef NTFS3_CHECK_FREE_CLST
/*
 * run_unpack_ex
 *
 * Unpacks packed runs from "run_buf".
 * Checks that the unpacked runs are marked as used in the bitmap.
 * Returns an error if negative, or the number of bytes actually used.
 */
int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino,
		  CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf,
		  u32 run_buf_size)
{
	int ret, err;
	CLST next_vcn, lcn, len;
	size_t index;
	bool ok;
	struct wnd_bitmap *wnd;

	ret = run_unpack(run, sbi, ino, svcn, evcn, vcn, run_buf, run_buf_size);
	if (ret <= 0)
		return ret;

	if (!sbi->used.bitmap.sb || !run || run == RUN_DEALLOCATE)
		return ret;

	if (ino == MFT_REC_BADCLUST)
		return ret;

	next_vcn = vcn = svcn;
	wnd = &sbi->used.bitmap;

	for (ok = run_lookup_entry(run, vcn, &lcn, &len, &index);
	     next_vcn <= evcn;
	     ok = run_get_entry(run, ++index, &vcn, &lcn, &len)) {
		if (!ok || next_vcn != vcn)
			return -EINVAL;

		next_vcn = vcn + len;

		if (lcn == SPARSE_LCN)
			continue;

		if (sbi->flags & NTFS_FLAGS_NEED_REPLAY)
			continue;

		down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
		/* Check for free blocks. */
		ok = wnd_is_used(wnd, lcn, len);
		up_read(&wnd->rw_lock);
		if (ok)
			continue;

		/* Looks like the volume is corrupted. */
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);

		if (down_write_trylock(&wnd->rw_lock)) {
			/* Mark all free bits as used in the range [lcn, lcn + len). */
			CLST i, lcn_f = 0, len_f = 0;

			err = 0;
			for (i = 0; i < len; i++) {
				if (wnd_is_free(wnd, lcn + i, 1)) {
					if (!len_f)
						lcn_f = lcn + i;
					len_f += 1;
				} else if (len_f) {
					err = wnd_set_used(wnd, lcn_f, len_f);
					len_f = 0;
					if (err)
						break;
				}
			}

			if (len_f)
				err = wnd_set_used(wnd, lcn_f, len_f);

			up_write(&wnd->rw_lock);
			if (err)
				return err;
		}
	}

	return ret;
}
#endif

/*
 * run_get_highest_vcn
 *
 * Returns the highest vcn from a mapping pairs array.
 * It is used while replaying the log file.
 */
int run_get_highest_vcn(CLST vcn, const u8 *run_buf, u64 *highest_vcn)
{
	u64 vcn64 = vcn;
	u8 size_size;

	while ((size_size = *run_buf & 0xF)) {
		u8 offset_size = *run_buf++ >> 4;
		u64 len;

		if (size_size > 8 || offset_size > 8)
			return -EINVAL;

		len = run_unpack_s64(run_buf, size_size, 0);
		if (!len)
			return -EINVAL;

		run_buf += size_size + offset_size;
		vcn64 += len;

#ifndef CONFIG_NTFS3_64BIT_CLUSTER
		if (vcn64 > 0x100000000ull)
			return -EINVAL;
#endif
	}

	*highest_vcn = vcn64 - 1;
	return 0;
}