// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * External NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP may be defined
 * to tune the preallocation algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

// 16M
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
// 16G
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))

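/*
 * get_pre_allocated() rounds @size up to a power-of-two "clump".
 * Worked example (pure arithmetic): for size = 100 KiB the first branch
 * is taken (size <= 16M), the clump is 1 << 16 = 64 KiB, and the result
 * is round_up(100 KiB, 64 KiB) = 128 KiB. For size = 32 MiB the middle
 * branch gives align_shift = 15 + __ffs(32M >> 24) = 16, i.e. the same
 * 64 KiB granularity.
 */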
static inline u64 get_pre_allocated(u64 size)
{
	u32 clump;
	u8 align_shift;
	u64 ret;

	if (size <= NTFS_CLUMP_MIN) {
		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
	} else if (size >= NTFS_CLUMP_MAX) {
		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
	} else {
		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
		clump = 1u << align_shift;
	}

	ret = (((size + clump - 1) >> align_shift)) << align_shift;

	return ret;
}

/*
 * attr_load_runs - Load all runs stored in @attr.
 */
static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
			  struct runs_tree *run, const CLST *vcn)
{
	int err;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	CLST evcn = le64_to_cpu(attr->nres.evcn);
	u32 asize;
	u16 run_off;

	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
		return 0;

	if (vcn && (evcn < *vcn || *vcn < svcn))
		return -EINVAL;

	asize = le32_to_cpu(attr->size);
	run_off = le16_to_cpu(attr->nres.run_off);

	if (run_off > asize)
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
			    asize - run_off);
	if (err < 0)
		return err;

	return 0;
}
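
/*
 * A minimal usage sketch (hypothetical caller): map the runs covering
 * @vcn, then look it up:
 *
 *	err = attr_load_runs(attr, ni, run, &vcn);
 *	if (!err && !run_lookup_entry(run, vcn, &lcn, &clen, NULL))
 *		err = -EINVAL;
 */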

/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim)
{
	int err = 0;
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
	size_t idx;

	if (!len)
		goto out;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
failed:
		run_truncate(run, vcn0);
		err = -EINVAL;
		goto out;
	}

	for (;;) {
		if (clen > len)
			clen = len;

		if (!clen) {
			err = -EINVAL;
			goto out;
		}

		if (lcn != SPARSE_LCN) {
			if (sbi) {
				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
				mark_as_free_ex(sbi, lcn, clen, trim);
			}
			dn += clen;
		}

		len -= clen;
		if (!len)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			/* Save memory - don't load entire run. */
			goto failed;
		}
	}

out:
	if (done)
		*done += dn;

	return err;
}
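
/*
 * Note: run_deallocate_ex() is also called with a NULL @sbi (see
 * attr_punch_hole() below) purely to count how many clusters a range
 * would free, without touching the allocation bitmap.
 */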

/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
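 *
 * @pre_alloc - extra clusters to allocate beyond @len; cleared to zero
 *	if the preallocated part does not fit.
 * @fr - when nonzero, stop early once the run has grown by @fr fragments
 *	(the number of clusters actually allocated is returned in @alen).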
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn, CLST *new_len)
{
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	size_t cnt = run->count;

	for (;;) {
		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
					       opt);

		if (err == -ENOSPC && pre) {
			pre = 0;
			if (*pre_alloc)
				*pre_alloc = 0;
			continue;
		}

		if (err)
			goto out;

		if (vcn == vcn0) {
			/* Return the first fragment. */
			if (new_lcn)
				*new_lcn = lcn;
			if (new_len)
				*new_len = flen;
		}

		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space' */
			mark_as_free_ex(sbi, lcn, flen, false);
			err = -ENOMEM;
			goto out;
		}

		if (opt & ALLOCATE_ZERO) {
			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;

			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
						   (sector_t)lcn << shift,
						   (sector_t)flen << shift,
						   GFP_NOFS, 0);
			if (err)
				goto out;
		}

		vcn += flen;

		if (flen >= len || (opt & ALLOCATE_MFT) ||
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
		}

		len -= flen;
	}

out:
	/* Undo 'ntfs_look_for_free_space' */
	if (vcn - vcn0) {
		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
		run_truncate(run, vcn0);
	}

	return err;
}

/*
 * attr_make_nonresident
 *
 * If @page is not NULL, it already contains the resident data
 * and is locked (called from ni_write_frame()).
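 *
 * Rough flow (as implemented below): copy the resident attribute,
 * allocate clusters for the data, write the data out (or dirty the page
 * cache page), remove the resident attribute and insert a nonresident
 * one in its place; on failure the original resident attribute is
 * restored.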
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;
	struct MFT_REC *rec;
	u32 used, asize, rsize, aoff, align;
	bool is_data;
	CLST len, alen;
	char *next;
	int err;

	if (attr->non_res) {
		*ins_attr = attr;
		return 0;
	}

	sbi = mi->sbi;
	rec = mi->mrec;
	attr_s = NULL;
	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	align = sbi->cluster_size;
	if (is_attr_compressed(attr))
		align <<= COMPRESSION_UNIT;
	len = (rsize + align - 1) >> sbi->cluster_bits;

	run_init(run);

	/* Make a copy of original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);
	if (!attr_s) {
		err = -ENOMEM;
		goto out;
	}

	if (!len) {
		/* Empty resident -> Empty nonresident. */
		alen = 0;
	} else {
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out1;

		if (!rsize) {
			/* Empty resident -> Non-empty nonresident. */
		} else if (!is_data) {
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
			if (err)
				goto out2;
		} else if (!page) {
			char *kaddr;

			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
			if (!page) {
				err = -ENOMEM;
				goto out2;
			}
			kaddr = kmap_atomic(page);
			memcpy(kaddr, data, rsize);
			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
			kunmap_atomic(kaddr);
			flush_dcache_page(page);
			SetPageUptodate(page);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}

	/* Remove original attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;
	if (le)
		al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL, NULL);
	if (err)
		goto out3;

	kfree(attr_s);
	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	*ins_attr = attr;

	if (is_data)
		ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes nonresident. */
	return 0;

out3:
	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);
	mi->dirty = true;
out2:
	/* Undo: do not trim newly allocated clusters. */
	run_deallocate(sbi, run, false);
	run_close(run);
out1:
	kfree(attr_s);
out:
	return err;
}

/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail = used - aoff - asize;
	char *next = Add2Ptr(attr, asize);
	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (dsize < 0) {
		memmove(next + dsize, next, tail);
	} else if (dsize > 0) {
		if (used + dsize > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
	}

	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,
		       new_size - rsize);

	rec->used = cpu_to_le32(used + dsize);
	attr->size = cpu_to_le32(asize + dsize);
	attr->res.data_size = cpu_to_le32(new_size);
	mi->dirty = true;
	*ins_attr = attr;

	return 0;
}

/*
 * attr_set_size - Change the size of an attribute.
 *
 * Extend:
 *   - Sparse/compressed: No allocated clusters.
 *   - Normal: Append allocated and preallocated new clusters.
 * Shrink:
 *   - Do not deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
		  const __le16 *name, u8 name_len, struct runs_tree *run,
		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
		  struct ATTRIB **ret)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA &&
		      !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext, is_bad = false;
	bool dirty = false;
	u32 align;
	struct MFT_REC *rec;

again:
	alen = 0;
	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
			      &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto bad_inode;
	}

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
					&attr_b);
		if (err)
			return err;

		/* Return if file is still resident. */
		if (!attr_b->non_res) {
			dirty = true;
			goto ok1;
		}

		/* Layout of records may be changed, so do a full search. */
		goto again;
	}

	is_ext = is_attr_ext(attr_b);
	align = sbi->cluster_size;
	if (is_ext)
		align <<= attr_b->nres.c_unit;

	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);

again_1:
	old_alen = old_alloc >> cluster_bits;

	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && new_size < old_size) {
		attr_b->nres.data_size = cpu_to_le64(new_size);
		mi_b->dirty = dirty = true;
		goto ok;
	}

	vcn = old_alen - 1;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

next_le_1:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}
	/*
	 * Here we have:
	 * attr,mi,le - last attribute segment (containing 'vcn').
	 * attr_b,mi_b,le_b - base (primary) attribute segment.
	 */
next_le:
	rec = mi->mrec;
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (new_size > old_size) {
		CLST to_allocate;
		size_t free;

		if (new_alloc <= old_alloc) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			mi_b->dirty = dirty = true;
			goto ok;
		}

		/*
		 * Add clusters. In the simple case we have to:
		 *  - allocate space (vcn, lcn, len)
		 *  - update packed run in 'mi'
		 *  - update attr->nres.evcn
		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 */
		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
		lcn = 0;
		if (is_mft) {
			/* MFT allocates clusters from MFT zone. */
			pre_alloc = 0;
		} else if (is_ext) {
			/* No preallocation for sparse/compressed. */
			pre_alloc = 0;
		} else if (pre_alloc == -1) {
			pre_alloc = 0;
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
				pre_alloc = bytes_to_cluster(
						    sbi, get_pre_allocated(
								 new_size)) -
					    new_alen;
			}
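
			/*
			 * Example (assuming 4K clusters): growing a
			 * normal file to new_size = 100 KiB makes
			 * get_pre_allocated() return 128 KiB, i.e.
			 * new_alen = 25 and pre_alloc = 7 extra
			 * clusters.
			 */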

			/* Get the last LCN to allocate from. */
			if (old_alen &&
			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
				lcn = SPARSE_LCN;
			}

			if (lcn == SPARSE_LCN)
				lcn = 0;
			else if (lcn)
				lcn += 1;

			free = wnd_zeroes(&sbi->used.bitmap);
			if (to_allocate > free) {
				err = -ENOSPC;
				goto out;
			}

			if (pre_alloc && to_allocate + pre_alloc > free)
				pre_alloc = 0;
		}

		vcn = old_alen;

		if (is_ext) {
			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
					   false)) {
				err = -ENOMEM;
				goto out;
			}
			alen = to_allocate;
		} else {
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
				is_mft ? 0 :
					 (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) /
							 3 +
						 1,
				NULL, NULL);
			if (err)
				goto out;
		}

		done += alen;
		vcn += alen;
		if (to_allocate > alen)
			to_allocate -= alen;
		else
			to_allocate = 0;

pack_runs:
		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		if (err)
			goto undo_1;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
		mi_b->dirty = dirty = true;

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			goto ok;
		}

		/* At least two MFT records allocated - avoid a recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;
			goto ok;
		}

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			old_alen = next_svcn;
			evcn = old_alen - 1;
			goto add_alloc_in_same_attr_seg;
		}

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;

		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);
			if (err)
				goto undo_2;
			if (next_svcn < vcn)
				goto pack_runs;

			/* Layout of records is changed. */
			goto again;
		}

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			/* In case of error, the layout of records is not changed. */
			if (err)
				goto undo_2;
			/* Layout of records is changed. */
		}

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */
			goto again;
		}

		/* Insert new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);

		/*
		 * Layout of records may be changed.
		 * Find base attribute to update.
		 */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
				      NULL, &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. */
			attr = NULL;
			goto undo_2;
		}

		if (!is_mft)
			run_truncate_head(run, evcn + 1);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		/*
		 * Attribute is in a consistent state.
		 * Save this point to restore to if the next steps fail.
		 */
		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
		attr_b->nres.valid_size = attr_b->nres.data_size =
			attr_b->nres.alloc_size = cpu_to_le64(old_size);
		mi_b->dirty = dirty = true;
		goto again_1;
	}

	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		/*
		 * Truncate clusters. In the simple case we have to:
		 *  - update packed run in 'mi'
		 *  - update attr->nres.evcn
		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 *  - mark and trim clusters as free (vcn, lcn, len)
		 */
		CLST dlen = 0;

		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

		if (vcn > svcn) {
			err = mi_pack_runs(mi, attr, run, vcn - svcn);
			if (err)
				goto out;
		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with the last entry
			 * (vcn==0), and it is not the first in the entries
			 * array (the list entry for the std attribute is
			 * always first), so it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto bad_inode;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		} else {
			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
			mi->dirty = true;
		}

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;
		} else {
			if (new_alloc_tmp <=
			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;
		}
		mi_b->dirty = dirty = true;

		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
					true);
		if (err)
			goto out;

		if (is_ext) {
			/* dlen == number of actually deallocated clusters. */
			le64_sub_cpu(&attr_b->nres.total_size,
				     ((u64)dlen << cluster_bits));
		}

		run_truncate(run, vcn);

		if (new_alloc_tmp <= new_alloc)
			goto ok;

		old_size = new_alloc_tmp;
		vcn = svcn - 1;

		if (le == le_b) {
			attr = attr_b;
			mi = mi_b;
			evcn = svcn - 1;
			svcn = 0;
			goto next_le;
		}

		if (le->type != type || le->name_len != name_len ||
		    memcmp(le_name(le), name, name_len * sizeof(short))) {
			err = -EINVAL;
			goto bad_inode;
		}

		err = ni_load_mi(ni, le, &mi);
		if (err)
			goto out;

		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}
		goto next_le_1;
	}

ok:
	if (new_valid) {
		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;
			mi_b->dirty = true;
		}
	}

ok1:
	if (ret)
		*ret = attr_b;

	if (((type == ATTR_DATA && !name_len) ||
	     (type == ATTR_ALLOC && name == I30_NAME))) {
		/* Update inode_set_bytes. */
		if (attr_b->non_res) {
			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
				inode_set_bytes(&ni->vfs_inode, new_alloc);
				dirty = true;
			}
		}

		/* Don't forget to update duplicate information in parent. */
		if (dirty) {
			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			mark_inode_dirty(&ni->vfs_inode);
		}
	}

	return 0;

undo_2:
	vcn -= alen;
	attr_b->nres.data_size = cpu_to_le64(old_size);
	attr_b->nres.valid_size = cpu_to_le64(old_valid);
	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);

	/* Restore 'attr' and 'mi'. */
	if (attr)
		goto restore_run;

	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
				    &svcn, &mi);
		if (!attr)
			goto bad_inode;
	}

restore_run:
	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
		is_bad = true;

undo_1:
	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);

	run_truncate(run, vcn);
out:
	if (is_bad) {
bad_inode:
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	return err;
}

/*
 * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
 *
 * @new == NULL means just get the current mapping for 'vcn'.
 * @new != NULL means allocate a real cluster if 'vcn' maps to a hole.
 * @zero - zero out newly allocated clusters.
 *
 *  NOTE:
 *  - @new != NULL is used only for sparse or compressed attributes.
 *  - Newly allocated clusters are zeroed via blkdev_issue_zeroout.
 */
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new, bool zero)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi;
	u8 cluster_bits;
	struct ATTRIB *attr, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
	CLST alloc, evcn;
	unsigned fr;
	u64 total_size, total_size0;
	int step = 0;

	if (new)
		*new = false;

	/* Try to find in cache. */
	down_read(&ni->file.run_lock);
	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
		*len = 0;
	up_read(&ni->file.run_lock);

	if (*len && (*lcn != SPARSE_LCN || !new))
		return 0; /* Fast normal way without allocation. */

	/* No cluster in cache or we need to allocate a cluster in a hole. */
	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;

	ni_lock(ni);
	down_write(&ni->file.run_lock);

	/* Repeat the code above (under write lock). */
	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
		*len = 0;

	if (*len) {
		if (*lcn != SPARSE_LCN || !new)
			goto out; /* normal way without allocation. */
		if (clen > *len)
			clen = *len;
	}

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		*lcn = RESIDENT_LCN;
		*len = 1;
		goto out;
	}

	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
	if (vcn >= asize) {
		if (new) {
			err = -EINVAL;
		} else {
			*len = 1;
			*lcn = SPARSE_LCN;
		}
		goto out;
	}

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	attr = attr_b;
	le = le_b;
	mi = mi_b;

	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	/* Load the actual information into the cache. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!*len) {
		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
			if (*lcn != SPARSE_LCN || !new)
				goto ok; /* Slow normal way without allocation. */

			if (clen > *len)
				clen = *len;
		} else if (!new) {
			/* Here we may return -ENOENT.
			 * In any case the caller gets zero length. */
			goto ok;
		}
	}

	if (!is_attr_ext(attr_b)) {
		/* The code below is only for sparse or compressed attributes. */
		err = -EINVAL;
		goto out;
	}

	vcn0 = vcn;
	to_alloc = clen;
	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
	/* Allocate frame-aligned clusters.
	 * ntfs.sys usually uses 16 clusters per frame for sparse or compressed.
	 * ntfs3 uses 1 cluster per frame for newly created sparse files. */
	if (attr_b->nres.c_unit) {
		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
		CLST cmask = ~(clst_per_frame - 1);

		/* Get frame aligned vcn and to_alloc. */
		vcn = vcn0 & cmask;
		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
		if (fr < clst_per_frame)
			fr = clst_per_frame;
		zero = true;
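
		/*
		 * For example, with c_unit == 4 (16 clusters per frame), a
		 * request for vcn0 = 21, clen = 2 is widened to vcn = 16,
		 * to_alloc = 16, so the whole frame is allocated and zeroed.
		 */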

		/* Check if 'vcn' and 'vcn0' are in different attribute segments. */
		if (vcn < svcn || evcn1 <= vcn) {
			/* Load attribute for truncated vcn. */
			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
					    &vcn, &mi);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
			err = attr_load_runs(attr, ni, run, NULL);
			if (err)
				goto out;
		}
	}

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	/* Get the last LCN to allocate from. */
	hint = 0;

	if (vcn > evcn1) {
		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
		hint = -1;
	}

	/* Allocate and zeroout new clusters. */
	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
				     fr, lcn, len);
	if (err)
		goto out;
	*new = true;
	step = 1;

	end = vcn + alen;
	/* Save 'total_size0' to restore if error. */
	total_size0 = le64_to_cpu(attr_b->nres.total_size);
	total_size = total_size0 + ((u64)alen << cluster_bits);

	if (vcn != vcn0) {
		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
			err = -EINVAL;
			goto out;
		}
		if (*lcn == SPARSE_LCN) {
			/* Internal error. Should not happen. */
			WARN_ON(1);
			err = -EINVAL;
			goto out;
		}
		/* Check the case when vcn0 + len overlaps newly allocated clusters. */
		if (vcn0 + *len > end)
			*len = end - vcn0;
	}

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto undo1;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	/*
	 * The code below may require an additional cluster (to extend the
	 * attribute list) and/or one MFT record.
	 * It is too complex to undo operations if -ENOSPC occurs deep inside
	 * 'ni_insert_nonresident'.
	 * Return -ENOSPC in advance here if there is no free cluster and no
	 * free MFT record.
	 */
	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
		/* Undo step 1. */
		err = -ENOSPC;
		goto undo1;
	}

	step = 2;
	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (!attr) {
		/* Insert new attribute segment. */
		goto ins_ext;
	}

	/* Try to update an existing attribute segment. */
	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
	evcn = le64_to_cpu(attr->nres.evcn);

	if (end < next_svcn)
		end = next_svcn;
	while (end > evcn) {
		/* Remove segment [svcn : evcn). */
		mi_remove_attr(NULL, mi, attr);

		if (!al_remove_le(ni, le)) {
			err = -EINVAL;
			goto out;
		}

		if (evcn + 1 >= alloc) {
			/* Last attribute segment. */
			evcn1 = evcn + 1;
			goto ins_ext;
		}

		if (ni_load_mi(ni, le, &mi)) {
			attr = NULL;
			goto out;
		}

		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}

	if (end < svcn)
		end = svcn;

	err = attr_load_runs(attr, ni, run, &end);
	if (err)
		goto out;

	evcn1 = evcn + 1;
	attr->nres.svcn = cpu_to_le64(next_svcn);
	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
	if (err)
		goto out;

	le->vcn = cpu_to_le64(next_svcn);
	ni->attr_list.dirty = true;
	mi->dirty = true;
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (err && step > 1) {
		/* Too complex to restore. */
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	up_write(&ni->file.run_lock);
	ni_unlock(ni);

	return err;

undo1:
	/* Undo step 1. */
	attr_b->nres.total_size = cpu_to_le64(total_size0);
	inode_set_bytes(&ni->vfs_inode, total_size0);

	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	goto out;
}

int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
	if (!attr)
		return -EINVAL;

	if (attr->non_res)
		return E_NTFS_NONRESIDENT;

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		const char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;

		memcpy(kaddr, data + vbo, use);
		memset(kaddr + use, 0, PAGE_SIZE - use);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		SetPageUptodate(page);
	} else if (!PageUptodate(page)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	}

	return 0;
}

int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct mft_inode *mi;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -EINVAL;

	if (attr->non_res) {
		/* Return special error code to check this case. */
		return E_NTFS_NONRESIDENT;
	}

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;
		memcpy(data + vbo, kaddr, use);
		kunmap_atomic(kaddr);
		mi->dirty = true;
	}
	ni->i_valid = data_size;

	return 0;
}

/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	struct ATTRIB *attr;
	int err;
	CLST svcn, evcn;
	u16 ro;

	if (!ni) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
	if (!attr) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (evcn < vcn || vcn < svcn) {
		/* Is record corrupted? */
		return -EINVAL;
	}

	ro = le16_to_cpu(attr->nres.run_off);

	if (ro > le32_to_cpu(attr->size))
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
	if (err < 0)
		return err;
	return 0;
}

/*
 * attr_load_runs_range - Load runs for the given range [from, to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn;
	CLST vcn_last = (to - 1) >> cluster_bits;
	CLST lcn, clen;
	int err;

	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
			err = attr_load_runs_vcn(ni, type, name, name_len, run,
						 vcn);
			if (err)
				return err;
			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
		}
	}

	return 0;
}

#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read header of Xpress/LZX file to get info about frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;
	u32 voff;
	u8 bytes_per_off;
	char *addr;
	struct page *page;
	int i, err;
	__le32 *off32;
	__le64 *off64;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with array of 32 bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with array of 64 bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}
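
	/*
	 * For example, for frame 10 of a small (< 4 GiB) file the end offset
	 * lives at byte 40 of the header and the start offset is the 32-bit
	 * value just before it at byte 36 (0 for the very first frame).
	 */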

	/*
	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
	 */
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
			return -EINVAL;
		}
		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
		return 0;
	}

	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	page = ni->file.offs_page;
	if (!page) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		page->index = -1;
		ni->file.offs_page = page;
	}
	lock_page(page);
	addr = page_address(page);

	if (vbo[1]) {
		voff = vbo[1] & (PAGE_SIZE - 1);
		vbo[0] = vbo[1] - bytes_per_off;
		i = 0;
	} else {
		voff = 0;
		vbo[0] = 0;
		off[0] = 0;
		i = 1;
	}

	do {
		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != page->index) {
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,
						   from, to);
			if (err)
				goto out1;

			err = ntfs_bio_pages(sbi, run, &page, 1, from,
					     to - from, REQ_OP_READ);
			if (err) {
				page->index = -1;
				goto out1;
			}
			page->index = index;
		}

		if (i) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[1] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[1] = le64_to_cpu(*off64);
			}
		} else if (!voff) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
				off[0] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
				off[0] = le64_to_cpu(*off64);
			}
		} else {
			/* Two values in one page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[0] = le32_to_cpu(off32[-1]);
				off[1] = le32_to_cpu(off32[0]);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[0] = le64_to_cpu(off64[-1]);
				off[1] = le64_to_cpu(off64[0]);
			}
			break;
		}
	} while (++i < 2);

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];

out1:
	unlock_page(page);
out:
	up_write(&ni->file.run_lock);
	return err;
}
#endif

/*
 * attr_is_frame_compressed - Used to detect a compressed frame.
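 *
 * For example, with c_unit == 4 a frame spans 16 clusters; if the run
 * shows 10 real clusters followed by at least 6 sparse ones, the frame
 * is stored compressed and *clst_data is set to 10.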
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data)
{
	int err;
	u32 clst_frame;
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	size_t idx;
	struct runs_tree *run;

	*clst_data = 0;

	if (!is_attr_compressed(attr))
		return 0;

	if (!attr->non_res)
		return 0;

	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;
	run = &ni->file.run;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);
		if (err)
			return err;

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
			return -EINVAL;
	}

	if (lcn == SPARSE_LCN) {
		/* Sparse frame. */
		return 0;
	}

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed because
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;
		return 0;
	}

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
	slen = 0;
	*clst_data = clen;

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check next fragments.
	 */
	while ((vcn += clen) < alen) {
		vcn_next = vcn;

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn_next != vcn) {
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);
			if (err)
				return err;
			vcn = vcn_next;

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
				return -EINVAL;
		}

		if (lcn == SPARSE_LCN) {
			slen += clen;
		} else {
			if (slen) {
				/*
				 * Data clusters + sparse clusters are
				 * not enough for the frame.
				 */
				return -EINVAL;
			}
			*clst_data += clen;
		}

		if (*clst_data + slen >= clst_frame) {
			if (!slen) {
				/*
				 * There are no sparse clusters in this frame,
				 * so it is not compressed.
				 */
				*clst_data = clst_frame;
			} else {
				/* Frame is compressed. */
			}
			break;
		}
	}

	return 0;
}

/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b))
		return -EINVAL;

	vcn = frame << NTFS_LZNT_CUNIT;
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
	if (err)
		goto out;

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)
		goto out;

	if (len < clst_data) {
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
					NULL, true);
		if (err)
			goto out;

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
		end = vcn + clst_data;
		/* Run contains updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;
		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
				      NULL)) {
			hint = -1;
		}

		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out;

		end = vcn + len;
		/* Run contains updated range [vcn + clst_data : end). */
	}

	total_size += (u64)len << sbi->cluster_bits;

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				attr = NULL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (attr_b) {
		if (new_valid > data_size)
			new_valid = data_size;

		valid_size = le64_to_cpu(attr_b->nres.valid_size);
		if (new_valid != valid_size) {
			attr_b->nres.valid_size = cpu_to_le64(new_valid);
			mi_b->dirty = true;
		}
	}

	return err;
}

/*
 * attr_collapse_range - Collapse range in file.
 */
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, len, dealloc, alen;
	CLST vcn, end;
	u64 valid_size, data_size, alloc_size, total_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Attribute is resident. Nothing to do? */
		return 0;
	}

	data_size = le64_to_cpu(attr_b->nres.data_size);
	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	a_flags = attr_b->flags;

	if (is_attr_ext(attr_b)) {
		total_size = le64_to_cpu(attr_b->nres.total_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	} else {
		total_size = alloc_size;
		mask = sbi->cluster_mask;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Only cluster-aligned ranges can be collapsed. */
		return -EINVAL;
	}
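
	/*
	 * For instance (assuming 4K clusters), collapsing a compressed
	 * attribute with c_unit == 4 requires vbo and bytes to be multiples
	 * of 64 KiB (mask == 0xffff); a plain attribute only needs 4K
	 * alignment.
	 */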

	if (vbo > data_size)
		return -EINVAL;

	down_write(&ni->file.run_lock);

	if (vbo + bytes >= data_size) {
		u64 new_valid = min(ni->i_valid, vbo);

		/* Simple truncate file at 'vbo'. */
		truncate_setsize(&ni->vfs_inode, vbo);
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
				    &new_valid, true, NULL);

		if (!err && new_valid < ni->i_valid)
			ni->i_valid = new_valid;

		goto out;
	}

	/*
	 * Enumerate all attribute segments and collapse.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	for (;;) {
		if (svcn >= end) {
			/* Shift VCNs. */
			attr->nres.svcn = cpu_to_le64(svcn - len);
			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
			if (le) {
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;
			}
			mi->dirty = true;
		} else if (svcn < vcn || end < evcn1) {
			CLST vcn1, eat, next_svcn;

			/* Collapse a part of this attribute segment. */
			err = attr_load_runs(attr, ni, run, &svcn);
			if (err)
				goto out;
			vcn1 = max(vcn, svcn);
			eat = min(end, evcn1) - vcn1;

			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
						true);
			if (err)
				goto out;

			if (!run_collapse_range(run, vcn1, eat)) {
				err = -ENOMEM;
				goto out;
			}

			if (svcn >= vcn) {
				/* Shift VCN */
				attr->nres.svcn = cpu_to_le64(vcn);
				if (le) {
					le->vcn = attr->nres.svcn;
					ni->attr_list.dirty = true;
				}
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
			if (err)
				goto out;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn + eat < evcn1) {
				err = ni_insert_nonresident(
					ni, ATTR_DATA, NULL, 0, run, next_svcn,
					evcn1 - eat - next_svcn, a_flags, &attr,
					&mi, &le);
				if (err)
					goto out;

				/* Layout of records may be changed. */
				attr_b = NULL;
			}

			/* Free all allocated memory. */
			run_truncate(run, 0);
		} else {
			u16 le_sz;
			u16 roff = le16_to_cpu(attr->nres.run_off);

			if (roff > le32_to_cpu(attr->size)) {
				err = -EINVAL;
				goto out;
			}

			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
				      le32_to_cpu(attr->size) - roff);

			/* Delete this attribute segment. */
			mi_remove_attr(NULL, mi, attr);
			if (!le)
				break;

			le_sz = le16_to_cpu(le->size);
			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn1 >= alen)
				break;

			if (!svcn) {
				/* Load next record that contains this attribute. */
				if (ni_load_mi(ni, le, &mi)) {
					err = -EINVAL;
					goto out;
				}

				/* Look for required attribute. */
				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
						    0, &le->id);
				if (!attr) {
					err = -EINVAL;
					goto out;
				}
				goto next_attr;
			}
			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		}

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_attr:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (!attr_b) {
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size -= bytes;
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)
		valid_size = vbo;

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	ni->i_valid = valid_size;
	i_size_write(&ni->vfs_inode, data_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		_ntfs_bad_inode(&ni->vfs_inode);

	return err;
}

/*
 * attr_punch_hole
 *
 * Not for normal files.
 */
int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
	u64 total_size, alloc_size;
	u32 mask;
	__le16 a_flags;
	struct runs_tree run2;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		u32 data_size = le32_to_cpu(attr_b->res.data_size);
		u32 from, to;

		if (vbo > data_size)
			return 0;

		from = vbo;
		to = min_t(u64, vbo + bytes, data_size);
		memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
		return 0;
	}

	if (!is_attr_ext(attr_b))
		return -EOPNOTSUPP;

	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	total_size = le64_to_cpu(attr_b->nres.total_size);

	if (vbo >= alloc_size) {
		/* NOTE: It is allowed. */
		return 0;
	}

	mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;

	bytes += vbo;
	if (bytes > alloc_size)
		bytes = alloc_size;
	bytes -= vbo;

	if ((vbo & mask) || (bytes & mask)) {
		/* We have to zero one or more ranges. */
		if (frame_size == NULL) {
			/* Caller insists range is aligned. */
			return -EINVAL;
		}
		*frame_size = mask + 1;
		return E_NTFS_NOTALIGNED;
	}
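
	/*
	 * A caller that gets E_NTFS_NOTALIGNED back is expected to zero the
	 * unaligned head/tail itself, using the returned *frame_size as the
	 * alignment unit, and retry with an aligned range.
	 */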
2167
2168	down_write(&ni->file.run_lock);
2169	run_init(&run2);
2170	run_truncate(run, 0);
2171
2172	/*
2173	 * Enumerate all attribute segments and punch hole where necessary.
2174	 */
2175	alen = alloc_size >> sbi->cluster_bits;
2176	vcn = vbo >> sbi->cluster_bits;
2177	len = bytes >> sbi->cluster_bits;
2178	end = vcn + len;
	hole = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	a_flags = attr_b->flags;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	while (svcn < end) {
		CLST vcn1, zero, hole2 = hole;

		err = attr_load_runs(attr, ni, run, &svcn);
		if (err)
			goto done;
		vcn1 = max(vcn, svcn);
		zero = min(end, evcn1) - vcn1;

		/*
		 * Check the range [vcn1, vcn1 + zero).
		 * Calculate how many clusters there are.
		 * Don't do any destructive actions.
		 */
		err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
		if (err)
			goto done;

		/* Check if the required range is already a hole. */
		if (hole2 == hole)
			goto next_attr;

		/* Make a clone of the run to be able to undo. */
		err = run_clone(run, &run2);
		if (err)
			goto done;

		/* Make the range [vcn1, vcn1 + zero) a sparse hole. */
		if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
			err = -ENOMEM;
			goto done;
		}

		/* Update the run in the attribute segment. */
		err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
		if (err)
			goto done;
		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		if (next_svcn < evcn1) {
			/* Insert a new attribute segment. */
			err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
						    next_svcn,
						    evcn1 - next_svcn, a_flags,
						    &attr, &mi, &le);
			if (err)
				goto undo_punch;

			/* The layout of records may have changed. */
			attr_b = NULL;
		}

		/* The real deallocation. Should not fail. */
		run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);

next_attr:
		/* Free all allocated memory. */
		run_truncate(run, 0);

		if (evcn1 >= alen)
			break;

		/* Get the next attribute segment. */
		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

done:
	if (!hole)
		goto out;

	if (!attr_b) {
		attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}
	}

	total_size -= (u64)hole << sbi->cluster_bits;
	attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_close(&run2);
	up_write(&ni->file.run_lock);
	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_punch:
	/*
	 * Restore the packed runs.
	 * mi_pack_runs() should not fail here because we restore the
	 * original, already packed, runs.
	 */
	if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
		goto bad_inode;

	goto done;
}
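
/*
 * Illustrative sketch only (not called by the driver): the two-pass
 * pattern attr_punch_hole() uses above. The first pass calls
 * run_deallocate_ex() with a NULL sb-info, which merely counts the
 * clusters that would be freed; only after the on-disk runs have been
 * updated is the call repeated with a real sb-info to free them.
 */
static inline int example_count_punchable(struct runs_tree *run, CLST vcn,
					  CLST len, CLST *count)
{
	/* NULL sbi => dry run: nothing is freed, *count is only advanced. */
	return run_deallocate_ex(NULL, run, vcn, len, count, false);
}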

/*
 * attr_insert_range - Insert a range (hole) into a file.
 * Not for normal files.
 */
int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST vcn, svcn, evcn1, len, next_svcn;
	u64 data_size, alloc_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b)) {
		/* Already checked by the caller. See fallocate. */
		return -EOPNOTSUPP;
	}

	if (!attr_b->non_res) {
		data_size = le32_to_cpu(attr_b->res.data_size);
		alloc_size = data_size;
		mask = sbi->cluster_mask; /* cluster_size - 1 */
	} else {
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	}

	if (vbo > data_size) {
		/* Inserting a range beyond the end of the file is not allowed. */
		return -EINVAL;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Only frame-aligned ranges may be inserted. */
		return -EINVAL;
	}

	/*
	 * valid_size <= data_size <= alloc_size, so checking alloc_size
	 * against the maximum is sufficient. The subtraction form avoids
	 * overflow in 'alloc_size + bytes'.
	 */
	if (bytes > sbi->maxbytes_sparse - alloc_size)
		return -EFBIG;

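	/* Work in whole clusters from here on; alignment was verified above. */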
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;

	down_write(&ni->file.run_lock);

	if (!attr_b->non_res) {
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
				    data_size + bytes, NULL, false, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err)
			goto out;

		if (!attr_b->non_res) {
			/* Still resident. */
			char *data = Add2Ptr(attr_b,
					     le16_to_cpu(attr_b->res.data_off));

			/*
			 * Shift the tail of the old data up by 'bytes'
			 * and zero the inserted range.
			 */
			memmove(data + vbo + bytes, data + vbo,
				data_size - vbo);
			memset(data + vbo, 0, bytes);
			goto done;
		}

		/* The resident attribute has become nonresident. */
		data_size = le64_to_cpu(attr_b->nres.data_size);
		alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	}

	/*
	 * Enumerate all attribute segments and shift the start vcn.
	 */
	a_flags = attr_b->flags;
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	run_truncate(run, 0); /* clear cached values. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!run_insert_range(run, vcn, len)) {
		err = -ENOMEM;
		goto out;
	}

	/* Try to pack as much as possible into the current record. */
	err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
	if (err)
		goto out;

	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

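	/* Shift every following unnamed ATTR_DATA segment up by 'len' clusters. */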
	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_add_cpu(&attr->nres.svcn, len);
		le64_add_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	if (next_svcn < evcn1 + len) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 + len - next_svcn,
					    a_flags, NULL, NULL, NULL);

		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident() failed. Try to undo. */
			goto undo_insert_range;
		}
	}

	/*
	 * Update the primary attribute segment.
	 */
	if (vbo <= ni->i_valid)
		ni->i_valid += bytes;

	attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
	attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);

	/* ni->i_valid may temporarily differ from valid_size. */
	if (ni->i_valid > data_size + bytes)
		attr_b->nres.valid_size = attr_b->nres.data_size;
	else
		attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
	mi_b->dirty = true;

done:
	i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	run_truncate(run, 0); /* clear cached values. */

	up_write(&ni->file.run_lock);

	return err;

bad_inode:
	_ntfs_bad_inode(&ni->vfs_inode);
	goto out;

undo_insert_range:
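	/* Find the segment containing 'vcn' again; the record layout may have changed. */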
	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr)
			goto bad_inode;

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (attr_load_runs(attr, ni, run, NULL))
		goto bad_inode;

	if (!run_collapse_range(run, vcn, len))
		goto bad_inode;

	if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
		goto bad_inode;

	while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
	       attr->type == ATTR_DATA && !attr->name_len) {
		le64_sub_cpu(&attr->nres.svcn, len);
		le64_sub_cpu(&attr->nres.evcn, len);
		if (le) {
			le->vcn = attr->nres.svcn;
			ni->attr_list.dirty = true;
		}
		mi->dirty = true;
	}

	goto out;
}
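
/*
 * Illustrative sketch only (not called by the driver): run_insert_range()
 * and run_collapse_range() are inverses at the runs_tree level, which is
 * what the undo path above relies on. Both return false on failure.
 */
static inline bool example_insert_then_collapse(struct runs_tree *run,
						CLST vcn, CLST len)
{
	/* Open a sparse gap of 'len' clusters at 'vcn' ... */
	if (!run_insert_range(run, vcn, len))
		return false;
	/* ... then remove it again, restoring the original mapping. */
	return run_collapse_range(run, vcn, len);
}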