1/**
2 * attrib.c - NTFS attribute operations.  Part of the Linux-NTFS project.
3 *
4 * Copyright (c) 2001-2006 Anton Altaparmakov
5 * Copyright (c) 2002 Richard Russon
6 *
7 * This program/include file is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as published
9 * by the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program/include file is distributed in the hope that it will be
13 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
14 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program (in the main directory of the Linux-NTFS
19 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 */
22
23#include <linux/buffer_head.h>
24#include <linux/sched.h>
25#include <linux/swap.h>
26#include <linux/writeback.h>
27
28#include "attrib.h"
29#include "debug.h"
30#include "layout.h"
31#include "lcnalloc.h"
32#include "malloc.h"
33#include "mft.h"
34#include "ntfs.h"
35#include "types.h"
36
37/**
38 * ntfs_map_runlist_nolock - map (a part of) a runlist of an ntfs inode
39 * @ni:		ntfs inode for which to map (part of) a runlist
40 * @vcn:	map runlist part containing this vcn
41 * @ctx:	active attribute search context if present or NULL if not
42 *
43 * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
44 *
45 * If @ctx is specified, it is an active search context of @ni and its base mft
46 * record.  This is needed when ntfs_map_runlist_nolock() encounters unmapped
47 * runlist fragments and allows their mapping.  If you do not have the mft
48 * record mapped, you can specify @ctx as NULL and ntfs_map_runlist_nolock()
49 * will perform the necessary mapping and unmapping.
50 *
51 * Note, ntfs_map_runlist_nolock() saves the state of @ctx on entry and
52 * restores it before returning.  Thus, @ctx will be left pointing to the same
53 * attribute on return as on entry.  However, the actual pointers in @ctx may
54 * point to different memory locations on return, so you must remember to reset
55 * any cached pointers from the @ctx, i.e. after the call to
56 * ntfs_map_runlist_nolock(), you will probably want to do:
57 *	m = ctx->mrec;
58 *	a = ctx->attr;
59 * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
60 * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
61 *
62 * Return 0 on success and -errno on error.  There is one special error code
63 * which is not an error as such.  This is -ENOENT.  It means that @vcn is out
64 * of bounds of the runlist.
65 *
66 * Note the runlist can be NULL after this function returns if @vcn is zero and
67 * the attribute has zero allocated size, i.e. there simply is no runlist.
68 *
69 * WARNING: If @ctx is supplied, regardless of whether success or failure is
70 *	    returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
71 *	    is no longer valid, i.e. you need to either call
72 *	    ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
73 *	    In that case PTR_ERR(@ctx->mrec) will give you the error code for
74 *	    why the mapping of the old inode failed.
75 *
76 * Locking: - The runlist described by @ni must be locked for writing on entry
77 *	      and is locked on return.  Note the runlist will be modified.
78 *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
79 *	      entry and it will be left unmapped on return.
80 *	    - If @ctx is not NULL, the base mft record must be mapped on entry
81 *	      and it will be left mapped on return.
82 */
83int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx)
84{
85	VCN end_vcn;
86	unsigned long flags;
87	ntfs_inode *base_ni;
88	MFT_RECORD *m;
89	ATTR_RECORD *a;
90	runlist_element *rl;
91	struct page *put_this_page = NULL;
92	int err = 0;
93	bool ctx_is_temporary, ctx_needs_reset;
94	ntfs_attr_search_ctx old_ctx = { NULL, };
95
96	ntfs_debug("Mapping runlist part containing vcn 0x%llx.",
97			(unsigned long long)vcn);
98	if (!NInoAttr(ni))
99		base_ni = ni;
100	else
101		base_ni = ni->ext.base_ntfs_ino;
102	if (!ctx) {
103		ctx_is_temporary = ctx_needs_reset = true;
104		m = map_mft_record(base_ni);
105		if (IS_ERR(m))
106			return PTR_ERR(m);
107		ctx = ntfs_attr_get_search_ctx(base_ni, m);
108		if (unlikely(!ctx)) {
109			err = -ENOMEM;
110			goto err_out;
111		}
112	} else {
113		VCN allocated_size_vcn;
114
115		BUG_ON(IS_ERR(ctx->mrec));
116		a = ctx->attr;
117		BUG_ON(!a->non_resident);
118		ctx_is_temporary = false;
119		end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
120		read_lock_irqsave(&ni->size_lock, flags);
121		allocated_size_vcn = ni->allocated_size >>
122				ni->vol->cluster_size_bits;
123		read_unlock_irqrestore(&ni->size_lock, flags);
124		if (!a->data.non_resident.lowest_vcn && end_vcn <= 0)
125			end_vcn = allocated_size_vcn - 1;
126		/*
127		 * If we already have the attribute extent containing @vcn in
128		 * @ctx, no need to look it up again.  We slightly cheat in
129		 * that if vcn exceeds the allocated size, we will refuse to
130		 * map the runlist below, so there is definitely no need to get
131		 * the right attribute extent.
132		 */
133		if (vcn >= allocated_size_vcn || (a->type == ni->type &&
134				a->name_length == ni->name_len &&
135				!memcmp((u8*)a + le16_to_cpu(a->name_offset),
136				ni->name, ni->name_len) &&
137				sle64_to_cpu(a->data.non_resident.lowest_vcn)
138				<= vcn && end_vcn >= vcn))
139			ctx_needs_reset = false;
140		else {
141			/* Save the old search context. */
142			old_ctx = *ctx;
143			/*
144			 * If the currently mapped (extent) inode is not the
145			 * base inode we will unmap it when we reinitialize the
146			 * search context which means we need to get a
147			 * reference to the page containing the mapped mft
148			 * record so we do not accidentally drop changes to the
149			 * mft record when it has not been marked dirty yet.
150			 */
151			if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino !=
152					old_ctx.base_ntfs_ino) {
153				put_this_page = old_ctx.ntfs_ino->page;
154				page_cache_get(put_this_page);
155			}
156			/*
157			 * Reinitialize the search context so we can lookup the
158			 * needed attribute extent.
159			 */
160			ntfs_attr_reinit_search_ctx(ctx);
161			ctx_needs_reset = true;
162		}
163	}
164	if (ctx_needs_reset) {
165		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
166				CASE_SENSITIVE, vcn, NULL, 0, ctx);
167		if (unlikely(err)) {
168			if (err == -ENOENT)
169				err = -EIO;
170			goto err_out;
171		}
172		BUG_ON(!ctx->attr->non_resident);
173	}
174	a = ctx->attr;
175	/*
176	 * Only decompress the mapping pairs if @vcn is inside it.  Otherwise
177	 * we get into problems when we try to map an out of bounds vcn because
178	 * we then try to map the already mapped runlist fragment and
179	 * ntfs_mapping_pairs_decompress() fails.
180	 */
181	end_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn) + 1;
182	if (!a->data.non_resident.lowest_vcn && end_vcn == 1)
183		end_vcn = sle64_to_cpu(a->data.non_resident.allocated_size) >>
184				ni->vol->cluster_size_bits;
185	if (unlikely(vcn >= end_vcn)) {
186		err = -ENOENT;
187		goto err_out;
188	}
189	rl = ntfs_mapping_pairs_decompress(ni->vol, a, ni->runlist.rl);
190	if (IS_ERR(rl))
191		err = PTR_ERR(rl);
192	else
193		ni->runlist.rl = rl;
194err_out:
195	if (ctx_is_temporary) {
196		if (likely(ctx))
197			ntfs_attr_put_search_ctx(ctx);
198		unmap_mft_record(base_ni);
199	} else if (ctx_needs_reset) {
200		/*
201		 * If there is no attribute list, restoring the search context
		 * is accomplished simply by copying the saved context back over
203		 * the caller supplied context.  If there is an attribute list,
204		 * things are more complicated as we need to deal with mapping
205		 * of mft records and resulting potential changes in pointers.
206		 */
207		if (NInoAttrList(base_ni)) {
208			/*
209			 * If the currently mapped (extent) inode is not the
210			 * one we had before, we need to unmap it and map the
211			 * old one.
212			 */
213			if (ctx->ntfs_ino != old_ctx.ntfs_ino) {
214				/*
215				 * If the currently mapped inode is not the
216				 * base inode, unmap it.
217				 */
218				if (ctx->base_ntfs_ino && ctx->ntfs_ino !=
219						ctx->base_ntfs_ino) {
220					unmap_extent_mft_record(ctx->ntfs_ino);
221					ctx->mrec = ctx->base_mrec;
222					BUG_ON(!ctx->mrec);
223				}
224				/*
225				 * If the old mapped inode is not the base
226				 * inode, map it.
227				 */
228				if (old_ctx.base_ntfs_ino &&
229						old_ctx.ntfs_ino !=
230						old_ctx.base_ntfs_ino) {
231retry_map:
232					ctx->mrec = map_mft_record(
233							old_ctx.ntfs_ino);
234					/*
235					 * Something bad has happened.  If out
236					 * of memory retry till it succeeds.
237					 * Any other errors are fatal and we
238					 * return the error code in ctx->mrec.
239					 * Let the caller deal with it...  We
240					 * just need to fudge things so the
241					 * caller can reinit and/or put the
242					 * search context safely.
243					 */
244					if (IS_ERR(ctx->mrec)) {
245						if (PTR_ERR(ctx->mrec) ==
246								-ENOMEM) {
247							schedule();
248							goto retry_map;
249						} else
250							old_ctx.ntfs_ino =
251								old_ctx.
252								base_ntfs_ino;
253					}
254				}
255			}
256			/* Update the changed pointers in the saved context. */
257			if (ctx->mrec != old_ctx.mrec) {
258				if (!IS_ERR(ctx->mrec))
259					old_ctx.attr = (ATTR_RECORD*)(
260							(u8*)ctx->mrec +
261							((u8*)old_ctx.attr -
262							(u8*)old_ctx.mrec));
263				old_ctx.mrec = ctx->mrec;
264			}
265		}
266		/* Restore the search context to the saved one. */
267		*ctx = old_ctx;
268		/*
269		 * We drop the reference on the page we took earlier.  In the
270		 * case that IS_ERR(ctx->mrec) is true this means we might lose
271		 * some changes to the mft record that had been made between
272		 * the last time it was marked dirty/written out and now.  This
273		 * at this stage is not a problem as the mapping error is fatal
274		 * enough that the mft record cannot be written out anyway and
275		 * the caller is very likely to shutdown the whole inode
276		 * immediately and mark the volume dirty for chkdsk to pick up
277		 * the pieces anyway.
278		 */
279		if (put_this_page)
280			page_cache_release(put_this_page);
281	}
282	return err;
283}
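
/*
 * Hedged usage sketch (not called anywhere in the driver): a caller that
 * supplies its own @ctx, as described in the warning above, re-checks the
 * context and refreshes its cached pointers after the call.  Variable names
 * are illustrative only.
 *
 *	err = ntfs_map_runlist_nolock(ni, vcn, ctx);
 *	if (IS_ERR(ctx->mrec)) {
 *		err = PTR_ERR(ctx->mrec);
 *		ntfs_attr_put_search_ctx(ctx);
 *		return err;
 *	}
 *	m = ctx->mrec;
 *	a = ctx->attr;
 */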
284
285/**
286 * ntfs_map_runlist - map (a part of) a runlist of an ntfs inode
287 * @ni:		ntfs inode for which to map (part of) a runlist
288 * @vcn:	map runlist part containing this vcn
289 *
290 * Map the part of a runlist containing the @vcn of the ntfs inode @ni.
291 *
292 * Return 0 on success and -errno on error.  There is one special error code
293 * which is not an error as such.  This is -ENOENT.  It means that @vcn is out
294 * of bounds of the runlist.
295 *
296 * Locking: - The runlist must be unlocked on entry and is unlocked on return.
297 *	    - This function takes the runlist lock for writing and may modify
298 *	      the runlist.
299 */
300int ntfs_map_runlist(ntfs_inode *ni, VCN vcn)
301{
302	int err = 0;
303
304	down_write(&ni->runlist.lock);
305	/* Make sure someone else didn't do the work while we were sleeping. */
306	if (likely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) <=
307			LCN_RL_NOT_MAPPED))
308		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
309	up_write(&ni->runlist.lock);
310	return err;
311}
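
/*
 * Minimal sketch of a typical call (the caller must not hold the runlist
 * lock, as documented above):
 *
 *	err = ntfs_map_runlist(ni, vcn);
 *	if (err && err != -ENOENT)
 *		return err;	(-ENOENT only means @vcn is past the end)
 */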
312
313/**
314 * ntfs_attr_vcn_to_lcn_nolock - convert a vcn into a lcn given an ntfs inode
315 * @ni:			ntfs inode of the attribute whose runlist to search
316 * @vcn:		vcn to convert
317 * @write_locked:	true if the runlist is locked for writing
318 *
319 * Find the virtual cluster number @vcn in the runlist of the ntfs attribute
320 * described by the ntfs inode @ni and return the corresponding logical cluster
321 * number (lcn).
322 *
323 * If the @vcn is not mapped yet, the attempt is made to map the attribute
324 * extent containing the @vcn and the vcn to lcn conversion is retried.
325 *
326 * If @write_locked is true the caller has locked the runlist for writing and
327 * if false for reading.
328 *
329 * Since lcns must be >= 0, we use negative return codes with special meaning:
330 *
331 * Return code	Meaning / Description
332 * ==========================================
333 *  LCN_HOLE	Hole / not allocated on disk.
334 *  LCN_ENOENT	There is no such vcn in the runlist, i.e. @vcn is out of bounds.
335 *  LCN_ENOMEM	Not enough memory to map runlist.
336 *  LCN_EIO	Critical error (runlist/file is corrupt, i/o error, etc).
337 *
338 * Locking: - The runlist must be locked on entry and is left locked on return.
339 *	    - If @write_locked is 'false', i.e. the runlist is locked for reading,
340 *	      the lock may be dropped inside the function so you cannot rely on
341 *	      the runlist still being the same when this function returns.
342 */
343LCN ntfs_attr_vcn_to_lcn_nolock(ntfs_inode *ni, const VCN vcn,
344		const bool write_locked)
345{
346	LCN lcn;
347	unsigned long flags;
348	bool is_retry = false;
349
	BUG_ON(!ni);
	ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, %s_locked.",
			ni->mft_no, (unsigned long long)vcn,
			write_locked ? "write" : "read");
354	BUG_ON(!NInoNonResident(ni));
355	BUG_ON(vcn < 0);
356	if (!ni->runlist.rl) {
357		read_lock_irqsave(&ni->size_lock, flags);
358		if (!ni->allocated_size) {
359			read_unlock_irqrestore(&ni->size_lock, flags);
360			return LCN_ENOENT;
361		}
362		read_unlock_irqrestore(&ni->size_lock, flags);
363	}
364retry_remap:
365	/* Convert vcn to lcn.  If that fails map the runlist and retry once. */
366	lcn = ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn);
367	if (likely(lcn >= LCN_HOLE)) {
368		ntfs_debug("Done, lcn 0x%llx.", (long long)lcn);
369		return lcn;
370	}
371	if (lcn != LCN_RL_NOT_MAPPED) {
372		if (lcn != LCN_ENOENT)
373			lcn = LCN_EIO;
374	} else if (!is_retry) {
375		int err;
376
377		if (!write_locked) {
378			up_read(&ni->runlist.lock);
379			down_write(&ni->runlist.lock);
380			if (unlikely(ntfs_rl_vcn_to_lcn(ni->runlist.rl, vcn) !=
381					LCN_RL_NOT_MAPPED)) {
382				up_write(&ni->runlist.lock);
383				down_read(&ni->runlist.lock);
384				goto retry_remap;
385			}
386		}
387		err = ntfs_map_runlist_nolock(ni, vcn, NULL);
388		if (!write_locked) {
389			up_write(&ni->runlist.lock);
390			down_read(&ni->runlist.lock);
391		}
392		if (likely(!err)) {
393			is_retry = true;
394			goto retry_remap;
395		}
396		if (err == -ENOENT)
397			lcn = LCN_ENOENT;
398		else if (err == -ENOMEM)
399			lcn = LCN_ENOMEM;
400		else
401			lcn = LCN_EIO;
402	}
403	if (lcn != LCN_ENOENT)
404		ntfs_error(ni->vol->sb, "Failed with error code %lli.",
405				(long long)lcn);
406	return lcn;
407}
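
/*
 * Hedged sketch of a read-locked caller that distinguishes holes from real
 * errors using the return codes listed above.  Note the read lock may have
 * been dropped and reacquired inside the call.  Variable names are
 * illustrative only.
 *
 *	down_read(&ni->runlist.lock);
 *	lcn = ntfs_attr_vcn_to_lcn_nolock(ni, vcn, false);
 *	up_read(&ni->runlist.lock);
 *	if (lcn >= 0)
 *		block = lcn << vol->cluster_size_bits >> blocksize_bits;
 *	else if (lcn == LCN_HOLE)
 *		(sparse region: return zeroes on read)
 *	else
 *		(LCN_ENOENT, LCN_ENOMEM and LCN_EIO are treated as errors)
 */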
408
409/**
410 * ntfs_attr_find_vcn_nolock - find a vcn in the runlist of an ntfs inode
411 * @ni:		ntfs inode describing the runlist to search
412 * @vcn:	vcn to find
413 * @ctx:	active attribute search context if present or NULL if not
414 *
415 * Find the virtual cluster number @vcn in the runlist described by the ntfs
416 * inode @ni and return the address of the runlist element containing the @vcn.
417 *
418 * If the @vcn is not mapped yet, the attempt is made to map the attribute
419 * extent containing the @vcn and the vcn to lcn conversion is retried.
420 *
421 * If @ctx is specified, it is an active search context of @ni and its base mft
422 * record.  This is needed when ntfs_attr_find_vcn_nolock() encounters unmapped
423 * runlist fragments and allows their mapping.  If you do not have the mft
424 * record mapped, you can specify @ctx as NULL and ntfs_attr_find_vcn_nolock()
425 * will perform the necessary mapping and unmapping.
426 *
427 * Note, ntfs_attr_find_vcn_nolock() saves the state of @ctx on entry and
428 * restores it before returning.  Thus, @ctx will be left pointing to the same
429 * attribute on return as on entry.  However, the actual pointers in @ctx may
430 * point to different memory locations on return, so you must remember to reset
431 * any cached pointers from the @ctx, i.e. after the call to
432 * ntfs_attr_find_vcn_nolock(), you will probably want to do:
433 *	m = ctx->mrec;
434 *	a = ctx->attr;
435 * Assuming you cache ctx->attr in a variable @a of type ATTR_RECORD * and that
436 * you cache ctx->mrec in a variable @m of type MFT_RECORD *.
437 * Note you need to distinguish between the lcn of the returned runlist element
 * being >= 0 and LCN_HOLE.  In the latter case you have to return zeroes on
439 * read and allocate clusters on write.
440 *
441 * Return the runlist element containing the @vcn on success and
442 * ERR_PTR(-errno) on error.  You need to test the return value with IS_ERR()
443 * to decide if the return is success or failure and PTR_ERR() to get to the
444 * error code if IS_ERR() is true.
445 *
446 * The possible error return codes are:
447 *	-ENOENT - No such vcn in the runlist, i.e. @vcn is out of bounds.
448 *	-ENOMEM - Not enough memory to map runlist.
449 *	-EIO	- Critical error (runlist/file is corrupt, i/o error, etc).
450 *
451 * WARNING: If @ctx is supplied, regardless of whether success or failure is
452 *	    returned, you need to check IS_ERR(@ctx->mrec) and if 'true' the @ctx
453 *	    is no longer valid, i.e. you need to either call
454 *	    ntfs_attr_reinit_search_ctx() or ntfs_attr_put_search_ctx() on it.
455 *	    In that case PTR_ERR(@ctx->mrec) will give you the error code for
456 *	    why the mapping of the old inode failed.
457 *
458 * Locking: - The runlist described by @ni must be locked for writing on entry
459 *	      and is locked on return.  Note the runlist may be modified when
460 *	      needed runlist fragments need to be mapped.
461 *	    - If @ctx is NULL, the base mft record of @ni must not be mapped on
462 *	      entry and it will be left unmapped on return.
463 *	    - If @ctx is not NULL, the base mft record must be mapped on entry
464 *	      and it will be left mapped on return.
465 */
466runlist_element *ntfs_attr_find_vcn_nolock(ntfs_inode *ni, const VCN vcn,
467		ntfs_attr_search_ctx *ctx)
468{
469	unsigned long flags;
470	runlist_element *rl;
471	int err = 0;
472	bool is_retry = false;
473
	BUG_ON(!ni);
	ntfs_debug("Entering for i_ino 0x%lx, vcn 0x%llx, with%s ctx.",
			ni->mft_no, (unsigned long long)vcn, ctx ? "" : "out");
477	BUG_ON(!NInoNonResident(ni));
478	BUG_ON(vcn < 0);
479	if (!ni->runlist.rl) {
480		read_lock_irqsave(&ni->size_lock, flags);
481		if (!ni->allocated_size) {
482			read_unlock_irqrestore(&ni->size_lock, flags);
483			return ERR_PTR(-ENOENT);
484		}
485		read_unlock_irqrestore(&ni->size_lock, flags);
486	}
487retry_remap:
488	rl = ni->runlist.rl;
489	if (likely(rl && vcn >= rl[0].vcn)) {
490		while (likely(rl->length)) {
491			if (unlikely(vcn < rl[1].vcn)) {
492				if (likely(rl->lcn >= LCN_HOLE)) {
493					ntfs_debug("Done.");
494					return rl;
495				}
496				break;
497			}
498			rl++;
499		}
500		if (likely(rl->lcn != LCN_RL_NOT_MAPPED)) {
501			if (likely(rl->lcn == LCN_ENOENT))
502				err = -ENOENT;
503			else
504				err = -EIO;
505		}
506	}
507	if (!err && !is_retry) {
508		/*
509		 * If the search context is invalid we cannot map the unmapped
510		 * region.
511		 */
		if (ctx && IS_ERR(ctx->mrec))
513			err = PTR_ERR(ctx->mrec);
514		else {
515			/*
516			 * The @vcn is in an unmapped region, map the runlist
517			 * and retry.
518			 */
519			err = ntfs_map_runlist_nolock(ni, vcn, ctx);
520			if (likely(!err)) {
521				is_retry = true;
522				goto retry_remap;
523			}
524		}
525		if (err == -EINVAL)
526			err = -EIO;
527	} else if (!err)
528		err = -EIO;
529	if (err != -ENOENT)
530		ntfs_error(ni->vol->sb, "Failed with error code %i.", err);
531	return ERR_PTR(err);
532}
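
/*
 * Illustrative sketch, following the note above about distinguishing holes
 * from allocated runs:
 *
 *	rl = ntfs_attr_find_vcn_nolock(ni, vcn, ctx);
 *	if (IS_ERR(rl))
 *		return PTR_ERR(rl);
 *	if (rl->lcn == LCN_HOLE)
 *		(return zeroes on read, allocate clusters on write)
 *	else
 *		lcn = rl->lcn + (vcn - rl->vcn);
 */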
533
534/**
535 * ntfs_attr_find - find (next) attribute in mft record
536 * @type:	attribute type to find
537 * @name:	attribute name to find (optional, i.e. NULL means don't care)
538 * @name_len:	attribute name length (only needed if @name present)
539 * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
540 * @val:	attribute value to find (optional, resident attributes only)
541 * @val_len:	attribute value length
542 * @ctx:	search context with mft record and attribute to search from
543 *
544 * You should not need to call this function directly.  Use ntfs_attr_lookup()
545 * instead.
546 *
547 * ntfs_attr_find() takes a search context @ctx as parameter and searches the
548 * mft record specified by @ctx->mrec, beginning at @ctx->attr, for an
549 * attribute of @type, optionally @name and @val.
550 *
551 * If the attribute is found, ntfs_attr_find() returns 0 and @ctx->attr will
552 * point to the found attribute.
553 *
554 * If the attribute is not found, ntfs_attr_find() returns -ENOENT and
555 * @ctx->attr will point to the attribute before which the attribute being
556 * searched for would need to be inserted if such an action were to be desired.
557 *
558 * On actual error, ntfs_attr_find() returns -EIO.  In this case @ctx->attr is
559 * undefined and in particular do not rely on it not changing.
560 *
561 * If @ctx->is_first is 'true', the search begins with @ctx->attr itself.  If it
562 * is 'false', the search begins after @ctx->attr.
563 *
 * If @ic is IGNORE_CASE, the @name comparison is not case sensitive and
565 * @ctx->ntfs_ino must be set to the ntfs inode to which the mft record
566 * @ctx->mrec belongs.  This is so we can get at the ntfs volume and hence at
567 * the upcase table.  If @ic is CASE_SENSITIVE, the comparison is case
568 * sensitive.  When @name is present, @name_len is the @name length in Unicode
569 * characters.
570 *
571 * If @name is not present (NULL), we assume that the unnamed attribute is
572 * being searched for.
573 *
574 * Finally, the resident attribute value @val is looked for, if present.  If
575 * @val is not present (NULL), @val_len is ignored.
576 *
577 * ntfs_attr_find() only searches the specified mft record and it ignores the
578 * presence of an attribute list attribute (unless it is the one being searched
579 * for, obviously).  If you need to take attribute lists into consideration,
580 * use ntfs_attr_lookup() instead (see below).  This also means that you cannot
581 * use ntfs_attr_find() to search for extent records of non-resident
582 * attributes, as extents with lowest_vcn != 0 are usually described by the
583 * attribute list attribute only. - Note that it is possible that the first
584 * extent is only in the attribute list while the last extent is in the base
585 * mft record, so do not rely on being able to find the first extent in the
586 * base mft record.
587 *
588 * Warning: Never use @val when looking for attribute types which can be
589 *	    non-resident as this most likely will result in a crash!
590 */
591static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
592		const u32 name_len, const IGNORE_CASE_BOOL ic,
593		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
594{
595	ATTR_RECORD *a;
596	ntfs_volume *vol = ctx->ntfs_ino->vol;
597	ntfschar *upcase = vol->upcase;
598	u32 upcase_len = vol->upcase_len;
599
600	/*
	 * Iterate over attributes in mft record starting at @ctx->attr if
	 * @ctx->is_first is 'true', or at the attribute following it otherwise.
603	 */
604	if (ctx->is_first) {
605		a = ctx->attr;
606		ctx->is_first = false;
607	} else
608		a = (ATTR_RECORD*)((u8*)ctx->attr +
609				le32_to_cpu(ctx->attr->length));
610	for (;;	a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
611		if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
612				le32_to_cpu(ctx->mrec->bytes_allocated))
613			break;
614		ctx->attr = a;
615		if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
616				a->type == AT_END))
617			return -ENOENT;
618		if (unlikely(!a->length))
619			break;
620		if (a->type != type)
621			continue;
622		/*
623		 * If @name is present, compare the two names.  If @name is
624		 * missing, assume we want an unnamed attribute.
625		 */
626		if (!name) {
627			/* The search failed if the found attribute is named. */
628			if (a->name_length)
629				return -ENOENT;
630		} else if (!ntfs_are_names_equal(name, name_len,
631			    (ntfschar*)((u8*)a + le16_to_cpu(a->name_offset)),
632			    a->name_length, ic, upcase, upcase_len)) {
633			register int rc;
634
635			rc = ntfs_collate_names(name, name_len,
636					(ntfschar*)((u8*)a +
637					le16_to_cpu(a->name_offset)),
638					a->name_length, 1, IGNORE_CASE,
639					upcase, upcase_len);
640			/*
641			 * If @name collates before a->name, there is no
642			 * matching attribute.
643			 */
644			if (rc == -1)
645				return -ENOENT;
646			/* If the strings are not equal, continue search. */
647			if (rc)
648				continue;
649			rc = ntfs_collate_names(name, name_len,
650					(ntfschar*)((u8*)a +
651					le16_to_cpu(a->name_offset)),
652					a->name_length, 1, CASE_SENSITIVE,
653					upcase, upcase_len);
654			if (rc == -1)
655				return -ENOENT;
656			if (rc)
657				continue;
658		}
659		/*
660		 * The names match or @name not present and attribute is
661		 * unnamed.  If no @val specified, we have found the attribute
662		 * and are done.
663		 */
664		if (!val)
665			return 0;
666		/* @val is present; compare values. */
667		else {
668			register int rc;
669
670			rc = memcmp(val, (u8*)a + le16_to_cpu(
671					a->data.resident.value_offset),
672					min_t(u32, val_len, le32_to_cpu(
673					a->data.resident.value_length)));
674			/*
675			 * If @val collates before the current attribute's
676			 * value, there is no matching attribute.
677			 */
678			if (!rc) {
679				register u32 avl;
680
681				avl = le32_to_cpu(
682						a->data.resident.value_length);
683				if (val_len == avl)
684					return 0;
685				if (val_len < avl)
686					return -ENOENT;
687			} else if (rc < 0)
688				return -ENOENT;
689		}
690	}
691	ntfs_error(vol->sb, "Inode is corrupt.  Run chkdsk.");
692	NVolSetErrors(vol);
693	return -EIO;
694}
695
696/**
697 * load_attribute_list - load an attribute list into memory
698 * @vol:		ntfs volume from which to read
699 * @runlist:		runlist of the attribute list
700 * @al_start:		destination buffer
701 * @size:		size of the destination buffer in bytes
702 * @initialized_size:	initialized size of the attribute list
703 *
704 * Walk the runlist @runlist and load all clusters from it copying them into
 * the linear buffer @al_start.  The maximum number of bytes copied is @size
706 * bytes. Note, @size does not need to be a multiple of the cluster size. If
 * @initialized_size is less than @size, the region in @al_start between
708 * @initialized_size and @size will be zeroed and not read from disk.
709 *
710 * Return 0 on success or -errno on error.
711 */
712int load_attribute_list(ntfs_volume *vol, runlist *runlist, u8 *al_start,
713		const s64 size, const s64 initialized_size)
714{
715	LCN lcn;
716	u8 *al = al_start;
717	u8 *al_end = al + initialized_size;
718	runlist_element *rl;
719	struct buffer_head *bh;
720	struct super_block *sb;
721	unsigned long block_size;
722	unsigned long block, max_block;
723	int err = 0;
724	unsigned char block_size_bits;
725
726	ntfs_debug("Entering.");
727	if (!vol || !runlist || !al || size <= 0 || initialized_size < 0 ||
728			initialized_size > size)
729		return -EINVAL;
730	if (!initialized_size) {
731		memset(al, 0, size);
732		return 0;
733	}
734	sb = vol->sb;
735	block_size = sb->s_blocksize;
736	block_size_bits = sb->s_blocksize_bits;
737	down_read(&runlist->lock);
738	rl = runlist->rl;
739	if (!rl) {
740		ntfs_error(sb, "Cannot read attribute list since runlist is "
741				"missing.");
742		goto err_out;
743	}
744	/* Read all clusters specified by the runlist one run at a time. */
745	while (rl->length) {
746		lcn = ntfs_rl_vcn_to_lcn(rl, rl->vcn);
747		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
748				(unsigned long long)rl->vcn,
749				(unsigned long long)lcn);
750		/* The attribute list cannot be sparse. */
751		if (lcn < 0) {
752			ntfs_error(sb, "ntfs_rl_vcn_to_lcn() failed.  Cannot "
753					"read attribute list.");
754			goto err_out;
755		}
756		block = lcn << vol->cluster_size_bits >> block_size_bits;
757		/* Read the run from device in chunks of block_size bytes. */
758		max_block = block + (rl->length << vol->cluster_size_bits >>
759				block_size_bits);
760		ntfs_debug("max_block = 0x%lx.", max_block);
761		do {
762			ntfs_debug("Reading block = 0x%lx.", block);
763			bh = sb_bread(sb, block);
764			if (!bh) {
765				ntfs_error(sb, "sb_bread() failed. Cannot "
766						"read attribute list.");
767				goto err_out;
768			}
769			if (al + block_size >= al_end)
770				goto do_final;
771			memcpy(al, bh->b_data, block_size);
772			brelse(bh);
773			al += block_size;
774		} while (++block < max_block);
775		rl++;
776	}
777	if (initialized_size < size) {
778initialize:
779		memset(al_start + initialized_size, 0, size - initialized_size);
780	}
781done:
782	up_read(&runlist->lock);
783	return err;
784do_final:
785	if (al < al_end) {
786		/*
787		 * Partial block.
788		 *
789		 * Note: The attribute list can be smaller than its allocation
790		 * by multiple clusters.  This has been encountered by at least
791		 * two people running Windows XP, thus we cannot do any
792		 * truncation sanity checking here. (AIA)
793		 */
794		memcpy(al, bh->b_data, al_end - al);
795		brelse(bh);
796		if (initialized_size < size)
797			goto initialize;
798		goto done;
799	}
800	brelse(bh);
801	/* Real overflow! */
802	ntfs_error(sb, "Attribute list buffer overflow. Read attribute list "
803			"is truncated.");
804err_out:
805	err = -EIO;
806	goto done;
807}
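
/*
 * Hedged sketch of a typical caller, which allocates the destination buffer
 * and then loads the attribute list into it (sizes and field names follow the
 * usual ntfs inode layout but are illustrative here):
 *
 *	al = ntfs_malloc_nofs(al_size);
 *	if (!al)
 *		return -ENOMEM;
 *	err = load_attribute_list(vol, &ni->attr_list_rl, al, al_size,
 *			init_size);
 */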
808
809/**
810 * ntfs_external_attr_find - find an attribute in the attribute list of an inode
811 * @type:	attribute type to find
812 * @name:	attribute name to find (optional, i.e. NULL means don't care)
813 * @name_len:	attribute name length (only needed if @name present)
814 * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
815 * @lowest_vcn:	lowest vcn to find (optional, non-resident attributes only)
816 * @val:	attribute value to find (optional, resident attributes only)
817 * @val_len:	attribute value length
818 * @ctx:	search context with mft record and attribute to search from
819 *
820 * You should not need to call this function directly.  Use ntfs_attr_lookup()
821 * instead.
822 *
823 * Find an attribute by searching the attribute list for the corresponding
824 * attribute list entry.  Having found the entry, map the mft record if the
825 * attribute is in a different mft record/inode, ntfs_attr_find() the attribute
826 * in there and return it.
827 *
828 * On first search @ctx->ntfs_ino must be the base mft record and @ctx must
829 * have been obtained from a call to ntfs_attr_get_search_ctx().  On subsequent
830 * calls @ctx->ntfs_ino can be any extent inode, too (@ctx->base_ntfs_ino is
831 * then the base inode).
832 *
833 * After finishing with the attribute/mft record you need to call
834 * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
835 * mapped inodes, etc).
836 *
837 * If the attribute is found, ntfs_external_attr_find() returns 0 and
838 * @ctx->attr will point to the found attribute.  @ctx->mrec will point to the
839 * mft record in which @ctx->attr is located and @ctx->al_entry will point to
840 * the attribute list entry for the attribute.
841 *
842 * If the attribute is not found, ntfs_external_attr_find() returns -ENOENT and
843 * @ctx->attr will point to the attribute in the base mft record before which
844 * the attribute being searched for would need to be inserted if such an action
845 * were to be desired.  @ctx->mrec will point to the mft record in which
846 * @ctx->attr is located and @ctx->al_entry will point to the attribute list
847 * entry of the attribute before which the attribute being searched for would
848 * need to be inserted if such an action were to be desired.
849 *
850 * Thus to insert the not found attribute, one wants to add the attribute to
851 * @ctx->mrec (the base mft record) and if there is not enough space, the
852 * attribute should be placed in a newly allocated extent mft record.  The
853 * attribute list entry for the inserted attribute should be inserted in the
854 * attribute list attribute at @ctx->al_entry.
855 *
856 * On actual error, ntfs_external_attr_find() returns -EIO.  In this case
857 * @ctx->attr is undefined and in particular do not rely on it not changing.
858 */
859static int ntfs_external_attr_find(const ATTR_TYPE type,
860		const ntfschar *name, const u32 name_len,
861		const IGNORE_CASE_BOOL ic, const VCN lowest_vcn,
862		const u8 *val, const u32 val_len, ntfs_attr_search_ctx *ctx)
863{
864	ntfs_inode *base_ni, *ni;
865	ntfs_volume *vol;
866	ATTR_LIST_ENTRY *al_entry, *next_al_entry;
867	u8 *al_start, *al_end;
868	ATTR_RECORD *a;
869	ntfschar *al_name;
870	u32 al_name_len;
871	int err = 0;
872	static const char *es = " Unmount and run chkdsk.";
873
874	ni = ctx->ntfs_ino;
875	base_ni = ctx->base_ntfs_ino;
876	ntfs_debug("Entering for inode 0x%lx, type 0x%x.", ni->mft_no, type);
877	if (!base_ni) {
878		/* First call happens with the base mft record. */
879		base_ni = ctx->base_ntfs_ino = ctx->ntfs_ino;
880		ctx->base_mrec = ctx->mrec;
881	}
882	if (ni == base_ni)
883		ctx->base_attr = ctx->attr;
884	if (type == AT_END)
885		goto not_found;
886	vol = base_ni->vol;
887	al_start = base_ni->attr_list;
888	al_end = al_start + base_ni->attr_list_size;
889	if (!ctx->al_entry)
890		ctx->al_entry = (ATTR_LIST_ENTRY*)al_start;
891	/*
	 * Iterate over entries in attribute list starting at @ctx->al_entry if
	 * @ctx->is_first is 'true', or at the entry following it otherwise.
894	 */
895	if (ctx->is_first) {
896		al_entry = ctx->al_entry;
897		ctx->is_first = false;
898	} else
899		al_entry = (ATTR_LIST_ENTRY*)((u8*)ctx->al_entry +
900				le16_to_cpu(ctx->al_entry->length));
901	for (;; al_entry = next_al_entry) {
902		/* Out of bounds check. */
903		if ((u8*)al_entry < base_ni->attr_list ||
904				(u8*)al_entry > al_end)
905			break;	/* Inode is corrupt. */
906		ctx->al_entry = al_entry;
907		/* Catch the end of the attribute list. */
908		if ((u8*)al_entry == al_end)
909			goto not_found;
910		if (!al_entry->length)
911			break;
912		if ((u8*)al_entry + 6 > al_end || (u8*)al_entry +
913				le16_to_cpu(al_entry->length) > al_end)
914			break;
915		next_al_entry = (ATTR_LIST_ENTRY*)((u8*)al_entry +
916				le16_to_cpu(al_entry->length));
917		if (le32_to_cpu(al_entry->type) > le32_to_cpu(type))
918			goto not_found;
919		if (type != al_entry->type)
920			continue;
921		/*
922		 * If @name is present, compare the two names.  If @name is
923		 * missing, assume we want an unnamed attribute.
924		 */
925		al_name_len = al_entry->name_length;
926		al_name = (ntfschar*)((u8*)al_entry + al_entry->name_offset);
927		if (!name) {
928			if (al_name_len)
929				goto not_found;
930		} else if (!ntfs_are_names_equal(al_name, al_name_len, name,
931				name_len, ic, vol->upcase, vol->upcase_len)) {
932			register int rc;
933
934			rc = ntfs_collate_names(name, name_len, al_name,
935					al_name_len, 1, IGNORE_CASE,
936					vol->upcase, vol->upcase_len);
937			/*
938			 * If @name collates before al_name, there is no
939			 * matching attribute.
940			 */
941			if (rc == -1)
942				goto not_found;
943			/* If the strings are not equal, continue search. */
944			if (rc)
945				continue;
946			rc = ntfs_collate_names(name, name_len, al_name,
947					al_name_len, 1, CASE_SENSITIVE,
948					vol->upcase, vol->upcase_len);
949			if (rc == -1)
950				goto not_found;
951			if (rc)
952				continue;
953		}
954		/*
955		 * The names match or @name not present and attribute is
956		 * unnamed.  Now check @lowest_vcn.  Continue search if the
957		 * next attribute list entry still fits @lowest_vcn.  Otherwise
958		 * we have reached the right one or the search has failed.
959		 */
960		if (lowest_vcn && (u8*)next_al_entry >= al_start	    &&
961				(u8*)next_al_entry + 6 < al_end		    &&
962				(u8*)next_al_entry + le16_to_cpu(
963					next_al_entry->length) <= al_end    &&
964				sle64_to_cpu(next_al_entry->lowest_vcn) <=
965					lowest_vcn			    &&
966				next_al_entry->type == al_entry->type	    &&
967				next_al_entry->name_length == al_name_len   &&
968				ntfs_are_names_equal((ntfschar*)((u8*)
969					next_al_entry +
970					next_al_entry->name_offset),
971					next_al_entry->name_length,
972					al_name, al_name_len, CASE_SENSITIVE,
973					vol->upcase, vol->upcase_len))
974			continue;
975		if (MREF_LE(al_entry->mft_reference) == ni->mft_no) {
976			if (MSEQNO_LE(al_entry->mft_reference) != ni->seq_no) {
977				ntfs_error(vol->sb, "Found stale mft "
978						"reference in attribute list "
979						"of base inode 0x%lx.%s",
980						base_ni->mft_no, es);
981				err = -EIO;
982				break;
983			}
984		} else { /* Mft references do not match. */
985			/* If there is a mapped record unmap it first. */
986			if (ni != base_ni)
987				unmap_extent_mft_record(ni);
988			/* Do we want the base record back? */
989			if (MREF_LE(al_entry->mft_reference) ==
990					base_ni->mft_no) {
991				ni = ctx->ntfs_ino = base_ni;
992				ctx->mrec = ctx->base_mrec;
993			} else {
994				/* We want an extent record. */
995				ctx->mrec = map_extent_mft_record(base_ni,
996						le64_to_cpu(
997						al_entry->mft_reference), &ni);
998				if (IS_ERR(ctx->mrec)) {
999					ntfs_error(vol->sb, "Failed to map "
1000							"extent mft record "
1001							"0x%lx of base inode "
1002							"0x%lx.%s",
1003							MREF_LE(al_entry->
1004							mft_reference),
1005							base_ni->mft_no, es);
1006					err = PTR_ERR(ctx->mrec);
1007					if (err == -ENOENT)
1008						err = -EIO;
1009					/* Cause @ctx to be sanitized below. */
1010					ni = NULL;
1011					break;
1012				}
1013				ctx->ntfs_ino = ni;
1014			}
1015			ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1016					le16_to_cpu(ctx->mrec->attrs_offset));
1017		}
1018		/*
1019		 * ctx->vfs_ino, ctx->mrec, and ctx->attr now point to the
1020		 * mft record containing the attribute represented by the
1021		 * current al_entry.
1022		 */
1023		/*
1024		 * We could call into ntfs_attr_find() to find the right
1025		 * attribute in this mft record but this would be less
1026		 * efficient and not quite accurate as ntfs_attr_find() ignores
1027		 * the attribute instance numbers for example which become
1028		 * important when one plays with attribute lists.  Also,
1029		 * because a proper match has been found in the attribute list
1030		 * entry above, the comparison can now be optimized.  So it is
1031		 * worth re-implementing a simplified ntfs_attr_find() here.
1032		 */
1033		a = ctx->attr;
1034		/*
1035		 * Use a manual loop so we can still use break and continue
1036		 * with the same meanings as above.
1037		 */
1038do_next_attr_loop:
1039		if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
1040				le32_to_cpu(ctx->mrec->bytes_allocated))
1041			break;
1042		if (a->type == AT_END)
1043			break;
1044		if (!a->length)
1045			break;
1046		if (al_entry->instance != a->instance)
1047			goto do_next_attr;
1048		/*
1049		 * If the type and/or the name are mismatched between the
1050		 * attribute list entry and the attribute record, there is
1051		 * corruption so we break and return error EIO.
1052		 */
1053		if (al_entry->type != a->type)
1054			break;
1055		if (!ntfs_are_names_equal((ntfschar*)((u8*)a +
1056				le16_to_cpu(a->name_offset)), a->name_length,
1057				al_name, al_name_len, CASE_SENSITIVE,
1058				vol->upcase, vol->upcase_len))
1059			break;
1060		ctx->attr = a;
1061		/*
1062		 * If no @val specified or @val specified and it matches, we
1063		 * have found it!
1064		 */
1065		if (!val || (!a->non_resident && le32_to_cpu(
1066				a->data.resident.value_length) == val_len &&
1067				!memcmp((u8*)a +
1068				le16_to_cpu(a->data.resident.value_offset),
1069				val, val_len))) {
1070			ntfs_debug("Done, found.");
1071			return 0;
1072		}
1073do_next_attr:
1074		/* Proceed to the next attribute in the current mft record. */
1075		a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length));
1076		goto do_next_attr_loop;
1077	}
1078	if (!err) {
1079		ntfs_error(vol->sb, "Base inode 0x%lx contains corrupt "
1080				"attribute list attribute.%s", base_ni->mft_no,
1081				es);
1082		err = -EIO;
1083	}
1084	if (ni != base_ni) {
1085		if (ni)
1086			unmap_extent_mft_record(ni);
1087		ctx->ntfs_ino = base_ni;
1088		ctx->mrec = ctx->base_mrec;
1089		ctx->attr = ctx->base_attr;
1090	}
1091	if (err != -ENOMEM)
1092		NVolSetErrors(vol);
1093	return err;
1094not_found:
1095	/*
1096	 * If we were looking for AT_END, we reset the search context @ctx and
1097	 * use ntfs_attr_find() to seek to the end of the base mft record.
1098	 */
1099	if (type == AT_END) {
1100		ntfs_attr_reinit_search_ctx(ctx);
1101		return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len,
1102				ctx);
1103	}
1104	/*
1105	 * The attribute was not found.  Before we return, we want to ensure
1106	 * @ctx->mrec and @ctx->attr indicate the position at which the
1107	 * attribute should be inserted in the base mft record.  Since we also
1108	 * want to preserve @ctx->al_entry we cannot reinitialize the search
1109	 * context using ntfs_attr_reinit_search_ctx() as this would set
1110	 * @ctx->al_entry to NULL.  Thus we do the necessary bits manually (see
1111	 * ntfs_attr_init_search_ctx() below).  Note, we _only_ preserve
1112	 * @ctx->al_entry as the remaining fields (base_*) are identical to
1113	 * their non base_ counterparts and we cannot set @ctx->base_attr
1114	 * correctly yet as we do not know what @ctx->attr will be set to by
1115	 * the call to ntfs_attr_find() below.
1116	 */
1117	if (ni != base_ni)
1118		unmap_extent_mft_record(ni);
1119	ctx->mrec = ctx->base_mrec;
1120	ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1121			le16_to_cpu(ctx->mrec->attrs_offset));
1122	ctx->is_first = true;
1123	ctx->ntfs_ino = base_ni;
1124	ctx->base_ntfs_ino = NULL;
1125	ctx->base_mrec = NULL;
1126	ctx->base_attr = NULL;
1127	/*
	 * In case there are multiple matches in the base mft record, we need to
1129	 * keep enumerating until we get an attribute not found response (or
1130	 * another error), otherwise we would keep returning the same attribute
1131	 * over and over again and all programs using us for enumeration would
1132	 * lock up in a tight loop.
1133	 */
1134	do {
1135		err = ntfs_attr_find(type, name, name_len, ic, val, val_len,
1136				ctx);
1137	} while (!err);
1138	ntfs_debug("Done, not found.");
1139	return err;
1140}
1141
1142/**
1143 * ntfs_attr_lookup - find an attribute in an ntfs inode
1144 * @type:	attribute type to find
1145 * @name:	attribute name to find (optional, i.e. NULL means don't care)
1146 * @name_len:	attribute name length (only needed if @name present)
1147 * @ic:		IGNORE_CASE or CASE_SENSITIVE (ignored if @name not present)
1148 * @lowest_vcn:	lowest vcn to find (optional, non-resident attributes only)
1149 * @val:	attribute value to find (optional, resident attributes only)
1150 * @val_len:	attribute value length
1151 * @ctx:	search context with mft record and attribute to search from
1152 *
1153 * Find an attribute in an ntfs inode.  On first search @ctx->ntfs_ino must
1154 * be the base mft record and @ctx must have been obtained from a call to
1155 * ntfs_attr_get_search_ctx().
1156 *
1157 * This function transparently handles attribute lists and @ctx is used to
 * continue searches from where they were left off.
1159 *
1160 * After finishing with the attribute/mft record you need to call
1161 * ntfs_attr_put_search_ctx() to cleanup the search context (unmapping any
1162 * mapped inodes, etc).
1163 *
1164 * Return 0 if the search was successful and -errno if not.
1165 *
1166 * When 0, @ctx->attr is the found attribute and it is in mft record
1167 * @ctx->mrec.  If an attribute list attribute is present, @ctx->al_entry is
1168 * the attribute list entry of the found attribute.
1169 *
1170 * When -ENOENT, @ctx->attr is the attribute which collates just after the
1171 * attribute being searched for, i.e. if one wants to add the attribute to the
1172 * mft record this is the correct place to insert it into.  If an attribute
1173 * list attribute is present, @ctx->al_entry is the attribute list entry which
1174 * collates just after the attribute list entry of the attribute being searched
1175 * for, i.e. if one wants to add the attribute to the mft record this is the
1176 * correct place to insert its attribute list entry into.
1177 *
 * When -errno != -ENOENT, an error occurred during the lookup.  @ctx->attr is
1179 * then undefined and in particular you should not rely on it not changing.
1180 */
1181int ntfs_attr_lookup(const ATTR_TYPE type, const ntfschar *name,
1182		const u32 name_len, const IGNORE_CASE_BOOL ic,
1183		const VCN lowest_vcn, const u8 *val, const u32 val_len,
1184		ntfs_attr_search_ctx *ctx)
1185{
1186	ntfs_inode *base_ni;
1187
1188	ntfs_debug("Entering.");
1189	BUG_ON(IS_ERR(ctx->mrec));
1190	if (ctx->base_ntfs_ino)
1191		base_ni = ctx->base_ntfs_ino;
1192	else
1193		base_ni = ctx->ntfs_ino;
1194	/* Sanity check, just for debugging really. */
1195	BUG_ON(!base_ni);
1196	if (!NInoAttrList(base_ni) || type == AT_ATTRIBUTE_LIST)
1197		return ntfs_attr_find(type, name, name_len, ic, val, val_len,
1198				ctx);
1199	return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn,
1200			val, val_len, ctx);
1201}
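
/*
 * Hedged sketch of the standard lookup pattern used throughout the driver:
 * map the base mft record, get a search context, look the attribute up and
 * release everything again.
 *
 *	m = map_mft_record(base_ni);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);
 *	ctx = ntfs_attr_get_search_ctx(base_ni, m);
 *	if (!ctx) {
 *		unmap_mft_record(base_ni);
 *		return -ENOMEM;
 *	}
 *	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
 *			CASE_SENSITIVE, 0, NULL, 0, ctx);
 *	if (!err)
 *		a = ctx->attr;	(found; @a lives in mft record ctx->mrec)
 *	ntfs_attr_put_search_ctx(ctx);
 *	unmap_mft_record(base_ni);
 */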
1202
1203/**
1204 * ntfs_attr_init_search_ctx - initialize an attribute search context
1205 * @ctx:	attribute search context to initialize
1206 * @ni:		ntfs inode with which to initialize the search context
1207 * @mrec:	mft record with which to initialize the search context
1208 *
1209 * Initialize the attribute search context @ctx with @ni and @mrec.
1210 */
1211static inline void ntfs_attr_init_search_ctx(ntfs_attr_search_ctx *ctx,
1212		ntfs_inode *ni, MFT_RECORD *mrec)
1213{
1214	*ctx = (ntfs_attr_search_ctx) {
1215		.mrec = mrec,
1216		/* Sanity checks are performed elsewhere. */
1217		.attr = (ATTR_RECORD*)((u8*)mrec +
1218				le16_to_cpu(mrec->attrs_offset)),
1219		.is_first = true,
1220		.ntfs_ino = ni,
1221	};
1222}
1223
1224/**
1225 * ntfs_attr_reinit_search_ctx - reinitialize an attribute search context
1226 * @ctx:	attribute search context to reinitialize
1227 *
1228 * Reinitialize the attribute search context @ctx, unmapping an associated
1229 * extent mft record if present, and initialize the search context again.
1230 *
1231 * This is used when a search for a new attribute is being started to reset
1232 * the search context to the beginning.
1233 */
1234void ntfs_attr_reinit_search_ctx(ntfs_attr_search_ctx *ctx)
1235{
1236	if (likely(!ctx->base_ntfs_ino)) {
1237		/* No attribute list. */
1238		ctx->is_first = true;
1239		/* Sanity checks are performed elsewhere. */
1240		ctx->attr = (ATTR_RECORD*)((u8*)ctx->mrec +
1241				le16_to_cpu(ctx->mrec->attrs_offset));
1242		/*
1243		 * This needs resetting due to ntfs_external_attr_find() which
1244		 * can leave it set despite having zeroed ctx->base_ntfs_ino.
1245		 */
1246		ctx->al_entry = NULL;
1247		return;
1248	} /* Attribute list. */
1249	if (ctx->ntfs_ino != ctx->base_ntfs_ino)
1250		unmap_extent_mft_record(ctx->ntfs_ino);
1251	ntfs_attr_init_search_ctx(ctx, ctx->base_ntfs_ino, ctx->base_mrec);
1252	return;
1253}
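
/*
 * Illustrative sketch: one context reused for two unrelated lookups on the
 * same base mft record (attribute types are examples only):
 *
 *	err = ntfs_attr_lookup(AT_DATA, NULL, 0, CASE_SENSITIVE, 0, NULL, 0,
 *			ctx);
 *	...
 *	ntfs_attr_reinit_search_ctx(ctx);
 *	err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, CASE_SENSITIVE, 0, NULL,
 *			0, ctx);
 */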
1254
1255/**
1256 * ntfs_attr_get_search_ctx - allocate/initialize a new attribute search context
1257 * @ni:		ntfs inode with which to initialize the search context
1258 * @mrec:	mft record with which to initialize the search context
1259 *
1260 * Allocate a new attribute search context, initialize it with @ni and @mrec,
1261 * and return it. Return NULL if allocation failed.
1262 */
1263ntfs_attr_search_ctx *ntfs_attr_get_search_ctx(ntfs_inode *ni, MFT_RECORD *mrec)
1264{
1265	ntfs_attr_search_ctx *ctx;
1266
1267	ctx = kmem_cache_alloc(ntfs_attr_ctx_cache, GFP_NOFS);
1268	if (ctx)
1269		ntfs_attr_init_search_ctx(ctx, ni, mrec);
1270	return ctx;
1271}
1272
1273/**
1274 * ntfs_attr_put_search_ctx - release an attribute search context
1275 * @ctx:	attribute search context to free
1276 *
1277 * Release the attribute search context @ctx, unmapping an associated extent
1278 * mft record if present.
1279 */
1280void ntfs_attr_put_search_ctx(ntfs_attr_search_ctx *ctx)
1281{
1282	if (ctx->base_ntfs_ino && ctx->ntfs_ino != ctx->base_ntfs_ino)
1283		unmap_extent_mft_record(ctx->ntfs_ino);
1284	kmem_cache_free(ntfs_attr_ctx_cache, ctx);
1285	return;
1286}
1287
1288#ifdef NTFS_RW
1289
1290/**
1291 * ntfs_attr_find_in_attrdef - find an attribute in the $AttrDef system file
1292 * @vol:	ntfs volume to which the attribute belongs
1293 * @type:	attribute type which to find
1294 *
1295 * Search for the attribute definition record corresponding to the attribute
1296 * @type in the $AttrDef system file.
1297 *
1298 * Return the attribute type definition record if found and NULL if not found.
1299 */
1300static ATTR_DEF *ntfs_attr_find_in_attrdef(const ntfs_volume *vol,
1301		const ATTR_TYPE type)
1302{
1303	ATTR_DEF *ad;
1304
1305	BUG_ON(!vol->attrdef);
1306	BUG_ON(!type);
1307	for (ad = vol->attrdef; (u8*)ad - (u8*)vol->attrdef <
1308			vol->attrdef_size && ad->type; ++ad) {
1309		/* We have not found it yet, carry on searching. */
1310		if (likely(le32_to_cpu(ad->type) < le32_to_cpu(type)))
1311			continue;
1312		/* We found the attribute; return it. */
1313		if (likely(ad->type == type))
1314			return ad;
1315		/* We have gone too far already.  No point in continuing. */
1316		break;
1317	}
1318	/* Attribute not found. */
1319	ntfs_debug("Attribute type 0x%x not found in $AttrDef.",
1320			le32_to_cpu(type));
1321	return NULL;
1322}
1323
1324/**
1325 * ntfs_attr_size_bounds_check - check a size of an attribute type for validity
1326 * @vol:	ntfs volume to which the attribute belongs
1327 * @type:	attribute type which to check
1328 * @size:	size which to check
1329 *
1330 * Check whether the @size in bytes is valid for an attribute of @type on the
1331 * ntfs volume @vol.  This information is obtained from $AttrDef system file.
1332 *
1333 * Return 0 if valid, -ERANGE if not valid, or -ENOENT if the attribute is not
1334 * listed in $AttrDef.
1335 */
1336int ntfs_attr_size_bounds_check(const ntfs_volume *vol, const ATTR_TYPE type,
1337		const s64 size)
1338{
1339	ATTR_DEF *ad;
1340
1341	BUG_ON(size < 0);
1342	/*
1343	 * $ATTRIBUTE_LIST has a maximum size of 256kiB, but this is not
1344	 * listed in $AttrDef.
1345	 */
1346	if (unlikely(type == AT_ATTRIBUTE_LIST && size > 256 * 1024))
1347		return -ERANGE;
1348	/* Get the $AttrDef entry for the attribute @type. */
1349	ad = ntfs_attr_find_in_attrdef(vol, type);
1350	if (unlikely(!ad))
1351		return -ENOENT;
1352	/* Do the bounds check. */
1353	if (((sle64_to_cpu(ad->min_size) > 0) &&
1354			size < sle64_to_cpu(ad->min_size)) ||
1355			((sle64_to_cpu(ad->max_size) > 0) && size >
1356			sle64_to_cpu(ad->max_size)))
1357		return -ERANGE;
1358	return 0;
1359}
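
/*
 * Illustrative check before growing an attribute (per the return codes
 * documented above):
 *
 *	err = ntfs_attr_size_bounds_check(vol, ni->type, new_size);
 *	if (err == -ERANGE)
 *		(the new size is not valid for this attribute type)
 *	else if (err == -ENOENT)
 *		(type not in $AttrDef; usually treated as corruption)
 */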
1360
1361/**
1362 * ntfs_attr_can_be_non_resident - check if an attribute can be non-resident
1363 * @vol:	ntfs volume to which the attribute belongs
1364 * @type:	attribute type which to check
1365 *
1366 * Check whether the attribute of @type on the ntfs volume @vol is allowed to
1367 * be non-resident.  This information is obtained from $AttrDef system file.
1368 *
1369 * Return 0 if the attribute is allowed to be non-resident, -EPERM if not, and
1370 * -ENOENT if the attribute is not listed in $AttrDef.
1371 */
1372int ntfs_attr_can_be_non_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1373{
1374	ATTR_DEF *ad;
1375
1376	/* Find the attribute definition record in $AttrDef. */
1377	ad = ntfs_attr_find_in_attrdef(vol, type);
1378	if (unlikely(!ad))
1379		return -ENOENT;
1380	/* Check the flags and return the result. */
1381	if (ad->flags & ATTR_DEF_RESIDENT)
1382		return -EPERM;
1383	return 0;
1384}
1385
1386/**
1387 * ntfs_attr_can_be_resident - check if an attribute can be resident
1388 * @vol:	ntfs volume to which the attribute belongs
1389 * @type:	attribute type which to check
1390 *
1391 * Check whether the attribute of @type on the ntfs volume @vol is allowed to
1392 * be resident.  This information is derived from our ntfs knowledge and may
1393 * not be completely accurate, especially when user defined attributes are
1394 * present.  Basically we allow everything to be resident except for index
1395 * allocation and $EA attributes.
1396 *
 * Return 0 if the attribute is allowed to be resident and -EPERM if not.
1398 *
1399 * Warning: In the system file $MFT the attribute $Bitmap must be non-resident
1400 *	    otherwise windows will not boot (blue screen of death)!  We cannot
1401 *	    check for this here as we do not know which inode's $Bitmap is
1402 *	    being asked about so the caller needs to special case this.
1403 */
1404int ntfs_attr_can_be_resident(const ntfs_volume *vol, const ATTR_TYPE type)
1405{
1406	if (type == AT_INDEX_ALLOCATION)
1407		return -EPERM;
1408	return 0;
1409}
1410
1411/**
1412 * ntfs_attr_record_resize - resize an attribute record
1413 * @m:		mft record containing attribute record
1414 * @a:		attribute record to resize
1415 * @new_size:	new size in bytes to which to resize the attribute record @a
1416 *
1417 * Resize the attribute record @a, i.e. the resident part of the attribute, in
1418 * the mft record @m to @new_size bytes.
1419 *
1420 * Return 0 on success and -errno on error.  The following error codes are
1421 * defined:
1422 *	-ENOSPC	- Not enough space in the mft record @m to perform the resize.
1423 *
1424 * Note: On error, no modifications have been performed whatsoever.
1425 *
1426 * Warning: If you make a record smaller without having copied all the data you
1427 *	    are interested in the data may be overwritten.
1428 */
1429int ntfs_attr_record_resize(MFT_RECORD *m, ATTR_RECORD *a, u32 new_size)
1430{
1431	ntfs_debug("Entering for new_size %u.", new_size);
1432	/* Align to 8 bytes if it is not already done. */
1433	if (new_size & 7)
1434		new_size = (new_size + 7) & ~7;
1435	/* If the actual attribute length has changed, move things around. */
1436	if (new_size != le32_to_cpu(a->length)) {
1437		u32 new_muse = le32_to_cpu(m->bytes_in_use) -
1438				le32_to_cpu(a->length) + new_size;
1439		/* Not enough space in this mft record. */
1440		if (new_muse > le32_to_cpu(m->bytes_allocated))
1441			return -ENOSPC;
1442		/* Move attributes following @a to their new location. */
1443		memmove((u8*)a + new_size, (u8*)a + le32_to_cpu(a->length),
1444				le32_to_cpu(m->bytes_in_use) - ((u8*)a -
1445				(u8*)m) - le32_to_cpu(a->length));
1446		/* Adjust @m to reflect the change in used space. */
1447		m->bytes_in_use = cpu_to_le32(new_muse);
1448		/* Adjust @a to reflect the new size. */
1449		if (new_size >= offsetof(ATTR_REC, length) + sizeof(a->length))
1450			a->length = cpu_to_le32(new_size);
1451	}
1452	return 0;
1453}
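
/*
 * Hedged sketch: attempt an in-place resize and fall back when the mft
 * record is full.
 *
 *	if (ntfs_attr_record_resize(m, a, new_size) == -ENOSPC)
 *		(not enough room in @m: make the attribute non-resident or
 *		 move it to an extent mft record instead)
 */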
1454
1455/**
1456 * ntfs_resident_attr_value_resize - resize the value of a resident attribute
1457 * @m:		mft record containing attribute record
1458 * @a:		attribute record whose value to resize
1459 * @new_size:	new size in bytes to which to resize the attribute value of @a
1460 *
1461 * Resize the value of the attribute @a in the mft record @m to @new_size bytes.
1462 * If the value is made bigger, the newly allocated space is cleared.
1463 *
1464 * Return 0 on success and -errno on error.  The following error codes are
1465 * defined:
1466 *	-ENOSPC	- Not enough space in the mft record @m to perform the resize.
1467 *
1468 * Note: On error, no modifications have been performed whatsoever.
1469 *
1470 * Warning: If you make a record smaller without having copied all the data you
1471 *	    are interested in the data may be overwritten.
1472 */
1473int ntfs_resident_attr_value_resize(MFT_RECORD *m, ATTR_RECORD *a,
1474		const u32 new_size)
1475{
1476	u32 old_size;
1477
1478	/* Resize the resident part of the attribute record. */
1479	if (ntfs_attr_record_resize(m, a,
1480			le16_to_cpu(a->data.resident.value_offset) + new_size))
1481		return -ENOSPC;
1482	/*
1483	 * The resize succeeded!  If we made the attribute value bigger, clear
1484	 * the area between the old size and @new_size.
1485	 */
1486	old_size = le32_to_cpu(a->data.resident.value_length);
1487	if (new_size > old_size)
1488		memset((u8*)a + le16_to_cpu(a->data.resident.value_offset) +
1489				old_size, 0, new_size - old_size);
1490	/* Finally update the length of the attribute value. */
1491	a->data.resident.value_length = cpu_to_le32(new_size);
1492	return 0;
1493}
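
/*
 * Illustrative sketch (not part of the driver): with a mapped mft record and
 * an attribute search context @ctx positioned at a resident attribute, the
 * value can be grown to a hypothetical @new_size like this, the helper
 * zero-filling the extension itself:
 *
 *	err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr, new_size);
 *
 * If this returns -ENOSPC the value no longer fits in the mft record and the
 * attribute would have to be made non-resident instead, e.g. via
 * ntfs_attr_make_non_resident() below.
 */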
1494
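/**
 * ntfs_attr_make_non_resident - convert a resident to a non-resident attribute
 * @ni:		ntfs inode describing the attribute to convert
 * @data_size:	size in bytes of the resident attribute value
 *
 * Convert the resident attribute described by the ntfs inode @ni into a
 * non-resident one: allocate clusters to hold the value, copy the value into
 * the page cache, rewrite the attribute record as non-resident, and generate
 * the mapping pairs array describing the new allocation.
 *
 * Return 0 on success and -errno on error.
 *
 * Locking: The runlist lock of @ni is taken for writing and the mft record of
 * the base ntfs inode is mapped and unmapped internally, so the caller must
 * not hold either when calling this function.
 */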
1495int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
1496{
1497	s64 new_size;
1498	struct inode *vi = VFS_I(ni);
1499	ntfs_volume *vol = ni->vol;
1500	ntfs_inode *base_ni;
1501	MFT_RECORD *m;
1502	ATTR_RECORD *a;
1503	ntfs_attr_search_ctx *ctx;
1504	struct page *page;
1505	runlist_element *rl;
1506	u8 *kaddr;
1507	unsigned long flags;
1508	int mp_size, mp_ofs, name_ofs, arec_size, err, err2;
1509	u32 attr_size;
1510	u8 old_res_attr_flags;
1511
1512	/* Check that the attribute is allowed to be non-resident. */
1513	err = ntfs_attr_can_be_non_resident(vol, ni->type);
1514	if (unlikely(err)) {
1515		if (err == -EPERM)
1516			ntfs_debug("Attribute is not allowed to be "
1517					"non-resident.");
1518		else
1519			ntfs_debug("Attribute not defined on the NTFS "
1520					"volume!");
1521		return err;
1522	}
1523	BUG_ON(NInoCompressed(ni));
1524	BUG_ON(NInoEncrypted(ni));
1525	/*
1526	 * The size needs to be aligned to a cluster boundary for allocation
1527	 * purposes.
1528	 */
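	/* E.g. with 4096-byte clusters, 5000 bytes round up to 8192 bytes. */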
1529	new_size = (data_size + vol->cluster_size - 1) &
1530			~(vol->cluster_size - 1);
1531	if (new_size > 0) {
1532		/*
1533		 * We will need the page later and, since the page lock nests
1534		 * outside all ntfs locks, we need to get the page now.
1535		 */
1536		page = find_or_create_page(vi->i_mapping, 0,
1537				mapping_gfp_mask(vi->i_mapping));
1538		if (unlikely(!page))
1539			return -ENOMEM;
1540		/* Start by allocating clusters to hold the attribute value. */
1541		rl = ntfs_cluster_alloc(vol, 0, new_size >>
1542				vol->cluster_size_bits, -1, DATA_ZONE, true);
1543		if (IS_ERR(rl)) {
1544			err = PTR_ERR(rl);
1545			ntfs_debug("Failed to allocate cluster%s, error code "
1546					"%i.", (new_size >>
1547					vol->cluster_size_bits) > 1 ? "s" : "",
1548					err);
1549			goto page_err_out;
1550		}
1551	} else {
1552		rl = NULL;
1553		page = NULL;
1554	}
1555	/* Determine the size of the mapping pairs array. */
1556	mp_size = ntfs_get_size_for_mapping_pairs(vol, rl, 0, -1);
1557	if (unlikely(mp_size < 0)) {
1558		err = mp_size;
1559		ntfs_debug("Failed to get size for mapping pairs array, error "
1560				"code %i.", err);
1561		goto rl_err_out;
1562	}
1563	down_write(&ni->runlist.lock);
1564	if (!NInoAttr(ni))
1565		base_ni = ni;
1566	else
1567		base_ni = ni->ext.base_ntfs_ino;
1568	m = map_mft_record(base_ni);
1569	if (IS_ERR(m)) {
1570		err = PTR_ERR(m);
1571		m = NULL;
1572		ctx = NULL;
1573		goto err_out;
1574	}
1575	ctx = ntfs_attr_get_search_ctx(base_ni, m);
1576	if (unlikely(!ctx)) {
1577		err = -ENOMEM;
1578		goto err_out;
1579	}
1580	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1581			CASE_SENSITIVE, 0, NULL, 0, ctx);
1582	if (unlikely(err)) {
1583		if (err == -ENOENT)
1584			err = -EIO;
1585		goto err_out;
1586	}
1587	m = ctx->mrec;
1588	a = ctx->attr;
1589	BUG_ON(NInoNonResident(ni));
1590	BUG_ON(a->non_resident);
1591	/*
1592	 * Calculate new offsets for the name and the mapping pairs array.
1593	 */
1594	if (NInoSparse(ni) || NInoCompressed(ni))
1595		name_ofs = (offsetof(ATTR_REC,
1596				data.non_resident.compressed_size) +
1597				sizeof(a->data.non_resident.compressed_size) +
1598				7) & ~7;
1599	else
1600		name_ofs = (offsetof(ATTR_REC,
1601				data.non_resident.compressed_size) + 7) & ~7;
1602	mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1603	/*
1604	 * Determine the size of the resident part of the now non-resident
1605	 * attribute record.
1606	 */
1607	arec_size = (mp_ofs + mp_size + 7) & ~7;
1608	/*
1609	 * If the page is not uptodate bring it uptodate by copying from the
1610	 * attribute value.
1611	 */
1612	attr_size = le32_to_cpu(a->data.resident.value_length);
1613	BUG_ON(attr_size != data_size);
1614	if (page && !PageUptodate(page)) {
1615		kaddr = kmap_atomic(page, KM_USER0);
1616		memcpy(kaddr, (u8*)a +
1617				le16_to_cpu(a->data.resident.value_offset),
1618				attr_size);
1619		memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
1620		kunmap_atomic(kaddr, KM_USER0);
1621		flush_dcache_page(page);
1622		SetPageUptodate(page);
1623	}
1624	/* Backup the attribute flag. */
1625	old_res_attr_flags = a->data.resident.flags;
1626	/* Resize the resident part of the attribute record. */
1627	err = ntfs_attr_record_resize(m, a, arec_size);
1628	if (unlikely(err))
1629		goto err_out;
1630	/*
1631	 * Convert the resident part of the attribute record to describe a
1632	 * non-resident attribute.
1633	 */
1634	a->non_resident = 1;
1635	/* Move the attribute name if it exists and update the offset. */
1636	if (a->name_length)
1637		memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1638				a->name_length * sizeof(ntfschar));
1639	a->name_offset = cpu_to_le16(name_ofs);
1640	/* Setup the fields specific to non-resident attributes. */
1641	a->data.non_resident.lowest_vcn = 0;
1642	a->data.non_resident.highest_vcn = cpu_to_sle64((new_size - 1) >>
1643			vol->cluster_size_bits);
1644	a->data.non_resident.mapping_pairs_offset = cpu_to_le16(mp_ofs);
1645	memset(&a->data.non_resident.reserved, 0,
1646			sizeof(a->data.non_resident.reserved));
1647	a->data.non_resident.allocated_size = cpu_to_sle64(new_size);
1648	a->data.non_resident.data_size =
1649			a->data.non_resident.initialized_size =
1650			cpu_to_sle64(attr_size);
1651	if (NInoSparse(ni) || NInoCompressed(ni)) {
1652		a->data.non_resident.compression_unit = 0;
1653		if (NInoCompressed(ni) || vol->major_ver < 3)
1654			a->data.non_resident.compression_unit = 4;
1655		a->data.non_resident.compressed_size =
1656				a->data.non_resident.allocated_size;
1657	} else
1658		a->data.non_resident.compression_unit = 0;
1659	/* Generate the mapping pairs array into the attribute record. */
1660	err = ntfs_mapping_pairs_build(vol, (u8*)a + mp_ofs,
1661			arec_size - mp_ofs, rl, 0, -1, NULL);
1662	if (unlikely(err)) {
1663		ntfs_debug("Failed to build mapping pairs, error code %i.",
1664				err);
1665		goto undo_err_out;
1666	}
1667	/* Setup the in-memory attribute structure to be non-resident. */
1668	ni->runlist.rl = rl;
1669	write_lock_irqsave(&ni->size_lock, flags);
1670	ni->allocated_size = new_size;
1671	if (NInoSparse(ni) || NInoCompressed(ni)) {
1672		ni->itype.compressed.size = ni->allocated_size;
1673		if (a->data.non_resident.compression_unit) {
1674			ni->itype.compressed.block_size = 1U << (a->data.
1675					non_resident.compression_unit +
1676					vol->cluster_size_bits);
1677			ni->itype.compressed.block_size_bits =
1678					ffs(ni->itype.compressed.block_size) -
1679					1;
1680			ni->itype.compressed.block_clusters = 1U <<
1681					a->data.non_resident.compression_unit;
1682		} else {
1683			ni->itype.compressed.block_size = 0;
1684			ni->itype.compressed.block_size_bits = 0;
1685			ni->itype.compressed.block_clusters = 0;
1686		}
1687		vi->i_blocks = ni->itype.compressed.size >> 9;
1688	} else
1689		vi->i_blocks = ni->allocated_size >> 9;
1690	write_unlock_irqrestore(&ni->size_lock, flags);
1691	/*
1692	 * This needs to be last since the address space operations ->readpage
1693	 * and ->writepage can run concurrently with us as they are not
1694	 * serialized on i_mutex.  Note, we are not allowed to fail once we flip
1695	 * this switch, which is another reason to do this last.
1696	 */
1697	NInoSetNonResident(ni);
1698	/* Mark the mft record dirty, so it gets written back. */
1699	flush_dcache_mft_record_page(ctx->ntfs_ino);
1700	mark_mft_record_dirty(ctx->ntfs_ino);
1701	ntfs_attr_put_search_ctx(ctx);
1702	unmap_mft_record(base_ni);
1703	up_write(&ni->runlist.lock);
1704	if (page) {
1705		set_page_dirty(page);
1706		unlock_page(page);
1707		mark_page_accessed(page);
1708		page_cache_release(page);
1709	}
1710	ntfs_debug("Done.");
1711	return 0;
1712undo_err_out:
1713	/* Convert the attribute back into a resident attribute. */
1714	a->non_resident = 0;
1715	/* Move the attribute name if it exists and update the offset. */
1716	name_ofs = (offsetof(ATTR_RECORD, data.resident.reserved) +
1717			sizeof(a->data.resident.reserved) + 7) & ~7;
1718	if (a->name_length)
1719		memmove((u8*)a + name_ofs, (u8*)a + le16_to_cpu(a->name_offset),
1720				a->name_length * sizeof(ntfschar));
1721	mp_ofs = (name_ofs + a->name_length * sizeof(ntfschar) + 7) & ~7;
1722	a->name_offset = cpu_to_le16(name_ofs);
1723	arec_size = (mp_ofs + attr_size + 7) & ~7;
1724	/* Resize the resident part of the attribute record. */
1725	err2 = ntfs_attr_record_resize(m, a, arec_size);
1726	if (unlikely(err2)) {
1727		arec_size = le32_to_cpu(a->length);
1728		if ((mp_ofs + attr_size) > arec_size) {
1729			err2 = attr_size;
1730			attr_size = arec_size - mp_ofs;
1731			ntfs_error(vol->sb, "Failed to undo partial resident "
1732					"to non-resident attribute "
1733					"conversion.  Truncating inode 0x%lx, "
1734					"attribute type 0x%x from %i bytes to "
1735					"%i bytes to maintain metadata "
1736					"consistency.  THIS MEANS YOU ARE "
1737					"LOSING %i BYTES OF DATA FROM THIS %s.",
1738					vi->i_ino,
1739					(unsigned)le32_to_cpu(ni->type),
1740					err2, attr_size, err2 - attr_size,
1741					((ni->type == AT_DATA) &&
1742					!ni->name_len) ? "FILE": "ATTRIBUTE");
1743			write_lock_irqsave(&ni->size_lock, flags);
1744			ni->initialized_size = attr_size;
1745			i_size_write(vi, attr_size);
1746			write_unlock_irqrestore(&ni->size_lock, flags);
1747		}
1748	}
1749	/* Setup the fields specific to resident attributes. */
1750	a->data.resident.value_length = cpu_to_le32(attr_size);
1751	a->data.resident.value_offset = cpu_to_le16(mp_ofs);
1752	a->data.resident.flags = old_res_attr_flags;
1753	memset(&a->data.resident.reserved, 0,
1754			sizeof(a->data.resident.reserved));
1755	/* Copy the data from the page back to the attribute value. */
1756	if (page) {
1757		kaddr = kmap_atomic(page, KM_USER0);
1758		memcpy((u8*)a + mp_ofs, kaddr, attr_size);
1759		kunmap_atomic(kaddr, KM_USER0);
1760	}
1761	/* Setup the allocated size in the ntfs inode in case it changed. */
1762	write_lock_irqsave(&ni->size_lock, flags);
1763	ni->allocated_size = arec_size - mp_ofs;
1764	write_unlock_irqrestore(&ni->size_lock, flags);
1765	/* Mark the mft record dirty, so it gets written back. */
1766	flush_dcache_mft_record_page(ctx->ntfs_ino);
1767	mark_mft_record_dirty(ctx->ntfs_ino);
1768err_out:
1769	if (ctx)
1770		ntfs_attr_put_search_ctx(ctx);
1771	if (m)
1772		unmap_mft_record(base_ni);
1773	ni->runlist.rl = NULL;
1774	up_write(&ni->runlist.lock);
1775rl_err_out:
1776	if (rl) {
1777		if (ntfs_cluster_free_from_rl(vol, rl) < 0) {
1778			ntfs_error(vol->sb, "Failed to release allocated "
1779					"cluster(s) in error code path.  Run "
1780					"chkdsk to recover the lost "
1781					"cluster(s).");
1782			NVolSetErrors(vol);
1783		}
1784		ntfs_free(rl);
1785page_err_out:
1786		unlock_page(page);
1787		page_cache_release(page);
1788	}
1789	if (err == -EINVAL)
1790		err = -EIO;
1791	return err;
1792}
1793
1794/**
1795 * ntfs_attr_extend_allocation - extend the allocated space of an attribute
1796 * @ni:			ntfs inode of the attribute whose allocation to extend
1797 * @new_alloc_size:	new size in bytes to which to extend the allocation
1798 * @new_data_size:	new size in bytes to which to extend the data
1799 * @data_start:		beginning of region which is required to be non-sparse
1800 *
1801 * Extend the allocated space of an attribute described by the ntfs inode @ni
1802 * to @new_alloc_size bytes.  If @data_start is -1, the whole extension may be
1803 * implemented as a hole in the file (as long as both the volume and the ntfs
1804 * inode @ni have sparse support enabled).  If @data_start is >= 0, then the
1805 * region between the old allocated size and @data_start - 1 may be made sparse
1806 * but the region between @data_start and @new_alloc_size must be backed by
1807 * actual clusters.
1808 *
1809 * If @new_data_size is -1, it is ignored.  If it is >= 0, then the data size
1810 * of the attribute is extended to @new_data_size.  Note that the i_size of the
1811 * vfs inode is not updated.  Only the data size in the base attribute record
1812 * is updated.  The caller has to update i_size separately if this is required.
1813 * WARNING: It is a BUG() for @new_data_size to be smaller than the old data
1814 * size as well as for @new_data_size to be greater than @new_alloc_size.
1815 *
1816 * For resident attributes this involves resizing the attribute record and if
1817 * necessary moving it and/or other attributes into extent mft records and/or
1818 * converting the attribute to a non-resident attribute which in turn involves
1819 * extending the allocation of a non-resident attribute as described below.
1820 *
1821 * For non-resident attributes this involves allocating clusters in the data
1822 * zone on the volume (except for regions that are being made sparse) and
1823 * extending the run list to describe the allocated clusters as well as
1824 * updating the mapping pairs array of the attribute.  This in turn involves
1825 * resizing the attribute record and if necessary moving it and/or other
1826 * attributes into extent mft records and/or splitting the attribute record
1827 * into multiple extent attribute records.
1828 *
1829 * Also, the attribute list attribute is updated if present and in some of the
1830 * above cases (the ones where extent mft records/attributes come into play),
1831 * an attribute list attribute is created if not already present.
1832 *
1833 * Return the new allocated size on success and -errno on error.  In the case
1834 * that an error is encountered but a partial extension at least up to
1835 * @data_start (if present) is possible, the allocation is partially extended
1836 * and the resulting (partial) allocated size is returned.  The caller must
1837 * therefore check the returned size to determine if the extension was
1838 * partial.  If @data_start is -1 then partial allocations are not performed.
1839 *
1840 * WARNING: Do not call ntfs_attr_extend_allocation() for $MFT/$DATA.
1841 *
1842 * Locking: This function takes the runlist lock of @ni for writing as well as
1843 * locking the mft record of the base ntfs inode.  These locks are maintained
1844 * throughout execution of the function.  These locks are required so that the
1845 * attribute can be resized safely and so that it can for example be converted
1846 * from resident to non-resident safely.
1847 *
1848 * TODO: At present attribute list attribute handling is not implemented.
1849 *
1850 * TODO: At present it is not safe to call this function for anything other
1851 * than the $DATA attribute(s) of an uncompressed and unencrypted file.
1852 */
1853s64 ntfs_attr_extend_allocation(ntfs_inode *ni, s64 new_alloc_size,
1854		const s64 new_data_size, const s64 data_start)
1855{
1856	VCN vcn;
1857	s64 ll, allocated_size, start = data_start;
1858	struct inode *vi = VFS_I(ni);
1859	ntfs_volume *vol = ni->vol;
1860	ntfs_inode *base_ni;
1861	MFT_RECORD *m;
1862	ATTR_RECORD *a;
1863	ntfs_attr_search_ctx *ctx;
1864	runlist_element *rl, *rl2;
1865	unsigned long flags;
1866	int err, mp_size;
1867	u32 attr_len = 0; /* Silence stupid gcc warning. */
1868	bool mp_rebuilt;
1869
1870#ifdef DEBUG
1871	read_lock_irqsave(&ni->size_lock, flags);
1872	allocated_size = ni->allocated_size;
1873	read_unlock_irqrestore(&ni->size_lock, flags);
1874	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
1875			"old_allocated_size 0x%llx, "
1876			"new_allocated_size 0x%llx, new_data_size 0x%llx, "
1877			"data_start 0x%llx.", vi->i_ino,
1878			(unsigned)le32_to_cpu(ni->type),
1879			(unsigned long long)allocated_size,
1880			(unsigned long long)new_alloc_size,
1881			(unsigned long long)new_data_size,
1882			(unsigned long long)start);
1883#endif
1884retry_extend:
1885	/*
1886	 * For non-resident attributes, @start and @new_size need to be aligned
1887	 * to cluster boundaries for allocation purposes.
1888	 */
1889	if (NInoNonResident(ni)) {
1890		if (start > 0)
1891			start &= ~(s64)vol->cluster_size_mask;
1892		new_alloc_size = (new_alloc_size + vol->cluster_size - 1) &
1893				~(s64)vol->cluster_size_mask;
1894	}
1895	BUG_ON(new_data_size >= 0 && new_data_size > new_alloc_size);
1896	/* Check if new size is allowed in $AttrDef. */
1897	err = ntfs_attr_size_bounds_check(vol, ni->type, new_alloc_size);
1898	if (unlikely(err)) {
1899		/* Only emit errors when the write will fail completely. */
1900		read_lock_irqsave(&ni->size_lock, flags);
1901		allocated_size = ni->allocated_size;
1902		read_unlock_irqrestore(&ni->size_lock, flags);
1903		if (start < 0 || start >= allocated_size) {
1904			if (err == -ERANGE) {
1905				ntfs_error(vol->sb, "Cannot extend allocation "
1906						"of inode 0x%lx, attribute "
1907						"type 0x%x, because the new "
1908						"allocation would exceed the "
1909						"maximum allowed size for "
1910						"this attribute type.",
1911						vi->i_ino, (unsigned)
1912						le32_to_cpu(ni->type));
1913			} else {
1914				ntfs_error(vol->sb, "Cannot extend allocation "
1915						"of inode 0x%lx, attribute "
1916						"type 0x%x, because this "
1917						"attribute type is not "
1918						"defined on the NTFS volume.  "
1919						"Possible corruption!  You "
1920						"should run chkdsk!",
1921						vi->i_ino, (unsigned)
1922						le32_to_cpu(ni->type));
1923			}
1924		}
1925		/* Translate error code to be POSIX conformant for write(2). */
1926		if (err == -ERANGE)
1927			err = -EFBIG;
1928		else
1929			err = -EIO;
1930		return err;
1931	}
1932	if (!NInoAttr(ni))
1933		base_ni = ni;
1934	else
1935		base_ni = ni->ext.base_ntfs_ino;
1936	/*
1937	 * We will be modifying both the runlist (if non-resident) and the mft
1938	 * record so lock them both down.
1939	 */
1940	down_write(&ni->runlist.lock);
1941	m = map_mft_record(base_ni);
1942	if (IS_ERR(m)) {
1943		err = PTR_ERR(m);
1944		m = NULL;
1945		ctx = NULL;
1946		goto err_out;
1947	}
1948	ctx = ntfs_attr_get_search_ctx(base_ni, m);
1949	if (unlikely(!ctx)) {
1950		err = -ENOMEM;
1951		goto err_out;
1952	}
1953	read_lock_irqsave(&ni->size_lock, flags);
1954	allocated_size = ni->allocated_size;
1955	read_unlock_irqrestore(&ni->size_lock, flags);
1956	/*
1957	 * If non-resident, seek to the last extent.  If resident, there is
1958	 * only one extent, so seek to that.
1959	 */
1960	vcn = NInoNonResident(ni) ? allocated_size >> vol->cluster_size_bits :
1961			0;
1962	/*
1963	 * Abort if someone did the work whilst we waited for the locks.  If we
1964	 * just converted the attribute from resident to non-resident it is
1965	 * likely that exactly this has happened already.  We cannot quite
1966	 * abort if we need to update the data size.
1967	 */
1968	if (unlikely(new_alloc_size <= allocated_size)) {
1969		ntfs_debug("Allocated size already exceeds requested size.");
1970		new_alloc_size = allocated_size;
1971		if (new_data_size < 0)
1972			goto done;
1973		/*
1974		 * We want the first attribute extent so that we can update the
1975		 * data size.
1976		 */
1977		vcn = 0;
1978	}
1979	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1980			CASE_SENSITIVE, vcn, NULL, 0, ctx);
1981	if (unlikely(err)) {
1982		if (err == -ENOENT)
1983			err = -EIO;
1984		goto err_out;
1985	}
1986	m = ctx->mrec;
1987	a = ctx->attr;
1988	/* Use goto to reduce indentation. */
1989	if (a->non_resident)
1990		goto do_non_resident_extend;
1991	BUG_ON(NInoNonResident(ni));
1992	/* The total length of the attribute value. */
1993	attr_len = le32_to_cpu(a->data.resident.value_length);
1994	/*
1995	 * Extend the attribute record to be able to store the new attribute
1996	 * size.  ntfs_attr_record_resize() will not do anything if the size is
1997	 * not changing.
1998	 */
1999	if (new_alloc_size < vol->mft_record_size &&
2000			!ntfs_attr_record_resize(m, a,
2001			le16_to_cpu(a->data.resident.value_offset) +
2002			new_alloc_size)) {
2003		/* The resize succeeded! */
2004		write_lock_irqsave(&ni->size_lock, flags);
2005		ni->allocated_size = le32_to_cpu(a->length) -
2006				le16_to_cpu(a->data.resident.value_offset);
2007		write_unlock_irqrestore(&ni->size_lock, flags);
2008		if (new_data_size >= 0) {
2009			BUG_ON(new_data_size < attr_len);
2010			a->data.resident.value_length =
2011					cpu_to_le32((u32)new_data_size);
2012		}
2013		goto flush_done;
2014	}
2015	/*
2016	 * We have to drop all the locks so we can call
2017	 * ntfs_attr_make_non_resident().  This could be optimised by try-
2018	 * locking the first page cache page and only if that fails dropping
2019	 * the locks, locking the page, and redoing all the locking and
2020	 * lookups.  While this would be a huge optimisation, it is not worth
2021	 * it as this is definitely a slow code path.
2022	 */
2023	ntfs_attr_put_search_ctx(ctx);
2024	unmap_mft_record(base_ni);
2025	up_write(&ni->runlist.lock);
2026	/*
2027	 * Not enough space in the mft record, try to make the attribute
2028	 * non-resident and if successful restart the extension process.
2029	 */
2030	err = ntfs_attr_make_non_resident(ni, attr_len);
2031	if (likely(!err))
2032		goto retry_extend;
2033	/*
2034	 * Could not make non-resident.  If this is due to this not being
2035	 * permitted for this attribute type or there not being enough space,
2036	 * try to make other attributes non-resident.  Otherwise fail.
2037	 */
2038	if (unlikely(err != -EPERM && err != -ENOSPC)) {
2039		/* Only emit errors when the write will fail completely. */
2040		read_lock_irqsave(&ni->size_lock, flags);
2041		allocated_size = ni->allocated_size;
2042		read_unlock_irqrestore(&ni->size_lock, flags);
2043		if (start < 0 || start >= allocated_size)
2044			ntfs_error(vol->sb, "Cannot extend allocation of "
2045					"inode 0x%lx, attribute type 0x%x, "
2046					"because the conversion from resident "
2047					"to non-resident attribute failed "
2048					"with error code %i.", vi->i_ino,
2049					(unsigned)le32_to_cpu(ni->type), err);
2050		if (err != -ENOMEM)
2051			err = -EIO;
2052		goto conv_err_out;
2053	}
2054	/* TODO: Not implemented from here, abort. */
2055	read_lock_irqsave(&ni->size_lock, flags);
2056	allocated_size = ni->allocated_size;
2057	read_unlock_irqrestore(&ni->size_lock, flags);
2058	if (start < 0 || start >= allocated_size) {
2059		if (err == -ENOSPC)
2060			ntfs_error(vol->sb, "Not enough space in the mft "
2061					"record/on disk for the non-resident "
2062					"attribute value.  This case is not "
2063					"implemented yet.");
2064		else /* if (err == -EPERM) */
2065			ntfs_error(vol->sb, "This attribute type may not be "
2066					"non-resident.  This case is not "
2067					"implemented yet.");
2068	}
2069	err = -EOPNOTSUPP;
2070	goto conv_err_out;
2071do_non_resident_extend:
2072	BUG_ON(!NInoNonResident(ni));
2073	if (new_alloc_size == allocated_size) {
2074		BUG_ON(vcn);
2075		goto alloc_done;
2076	}
2077	/*
2078	 * If the data starts after the end of the old allocation, this is a
2079	 * $DATA attribute and sparse attributes are enabled on the volume and
2080	 * for this inode, then create a sparse region between the old
2081	 * allocated size and the start of the data.  Otherwise simply proceed
2082	 * with filling the whole space between the old allocated size and the
2083	 * new allocated size with clusters.
2084	 */
2085	if ((start >= 0 && start <= allocated_size) || ni->type != AT_DATA ||
2086			!NVolSparseEnabled(vol) || NInoSparseDisabled(ni))
2087		goto skip_sparse;
2088	// TODO: This is not implemented yet.  We just fill in with real
2089	// clusters for now...
2090	ntfs_debug("Inserting holes is not implemented yet.  Falling back to "
2091			"allocating real clusters instead.");
2092skip_sparse:
2093	rl = ni->runlist.rl;
2094	if (likely(rl)) {
2095		/* Seek to the end of the runlist. */
2096		while (rl->length)
2097			rl++;
2098	}
2099	/* If this attribute extent is not mapped, map it now. */
2100	if (unlikely(!rl || rl->lcn == LCN_RL_NOT_MAPPED ||
2101			(rl->lcn == LCN_ENOENT && rl > ni->runlist.rl &&
2102			(rl-1)->lcn == LCN_RL_NOT_MAPPED))) {
2103		if (!rl && !allocated_size)
2104			goto first_alloc;
2105		rl = ntfs_mapping_pairs_decompress(vol, a, ni->runlist.rl);
2106		if (IS_ERR(rl)) {
2107			err = PTR_ERR(rl);
2108			if (start < 0 || start >= allocated_size)
2109				ntfs_error(vol->sb, "Cannot extend allocation "
2110						"of inode 0x%lx, attribute "
2111						"type 0x%x, because the "
2112						"mapping of a runlist "
2113						"fragment failed with error "
2114						"code %i.", vi->i_ino,
2115						(unsigned)le32_to_cpu(ni->type),
2116						err);
2117			if (err != -ENOMEM)
2118				err = -EIO;
2119			goto err_out;
2120		}
2121		ni->runlist.rl = rl;
2122		/* Seek to the end of the runlist. */
2123		while (rl->length)
2124			rl++;
2125	}
2126	/*
2127	 * We now know the runlist of the last extent is mapped and @rl is at
2128	 * the end of the runlist.  We want to begin allocating clusters
2129	 * starting at the last allocated cluster to reduce fragmentation.  If
2130	 * there are no valid LCNs in the attribute we let the cluster
2131	 * allocator choose the starting cluster.
2132	 */
2133	/* If the last LCN is a hole or similar, seek back to last real LCN. */
2134	while (rl->lcn < 0 && rl > ni->runlist.rl)
2135		rl--;
2136first_alloc:
2137	// Back the extension with real clusters so a write can be performed
2138	// when start >= 0.  (Needed for POSIX write(2) conformance.)
2139	rl2 = ntfs_cluster_alloc(vol, allocated_size >> vol->cluster_size_bits,
2140			(new_alloc_size - allocated_size) >>
2141			vol->cluster_size_bits, (rl && (rl->lcn >= 0)) ?
2142			rl->lcn + rl->length : -1, DATA_ZONE, true);
2143	if (IS_ERR(rl2)) {
2144		err = PTR_ERR(rl2);
2145		if (start < 0 || start >= allocated_size)
2146			ntfs_error(vol->sb, "Cannot extend allocation of "
2147					"inode 0x%lx, attribute type 0x%x, "
2148					"because the allocation of clusters "
2149					"failed with error code %i.", vi->i_ino,
2150					(unsigned)le32_to_cpu(ni->type), err);
2151		if (err != -ENOMEM && err != -ENOSPC)
2152			err = -EIO;
2153		goto err_out;
2154	}
2155	rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
2156	if (IS_ERR(rl)) {
2157		err = PTR_ERR(rl);
2158		if (start < 0 || start >= allocated_size)
2159			ntfs_error(vol->sb, "Cannot extend allocation of "
2160					"inode 0x%lx, attribute type 0x%x, "
2161					"because the runlist merge failed "
2162					"with error code %i.", vi->i_ino,
2163					(unsigned)le32_to_cpu(ni->type), err);
2164		if (err != -ENOMEM)
2165			err = -EIO;
2166		if (ntfs_cluster_free_from_rl(vol, rl2)) {
2167			ntfs_error(vol->sb, "Failed to release allocated "
2168					"cluster(s) in error code path.  Run "
2169					"chkdsk to recover the lost "
2170					"cluster(s).");
2171			NVolSetErrors(vol);
2172		}
2173		ntfs_free(rl2);
2174		goto err_out;
2175	}
2176	ni->runlist.rl = rl;
2177	ntfs_debug("Allocated 0x%llx clusters.", (long long)(new_alloc_size -
2178			allocated_size) >> vol->cluster_size_bits);
2179	/* Find the runlist element with which the attribute extent starts. */
2180	ll = sle64_to_cpu(a->data.non_resident.lowest_vcn);
2181	rl2 = ntfs_rl_find_vcn_nolock(rl, ll);
2182	BUG_ON(!rl2);
2183	BUG_ON(!rl2->length);
2184	BUG_ON(rl2->lcn < LCN_HOLE);
2185	mp_rebuilt = false;
2186	/* Get the size for the new mapping pairs array for this extent. */
2187	mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, ll, -1);
2188	if (unlikely(mp_size <= 0)) {
2189		err = mp_size;
2190		if (start < 0 || start >= allocated_size)
2191			ntfs_error(vol->sb, "Cannot extend allocation of "
2192					"inode 0x%lx, attribute type 0x%x, "
2193					"because determining the size for the "
2194					"mapping pairs failed with error code "
2195					"%i.", vi->i_ino,
2196					(unsigned)le32_to_cpu(ni->type), err);
2197		err = -EIO;
2198		goto undo_alloc;
2199	}
2200	/* Extend the attribute record to fit the bigger mapping pairs array. */
2201	attr_len = le32_to_cpu(a->length);
2202	err = ntfs_attr_record_resize(m, a, mp_size +
2203			le16_to_cpu(a->data.non_resident.mapping_pairs_offset));
2204	if (unlikely(err)) {
2205		BUG_ON(err != -ENOSPC);
2206		// TODO: Deal with this by moving this extent to a new mft
2207		// record or by starting a new extent in a new mft record,
2208		// possibly by extending this extent partially and filling it
2209		// and creating a new extent for the remainder, or by making
2210		// other attributes non-resident and/or by moving other
2211		// attributes out of this mft record.
2212		if (start < 0 || start >= allocated_size)
2213			ntfs_error(vol->sb, "Not enough space in the mft "
2214					"record for the extended attribute "
2215					"record.  This case is not "
2216					"implemented yet.");
2217		err = -EOPNOTSUPP;
2218		goto undo_alloc;
2219	}
2220	mp_rebuilt = true;
2221	/* Generate the mapping pairs array directly into the attr record. */
2222	err = ntfs_mapping_pairs_build(vol, (u8*)a +
2223			le16_to_cpu(a->data.non_resident.mapping_pairs_offset),
2224			mp_size, rl2, ll, -1, NULL);
2225	if (unlikely(err)) {
2226		if (start < 0 || start >= allocated_size)
2227			ntfs_error(vol->sb, "Cannot extend allocation of "
2228					"inode 0x%lx, attribute type 0x%x, "
2229					"because building the mapping pairs "
2230					"failed with error code %i.", vi->i_ino,
2231					(unsigned)le32_to_cpu(ni->type), err);
2232		err = -EIO;
2233		goto undo_alloc;
2234	}
2235	/* Update the highest_vcn. */
2236	a->data.non_resident.highest_vcn = cpu_to_sle64((new_alloc_size >>
2237			vol->cluster_size_bits) - 1);
2238	/*
2239	 * We now have extended the allocated size of the attribute.  Reflect
2240	 * this in the ntfs_inode structure and the attribute record.
2241	 */
2242	if (a->data.non_resident.lowest_vcn) {
2243		/*
2244		 * We are not in the first attribute extent, switch to it, but
2245		 * first ensure the changes will make it to disk later.
2246		 */
2247		flush_dcache_mft_record_page(ctx->ntfs_ino);
2248		mark_mft_record_dirty(ctx->ntfs_ino);
2249		ntfs_attr_reinit_search_ctx(ctx);
2250		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
2251				CASE_SENSITIVE, 0, NULL, 0, ctx);
2252		if (unlikely(err))
2253			goto restore_undo_alloc;
2254		/* @m is not used any more so no need to set it. */
2255		a = ctx->attr;
2256	}
2257	write_lock_irqsave(&ni->size_lock, flags);
2258	ni->allocated_size = new_alloc_size;
2259	a->data.non_resident.allocated_size = cpu_to_sle64(new_alloc_size);
2260	if (NInoSparse(ni) || NInoCompressed(ni)) {
2261		ni->itype.compressed.size += new_alloc_size - allocated_size;
2262		a->data.non_resident.compressed_size =
2263				cpu_to_sle64(ni->itype.compressed.size);
2264		vi->i_blocks = ni->itype.compressed.size >> 9;
2265	} else
2266		vi->i_blocks = new_alloc_size >> 9;
2267	write_unlock_irqrestore(&ni->size_lock, flags);
2268alloc_done:
2269	if (new_data_size >= 0) {
2270		BUG_ON(new_data_size <
2271				sle64_to_cpu(a->data.non_resident.data_size));
2272		a->data.non_resident.data_size = cpu_to_sle64(new_data_size);
2273	}
2274flush_done:
2275	/* Ensure the changes make it to disk. */
2276	flush_dcache_mft_record_page(ctx->ntfs_ino);
2277	mark_mft_record_dirty(ctx->ntfs_ino);
2278done:
2279	ntfs_attr_put_search_ctx(ctx);
2280	unmap_mft_record(base_ni);
2281	up_write(&ni->runlist.lock);
2282	ntfs_debug("Done, new_allocated_size 0x%llx.",
2283			(unsigned long long)new_alloc_size);
2284	return new_alloc_size;
2285restore_undo_alloc:
2286	if (start < 0 || start >= allocated_size)
2287		ntfs_error(vol->sb, "Cannot complete extension of allocation "
2288				"of inode 0x%lx, attribute type 0x%x, because "
2289				"lookup of first attribute extent failed with "
2290				"error code %i.", vi->i_ino,
2291				(unsigned)le32_to_cpu(ni->type), err);
2292	if (err == -ENOENT)
2293		err = -EIO;
2294	ntfs_attr_reinit_search_ctx(ctx);
2295	if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len, CASE_SENSITIVE,
2296			allocated_size >> vol->cluster_size_bits, NULL, 0,
2297			ctx)) {
2298		ntfs_error(vol->sb, "Failed to find last attribute extent of "
2299				"attribute in error code path.  Run chkdsk to "
2300				"recover.");
2301		write_lock_irqsave(&ni->size_lock, flags);
2302		ni->allocated_size = new_alloc_size;
2303		if (NInoSparse(ni) || NInoCompressed(ni)) {
2304			ni->itype.compressed.size += new_alloc_size -
2305					allocated_size;
2306			vi->i_blocks = ni->itype.compressed.size >> 9;
2307		} else
2308			vi->i_blocks = new_alloc_size >> 9;
2309		write_unlock_irqrestore(&ni->size_lock, flags);
2310		ntfs_attr_put_search_ctx(ctx);
2311		unmap_mft_record(base_ni);
2312		up_write(&ni->runlist.lock);
2313		/*
2314		 * The only thing that is now wrong is the allocated size of the
2315		 * base attribute extent which chkdsk should be able to fix.
2316		 */
2317		NVolSetErrors(vol);
2318		return err;
2319	}
2320	ctx->attr->data.non_resident.highest_vcn = cpu_to_sle64(
2321			(allocated_size >> vol->cluster_size_bits) - 1);
2322undo_alloc:
2323	ll = allocated_size >> vol->cluster_size_bits;
2324	if (ntfs_cluster_free(ni, ll, -1, ctx) < 0) {
2325		ntfs_error(vol->sb, "Failed to release allocated cluster(s) "
2326				"in error code path.  Run chkdsk to recover "
2327				"the lost cluster(s).");
2328		NVolSetErrors(vol);
2329	}
2330	m = ctx->mrec;
2331	a = ctx->attr;
2332	/*
2333	 * If the runlist truncation fails and/or the search context is no
2334	 * longer valid, we cannot resize the attribute record or build the
2335	 * mapping pairs array, thus we flag the volume as having errors so that
2336	 * chkdsk can recover the now orphaned clusters.
2337	 */
2338	if (ntfs_rl_truncate_nolock(vol, &ni->runlist, ll) || IS_ERR(m)) {
2339		ntfs_error(vol->sb, "Failed to %s in error code path.  Run "
2340				"chkdsk to recover.", IS_ERR(m) ?
2341				"restore attribute search context" :
2342				"truncate attribute runlist");
2343		NVolSetErrors(vol);
2344	} else if (mp_rebuilt) {
2345		if (ntfs_attr_record_resize(m, a, attr_len)) {
2346			ntfs_error(vol->sb, "Failed to restore attribute "
2347					"record in error code path.  Run "
2348					"chkdsk to recover.");
2349			NVolSetErrors(vol);
2350		} else /* if (success) */ {
2351			if (ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
2352					a->data.non_resident.
2353					mapping_pairs_offset), attr_len -
2354					le16_to_cpu(a->data.non_resident.
2355					mapping_pairs_offset), rl2, ll, -1,
2356					NULL)) {
2357				ntfs_error(vol->sb, "Failed to restore "
2358						"mapping pairs array in error "
2359						"code path.  Run chkdsk to "
2360						"recover.");
2361				NVolSetErrors(vol);
2362			}
2363			flush_dcache_mft_record_page(ctx->ntfs_ino);
2364			mark_mft_record_dirty(ctx->ntfs_ino);
2365		}
2366	}
2367err_out:
2368	if (ctx)
2369		ntfs_attr_put_search_ctx(ctx);
2370	if (m)
2371		unmap_mft_record(base_ni);
2372	up_write(&ni->runlist.lock);
2373conv_err_out:
2374	ntfs_debug("Failed.  Returning error code %i.", err);
2375	return err;
2376}
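
/*
 * Illustrative sketch (not part of the driver): extending the allocation of a
 * $DATA attribute ahead of an extending write, where @pos and @count are the
 * hypothetical write position and length supplied by the caller.  The data
 * size is left alone (-1) and clusters from @pos onwards must be real:
 *
 *	s64 new_size = ntfs_attr_extend_allocation(ni, pos + count, -1, pos);
 *	if (new_size < 0)
 *		return new_size;
 *
 * A successful return may still be a partial extension, so the caller has to
 * compare the returned size against pos + count before doing the write.
 */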
2377
2378/**
2379 * ntfs_attr_set - fill (a part of) an attribute with a byte
2380 * @ni:		ntfs inode describing the attribute to fill
2381 * @ofs:	offset inside the attribute at which to start to fill
2382 * @cnt:	number of bytes to fill
2383 * @val:	the unsigned 8-bit value with which to fill the attribute
2384 *
2385 * Fill @cnt bytes of the attribute described by the ntfs inode @ni starting at
2386 * byte offset @ofs inside the attribute with the constant byte @val.
2387 *
2388 * This function is effectively like memset() applied to an ntfs attribute.
2389 * Note this function actually only operates on the page cache pages belonging
2390 * to the ntfs attribute and it marks them dirty after doing the memset().
2391 * Thus it relies on the vm dirty page write code paths to cause the modified
2392 * pages to be written to the mft record/disk.
2393 *
2394 * Return 0 on success and -errno on error.  An error code of -ESPIPE means
2395 * that @ofs + @cnt were outside the end of the attribute and no write was
2396 * performed.
2397 */
2398int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2399{
2400	ntfs_volume *vol = ni->vol;
2401	struct address_space *mapping;
2402	struct page *page;
2403	u8 *kaddr;
2404	pgoff_t idx, end;
2405	unsigned int start_ofs, end_ofs, size;
2406
2407	ntfs_debug("Entering for ofs 0x%llx, cnt 0x%llx, val 0x%hx.",
2408			(long long)ofs, (long long)cnt, val);
2409	BUG_ON(ofs < 0);
2410	BUG_ON(cnt < 0);
2411	if (!cnt)
2412		goto done;
2413	BUG_ON(NInoCompressed(ni));
2414	BUG_ON(NInoEncrypted(ni));
2415	mapping = VFS_I(ni)->i_mapping;
2416	/* Work out the starting index and page offset. */
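	/* E.g. with 4096-byte pages, ofs 0x1234 -> idx 1, start_ofs 0x234. */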
2417	idx = ofs >> PAGE_CACHE_SHIFT;
2418	start_ofs = ofs & ~PAGE_CACHE_MASK;
2419	/* Work out the ending index and page offset. */
2420	end = ofs + cnt;
2421	end_ofs = end & ~PAGE_CACHE_MASK;
2422	/* If the end is outside the inode size return -ESPIPE. */
2423	if (unlikely(end > i_size_read(VFS_I(ni)))) {
2424		ntfs_error(vol->sb, "Request exceeds end of attribute.");
2425		return -ESPIPE;
2426	}
2427	end >>= PAGE_CACHE_SHIFT;
2428	/* If there is a first partial page, need to do it the slow way. */
2429	if (start_ofs) {
2430		page = read_mapping_page(mapping, idx, NULL);
2431		if (IS_ERR(page)) {
2432			ntfs_error(vol->sb, "Failed to read first partial "
2433					"page (error, index 0x%lx).", idx);
2434			return PTR_ERR(page);
2435		}
2436		/*
2437		 * If the last page is the same as the first page, need to
2438		 * limit the write to the end offset.
2439		 */
2440		size = PAGE_CACHE_SIZE;
2441		if (idx == end)
2442			size = end_ofs;
2443		kaddr = kmap_atomic(page, KM_USER0);
2444		memset(kaddr + start_ofs, val, size - start_ofs);
2445		flush_dcache_page(page);
2446		kunmap_atomic(kaddr, KM_USER0);
2447		set_page_dirty(page);
2448		page_cache_release(page);
2449		if (idx == end)
2450			goto done;
2451		idx++;
2452	}
2453	/* Do the whole pages the fast way. */
2454	for (; idx < end; idx++) {
2455		/* Find or create the current page.  (The page is locked.) */
2456		page = grab_cache_page(mapping, idx);
2457		if (unlikely(!page)) {
2458			ntfs_error(vol->sb, "Insufficient memory to grab "
2459					"page (index 0x%lx).", idx);
2460			return -ENOMEM;
2461		}
2462		kaddr = kmap_atomic(page, KM_USER0);
2463		memset(kaddr, val, PAGE_CACHE_SIZE);
2464		flush_dcache_page(page);
2465		kunmap_atomic(kaddr, KM_USER0);
2466		/*
2467		 * If the page has buffers, mark them uptodate since buffer
2468		 * state and not page state is definitive in 2.6 kernels.
2469		 */
2470		if (page_has_buffers(page)) {
2471			struct buffer_head *bh, *head;
2472
2473			bh = head = page_buffers(page);
2474			do {
2475				set_buffer_uptodate(bh);
2476			} while ((bh = bh->b_this_page) != head);
2477		}
2478		/* Now that buffers are uptodate, set the page uptodate, too. */
2479		SetPageUptodate(page);
2480		/*
2481		 * Set the page and all its buffers dirty and mark the inode
2482		 * dirty, too.  The VM will write the page later on.
2483		 */
2484		set_page_dirty(page);
2485		/* Finally unlock and release the page. */
2486		unlock_page(page);
2487		page_cache_release(page);
2488		balance_dirty_pages_ratelimited(mapping);
2489		cond_resched();
2490	}
2491	/* If there is a last partial page, need to do it the slow way. */
2492	if (end_ofs) {
2493		page = read_mapping_page(mapping, idx, NULL);
2494		if (IS_ERR(page)) {
2495			ntfs_error(vol->sb, "Failed to read last partial page "
2496					"(error, index 0x%lx).", idx);
2497			return PTR_ERR(page);
2498		}
2499		kaddr = kmap_atomic(page, KM_USER0);
2500		memset(kaddr, val, end_ofs);
2501		flush_dcache_page(page);
2502		kunmap_atomic(kaddr, KM_USER0);
2503		set_page_dirty(page);
2504		page_cache_release(page);
2505	}
2506done:
2507	ntfs_debug("Done.");
2508	return 0;
2509}
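
/*
 * Illustrative sketch (not part of the driver): zeroing a newly exposed
 * region of an attribute, where @old_init_size and @new_init_size are
 * hypothetical byte offsets within the attribute:
 *
 *	err = ntfs_attr_set(ni, old_init_size,
 *			new_init_size - old_init_size, 0);
 *
 * A return value of -ESPIPE means the requested range extends beyond the end
 * of the attribute and nothing has been written.
 */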
2510
2511#endif /* NTFS_RW */
2512