/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
/*
 *	File:	vnode_pager.c
 *
 *	"Swap" pager that pages to/from vnodes.  Also
 *	handles demand paging from files.
 *
 */

#include <mach/boolean.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/kauth.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/mount_internal.h>	/* needs internal due to fhandle_t */
#include <sys/ubc_internal.h>
#include <sys/lock.h>
#include <sys/disk.h>		/* For DKIOC calls */

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/upl.h>
#include <mach/sdt.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <libkern/libkern.h>

#include <vm/vnode_pager.h>
#include <vm/vm_pageout.h>

#include <kern/assert.h>
#include <sys/kdebug.h>
#include <machine/spl.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>

#include <vm/vm_protos.h>

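/*
 * vnode_pager_throttle:
 * If the current uthread has been flagged for low-priority I/O
 * (uu_lowpri_window is set), hand it to the lowpri throttling
 * mechanism so the pager's I/O is delayed appropriately.
 */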
void
vnode_pager_throttle(void)
{
	struct uthread *ut;

	ut = get_bsdthread_info(current_thread());

	if (ut->uu_lowpri_window)
		throttle_lowpri_io(1);
}


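/*
 * vnode_pager_isSSD:
 * Report whether the filesystem backing this vnode is mounted on
 * solid-state media, as indicated by the MNTK_SSD mount flag.
 */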
boolean_t
vnode_pager_isSSD(vnode_t vp)
{
	if (vp->v_mount->mnt_kern_flag & MNTK_SSD)
		return (TRUE);
	return (FALSE);
}


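/*
 * vnode_pager_isinuse:
 * Return 1 if the vnode has users beyond the kernel's own references
 * (its usecount exceeds its kernel usecount), 0 otherwise.
 */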
uint32_t
vnode_pager_isinuse(struct vnode *vp)
{
	if (vp->v_usecount > vp->v_kusecount)
		return (1);
	return (0);
}

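/*
 * vnode_pager_return_throttle_io_limit:
 * Thin wrapper around cluster_throttle_io_limit(), which reports the
 * throttled I/O size limit for this vnode through 'limit'.
 */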
uint32_t
vnode_pager_return_throttle_io_limit(struct vnode *vp, uint32_t *limit)
{
	return (cluster_throttle_io_limit(vp, limit));
}

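/*
 * vnode_pager_get_filesize:
 * Return the file size currently recorded by the UBC layer for this vnode.
 */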
vm_object_offset_t
vnode_pager_get_filesize(struct vnode *vp)
{
	return (vm_object_offset_t) ubc_getsize(vp);
}

extern int safe_getpath(struct vnode *dvp, char *leafname, char *path, int _len, int *truncated_path);

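/*
 * vnode_pager_get_name:
 * Fill in the path and/or leaf name of the file backing this vnode for
 * the VM layer.  If no pathname buffer is supplied, or the path had to be
 * truncated to fit 'pathname_len' bytes, the leaf name is copied into
 * 'filename' instead.
 */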
kern_return_t
vnode_pager_get_name(
	struct vnode	*vp,
	char		*pathname,
	vm_size_t	pathname_len,
	char		*filename,
	vm_size_t	filename_len,
	boolean_t	*truncated_path_p)
{
	*truncated_path_p = FALSE;
	if (pathname != NULL) {
		/* get the path name */
		safe_getpath(vp, NULL,
			     pathname, (int) pathname_len,
			     truncated_path_p);
	}
	if ((pathname == NULL || *truncated_path_p) &&
	    filename != NULL) {
		/* get the file name */
		const char *name;

		name = vnode_getname_printable(vp);
		strlcpy(filename, name, (size_t) filename_len);
		vnode_putname_printable(name);
	}
	return KERN_SUCCESS;
}

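/*
 * vnode_pager_get_mtime:
 * Return the file's current modification time and, if requested, the
 * modification time recorded by the UBC for its code-signing blob.
 */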
kern_return_t
vnode_pager_get_mtime(
	struct vnode	*vp,
	struct timespec	*current_mtime,
	struct timespec	*cs_mtime)
{
	vnode_mtime(vp, current_mtime, vfs_context_current());
	if (cs_mtime != NULL) {
		ubc_get_cs_mtime(vp, cs_mtime);
	}
	return KERN_SUCCESS;
}

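/*
 * vnode_pager_get_cs_blobs:
 * Return the code-signing blobs registered with the UBC info for this
 * vnode, if any.
 */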
kern_return_t
vnode_pager_get_cs_blobs(
	struct vnode	*vp,
	void		**blobs)
{
	*blobs = ubc_get_cs_blobs(vp);
	return KERN_SUCCESS;
}

/*
 * vnode_trim:
 * Used to call the DKIOCUNMAP ioctl on the underlying disk device for the specified vnode.
 * Trims the region starting 'offset' bytes into the file, for 'length' bytes.
 *
 * Care must be taken to ensure that the vnode is sufficiently reference counted at the time this
 * function is called; no iocounts or usecounts are taken on the vnode.
 * This function is non-idempotent in error cases; we cannot un-discard the blocks if only some of them
 * are successfully discarded.
 */
u_int32_t vnode_trim (
		struct vnode *vp,
		off_t offset,
		size_t length)
{
	daddr64_t io_blockno;	/* Block number corresponding to the start of the extent */
	size_t io_bytecount;	/* Number of bytes in current extent for the specified range */
	size_t trimmed = 0;
	off_t current_offset = offset;
	size_t remaining_length = length;
	int error = 0;
	u_int32_t blocksize = 0;
	struct vnode *devvp;
	dk_extent_t extent;
	dk_unmap_t unmap;


	/* Get the underlying device vnode */
	devvp = vp->v_mount->mnt_devvp;

	/* Figure out the underlying device block size */
	error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&blocksize, 0, vfs_context_kernel());
	if (error) {
		goto trim_exit;
	}

	/*
	 * We may not get the entire range from offset -> offset+length in a single
	 * extent from the blockmap call.  Keep looping until we are sure we've hit
	 * the whole range or we encounter an error.
	 */
	while (trimmed < length) {
		/*
		 * VNOP_BLOCKMAP will tell us the logical to physical block number mapping for the
		 * specified offset.  It returns blocks in contiguous chunks, so if the logical range is
		 * broken into multiple extents, it must be called multiple times, increasing the offset
		 * in each call to ensure that the entire range is covered.
		 */
		error = VNOP_BLOCKMAP (vp, current_offset, remaining_length,
				&io_blockno, &io_bytecount, NULL, VNODE_READ, NULL);

		if (error) {
			goto trim_exit;
		}
		/*
		 * We have a contiguous run.  Prepare & issue the ioctl for the device.
		 * The DKIOCUNMAP ioctl takes offsets in bytes from the start of the device.
		 */
		memset (&extent, 0, sizeof(dk_extent_t));
		memset (&unmap, 0, sizeof(dk_unmap_t));
		extent.offset = (uint64_t) io_blockno * (uint64_t) blocksize;
		extent.length = io_bytecount;
		unmap.extents = &extent;
		unmap.extentsCount = 1;
		error = VNOP_IOCTL(devvp, DKIOCUNMAP, (caddr_t)&unmap, 0, vfs_context_kernel());

		if (error) {
			goto trim_exit;
		}
		remaining_length = remaining_length - io_bytecount;
		trimmed = trimmed + io_bytecount;
		current_offset = current_offset + io_bytecount;
	}
trim_exit:

	return error;

}

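/*
 * vnode_pageout:
 * Push the pages described by 'upl'/'f_offset'/'size' back to the file.
 * If no UPL is supplied, one is created here, unless the filesystem has
 * opted into the V2 pageout interface, in which case it builds its own.
 * Runs of dirty pages in the UPL are handed to VNOP_PAGEOUT; clean pages
 * have any incore buffers invalidated and are committed or aborted as
 * appropriate.
 */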
pager_return_t
vnode_pageout(struct vnode *vp,
	upl_t			upl,
	upl_offset_t		upl_offset,
	vm_object_offset_t	f_offset,
	upl_size_t		size,
	int			flags,
	int			*errorp)
{
	int		result = PAGER_SUCCESS;
	int		error = 0;
	int		error_ret = 0;
	daddr64_t	blkno;
	int		isize;
	int		pg_index;
	int		base_index;
	upl_offset_t	offset;
	upl_page_info_t	*pl;
	vfs_context_t	ctx = vfs_context_current();	/* pager context */

	isize = (int)size;

	if (isize <= 0) {
		result    = PAGER_ERROR;
		error_ret = EINVAL;
		goto out;
	}

	if (UBCINFOEXISTS(vp) == 0) {
		result    = PAGER_ERROR;
		error_ret = EINVAL;

		if (upl && !(flags & UPL_NOCOMMIT))
			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
		goto out;
	}
	if ( !(flags & UPL_VNODE_PAGER)) {
		/*
		 * This is a pageout from the default pager,
		 * just go ahead and call VNOP_PAGEOUT since
		 * it has already sorted out the dirty ranges
		 */
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			(MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
			size, 1, 0, 0, 0);

		if ( (error_ret = VNOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset,
					       (size_t)size, flags, ctx)) )
			result = PAGER_ERROR;

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			(MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
			size, 1, 0, 0, 0);

		goto out;
	}
	if (upl == NULL) {
		int			request_flags;

		if (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_PAGEOUTV2) {
			/*
			 * filesystem has requested the new form of VNOP_PAGEOUT for file
			 * backed objects... we will not grab the UPL before calling VNOP_PAGEOUT...
			 * it is the filesystem's responsibility to grab the range we're denoting
			 * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first
			 * take any locks it needs, before effectively locking the pages into a UPL...
			 */
			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				(MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
				size, (int)f_offset, 0, 0, 0);

			if ( (error_ret = VNOP_PAGEOUT(vp, NULL, upl_offset, (off_t)f_offset,
						       size, flags, ctx)) ) {
				result = PAGER_ERROR;
			}
			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				(MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
				size, 0, 0, 0, 0);

			goto out;
		}
		if (flags & UPL_MSYNC)
			request_flags = UPL_UBC_MSYNC | UPL_RET_ONLY_DIRTY;
		else
			request_flags = UPL_UBC_PAGEOUT | UPL_RET_ONLY_DIRTY;

		if (ubc_create_upl(vp, f_offset, size, &upl, &pl, request_flags) != KERN_SUCCESS) {
			result    = PAGER_ERROR;
			error_ret = EINVAL;
			goto out;
		}
		upl_offset = 0;
	} else
		pl = ubc_upl_pageinfo(upl);

	/*
	 * we come here for pageouts to 'real' files and
	 * for msyncs...  the upl may not contain any
	 * dirty pages... it's our responsibility to sort
	 * through it and find the 'runs' of dirty pages
	 * to call VNOP_PAGEOUT on...
	 */
	if (ubc_getsize(vp) == 0) {
		/*
		 * if the file has been effectively deleted, then
		 * we need to go through the UPL and invalidate any
		 * buffer headers we might have that reference any
		 * of its pages
		 */
		for (offset = upl_offset; isize; isize -= PAGE_SIZE, offset += PAGE_SIZE) {
#if NFSCLIENT
			if (vp->v_tag == VT_NFS)
				/* check with nfs if page is OK to drop */
				error = nfs_buf_page_inval(vp, (off_t)f_offset);
			else
#endif
			{
				blkno = ubc_offtoblk(vp, (off_t)f_offset);
				error = buf_invalblkno(vp, blkno, 0);
			}
			if (error) {
				if ( !(flags & UPL_NOCOMMIT))
					ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
				if (error_ret == 0)
					error_ret = error;
				result = PAGER_ERROR;

			} else if ( !(flags & UPL_NOCOMMIT)) {
				ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY);
			}
			f_offset += PAGE_SIZE;
		}
		goto out;
	}
	/*
	 * Ignore any non-present pages at the end of the
	 * UPL so that we aren't looking at a UPL that
	 * may already have been freed by the preceding
	 * aborts/completions.
	 */
	base_index = upl_offset / PAGE_SIZE;

	for (pg_index = (upl_offset + isize) / PAGE_SIZE; pg_index > base_index;) {
		if (upl_page_present(pl, --pg_index))
			break;
		if (pg_index == base_index) {
			/*
			 * no pages were returned, so release
			 * our hold on the upl and leave
			 */
			if ( !(flags & UPL_NOCOMMIT))
				ubc_upl_abort_range(upl, upl_offset, isize, UPL_ABORT_FREE_ON_EMPTY);

			goto out;
		}
	}
	isize = ((pg_index + 1) - base_index) * PAGE_SIZE;

	offset = upl_offset;
	pg_index = base_index;

	while (isize) {
		int  xsize;
		int  num_of_pages;

		if ( !upl_page_present(pl, pg_index)) {
			/*
			 * we asked for RET_ONLY_DIRTY, so it's possible
			 * to get back empty slots in the UPL...
			 * just skip over them
			 */
			f_offset += PAGE_SIZE;
			offset   += PAGE_SIZE;
			isize    -= PAGE_SIZE;
			pg_index++;

			continue;
		}
		if ( !upl_dirty_page(pl, pg_index)) {
			/*
			 * if the page is not dirty and reached here it is
			 * marked precious, or it is due to invalidation in a
			 * memory_object_lock request as part of truncation.
			 * We also get here from vm_object_terminate().
			 * So all you need to do in these
			 * cases is to invalidate the incore buffer if it is there.
			 * Note we must not sleep here if the buffer is busy - that is
			 * a lock inversion which causes deadlock.
			 */
#if NFSCLIENT
			if (vp->v_tag == VT_NFS)
				/* check with nfs if page is OK to drop */
				error = nfs_buf_page_inval(vp, (off_t)f_offset);
			else
#endif
			{
				blkno = ubc_offtoblk(vp, (off_t)f_offset);
				error = buf_invalblkno(vp, blkno, 0);
			}
			if (error) {
				if ( !(flags & UPL_NOCOMMIT))
					ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
				if (error_ret == 0)
					error_ret = error;
				result = PAGER_ERROR;

			} else if ( !(flags & UPL_NOCOMMIT)) {
				ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY);
			}
			f_offset += PAGE_SIZE;
			offset   += PAGE_SIZE;
			isize    -= PAGE_SIZE;
			pg_index++;

			continue;
		}
		num_of_pages = 1;
		xsize = isize - PAGE_SIZE;

		while (xsize) {
			if ( !upl_dirty_page(pl, pg_index + num_of_pages))
				break;
			num_of_pages++;
			xsize -= PAGE_SIZE;
		}
		xsize = num_of_pages * PAGE_SIZE;

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			(MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START,
			xsize, (int)f_offset, 0, 0, 0);

		if ( (error = VNOP_PAGEOUT(vp, upl, offset, (off_t)f_offset,
					   xsize, flags, ctx)) ) {
			if (error_ret == 0)
				error_ret = error;
			result = PAGER_ERROR;
		}
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			(MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END,
			xsize, 0, 0, 0, 0);

		f_offset += xsize;
		offset   += xsize;
		isize    -= xsize;
		pg_index += num_of_pages;
	}
out:
	if (errorp)
		*errorp = error_ret;

	return (result);
}


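/*
 * vnode_pagein:
 * Bring the pages described by 'f_offset'/'size' in from the file.
 * If no UPL is supplied, one is created here, unless the filesystem has
 * opted into the V2 pagein interface, in which case it builds its own.
 * Runs of present but not-yet-valid pages are handed to VNOP_PAGEIN;
 * pages that are already valid are released unchanged when we hold
 * commit responsibility.
 */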
pager_return_t
vnode_pagein(
	struct vnode		*vp,
	upl_t			upl,
	upl_offset_t		upl_offset,
	vm_object_offset_t	f_offset,
	upl_size_t		size,
	int			flags,
	int			*errorp)
{
	upl_page_info_t	*pl;
	int		result = PAGER_SUCCESS;
	int		error = 0;
	int		pages_in_upl;
	int		start_pg;
	int		last_pg;
	int		first_pg;
	int		xsize;
	int		must_commit = 1;
	int		ignore_valid_page_check = 0;

	if (flags & UPL_NOCOMMIT)
		must_commit = 0;

	if (flags & UPL_IGNORE_VALID_PAGE_CHECK)
		ignore_valid_page_check = 1;

	if (UBCINFOEXISTS(vp) == 0) {
		result = PAGER_ERROR;
		error  = PAGER_ERROR;

		if (upl && must_commit)
			ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);

		goto out;
	}
	if (upl == (upl_t)NULL) {
		flags &= ~UPL_NOCOMMIT;

		if (size > (MAX_UPL_SIZE * PAGE_SIZE)) {
			result = PAGER_ERROR;
			error  = PAGER_ERROR;
			goto out;
		}
		if (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_PAGEINV2) {
			/*
			 * filesystem has requested the new form of VNOP_PAGEIN for file
			 * backed objects... we will not grab the UPL before calling VNOP_PAGEIN...
			 * it is the filesystem's responsibility to grab the range we're denoting
			 * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first
			 * take any locks it needs, before effectively locking the pages into a UPL...
			 * so we pass a NULL into the filesystem instead of a UPL pointer... the 'upl_offset'
			 * is used to identify the "must have" page in the extent... the filesystem is free
			 * to clip the extent to better fit the underlying FS blocksize if it desires as
			 * long as it continues to include the "must have" page... 'f_offset' + 'upl_offset'
			 * identifies that page
			 */
			if ( (error = VNOP_PAGEIN(vp, NULL, upl_offset, (off_t)f_offset,
						  size, flags, vfs_context_current())) ) {
				result = PAGER_ERROR;
				error  = PAGER_ERROR;
			}
			goto out;
		}
		ubc_create_upl(vp, f_offset, size, &upl, &pl, UPL_UBC_PAGEIN | UPL_RET_ONLY_ABSENT);

		if (upl == (upl_t)NULL) {
			result = PAGER_ABSENT;
			error  = PAGER_ABSENT;
			goto out;
		}
		ubc_upl_range_needed(upl, upl_offset / PAGE_SIZE, 1);

		upl_offset = 0;
		first_pg = 0;

		/*
		 * if we get here, we've created the upl and
		 * are responsible for committing/aborting it
		 * regardless of what the caller has passed in
		 */
		must_commit = 1;
	} else {
		pl = ubc_upl_pageinfo(upl);
		first_pg = upl_offset / PAGE_SIZE;
	}
	pages_in_upl = size / PAGE_SIZE;
	DTRACE_VM2(pgpgin, int, pages_in_upl, (uint64_t *), NULL);

	/*
	 * before we start marching forward, we must make sure we end on
	 * a present page, otherwise we will be working with a freed
	 * upl
	 */
	for (last_pg = pages_in_upl - 1; last_pg >= first_pg; last_pg--) {
		if (upl_page_present(pl, last_pg))
			break;
		if (last_pg == first_pg) {
			/*
			 * empty UPL, no pages are present
			 */
			if (must_commit)
				ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY);
			goto out;
		}
	}
	pages_in_upl = last_pg + 1;
	last_pg = first_pg;

	while (last_pg < pages_in_upl) {
		/*
		 * skip over missing pages...
		 */
		for ( ; last_pg < pages_in_upl; last_pg++) {
			if (upl_page_present(pl, last_pg))
				break;
		}

		if (ignore_valid_page_check == 1) {
			start_pg = last_pg;
		} else {
			/*
			 * skip over 'valid' pages... we don't want to issue I/O for these
			 */
			for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
				if (!upl_valid_page(pl, last_pg))
					break;
			}
		}

		if (last_pg > start_pg) {
			/*
			 * we've found a range of valid pages...
			 * if we've got COMMIT responsibility,
			 * commit this range of pages back to the
			 * cache unchanged
			 */
			xsize = (last_pg - start_pg) * PAGE_SIZE;

			if (must_commit)
				ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, xsize, UPL_ABORT_FREE_ON_EMPTY);
		}
		if (last_pg == pages_in_upl)
			/*
			 * we're done... all pages that were present
			 * have either had I/O issued on them or
			 * were aborted unchanged...
			 */
			break;

		if (!upl_page_present(pl, last_pg)) {
			/*
			 * we found a range of valid pages
			 * terminated by a missing page...
			 * bump index to the next page and continue on
			 */
			last_pg++;
			continue;
		}
		/*
		 * scan from the found invalid page looking for a valid
		 * or non-present page before the end of the upl is reached; if we
		 * find one, then it will be the last page of the request to
		 * 'cluster_io'
		 */
		for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) {
			if (( !ignore_valid_page_check && upl_valid_page(pl, last_pg)) || !upl_page_present(pl, last_pg))
				break;
		}
		if (last_pg > start_pg) {
			int xoff;
			xsize = (last_pg - start_pg) * PAGE_SIZE;
			xoff  = start_pg * PAGE_SIZE;

			if ( (error = VNOP_PAGEIN(vp, upl, (upl_offset_t) xoff,
					       (off_t)f_offset + xoff,
					       xsize, flags, vfs_context_current())) ) {
				/*
				 * Usually this UPL will be aborted/committed by the lower cluster layer.
				 *
				 * a)	In the case of decmpfs, however, we may return an error (EAGAIN) to avoid
				 *	a deadlock with another thread already inflating the file.
				 *
				 * b)	In the case of content protection, EPERM is a valid error and we should respect it.
				 *
				 * In those cases, we must take care of our UPL at this layer itself.
				 */
				if (must_commit) {
					if (error == EAGAIN) {
						ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_RESTART);
					}
#if CONFIG_PROTECT
					if (error == EPERM) {
						ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
					}
#endif
				}
				result = PAGER_ERROR;
				error  = PAGER_ERROR;

			}
		}
	}
out:
	if (errorp)
		*errorp = result;

	return (error);
}

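/*
 * vnode_pager_shutdown:
 * Walk the backing-store table and drop the vnode reference that
 * macx_swapon() took on each swap file, clearing the table entries.
 */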
void
vnode_pager_shutdown(void)
{
	int i;
	vnode_t vp;

	for (i = 0; i < MAX_BACKING_STORE; i++) {
		vp = (vnode_t)(bs_port_table[i]).vp;
		if (vp) {
			(bs_port_table[i]).vp = 0;

			/* get rid of macx_swapon() reference */
			vnode_rele(vp);
		}
	}
}


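/*
 * upl_get_internal_page_list:
 * Function wrapper around the UPL_GET_INTERNAL_PAGE_LIST() macro;
 * returns the page-info list associated with the given UPL.
 */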
void *
upl_get_internal_page_list(upl_t upl)
{
	return (UPL_GET_INTERNAL_PAGE_LIST(upl));
}