1/*	$NetBSD: uvm_io.c,v 1.30 2024/05/03 07:09:20 skrll Exp $	*/
2
3/*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 *
27 * from: Id: uvm_io.c,v 1.1.2.2 1997/12/30 12:02:00 mrg Exp
28 */
29
30/*
31 * uvm_io.c: uvm i/o ops
32 */
33
34#include <sys/cdefs.h>
35__KERNEL_RCSID(0, "$NetBSD: uvm_io.c,v 1.30 2024/05/03 07:09:20 skrll Exp $");
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/mman.h>
40#include <sys/uio.h>
41
42#include <uvm/uvm.h>
43
44/*
45 * functions
46 */
47
48/*
49 * uvm_io: perform I/O on a map
50 *
51 * => caller must have a reference to "map" so that it doesn't go away
52 *    while we are working.
53 */
54
int
uvm_io(struct vm_map *map, struct uio *uio, int flags)
{
	vaddr_t baseva, endva, pageoffset, kva;
	vsize_t chunksz, togo, sz;
	struct vm_map_entry *dead_entries;
	int error;

	/*
	 * step 0: sanity checks and set up for copy loop.  start with a
	 * large chunk size.  if we have trouble finding vm space we will
	 * reduce it.
	 *
	 * uio->uio_offset is interpreted as the starting virtual address
	 * in "map"; uio->uio_resid is the number of bytes to move.
	 */

	if (uio->uio_resid == 0)		/* nothing to do */
		return 0;
	togo = uio->uio_resid;			/* bytes left to transfer */

	baseva = (vaddr_t) uio->uio_offset;
	endva = baseva + (togo - 1);		/* inclusive last byte */

	if (endva < baseva)   /* wrap around? */
		return EIO;

	/* entirely past the user address range: behave like EOF */
	if (baseva >= VM_MAXUSER_ADDRESS)
		return 0;
	if (endva >= VM_MAXUSER_ADDRESS)
		/* EOF truncate: clip the transfer to end at the last user VA */
		togo = togo - (endva - VM_MAXUSER_ADDRESS + 1);

	/*
	 * work in whole pages: remember the sub-page offset of the first
	 * byte (pageoffset applies to the first chunk only; it is reset
	 * to 0 by the for-loop increment clause below).
	 */
	pageoffset = baseva & PAGE_MASK;
	baseva = trunc_page(baseva);
	chunksz = MIN(round_page(togo + pageoffset), trunc_page(MAXPHYS));
	error = 0;

	flags |= UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT;

	/*
	 * XXX: QREF cannot be used without AMAP_REFALL, and REFALL is
	 * unsafe, so strip QREF again right after setting it (kept this
	 * way presumably so the intended flag set stays visible above).
	 */
	flags &= ~UVM_EXTRACT_QREF;

	/*
	 * step 1: main loop...  while we've got data to move
	 */

	for (/*null*/; togo > 0 ; pageoffset = 0) {

		/*
		 * step 2: extract mappings from the map into kernel_map,
		 * giving us a kernel virtual address (kva) through which
		 * the target pages can be addressed.
		 */

		error = uvm_map_extract(map, baseva, chunksz, kernel_map, &kva,
		    flags);
		if (error) {

			/* retry with a smaller chunk... */
			if (error == ENOMEM && chunksz > PAGE_SIZE) {
				chunksz = trunc_page(chunksz / 2);
				if (chunksz < PAGE_SIZE)
					chunksz = PAGE_SIZE;
				continue;	/* retry same baseva */
			}

			break;		/* hard failure: give up */
		}

		/*
		 * step 3: move a chunk of data.  the first chunk may start
		 * mid-page, hence the pageoffset adjustment; sz never
		 * exceeds the bytes actually remaining.
		 */

		sz = chunksz - pageoffset;
		if (sz > togo)
			sz = togo;
		error = uiomove((void *) (kva + pageoffset), sz, uio);
		togo -= sz;
		baseva += chunksz;	/* advance even on uiomove error */

		/*
		 * step 4: unmap the area of kernel memory.  this must be
		 * done even if uiomove failed, so the error check is
		 * deferred until after the unmap.  the detach of dead
		 * entries happens outside the map lock.
		 */

		vm_map_lock(kernel_map);
		uvm_unmap_remove(kernel_map, kva, kva + chunksz, &dead_entries,
		   0);
		vm_map_unlock(kernel_map);
		if (dead_entries != NULL)
			uvm_unmap_detach(dead_entries, AMAP_REFALL);

		if (error)
			break;
	}

	/* on success, togo reached 0 and error is 0 */
	return error;
}
146