/*	$OpenBSD: kern_physio.c,v 1.10 2001/05/07 22:16:35 art Exp $	*/
/*	$NetBSD: kern_physio.c,v 1.28 1997/05/19 10:43:28 pk Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_physio.c	8.1 (Berkeley) 6/10/93
 */

45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/buf.h>
48#include <sys/conf.h>
49#include <sys/proc.h>
50#include <sys/malloc.h>
51
52#include <vm/vm.h>
53
54#if defined(UVM)
55#include <uvm/uvm_extern.h>
56#endif
57
/*
 * The routines implemented in this file are described in:
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *	    UNIX Operating System (Addison-Wesley, 1989)
 * on pages 231-233.
 *
 * The routines "getphysbuf" and "putphysbuf" steal and return a swap
 * buffer.  Leffler, et al., says that swap buffers are used to do the
 * I/O, so raw I/O requests don't have to be single-threaded.
 */

/* Forward declarations: borrow and return a buffer header for physio(). */
struct buf *getphysbuf __P((void));
void putphysbuf __P((struct buf *bp));
71
/*
 * Do "physical I/O" on behalf of a user.  "Physical I/O" is I/O directly
 * from the raw device to user buffers, and bypasses the buffer cache.
 *
 * Comments in brackets are from Leffler, et al.'s pseudo-code implementation.
 */
/*
 * physio() arguments:
 *	strategy	driver strategy routine that starts the transfer
 *	bp		buffer header to use, or NULL to borrow one
 *	dev		device to do the I/O on
 *	flags		B_READ or B_WRITE (other bits are masked off below)
 *	minphys		routine that bounds bp->b_bcount for one transfer
 *	uio		describes the data buffers and starting offset
 *
 * Returns 0, or an errno: EFAULT for an inaccessible user buffer, or
 * the driver's bp->b_error (EIO if the driver set B_ERROR without one).
 */
int
physio(strategy, bp, dev, flags, minphys, uio)
	void (*strategy) __P((struct buf *));
	struct buf *bp;
	dev_t dev;
	int flags;
	void (*minphys) __P((struct buf *));
	struct uio *uio;
{
	struct iovec *iovp;
	struct proc *p = curproc;
	int error, done, i, nobuf, s, todo;

	error = 0;
	/* Only the transfer-direction bit of the caller's flags is used. */
	flags &= B_READ | B_WRITE;

	/*
	 * [check user read/write access to the data buffer]
	 *
	 * Check each iov one by one.  Note that we know if we're reading or
	 * writing, so we ignore the uio's rw parameter.  Also note that if
	 * we're doing a read, that's a *write* to user-space.
	 */
	if (uio->uio_segflg == UIO_USERSPACE)
		for (i = 0; i < uio->uio_iovcnt; i++)
#if defined(UVM) /* XXXCDC: map not locked, rethink */
			/* XXX - obsolete now that vslock can error? */
			if (!uvm_useracc(uio->uio_iov[i].iov_base,
				     uio->uio_iov[i].iov_len,
				     (flags == B_READ) ? B_WRITE : B_READ))
				return (EFAULT);
#else
			if (!useracc(uio->uio_iov[i].iov_base,
			    uio->uio_iov[i].iov_len,
			    (flags == B_READ) ? B_WRITE : B_READ))
				return (EFAULT);
#endif

	/* Make sure we have a buffer, creating one if necessary. */
	if ((nobuf = (bp == NULL)) != 0)
		bp = getphysbuf();

	/* [raise the processor priority level to splbio;] */
	s = splbio();

	/* [while the buffer is marked busy] */
	while (bp->b_flags & B_BUSY) {
		/* [mark the buffer wanted] */
		bp->b_flags |= B_WANTED;
		/* [wait until the buffer is available] */
		tsleep((caddr_t)bp, PRIBIO+1, "physbuf", 0);
	}

	/* Mark it busy, so nobody else will use it. */
	bp->b_flags |= B_BUSY;

	/* [lower the priority level] */
	splx(s);

	/* [set up the fixed part of the buffer for a transfer] */
	bp->b_dev = dev;
	bp->b_error = 0;
	bp->b_proc = p;
	LIST_INIT(&bp->b_dep);

	/*
	 * [while there are data to transfer and no I/O error]
	 * Note that I/O errors are handled with a 'goto' at the bottom
	 * of the 'while' loop.
	 */
	for (i = 0; i < uio->uio_iovcnt; i++) {
		iovp = &uio->uio_iov[i];
		while (iovp->iov_len > 0) {
			/*
			 * [mark the buffer busy for physical I/O]
			 * (i.e. set B_PHYS (because it's an I/O to user
			 * memory, and B_RAW, because B_RAW is to be
			 * "Set by physio for raw transfers.", in addition
			 * to the "busy" and read/write flag.)
			 */
			bp->b_flags = B_BUSY | B_PHYS | B_RAW | flags;

			/* [set up the buffer for a maximum-sized transfer] */
			bp->b_blkno = btodb(uio->uio_offset);
			bp->b_bcount = iovp->iov_len;
			bp->b_data = iovp->iov_base;

			/*
			 * [call minphys to bound the transfer size]
			 * and remember the amount of data to transfer,
			 * for later comparison.
			 */
			(*minphys)(bp);
			todo = bp->b_bcount;
#ifdef DIAGNOSTIC
			if (todo < 0)
				panic("todo < 0; minphys broken");
			if (todo > MAXPHYS)
				panic("todo > MAXPHYS; minphys broken");
#endif

			/*
			 * [lock the part of the user address space involved
			 *    in the transfer]
			 * Beware vmapbuf(); it clobbers b_data and
			 * saves it in b_saveaddr.  However, vunmapbuf()
			 * restores it.
			 */
			PHOLD(p);
#if defined(UVM)
			if (uvm_vslock(p, bp->b_data, todo, (flags & B_READ) ?
			    VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ) !=
			    KERN_SUCCESS) {
				/*
				 * Wiring failed; flag the error and jump
				 * past vmapbuf()/strategy()/vunmapbuf() --
				 * nothing was mapped, so only PRELE() and
				 * the error bookkeeping below are needed.
				 */
				bp->b_flags |= B_ERROR;
				bp->b_error = EFAULT;
				goto after_unlock;
			}
#else
			vslock(bp->b_data, todo);
#endif
			vmapbuf(bp, todo);

			/* [call strategy to start the transfer] */
			(*strategy)(bp);

			/*
			 * Note that the raise/wait/lower/get error
			 * steps below would be done by biowait(), but
			 * we want to unlock the address space before
			 * we lower the priority.
			 *
			 * [raise the priority level to splbio]
			 */
			s = splbio();

			/* [wait for the transfer to complete] */
			while ((bp->b_flags & B_DONE) == 0)
				tsleep((caddr_t) bp, PRIBIO + 1, "physio", 0);

			/* Mark it busy again, so nobody else will use it. */
			bp->b_flags |= B_BUSY;

			/* [lower the priority level] */
			splx(s);

			/*
			 * [unlock the part of the address space previously
			 *    locked]
			 */
			vunmapbuf(bp, todo);
#if defined(UVM)
			uvm_vsunlock(p, bp->b_data, todo);
after_unlock:
#else
			vsunlock(bp->b_data, todo);
#endif
			PRELE(p);

			/* remember error value (save a splbio/splx pair) */
			if (bp->b_flags & B_ERROR)
				error = (bp->b_error ? bp->b_error : EIO);

			/*
			 * [deduct the transfer size from the total number
			 *    of data to transfer]
			 * NOTE(review): on the uvm_vslock() failure path
			 * above, strategy() never ran, so b_resid holds
			 * whatever value it had before -- 'done' may then
			 * advance the iov without any I/O having occurred.
			 * Harmless since we exit via 'goto done' with an
			 * error, but worth confirming.
			 */
			done = bp->b_bcount - bp->b_resid;
#ifdef DIAGNOSTIC
			if (done < 0)
				panic("done < 0; strategy broken");
			if (done > todo)
				panic("done > todo; strategy broken");
#endif
			iovp->iov_len -= done;
                        iovp->iov_base += done;
                        uio->uio_offset += done;
                        uio->uio_resid -= done;

			/*
			 * Now, check for an error.
			 * Also, handle weird end-of-disk semantics:
			 * a short transfer (done < todo) ends the I/O.
			 */
			if (error || done < todo)
				goto done;
		}
	}

done:
	/*
	 * [clean up the state of the buffer]
	 * Remember if somebody wants it, so we can wake them up below.
	 * Also, if we had to steal it, give it back.
	 */
	s = splbio();
	bp->b_flags &= ~(B_BUSY | B_PHYS | B_RAW);
	if (nobuf)
		putphysbuf(bp);
	else {
		/*
		 * [if another process is waiting for the raw I/O buffer,
		 *    wake up processes waiting to do physical I/O]
		 */
		if (bp->b_flags & B_WANTED) {
			bp->b_flags &= ~B_WANTED;
			wakeup(bp);
		}
	}
	splx(s);

	return (error);
}
289
/*
 * Get a swap buffer structure, for use in physical I/O.
 * Mostly taken from /sys/vm/swap_pager.c, except that it no longer
 * records buffer list-empty conditions, and sleeps at PRIBIO + 1,
 * rather than PSWP + 1 (and on a different wchan).
 */
struct buf *
getphysbuf()
{
	struct buf *bp;
#if !defined(UVM)
	int s;

	/*
	 * Take a buffer header off the swap buffer free list (bswlist),
	 * sleeping under splbio until one becomes available; putphysbuf()
	 * wakes us via &bswlist when it returns one.
	 */
	s = splbio();
        while (bswlist.b_actf == NULL) {
                bswlist.b_flags |= B_WANTED;
                tsleep((caddr_t)&bswlist, PRIBIO + 1, "getphys", 0);
        }
        bp = bswlist.b_actf;
        bswlist.b_actf = bp->b_actf;
        splx(s);
#else

	/* UVM: allocate a private, zero-filled header instead. */
	bp = malloc(sizeof(*bp), M_TEMP, M_WAITOK);
	bzero(bp, sizeof(*bp));

	/* XXXCDC: are the following two lines necessary? */
	bp->b_rcred = bp->b_wcred = NOCRED;
	bp->b_vnbufs.le_next = NOLIST;
#endif
	return (bp);
}
322
/*
 * Get rid of a swap buffer structure which has been used in physical I/O.
 * Mostly taken from /sys/vm/swap_pager.c, except that it now uses
 * wakeup() rather than the VM-internal thread_wakeup(), and that the caller
 * must mask disk interrupts, rather than putphysbuf() itself.
 */
void
putphysbuf(bp)
	struct buf *bp;
{
#if !defined(UVM)
	/*
	 * Push the header back onto the swap buffer free list, drop any
	 * vnode association, and wake anyone sleeping in getphysbuf().
	 * Caller must already be at splbio (see comment above).
	 */
        bp->b_actf = bswlist.b_actf;
        bswlist.b_actf = bp;
        if (bp->b_vp)
                brelvp(bp);
        if (bswlist.b_flags & B_WANTED) {
                bswlist.b_flags &= ~B_WANTED;
                wakeup(&bswlist);
        }
#else
	/* XXXCDC: is this necessary? */
	if (bp->b_vp)
		brelvp(bp);

	/* Nobody can legitimately be sleeping on a private header. */
	if (bp->b_flags & B_WANTED)
		panic("putphysbuf: private buf B_WANTED");
	free(bp, M_TEMP);
#endif
}
352
/*
 * Leffler, et al., says on p. 231:
 * "The minphys() routine is called by physio() to adjust the
 * size of each I/O transfer before the latter is passed to
 * the strategy routine..."
 *
 * so, just clamp the buffer's count to at most MAXPHYS here.
 */
362void
363minphys(bp)
364	struct buf *bp;
365{
366
367	if (bp->b_bcount > MAXPHYS)
368		bp->b_bcount = MAXPHYS;
369}
370