/* geom_io.c -- FreeBSD revision 92108 */
/*-
 * Copyright (c) 2002 Poul-Henning Kamp
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Poul-Henning Kamp
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/geom/geom_io.c 92108 2002-03-11 21:42:35Z phk $
 */

#include <sys/param.h>
#ifndef _KERNEL
/* Userland build: libc headers for the out-of-kernel test harness. */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <signal.h>
#include <err.h>
#include <sched.h>
#else
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#endif

#include <sys/errno.h>
#include <geom/geom.h>

/*
 * The three global bio queues:
 *   g_bio_run_down - requests travelling from consumers towards providers
 *   g_bio_run_up   - completed requests travelling back towards consumers
 *   g_bio_idle     - cache of zeroed, free bio structures for reuse
 */
static struct g_bioq g_bio_run_down;
static struct g_bioq g_bio_run_up;
static struct g_bioq g_bio_idle;

#include <machine/atomic.h>
62140931Sru
6394476Sdavidcstatic void
64140931Srug_bioq_lock(struct g_bioq *bq)
6594476Sdavidc{
6694476Sdavidc
6794476Sdavidc	mtx_lock(&bq->bio_queue_lock);
6894476Sdavidc}
6994476Sdavidc
7094476Sdavidcstatic void
7194476Sdavidcg_bioq_unlock(struct g_bioq *bq)
7294476Sdavidc{
7394476Sdavidc
74235693Sgjb	mtx_unlock(&bq->bio_queue_lock);
7574354Srwatson}
76140931Sru
#if 0
/*
 * Tear down a bio queue's mutex.  Compiled out because no queue is
 * ever destroyed at present; kept for symmetry with g_bioq_init().
 */
static void
g_bioq_destroy(struct g_bioq *bq)
{

	mtx_destroy(&bq->bio_queue_lock);
}
#endif
8574354Srwatson
8674354Srwatsonstatic void
8774277Srwatsong_bioq_init(struct g_bioq *bq)
8874277Srwatson{
8974277Srwatson
9055430Srwatson	TAILQ_INIT(&bq->bio_queue);
91107788Sru	mtx_init(&bq->bio_queue_lock, "bio queue", MTX_DEF);
9255430Srwatson}
9355430Srwatson
9455430Srwatsonstatic struct bio *
9566185Srwatsong_bioq_first(struct g_bioq *bq)
9655430Srwatson{
9755430Srwatson	struct bio *bp;
98121382Shmp
99121382Shmp	g_bioq_lock(bq);
10055430Srwatson	bp = TAILQ_FIRST(&bq->bio_queue);
10155430Srwatson	if (bp != NULL) {
102115874Srwatson		TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue);
103115874Srwatson		bq->bio_queue_length--;
10455430Srwatson	}
105235319Sgjb	g_bioq_unlock(bq);
10655430Srwatson	return (bp);
107107788Sru}
10855430Srwatson
10955430Srwatsonstatic void
110103566Struckmang_bioq_enqueue_tail(struct bio *bp, struct g_bioq *rq)
11155430Srwatson{
11255430Srwatson
11355430Srwatson	g_bioq_lock(rq);
11479727Sschweikh	TAILQ_INSERT_TAIL(&rq->bio_queue, bp, bio_queue);
115115874Srwatson	rq->bio_queue_length++;
116235693Sgjb	g_bioq_unlock(rq);
11755430Srwatson}
11855430Srwatson
11955430Srwatsonstruct bio *
12055430Srwatsong_new_bio(void)
121107788Sru{
122103534Struckman	struct bio *bp;
12355430Srwatson
12455430Srwatson	bp = g_bioq_first(&g_bio_idle);
12555430Srwatson	if (bp == NULL)
12655430Srwatson		bp = g_malloc(sizeof *bp, M_WAITOK | M_ZERO);
127120010Sru	g_trace(G_T_BIO, "g_new_bio() = %p", bp);
12855430Srwatson	return (bp);
129115804Srwatson}
130115804Srwatson
131115804Srwatsonvoid
132115804Srwatsong_destroy_bio(struct bio *bp)
133115804Srwatson{
134
135	g_trace(G_T_BIO, "g_destroy_bio(%p)", bp);
136	bzero(bp, sizeof *bp);
137	g_bioq_enqueue_tail(bp, &g_bio_idle);
138}
139
140struct bio *
141g_clone_bio(struct bio *bp)
142{
143	struct bio *bp2;
144
145	bp2 = g_new_bio();
146	bp2->bio_linkage = bp;
147	bp2->bio_cmd = bp->bio_cmd;
148	bp2->bio_length = bp->bio_length;
149	bp2->bio_offset = bp->bio_offset;
150	bp2->bio_data = bp->bio_data;
151	bp2->bio_attribute = bp->bio_attribute;
152	g_trace(G_T_BIO, "g_clone_bio(%p) = %p", bp, bp2);
153	return(bp2);
154}
155
156void
157g_io_init()
158{
159
160	g_bioq_init(&g_bio_run_down);
161	g_bioq_init(&g_bio_run_up);
162	g_bioq_init(&g_bio_idle);
163}
164
165int
166g_io_setattr(char *attr, struct g_consumer *cp, int len, void *ptr, struct thread *tp __unused)
167{
168	struct bio *bp;
169	int error;
170
171	g_trace(G_T_BIO, "bio_setattr(%s)", attr);
172	do {
173		bp = g_new_bio();
174		bp->bio_cmd = BIO_SETATTR;
175		bp->bio_done = NULL;
176		bp->bio_attribute = attr;
177		bp->bio_length = len;
178		bp->bio_data = ptr;
179		g_io_request(bp, cp);
180		while ((bp->bio_flags & BIO_DONE) == 0) {
181			mtx_lock(&Giant);
182			tsleep(bp, 0, "setattr", hz / 10);
183			mtx_unlock(&Giant);
184		}
185		error = bp->bio_error;
186		g_destroy_bio(bp);
187		if (error == EBUSY)
188			tsleep(&error, 0, "setattr_busy", hz);
189	} while(error == EBUSY);
190	return (error);
191}
192
193
194int
195g_io_getattr(char *attr, struct g_consumer *cp, int *len, void *ptr, struct thread *tp __unused)
196{
197	struct bio *bp;
198	int error;
199
200	g_trace(G_T_BIO, "bio_getattr(%s)", attr);
201	do {
202		bp = g_new_bio();
203		bp->bio_cmd = BIO_GETATTR;
204		bp->bio_done = NULL;
205		bp->bio_attribute = attr;
206		bp->bio_length = *len;
207		bp->bio_data = ptr;
208		g_io_request(bp, cp);
209		while ((bp->bio_flags & BIO_DONE) == 0) {
210			mtx_lock(&Giant);
211			tsleep(bp, 0, "getattr", hz / 10);
212			mtx_unlock(&Giant);
213		}
214		*len = bp->bio_completed;
215		error = bp->bio_error;
216		g_destroy_bio(bp);
217		if (error == EBUSY)
218			tsleep(&error, 0, "getattr_busy", hz);
219
220	} while(error == EBUSY);
221	return (error);
222}
223
224void
225g_io_request(struct bio *bp, struct g_consumer *cp)
226{
227	int error;
228
229	KASSERT(cp != NULL, ("bio_request on thin air"));
230	error = 0;
231	bp->bio_from = cp;
232	bp->bio_to = cp->provider;
233
234	/* begin_stats(&bp->stats); */
235
236	atomic_add_int(&cp->biocount, 1);
237	if (bp->bio_to == NULL)
238		error = ENXIO;
239	if (!error) {
240		switch(bp->bio_cmd) {
241		case BIO_READ:
242		case BIO_GETATTR:
243			if (cp->acr == 0)
244				error = EPERM;
245			break;
246		case BIO_WRITE:
247			if (cp->acw == 0)
248				error = EPERM;
249			break;
250		case BIO_SETATTR:
251		case BIO_DELETE:
252		case BIO_FORMAT:
253			if ((cp->acw == 0) || (cp->ace == 0))
254				error = EPERM;
255			break;
256		default:
257			error = EPERM;
258			break;
259		}
260	}
261	/* if provider is marked for error, don't disturb */
262	if (!error)
263		error = bp->bio_to->error;
264	if (error) {
265		bp->bio_error = error;
266		/* finish_stats(&bp->stats); */
267
268		g_trace(G_T_BIO,
269		    "bio_request(%p) from %p(%s) to %p(%s) cmd %d error %d\n",
270		    bp, bp->bio_from, bp->bio_from->geom->name,
271		    bp->bio_to, bp->bio_to->name, bp->bio_cmd, bp->bio_error);
272		g_bioq_enqueue_tail(bp, &g_bio_run_up);
273		mtx_lock(&Giant);
274		wakeup(&g_wait_up);
275		mtx_unlock(&Giant);
276	} else {
277		g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d",
278		    bp, bp->bio_from, bp->bio_from->geom->name,
279		    bp->bio_to, bp->bio_to->name, bp->bio_cmd);
280		g_bioq_enqueue_tail(bp, &g_bio_run_down);
281		mtx_lock(&Giant);
282		wakeup(&g_wait_down);
283		mtx_unlock(&Giant);
284	}
285}
286
/*
 * Deliver a completed bio back towards its consumer: enqueue it on
 * the up queue and wake the up-scheduler.  The provider's geom must
 * already have set bio_error/bio_completed as appropriate.
 */
void
g_io_deliver(struct bio *bp)
{

	g_trace(G_T_BIO,
	    "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d",
	    bp, bp->bio_from, bp->bio_from->geom->name,
	    bp->bio_to, bp->bio_to->name, bp->bio_cmd, bp->bio_error);
	/* finish_stats(&bp->stats); */

	g_bioq_enqueue_tail(bp, &g_bio_run_up);

	/* Wake whatever sleeps on g_wait_up (declared elsewhere) under Giant. */
	mtx_lock(&Giant);
	wakeup(&g_wait_up);
	mtx_unlock(&Giant);
}
303
304void
305g_io_schedule_down(struct thread *tp __unused)
306{
307	struct bio *bp;
308
309	for(;;) {
310		bp = g_bioq_first(&g_bio_run_down);
311		if (bp == NULL)
312			break;
313		bp->bio_to->geom->start(bp);
314	}
315}
316
317void
318g_io_schedule_up(struct thread *tp __unused)
319{
320	struct bio *bp;
321	struct g_consumer *cp;
322
323	for(;;) {
324		bp = g_bioq_first(&g_bio_run_up);
325		if (bp == NULL)
326			break;
327
328		cp = bp->bio_from;
329
330		bp->bio_flags |= BIO_DONE;
331		atomic_add_int(&cp->biocount, -1);
332		if (bp->bio_done != NULL) {
333			bp->bio_done(bp);
334		} else {
335			mtx_lock(&Giant);
336			wakeup(bp);
337			mtx_unlock(&Giant);
338		}
339	}
340}
341
342void *
343g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error)
344{
345	struct bio *bp;
346	void *ptr;
347	int errorc;
348
349        do {
350		bp = g_new_bio();
351		bp->bio_cmd = BIO_READ;
352		bp->bio_done = NULL;
353		bp->bio_offset = offset;
354		bp->bio_length = length;
355		ptr = g_malloc(length, M_WAITOK);
356		bp->bio_data = ptr;
357		g_io_request(bp, cp);
358		while ((bp->bio_flags & BIO_DONE) == 0) {
359			mtx_lock(&Giant);
360			tsleep(bp, 0, "g_read_data", hz / 10);
361			mtx_unlock(&Giant);
362		}
363		errorc = bp->bio_error;
364		if (error != NULL)
365			*error = errorc;
366		g_destroy_bio(bp);
367		if (errorc) {
368			g_free(ptr);
369			ptr = NULL;
370		}
371		if (errorc == EBUSY)
372			tsleep(&errorc, 0, "g_read_data_busy", hz);
373        } while (errorc == EBUSY);
374	return (ptr);
375}
376