Lines matching refs:wp (every reference to the RAID5 packet pointer wp, shown with its source line number)

57 struct gv_raid5_packet *wp, *wp2;
62 wp = g_malloc(sizeof(*wp), M_WAITOK | M_ZERO);
63 wp->bio = bp;
64 wp->waiting = NULL;
65 wp->parity = NULL;
66 TAILQ_INIT(&wp->bits);
69 err = gv_raid5_rebuild(p, wp, bp, addr, boff, bcount);
71 err = gv_raid5_check(p, wp, bp, addr, boff, bcount);
73 err = gv_raid5_request(p, wp, bp, addr, boff, bcount, &delay);
77 g_free(wp);
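
The block above (lines 57-77) allocates the packet zeroed with g_malloc(M_WAITOK | M_ZERO), attaches the originating bio, initializes the bits queue, dispatches to the rebuild, check, or request path, and frees the packet again if that setup fails. Pulling the member accesses from the whole listing together suggests roughly the following shape for struct gv_raid5_packet; this is a sketch inferred from usage only, and the field types and the gv_bioq element name are assumptions, not the authoritative definition from the vinum headers.

#include <sys/types.h>
#include <sys/queue.h>

struct bio;			/* GEOM I/O request; opaque for this sketch. */
struct gv_bioq;			/* Assumed element type of the bits queue. */

/* Inferred from the accesses in this listing; not the real definition. */
struct gv_raid5_packet {
	void	*data;		/* Buffer this packet operates on (wp->data). */
	off_t	 length;	/* Transfer length (wp->length). */
	off_t	 lockbase;	/* Locked plex offset; -1 means "no lock held". */

	struct bio *bio;	/* Original bio that spawned the packet. */
	struct bio *waiting;	/* Clone waiting for the parity data. */
	struct bio *parity;	/* Clone carrying the parity data. */

	TAILQ_HEAD(, gv_bioq)	     bits; /* Sub-requests issued for this packet. */
	TAILQ_ENTRY(gv_raid5_packet) list; /* Link in the plex's packet list. */
};
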
86 TAILQ_FOREACH_SAFE(bq, &wp->bits, queue, bq2) {
87 TAILQ_REMOVE(&wp->bits, bq, queue);
90 if (wp->waiting != NULL) {
91 if (wp->waiting->bio_cflags & GV_BIO_MALLOC)
92 g_free(wp->waiting->bio_data);
93 g_destroy_bio(wp->waiting);
95 if (wp->parity != NULL) {
96 if (wp->parity->bio_cflags & GV_BIO_MALLOC)
97 g_free(wp->parity->bio_data);
98 g_destroy_bio(wp->parity);
100 g_free(wp);
102 TAILQ_FOREACH_SAFE(wp, &p->packets, list, wp2) {
103 if (wp->bio != bp)
106 TAILQ_REMOVE(&p->packets, wp, list);
107 TAILQ_FOREACH_SAFE(bq, &wp->bits, queue, bq2) {
108 TAILQ_REMOVE(&wp->bits, bq, queue);
111 g_free(wp);
136 return (wp);
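
On completion or error (lines 86-111) the packet is torn down: wp->bits is drained with TAILQ_FOREACH_SAFE, privately allocated bio buffers (GV_BIO_MALLOC) are freed, the waiting and parity clones are destroyed, and any packets still queued for the same bio are unlinked from p->packets. The _SAFE iteration variant is what makes it legal to remove and free the current element inside the loop. A minimal userland illustration of that pattern follows, using a made-up item type instead of the bits elements and FreeBSD's <sys/queue.h>:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct item {
	int id;
	TAILQ_ENTRY(item) queue;
};

TAILQ_HEAD(itemhead, item);

int
main(void)
{
	struct itemhead head;
	struct item *it, *it2;
	int i;

	TAILQ_INIT(&head);			/* cf. TAILQ_INIT(&wp->bits), line 66 */
	for (i = 0; i < 3; i++) {
		it = calloc(1, sizeof(*it));
		it->id = i;
		TAILQ_INSERT_TAIL(&head, it, queue);
	}

	/*
	 * The _SAFE variant caches the next pointer in it2, so the
	 * current element may be unlinked and freed mid-iteration,
	 * just like bq/bq2 in the cleanup above.
	 */
	TAILQ_FOREACH_SAFE(it, &head, queue, it2) {
		TAILQ_REMOVE(&head, it, queue);
		printf("freeing item %d\n", it->id);
		free(it);
	}
	return (0);
}
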
146 struct gv_raid5_packet *wp, *owp;
149 wp = bp->bio_caller2;
150 if (wp->lockbase == -1)
155 if (owp == wp)
157 if ((wp->lockbase >= owp->lockbase) &&
158 (wp->lockbase <= owp->lockbase + owp->length)) {
162 if ((wp->lockbase <= owp->lockbase) &&
163 (wp->lockbase + wp->length >= owp->lockbase)) {
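
Lines 146-163 are the stripe-lock collision check: a packet whose lockbase is -1 holds no lock; otherwise its range [lockbase, lockbase + length] is compared against every other outstanding packet so that two packets never update the same parity region concurrently. The helper below is a standalone model of the two comparisons at lines 157-163; the function name and the main() driver are mine, not the kernel's.

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

/* Does a new packet's lock range collide with an already queued one? */
static bool
raid5_lock_collides(off_t new_base, off_t new_len, off_t old_base, off_t old_len)
{
	/* The new packet starts inside the old packet's locked range... */
	if (new_base >= old_base && new_base <= old_base + old_len)
		return (true);
	/* ...or the new packet's range reaches back over the old start. */
	if (new_base <= old_base && new_base + new_len >= old_base)
		return (true);
	return (false);
}

int
main(void)
{
	printf("%d\n", raid5_lock_collides(128, 64, 160, 64));	/* 1: overlap */
	printf("%d\n", raid5_lock_collides(0, 64, 256, 64));	/* 0: disjoint */
	return (0);
}
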
173 gv_raid5_check(struct gv_plex *p, struct gv_raid5_packet *wp, struct bio *bp,
205 wp->length = real_len;
206 wp->data = addr;
207 wp->lockbase = real_off;
218 cbp = gv_raid5_clone_bio(bp, s, wp, NULL, 1);
227 TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
231 cbp = gv_raid5_clone_bio(bp, parity, wp, NULL, 1);
235 wp->waiting = cbp;
241 cbp = gv_raid5_clone_bio(bp, parity, wp, addr, 1);
244 wp->parity = cbp;
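
gv_raid5_check (lines 173-244) appears to set the packet up for a parity verification pass: one read clone per data subdisk is queued on wp->bits, and the parity subdisk's clones are parked in wp->waiting and wp->parity. The invariant being checked is the RAID5 one: the stored parity block must equal the XOR of all data blocks in the stripe. A simplified, buffer-level sketch of that check, independent of the bio plumbing:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* True if the stored parity equals the XOR of the data blocks. */
static bool
stripe_parity_ok(uint8_t **data, size_t ndisks, const uint8_t *parity, size_t len)
{
	uint8_t acc;
	size_t i, d;

	for (i = 0; i < len; i++) {
		acc = 0;
		for (d = 0; d < ndisks; d++)
			acc ^= data[d][i];
		if (acc != parity[i])
			return (false);
	}
	return (true);
}

int
main(void)
{
	uint8_t d0[4] = { 1, 2, 3, 4 }, d1[4] = { 5, 6, 7, 8 };
	uint8_t par[4];
	uint8_t *data[2] = { d0, d1 };
	size_t i;

	for (i = 0; i < 4; i++)
		par[i] = d0[i] ^ d1[i];
	printf("parity ok: %d\n", stripe_parity_ok(data, 2, par, 4));
	return (0);
}
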
251 gv_raid5_rebuild(struct gv_plex *p, struct gv_raid5_packet *wp, struct bio *bp,
297 wp->length = real_len;
298 wp->data = addr;
299 wp->lockbase = real_off;
301 KASSERT(wp->length >= 0, ("gv_rebuild_raid5: wp->length < 0"));
313 cbp = gv_raid5_clone_bio(bp, s, wp, NULL, 1);
322 TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
326 cbp = gv_raid5_clone_bio(bp, broken, wp, NULL, 1);
329 wp->parity = cbp;
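
gv_raid5_rebuild (lines 251-329) points the same machinery at a broken subdisk: read clones for the surviving members are queued on wp->bits (line 322), and the clone aimed at the broken subdisk becomes wp->parity (lines 326-329), which will carry the reconstructed data. The arithmetic itself is plain XOR over the survivors, data and parity alike, sketched here in userland form:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reconstruct a missing block as the XOR of all surviving blocks. */
static void
rebuild_block(uint8_t **survivors, size_t nsurvivors, uint8_t *out, size_t len)
{
	size_t i, d;

	for (i = 0; i < len; i++) {
		out[i] = 0;
		for (d = 0; d < nsurvivors; d++)
			out[i] ^= survivors[d][i];
	}
}

int
main(void)
{
	uint8_t d0[4] = { 1, 2, 3, 4 }, d1[4] = { 5, 6, 7, 8 };
	uint8_t par[4], lost[4];
	uint8_t *survivors[2] = { d0, par };
	size_t i;

	for (i = 0; i < 4; i++)
		par[i] = d0[i] ^ d1[i];		/* parity of the intact stripe */
	rebuild_block(survivors, 2, lost, 4);	/* recovers d1 from d0 ^ parity */
	for (i = 0; i < 4; i++)
		printf("%u ", lost[i]);
	printf("\n");
	return (0);
}
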
339 gv_raid5_request(struct gv_plex *p, struct gv_raid5_packet *wp,
413 wp->length = real_len;
414 wp->data = addr;
415 wp->lockbase = real_off;
417 KASSERT(wp->length >= 0, ("gv_build_raid5_request: wp->length < 0"));
436 bzero(wp->data, wp->length);
444 cbp = gv_raid5_clone_bio(bp, s, wp, NULL, 1);
452 TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
457 cbp = gv_raid5_clone_bio(bp, original, wp, addr, 0);
463 wp->lockbase = -1;
484 cbp = gv_raid5_clone_bio(bp, s, wp, NULL, 1);
493 TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
497 cbp = gv_raid5_clone_bio(bp, parity, wp, NULL, 1);
500 bcopy(addr, cbp->bio_data, wp->length);
501 wp->parity = cbp;
507 cbp = gv_raid5_clone_bio(bp, original, wp, addr, 1);
515 TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
524 cbp = gv_raid5_clone_bio(bp, parity, wp, NULL, 1);
533 TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
536 cbp = gv_raid5_clone_bio(bp, original, wp, NULL, 1);
545 TAILQ_INSERT_TAIL(&wp->bits, bq, queue);
548 cbp = gv_raid5_clone_bio(bp, original, wp, addr, 1);
557 wp->waiting = cbp;
560 cbp = gv_raid5_clone_bio(bp, parity, wp, NULL, 1);
565 wp->parity = cbp;
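
gv_raid5_request (lines 339-565) builds the regular I/O path and is the largest consumer of the packet. Reading the references above: what looks like a degraded read zeroes the destination buffer first (line 436) so the missing block can be reconstructed by XOR-ing in the surviving members; a read that can apparently be served directly drops the stripe lock (wp->lockbase = -1, line 463); and the partial-stripe write paths read the old data and old parity (the clones around lines 507-524) before the parity is rewritten. The last case is the classic read-modify-write update, new_parity = old_parity ^ old_data ^ new_data. The sketch below shows only that arithmetic; where exactly the kernel folds the buffers together is not visible in this listing.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Fold the overwritten data block out of, and the new data into, the parity. */
static void
rmw_parity(const uint8_t *old_data, const uint8_t *new_data, uint8_t *parity,
    size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		parity[i] ^= old_data[i] ^ new_data[i];
}

int
main(void)
{
	uint8_t old_data[2] = { 0x0f, 0xf0 };
	uint8_t new_data[2] = { 0xff, 0x00 };
	uint8_t other[2]    = { 0xaa, 0x55 };	/* data block left untouched */
	uint8_t parity[2];

	parity[0] = old_data[0] ^ other[0];
	parity[1] = old_data[1] ^ other[1];
	rmw_parity(old_data, new_data, parity, 2);
	/* The updated parity must equal new_data ^ other. */
	printf("%d\n", parity[0] == (new_data[0] ^ other[0]) &&
	    parity[1] == (new_data[1] ^ other[1]));
	return (0);
}
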
640 gv_raid5_clone_bio(struct bio *bp, struct gv_sd *s, struct gv_raid5_packet *wp,
649 cbp->bio_data = g_malloc(wp->length, M_WAITOK | M_ZERO);
653 cbp->bio_offset = wp->lockbase + s->drive_offset;
654 cbp->bio_length = wp->length;
658 cbp->bio_caller2 = wp;
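
gv_raid5_clone_bio (lines 640-658) creates the per-subdisk child request: when the caller supplies no buffer (presumably the NULL addr arguments seen above) it allocates a zeroed one of wp->length (line 649), positions the request at the subdisk's location on its drive (bio_offset = wp->lockbase + s->drive_offset, line 653), copies the packet's length, and stores wp in bio_caller2 so the completion side can recover the packet (cf. line 149). A userland model of that setup with simplified stand-in structs (the model_* names are mine, not the GEOM types):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

struct model_packet {
	off_t	 lockbase;	/* stands in for wp->lockbase */
	off_t	 length;	/* stands in for wp->length */
};

struct model_subdisk {
	off_t	 drive_offset;	/* stands in for s->drive_offset */
};

struct model_req {
	off_t	 offset;
	off_t	 length;
	void	*data;
	void	*caller2;
};

static struct model_req *
model_clone(struct model_packet *wp, struct model_subdisk *s, void *addr)
{
	struct model_req *cbp;

	cbp = calloc(1, sizeof(*cbp));
	if (cbp == NULL)
		return (NULL);
	/* No caller buffer: allocate a zeroed one, like g_malloc(..., M_ZERO). */
	cbp->data = (addr != NULL) ? addr : calloc(1, wp->length);
	cbp->offset = wp->lockbase + s->drive_offset;	/* cf. line 653 */
	cbp->length = wp->length;			/* cf. line 654 */
	cbp->caller2 = wp;				/* cf. line 658 */
	return (cbp);
}

int
main(void)
{
	struct model_packet wp = { .lockbase = 4096, .length = 512 };
	struct model_subdisk s = { .drive_offset = 1048576 };
	struct model_req *r = model_clone(&wp, &s, NULL);

	printf("offset %jd, length %jd\n", (intmax_t)r->offset, (intmax_t)r->length);
	return (0);
}
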