Lines matching defs:rh in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/md/ (dm-region-hash.c)

102 	struct dm_region_hash *rh;
116 static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
118 return sector >> rh->region_shift;
121 sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
123 return region << rh->region_shift;
127 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
129 return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
135 return reg->rh->context;
145 sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
147 return rh->region_size;
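
The conversion helpers above reduce to shifts because the hash stores region_shift = ffs(region_size) - 1 (line 191), i.e. region_size is assumed to be a power of two. A minimal userspace sketch of the same arithmetic; the values below are illustrative, not taken from this tree:

	/* Sector <-> region arithmetic, assuming a power-of-two region size. */
	#include <stdint.h>
	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	typedef uint64_t sector_t;
	typedef uint64_t region_t;

	int main(void)
	{
		uint32_t region_size = 1024;			/* sectors per region */
		int region_shift = ffs(region_size) - 1;	/* same derivation as line 191 */
		sector_t sector = 123456;

		region_t region = sector >> region_shift;		/* cf. dm_rh_sector_to_region() */
		sector_t region_start = region << region_shift;	/* cf. dm_rh_region_to_sector() */

		printf("sector %llu -> region %llu, region starts at sector %llu\n",
		       (unsigned long long)sector,
		       (unsigned long long)region,
		       (unsigned long long)region_start);
		return 0;
	}
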
164 struct dm_region_hash *rh;
177 rh = kmalloc(sizeof(*rh), GFP_KERNEL);
178 if (!rh) {
183 rh->context = context;
184 rh->dispatch_bios = dispatch_bios;
185 rh->wakeup_workers = wakeup_workers;
186 rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
187 rh->target_begin = target_begin;
188 rh->max_recovery = max_recovery;
189 rh->log = log;
190 rh->region_size = region_size;
191 rh->region_shift = ffs(region_size) - 1;
192 rwlock_init(&rh->hash_lock);
193 rh->mask = nr_buckets - 1;
194 rh->nr_buckets = nr_buckets;
196 rh->shift = RH_HASH_SHIFT;
197 rh->prime = RH_HASH_MULT;
199 rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
200 if (!rh->buckets) {
202 kfree(rh);
207 INIT_LIST_HEAD(rh->buckets + i);
209 spin_lock_init(&rh->region_lock);
210 sema_init(&rh->recovery_count, 0);
211 atomic_set(&rh->recovery_in_flight, 0);
212 INIT_LIST_HEAD(&rh->clean_regions);
213 INIT_LIST_HEAD(&rh->quiesced_regions);
214 INIT_LIST_HEAD(&rh->recovered_regions);
215 INIT_LIST_HEAD(&rh->failed_recovered_regions);
216 rh->barrier_failure = 0;
218 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
220 if (!rh->region_pool) {
221 vfree(rh->buckets);
222 kfree(rh);
223 rh = ERR_PTR(-ENOMEM);
226 return rh;
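
dm_region_hash_create() allocates three things in order (the rh struct with kmalloc, the bucket array with vmalloc, a region mempool) and, on each failure, frees whatever was already allocated before returning ERR_PTR(-ENOMEM) (lines 199-226). Below is a hedged userspace sketch of the same allocate-forward / unwind-backward pattern; the ERR_PTR/IS_ERR stand-ins are defined locally since the kernel macros are not available outside the tree, and the struct fields are placeholders:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Minimal stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
	#define ERR_PTR(err)	((void *)(long)(err))
	#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)
	#define PTR_ERR(ptr)	((long)(ptr))

	struct region_hash {
		void *buckets;		/* stands in for the vmalloc'ed bucket array */
		void *region_pool;	/* stands in for the region mempool */
	};

	static struct region_hash *region_hash_create(unsigned nr_buckets, int fail_pool)
	{
		struct region_hash *rh = malloc(sizeof(*rh));

		if (!rh)
			return ERR_PTR(-ENOMEM);

		rh->buckets = calloc(nr_buckets, sizeof(void *));
		if (!rh->buckets) {
			free(rh);
			return ERR_PTR(-ENOMEM);
		}

		/* fail_pool simulates mempool_create_kmalloc_pool() failing. */
		rh->region_pool = fail_pool ? NULL : malloc(64);
		if (!rh->region_pool) {
			free(rh->buckets);	/* unwind in reverse order of allocation */
			free(rh);
			return ERR_PTR(-ENOMEM);
		}
		return rh;
	}

	int main(void)
	{
		struct region_hash *rh = region_hash_create(64, 1 /* force the last step to fail */);

		if (IS_ERR(rh)) {
			printf("create failed: %ld\n", PTR_ERR(rh));
			return 1;
		}
		free(rh->region_pool);
		free(rh->buckets);
		free(rh);
		return 0;
	}
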
230 void dm_region_hash_destroy(struct dm_region_hash *rh)
235 BUG_ON(!list_empty(&rh->quiesced_regions));
236 for (h = 0; h < rh->nr_buckets; h++) {
237 list_for_each_entry_safe(reg, nreg, rh->buckets + h,
240 mempool_free(reg, rh->region_pool);
244 if (rh->log)
245 dm_dirty_log_destroy(rh->log);
247 if (rh->region_pool)
248 mempool_destroy(rh->region_pool);
250 vfree(rh->buckets);
251 kfree(rh);
255 struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
257 return rh->log;
261 static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
263 return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
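
rh_hash() is a multiplicative hash: the region number is multiplied by rh->prime (set to RH_HASH_MULT at line 197), the high bits are pulled down with a right shift, and the result is masked into the bucket range, which only works because nr_buckets is a power of two (mask = nr_buckets - 1, line 193). A small userspace sketch follows; the multiplier and shift are assumptions, since the actual RH_HASH_MULT/RH_HASH_SHIFT values are not shown in this listing:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t region_t;

	/* Illustrative constants; the kernel's RH_HASH_MULT/RH_HASH_SHIFT may differ. */
	#define HASH_MULT	2654435387U
	#define HASH_SHIFT	12

	static unsigned rh_hash_sketch(region_t region, unsigned nr_buckets)
	{
		unsigned mask = nr_buckets - 1;		/* nr_buckets must be a power of two */

		return (unsigned)((region * HASH_MULT) >> HASH_SHIFT) & mask;
	}

	int main(void)
	{
		region_t r;

		for (r = 0; r < 8; r++)
			printf("region %llu -> bucket %u\n",
			       (unsigned long long)r, rh_hash_sketch(r, 64));
		return 0;
	}
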
266 static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
269 struct list_head *bucket = rh->buckets + rh_hash(rh, region);
278 static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
280 list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
283 static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
287 nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
291 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
293 nreg->rh = rh;
299 write_lock_irq(&rh->hash_lock);
300 reg = __rh_lookup(rh, region);
303 mempool_free(nreg, rh->region_pool);
305 __rh_insert(rh, nreg);
307 spin_lock(&rh->region_lock);
308 list_add(&nreg->list, &rh->clean_regions);
309 spin_unlock(&rh->region_lock);
314 write_unlock_irq(&rh->hash_lock);
319 static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
323 reg = __rh_lookup(rh, region);
325 read_unlock(&rh->hash_lock);
326 reg = __rh_alloc(rh, region);
327 read_lock(&rh->hash_lock);
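
__rh_find() is entered with rh->hash_lock held for reading; if the region is missing it drops the read lock, lets __rh_alloc() take the write lock, and __rh_alloc() repeats the lookup under the write lock (line 300) because another CPU may have inserted the same region while no lock was held. A hedged pthreads sketch of that drop, relock, recheck idiom; the single shared slot below is an illustrative stand-in for a hash bucket:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
	static int *slot;		/* stands in for a hash bucket entry */

	/* Allocate the entry under the write lock, re-checking for a racing insert. */
	static int *slot_alloc(int value)
	{
		int *fresh = malloc(sizeof(*fresh));

		if (!fresh)
			abort();
		*fresh = value;

		pthread_rwlock_wrlock(&lock);
		if (slot)			/* someone inserted while no lock was held */
			free(fresh);
		else
			slot = fresh;
		pthread_rwlock_unlock(&lock);

		pthread_rwlock_rdlock(&lock);	/* return with the read lock held, as __rh_find() does */
		return slot;
	}

	/* Called with the read lock held; may drop and retake it, like __rh_find(). */
	static int *slot_find(int value)
	{
		int *found = slot;

		if (!found) {
			pthread_rwlock_unlock(&lock);	/* drop the read lock ... */
			found = slot_alloc(value);	/* ... take the write lock inside */
		}
		return found;
	}

	int main(void)
	{
		pthread_rwlock_rdlock(&lock);
		printf("value = %d\n", *slot_find(42));
		pthread_rwlock_unlock(&lock);
		return 0;
	}
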
333 int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
338 read_lock(&rh->hash_lock);
339 reg = __rh_lookup(rh, region);
340 read_unlock(&rh->hash_lock);
349 r = rh->log->type->in_sync(rh->log, region, may_block);
361 struct dm_region_hash *rh = reg->rh;
363 rh->log->type->set_region_sync(rh->log, reg->key, success);
374 rh->dispatch_bios(rh->context, &reg->delayed_bios);
375 if (atomic_dec_and_test(&rh->recovery_in_flight))
376 rh->wakeup_all_recovery_waiters(rh->context);
377 up(&rh->recovery_count);
390 void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
393 struct dm_dirty_log *log = rh->log;
395 region_t region = dm_rh_bio_to_region(rh, bio);
399 rh->barrier_failure = 1;
406 read_lock(&rh->hash_lock);
407 reg = __rh_find(rh, region);
408 read_unlock(&rh->hash_lock);
414 spin_lock_irqsave(&rh->region_lock, flags);
425 spin_unlock_irqrestore(&rh->region_lock, flags);
432 void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
443 write_lock_irq(&rh->hash_lock);
444 spin_lock(&rh->region_lock);
445 if (!list_empty(&rh->clean_regions)) {
446 list_splice_init(&rh->clean_regions, &clean);
452 if (!list_empty(&rh->recovered_regions)) {
453 list_splice_init(&rh->recovered_regions, &recovered);
459 if (!list_empty(&rh->failed_recovered_regions)) {
460 list_splice_init(&rh->failed_recovered_regions,
467 spin_unlock(&rh->region_lock);
468 write_unlock_irq(&rh->hash_lock);
476 rh->log->type->clear_region(rh->log, reg->key);
478 mempool_free(reg, rh->region_pool);
483 mempool_free(reg, rh->region_pool);
487 rh->log->type->clear_region(rh->log, reg->key);
488 mempool_free(reg, rh->region_pool);
491 rh->log->type->flush(rh->log);
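
dm_rh_update_states() splices the clean, recovered, and failed-recovered lists onto private list heads while holding both locks (lines 443-468) and only then walks them, so the dirty-log and mempool calls at lines 476-491 run with no spinlock held. A hedged userspace sketch of the same splice-under-lock, process-unlocked idiom, using a plain singly linked list and a mutex:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int key;
		struct node *next;
	};

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *clean_regions;	/* shared list, protected by "lock" */

	static void add_clean(int key)
	{
		struct node *n = malloc(sizeof(*n));

		if (!n)
			abort();
		n->key = key;
		pthread_mutex_lock(&lock);
		n->next = clean_regions;
		clean_regions = n;
		pthread_mutex_unlock(&lock);
	}

	static void update_states(void)
	{
		struct node *local, *n;

		/* Splice the whole list onto a private head while holding the lock ... */
		pthread_mutex_lock(&lock);
		local = clean_regions;
		clean_regions = NULL;
		pthread_mutex_unlock(&lock);

		/* ... then do the potentially slow per-entry work without the lock. */
		while ((n = local)) {
			local = n->next;
			printf("clearing region %d\n", n->key);
			free(n);
		}
	}

	int main(void)
	{
		add_clean(1);
		add_clean(2);
		update_states();
		return 0;
	}
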
495 static void rh_inc(struct dm_region_hash *rh, region_t region)
499 read_lock(&rh->hash_lock);
500 reg = __rh_find(rh, region);
502 spin_lock_irq(&rh->region_lock);
508 spin_unlock_irq(&rh->region_lock);
510 rh->log->type->mark_region(rh->log, reg->key);
512 spin_unlock_irq(&rh->region_lock);
515 read_unlock(&rh->hash_lock);
518 void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
525 rh_inc(rh, dm_rh_bio_to_region(rh, bio));
530 void dm_rh_dec(struct dm_region_hash *rh, region_t region)
536 read_lock(&rh->hash_lock);
537 reg = __rh_lookup(rh, region);
538 read_unlock(&rh->hash_lock);
540 spin_lock_irqsave(&rh->region_lock, flags);
554 if (unlikely(rh->barrier_failure)) {
562 list_add_tail(&reg->list, &rh->quiesced_regions);
565 list_add(&reg->list, &rh->clean_regions);
569 spin_unlock_irqrestore(&rh->region_lock, flags);
572 rh->wakeup_workers(rh->context);
579 static int __rh_recovery_prepare(struct dm_region_hash *rh)
588 r = rh->log->type->get_resync_work(rh->log, &region);
596 read_lock(&rh->hash_lock);
597 reg = __rh_find(rh, region);
598 read_unlock(&rh->hash_lock);
600 spin_lock_irq(&rh->region_lock);
607 list_move(&reg->list, &rh->quiesced_regions);
609 spin_unlock_irq(&rh->region_lock);
614 void dm_rh_recovery_prepare(struct dm_region_hash *rh)
617 atomic_inc(&rh->recovery_in_flight);
619 while (!down_trylock(&rh->recovery_count)) {
620 atomic_inc(&rh->recovery_in_flight);
621 if (__rh_recovery_prepare(rh) <= 0) {
622 atomic_dec(&rh->recovery_in_flight);
623 up(&rh->recovery_count);
629 if (atomic_dec_and_test(&rh->recovery_in_flight))
630 rh->wakeup_all_recovery_waiters(rh->context);
637 struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
641 spin_lock_irq(&rh->region_lock);
642 if (!list_empty(&rh->quiesced_regions)) {
643 reg = list_entry(rh->quiesced_regions.next,
647 spin_unlock_irq(&rh->region_lock);
655 struct dm_region_hash *rh = reg->rh;
657 spin_lock_irq(&rh->region_lock);
659 list_add(&reg->list, &reg->rh->recovered_regions);
661 list_add(&reg->list, &reg->rh->failed_recovered_regions);
663 spin_unlock_irq(&rh->region_lock);
665 rh->wakeup_workers(rh->context);
670 int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
672 return atomic_read(&rh->recovery_in_flight);
676 int dm_rh_flush(struct dm_region_hash *rh)
678 return rh->log->type->flush(rh->log);
682 void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
686 read_lock(&rh->hash_lock);
687 reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
689 read_unlock(&rh->hash_lock);
693 void dm_rh_stop_recovery(struct dm_region_hash *rh)
698 for (i = 0; i < rh->max_recovery; i++)
699 down(&rh->recovery_count);
703 void dm_rh_start_recovery(struct dm_region_hash *rh)
707 for (i = 0; i < rh->max_recovery; i++)
708 up(&rh->recovery_count);
710 rh->wakeup_workers(rh->context);
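
recovery_count is a counting semaphore initialised to 0 (line 210): dm_rh_start_recovery() releases max_recovery tokens (lines 707-708), each in-flight recovery holds one (down_trylock at line 619, up at line 377 or 623), and dm_rh_stop_recovery() drains all max_recovery tokens (lines 698-699), which blocks until every outstanding recovery has finished. A hedged POSIX-semaphore sketch of that start/throttle/stop scheme, simplified to claim and release tokens in one pass:

	#include <semaphore.h>
	#include <stdio.h>

	#define MAX_RECOVERY 8

	static sem_t recovery_count;

	static void start_recovery(void)
	{
		int i;

		for (i = 0; i < MAX_RECOVERY; i++)
			sem_post(&recovery_count);	/* hand out MAX_RECOVERY tokens */
	}

	static void stop_recovery(void)
	{
		int i;

		/* Blocks until every token is back, i.e. no recovery is in flight. */
		for (i = 0; i < MAX_RECOVERY; i++)
			sem_wait(&recovery_count);
	}

	static int prepare_recovery(void)
	{
		int claimed = 0;

		/* Claim tokens while they are available (cf. the down_trylock loop). */
		while (sem_trywait(&recovery_count) == 0)
			claimed++;
		return claimed;
	}

	static void complete_recovery(int n)
	{
		/* Each finished recovery returns its token, like up(&rh->recovery_count). */
		while (n-- > 0)
			sem_post(&recovery_count);
	}

	int main(void)
	{
		int in_flight;

		sem_init(&recovery_count, 0, 0);	/* starts drained, like sema_init(..., 0) */
		start_recovery();			/* release MAX_RECOVERY tokens */
		in_flight = prepare_recovery();		/* claim as many as are available */
		printf("%d recoveries in flight\n", in_flight);
		complete_recovery(in_flight);		/* pretend they all finished */
		stop_recovery();			/* would block if any token were still held */
		sem_destroy(&recovery_count);
		return 0;
	}
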