Lines Matching refs:rh (only in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/md/)

98 	struct region_hash *rh;
122 struct region_hash rh;
148 static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
150 return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
153 static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
155 return region << rh->region_shift;
167 static int rh_init(struct region_hash *rh, struct mirror_set *ms,
183 rh->ms = ms;
184 rh->log = log;
185 rh->region_size = region_size;
186 rh->region_shift = ffs(region_size) - 1;
187 rwlock_init(&rh->hash_lock);
188 rh->mask = nr_buckets - 1;
189 rh->nr_buckets = nr_buckets;
191 rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
192 if (!rh->buckets) {
198 INIT_LIST_HEAD(rh->buckets + i);
200 spin_lock_init(&rh->region_lock);
201 sema_init(&rh->recovery_count, 0);
202 atomic_set(&rh->recovery_in_flight, 0);
203 INIT_LIST_HEAD(&rh->clean_regions);
204 INIT_LIST_HEAD(&rh->quiesced_regions);
205 INIT_LIST_HEAD(&rh->recovered_regions);
207 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
209 if (!rh->region_pool) {
210 vfree(rh->buckets);
211 rh->buckets = NULL;
218 static void rh_exit(struct region_hash *rh)
223 BUG_ON(!list_empty(&rh->quiesced_regions));
224 for (h = 0; h < rh->nr_buckets; h++) {
225 list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
227 mempool_free(reg, rh->region_pool);
231 if (rh->log)
232 dm_destroy_dirty_log(rh->log);
233 if (rh->region_pool)
234 mempool_destroy(rh->region_pool);
235 vfree(rh->buckets);
240 static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
242 return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
245 static struct region *__rh_lookup(struct region_hash *rh, region_t region)
249 list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
256 static void __rh_insert(struct region_hash *rh, struct region *reg)
258 unsigned int h = rh_hash(rh, reg->key);
259 list_add(&reg->hash_list, rh->buckets + h);
262 static struct region *__rh_alloc(struct region_hash *rh, region_t region)
266 read_unlock(&rh->hash_lock);
267 nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
270 nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
272 nreg->rh = rh;
279 write_lock_irq(&rh->hash_lock);
281 reg = __rh_lookup(rh, region);
284 mempool_free(nreg, rh->region_pool);
287 __rh_insert(rh, nreg);
289 spin_lock(&rh->region_lock);
290 list_add(&nreg->list, &rh->clean_regions);
291 spin_unlock(&rh->region_lock);
295 write_unlock_irq(&rh->hash_lock);
296 read_lock(&rh->hash_lock);
301 static inline struct region *__rh_find(struct region_hash *rh, region_t region)
305 reg = __rh_lookup(rh, region);
307 reg = __rh_alloc(rh, region);
312 static int rh_state(struct region_hash *rh, region_t region, int may_block)
317 read_lock(&rh->hash_lock);
318 reg = __rh_lookup(rh, region);
319 read_unlock(&rh->hash_lock);
328 r = rh->log->type->in_sync(rh->log, region, may_block);
337 static inline int rh_in_sync(struct region_hash *rh,
340 int state = rh_state(rh, region, may_block);
355 struct region_hash *rh = reg->rh;
357 rh->log->type->set_region_sync(rh->log, reg->key, success);
358 dispatch_bios(rh->ms, &reg->delayed_bios);
359 if (atomic_dec_and_test(&rh->recovery_in_flight))
361 up(&rh->recovery_count);
364 static void rh_update_states(struct region_hash *rh)
374 write_lock_irq(&rh->hash_lock);
375 spin_lock(&rh->region_lock);
376 if (!list_empty(&rh->clean_regions)) {
377 list_splice(&rh->clean_regions, &clean);
378 INIT_LIST_HEAD(&rh->clean_regions);
381 rh->log->type->clear_region(rh->log, reg->key);
386 if (!list_empty(&rh->recovered_regions)) {
387 list_splice(&rh->recovered_regions, &recovered);
388 INIT_LIST_HEAD(&rh->recovered_regions);
393 spin_unlock(&rh->region_lock);
394 write_unlock_irq(&rh->hash_lock);
402 rh->log->type->clear_region(rh->log, reg->key);
404 mempool_free(reg, rh->region_pool);
407 rh->log->type->flush(rh->log);
410 mempool_free(reg, rh->region_pool);
413 static void rh_inc(struct region_hash *rh, region_t region)
417 read_lock(&rh->hash_lock);
418 reg = __rh_find(rh, region);
420 spin_lock_irq(&rh->region_lock);
426 spin_unlock_irq(&rh->region_lock);
428 rh->log->type->mark_region(rh->log, reg->key);
430 spin_unlock_irq(&rh->region_lock);
433 read_unlock(&rh->hash_lock);
436 static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
441 rh_inc(rh, bio_to_region(rh, bio));
444 static void rh_dec(struct region_hash *rh, region_t region)
450 read_lock(&rh->hash_lock);
451 reg = __rh_lookup(rh, region);
452 read_unlock(&rh->hash_lock);
454 spin_lock_irqsave(&rh->region_lock, flags);
469 list_add_tail(&reg->list, &rh->quiesced_regions);
472 list_add(&reg->list, &rh->clean_regions);
476 spin_unlock_irqrestore(&rh->region_lock, flags);
479 wake(rh->ms);
485 static int __rh_recovery_prepare(struct region_hash *rh)
494 r = rh->log->type->get_resync_work(rh->log, &region);
502 read_lock(&rh->hash_lock);
503 reg = __rh_find(rh, region);
504 read_unlock(&rh->hash_lock);
506 spin_lock_irq(&rh->region_lock);
513 list_move(&reg->list, &rh->quiesced_regions);
515 spin_unlock_irq(&rh->region_lock);
520 static void rh_recovery_prepare(struct region_hash *rh)
523 atomic_inc(&rh->recovery_in_flight);
525 while (!down_trylock(&rh->recovery_count)) {
526 atomic_inc(&rh->recovery_in_flight);
527 if (__rh_recovery_prepare(rh) <= 0) {
528 atomic_dec(&rh->recovery_in_flight);
529 up(&rh->recovery_count);
535 if (atomic_dec_and_test(&rh->recovery_in_flight))
542 static struct region *rh_recovery_start(struct region_hash *rh)
546 spin_lock_irq(&rh->region_lock);
547 if (!list_empty(&rh->quiesced_regions)) {
548 reg = list_entry(rh->quiesced_regions.next,
552 spin_unlock_irq(&rh->region_lock);
559 struct region_hash *rh = reg->rh;
561 spin_lock_irq(&rh->region_lock);
562 list_add(&reg->list, &reg->rh->recovered_regions);
563 spin_unlock_irq(&rh->region_lock);
565 wake(rh->ms);
568 static void rh_flush(struct region_hash *rh)
570 rh->log->type->flush(rh->log);
573 static void rh_delay(struct region_hash *rh, struct bio *bio)
577 read_lock(&rh->hash_lock);
578 reg = __rh_find(rh, bio_to_region(rh, bio));
580 read_unlock(&rh->hash_lock);
583 static void rh_stop_recovery(struct region_hash *rh)
589 down(&rh->recovery_count);
592 static void rh_start_recovery(struct region_hash *rh)
597 up(&rh->recovery_count);
599 wake(rh->ms);
648 from.sector = m->offset + region_to_sector(reg->rh, reg->key);
654 from.count = ms->ti->len & (reg->rh->region_size - 1);
656 from.count = reg->rh->region_size;
658 from.count = reg->rh->region_size;
667 dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
684 struct dirty_log *log = ms->rh.log;
689 rh_recovery_prepare(&ms->rh);
694 while ((reg = rh_recovery_start(&ms->rh))) {
735 region = bio_to_region(&ms->rh, bio);
740 if (rh_in_sync(&ms->rh, region, 1))
832 state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
856 rh_inc_pending(&ms->rh, &sync);
857 rh_inc_pending(&ms->rh, &nosync);
858 rh_flush(&ms->rh);
867 rh_delay(&ms->rh, bio);
891 rh_update_states(&ms->rh);
935 if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
951 rh_exit(&ms->rh);
1121 ti->split_io = ms->rh.region_size;
1192 map_context->ll = bio_to_region(&ms->rh, bio);
1199 r = ms->rh.log->type->in_sync(ms->rh.log,
1200 bio_to_region(&ms->rh, bio), 0);
1235 rh_dec(&ms->rh, region);
1243 struct dirty_log *log = ms->rh.log;
1245 rh_stop_recovery(&ms->rh);
1249 !atomic_read(&ms->rh.recovery_in_flight));
1258 struct dirty_log *log = ms->rh.log;
1261 rh_start_recovery(&ms->rh);
1277 (unsigned long long)ms->rh.log->type->
1278 get_sync_count(ms->rh.log),
1281 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
1286 sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
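
Note: most of the matched lines above revolve around the region arithmetic of the region hash: bio_to_region() maps a bio's start sector to a region index via region_shift (set to ffs(region_size) - 1 in rh_init), region_to_sector() is the inverse shift, and rh_hash() folds a region index into a bucket index using RH_HASH_MULT and mask = nr_buckets - 1. The following stand-alone sketch only illustrates that arithmetic; the *_demo names, the numeric value used for RH_HASH_MULT, and the example parameters (region_size, nr_buckets, target begin, sector) are assumptions made for illustration, not taken from the listing.

#include <stdio.h>
#include <stdint.h>
#include <strings.h>                    /* ffs() */

typedef uint64_t sector_t;              /* stand-ins for the kernel types */
typedef uint64_t region_t;

#define RH_HASH_MULT 2654435387U        /* assumed value; only the name appears above */

struct region_hash_demo {
	sector_t begin;                 /* stands in for rh->ms->ti->begin */
	int region_shift;               /* ffs(region_size) - 1 */
	unsigned int mask;              /* nr_buckets - 1, nr_buckets a power of two */
};

/* Sector -> region index, as in the matched bio_to_region() lines. */
static region_t bio_to_region_demo(const struct region_hash_demo *rh, sector_t bi_sector)
{
	return (bi_sector - rh->begin) >> rh->region_shift;
}

/* Region index -> first sector of that region, as in region_to_sector(). */
static sector_t region_to_sector_demo(const struct region_hash_demo *rh, region_t region)
{
	return region << rh->region_shift;
}

/* Region index -> hash bucket, as in rh_hash(). */
static unsigned int rh_hash_demo(const struct region_hash_demo *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

int main(void)
{
	unsigned int region_size = 1024;    /* example: 1024 sectors per region */
	unsigned int nr_buckets = 64;       /* example: power-of-two bucket count */
	struct region_hash_demo rh = {
		.begin        = 0,
		.region_shift = ffs(region_size) - 1,   /* 10 for 1024 */
		.mask         = nr_buckets - 1,
	};
	sector_t s = 123456;                /* example bio start sector */
	region_t r = bio_to_region_demo(&rh, s);

	printf("sector %llu -> region %llu (region starts at sector %llu, bucket %u)\n",
	       (unsigned long long) s, (unsigned long long) r,
	       (unsigned long long) region_to_sector_demo(&rh, r),
	       rh_hash_demo(&rh, r));
	return 0;
}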