Lines matching defs:chunk in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/md/

167 				chunk_t chunk)
169 return chunk << store->chunk_shift;
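
The conversion at lines 167-169 turns a chunk number into its starting sector with a left shift, since a chunk is a power-of-two number of 512-byte sectors. A minimal user-space sketch of that arithmetic and its inverse, using a hypothetical chunk_shift of 4 (16-sector, i.e. 8 KiB, chunks) and simplified struct and function names:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t chunk_t;

    /* Hypothetical store: only the shift the conversion needs. */
    struct store {
        unsigned chunk_shift;              /* log2(chunk size in sectors) */
    };

    static uint64_t chunk_to_sector(const struct store *s, chunk_t chunk)
    {
        return chunk << s->chunk_shift;    /* first sector of the chunk */
    }

    static chunk_t sector_to_chunk(const struct store *s, uint64_t sector)
    {
        return sector >> s->chunk_shift;   /* chunk containing the sector */
    }

    int main(void)
    {
        struct store s = { .chunk_shift = 4 };   /* 16 sectors = 8 KiB chunks */

        printf("chunk 3 starts at sector %llu\n",
               (unsigned long long)chunk_to_sector(&s, 3));   /* 48 */
        printf("sector 50 is in chunk %llu\n",
               (unsigned long long)sector_to_chunk(&s, 50));  /* 3 */
        return 0;
    }
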
210 chunk_t chunk;
216 chunk_t chunk)
222 c->chunk = chunk;
226 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
244 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
253 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
254 if (c->chunk == chunk) {
269 static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
271 while (__chunk_is_tracked(s, chunk))
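
The hits at lines 210-271 belong to the tracked-chunk table: reads redirected to the chunk store are recorded in a small hash so that __check_for_conflicting_io() can wait until no I/O is still in flight for a given chunk. A simplified, single-threaded sketch of that hash-and-probe pattern (bucket count and names are hypothetical, and the kernel's locking and sleeping are omitted):

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    typedef uint64_t chunk_t;

    #define TRACKED_HASH_SIZE 16                    /* hypothetical bucket count */
    #define TRACKED_HASH(c)   ((c) & (TRACKED_HASH_SIZE - 1))

    struct tracked_chunk {
        struct tracked_chunk *next;
        chunk_t chunk;
    };

    static struct tracked_chunk *tracked_hash[TRACKED_HASH_SIZE];

    /* Record that I/O is in flight against this chunk. */
    static struct tracked_chunk *track_chunk(chunk_t chunk)
    {
        struct tracked_chunk *c = malloc(sizeof(*c));

        c->chunk = chunk;
        c->next = tracked_hash[TRACKED_HASH(chunk)];
        tracked_hash[TRACKED_HASH(chunk)] = c;
        return c;
    }

    /* Mirrors __chunk_is_tracked(): walk the bucket looking for the chunk. */
    static int chunk_is_tracked(chunk_t chunk)
    {
        struct tracked_chunk *c;

        for (c = tracked_hash[TRACKED_HASH(chunk)]; c; c = c->next)
            if (c->chunk == chunk)
                return 1;
        return 0;
    }

    int main(void)
    {
        track_chunk(42);
        printf("chunk 42 tracked: %d\n", chunk_is_tracked(42));   /* 1 */
        printf("chunk 7 tracked:  %d\n", chunk_is_tracked(7));    /* 0 */
        return 0;
    }
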
453 /* Sort the list according to chunk size, largest-first smallest-last */
509 * Move snapshot to correct place in list according to chunk size.
541 * The lowest hash_shift bits of the chunk number are ignored, allowing
579 static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
581 return (chunk >> et->hash_shift) & et->hash_mask;
594 chunk_t chunk)
599 slot = &et->table[exception_hash(et, chunk)];
601 if (chunk >= e->old_chunk &&
602 chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
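
Lines 541-602 are the exception hash table: the lowest hash_shift bits of the chunk number are ignored when choosing a bucket, so consecutive chunks land together, and a lookup matches any chunk within an exception's consecutive range. A small sketch of just that hashing and range test, with hypothetical mask and shift values:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t chunk_t;

    /* Hypothetical table parameters: 256 buckets, group runs of 8 chunks. */
    #define HASH_MASK   0xffu
    #define HASH_SHIFT  3u

    static unsigned exception_hash(chunk_t chunk)
    {
        return (chunk >> HASH_SHIFT) & HASH_MASK;  /* as in dm-snap's exception_hash() */
    }

    /* One completed exception covering old_chunk .. old_chunk + consecutive. */
    struct exception {
        chunk_t old_chunk;
        unsigned consecutive;
    };

    static int exception_covers(const struct exception *e, chunk_t chunk)
    {
        return chunk >= e->old_chunk &&
               chunk <= e->old_chunk + e->consecutive;
    }

    int main(void)
    {
        struct exception e = { .old_chunk = 100, .consecutive = 3 }; /* chunks 100..103 */

        printf("hash(100)=%u hash(103)=%u\n",
               exception_hash(100), exception_hash(103));            /* same bucket */
        printf("covers 102: %d, covers 104: %d\n",
               exception_covers(&e, 102), exception_covers(&e, 104)); /* 1, 0 */
        return 0;
    }
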
658 /* Insert after an existing chunk? */
668 /* Insert before an existing chunk? */
712 * Return a minimum chunk size of all snapshots that have the specified origin.
797 * Remove one chunk from the index of completed exceptions.
813 * If this is the only chunk using this exception, remove exception.
822 * The chunk may be either at the beginning or the end of a
826 * Decrement the consecutive chunk counter and adjust the
835 "middle of a chunk range [%llu - %llu]",
1052 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
1525 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
1527 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
1537 * for this chunk, otherwise it allocates a new one and inserts
1545 struct dm_snap_pending_exception *pe, chunk_t chunk)
1549 pe2 = __lookup_pending_exception(s, chunk);
1555 pe->e.old_chunk = chunk;
1571 struct bio *bio, chunk_t chunk)
1576 (chunk - e->old_chunk)) +
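
remap_exception() at lines 1571-1576 redirects a bio to the COW device: the new sector is the start of the exception's new chunk, advanced by how far the looked-up chunk sits inside the consecutive run, plus the bio's offset within its chunk. A worked user-space version of that address arithmetic, again assuming hypothetical 16-sector chunks:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t chunk_t;

    #define CHUNK_SHIFT 4u                           /* hypothetical: 16-sector chunks */
    #define CHUNK_MASK  ((1u << CHUNK_SHIFT) - 1)    /* offset of a sector within its chunk */

    /* Remap an origin sector through an exception covering a consecutive run. */
    static uint64_t remap_sector(uint64_t bi_sector, chunk_t chunk,
                                 chunk_t old_chunk, chunk_t new_chunk)
    {
        return ((new_chunk + (chunk - old_chunk)) << CHUNK_SHIFT) +
               (bi_sector & CHUNK_MASK);
    }

    int main(void)
    {
        /* Exception: origin chunks 100..103 copied to COW chunks 7..10. */
        chunk_t old_chunk = 100, new_chunk = 7;
        uint64_t bi_sector = 102 * 16 + 5;           /* sector 5 inside origin chunk 102 */
        chunk_t chunk = bi_sector >> CHUNK_SHIFT;

        printf("remapped to COW sector %llu\n",      /* (7 + 2) * 16 + 5 = 149 */
               (unsigned long long)remap_sector(bi_sector, chunk,
                                                old_chunk, new_chunk));
        return 0;
    }
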
1587 chunk_t chunk;
1595 chunk = sector_to_chunk(s->store, bio->bi_sector);
1610 e = dm_lookup_exception(&s->complete, chunk);
1612 remap_exception(s, e, bio, chunk);
1622 pe = __lookup_pending_exception(s, chunk);
1634 e = dm_lookup_exception(&s->complete, chunk);
1637 remap_exception(s, e, bio, chunk);
1641 pe = __find_pending_exception(s, pe, chunk);
1649 remap_exception(s, &pe->e, bio, chunk);
1663 map_context->ptr = track_chunk(s, chunk);
1678 * For each chunk, if there is an existing exception, it is used to
1681 * If merging is currently taking place on the chunk in question, the
1690 chunk_t chunk;
1701 chunk = sector_to_chunk(s->store, bio->bi_sector);
1710 e = dm_lookup_exception(&s->complete, chunk);
1714 chunk >= s->first_merging_chunk &&
1715 chunk < (s->first_merging_chunk +
1723 remap_exception(s, e, bio, chunk);
1726 map_context->ptr = track_chunk(s, chunk);
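
In snapshot_merge_map() (lines 1690-1726) a bio is deferred when its chunk lies inside the window currently being merged, i.e. at or after first_merging_chunk and before first_merging_chunk plus the number of chunks in the merge. A minimal sketch of that interval test, with a hypothetical merge_state struct standing in for the snapshot fields:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t chunk_t;

    /* The two fields that define the in-progress merge window. */
    struct merge_state {
        chunk_t first_merging_chunk;
        unsigned num_merging_chunks;
    };

    /* Mirrors the range check guarding the "defer this bio" path. */
    static int chunk_is_merging(const struct merge_state *m, chunk_t chunk)
    {
        return chunk >= m->first_merging_chunk &&
               chunk < m->first_merging_chunk + m->num_merging_chunks;
    }

    int main(void)
    {
        struct merge_state m = { .first_merging_chunk = 200, .num_merging_chunks = 8 };

        printf("chunk 203 merging: %d\n", chunk_is_merging(&m, 203)); /* 1: defer I/O */
        printf("chunk 208 merging: %d\n", chunk_is_merging(&m, 208)); /* 0: map normally */
        return 0;
    }
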
1814 /* Now we have correct chunk size, reregister */
1940 chunk_t chunk;
1963 * different chunk sizes.
1965 chunk = sector_to_chunk(snap->store, sector);
1972 e = dm_lookup_exception(&snap->complete, chunk);
1976 pe = __lookup_pending_exception(snap, chunk);
1987 e = dm_lookup_exception(&snap->complete, chunk);
1993 pe = __find_pending_exception(snap, pe, chunk);
2061 * The chunk size of the merging snapshot may be larger than the chunk
2145 * chunk sizes.
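
Several hits above (lines 712, 2061 and 2145) deal with the fact that snapshots of one origin may use different chunk sizes, so the origin path works with the smallest of them. A trivial sketch of that reduction over a hypothetical list of per-snapshot chunk sizes (in sectors):

    #include <stdint.h>
    #include <stdio.h>

    /* Return the smallest chunk size (in sectors) among an origin's snapshots. */
    static uint32_t minimum_chunk_size(const uint32_t *chunk_sectors, unsigned n)
    {
        uint32_t min = UINT32_MAX;
        unsigned i;

        for (i = 0; i < n; i++)
            if (chunk_sectors[i] < min)
                min = chunk_sectors[i];
        return n ? min : 0;
    }

    int main(void)
    {
        /* Hypothetical origin with three snapshots: 8 KiB, 32 KiB, 16 KiB chunks. */
        uint32_t sizes[] = { 16, 64, 32 };     /* in 512-byte sectors */

        printf("minimum chunk size: %u sectors\n",
               minimum_chunk_size(sizes, 3));  /* 16 */
        return 0;
    }
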