Lines Matching refs:ps (only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/md/)

Here ps is the struct pstore that carries the state of dm-snapshot's persistent exception store; the matched functions indicate the file is dm-snap-persistent.c. The number at the start of each match is its line number within that source file.

162 static int alloc_area(struct pstore *ps)
167 len = ps->store->chunk_size << SECTOR_SHIFT;
173 ps->area = vmalloc(len);
174 if (!ps->area)
177 ps->zero_area = vmalloc(len);
178 if (!ps->zero_area)
180 memset(ps->zero_area, 0, len);
182 ps->header_area = vmalloc(len);
183 if (!ps->header_area)
189 vfree(ps->zero_area);
192 vfree(ps->area);
198 static void free_area(struct pstore *ps)
200 if (ps->area)
201 vfree(ps->area);
202 ps->area = NULL;
204 if (ps->zero_area)
205 vfree(ps->zero_area);
206 ps->zero_area = NULL;
208 if (ps->header_area)
209 vfree(ps->header_area);
210 ps->header_area = NULL;
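
Taken together, alloc_area() and free_area() manage three buffers, each sized to one whole chunk (chunk_size sectors, i.e. chunk_size << SECTOR_SHIFT bytes): ps->area caches the metadata area currently being filled, ps->zero_area stays all-zero so fresh areas can be wiped on disk, and ps->header_area is scratch space for the header. A minimal userspace sketch of the same pattern (malloc stands in for vmalloc; the struct and function names here are illustrative, not the kernel's):

    #include <stdlib.h>
    #include <string.h>

    #define SECTOR_SHIFT 9                  /* 512-byte sectors */

    struct bufs {
        void *area;         /* in-core copy of the current metadata area */
        void *zero_area;    /* all-zero chunk, used to wipe areas on disk */
        void *header_area;  /* scratch buffer for the on-disk header */
    };

    static int alloc_area_sketch(struct bufs *b, size_t chunk_sectors)
    {
        size_t len = chunk_sectors << SECTOR_SHIFT;

        b->area = malloc(len);
        b->zero_area = malloc(len);
        b->header_area = malloc(len);
        if (!b->area || !b->zero_area || !b->header_area) {
            free(b->area);                  /* free(NULL) is a no-op */
            free(b->zero_area);
            free(b->header_area);
            return -1;                      /* the kernel returns -ENOMEM */
        }
        memset(b->zero_area, 0, len);       /* as at line 180 */
        return 0;
    }

Note that vfree(NULL) is likewise a no-op, so the NULL checks in free_area() are defensive; resetting the pointers afterwards guards against double-frees.
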
230 static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
234 .bdev = dm_snap_cow(ps->store->snap)->bdev,
235 .sector = ps->store->chunk_size * chunk,
236 .count = ps->store->chunk_size,
242 .client = ps->io_client,
258 queue_work(ps->metadata_wq, &req.work);
259 flush_workqueue(ps->metadata_wq);
267 static chunk_t area_location(struct pstore *ps, chunk_t area)
269 return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
276 static int area_io(struct pstore *ps, int rw)
281 chunk = area_location(ps, ps->current_area);
283 r = chunk_io(ps, ps->area, chunk, rw, 0);
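
chunk_io() turns a chunk index into a byte range on the COW device (the region starts at sector chunk * chunk_size, line 235) and pushes the dm-io request through ps->metadata_wq; the flush_workqueue() at line 259 is what makes the call synchronous. area_location() then encodes the on-disk layout: chunk 0 holds the header (NUM_SNAPSHOT_HDR_CHUNKS is 1 in this file), and each metadata chunk is followed by exceptions_per_area data chunks, so consecutive metadata chunks sit exceptions_per_area + 1 chunks apart. A standalone sketch of that arithmetic (the formula is from line 269; the example numbers are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t chunk_t;

    #define NUM_SNAPSHOT_HDR_CHUNKS 1

    /* Chunk index of the metadata chunk that fronts a given area. */
    static chunk_t area_location(chunk_t exceptions_per_area, chunk_t area)
    {
        return NUM_SNAPSHOT_HDR_CHUNKS + (exceptions_per_area + 1) * area;
    }

    int main(void)
    {
        chunk_t epa = 256;  /* e.g. 4 KiB chunks / 16-byte exception records */

        for (chunk_t a = 0; a < 3; a++)
            printf("area %llu -> metadata at chunk %llu\n",
                   (unsigned long long)a,
                   (unsigned long long)area_location(epa, a));
        /* prints chunks 1, 258, 515: header at chunk 0, then each
         * metadata chunk followed by 256 data chunks */
        return 0;
    }
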
290 static void zero_memory_area(struct pstore *ps)
292 memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
295 static int zero_disk_area(struct pstore *ps, chunk_t area)
297 return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
300 static int read_header(struct pstore *ps, int *new_snapshot)
312 if (!ps->store->chunk_size) {
313 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
314 bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
316 ps->store->chunk_mask = ps->store->chunk_size - 1;
317 ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
321 ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
323 if (IS_ERR(ps->io_client))
324 return PTR_ERR(ps->io_client);
326 r = alloc_area(ps);
330 r = chunk_io(ps, ps->header_area, 0, READ, 1);
334 dh = ps->header_area;
348 ps->valid = le32_to_cpu(dh->valid);
349 ps->version = le32_to_cpu(dh->version);
352 if (ps->store->chunk_size == chunk_size)
358 chunk_size, ps->store->chunk_size);
361 free_area(ps);
363 r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
371 r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
372 ps->io_client);
376 r = alloc_area(ps);
380 free_area(ps);
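
read_header() covers two cases. For a brand-new snapshot the chunk size is defaulted from the COW device's logical block size (lines 312-317) and *new_snapshot is set; for an existing one the chunk_size recorded in the header wins, so the buffers and the dm-io client are reallocated to match (lines 361-376). The mask/shift pair at lines 316-317 assumes chunk_size is a power of two, which the exception-store code enforces when a chunk size is set; a sketch of that arithmetic:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
        unsigned chunk_size = 8;                    /* sectors; power of 2 */
        unsigned chunk_mask = chunk_size - 1;       /* line 316 */
        unsigned chunk_shift = ffs(chunk_size) - 1; /* line 317: log2 */

        unsigned sector = 1234;
        printf("sector %u -> chunk %u, offset %u\n",
               sector, sector >> chunk_shift, sector & chunk_mask);
        /* sector 1234 -> chunk 154, offset 2 */
        return 0;
    }
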
384 static int write_header(struct pstore *ps)
388 memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
390 dh = ps->header_area;
392 dh->valid = cpu_to_le32(ps->valid);
393 dh->version = cpu_to_le32(ps->version);
394 dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
396 return chunk_io(ps, ps->header_area, 0, WRITE, 1);
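
write_header() zeroes the whole header chunk, fills in the fields with cpu_to_le32(), and writes it to chunk 0, so the on-disk format is endian-stable; read_header() reverses this with le32_to_cpu() at lines 348-349. A sketch of the header record these lines imply (field set and order follow dm-snap-persistent.c, where a magic field is also checked on load):

    #include <stdint.h>

    /* Little-endian on-disk snapshot header, stored in chunk 0. */
    struct disk_header {
        uint32_t magic;      /* zero means "freshly created COW device" */
        uint32_t valid;      /* cleared if the snapshot becomes unusable */
        uint32_t version;    /* SNAPSHOT_DISK_VERSION */
        uint32_t chunk_size; /* in 512-byte sectors */
    };
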
402 static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
404 BUG_ON(index >= ps->exceptions_per_area);
406 return ((struct disk_exception *) ps->area) + index;
409 static void read_exception(struct pstore *ps,
412 struct disk_exception *e = get_exception(ps, index);
419 static void write_exception(struct pstore *ps,
422 struct disk_exception *e = get_exception(ps, index);
429 static void clear_exception(struct pstore *ps, uint32_t index)
431 struct disk_exception *e = get_exception(ps, index);
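
get_exception() and the three accessors built on it treat ps->area as a flat array of struct disk_exception, which is why exceptions_per_area (line 574) is simply the chunk size in bytes divided by the record size. A sketch of the accessors (the two-field little-endian record matches this file's on-disk format; an all-zero record marks a cleared slot):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    struct disk_exception {  /* on-disk, little-endian */
        uint64_t old_chunk;  /* chunk on the origin device */
        uint64_t new_chunk;  /* COW chunk holding the copied data */
    };

    static struct disk_exception *get_exception(void *area, uint32_t index,
                                                uint32_t epa)
    {
        assert(index < epa);                    /* BUG_ON() at line 404 */
        return (struct disk_exception *)area + index;
    }

    /* clear_exception(): zero both fields to mark the slot unused. */
    static void clear_exception(void *area, uint32_t index, uint32_t epa)
    {
        memset(get_exception(area, index, epa), 0,
               sizeof(struct disk_exception));
    }
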
443 static int insert_exceptions(struct pstore *ps,
456 for (i = 0; i < ps->exceptions_per_area; i++) {
457 read_exception(ps, i, &de);
466 ps->current_committed = i;
474 if (ps->next_free <= de.new_chunk)
475 ps->next_free = de.new_chunk + 1;
488 static int read_exceptions(struct pstore *ps,
499 for (ps->current_area = 0; full; ps->current_area++) {
500 r = area_io(ps, READ);
504 r = insert_exceptions(ps, callback, callback_context, &full);
509 ps->current_area--;
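
insert_exceptions() exploits the layout: COW chunk 0 always holds the header, so a stored new_chunk of 0 can never be a real mapping, and it doubles as the end-of-list sentinel that tells the loader the area is only partially full (lines 456-466). While scanning, next_free is pushed past every allocated chunk (lines 474-475) so later allocations cannot collide with existing data. read_exceptions() keeps loading areas until one is not full, then backs current_area up to the last area actually in use (line 509). A simplified sketch of the per-area scan (the callback that feeds each record into the in-core exception table is omitted):

    #include <stdint.h>

    struct disk_exception { uint64_t old_chunk, new_chunk; };

    /* Returns 1 if the area was completely full (keep loading the next
     * one), 0 if the sentinel ended the scan early. */
    static int scan_area(const struct disk_exception *area, uint32_t epa,
                         uint64_t *next_free, uint32_t *committed)
    {
        for (uint32_t i = 0; i < epa; i++) {
            if (area[i].new_chunk == 0) {   /* chunk 0 is the header */
                *committed = i;
                return 0;
            }
            if (*next_free <= area[i].new_chunk)    /* line 474 */
                *next_free = area[i].new_chunk + 1;
        }
        return 1;
    }
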
524 struct pstore *ps = get_info(store);
526 *sectors_allocated = ps->next_free * store->chunk_size;
531 * Then there are (ps->current_area + 1) metadata chunks, each one
532 * separated from the next by ps->exceptions_per_area data chunks.
534 *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
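
persistent_usage() derives both figures from that layout: everything below next_free counts as allocated, and the metadata overhead is one header chunk plus one metadata chunk per area touched so far. A worked example with illustrative values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long chunk_size = 8;    /* sectors */
        unsigned long long current_area = 2;  /* third area in use */
        unsigned long long next_free = 600;   /* next unallocated COW chunk */

        /* lines 526 and 534 */
        printf("allocated: %llu sectors\n", next_free * chunk_size);
        printf("metadata:  %llu sectors\n",
               (current_area + 1 + 1 /* NUM_SNAPSHOT_HDR_CHUNKS */)
               * chunk_size);
        /* prints 4800 and 32 */
        return 0;
    }
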
540 struct pstore *ps = get_info(store);
542 destroy_workqueue(ps->metadata_wq);
545 if (ps->io_client)
546 dm_io_client_destroy(ps->io_client);
547 free_area(ps);
550 if (ps->callbacks)
551 vfree(ps->callbacks);
553 kfree(ps);
562 struct pstore *ps = get_info(store);
567 r = read_header(ps, &new_snapshot);
574 ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
576 ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
577 sizeof(*ps->callbacks));
578 if (!ps->callbacks)
585 r = write_header(ps);
591 ps->current_area = 0;
592 zero_memory_area(ps);
593 r = zero_disk_area(ps, 0);
601 if (ps->version != SNAPSHOT_DISK_VERSION) {
603 ps->version);
610 if (!ps->valid)
616 r = read_exceptions(ps, callback, callback_context);
624 struct pstore *ps = get_info(store);
630 if (size < ((ps->next_free + 1) * store->chunk_size))
633 e->new_chunk = ps->next_free;
639 stride = (ps->exceptions_per_area + 1);
640 next_free = ++ps->next_free;
642 ps->next_free++;
644 atomic_inc(&ps->pending_count);
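
prepare_exception() hands out ps->next_free as the new COW chunk (line 633), then advances it past any chunk reserved for area metadata: with stride = exceptions_per_area + 1, metadata chunks live wherever chunk mod stride equals NUM_SNAPSHOT_HDR_CHUNKS. The test itself (a sector_div() against the stride in the source) is not among the matched lines; a sketch of the allocation walk using plain modular arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    /* Hand out the next data chunk, stepping over metadata chunks,
     * which sit wherever chunk % stride == 1 (chunk 0 is the header). */
    static uint64_t alloc_chunk(uint64_t *next_free, uint64_t epa)
    {
        uint64_t stride = epa + 1;
        uint64_t chunk = (*next_free)++;

        if (*next_free % stride == 1)   /* landed on metadata: skip it */
            (*next_free)++;
        return chunk;
    }

    int main(void)
    {
        uint64_t nf = 2, epa = 3;   /* tiny area size to show the skips */

        for (int i = 0; i < 8; i++)
            printf("%llu ", (unsigned long long)alloc_chunk(&nf, epa));
        printf("\n");   /* 2 3 4 6 7 8 10 11: chunks 1, 5, 9 are metadata */
        return 0;
    }
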
654 struct pstore *ps = get_info(store);
660 write_exception(ps, ps->current_committed++, &de);
668 cb = ps->callbacks + ps->callback_count++;
676 if (!atomic_dec_and_test(&ps->pending_count) &&
677 (ps->current_committed != ps->exceptions_per_area))
683 if ((ps->current_committed == ps->exceptions_per_area) &&
684 zero_disk_area(ps, ps->current_area + 1))
685 ps->valid = 0;
690 if (ps->valid && area_io(ps, WRITE_BARRIER))
691 ps->valid = 0;
696 if (ps->current_committed == ps->exceptions_per_area) {
697 ps->current_committed = 0;
698 ps->current_area++;
699 zero_memory_area(ps);
702 for (i = 0; i < ps->callback_count; i++) {
703 cb = ps->callbacks + i;
704 cb->callback(cb->context, ps->valid);
707 ps->callback_count = 0;
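
commit_exception() batches metadata writes: each commit records its exception in the in-core area and queues a completion callback, but nothing reaches disk until either the area fills up or no commits remain pending (the early return implied by lines 676-677). The area is then written with a barrier (line 690), the next on-disk area is pre-zeroed one step ahead (lines 683-685), and any I/O failure clears ps->valid, which is handed to every queued callback (line 704). The flush decision in isolation, simplified:

    /* Keep batching while other commits are still pending AND the
     * current area still has free slots (cf. lines 676-677). */
    static int should_flush(int pending_after_dec, unsigned committed,
                            unsigned exceptions_per_area)
    {
        if (pending_after_dec != 0 && committed != exceptions_per_area)
            return 0;   /* defer: more exceptions will join this area */
        return 1;       /* write the area out now, with a barrier */
    }
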
714 struct pstore *ps = get_info(store);
722 if (!ps->current_committed) {
726 if (!ps->current_area)
729 ps->current_area--;
730 r = area_io(ps, READ);
733 ps->current_committed = ps->exceptions_per_area;
736 read_exception(ps, ps->current_committed - 1, &de);
744 for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
746 read_exception(ps, ps->current_committed - 1 - nr_consecutive,
760 struct pstore *ps = get_info(store);
762 BUG_ON(nr_merged > ps->current_committed);
765 clear_exception(ps, ps->current_committed - 1 - i);
767 r = area_io(ps, WRITE);
771 ps->current_committed -= nr_merged;
774 * At this stage, only persistent_usage() uses ps->next_free, so
775 * we make no attempt to keep ps->next_free strictly accurate
780 * ps->current_area does not get reduced by prepare_merge() until
783 ps->next_free = area_location(ps, ps->current_area) +
784 ps->current_committed + 1;
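
After a merge trims nr_merged records off the tail (cleared in-core at line 765, written back at line 767), next_free only has to remain a safe upper bound for persistent_usage(), so it is recomputed from the current area's metadata location rather than tracked exactly. A worked instance of the formula at lines 783-784, with illustrative values:

    /* epa = 256, current_area = 2, current_committed = 100 after merging:
     *
     *   next_free = area_location(2) + 100 + 1
     *             = (1 + 257 * 2)    + 101
     *             = 616
     *
     * i.e. just past the last committed slot of area 2. */
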
791 struct pstore *ps = get_info(store);
793 ps->valid = 0;
794 if (write_header(ps))
801 struct pstore *ps;
804 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
805 if (!ps)
808 ps->store = store;
809 ps->valid = 1;
810 ps->version = SNAPSHOT_DISK_VERSION;
811 ps->area = NULL;
812 ps->zero_area = NULL;
813 ps->header_area = NULL;
814 ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
815 ps->current_committed = 0;
817 ps->callback_count = 0;
818 atomic_set(&ps->pending_count, 0);
819 ps->callbacks = NULL;
821 ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
822 if (!ps->metadata_wq) {
823 kfree(ps);
828 store->context = ps;
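
persistent_ctr() establishes the initial state the rest of the file assumes: next_free starts at NUM_SNAPSHOT_HDR_CHUNKS + 1 = 2, leaving chunk 0 for the header and chunk 1 for the first metadata area, and all metadata I/O is funnelled through the dedicated single-threaded "ksnaphd" workqueue so chunk_io() can be made synchronous with flush_workqueue(). The resulting starting layout, summarised:

    /* COW device right after construction and the first write_header():
     *
     *   chunk 0: disk_header                  (NUM_SNAPSHOT_HDR_CHUNKS = 1)
     *   chunk 1: metadata for area 0          (area_location(0))
     *   chunk 2: first data chunk handed out  (initial ps->next_free)
     */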