Lines Matching refs:ps

170 static int alloc_area(struct pstore *ps)
175 len = ps->store->chunk_size << SECTOR_SHIFT;
181 ps->area = vmalloc(len);
182 if (!ps->area)
185 ps->zero_area = vzalloc(len);
186 if (!ps->zero_area)
189 ps->header_area = vmalloc(len);
190 if (!ps->header_area)
196 vfree(ps->zero_area);
199 vfree(ps->area);
205 static void free_area(struct pstore *ps)
207 vfree(ps->area);
208 ps->area = NULL;
209 vfree(ps->zero_area);
210 ps->zero_area = NULL;
211 vfree(ps->header_area);
212 ps->header_area = NULL;
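
A minimal user-space analogue of the alloc_area()/free_area() pairing above (lines 170-212), assuming the same three chunk-sized buffers and substituting malloc()/calloc() for vmalloc()/vzalloc(); the backward-unwinding error path mirrors the vfree() calls on lines 196 and 199. Names here are illustrative, not the kernel's.

#include <stdlib.h>

struct pstore_bufs {
        void *area;         /* in-core copy of the current metadata area */
        void *zero_area;    /* pre-zeroed chunk used to wipe on-disk areas */
        void *header_area;  /* scratch buffer for the on-disk header */
};

/* Allocate all three buffers or none, unwinding on failure. */
static int alloc_bufs(struct pstore_bufs *b, size_t len)
{
        b->area = malloc(len);
        if (!b->area)
                goto err_area;

        b->zero_area = calloc(1, len);       /* stands in for vzalloc() */
        if (!b->zero_area)
                goto err_zero;

        b->header_area = malloc(len);
        if (!b->header_area)
                goto err_header;

        return 0;

err_header:
        free(b->zero_area);
err_zero:
        free(b->area);
err_area:
        return -1;
}

static void free_bufs(struct pstore_bufs *b)
{
        free(b->area);
        free(b->zero_area);
        free(b->header_area);
        b->area = b->zero_area = b->header_area = NULL;
}
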
232 static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
236 .bdev = dm_snap_cow(ps->store->snap)->bdev,
237 .sector = ps->store->chunk_size * chunk,
238 .count = ps->store->chunk_size,
244 .client = ps->io_client,
260 queue_work(ps->metadata_wq, &req.work);
261 flush_workqueue(ps->metadata_wq);
270 static chunk_t area_location(struct pstore *ps, chunk_t area)
272 return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
275 static void skip_metadata(struct pstore *ps)
277 uint32_t stride = ps->exceptions_per_area + 1;
278 chunk_t next_free = ps->next_free;
281 ps->next_free++;
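
The COW-device layout implied by area_location() and skip_metadata() (lines 270-281) can be checked with a stand-alone sketch. It assumes NUM_SNAPSHOT_HDR_CHUNKS is 1 and a hypothetical exceptions_per_area of 4: chunk 0 is the header, and each stride of exceptions_per_area + 1 chunks holds one metadata chunk followed by that area's data chunks, so metadata lands on chunks 1, 6, 11, and so on. The modulo test below is an inference from the stride arithmetic shown, not a verbatim copy of the kernel's helper.

#include <stdio.h>
#include <stdint.h>

#define NUM_SNAPSHOT_HDR_CHUNKS 1          /* value assumed here */

typedef uint64_t chunk_t;

static uint32_t exceptions_per_area = 4;   /* hypothetical, for illustration */

/* Chunk number of the metadata chunk describing a given area (cf. line 272). */
static chunk_t area_location(chunk_t area)
{
        return NUM_SNAPSHOT_HDR_CHUNKS + ((exceptions_per_area + 1) * area);
}

/* If next_free points at a metadata chunk, step over it (cf. skip_metadata). */
static chunk_t skip_metadata(chunk_t next_free)
{
        uint32_t stride = exceptions_per_area + 1;

        if (next_free % stride == NUM_SNAPSHOT_HDR_CHUNKS)
                next_free++;
        return next_free;
}

int main(void)
{
        for (chunk_t area = 0; area < 3; area++)
                printf("area %llu metadata at chunk %llu\n",
                       (unsigned long long)area,
                       (unsigned long long)area_location(area));

        /* Chunks 1, 6 and 11 are skipped; everything else passes through. */
        for (chunk_t c = 1; c <= 11; c++)
                printf("next_free %llu -> %llu\n",
                       (unsigned long long)c,
                       (unsigned long long)skip_metadata(c));
        return 0;
}
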
288 static int area_io(struct pstore *ps, blk_opf_t opf)
290 chunk_t chunk = area_location(ps, ps->current_area);
292 return chunk_io(ps, ps->area, chunk, opf, 0);
295 static void zero_memory_area(struct pstore *ps)
297 memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
300 static int zero_disk_area(struct pstore *ps, chunk_t area)
302 return chunk_io(ps, ps->zero_area, area_location(ps, area),
306 static int read_header(struct pstore *ps, int *new_snapshot)
318 if (!ps->store->chunk_size) {
319 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
320 bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
322 ps->store->chunk_mask = ps->store->chunk_size - 1;
323 ps->store->chunk_shift = __ffs(ps->store->chunk_size);
327 ps->io_client = dm_io_client_create();
328 if (IS_ERR(ps->io_client))
329 return PTR_ERR(ps->io_client);
331 r = alloc_area(ps);
335 r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);
339 dh = ps->header_area;
353 ps->valid = le32_to_cpu(dh->valid);
354 ps->version = le32_to_cpu(dh->version);
357 if (ps->store->chunk_size == chunk_size)
362 chunk_size, ps->store->chunk_size);
365 free_area(ps);
367 r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
375 r = alloc_area(ps);
379 free_area(ps);
383 static int write_header(struct pstore *ps)
387 memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
389 dh = ps->header_area;
391 dh->valid = cpu_to_le32(ps->valid);
392 dh->version = cpu_to_le32(ps->version);
393 dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
395 return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1);
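
write_header() (lines 383-395) zeroes the header chunk and stores the pstore's state in little-endian form; read_header() decodes it with le32_to_cpu() (lines 353-354). A rough user-space sketch of that round trip, with htole32()/le32toh() standing in for the kernel helpers. Only the fields visible in this listing are modelled; the real on-disk header also carries a magic number, and the struct name below is made up.

#include <stdint.h>
#include <string.h>
#include <endian.h>   /* htole32()/le32toh(), glibc-specific */

struct disk_header_sketch {
        uint32_t valid;
        uint32_t version;
        uint32_t chunk_size;
};

/* Serialise into a chunk-sized buffer, as write_header() does. */
static void pack_header(void *header_area, size_t chunk_bytes,
                        uint32_t valid, uint32_t version, uint32_t chunk_size)
{
        struct disk_header_sketch *dh = header_area;

        memset(header_area, 0, chunk_bytes);
        dh->valid = htole32(valid);
        dh->version = htole32(version);
        dh->chunk_size = htole32(chunk_size);
}

/* Decode again, as read_header() does. */
static void unpack_header(const void *header_area, uint32_t *valid,
                          uint32_t *version, uint32_t *chunk_size)
{
        const struct disk_header_sketch *dh = header_area;

        *valid = le32toh(dh->valid);
        *version = le32toh(dh->version);
        *chunk_size = le32toh(dh->chunk_size);
}
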
401 static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
404 BUG_ON(index >= ps->exceptions_per_area);
409 static void read_exception(struct pstore *ps, void *ps_area,
412 struct disk_exception *de = get_exception(ps, ps_area, index);
419 static void write_exception(struct pstore *ps,
422 struct disk_exception *de = get_exception(ps, ps->area, index);
429 static void clear_exception(struct pstore *ps, uint32_t index)
431 struct disk_exception *de = get_exception(ps, ps->area, index);
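
get_exception() and its callers (lines 401-431) treat the in-core area as a flat array of fixed-size exception records indexed 0..exceptions_per_area-1. A self-contained sketch of that indexing, assuming each on-disk record is a pair of little-endian 64-bit chunk numbers (old_chunk, new_chunk); clearing a slot is just zero-filling it. All names below are illustrative.

#include <stdint.h>
#include <string.h>
#include <assert.h>
#include <endian.h>   /* htole64()/le64toh(), glibc-specific */

struct disk_exception_sketch {
        uint64_t old_chunk;    /* chunk on the origin device */
        uint64_t new_chunk;    /* chunk on the COW device holding the copy */
};

static uint32_t exceptions_per_area = 4;   /* hypothetical */

static struct disk_exception_sketch *get_slot(void *area, uint32_t index)
{
        assert(index < exceptions_per_area);     /* cf. BUG_ON() on line 404 */
        return (struct disk_exception_sketch *)area + index;
}

static void write_slot(void *area, uint32_t index,
                       uint64_t old_chunk, uint64_t new_chunk)
{
        struct disk_exception_sketch *de = get_slot(area, index);

        de->old_chunk = htole64(old_chunk);
        de->new_chunk = htole64(new_chunk);
}

static void read_slot(void *area, uint32_t index,
                      uint64_t *old_chunk, uint64_t *new_chunk)
{
        struct disk_exception_sketch *de = get_slot(area, index);

        *old_chunk = le64toh(de->old_chunk);
        *new_chunk = le64toh(de->new_chunk);
}

static void clear_slot(void *area, uint32_t index)
{
        memset(get_slot(area, index), 0, sizeof(struct disk_exception_sketch));
}
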
443 static int insert_exceptions(struct pstore *ps, void *ps_area,
456 for (i = 0; i < ps->exceptions_per_area; i++) {
457 read_exception(ps, ps_area, i, &e);
466 ps->current_committed = i;
474 if (ps->next_free <= e.new_chunk)
475 ps->next_free = e.new_chunk + 1;
488 static int read_exceptions(struct pstore *ps,
497 client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
498 ps->store->chunk_size << SECTOR_SHIFT,
513 for (ps->current_area = 0; full; ps->current_area++) {
518 if (unlikely(prefetch_area < ps->current_area))
519 prefetch_area = ps->current_area;
523 chunk_t pf_chunk = area_location(ps, prefetch_area);
531 } while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
534 chunk = area_location(ps, ps->current_area);
542 r = insert_exceptions(ps, area, callback, callback_context,
546 memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
556 ps->current_area--;
558 skip_metadata(ps);
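
read_exceptions() (lines 488-558) walks the metadata areas in order and, through dm_bufio, prefetches areas ahead of the one it is about to read (lines 513-531). The sliding window can be sketched in isolation; dm_bufio_prefetch() is replaced by a stub, DM_PREFETCH_CHUNKS is given an assumed value, and the end-of-metadata handling is simplified to a fixed count of areas.

#include <stdio.h>
#include <stdint.h>

#define DM_PREFETCH_CHUNKS 12              /* window size assumed here */
#define NUM_SNAPSHOT_HDR_CHUNKS 1

typedef uint64_t chunk_t;

static uint32_t exceptions_per_area = 4;   /* hypothetical */

static chunk_t area_location(chunk_t area)
{
        return NUM_SNAPSHOT_HDR_CHUNKS + ((exceptions_per_area + 1) * area);
}

/* Stub standing in for dm_bufio_prefetch(). */
static void prefetch_chunk(chunk_t chunk)
{
        printf("prefetch metadata chunk %llu\n", (unsigned long long)chunk);
}

int main(void)
{
        chunk_t prefetch_area = 0;

        for (chunk_t current_area = 0; current_area < 3; current_area++) {
                /* Never let the window fall behind the area being read. */
                if (prefetch_area < current_area)
                        prefetch_area = current_area;

                /* Issue prefetches up to DM_PREFETCH_CHUNKS areas ahead. */
                do {
                        prefetch_chunk(area_location(prefetch_area));
                        prefetch_area++;
                } while (prefetch_area <= current_area + DM_PREFETCH_CHUNKS);

                printf("read area %llu at chunk %llu\n",
                       (unsigned long long)current_area,
                       (unsigned long long)area_location(current_area));
        }
        return 0;
}
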
578 struct pstore *ps = get_info(store);
580 *sectors_allocated = ps->next_free * store->chunk_size;
585 * Then there are (ps->current_area + 1) metadata chunks, each one
586 * separated from the next by ps->exceptions_per_area data chunks.
588 *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
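
The space accounting in persistent_usage() (lines 578-588) is plain arithmetic: total allocation is next_free chunks, and metadata is one header chunk plus one metadata chunk per area written so far, both scaled to sectors by the chunk size. A worked example with hypothetical values:

#include <stdio.h>
#include <stdint.h>

#define NUM_SNAPSHOT_HDR_CHUNKS 1

int main(void)
{
        uint64_t chunk_size = 16;       /* sectors per chunk (hypothetical) */
        uint64_t next_free = 42;        /* next unallocated COW chunk */
        uint64_t current_area = 3;      /* metadata areas written so far */

        uint64_t sectors_allocated = next_free * chunk_size;
        uint64_t metadata_sectors =
                (current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) * chunk_size;

        /* 42 * 16 = 672 sectors allocated, (3 + 1 + 1) * 16 = 80 of metadata. */
        printf("allocated %llu, metadata %llu\n",
               (unsigned long long)sectors_allocated,
               (unsigned long long)metadata_sectors);
        return 0;
}
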
594 struct pstore *ps = get_info(store);
596 destroy_workqueue(ps->metadata_wq);
599 if (ps->io_client)
600 dm_io_client_destroy(ps->io_client);
601 free_area(ps);
604 kvfree(ps->callbacks);
606 kfree(ps);
615 struct pstore *ps = get_info(store);
620 r = read_header(ps, &new_snapshot);
627 ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
629 ps->callbacks = kvcalloc(ps->exceptions_per_area,
630 sizeof(*ps->callbacks), GFP_KERNEL);
631 if (!ps->callbacks)
638 r = write_header(ps);
644 ps->current_area = 0;
645 zero_memory_area(ps);
646 r = zero_disk_area(ps, 0);
654 if (ps->version != SNAPSHOT_DISK_VERSION) {
656 ps->version);
663 if (!ps->valid)
669 r = read_exceptions(ps, callback, callback_context);
677 struct pstore *ps = get_info(store);
681 if (size < ((ps->next_free + 1) * store->chunk_size))
684 e->new_chunk = ps->next_free;
690 ps->next_free++;
691 skip_metadata(ps);
693 atomic_inc(&ps->pending_count);
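
prepare_exception() (lines 677-693) hands out ps->next_free as the destination chunk only if the COW device still has room for it, then advances next_free and skips over the next metadata chunk. The bounds check on line 681, pulled out into a small helper (names hypothetical, sizes in sectors):

#include <stdbool.h>
#include <stdint.h>

/* True if the COW device can still hold one more chunk at next_free. */
static bool have_room(uint64_t dev_size, uint64_t chunk_size, uint64_t next_free)
{
        return dev_size >= (next_free + 1) * chunk_size;
}
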
703 struct pstore *ps = get_info(store);
708 ps->valid = 0;
712 write_exception(ps, ps->current_committed++, &ce);
720 cb = ps->callbacks + ps->callback_count++;
728 if (!atomic_dec_and_test(&ps->pending_count) &&
729 (ps->current_committed != ps->exceptions_per_area))
735 if ((ps->current_committed == ps->exceptions_per_area) &&
736 zero_disk_area(ps, ps->current_area + 1))
737 ps->valid = 0;
742 if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
744 ps->valid = 0;
749 if (ps->current_committed == ps->exceptions_per_area) {
750 ps->current_committed = 0;
751 ps->current_area++;
752 zero_memory_area(ps);
755 for (i = 0; i < ps->callback_count; i++) {
756 cb = ps->callbacks + i;
757 cb->callback(cb->context, ps->valid);
760 ps->callback_count = 0;
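
persistent_commit_exception() (lines 703-760) buffers each committed exception in the in-core area and queues its completion callback; the area is written back, and the callbacks run, only once no exceptions remain pending or the area has filled up (lines 728-729). That gating condition as a stand-alone predicate (names hypothetical):

#include <stdbool.h>
#include <stdint.h>

/*
 * Flush the in-core area and run the queued callbacks when either no
 * exceptions remain pending (cf. atomic_dec_and_test() on line 728) or
 * the current area is full (line 729).
 */
static bool should_flush_area(unsigned int pending_after_decrement,
                              uint32_t current_committed,
                              uint32_t exceptions_per_area)
{
        return pending_after_decrement == 0 ||
               current_committed == exceptions_per_area;
}
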
767 struct pstore *ps = get_info(store);
775 if (!ps->current_committed) {
779 if (!ps->current_area)
782 ps->current_area--;
783 r = area_io(ps, REQ_OP_READ);
786 ps->current_committed = ps->exceptions_per_area;
789 read_exception(ps, ps->area, ps->current_committed - 1, &ce);
797 for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
799 read_exception(ps, ps->area,
800 ps->current_committed - 1 - nr_consecutive, &ce);
813 struct pstore *ps = get_info(store);
815 BUG_ON(nr_merged > ps->current_committed);
818 clear_exception(ps, ps->current_committed - 1 - i);
820 r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
824 ps->current_committed -= nr_merged;
827 * At this stage, only persistent_usage() uses ps->next_free, so
828 * we make no attempt to keep ps->next_free strictly accurate
833 * ps->current_area does not get reduced by prepare_merge() until
836 ps->next_free = area_location(ps, ps->current_area) +
837 ps->current_committed + 1;
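
After a merge commit, persistent_commit_merge() (lines 813-837) rewinds current_committed and recomputes next_free relative to the current area's metadata chunk; as the comment on lines 827-833 notes, the value is not kept strictly accurate because only persistent_usage() consumes it at that point. With area_location() from line 272, the calculation is:

#include <stdint.h>

#define NUM_SNAPSHOT_HDR_CHUNKS 1

typedef uint64_t chunk_t;

static uint32_t exceptions_per_area = 4;   /* hypothetical */

static chunk_t area_location(chunk_t area)
{
        return NUM_SNAPSHOT_HDR_CHUNKS + ((exceptions_per_area + 1) * area);
}

/*
 * cf. lines 836-837: metadata chunk of the current area, plus the slots
 * still committed in it, plus one.
 */
static chunk_t next_free_after_merge(chunk_t current_area,
                                     uint32_t current_committed)
{
        return area_location(current_area) + current_committed + 1;
}
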
844 struct pstore *ps = get_info(store);
846 ps->valid = 0;
847 if (write_header(ps))
853 struct pstore *ps;
857 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
858 if (!ps)
861 ps->store = store;
862 ps->valid = 1;
863 ps->version = SNAPSHOT_DISK_VERSION;
864 ps->area = NULL;
865 ps->zero_area = NULL;
866 ps->header_area = NULL;
867 ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
868 ps->current_committed = 0;
870 ps->callback_count = 0;
871 atomic_set(&ps->pending_count, 0);
872 ps->callbacks = NULL;
874 ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
875 if (!ps->metadata_wq) {
893 store->context = ps;
898 destroy_workqueue(ps->metadata_wq);
900 kfree(ps);