Lines matching defs:cell (bio-prison cell usage in the dm-thin target, drivers/md/dm-thin.c)

227 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
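The typedef above is the dispatch hook for cell handling: the pool keeps one process_cell_fn per bio class and the worker calls through it, so changing pool mode is just swapping pointers (see process_cell_read_only / process_cell_success / process_cell_fail at 2082-2104 and the calls at 2313-2316 below). A sketch of the assumed wiring; this set_pool_mode body is illustrative, not quoted source:

struct pool {
        /* ... */
        process_cell_fn process_cell;
        process_cell_fn process_discard_cell;
};

/* sketch: a mode change swaps handlers instead of branching per bio */
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{
        switch (new_mode) {
        case PM_FAIL:
                pool->process_cell = process_cell_fail;
                break;
        case PM_READ_ONLY:
                pool->process_cell = process_cell_read_only;
                break;
        default:
                pool->process_cell = process_cell;
                break;
        }
}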
450 * Allocate a cell from the prison's mempool.
458 * We reused an old cell; we can get rid of
468 struct dm_bio_prison_cell *cell,
471 dm_cell_release(pool->prison, cell, bios);
472 dm_bio_prison_free_cell(pool->prison, cell);
478 struct dm_bio_prison_cell *cell)
480 dm_cell_visit_release(pool->prison, fn, context, cell);
481 dm_bio_prison_free_cell(pool->prison, cell);
485 struct dm_bio_prison_cell *cell,
488 dm_cell_release_no_holder(pool->prison, cell, bios);
489 dm_bio_prison_free_cell(pool->prison, cell);
493 struct dm_bio_prison_cell *cell, blk_status_t error_code)
495 dm_cell_error(pool->prison, cell, error_code);
496 dm_bio_prison_free_cell(pool->prison, cell);
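Lines 468-496 are all instances of one wrapper pattern: forward to the dm_cell_* primitive, then return the cell to the prison's mempool, so no caller ever frees a cell directly. Reconstructed from the fragments above (argument lists inferred, not verbatim):

static void cell_release(struct pool *pool,
                         struct dm_bio_prison_cell *cell,
                         struct bio_list *bios)
{
        dm_cell_release(pool->prison, cell, bios);
        dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
                                 struct dm_bio_prison_cell *cell,
                                 blk_status_t error_code)
{
        dm_cell_error(pool->prison, cell, error_code);
        dm_bio_prison_free_cell(pool->prison, cell);
}

cell_error, cell_success and cell_requeue at 504-516 then reduce to error codes: the pool's I/O error code, 0, and BLK_STS_DM_REQUEUE respectively.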
504 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
506 cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
509 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
511 cell_error_with_code(pool, cell, 0);
514 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
516 cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
592 struct dm_bio_prison_cell *cell;
623 struct dm_bio_prison_cell *cell, *tmp;
631 list_for_each_entry_safe(cell, tmp, &cells, user_list)
632 cell_requeue(pool, cell);
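The loop at 631-632 drains a private list with the _safe iterator because cell_requeue frees the cell it is standing on (via cell_error_with_code above), invalidating cell->user_list. A plausible reconstruction of the surrounding function; the splice and locking details are assumptions:

static void requeue_deferred_cells(struct thin_c *tc)
{
        struct pool *pool = tc->pool;
        struct dm_bio_prison_cell *cell, *tmp;
        struct list_head cells;

        INIT_LIST_HEAD(&cells);

        /* take the deferred cells off the device under its lock */
        spin_lock_irq(&tc->lock);
        list_splice_init(&tc->deferred_cells, &cells);
        spin_unlock_irq(&tc->lock);

        /* _safe: cell_requeue frees the current entry */
        list_for_each_entry_safe(cell, tmp, &cells, user_list)
                cell_requeue(pool, cell);
}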
815 struct dm_bio_prison_cell *cell;
820 * still be in the cell, so care has to be taken to avoid issuing
877 * This sends the bios in the cell, except the original holder, back
880 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
887 cell_release_no_holder(pool, cell, &bios);
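cell_defer_no_holder (880-887) implements the comment at 877-880: the holder is already being serviced, so only the other parked bios go back to the worker. A reconstruction; the deferred-list handoff and wake_worker are assumed from names elsewhere in the file:

static void cell_defer_no_holder(struct thin_c *tc,
                                 struct dm_bio_prison_cell *cell)
{
        struct pool *pool = tc->pool;
        struct bio_list bios;

        bio_list_init(&bios);
        cell_release_no_holder(pool, cell, &bios);   /* everyone but the holder */

        if (!bio_list_empty(&bios)) {
                spin_lock_irq(&tc->lock);
                bio_list_merge(&tc->deferred_bio_list, &bios);
                spin_unlock_irq(&tc->lock);
                wake_worker(pool);
        }
}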
906 struct dm_bio_prison_cell *cell)
911 while ((bio = bio_list_pop(&cell->bios))) {
928 struct dm_bio_prison_cell *cell,
940 * before the cell is released, and avoid a race with new bios
941 * being added to the cell.
944 &info, cell);
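Lines 906-944 are the visit-then-release idiom: cell_visit_release (the wrapper at 478-481 above) runs the visitor while the prison still holds the cell, so io entries are incremented before release, avoiding the race called out at 940-941 with new bios being added to the cell. A condensed reconstruction; the flush/discard classification is assumed:

struct remap_info {
        struct thin_c *tc;
        struct bio_list defer_bios;   /* flushes/discards go back to the worker */
        struct bio_list issue_bios;   /* plain reads/writes are remapped now */
};

static void __inc_remap_and_issue_cell(void *context,
                                       struct dm_bio_prison_cell *cell)
{
        struct remap_info *info = context;
        struct bio *bio;

        while ((bio = bio_list_pop(&cell->bios))) {
                if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
                        bio_list_add(&info->defer_bios, bio);
                else {
                        /* counted while the cell is still held */
                        inc_all_io_entry(info->tc->pool, bio);
                        bio_list_add(&info->issue_bios, bio);
                }
        }
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
                                     struct dm_bio_prison_cell *cell,
                                     dm_block_t block)
{
        struct bio *bio;
        struct remap_info info;

        info.tc = tc;
        bio_list_init(&info.defer_bios);
        bio_list_init(&info.issue_bios);

        cell_visit_release(tc->pool, __inc_remap_and_issue_cell, &info, cell);

        while ((bio = bio_list_pop(&info.defer_bios)))
                thin_defer_bio(tc, bio);

        while ((bio = bio_list_pop(&info.issue_bios)))
                remap_and_issue(tc, bio, block);
}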
955 cell_error(m->tc->pool, m->cell);
1000 cell_error(pool, m->cell);
1012 cell_error(pool, m->cell);
1020 * the bios in the cell.
1023 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
1026 inc_all_io_entry(tc->pool, m->cell->holder);
1027 remap_and_issue(tc, m->cell->holder, m->data_block);
1028 inc_remap_and_issue_cell(tc, m->cell, m->data_block);
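Lines 1020-1028 are the tail of the mapping-completion handler: once the new block is committed, the cell is drained through inc_remap_and_issue_cell; when the original write covered the whole block it has already been handled, so only the holder case differs. A fragment, with bio and m taken from the enclosing handler; the if/else framing is inferred from the two call shapes:

        /*
         * If we are processing a write that completely covered the block,
         * it was already handled, so skip it when draining the cell.
         */
        if (bio) {
                inc_remap_and_issue_cell(tc, m->cell, m->data_block);
                bio_endio(bio);
        } else {
                inc_all_io_entry(tc->pool, m->cell->holder);
                remap_and_issue(tc, m->cell->holder, m->data_block);
                inc_remap_and_issue_cell(tc, m->cell, m->data_block);
        }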
1042 if (m->cell)
1043 cell_defer_no_holder(tc, m->cell);
1064 r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
1071 cell_defer_no_holder(tc, m->cell);
1164 cell_defer_no_holder(tc, m->cell);
1177 cell_defer_no_holder(tc, m->cell);
1214 cell_defer_no_holder(tc, m->cell);
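Line 1064 is a neat reuse: a discard cell's key is exactly the locked virtual range, so the completion handler feeds key.block_begin/block_end straight into dm_thin_remove_range, then releases the cell (1071, 1164-1214) once the metadata update is settled. Sketch of the no-passdown variant, error handling condensed:

        r = dm_thin_remove_range(tc->td, m->cell->key.block_begin,
                                 m->cell->key.block_end);
        if (r)
                bio_io_error(m->bio);
        else
                bio_endio(m->bio);

        cell_defer_no_holder(tc, m->cell);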
1312 struct dm_bio_prison_cell *cell, struct bio *bio,
1322 m->cell = cell;
1372 struct dm_bio_prison_cell *cell, struct bio *bio)
1375 data_origin, data_dest, cell, bio,
1380 dm_block_t data_block, struct dm_bio_prison_cell *cell,
1391 m->cell = cell;
1411 struct dm_bio_prison_cell *cell, struct bio *bio)
1419 virt_block, data_dest, cell, bio,
1424 virt_block, data_dest, cell, bio,
1428 schedule_zero(tc, virt_block, data_dest, cell, bio);
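schedule_external_copy (1411-1428) picks between copying from the external origin and zeroing, depending on how much of the virtual block lies below tc->origin_size; whichever branch wins, the detained cell rides along with the job (m->cell = cell at 1322 and 1391) so completion can release it. Reconstruction; the begin/end arithmetic is assumed from the call shapes:

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
                                   dm_block_t data_dest,
                                   struct dm_bio_prison_cell *cell,
                                   struct bio *bio)
{
        struct pool *pool = tc->pool;
        sector_t virt_block_begin = virt_block * pool->sectors_per_block;
        sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

        if (virt_block_end <= tc->origin_size)
                /* fully inside the origin: copy the whole block */
                schedule_copy(tc, virt_block, tc->origin_dev,
                              virt_block, data_dest, cell, bio,
                              pool->sectors_per_block);

        else if (virt_block_begin < tc->origin_size)
                /* straddles the end of the origin: partial copy */
                schedule_copy(tc, virt_block, tc->origin_dev,
                              virt_block, data_dest, cell, bio,
                              tc->origin_size - virt_block_begin);

        else
                /* entirely past the origin: just zero it */
                schedule_zero(tc, virt_block, data_dest, cell, bio);
}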
1627 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
1635 cell_error_with_code(pool, cell, error);
1640 cell_release(pool, cell, &bios);
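retry_bios_on_resume (1627-1640) is the out-of-space disposition: if the pool is configured to error unserviceable bios, the whole cell fails with that code; otherwise the cell is emptied and each bio parked until the device is resumed. Reconstruction; should_error_unserviceable_bio and retry_on_resume are the assumed helpers behind 1635/1640:

static void retry_bios_on_resume(struct pool *pool,
                                 struct dm_bio_prison_cell *cell)
{
        struct bio *bio;
        struct bio_list bios;
        blk_status_t error = should_error_unserviceable_bio(pool);

        if (error) {
                cell_error_with_code(pool, cell, error);
                return;
        }

        bio_list_init(&bios);
        cell_release(pool, cell, &bios);

        while ((bio = bio_list_pop(&bios)))
                retry_on_resume(bio);
}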
1659 m->cell = virt_cell;
1722 m->cell = data_cell;
1755 h->cell = virt_cell;
1793 * cell will never be granted.
1804 struct dm_bio_prison_cell *cell)
1814 data_block, cell, bio);
1818 retry_bios_on_resume(pool, cell);
1824 cell_error(pool, cell);
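Lines 1804-1824 show the standard disposition of a detained cell after a data-block allocation attempt when breaking sharing: success hands the cell to the scheduled copy, -ENOSPC parks every bio in it until resume, anything else errors the whole cell. The invariant is that each detained cell is resolved exactly once. Skeleton; schedule_internal_copy and the enclosing function name are assumed:

        r = alloc_data_block(tc, &data_block);
        switch (r) {
        case 0:
                /* cell rides with the copy job; released on completion */
                schedule_internal_copy(tc, block, lookup_result->block,
                                       data_block, cell, bio);
                break;

        case -ENOSPC:
                /* park the whole cell until the pool is resized/resumed */
                retry_bios_on_resume(pool, cell);
                break;

        default:
                /* fail the holder and every parked bio in one shot */
                cell_error(pool, cell);
                break;
        }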
1830 struct dm_bio_prison_cell *cell)
1835 while ((bio = bio_list_pop(&cell->bios))) {
1850 struct dm_bio_prison_cell *cell,
1861 &info, cell);
1880 * If cell is already occupied, then sharing is already in the process
1905 struct dm_bio_prison_cell *cell)
1916 cell_defer_no_holder(tc, cell);
1927 cell_defer_no_holder(tc, cell);
1936 schedule_external_copy(tc, block, data_block, cell, bio);
1938 schedule_zero(tc, block, data_block, cell, bio);
1942 retry_bios_on_resume(pool, cell);
1948 cell_error(pool, cell);
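provision_block (1905-1948) follows the same switch, but on success the fresh block must be initialized first: copied from the external origin when tc->origin_dev is set (1936), zeroed otherwise (1938). Reads of unprovisioned blocks are short-circuited earlier (1916, 1927) by releasing the cell and returning zeroes. The success branch, condensed:

        case 0:
                if (tc->origin_dev)
                        schedule_external_copy(tc, block, data_block, cell, bio);
                else
                        schedule_zero(tc, block, data_block, cell, bio);
                break;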
1953 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
1957 struct bio *bio = cell->holder;
1962 cell_requeue(pool, cell);
1970 process_shared_bio(tc, bio, block, &lookup_result, cell);
1974 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
1981 cell_defer_no_holder(tc, cell);
1996 provision_block(tc, bio, block, cell);
2002 cell_defer_no_holder(tc, cell);
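process_cell (1953-2002) is the worker-side consumer: the holder drives the metadata lookup and the whole cell is resolved according to the result. A simplified reconstruction; the -ENODATA read-from-origin shortcut (1981) and error logging are elided:

static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
        struct pool *pool = tc->pool;
        struct bio *bio = cell->holder;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_lookup_result lookup_result;
        int r;

        if (tc->requeue_mode) {
                cell_requeue(pool, cell);
                return;
        }

        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
                if (lookup_result.shared)
                        process_shared_bio(tc, bio, block, &lookup_result, cell);
                else {
                        /* the holder first, then the rest of the cell */
                        inc_all_io_entry(pool, bio);
                        remap_and_issue(tc, bio, lookup_result.block);
                        inc_remap_and_issue_cell(tc, cell, lookup_result.block);
                }
                break;

        case -ENODATA:
                provision_block(tc, bio, block, cell);
                break;

        default:
                cell_defer_no_holder(tc, cell);
                bio_io_error(bio);
                break;
        }
}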
2012 struct dm_bio_prison_cell *cell;
2016 * If cell is already occupied, then the block is already
2020 if (bio_detain(pool, &key, bio, &cell))
2023 process_cell(tc, cell);
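process_bio (2012-2023) spells out the detain idiom behind the comment at 2016: the cell key is the virtual block, and a nonzero bio_detain means another bio already holds that block, so this one has just been parked in the existing cell and there is nothing more to do. Reconstruction; build_virtual_key is the assumed key helper:

static void process_bio(struct thin_c *tc, struct bio *bio)
{
        struct pool *pool = tc->pool;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_bio_prison_cell *cell;
        struct dm_cell_key key;

        /*
         * If the cell is already occupied, the block is already being
         * worked on; bio_detain has parked this bio inside it.
         */
        build_virtual_key(tc->td, block, &key);
        if (bio_detain(pool, &key, bio, &cell))
                return;

        process_cell(tc, cell);
}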
2027 struct dm_bio_prison_cell *cell)
2039 if (cell)
2040 cell_defer_no_holder(tc, cell);
2044 if (cell)
2045 inc_remap_and_issue_cell(tc, cell, lookup_result.block);
2050 if (cell)
2051 cell_defer_no_holder(tc, cell);
2070 if (cell)
2071 cell_defer_no_holder(tc, cell);
2082 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2084 __process_bio_read_only(tc, cell->holder, cell);
2097 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2099 cell_success(tc->pool, cell);
2102 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2104 cell_error(tc->pool, cell);
2260 struct dm_bio_prison_cell *cell, *tmp;
2262 list_for_each_entry_safe(cell, tmp, cells, user_list) {
2266 pool->cell_sort_array[count++] = cell;
2267 list_del(&cell->user_list);
2270 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
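sort_cells (2260-2270) batches the deferred cells into pool->cell_sort_array and sorts them so the worker issues I/O in ascending sector order; cmp_cells (assumed) compares the holders' start sectors, which is why 2296 can BUG_ON a missing holder. Reconstruction; the bound CELL_SORT_ARRAY_SIZE is an assumption:

static unsigned int sort_cells(struct pool *pool, struct list_head *cells)
{
        unsigned int count = 0;
        struct dm_bio_prison_cell *cell, *tmp;

        list_for_each_entry_safe(cell, tmp, cells, user_list) {
                if (count >= CELL_SORT_ARRAY_SIZE)
                        break;

                pool->cell_sort_array[count++] = cell;
                list_del(&cell->user_list);
        }

        /* sizeof(cell): the array holds pointers, not cells */
        sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);

        return count;
}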
2279 struct dm_bio_prison_cell *cell;
2295 cell = pool->cell_sort_array[i];
2296 BUG_ON(!cell->holder);
2313 if (bio_op(cell->holder) == REQ_OP_DISCARD)
2314 pool->process_discard_cell(tc, cell);
2316 pool->process_cell(tc, cell);
2694 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2700 list_add_tail(&cell->user_list, &tc->deferred_cells);
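thin_defer_cell (2694-2700) is the producer side of the deferred_cells list drained at 2260-2316: park the cell on the device under its lock and kick the worker. Reconstruction; the locking and the wake_worker call are assumed:

static void thin_defer_cell(struct thin_c *tc,
                            struct dm_bio_prison_cell *cell)
{
        struct pool *pool = tc->pool;

        spin_lock_irq(&tc->lock);
        list_add_tail(&cell->user_list, &tc->deferred_cells);
        spin_unlock_irq(&tc->lock);

        wake_worker(pool);
}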
2715 h->cell = NULL;
2750 * We must hold the virtual cell before doing the lookup, otherwise
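The comment at 2750 is the fast-path ordering rule: detain the virtual cell before the metadata lookup, otherwise a concurrent discard could invalidate the lookup result before the cell is taken. A fragment sketch of the map path under that rule; variable names and surrounding control flow are assumed:

        /*
         * Detain first, look up second: holding the virtual cell blocks
         * a racing discard of this block.
         */
        build_virtual_key(tc->td, block, &key);
        if (bio_detain(tc->pool, &key, bio, &virt_cell))
                return DM_MAPIO_SUBMITTED;      /* joined an existing cell */

        r = dm_thin_find_block(td, block, 0, &result);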
3037 *error = "Error allocating cell sort array";
4368 if (h->cell)
4369 cell_defer_no_holder(h->tc, h->cell);
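Lines 4368-4369 close the loop opened at 2715 (h->cell = NULL) and 1755 (h->cell = virt_cell): if the map path stashed a cell in the per-bio hook, endio releases everything but the holder. A sketch of the enclosing endio handler; hook retrieval and the other bookkeeping are assumed or elided:

static int thin_endio(struct dm_target *ti, struct bio *bio,
                      blk_status_t *err)
{
        struct dm_thin_endio_hook *h =
                dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

        /* ... shared-read / all-io entry bookkeeping elided ... */

        if (h->cell)
                cell_defer_no_holder(h->tc, h->cell);

        return DM_ENDIO_DONE;
}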