Searched refs:block (Results 201 - 225 of 1031) sorted by last modified time

1 2 3 4 5 6 7 8 9 10 11 >>

/linux-master/drivers/md/
dm-writecache.c
608 uint64_t block, int flags)
618 if (read_original_sector(wc, e) == block)
621 node = (read_original_sector(wc, e) >= block ?
626 if (read_original_sector(wc, e) >= block)
648 if (read_original_sector(wc, e2) != block)
1214 * clflushopt performs better with block size 1024, 2048, 4096
1215 * non-temporal stores perform better with block size 512
1217 * block size 512 1024 2048 4096
1559 DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
2371 * Parse the cache block siz
607 writecache_find_entry(struct dm_writecache *wc, uint64_t block, int flags) argument
[all...]
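
The writecache lookup above descends a tree keyed by each entry's original sector, keeping the lowest entry at or above the target. A minimal userspace sketch of that lower-bound search, using a plain binary tree in place of the kernel rbtree (all names here are illustrative, not dm-writecache's):

#include <stdint.h>
#include <stdio.h>

struct entry {
	uint64_t sector;
	struct entry *left, *right;
};

/* Mirrors the descent in the fragments above: on a >= comparison,
 * remember the node as a lower-bound candidate and go left; otherwise
 * go right. An exact match returns immediately. */
static struct entry *find_entry(struct entry *node, uint64_t block)
{
	struct entry *best = NULL;

	while (node) {
		if (node->sector == block)
			return node;
		if (node->sector >= block) {
			best = node;		/* candidate lower bound */
			node = node->left;
		} else {
			node = node->right;
		}
	}
	return best;	/* NULL if every entry is below `block` */
}

int main(void)
{
	struct entry e1 = { 10, NULL, NULL }, e3 = { 30, NULL, NULL };
	struct entry root = { 20, &e1, &e3 };
	struct entry *e = find_entry(&root, 15);

	printf("lower bound of 15: sector %llu\n",
	       e ? (unsigned long long)e->sector : 0ULL);
	return 0;
}
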
dm-verity-target.c
55 sector_t block; member in struct:dm_verity_prefetch_work
61 * hash_verified is nonzero, hash of the block has been verified.
94 * Return hash position of a specified block at a specified tree level
97 * inside a hash block. The remaining bits denote location of the hash block.
99 static sector_t verity_position_at_level(struct dm_verity *v, sector_t block, argument
102 return block >> (level * v->hash_per_block_bits);
203 static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level, argument
206 sector_t position = verity_position_at_level(v, block, level);
225 unsigned long long block)
224 verity_handle_err(struct dm_verity *v, enum verity_block_type type, unsigned long long block) argument
288 verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, sector_t block, int level, bool skip_unverified, u8 *want_digest) argument
373 verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, sector_t block, u8 *digest, bool *is_zero) argument
790 sector_t block = io->block; local
[all...]
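
The verity fragments above index a hash tree: at each level, the low hash_per_block_bits of a block number select a hash's offset inside a hash block, and the remaining bits select the hash block itself. A standalone sketch of that arithmetic, with an assumed digest count per block (not taken from the source):

#include <stdint.h>
#include <stdio.h>

/* Mirrors verity_position_at_level() above: shift away `level` groups
 * of hash_per_block_bits to find the hash position at that level. */
static uint64_t position_at_level(uint64_t block, int level,
				  unsigned int hash_per_block_bits)
{
	return block >> (level * hash_per_block_bits);
}

int main(void)
{
	unsigned int bits = 7;	/* assume 128 hashes per hash block */
	uint64_t block = 1000000;

	for (int level = 0; level < 4; level++) {
		uint64_t pos = position_at_level(block, level, bits);

		/* High bits pick the hash block, low bits the slot in it. */
		printf("level %d: hash block %llu, offset %llu\n", level,
		       (unsigned long long)(pos >> bits),
		       (unsigned long long)(pos & ((1u << bits) - 1)));
	}
	return 0;
}
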
dm-verity.h
43 u8 *root_digest; /* digest of the root block */
45 u8 *zero_digest; /* digest for a zero block */
53 unsigned char hash_per_block_bits; /* log2(hashes in hash block) */
56 bool hash_failed:1; /* set if hash of any block failed */
85 sector_t block; member in struct:dm_verity_io
134 sector_t block, u8 *digest, bool *is_zero);
dm-verity-fec.c
32 * Return an interleaved offset for a byte in RS block.
43 * Decode an RS block using Reed-Solomon.
59 * Read error-correcting codes for the requested RS block. Returns a pointer
60 * to the data block. Caller is responsible for releasing buf.
66 u64 position, block, rem; local
70 block = div64_u64_rem(position, v->fec->io_size, &rem);
73 res = dm_bufio_read_with_ioprio(v->fec->bufio, block, buf, ioprio);
75 DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
77 (unsigned long long)block, PTR_ERR(res));
96 /* Loop over each RS block i
132 u8 *par, *block; local
211 u64 block, ileaved; local
422 verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, enum verity_block_type type, sector_t block, u8 *dest, struct bvec_iter *iter) argument
[all...]
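
fec_read_parity() above maps a byte position in the parity area to a dm-bufio block number plus an in-block offset with one div/rem (div64_u64_rem). A hedged sketch of just that step; io_size stands in for v->fec->io_size and the helper name is made up:

#include <stdint.h>
#include <stdio.h>

/* Split a byte position into (bufio block, offset within block). */
static uint64_t position_to_block(uint64_t position, uint64_t io_size,
				  uint64_t *offset)
{
	*offset = position % io_size;
	return position / io_size;
}

int main(void)
{
	uint64_t offset;
	uint64_t block = position_to_block(123456789, 4096, &offset);

	printf("parity block %llu, offset %llu\n",
	       (unsigned long long)block, (unsigned long long)offset);
	return 0;
}
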
dm-thin.c
42 * The block size of the device holding pool data must be
64 * When we get a write in we decide if it's to a shared data block using
67 * Let's say we write to a shared block in what was the origin. The
70 * i) plug io further to this physical block. (see bio_prison code).
72 * ii) quiesce any read io to that shared data block. Obviously
73 * including all devices that share this block. (see dm_deferred_set code)
75 * iii) copy the data block to a newly allocated block. This step can be
76 * missed out if the io covers the block. (schedule_copy).
82 * devices that share the block neve
720 remap(struct thin_c *tc, struct bio *bio, dm_block_t block) argument
792 remap_and_issue(struct thin_c *tc, struct bio *bio, dm_block_t block) argument
933 inc_remap_and_issue_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell, dm_block_t block) argument
1807 break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, struct dm_cell_key *key, struct dm_thin_lookup_result *lookup_result, struct dm_bio_prison_cell *cell) argument
1855 remap_and_issue_shared_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell, dm_block_t block) argument
1876 process_shared_bio(struct thin_c *tc, struct bio *bio, dm_block_t block, struct dm_thin_lookup_result *lookup_result, struct dm_bio_prison_cell *virt_cell) argument
1910 provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, struct dm_bio_prison_cell *cell) argument
1964 dm_block_t block = get_bio_block(tc, bio); local
2017 dm_block_t block = get_bio_block(tc, bio); local
2037 dm_block_t block = get_bio_block(tc, bio); local
2731 dm_block_t block = get_bio_block(tc, bio); local
[all...]
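
The dm-thin comments above describe breaking sharing on a write to a shared data block; step (iii) skips the copy when the write covers the whole block. A toy sketch of that decision only, with illustrative names and the bio-prison plugging and quiescing steps omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct write_io {
	uint64_t begin;	/* first sector written */
	uint64_t end;	/* one past the last sector written */
};

static bool io_covers_block(const struct write_io *io,
			    uint64_t block_begin, uint64_t sectors_per_block)
{
	return io->begin <= block_begin &&
	       io->end >= block_begin + sectors_per_block;
}

/* A copy is only needed when part of the old block survives the write. */
static void break_sharing(const struct write_io *io, uint64_t block_begin,
			  uint64_t sectors_per_block)
{
	if (io_covers_block(io, block_begin, sectors_per_block))
		printf("full overwrite: remap to a new block, no copy\n");
	else
		printf("partial write: copy the old block first\n");
}

int main(void)
{
	struct write_io full = { 0, 128 }, partial = { 8, 16 };

	break_sharing(&full, 0, 128);
	break_sharing(&partial, 0, 128);
	return 0;
}
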
dm-log.c
675 static int core_in_sync(struct dm_dirty_log *log, region_t region, int block) argument
dm-kcopyd.c
9 * block-device to one or more other block-devices, with an asynchronous
804 * If one of the destinations is a host-managed zoned block device,
903 int kcopyd_cancel(struct kcopyd_job *job, int block)
dm-dust.c
78 static int dust_remove_block(struct dust_device *dd, unsigned long long block) argument
84 bblock = dust_rb_search(&dd->badblocklist, block);
88 DMERR("%s: block %llu not found in badblocklist",
89 __func__, block);
98 DMINFO("%s: badblock removed at block %llu", __func__, block);
105 static int dust_add_block(struct dust_device *dd, unsigned long long block, argument
119 bblock->bb = block;
123 DMERR("%s: block %llu already in badblocklist",
124 __func__, block);
141 dust_query_block(struct dust_device *dd, unsigned long long block, char *result, unsigned int maxlen, unsigned int *sz_ptr) argument
421 unsigned long long tmp, block; local
[all...]
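
dm-dust above keeps its bad blocks in an rbtree (dust_rb_search); add rejects duplicates and remove reports unknown blocks. A toy version of that bookkeeping, using a linked list to keep the sketch short; the messages mirror the fragments but none of this is the target's code:

#include <stdio.h>
#include <stdlib.h>

struct badblock {
	struct badblock *next;
	unsigned long long bb;
};

static struct badblock *badblocklist;

static int dust_add_block(unsigned long long block)
{
	struct badblock *b;

	for (b = badblocklist; b; b = b->next)
		if (b->bb == block) {
			fprintf(stderr, "block %llu already in badblocklist\n", block);
			return -1;
		}
	b = malloc(sizeof(*b));
	if (!b)
		return -1;
	b->bb = block;
	b->next = badblocklist;
	badblocklist = b;
	return 0;
}

static int dust_remove_block(unsigned long long block)
{
	struct badblock **p;

	/* Pointer-to-pointer walk: unlink without a "prev" variable. */
	for (p = &badblocklist; *p; p = &(*p)->next)
		if ((*p)->bb == block) {
			struct badblock *victim = *p;

			*p = victim->next;
			free(victim);
			printf("badblock removed at block %llu\n", block);
			return 0;
		}
	fprintf(stderr, "block %llu not found in badblocklist\n", block);
	return -1;
}

int main(void)
{
	dust_add_block(60);
	dust_add_block(60);	/* duplicate */
	dust_remove_block(60);
	dust_remove_block(9);	/* not present */
	return 0;
}
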
dm-ebs-target.c
7 * Device-mapper target to emulate smaller logical block
22 /* Emulated block size context. */
24 struct dm_dev *dev; /* Underlying device to emulate block size on. */
31 unsigned int e_bs; /* Emulated block size in sectors exposed to upper layer. */
32 unsigned int u_bs; /* Underlying block size in sectors retrieved from/set on lower layer device. */
34 bool u_bs_set:1; /* Flag to indicate underlying block size is set on table line. */
73 sector_t block = __sector_to_block(ec, iter->bi_sector); local
85 /* Avoid reading for writes in case bio vector's page overwrites block completely. */
87 ba = dm_bufio_read(ec->bufio, block, &b);
89 ba = dm_bufio_new(ec->bufio, block,
145 sector_t block, blocks, sector = bio->bi_iter.bi_sector; local
[all...]
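
The dm-ebs fragments above convert bio sectors to underlying blocks (__sector_to_block) and skip the read-before-write when the incoming data overwrites a block completely (dm_bufio_new instead of dm_bufio_read). A small sketch of both ideas, assuming a power-of-two underlying block size in sectors; the real target uses a shift rather than a division:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t sector_to_block(uint64_t sector, unsigned int u_bs)
{
	return sector / u_bs;
}

/* A write needs the old contents only if it leaves part of the
 * underlying block intact. */
static bool need_read_before_write(uint64_t sector, unsigned int sectors,
				   unsigned int u_bs)
{
	return (sector % u_bs) != 0 || (sectors % u_bs) != 0;
}

int main(void)
{
	printf("sector 24 -> block %llu (u_bs=8)\n",
	       (unsigned long long)sector_to_block(24, 8));
	printf("full-block write needs read: %d\n",
	       need_read_before_write(16, 8, 8));
	printf("partial write needs read: %d\n",
	       need_read_before_write(18, 2, 8));
	return 0;
}
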
dm-bufio.c
315 * Describes how the block was allocated:
331 sector_t block; member in struct:dm_buffer
406 static inline unsigned int cache_index(sector_t block, unsigned int num_locks) argument
408 return dm_hash_locks_index(block, num_locks);
411 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block) argument
414 read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
416 down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
419 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block) argument
422 read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
424 up_read(&bc->trees[cache_index(block, b
427 cache_write_lock(struct dm_buffer_cache *bc, sector_t block) argument
435 cache_write_unlock(struct dm_buffer_cache *bc, sector_t block) argument
592 __cache_get(const struct rb_root *root, sector_t block) argument
615 cache_get(struct dm_buffer_cache *bc, sector_t block) argument
899 __find_next(struct rb_root *root, sector_t block) argument
1361 block_to_sector(struct dm_bufio_client *c, sector_t block) argument
1761 __bufio_new(struct dm_bufio_client *c, sector_t block, enum new_flag nf, int *need_submit, struct list_head *write_list) argument
1857 new_read(struct dm_bufio_client *c, sector_t block, enum new_flag nf, struct dm_buffer **bp, unsigned short ioprio) argument
1930 dm_bufio_get(struct dm_bufio_client *c, sector_t block, struct dm_buffer **bp) argument
1937 __dm_bufio_read(struct dm_bufio_client *c, sector_t block, struct dm_buffer **bp, unsigned short ioprio) argument
1946 dm_bufio_read(struct dm_bufio_client *c, sector_t block, struct dm_buffer **bp) argument
1953 dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block, struct dm_buffer **bp, unsigned short ioprio) argument
1960 dm_bufio_new(struct dm_bufio_client *c, sector_t block, struct dm_buffer **bp) argument
1970 __dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks, unsigned short ioprio) argument
2024 dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks) argument
2030 dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks, unsigned short ioprio) argument
2207 dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count) argument
2228 forget_buffer(struct dm_bufio_client *c, sector_t block) argument
2253 dm_bufio_forget(struct dm_bufio_client *c, sector_t block) argument
2266 dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks) argument
[all...]
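
dm-bufio above stripes its buffer cache across several rbtrees, each guarded by its own lock, choosing the stripe by hashing the block number (dm_hash_locks_index). A userspace sketch of the same striping with pthread rwlocks; the hash below is illustrative, any decent integer hash works as long as one block always maps to one stripe:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_LOCKS 16	/* power of two so a mask can fold the hash */

static pthread_rwlock_t locks[NUM_LOCKS];

static unsigned int cache_index(uint64_t block)
{
	/* Multiplicative hash, then fold into the stripe count. */
	return (unsigned int)((block * 0x9e3779b97f4a7c15ull) >> 32) &
	       (NUM_LOCKS - 1);
}

static void cache_read_lock(uint64_t block)
{
	pthread_rwlock_rdlock(&locks[cache_index(block)]);
}

static void cache_read_unlock(uint64_t block)
{
	pthread_rwlock_unlock(&locks[cache_index(block)]);
}

int main(void)
{
	for (int i = 0; i < NUM_LOCKS; i++)
		pthread_rwlock_init(&locks[i], NULL);

	cache_read_lock(42);	/* readers on other stripes don't contend */
	printf("block 42 maps to stripe %u\n", cache_index(42));
	cache_read_unlock(42);
	return 0;
}
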
dm-cache-policy-smq.c
86 static struct entry *__get_entry(struct entry_space *es, unsigned int block) argument
90 e = es->begin + block;
102 static struct entry *to_entry(struct entry_space *es, unsigned int block) argument
104 if (block == INDEXER_NULL)
107 return __get_entry(es, block);
822 * active mappings. The hotspot queue uses a larger block size to
841 * block.
1596 // FIXME: what if this block has pending background work?
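
The smq policy above stores entries in one flat array (entry_space) and links them by small indices, with INDEXER_NULL standing in for a NULL pointer; __get_entry() just offsets from es->begin. A compact sketch of that index-as-pointer scheme, keeping links small and the whole structure relocatable:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define INDEXER_NULL ((uint16_t)0xffff)

struct entry {
	uint16_t next;	/* index of the next entry, or INDEXER_NULL */
	int hotness;
};

struct entry_space {
	struct entry *begin;
	struct entry *end;
};

static struct entry *to_entry(struct entry_space *es, uint16_t index)
{
	if (index == INDEXER_NULL)
		return NULL;
	assert(es->begin + index < es->end);	/* mirrors __get_entry()'s bound check */
	return es->begin + index;
}

int main(void)
{
	struct entry pool[4] = { { 1, 10 }, { INDEXER_NULL, 20 } };
	struct entry_space es = { pool, pool + 4 };
	struct entry *e = to_entry(&es, 0);

	printf("entry 0 -> next %u, hotness %d\n", (unsigned)e->next, e->hotness);
	printf("INDEXER_NULL -> %p\n", (void *)to_entry(&es, INDEXER_NULL));
	return 0;
}
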
/linux-master/drivers/md/dm-vdo/
vdo.c
46 #include "block-map.h"
266 * read_geometry_block() - Synchronously read the geometry block from a vdo's underlying block
275 char *block; local
278 result = vdo_allocate(VDO_BLOCK_SIZE, u8, __func__, &block);
283 block, &vio);
285 vdo_free(block);
294 result = vio_reset_bio(vio, block, NULL, REQ_OP_READ,
298 vdo_free(block);
308 vdo_free(block);
[all...]
slab-depot.c
42 * get_lock() - Get the lock object for a slab journal block by sequence number.
44 * @sequence_number: Sequence number of the block.
84 * initialize_tail_block() - Initialize tail block as a new block.
85 * @journal: The journal whose tail block is being initialized.
110 * block_is_full() - Check whether a journal block is full.
111 * @journal: The slab journal for the block.
113 * Return: true if the tail block is full.
200 * compute_fullness_hint() - Translate a slab's free block count into a 'fullness hint' that can be
208 * one block i
263 finish_updating_slab_summary_block(struct slab_summary_block *block) argument
280 struct slab_summary_block *block = local
293 struct slab_summary_block *block = local
304 struct slab_summary_block *block = local
314 launch_write(struct slab_summary_block *block) argument
366 struct slab_summary_block *block = &allocator->summary_blocks[index]; local
662 sequence_number_t block; local
681 const struct packed_slab_journal_block *block = local
907 struct packed_slab_journal_block *block = journal->block; local
1070 struct reference_block *block = completion->parent; local
1125 get_reference_counters_for_block(struct reference_block *block) argument
1137 pack_reference_block(struct reference_block *block, void *buffer) argument
1156 struct reference_block *block = vio->completion.parent; local
1191 struct reference_block *block = container_of(waiter, struct reference_block, local
1267 dirty_block(struct reference_block *block) argument
1420 increment_for_data(struct vdo_slab *slab, struct reference_block *block, slab_block_number block_number, enum reference_status old_status, struct pbn_lock *lock, vdo_refcount_t *counter_ptr, bool adjust_block_count) argument
1468 decrement_for_data(struct vdo_slab *slab, struct reference_block *block, slab_block_number block_number, enum reference_status old_status, struct reference_updater *updater, vdo_refcount_t *counter_ptr, bool adjust_block_count) argument
1531 increment_for_block_map(struct vdo_slab *slab, struct reference_block *block, slab_block_number block_number, enum reference_status old_status, struct pbn_lock *lock, bool normal_operation, vdo_refcount_t *counter_ptr, bool adjust_block_count) argument
1591 update_reference_count(struct vdo_slab *slab, struct reference_block *block, slab_block_number block_number, const struct journal_point *slab_journal_point, struct reference_updater *updater, bool normal_operation, bool adjust_block_count, bool *provisional_decrement_ptr) argument
1634 struct reference_block *block; local
1947 struct reference_block *block; local
1984 struct reference_block *block = get_reference_block(slab, entry.sbn); local
2145 struct reference_block *block = get_reference_block(slab, block_number); local
2173 clear_provisional_references(struct reference_block *block) argument
2198 unpack_reference_block(struct packed_reference_block *packed, struct reference_block *block) argument
2242 struct reference_block *block = completion->parent; local
2257 struct reference_block *block = vio->completion.parent; local
2273 struct reference_block *block = local
2413 struct packed_slab_journal_block *block = (struct packed_slab_journal_block *) vio->data; local
2776 apply_block_entries(struct packed_slab_journal_block *block, journal_entry_count_t entry_count, sequence_number_t block_number, struct vdo_slab *slab) argument
2847 struct packed_slab_journal_block *block = local
3968 struct slab_summary_block *block = &allocator->summary_blocks[index]; local
[all...]
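
compute_fullness_hint() above compresses a slab's free-block count into a small hint for the slab summary. A heavily hedged sketch of the idea, assuming simple right-shift scaling; the shift, field width, and slab size below are invented for illustration, not read from the source:

#include <stdint.h>
#include <stdio.h>

#define HINT_BITS 6			/* invented width of the hint field */
#define SLAB_DATA_BLOCKS (1u << 19)	/* invented data blocks per slab */

/* A per-slab hint is cheap to store yet close enough to steer
 * allocation toward emptier slabs. */
static uint8_t compute_fullness_hint(uint32_t free_blocks)
{
	unsigned int hint_shift = 19 - HINT_BITS;	/* log2(blocks) - HINT_BITS */
	uint32_t hint = free_blocks >> hint_shift;

	if (hint == 0 && free_blocks != 0)
		hint = 1;	/* keep "some space" distinct from "full" */
	if (hint > (1u << HINT_BITS) - 1)
		hint = (1u << HINT_BITS) - 1;	/* clamp to the field */
	return (uint8_t)hint;
}

int main(void)
{
	printf("full slab:  hint %u\n", compute_fullness_hint(0));
	printf("100 free:   hint %u\n", compute_fullness_hint(100));
	printf("empty slab: hint %u\n", compute_fullness_hint(SLAB_DATA_BLOCKS));
	return 0;
}
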
slab-depot.h
28 * A slab_depot is responsible for managing all of the slabs and block allocators of a VDO. It has
51 * Represents the possible status of a block.
54 RS_FREE, /* this block is free */
55 RS_SINGLE, /* this block is singly-referenced */
56 RS_SHARED, /* this block is shared */
57 RS_PROVISIONAL /* this block is provisionally allocated */
79 /* Whether a tail block commit is pending */
88 /* The oldest block in the journal on disk */
90 /* The oldest block in the journal which may not be reaped */
94 /* The next journal block t
129 struct packed_slab_journal_block *block; member in struct:slab_journal
178 struct reference_block *block; member in struct:search_cursor
[all...]
recovery-journal.c
15 #include "block-map.h"
31 * block write from overwriting a block which appears to still be a valid head block of the
129 * block.
131 * @sequence_number: The journal sequence number of the referenced block.
195 * pop_free_list() - Get a block from the end of the free list.
198 * Return: The block or NULL if the list is empty.
202 struct recovery_journal_block *block; local
207 block
222 is_block_dirty(const struct recovery_journal_block *block) argument
233 is_block_empty(const struct recovery_journal_block *block) argument
244 is_block_full(const struct recovery_journal_block *block) argument
280 struct recovery_journal_block *block = get_journal_block(&journal->active_tail_blocks); local
656 initialize_recovery_block(struct vdo *vdo, struct recovery_journal *journal, struct recovery_journal_block *block) argument
738 struct recovery_journal_block *block = &journal->blocks[i]; local
811 struct recovery_journal_block *block = &journal->blocks[i]; local
917 get_block_header(const struct recovery_journal_block *block) argument
927 set_active_sector(struct recovery_journal_block *block, void *sector) argument
945 struct recovery_journal_block *block; local
1043 schedule_block_write(struct recovery_journal *journal, struct recovery_journal_block *block) argument
1058 release_journal_block_reference(struct recovery_journal_block *block) argument
1087 struct recovery_journal_block *block = context; local
1143 recycle_journal_block(struct recovery_journal_block *block) argument
1211 struct recovery_journal_block *block; local
1236 struct recovery_journal_block *block, *tmp; local
1266 struct recovery_journal_block *block = completion->parent; local
1304 struct recovery_journal_block *block = completion->parent; local
1318 struct recovery_journal_block *block = vio->completion.parent; local
1328 add_queued_recovery_entries(struct recovery_journal_block *block) argument
1370 struct recovery_journal_block *block = local
1720 dump_recovery_block(const struct recovery_journal_block *block) argument
1736 const struct recovery_journal_block *block; local
[all...]
packer.c
37 * block.
39 * @compressed_block [in] The compressed block that was read from disk.
40 * @fragment_offset [out] The offset of the fragment within a compressed block.
47 struct compressed_block *block,
59 version = vdo_unpack_version_number(block->header.version);
67 compressed_size = __le16_to_cpu(block->header.sizes[slot]);
69 offset += __le16_to_cpu(block->header.sizes[i]);
135 * vdo_make_packer() - Make a new block packer.
189 * vdo_free_packer() - Free a block packer.
255 * @allocation: The allocation to which the compressed block wa
46 vdo_get_compressed_block_fragment(enum block_mapping_state mapping_state, struct compressed_block *block, u16 *fragment_offset, u16 *fragment_size) argument
368 initialize_compressed_block(struct compressed_block *block, u16 size) argument
390 pack_fragment(struct compression_state *compression, struct data_vio *data_vio, block_size_t offset, slot_number_t slot, struct compressed_block *block) argument
430 struct compressed_block *block; local
[all...]
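
vdo_get_compressed_block_fragment() above finds a fragment inside a compressed block by summing the little-endian u16 sizes of the fragments in earlier slots. A sketch of that offset computation, with the endian conversion omitted (sizes here are already host-order):

#include <stdint.h>
#include <stdio.h>

/* A fragment's offset in the data area is the sum of the sizes of all
 * fragments packed into slots before it. */
static uint16_t fragment_offset(const uint16_t *sizes, unsigned int slot)
{
	uint16_t offset = 0;

	for (unsigned int i = 0; i < slot; i++)
		offset += sizes[i];
	return offset;
}

int main(void)
{
	uint16_t sizes[] = { 700, 1200, 500 };

	for (unsigned int slot = 0; slot < 3; slot++)
		printf("slot %u: offset %u, size %u\n", slot,
		       fragment_offset(sizes, slot), sizes[slot]);
	return 0;
}
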
packer.h
22 /* The header of a compressed block. */
27 /* List of unsigned 16-bit compressed block sizes, little-endian */
35 * A compressed block is only written if we can pack at least two fragments into it, so a
36 * fragment which fills the entire data portion of a compressed block is too big.
41 /* The compressed block overlay. */
49 * block. The bins are kept in a ring sorted by the amount of unused space so the first bin with
55 * the agent's compressed block. The agent then writes out the compressed block. If the write is
57 * block and sends each on its way. Finally the agent itself continues on the write path as before.
68 /* The number of compressed block byte
[all...]
memory-alloc.c
118 static void add_vmalloc_block(struct vmalloc_block_info *block) argument
123 block->next = memory_stats.vmalloc_list;
124 memory_stats.vmalloc_list = block;
126 memory_stats.vmalloc_bytes += block->size;
133 struct vmalloc_block_info *block; local
139 (block = *block_ptr) != NULL;
140 block_ptr = &block->next) {
141 if (block->ptr == ptr) {
142 *block_ptr = block->next;
144 memory_stats.vmalloc_bytes -= block
246 struct vmalloc_block_info *block; local
[all...]
errors.c
96 /* Get the error info for an error number. Also returns the name of the error block, if known. */
99 struct error_block *block; local
106 for (block = registered_errors.blocks;
107 block < registered_errors.blocks + registered_errors.count;
108 block++) {
109 if ((errnum >= block->base) && (errnum < block->last)) {
110 *info_ptr = block->infos + (errnum - block->base);
111 return block
275 struct error_block *block; local
[all...]
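
The errors.c lookup above scans the registered error blocks, each claiming a contiguous [base, last) range of error numbers, and indexes into the matching block's info array. A self-contained sketch of that range search; the block and error names are hypothetical:

#include <stdio.h>

struct error_info {
	const char *name;
};

struct error_block {
	const char *block_name;
	int base;	/* first error number in the block */
	int last;	/* one past the last error number */
	const struct error_info *infos;
};

/* Hypothetical entries for illustration only. */
static const struct error_info vdo_infos[] = {
	{ "EXAMPLE_ERROR_A" }, { "EXAMPLE_ERROR_B" },
};

static const struct error_block blocks[] = {
	{ "VDO", 1024, 1026, vdo_infos },
};

static const struct error_info *lookup(int errnum, const char **block_name)
{
	for (size_t i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		if (errnum >= blocks[i].base && errnum < blocks[i].last) {
			*block_name = blocks[i].block_name;
			return blocks[i].infos + (errnum - blocks[i].base);
		}
	}
	return NULL;
}

int main(void)
{
	const char *bname;
	const struct error_info *info = lookup(1025, &bname);

	if (info)
		printf("%s: %s\n", bname, info->name);
	return 0;
}
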
encodings.c
109 * The current version for the data encoded in the super block. This must be changed any time there
129 /* This is the minimum size, if the super block contains no components. */
246 * @version: The geometry block version to decode.
295 * vdo_parse_geometry_block() - Decode and validate an encoded geometry block.
296 * @block: The encoded geometry block.
299 int __must_check vdo_parse_geometry_block(u8 *block, struct volume_geometry *geometry) argument
306 if (memcmp(block, VDO_GEOMETRY_MAGIC_NUMBER, VDO_GEOMETRY_MAGIC_NUMBER_SIZE) != 0)
310 vdo_decode_header(block, &offset, &header);
321 decode_volume_geometry(block,
771 vdo_decode_slab_journal_entry(struct packed_slab_journal_block *block, journal_entry_count_t entry_count) argument
[all...]
encodings.h
99 /* The block offset to be applied to bios */
126 * The entry for each logical block in the block map is encoded into five bytes, which saves space
128 * physical_block_number_t (addressing 256 terabytes with a 4KB block size) and a 4-bit encoding of
133 * Bits 7..4: The four highest bits of the 36-bit physical block number
136 * The following 4 bytes are the low order bytes of the physical block number, in little-endian
197 /* The state of the recovery journal as encoded in the VDO super block. */
203 /* Number of block map pages allocated */
213 * single mapping in the block map tree, and the two locations of the block ma
[all...]
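
The encodings.h comment above describes a five-byte block map entry: a 36-bit physical block number plus a 4-bit mapping state, with the PBN's top nibble in the first byte and the low 32 bits following in little-endian order. A round-trip sketch of that packing, assuming the state occupies the first byte's low nibble (the listing truncates before stating this):

#include <stdint.h>
#include <stdio.h>

static void pack_entry(uint8_t out[5], uint64_t pbn, uint8_t state)
{
	/* Byte 0: PBN bits 35..32 in the high nibble, state in the low. */
	out[0] = (uint8_t)(((pbn >> 32) & 0xf) << 4) | (state & 0xf);
	for (int i = 0; i < 4; i++)
		out[1 + i] = (uint8_t)(pbn >> (8 * i));	/* little-endian */
}

static uint64_t unpack_entry(const uint8_t in[5], uint8_t *state)
{
	uint64_t pbn = (uint64_t)(in[0] >> 4) << 32;

	for (int i = 0; i < 4; i++)
		pbn |= (uint64_t)in[1 + i] << (8 * i);
	*state = in[0] & 0xf;
	return pbn;
}

int main(void)
{
	uint8_t buf[5], state;
	uint64_t pbn = 0x8ABCDEF12ull;	/* needs all 36 bits */

	pack_entry(buf, pbn, 3);
	printf("round trip: pbn %#llx state %u\n",
	       (unsigned long long)unpack_entry(buf, &state), state);
	return 0;
}
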
dedupe.c
14 * deduplicate against a single block instead of being serialized through a PBN read lock. Only one
41 * Deduping requires holding a PBN lock on a block that is known to contain data identical to the
44 * new copy of the data to a full data block or a slot in a compressed block (WRITING).
48 * lock on the duplicate block (UNLOCKING), and if the agent is the last data_vio referencing the
224 /* The block hash covered by this lock */
232 * data block contents, linked by their hash_lock_node fields.
263 /* The PBN lock on the block containing the duplicate data */
290 /* The fields in the next block are all protected by the lock */
509 * set_duplicate_location() - Set the location of the duplicate block fo
[all...]
data-vio.h
17 #include "block-map.h"
62 /* A position in the arboreal block map at a specific level. */
68 /* Fields for using the arboreal block map. */
72 /* The block map tree for this LBN */
80 /* The block map tree slots for this LBN */
121 /* The compressed size of this block */
137 * The compressed block used to hold the compressed form of this block and that of any
140 struct compressed_block *block; member in struct:compression_state
145 /* The physical zone in which to allocate a physical block */
[all...]
data-vio.c
27 #include "block-map.h"
82 * for which a data_vio or discard permit are not available will block until the necessary
398 /* This data_vio is already set up to not block in the packer. */
413 * attempt_logical_block_lock() - Attempt to acquire the lock on a logical block.
446 result = VDO_ASSERT(lock_holder->logical.locked, "logical block lock held");
482 * launch_data_vio() - (Re)initialize a data_vio to have a new logical block number, keeping the
510 static bool is_zero_block(char *block) argument
515 if (*((u64 *) &block[i]))
541 memset(&data_vio->compression, 0, offsetof(struct compression_state, block));
550 * block
1447 struct compressed_block *block = data_vio->compression.block; local
[all...]
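
is_zero_block() above scans a block as 64-bit words and bails at the first nonzero word; detected zero blocks can be mapped without allocating physical space. A completed, standalone form of the fragment, with VDO_BLOCK_SIZE replaced by a local constant and memcpy used to sidestep alignment assumptions the kernel's buffers can safely make:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 4096

static bool is_zero_block(const char *block)
{
	for (size_t i = 0; i < BLOCK_SIZE; i += sizeof(uint64_t)) {
		uint64_t word;

		memcpy(&word, block + i, sizeof(word));	/* alignment-safe load */
		if (word)
			return false;
	}
	return true;
}

int main(void)
{
	static char block[BLOCK_SIZE];	/* zero-initialized */

	printf("zero block: %d\n", is_zero_block(block));
	block[100] = 1;
	printf("after dirtying: %d\n", is_zero_block(block));
	return 0;
}
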
Makefile
10 block-map.o \

Completed in 274 milliseconds

1 2 3 4 5 6 7 8 9 10 11 >>