Searched refs:sector (Results 51 - 75 of 182) sorted by relevance


/linux-master/drivers/md/
raid10.h  123 sector_t sector; /* virtual sector number */ member in struct:r10bio
dm-stripe.c  62 * Parse a single <dev> <sector> pair
201 static void stripe_map_sector(struct stripe_c *sc, sector_t sector, argument
204 sector_t chunk = dm_target_offset(sc->ti, sector);
229 static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector, argument
234 stripe_map_sector(sc, sector, &stripe, result);
239 sector = *result;
241 *result -= sector_div(sector, sc->chunk_size);
243 *result = sector & ~(sector_t)(sc->chunk_size - 1);
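As a rough illustration of the chunk arithmetic in the stripe_map_sector() snippet above, the sketch below splits a target-relative sector into a chunk number, picks a stripe round-robin, and rebuilds the per-device sector. It is a user-space sketch only; the function name, the power-of-two flag and the parameters are illustrative, not the dm-stripe API.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch: mirrors the split visible in stripe_map_sector(). */
static void map_sector_demo(uint64_t sector, uint64_t chunk_size, uint32_t stripes,
                            int chunk_is_pow2, uint32_t *stripe, uint64_t *result)
{
        uint64_t chunk = sector, offset;

        if (chunk_is_pow2) {
                offset = chunk & (chunk_size - 1);      /* offset inside the chunk */
                chunk >>= __builtin_ctzll(chunk_size);  /* chunk index */
        } else {
                offset = chunk % chunk_size;            /* sector_div() equivalent */
                chunk /= chunk_size;
        }
        *stripe = (uint32_t)(chunk % stripes);             /* which stripe device */
        *result = (chunk / stripes) * chunk_size + offset; /* sector on that device */
}

int main(void)
{
        uint32_t stripe; uint64_t result;
        map_sector_demo(1000000, 256, 4, 1, &stripe, &result);
        printf("stripe %u, device sector %llu\n", stripe, (unsigned long long)result);
        return 0;
}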
dm-integrity.c  101 __le64 sector; member in union:journal_entry::__anon94
110 #define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
114 #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
154 sector_t sector; member in struct:journal_node
404 * Xor the number with section and sector, so that if a piece of
546 io_loc.sector = ic->start;
579 sector_t sector, sector_t n_sectors, int mode)
584 if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
586 sector,
597 bit = sector >> (i
578 block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap, sector_t sector, sector_t n_sectors, int mode) argument
697 sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector) argument
724 unsigned int sector; local
1040 rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf, unsigned int sector, unsigned int n_sectors, struct journal_completion *comp) argument
1091 unsigned int sector, n_sectors; local
1168 unsigned int sector, pl_index, pl_offset; local
1296 add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector) argument
1331 find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector) argument
1353 test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector) argument
1641 integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector, const char *data, char *result) argument
1700 sector_t sector, logical_sector, area, offset; local
1788 sector_t sector; local
4601 unsigned int sector, pl_index, pl_offset; local
[all...]
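The journal_entry_set_sector() line above pairs a write barrier with WRITE_ONCE so the entry payload is made visible before the sector field that publishes it. A minimal user-space analogue of that publish/consume ordering, using C11 atomics in place of the kernel primitives (a sketch of the idea, not the dm-integrity code):

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

/* Fill the payload first, then publish the sector with release semantics so a
 * reader that observes the new sector also observes the completed payload. */
struct demo_entry {
        unsigned char payload[64];
        _Atomic uint64_t sector;                 /* stands in for je->u.sector */
};

static void publish_entry(struct demo_entry *je,
                          const unsigned char payload[64], uint64_t sector)
{
        memcpy(je->payload, payload, 64);
        /* plays the role of smp_wmb() + WRITE_ONCE(..., cpu_to_le64(x)) */
        atomic_store_explicit(&je->sector, sector, memory_order_release);
}

static uint64_t read_entry(struct demo_entry *je, unsigned char payload[64])
{
        /* counterpart of journal_entry_get_sector(), with the needed read order */
        uint64_t s = atomic_load_explicit(&je->sector, memory_order_acquire);
        memcpy(payload, je->payload, 64);
        return s;
}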
dm-crypt.c  85 sector_t sector; member in struct:dm_crypt_io
265 * plain: the initial vector is the 32-bit little-endian version of the sector
268 * plain64: the initial vector is the 64-bit little-endian version of the sector
271 * plain64be: the initial vector is the 64-bit big-endian version of the sector
274 * essiv: "encrypted sector|salt initial vector", the sector number is
288 * with an IV derived from the sector number, the data and
291 * of sector must be tweaked according to decrypted data.
302 * with an IV derived from initial key and the sector number.
303 * In addition, whitening value is applied on every sector, whitenin
645 __le64 sector = cpu_to_le64(dmreq->iv_sector); local
677 __le64 sector = cpu_to_le64(dmreq->iv_sector); local
1217 crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx, struct bio *bio_out, struct bio *bio_in, sector_t sector) argument
1302 __le64 *sector; local
1405 __le64 *sector; local
1761 crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, struct bio *bio, sector_t sector) argument
1999 sector_t sector; local
2065 sector_t sector = io->sector; local
2096 sector_t sector = io->sector; local
[all...]
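The IV descriptions quoted above (plain, plain64, plain64be, essiv, ...) all derive the initial vector from the sector number. A hedged sketch of just the plain64 rule, writing the 64-bit sector number little-endian into a zero-padded IV (the real generators live in drivers/md/dm-crypt.c; this only mirrors the described layout):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* plain64 sketch: IV = 64-bit little-endian sector number, zero padded. */
static void plain64_iv_demo(uint8_t *iv, size_t iv_size, uint64_t sector)
{
        memset(iv, 0, iv_size);
        for (size_t i = 0; i < 8 && i < iv_size; i++)
                iv[i] = (uint8_t)(sector >> (8 * i));   /* little-endian bytes */
}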
raid10.c  418 pr_err_ratelimited("md/raid10:%s: %pg: rescheduling sector %llu\n",
421 (unsigned long long)r10_bio->sector);
430 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
574 * raid10_find_phys finds the sector offset of a given virtual sector
578 * sector offset to a virtual address
584 sector_t sector; local
597 /* now calculate first sector/dev */
598 chunk = r10bio->sector >> geo->chunk_shift;
599 sector
659 raid10_find_virt(struct r10conf *conf, sector_t sector, int dev) argument
2567 r10_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, enum req_op op) argument
2770 sector_t sector; local
3384 sector_t sector, first_bad; local
3579 sector_t first_bad, sector; local
[all...]
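raid10_find_phys(), partially visible above, maps a virtual sector onto per-device sectors. The sketch below is deliberately simplified to the trivial near_copies == 1 layout (the real code also handles near, far and offset copies): split the virtual sector into a chunk number and an in-chunk offset, spread chunks round-robin over the disks, and rebuild the device sector.

#include <stdint.h>

struct demo_geo { int raid_disks; int chunk_shift; uint64_t chunk_mask; };

/* Simplified illustration only; not the raid10 layout code itself. */
static void find_phys_demo(const struct demo_geo *geo, uint64_t vsector,
                           int *dev, uint64_t *dsector)
{
        uint64_t chunk  = vsector >> geo->chunk_shift;   /* which chunk */
        uint64_t offset = vsector & geo->chunk_mask;     /* inside the chunk */

        *dev = (int)(chunk % geo->raid_disks);
        *dsector = ((chunk / geo->raid_disks) << geo->chunk_shift) + offset;
}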
md-bitmap.c  148 sector_t sector = mddev->bitmap_info.offset + offset + local
160 if (sync_page_io(rdev, sector, iosize, page, REQ_OP_READ, true))
1670 sector_t sector = 0; local
1674 while (sector < bitmap->mddev->resync_max_sectors) {
1675 md_bitmap_end_sync(bitmap, sector, &blocks, 0);
1676 sector += blocks;
1681 void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force) argument
1688 if (sector == 0) {
1698 bitmap->mddev->curr_resync_completed = sector;
1700 sector
1716 sector_t sector, blocks = 0; local
1972 sector_t sector = 0; local
[all...]
dm.c  86 sector_t sector; member in struct:clone_info
524 sector_t sector; local
527 sector = bio_end_sector(bio) - io->sector_offset;
529 sector = bio->bi_iter.bi_sector;
532 sector, dm_io_sectors(io, bio),
855 DMERR("Start sector is beyond the geometry limits.");
1165 * Return maximum size of I/O possible at the supplied sector up to the current
1174 static sector_t __max_io_len(struct dm_target *ti, sector_t sector, argument
1178 sector_t target_offset = dm_target_offset(ti, sector);
1194 static inline sector_t max_io_len(struct dm_target *ti, sector_t sector) argument
1237 sector_t sector = pgoff * PAGE_SECTORS; local
1264 sector_t sector = pgoff * PAGE_SECTORS; local
1291 sector_t sector = pgoff * PAGE_SECTORS; local
[all...]
/linux-master/fs/hfsplus/
wrapper.c  30 * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
37 * @data will return a pointer to the start of the requested sector,
40 * If @sector is not aligned to the bdev logical block size it will
48 int hfsplus_submit_bio(struct super_block *sb, sector_t sector, argument
59 * Align sector to hardware sector size and find offset. We
64 start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
66 sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
69 bio->bi_iter.bi_sector = sector;
234 * Block size must be at least as large as a sector an
[all...]
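The wrapper.c snippet above aligns the 512-byte HFS+ sector down to the device's logical block size and records where the requested data starts inside that block. A small stand-alone sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

#define HFSPLUS_SECTOR_SHIFT 9   /* 512-byte HFS+ sectors */

/* Round the sector down to the logical block size (io_size, a power of two in
 * bytes) and report the byte offset of the caller's data inside that block. */
static uint64_t align_sector(uint64_t sector, unsigned io_size,
                             unsigned *offset_in_block)
{
        uint64_t start = sector << HFSPLUS_SECTOR_SHIFT;        /* byte offset */

        *offset_in_block = (unsigned)(start & (io_size - 1));
        return sector & ~((uint64_t)(io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
}

int main(void)
{
        unsigned off;
        uint64_t s = align_sector(7, 4096, &off);   /* 4 KiB logical blocks */
        printf("aligned sector %llu, data offset %u bytes\n",
               (unsigned long long)s, off);         /* -> sector 0, offset 3584 */
        return 0;
}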
/linux-master/drivers/mtd/devices/
docg3.c  416 * @sector: the sector
418 static void doc_setup_addr_sector(struct docg3 *docg3, int sector) argument
421 doc_flash_address(docg3, sector & 0xff);
422 doc_flash_address(docg3, (sector >> 8) & 0xff);
423 doc_flash_address(docg3, (sector >> 16) & 0xff);
430 * @sector: the sector
433 static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs) argument
438 doc_flash_address(docg3, sector
459 int sector, ret = 0; local
508 int ret = 0, sector; local
835 uint sector, pages_biblock; local
1144 int ret, sector; local
[all...]
/linux-master/arch/xtensa/platforms/iss/
simdisk.c  70 static void simdisk_transfer(struct simdisk *dev, unsigned long sector, argument
73 unsigned long offset = sector << SECTOR_SHIFT;
108 sector_t sector = bio->bi_iter.bi_sector; local
114 simdisk_transfer(dev, sector, len, buffer,
116 sector += len;
/linux-master/drivers/nvme/host/
zns.c  177 int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, argument
200 sector &= ~(ns->head->zsze - 1);
201 while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
204 c.zmr.slba = cpu_to_le64(nvme_sect_to_lba(ns->head, sector));
225 sector += ns->head->zsze * nz;
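The zns.c lines above snap the start sector to a zone boundary and convert it to a namespace LBA. A hedged restatement of both steps, assuming a power-of-two zone size expressed in 512-byte sectors and an LBA size of 2^lba_shift bytes:

#include <stdint.h>

/* Sketch: round the 512-byte sector down to the zone start, then shift out
 * the difference between the LBA size and the 512-byte sector size. */
static uint64_t zone_aligned_slba(uint64_t sector, uint64_t zsze_sectors,
                                  unsigned lba_shift /* e.g. 12 for 4K LBAs */)
{
        sector &= ~(zsze_sectors - 1);          /* snap to zone start */
        return sector >> (lba_shift - 9);       /* 512-byte sectors -> LBAs */
}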
/linux-master/drivers/mtd/
ftl.c  749 u_long sector, u_long nblocks)
757 part, sector, nblocks);
765 if (((sector+i) * SECTOR_SIZE) >= le32_to_cpu(part->header.FormattedSize)) {
769 log_addr = part->VirtualBlockMap[sector+i];
863 u_long sector, u_long nblocks)
871 part, sector, nblocks);
885 virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;
922 old_addr = part->VirtualBlockMap[sector+i];
924 part->VirtualBlockMap[sector+i] = 0xffffffff;
933 part->VirtualBlockMap[sector
748 ftl_read(partition_t *part, caddr_t buffer, u_long sector, u_long nblocks) argument
862 ftl_write(partition_t *part, caddr_t buffer, u_long sector, u_long nblocks) argument
969 ftl_discardsect(struct mtd_blktrans_dev *dev, unsigned long sector, unsigned nr_sects) argument
[all...]
/linux-master/include/linux/
blkdev.h  210 * Independent sector access ranges. This is always NULL for
330 int blkdev_report_zones(struct block_device *bdev, sector_t sector,
345 * and must include all sectors within the disk capacity (no sector holes
353 sector_t sector; member in struct:blk_independent_access_range
629 static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) argument
633 return sector >> ilog2(disk->queue->limits.chunk_sectors);
636 static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector) argument
642 return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
677 static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector) argument
681 static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) argument
1327 bdev_offset_from_zone_start(struct block_device *bdev, sector_t sector) argument
1333 bdev_is_zone_start(struct block_device *bdev, sector_t sector) argument
[all...]
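With a power-of-two zone size, the disk_zone_no() and bdev_offset_from_zone_start() helpers shown above reduce to a shift and a mask. A user-space restatement:

#include <stdint.h>

/* chunk_sectors is the zone size in 512-byte sectors, a power of two. */
static unsigned int zone_no(uint64_t sector, unsigned int chunk_sectors)
{
        return (unsigned int)(sector >> __builtin_ctz(chunk_sectors));
}

static uint64_t offset_from_zone_start(uint64_t sector, unsigned int chunk_sectors)
{
        return sector & (chunk_sectors - 1);
}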
badblocks.h  23 * 54 bits are sector number, 9 bits are extent size,
41 sector_t sector; member in struct:badblocks
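The badblocks.h comment above describes one 64-bit word per bad range: 54 bits of start sector, 9 bits of extent length, plus an acknowledged bit. A sketch of a packing that matches that split (the field layout here is illustrative; see include/linux/badblocks.h for the real macros):

#include <stdint.h>

/* length is stored as len - 1, so one word covers up to 512 sectors. */
static uint64_t bb_make(uint64_t sector, unsigned int len, int ack)
{
        return (sector << 9) | (uint64_t)(len - 1) | ((uint64_t)!!ack << 63);
}

static uint64_t bb_sector(uint64_t bb) { return (bb << 1) >> 10; } /* drop ack bit, then shift */
static unsigned int bb_len(uint64_t bb) { return (unsigned int)(bb & 0x1ff) + 1; }
static int bb_ack(uint64_t bb)          { return (int)(bb >> 63); }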
/linux-master/drivers/nvdimm/
pmem.c  59 static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector) argument
61 return (sector << SECTOR_SHIFT) + pmem->data_offset;
89 static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks) argument
93 badblocks_clear(&pmem->bb, sector, blks);
167 sector_t sector, unsigned int len)
170 phys_addr_t pmem_off = to_offset(pmem, sector);
173 if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
183 sector_t sector, unsigned int len)
185 phys_addr_t pmem_off = to_offset(pmem, sector);
188 if (unlikely(is_bad_pmem(&pmem->bb, sector, le
165 pmem_do_read(struct pmem_device *pmem, struct page *page, unsigned int page_off, sector_t sector, unsigned int len) argument
181 pmem_do_write(struct pmem_device *pmem, struct page *page, unsigned int page_off, sector_t sector, unsigned int len) argument
248 sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT; local
[all...]
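to_offset() above is the whole address computation for pmem I/O: the 512-byte sector becomes a byte offset, and the namespace's data_offset (label/metadata area) is added. Restated trivially:

#include <stdint.h>

#define SECTOR_SHIFT 9

static uint64_t pmem_to_offset(uint64_t sector, uint64_t data_offset)
{
        return (sector << SECTOR_SHIFT) + data_offset;  /* byte address in pmem */
}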
claim.c  258 sector_t sector = offset >> 9; local
270 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
277 if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
289 badblocks_clear(&nsio->bb, sector, cleared);
/linux-master/drivers/block/
virtio_blk.c  103 __virtio64 sector; member in struct:virtblk_req::__anon346::__anon347
185 range[0].sector = cpu_to_le64(blk_rq_pos(req));
189 u64 sector = bio->bi_iter.bi_sector; local
194 range[n].sector = cpu_to_le64(sector);
246 u64 sector = 0; local
257 sector = blk_rq_pos(req);
261 sector = blk_rq_pos(req);
278 sector = blk_rq_pos(req);
282 sector
562 virtblk_submit_zone_report(struct virtio_blk *vblk, char *report_buf, size_t report_len, sector_t sector) argument
662 virtblk_report_zones(struct gendisk *disk, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data) argument
[all...]
/linux-master/drivers/target/
target_core_sbc.c  224 * Use 8-bit sector value. SBC-3 says:
913 * Currently enforce COMPARE_AND_WRITE for a single sector
1167 sector_t sector = cmd->t_task_lba; local
1211 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
1214 pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
1217 "WRITE" : "READ", (unsigned long long)sector,
1221 sector++;
1231 __u16 crc, sector_t sector, unsigned int ei_lba)
1241 pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
1242 " csum 0x%04x\n", (unsigned long long)sector,
1230 sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt, __u16 crc, sector_t sector, unsigned int ei_lba) argument
1322 sector_t sector = start; local
[all...]
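The DIF lines above show the Type 1 protection-information rule: each sector's reference tag is the low 32 bits of its LBA, stored big-endian and incremented per sector. A hedged sketch of that fill, with the guard-tag CRC of the data omitted:

#include <stdint.h>

struct pi_tuple_demo {
        uint16_t guard_tag;    /* CRC of the sector data (not computed here) */
        uint16_t app_tag;
        uint32_t ref_tag;      /* big-endian low 32 bits of the LBA */
};

static void fill_ref_tags(struct pi_tuple_demo *pi, uint64_t lba, unsigned nr)
{
        for (unsigned i = 0; i < nr; i++, lba++)
                pi[i].ref_tag = __builtin_bswap32((uint32_t)(lba & 0xffffffff));
}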
/linux-master/fs/bcachefs/
journal_io.h  12 u64 sector; member in struct:journal_ptr
/linux-master/drivers/scsi/
sd.h  37 * Number of sectors at the end of the device to avoid multi-sector
224 static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sector) argument
226 return sector >> (ilog2(sdev->sector_size) - 9);
255 int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
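sectors_to_logical() above converts 512-byte block-layer sectors to device logical blocks with a single shift. A restatement with a worked example (4096-byte logical blocks shift by 3, so sector 80 becomes LBA 10):

#include <stdint.h>
#include <stdio.h>

static uint64_t sectors_to_logical_demo(uint64_t sector, unsigned sector_size)
{
        return sector >> (__builtin_ctz(sector_size) - 9);
}

int main(void)
{
        /* 4096-byte logical blocks: 512-byte sector 80 maps to LBA 10 */
        printf("%llu\n", (unsigned long long)sectors_to_logical_demo(80, 4096));
        return 0;
}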
sd_zbc.c  248 * @sector: Start sector.
256 int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, argument
260 sector_t lba = sectors_to_logical(sdkp->device, sector);
334 sector_t sector = blk_rq_pos(rq); local
343 if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
481 sector_t sector = blk_rq_pos(rq); local
483 sector_t block = sectors_to_logical(sdkp->device, sector);
/linux-master/drivers/mtd/nand/raw/atmel/
pmecc.c  105 #define ATMEL_PMECC_ECC(sector, n) \
106 ((((sector) + 1) * 0x40) + (n))
108 #define ATMEL_PMECC_REM(sector, n) \
109 ((((sector) + 1) * 0x40) + ((n) * 4) + 0x200)
429 static void atmel_pmecc_gen_syndrome(struct atmel_pmecc_user *user, int sector) argument
438 ATMEL_PMECC_REM(sector, i / 2));
690 int atmel_pmecc_correct_sector(struct atmel_pmecc_user *user, int sector, argument
698 if (!(user->isr & BIT(sector)))
701 atmel_pmecc_gen_syndrome(user, sector);
753 int sector, voi
752 atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user, int sector, void *ecc) argument
[all...]
/linux-master/drivers/block/drbd/
drbd_int.h  504 u64 md_offset; /* sector offset to 'super' block */
513 s32 al_offset; /* signed relative sector offset to activity log */
514 s32 bm_offset; /* signed relative sector offset to bitmap */
837 /* where does the admin want us to start? (sector) */
840 /* where are we now? (sector) */
842 /* Start sector of out of sync range (to merge printk reporting). */
1036 sector_t sector, int blksize, u64 block_id);
1042 sector_t sector, int size, u64 block_id);
1043 extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
1046 extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, in
[all...]
/linux-master/fs/exfat/
balloc.c  35 sector_t sector; local
58 sector = exfat_cluster_to_sector(sbi, sbi->map_clu);
60 sbi->vol_amap[i] = sb_bread(sb, sector + i);
/linux-master/include/uapi/linux/
blktrace_api.h  105 __u64 sector; /* disk offset */ member in struct:blk_io_trace

Completed in 653 milliseconds
