Searched refs:nr_sectors (Results 1 - 25 of 38) sorted by relevance

/linux-master/drivers/md/bcache/
writeback.h
80 unsigned int nr_sectors)
91 if (nr_sectors <= dc->disk.stripe_size)
94 nr_sectors -= dc->disk.stripe_size;
149 uint64_t offset, int nr_sectors);
78 bcache_dev_stripe_dirty(struct cached_dev *dc, uint64_t offset, unsigned int nr_sectors) argument
writeback.c
597 uint64_t offset, int nr_sectors)
611 atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
615 while (nr_sectors) {
616 int s = min_t(unsigned int, abs(nr_sectors),
619 if (nr_sectors < 0)
635 nr_sectors -= s;
596 bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, uint64_t offset, int nr_sectors) argument
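
The writeback hits above are bcache's dirty-stripe accounting: bcache_dev_sectors_dirty_add() takes a signed nr_sectors delta and splits it stripe by stripe, the sign deciding whether sectors are being dirtied or cleaned. A minimal standalone sketch of that splitting loop (simplified: a plain long array stands in for the kernel's atomic per-stripe counters, and STRIPE_SIZE is an arbitrary stand-in for dc->disk.stripe_size):

    #include <stdio.h>
    #include <stdlib.h>

    #define STRIPE_SIZE 16          /* stand-in for dc->disk.stripe_size */
    #define NR_STRIPES  8

    static long stripe_dirty[NR_STRIPES]; /* stand-in for atomic counters */

    /* Apply a signed dirty-sector delta starting at 'offset', as in
     * bcache_dev_sectors_dirty_add(): positive dirties, negative cleans.
     * Staying inside NR_STRIPES is the caller's responsibility here. */
    static void sectors_dirty_add(unsigned long long offset, int nr_sectors)
    {
        unsigned stripe = offset / STRIPE_SIZE;
        unsigned stripe_offset = offset % STRIPE_SIZE;

        while (nr_sectors && stripe < NR_STRIPES) {
            /* Take at most the remainder of the current stripe. */
            int s = abs(nr_sectors);
            if (s > (int)(STRIPE_SIZE - stripe_offset))
                s = STRIPE_SIZE - stripe_offset;
            if (nr_sectors < 0)
                s = -s;

            stripe_dirty[stripe] += s;
            nr_sectors -= s;
            stripe_offset = 0;
            stripe++;
        }
    }

    int main(void)
    {
        sectors_dirty_add(10, 20);  /* dirty 20 sectors across a stripe edge */
        sectors_dirty_add(12, -4);  /* clean 4 of them again */
        for (int i = 0; i < NR_STRIPES; i++)
            printf("stripe %d: %ld dirty\n", i, stripe_dirty[i]);
        return 0;
    }
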
/linux-master/block/
blk-ia-ranges.c
25 return sprintf(buf, "%llu\n", iar->nr_sectors);
39 .attr = { .name = "nr_sectors", .mode = 0444 },
186 sector < iar->sector + iar->nr_sectors)
219 swap(iar->nr_sectors, tmp->nr_sectors);
222 sector += iar->nr_sectors;
247 new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors)
blk-zoned.c
237 * @nr_sectors: Number of sectors, should be at least the length of one zone and
242 * @sector..@sector+@nr_sectors. Specifying the entire disk sector range
248 sector_t sector, sector_t nr_sectors)
253 sector_t end_sector = sector + nr_sectors;
274 if (!bdev_is_zone_start(bdev, nr_sectors) && end_sector != capacity)
283 if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
361 if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
362 zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
367 end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
419 ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors);
247 blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, sector_t sector, sector_t nr_sectors) argument
[all...]
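
The blk-zoned.c hits are the validation around blkdev_zone_mgmt(): the requested range must start at a zone boundary, and lines 361-362 reject a user-supplied range that wraps around (sector + nr_sectors overflowing) or runs past the device capacity. A standalone sketch of that range check, assuming a 64-bit sector_t:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Reject a zone range that wraps or runs past the disk, mirroring
     * the checks on zrange->sector + zrange->nr_sectors above. */
    static bool zone_range_valid(sector_t sector, sector_t nr_sectors,
                                 sector_t capacity)
    {
        /* sector + nr_sectors <= sector catches both nr_sectors == 0
         * and unsigned overflow in the addition. */
        if (sector + nr_sectors <= sector)
            return false;
        if (sector + nr_sectors > capacity)
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", zone_range_valid(0, 1 << 19, 1 << 20));    /* 1: ok */
        printf("%d\n", zone_range_valid(UINT64_MAX, 2, 1 << 20)); /* 0: wraps */
        printf("%d\n", zone_range_valid(1 << 20, 1, 1 << 20));    /* 0: past end */
        return 0;
    }
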
blk-core.c
548 unsigned int nr_sectors = bio_sectors(bio); local
550 if (nr_sectors &&
551 (nr_sectors > maxsector ||
552 bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
554 "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
556 bio->bi_iter.bi_sector, nr_sectors, maxsector);
587 int nr_sectors = bio_sectors(bio); local
603 if (nr_sectors > q->limits.chunk_sectors)
607 if (nr_sectors > q->limits.max_zone_append_sectors)
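
The blk-core.c hit is the end-of-device check applied to every bio: note that lines 551-552 phrase the bound as bi_sector > maxsector - nr_sectors rather than bi_sector + nr_sectors > maxsector, so the comparison itself cannot overflow. A minimal sketch of the same idiom:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Overflow-safe end-of-device check in the style of the hit above. */
    static bool bio_beyond_eod(sector_t start, uint32_t nr_sectors,
                               sector_t maxsector)
    {
        return nr_sectors &&
               (nr_sectors > maxsector ||
                start > maxsector - nr_sectors);
    }

    int main(void)
    {
        /* 8-sector I/O ending exactly at a 1024-sector device: allowed. */
        printf("%d\n", bio_beyond_eod(1016, 8, 1024)); /* 0 */
        /* One sector further: rejected, with no overflowing addition. */
        printf("%d\n", bio_beyond_eod(1017, 8, 1024)); /* 1 */
        return 0;
    }
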
/linux-master/drivers/block/null_blk/
null_blk.h
127 sector_t nr_sectors);
129 sector_t sector, unsigned int nr_sectors);
138 sector_t sector, sector_t nr_sectors);
156 enum req_op op, sector_t sector, sector_t nr_sectors)
155 null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op, sector_t sector, sector_t nr_sectors) argument
zoned.c
231 unsigned int nr_sectors = len >> SECTOR_SHIFT; local
235 sector + nr_sectors <= zone->wp)
360 unsigned int nr_sectors, bool append)
372 return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
399 if (zone->wp + nr_sectors > zone->start + zone->capacity) {
426 ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
430 zone->wp += nr_sectors;
655 sector_t sector, sector_t nr_sectors)
663 return null_zone_write(cmd, sector, nr_sectors, false);
665 return null_zone_write(cmd, sector, nr_sectors, true);
359 null_zone_write(struct nullb_cmd *cmd, sector_t sector, unsigned int nr_sectors, bool append) argument
654 null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op, sector_t sector, sector_t nr_sectors) argument
[all...]
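
zoned.c lines 399-430 are the heart of a sequential zone write in null_blk: the I/O must fit under the zone's capacity, and on success the write pointer advances by nr_sectors. A simplified single-zone sketch (no zone append, locking, or real error codes):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    struct zone {
        sector_t start;     /* first sector of the zone */
        sector_t capacity;  /* writable sectors in the zone */
        sector_t wp;        /* write pointer */
    };

    /* Sequential zone write in the style of null_zone_write():
     * returns 0 on success, -1 if the write does not fit. */
    static int zone_write(struct zone *z, sector_t sector, unsigned nr_sectors)
    {
        if (sector != z->wp)                        /* must be sequential */
            return -1;
        if (z->wp + nr_sectors > z->start + z->capacity)
            return -1;                              /* would exceed capacity */
        /* ... data would be copied here ... */
        z->wp += nr_sectors;
        return 0;
    }

    int main(void)
    {
        struct zone z = { .start = 0, .capacity = 128, .wp = 0 };
        printf("%d wp=%llu\n", zone_write(&z, 0, 64),
               (unsigned long long)z.wp);           /* 0 wp=64 */
        printf("%d wp=%llu\n", zone_write(&z, 64, 128),
               (unsigned long long)z.wp);           /* -1 wp=64: too big */
        return 0;
    }
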
main.c
1134 sector_t sector, sector_t nr_sectors)
1137 size_t n = nr_sectors << SECTOR_SHIFT;
1255 sector_t nr_sectors)
1261 if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
1270 sector_t nr_sectors)
1275 return null_handle_discard(dev, sector, nr_sectors);
1321 sector_t sector, unsigned int nr_sectors)
1327 ret = null_handle_badblocks(cmd, sector, nr_sectors);
1333 return null_handle_memory_backed(cmd, op, sector, nr_sectors);
1339 sector_t nr_sectors, enum req_op op)
1133 null_handle_discard(struct nullb_device *dev, sector_t sector, sector_t nr_sectors) argument
1253 null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector, sector_t nr_sectors) argument
1267 null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op, sector_t sector, sector_t nr_sectors) argument
1320 null_process_cmd(struct nullb_cmd *cmd, enum req_op op, sector_t sector, unsigned int nr_sectors) argument
1338 null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, sector_t nr_sectors, enum req_op op) argument
1555 sector_t nr_sectors = blk_rq_sectors(rq); local
[all...]
/linux-master/fs/zonefs/
trace.h
31 __field(sector_t, nr_sectors)
39 __entry->nr_sectors = z->z_size >> SECTOR_SHIFT;
41 TP_printk("bdev=(%d,%d), ino=%lu op=%s, sector=%llu, nr_sectors=%llu",
44 __entry->nr_sectors
/linux-master/fs/btrfs/
scrub.c
122 u16 nr_sectors; member in struct:scrub_stripe
256 stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
268 stripe->sectors = kcalloc(stripe->nr_sectors,
699 ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);
719 if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
757 for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
768 for (i = 0; i < stripe->nr_sectors; i++) {
773 ASSERT(i < stripe->nr_sectors);
792 ASSERT(sector_nr < stripe->nr_sectors);
828 for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
1656 unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start + local
1728 unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start + local
[all...]
raid56.h
71 u16 nr_sectors; member in struct:btrfs_raid_bio
raid56.c
175 for (i = 0; i < rbio->nr_sectors; i++) {
246 for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
881 ASSERT(index >= 0 && index < rbio->nr_sectors);
955 rbio->nr_sectors = num_sectors;
1272 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
1278 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1313 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1404 for (i = 0; i < rbio->nr_sectors; i++) {
1444 for (i = 0; i < rbio->nr_sectors; i++) {
1454 ASSERT(i < rbio->nr_sectors);
[all...]
zoned.c
365 sector_t nr_sectors; local
422 nr_sectors = bdev_nr_sectors(bdev);
424 zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);
425 if (!IS_ALIGNED(nr_sectors, zone_sectors))
482 while (sector < nr_sectors) {
870 sector_t nr_sectors; local
884 nr_sectors = bdev_nr_sectors(bdev);
885 nr_zones = nr_sectors >> zone_sectors_shift;
1005 sector_t nr_sectors; local
1013 nr_sectors = bdev_nr_sectors(bdev);
[all...]
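
The btrfs zoned.c hits size the zone bookkeeping from the device: nr_sectors = bdev_nr_sectors(bdev), then nr_zones = nr_sectors >> ilog2(zone_sectors), after confirming the device size is a whole number of zones (zone_sectors is a power of two). A small sketch of that derivation with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Integer log2 of a power-of-two value, like the kernel's ilog2()
     * for this restricted case. */
    static unsigned ilog2_pow2(sector_t v)
    {
        unsigned log = 0;
        while (v >>= 1)
            log++;
        return log;
    }

    int main(void)
    {
        sector_t nr_sectors = 1 << 24;   /* 8 GiB at 512-byte sectors */
        sector_t zone_sectors = 1 << 16; /* 32 MiB zones */

        /* Same alignment test as IS_ALIGNED(nr_sectors, zone_sectors). */
        if (nr_sectors & (zone_sectors - 1)) {
            fprintf(stderr, "device size not zone aligned\n");
            return 1;
        }
        printf("nr_zones = %llu\n",
               (unsigned long long)(nr_sectors >> ilog2_pow2(zone_sectors)));
        return 0;
    }
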
/linux-master/include/uapi/linux/
blkzoned.h
141 * @nr_sectors: Total number of sectors of all zones to operate on.
145 __u64 nr_sectors; member in struct:blk_zone_range
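
struct blk_zone_range is the UAPI type behind the zone-management ioctls (BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE, BLKFINISHZONE): userspace fills in sector and nr_sectors and the kernel forwards the range to blkdev_zone_mgmt() as seen in blk-zoned.c above. A userspace example resetting the first zone of a zoned device; the device path and the 524288-sector zone size are assumptions (query the real zone size with BLKREPORTZONE):

    #include <fcntl.h>
    #include <linux/blkzoned.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/nullb0", O_RDWR); /* hypothetical zoned device */
        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* Reset the first zone: 524288 sectors = 256 MiB at 512 bytes
         * per sector (an assumed zone size, not a universal one). */
        struct blk_zone_range range = {
            .sector = 0,
            .nr_sectors = 524288,
        };
        if (ioctl(fd, BLKRESETZONE, &range) < 0)
            perror("BLKRESETZONE");

        close(fd);
        return 0;
    }
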
ublk_cmd.h
285 __u32 nr_sectors; member in union:ublksrv_io_desc::__anon2826
/linux-master/drivers/md/
dm-zone.c
359 unsigned int nr_sectors; member in struct:orig_bio_details
425 unsigned int nr_sectors)
444 WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
451 if (nr_sectors != orig_bio_details->nr_sectors) {
455 WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);
534 orig_bio_details.nr_sectors = bio_sectors(clone);
423 dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno, struct orig_bio_details *orig_bio_details, unsigned int nr_sectors) argument
md.h
622 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) argument
624 atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
627 static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors) argument
629 md_sync_acct(bio->bi_bdev, nr_sectors);
dm-log-writes.c
72 * [ 1 sector ][ entry->nr_sectors ]
91 * nr_sectors - the number of sectors we wrote.
98 __le64 nr_sectors; member in struct:log_write_entry
127 sector_t nr_sectors; member in struct:pending_block
327 entry.nr_sectors = cpu_to_le64(block->nr_sectors);
451 lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);
704 block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));
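
dm-log-writes lays each log record out as one metadata sector followed by the data it covers ("[ 1 sector ][ entry->nr_sectors ]"), with nr_sectors stored little-endian in the entry. A sketch of the layout and of how the log head advances; only the sector and nr_sectors fields come from the hits above, the rest is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* On-disk record header, one sector long; nr_sectors mirrors the
     * __le64 member shown above (endianness handling omitted here). */
    struct log_write_entry {
        uint64_t sector;     /* where the data was written on the origin */
        uint64_t nr_sectors; /* sectors of data following the header */
    };

    #define ENTRY_SECTORS 1  /* "[ 1 sector ][ entry->nr_sectors ]" */

    int main(void)
    {
        uint64_t next_sector = 0;
        struct log_write_entry writes[] = {
            { .sector = 0,  .nr_sectors = 8 },
            { .sector = 64, .nr_sectors = 16 },
        };

        /* Each record consumes its header sector plus its data sectors,
         * as in lc->next_sector += dev_to_bio_sectors(lc, ...nr_sectors). */
        for (unsigned i = 0; i < 2; i++) {
            printf("entry %u written at log sector %llu\n",
                   i, (unsigned long long)next_sector);
            next_sector += ENTRY_SECTORS + writes[i].nr_sectors;
        }
        printf("log head now at %llu\n", (unsigned long long)next_sector);
        return 0;
    }
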
dm-zoned-target.c
631 unsigned int nr_sectors = bio_sectors(bio); local
640 bio_op(bio), (unsigned long long)sector, nr_sectors,
645 if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
649 if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
659 if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
669 if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))
/linux-master/block/partitions/
ibm.c
236 sector_t nr_sectors,
251 * 'size based on geo == size based on nr_sectors' is true, then
258 size = nr_sectors;
267 /* else keep size based on nr_sectors */
332 sector_t nr_sectors; local
347 nr_sectors = bdev_nr_sectors(bdev);
348 if (nr_sectors == 0)
376 label, labelsect, nr_sectors,
395 size = nr_sectors;
230 find_lnx1_partitions(struct parsed_partitions *state, struct hd_geometry *geo, int blocksize, char name[], union label_t *label, sector_t labelsect, sector_t nr_sectors, dasd_information2_t *info) argument
/linux-master/include/xen/interface/io/
blkif.h
127 * sector index to begin discard operations at and nr_sectors as the number of
214 uint64_t nr_sectors; member in struct:blkif_request_discard
/linux-master/drivers/block/xen-blkback/
common.h
93 uint64_t nr_sectors; member in struct:blkif_x86_32_request_discard
147 uint64_t nr_sectors; member in struct:blkif_x86_64_request_discard
blkback.c
978 preq.nr_sects = req->u.discard.nr_sectors;
993 req->u.discard.nr_sectors, GFP_KERNEL);
996 req->u.discard.nr_sectors, GFP_KERNEL);
1101 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
1154 dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
/linux-master/drivers/block/drbd/
drbd_actlog.c
854 sector_t esector, nr_sectors; local
870 nr_sectors = get_capacity(device->vdisk);
873 if (!expect(device, sector < nr_sectors))
875 if (!expect(device, esector < nr_sectors))
876 esector = nr_sectors - 1;
878 lbnr = BM_SECT_TO_BIT(nr_sectors-1);
885 if (unlikely(esector == (nr_sectors-1)))
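
drbd_actlog.c defends against requests past the end of the device: nr_sectors is the capacity, and an end sector at or beyond it is clamped to nr_sectors - 1 before being translated into bitmap bits. A minimal sketch of that clamping:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Clamp a request's last sector to the device, in the spirit of
     * the expect()/clamp logic above. */
    static sector_t clamp_esector(sector_t sector, unsigned size_bytes,
                                  sector_t nr_sectors)
    {
        sector_t esector = sector + (size_bytes >> 9) - 1; /* last sector */

        if (esector >= nr_sectors)
            esector = nr_sectors - 1;
        return esector;
    }

    int main(void)
    {
        /* 4 KiB request starting 2 sectors before a 1024-sector device ends:
         * the end sector gets clamped to 1023 (capacity - 1). */
        printf("%llu\n", (unsigned long long)clamp_esector(1022, 4096, 1024));
        return 0;
    }
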
/linux-master/fs/fat/
inode.c
68 unsigned nr_sectors; member in struct:fat_floppy_defaults
75 .nr_sectors = 160 * KB_IN_SECTORS,
82 .nr_sectors = 180 * KB_IN_SECTORS,
89 .nr_sectors = 320 * KB_IN_SECTORS,
96 .nr_sectors = 360 * KB_IN_SECTORS,
1571 if (floppy_defaults[i].nr_sectors == bd_sects) {
1595 bpb->fat_sectors = fdefaults->nr_sectors;
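
The FAT hits are a table of classic floppy geometries keyed by total sector count: at mount time the device size in sectors is matched against nr_sectors to pick default BPB values (as at line 1571 above). A compact sketch of that lookup, with the table abbreviated to the sizes shown:

    #include <stdio.h>

    #define KB_IN_SECTORS 2  /* 1 KiB = 2 sectors of 512 bytes */

    struct fat_floppy_defaults {
        unsigned nr_sectors; /* total size of the medium in sectors */
        /* ... other BPB defaults elided ... */
    };

    static const struct fat_floppy_defaults floppy_defaults[] = {
        { .nr_sectors = 160 * KB_IN_SECTORS },
        { .nr_sectors = 180 * KB_IN_SECTORS },
        { .nr_sectors = 320 * KB_IN_SECTORS },
        { .nr_sectors = 360 * KB_IN_SECTORS },
    };

    int main(void)
    {
        unsigned long long bd_sects = 720; /* a 360 KB floppy */

        for (unsigned i = 0; i < 4; i++) {
            if (floppy_defaults[i].nr_sectors == bd_sects) {
                printf("matched default geometry %u (%u sectors)\n",
                       i, floppy_defaults[i].nr_sectors);
                return 0;
            }
        }
        printf("no default geometry for %llu sectors\n", bd_sects);
        return 0;
    }
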

Completed in 224 milliseconds
