Searched refs:sectors (Results 1 - 25 of 210) sorted by relevance

/linux-master/fs/bcachefs/
fs-io.h
58 u64 sectors; member in struct:quota_res
67 BUG_ON(res->sectors > inode->ei_quota_reserved);
70 -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
71 inode->ei_quota_reserved -= res->sectors;
72 res->sectors = 0;
79 if (res->sectors) {
89 u64 sectors,
98 ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
101 inode->ei_quota_reserved += sectors;
102 res->sectors
86 bch2_quota_reservation_add(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *res, u64 sectors, bool check_enospc) argument
119 bch2_quota_reservation_add(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *res, unsigned sectors, bool check_enospc) argument
133 bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, struct quota_res *quota_res, s64 sectors) argument
[all...]
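
The fs-io.h hits above show the reserve/release pattern: bch2_quota_reservation_add() charges sectors to the inode's reserved count only if the quota check passes, and bch2_quota_reservation_put() hands unused sectors back and zeroes the local reservation. A minimal standalone model of that pattern (plain userspace C, with simplified names; not the bcachefs implementation):

/*
 * Sketch of the quota reserve/release pattern from fs-io.h above.
 * quota_res, reserved and limit only loosely mirror the kernel fields.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct quota_res { uint64_t sectors; };          /* per-operation reservation */

struct inode_quota {
	uint64_t reserved;                        /* total sectors reserved */
	uint64_t limit;                           /* hard limit, 0 = none */
};

/* Try to reserve @sectors; fail if the limit would be exceeded. */
static int quota_reservation_add(struct inode_quota *q, struct quota_res *res,
				 uint64_t sectors, bool check_enospc)
{
	if (check_enospc && q->limit && q->reserved + sectors > q->limit)
		return -1;
	q->reserved += sectors;
	res->sectors += sectors;
	return 0;
}

/* Return an unused reservation; mirrors the underflow BUG_ON above. */
static void quota_reservation_put(struct inode_quota *q, struct quota_res *res)
{
	assert(res->sectors <= q->reserved);
	q->reserved -= res->sectors;
	res->sectors = 0;
}

int main(void)
{
	struct inode_quota q = { .reserved = 0, .limit = 1024 };
	struct quota_res res = { 0 };

	if (!quota_reservation_add(&q, &res, 256, true))
		printf("reserved %llu, total now %llu\n",
		       (unsigned long long)res.sectors,
		       (unsigned long long)q.reserved);

	quota_reservation_put(&q, &res);
	printf("after put: total %llu\n", (unsigned long long)q.reserved);
	return 0;
}
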
ec_types.h
14 u16 sectors; member in struct:stripe
22 u16 sectors; member in struct:gc_stripe
ec_format.h
7 __le16 sectors; member in struct:bch_stripe
clock.h
12 static inline void bch2_increment_clock(struct bch_fs *c, unsigned sectors, argument
17 if (unlikely(this_cpu_add_return(*clock->pcpu_buf, sectors) >=
move_types.h
26 unsigned sectors; member in struct:move_bucket
buckets.h
156 static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p) argument
158 EBUG_ON(sectors < 0);
161 ? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
163 : sectors;
403 if (res->sectors) {
404 this_cpu_sub(*c->online_reserved, res->sectors);
405 res->sectors = 0;
416 u64 sectors, int flags)
423 if (sectors > old)
424 return __bch2_disk_reservation_add(c, res, sectors, flag
[all...]
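
The ptr_disk_sectors() lines in the buckets.h entry above scale an extent's live sectors by its compression ratio and round up with DIV_ROUND_UP_ULL. A self-contained userspace sketch of just that arithmetic (the field names are assumptions taken from the snippet, not the full bcachefs definitions):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP_ULL(n, d)  (((n) + (d) - 1) / (d))

static uint64_t ptr_disk_sectors(uint64_t live_sectors,
				 uint32_t compressed_size,
				 uint32_t uncompressed_size)
{
	/* Uncompressed data occupies its logical size on disk. */
	if (!compressed_size || compressed_size == uncompressed_size)
		return live_sectors;

	/* Compressed: scale by the ratio and round up to whole sectors. */
	return DIV_ROUND_UP_ULL((uint64_t)live_sectors * compressed_size,
				uncompressed_size);
}

int main(void)
{
	/* 128 live sectors stored as 43 compressed sectors of a 128-sector extent */
	printf("%llu\n", (unsigned long long)ptr_disk_sectors(128, 43, 128));
	return 0;
}
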
buckets.c
30 s64 sectors)
34 fs_usage->btree += sectors;
38 fs_usage->data += sectors;
41 fs_usage->cached += sectors;
280 prt_str(out, "sectors");
291 prt_u64(out, usage->d[i].sectors);
320 u->d[old->data_type].sectors -= bch2_bucket_sectors_dirty(*old);
321 u->d[new->data_type].sectors += bch2_bucket_sectors_dirty(*new);
323 u->d[BCH_DATA_cached].sectors += new->cached_sectors;
324 u->d[BCH_DATA_cached].sectors
28 fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage, enum bch_data_type data_type, s64 sectors) argument
352 __update_replicas(struct bch_fs *c, struct bch_fs_usage *fs_usage, struct bch_replicas_entry_v1 *r, s64 sectors) argument
367 bch2_update_replicas(struct bch_fs *c, struct bkey_s_c k, struct bch_replicas_entry_v1 *r, s64 sectors, unsigned journal_seq, bool gc) argument
407 update_cached_sectors(struct bch_fs *c, struct bkey_s_c k, unsigned dev, s64 sectors, unsigned journal_seq, bool gc) argument
462 bch2_update_replicas_list(struct btree_trans *trans, struct bch_replicas_entry_v1 *r, s64 sectors) argument
490 bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64 sectors) argument
499 bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca, size_t b, enum bch_data_type data_type, unsigned sectors, struct gc_pos pos, unsigned flags) argument
553 bch2_check_bucket_ref(struct btree_trans *trans, struct bkey_s_c k, const struct bch_extent_ptr *ptr, s64 sectors, enum bch_data_type ptr_data_type, u8 b_gen, u8 bucket_data_type, u32 bucket_sectors) argument
791 __mark_pointer(struct btree_trans *trans, struct bkey_s_c k, const struct bch_extent_ptr *ptr, s64 sectors, enum bch_data_type ptr_data_type, u8 bucket_gen, u8 *bucket_data_type, u32 *dirty_sectors, u32 *cached_sectors) argument
817 bch2_trigger_pointer(struct btree_trans *trans, enum btree_id btree_id, unsigned level, struct bkey_s_c k, struct extent_ptr_decoded p, const union bch_extent_entry *entry, s64 *sectors, unsigned flags) argument
885 bch2_trigger_stripe_ptr(struct btree_trans *trans, struct bkey_s_c k, struct extent_ptr_decoded p, enum bch_data_type data_type, s64 sectors, unsigned flags) argument
1079 s64 sectors = (s64) k.k->size * replicas; local
1122 __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev *ca, size_t b, enum bch_data_type type, unsigned sectors) argument
1166 bch2_trans_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev *ca, size_t b, enum bch_data_type type, unsigned sectors) argument
1183 unsigned sectors = local
1272 __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res, u64 sectors, int flags) argument
[all...]
buckets_types.h
38 u64 sectors; /* _compressed_ sectors: */ member in struct:bch_dev_usage::__anon14
42 * sectors?
58 /* all fields are in units of 512 byte sectors: */
80 u64 sectors; member in struct:disk_reservation
movinggc.c
34 size_t sectors; member in struct:buckets_in_flight
68 list->sectors += b.sectors;
94 b->sectors = bch2_bucket_sectors_dirty(*a);
123 list->sectors -= i->bucket.sectors;
149 size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0; local
182 sectors += b.sectors;
191 buckets_in_flight->nr, buckets_in_flight->sectors,
[all...]
alloc_foreground.h
171 * Append pointers to the space we just allocated to @k, and mark @sectors space
176 struct bkey_i *k, unsigned sectors,
182 BUG_ON(sectors > wp->sectors_free);
183 wp->sectors_free -= sectors;
184 wp->sectors_allocated += sectors;
196 BUG_ON(sectors > ob->sectors_free);
197 ob->sectors_free -= sectors;
175 bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp, struct bkey_i *k, unsigned sectors, bool cached) argument
fs-io-pagecache.c
160 unsigned i, sectors = folio_sectors(folio); local
162 BUG_ON(pg_offset >= sectors);
163 BUG_ON(pg_offset + pg_len > sectors);
172 if (i == sectors)
387 unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0; local
393 for (i = 0; i < sectors; i++)
407 for (i = 0; i < sectors; i++)
456 .sectors = disk_sectors
460 res->disk.sectors -= disk_sectors;
474 int i, sectors local
516 unsigned sectors = sectors_to_reserve(&s->s[i], local
657 unsigned i, sectors = folio_sectors(folio); local
728 unsigned i, sectors; local
[all...]
ec.h
28 return DIV_ROUND_UP(le16_to_cpu(s->sectors),
98 unsigned sectors)
103 data_ptr->offset < stripe_ptr->offset + sectors;
117 le16_to_cpu(s->sectors));
131 m->sectors);
96 __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr, const struct bch_extent_ptr *data_ptr, unsigned sectors) argument
/linux-master/block/
badblocks.c
537 sector_t sectors = bad->len; local
542 ((s + sectors) >= BB_OFFSET(p[behind])) &&
552 * sectors from bad->len.
557 sector_t sectors = bad->len; local
563 WARN_ON((s + sectors) < BB_OFFSET(p[behind]));
594 * (from bad table) indexed by 'prev'. The return value is sectors
599 sector_t sectors = bad->len; local
607 merged = min_t(sector_t, sectors, BB_END(p[prev]) - s);
609 merged = min_t(sector_t, sectors, BB_MAX_LEN - BB_LEN(p[prev]));
859 static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors, argument
1076 sector_t sectors = bad->len; local
1122 sector_t sectors = bad->len; local
1134 _badblocks_clear(struct badblocks *bb, sector_t s, int sectors) argument
1274 _badblocks_check(struct badblocks *bb, sector_t s, int sectors, sector_t *first_bad, int *bad_sectors) argument
1407 badblocks_check(struct badblocks *bb, sector_t s, int sectors, sector_t *first_bad, int *bad_sectors) argument
1429 badblocks_set(struct badblocks *bb, sector_t s, int sectors, int acknowledged) argument
1450 badblocks_clear(struct badblocks *bb, sector_t s, int sectors) argument
[all...]
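
The badblocks.c hits above show the merge step: a newly reported bad range can only be absorbed by an existing table entry up to a per-entry length cap, and the remainder needs a new entry. A standalone sketch of that decision, assuming BB_MAX_LEN = 512 and a simplified (unpacked) table entry rather than the kernel's packed 64-bit format:

#include <stdint.h>
#include <stdio.h>

#define BB_MAX_LEN 512ULL   /* assumed cap on one bad-block entry's length */

struct bb_entry {
	uint64_t start;     /* first bad sector */
	uint64_t len;       /* number of bad sectors, <= BB_MAX_LEN */
};

/* How many of @sectors starting at @s can be absorbed by @prev? */
static uint64_t mergeable_sectors(const struct bb_entry *prev,
				  uint64_t s, uint64_t sectors)
{
	uint64_t end = prev->start + prev->len;     /* one past the last bad sector */
	uint64_t room = BB_MAX_LEN - prev->len;     /* growth still allowed */

	if (s < end) {
		/* Overlap: the part already covered counts as merged. */
		uint64_t covered = end - s;
		return covered < sectors ? covered : sectors;
	}
	if (s == end) {
		/* Adjacent: extend the entry up to the length cap. */
		return room < sectors ? room : sectors;
	}
	return 0;                                   /* gap: nothing merges */
}

int main(void)
{
	struct bb_entry prev = { .start = 1000, .len = 500 };

	/* 40 new bad sectors right after the entry: only 12 fit under the cap. */
	printf("%llu\n", (unsigned long long)mergeable_sectors(&prev, 1500, 40));
	return 0;
}
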
/linux-master/include/linux/
blk-integrity.h
84 * @sectors: Size of the bio in 512-byte sectors
87 * sectors but integrity metadata is done in terms of the data integrity
88 * interval size of the storage device. Convert the block layer sectors
92 unsigned int sectors)
94 return sectors >> (bi->interval_exp - 9);
98 unsigned int sectors)
100 return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
165 unsigned int sectors)
171 unsigned int sectors)
91 bio_integrity_intervals(struct blk_integrity *bi, unsigned int sectors) argument
97 bio_integrity_bytes(struct blk_integrity *bi, unsigned int sectors) argument
164 bio_integrity_intervals(struct blk_integrity *bi, unsigned int sectors) argument
170 bio_integrity_bytes(struct blk_integrity *bi, unsigned int sectors) argument
[all...]
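
The blk-integrity.h entry above converts a bio's size in 512-byte sectors into the device's protection-information intervals, then into metadata bytes. A self-contained illustration of the same two conversions, passing the interval exponent and tuple size directly instead of a struct blk_integrity:

#include <stdio.h>

static unsigned int bio_integrity_intervals(unsigned int interval_exp,
					    unsigned int sectors)
{
	/* interval_exp is log2 of the integrity interval size in bytes;
	 * e.g. 12 means 4096-byte intervals, i.e. 8 sectors per interval. */
	return sectors >> (interval_exp - 9);
}

static unsigned int bio_integrity_bytes(unsigned int interval_exp,
					unsigned int tuple_size,
					unsigned int sectors)
{
	return bio_integrity_intervals(interval_exp, sectors) * tuple_size;
}

int main(void)
{
	/* A 1 MiB bio = 2048 sectors, 4096-byte intervals, 8-byte PI tuples. */
	printf("intervals=%u bytes=%u\n",
	       bio_integrity_intervals(12, 2048),
	       bio_integrity_bytes(12, 8, 2048));
	return 0;
}
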
badblocks.h
35 int shift; /* shift from sectors to block size
42 sector_t size; /* in sectors */
51 int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
53 int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
55 int badblocks_clear(struct badblocks *bb, sector_t s, int sectors);
/linux-master/drivers/scsi/
scsicam.c
50 * scsi_partsize - Parse cylinders/heads/sectors from PC partition table
52 * @capacity: size of the disk in sectors
53 * @geom: output in form of [hds, cylinders, sectors]
170 * minimizes the number of sectors that will be unused at the end
179 unsigned long heads, sectors, cylinders, temp; local
182 sectors = 62L; /* Maximize sectors per track */
184 temp = cylinders * sectors; /* Compute divisor for heads */
188 temp = cylinders * heads; /* Compute divisor for sectors */
189 sectors
[all...]
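
The scsicam.c lines above hint at the fallback geometry heuristic: sectors per track start at 62, and heads and sectors are rounded up until cylinders * heads * sectors covers the capacity. A userspace sketch paraphrasing those visible comments (not a verbatim copy of the kernel's setsize()):

#include <stdio.h>

static void approx_chs(unsigned long capacity,          /* in sectors */
		       unsigned long *cyls, unsigned long *heads,
		       unsigned long *secs)
{
	unsigned long cylinders = 1024, sectors = 62, hds, temp;

	temp = cylinders * sectors;          /* divisor for heads */
	hds = capacity / temp;
	if (capacity % temp) {
		hds++;                       /* round heads up */
		temp = cylinders * hds;      /* divisor for sectors */
		sectors = capacity / temp;
		if (capacity % temp) {
			sectors++;           /* round sectors up */
			temp = hds * sectors;
			cylinders = capacity / temp;
		}
	}
	*cyls = cylinders;
	*heads = hds;
	*secs = sectors;
}

int main(void)
{
	unsigned long c, h, s;

	approx_chs(4194304, &c, &h, &s);     /* 2 GiB disk in 512-byte sectors */
	printf("C/H/S = %lu/%lu/%lu\n", c, h, s);
	return 0;
}
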
/linux-master/drivers/target/
target_core_sbc.c
216 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) argument
218 return cmd->se_dev->dev_attrib.block_size * sectors;
279 unsigned int sectors = sbc_get_write_same_sectors(cmd); local
288 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
289 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
290 sectors, cmd->se_dev->dev_attrib.max_write_same_len);
296 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
297 ((cmd->t_task_lba + sectors) > end_lba)) {
298 pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
299 (unsigned long long)end_lba, cmd->t_task_lba, sectors);
667 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect, u32 sectors, bool is_write) argument
773 u32 sectors = 0; local
1270 sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, struct scatterlist *sg, int sg_off) argument
1316 sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, unsigned int ei_lba, struct scatterlist *psg, int psg_off) argument
[all...]
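
The target_core_sbc.c hits above show two simple but easy-to-get-wrong pieces: converting a sector count to bytes with the device block size, and rejecting a WRITE_SAME whose LBA range wraps around 64 bits or runs past the last LBA. A standalone version of those checks, with made-up end_lba and max-length values for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sbc_get_size(uint32_t block_size, uint32_t sectors)
{
	return block_size * sectors;
}

static bool write_same_range_ok(uint64_t lba, uint32_t sectors,
				uint64_t end_lba, uint32_t max_len)
{
	if (sectors > max_len)
		return false;               /* exceeds max_write_same_len */
	if (lba + sectors < lba)
		return false;               /* 64-bit wraparound */
	if (lba + sectors > end_lba)
		return false;               /* runs past the last LBA */
	return true;
}

int main(void)
{
	printf("%u bytes\n", sbc_get_size(512, 2048));             /* 1 MiB */
	printf("ok=%d\n", write_same_range_ok(1000, 4096, 1u << 20, 8192));
	return 0;
}
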
/linux-master/drivers/md/
raid0.c
66 sector_t curr_zone_end, sectors; local
83 sectors = rdev1->sectors;
84 sector_div(sectors, mddev->chunk_sectors);
85 rdev1->sectors = sectors * mddev->chunk_sectors;
95 (unsigned long long)rdev1->sectors,
97 (unsigned long long)rdev2->sectors);
103 if (rdev2->sectors == rdev1->sectors) {
353 raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) argument
595 unsigned sectors; local
[all...]
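
The raid0.c snippet above rounds each member device's size down to a whole number of chunks; sector_div() is the kernel's 64-bit divide that leaves the quotient in place and returns the remainder. A plain userspace equivalent of the rounding:

#include <stdint.h>
#include <stdio.h>

/* Round @sectors down to a multiple of @chunk_sectors. */
static uint64_t chunk_align(uint64_t sectors, uint32_t chunk_sectors)
{
	return (sectors / chunk_sectors) * chunk_sectors;
}

int main(void)
{
	/* 1 TiB plus a few stray sectors, with 512 KiB (1024-sector) chunks. */
	uint64_t dev_sectors = 2147484000ULL;

	printf("usable: %llu sectors\n",
	       (unsigned long long)chunk_align(dev_sectors, 1024));
	return 0;
}
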
raid10.c
345 r10_bio->devs[slot].addr + (r10_bio->sectors);
431 r10_bio->sectors,
532 r10_bio->sectors) &&
729 int sectors = r10_bio->sectors; local
749 if (raid1_should_read_first(conf->mddev, this_sector, sectors))
764 r10_bio->devs[slot].addr + sectors >
771 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
775 if (is_badblock(rdev, dev_sector, sectors,
786 if (!do_balance && sectors > bad_sector
1122 regular_request_wait(struct mddev *mddev, struct r10conf *conf, struct bio *bio, sector_t sectors) argument
1350 sector_t sectors; local
1506 __make_request(struct mddev *mddev, struct bio *bio, int sectors) argument
1833 int sectors = bio_sectors(bio); local
2358 int sectors = r10_bio->sectors; local
2456 int sectors = r10_bio->sectors; local
2567 r10_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, enum req_op op) argument
2599 int sectors = r10_bio->sectors, slot = r10_bio->read_slot; local
2771 int sectors; local
3763 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) argument
4180 raid10_resize(struct mddev *mddev, sector_t sectors) argument
4952 int sectors = r10_bio->sectors; local
[all...]
raid1.c
63 sector_t hi = lo + r1_bio->sectors;
316 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
340 r1_bio->sector + (r1_bio->sectors);
422 r1_bio->sectors,
454 sector_t hi = r1_bio->sector + r1_bio->sectors;
513 if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&
538 pr_debug("raid1: behind end write sectors"
561 sector_t sectors)
565 WARN_ON(sectors == 0);
567 * len is the number of sectors fro
560 align_to_barrier_unit_end(sector_t start_sector, sector_t sectors) argument
1671 sector_t sectors; local
2088 r1_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, blk_opf_t rw) argument
2125 int sectors = r1_bio->sectors; local
2380 int sectors = r1_bio->sectors; local
2487 int sectors; local
3056 raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) argument
3307 raid1_resize(struct mddev *mddev, sector_t sectors) argument
[all...]
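
The align_to_barrier_unit_end() lines in the raid1.c entry above clamp a request so it never crosses the end of the barrier unit containing its start sector. A sketch of that clamping arithmetic; the unit size used here (1 << 17 sectors, i.e. 64 MiB) is an assumption for illustration only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BARRIER_UNIT_SECTORS (1ULL << 17)   /* assumed barrier unit size */

static uint64_t align_to_unit_end(uint64_t start_sector, uint64_t sectors)
{
	uint64_t unit_end = (start_sector / BARRIER_UNIT_SECTORS + 1) *
			    BARRIER_UNIT_SECTORS;
	uint64_t len = unit_end - start_sector;  /* room left in this unit */

	assert(sectors != 0);
	return len < sectors ? len : sectors;
}

int main(void)
{
	/* A 1024-sector write starting 100 sectors before a unit boundary
	 * gets clamped to 100 sectors; the rest is issued as a new request. */
	uint64_t start = BARRIER_UNIT_SECTORS - 100;

	printf("clamped to %llu sectors\n",
	       (unsigned long long)align_to_unit_end(start, 1024));
	return 0;
}
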
/linux-master/drivers/mtd/
ssfdc.c
22 unsigned char sectors; member in struct:ssfdcr_record
317 ssfdc->sectors = 32;
318 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);
320 ((long)ssfdc->sectors * (long)ssfdc->heads));
323 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,
325 (long)ssfdc->sectors);
328 (long)ssfdc->sectors;
411 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);
414 geo->sectors = ssfdc->sectors;
[all...]
/linux-master/arch/powerpc/include/asm/
ps3stor.h
54 u64 start_sector, u64 sectors,
/linux-master/include/linux/mtd/
nand-ecc-mtk.h
32 u32 sectors; member in struct:mtk_ecc_config
/linux-master/drivers/md/bcache/
stats.h
60 int sectors);
/linux-master/drivers/scsi/aic7xxx/
aiclib.h
122 aic_sector_div(sector_t capacity, int heads, int sectors) argument
125 sector_div(capacity, (heads * sectors));

Completed in 524 milliseconds
