/linux-master/fs/btrfs/
extent_io.c:
  167   void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)  [argument]
  170   unsigned long end_index = end >> PAGE_SHIFT;
  184   unsigned long page_ops, u64 start, u64 end)
  189   ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
  190   len = end + 1 - start;
  206   struct page *locked_page, u64 start, u64 end,
  211   pgoff_t end_index = end >> PAGE_SHIFT;
  226   page_ops, start, end);
  235   u64 start, u64 end)
  182   process_one_page(struct btrfs_fs_info *fs_info, struct page *page, struct page *locked_page, unsigned long page_ops, u64 start, u64 end)  [argument]
  205   __process_pages_contig(struct address_space *mapping, struct page *locked_page, u64 start, u64 end, unsigned long page_ops)  [argument]
  233   __unlock_for_delalloc(struct inode *inode, struct page *locked_page, u64 start, u64 end)  [argument]
  248   lock_delalloc_pages(struct inode *inode, struct page *locked_page, u64 start, u64 end)  [argument]
  321   find_lock_delalloc_range(struct inode *inode, struct page *locked_page, u64 *start, u64 *end)  [argument]
  414   extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end, struct page *locked_page, u32 clear_bits, unsigned long page_ops)  [argument]
  510   u64 end;  [member in struct:processed_extent]
  525   endio_readpage_release_extent(struct processed_extent *processed, struct btrfs_inode *inode, u64 start, u64 end, bool uptodate)  [argument]
  606   u64 end;  [local]
  998   const u64 end = start + PAGE_SIZE - 1;  [local]
  1147  u64 end = start + PAGE_SIZE - 1;  [local]
  1165  contiguous_readpages(struct page *pages[], int nr_pages, u64 start, u64 end, struct extent_map **em_cached, struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)  [argument]
  1264  find_next_dirty_byte(struct btrfs_fs_info *fs_info, struct page *page, u64 *start, u64 *end)  [argument]
  1318  u64 end = cur + PAGE_SIZE - 1;  [local]
  1666  unsigned long end;  [local]
  1915  pgoff_t end; /* Inclusive */  [local]
  2043  pgoff_t end; /* Inclusive */  [local]
  2189  extent_write_locked_range(struct inode *inode, struct page *locked_page, u64 start, u64 end, struct writeback_control *wbc, bool pages_dirty)  [argument]
  2301  u64 end = start + folio_size(folio) - 1;  [local]
  2332  u64 end = start + PAGE_SIZE - 1;  [local]
  2370  u64 end = start + PAGE_SIZE - 1;  [local]
  2867  fiemap_process_hole(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo, struct fiemap_cache *cache, struct extent_state **delalloc_cached_state, struct btrfs_backref_share_check_ctx *backref_ctx, u64 disk_bytenr, u64 extent_offset, u64 extent_gen, u64 start, u64 end)  [argument]
  4963  const u64 end = page_offset(page) + PAGE_SIZE;  [local]
  ...

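Most of the extent_io.c matches treat end as an inclusive last byte: callers build it as start + PAGE_SIZE - 1 (or start + folio_size(folio) - 1), page indices come from end >> PAGE_SHIFT, and the byte count is end + 1 - start. A minimal userspace sketch of that convention, assuming a 4 KiB page size; this is illustrative, not btrfs code:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                      /* assumption: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        uint64_t start = 3 * PAGE_SIZE + 100;      /* arbitrary sample offset */
        uint64_t end   = start + PAGE_SIZE - 1;    /* inclusive last byte     */

        uint64_t index     = start >> PAGE_SHIFT;  /* first page touched */
        uint64_t end_index = end >> PAGE_SHIFT;    /* last page touched  */
        uint32_t len       = (uint32_t)(end + 1 - start);

        assert(end + 1 - start != 0 && end + 1 - start < UINT32_MAX);
        printf("pages %llu..%llu, %u bytes\n",
               (unsigned long long)index, (unsigned long long)end_index, len);
        return 0;
    }
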
extent-tree.c:
  797   unsigned long end;  [local]
  883   end = (unsigned long)ei + item_size;
  887   BUG_ON(ptr > end);
  896   while (ptr < end) {
  949   if (unlikely(ptr > end)) {
  1001  unsigned long end;  [local]
  1024  end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
  1025  if (ptr < end - size)
  1027  end - size - ptr);
  1094  unsigned long end;  [local]
  1257  u64 bytes_left, end;  [local]
  1373  u64 end = bytenr + num_bytes;  [local]
  2775  unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end, const bool return_free_space)  [argument]
  2881  u64 end;  [local]
  2965  unsigned long end;  [local]
  6194  btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end)  [argument]
  6221  u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0;  [local]
  6325  u64 end;  [local]
  ...

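Several of the extent-tree.c matches (883, 887, 896, 949) walk the inline references stored in an extent item with a byte cursor bounded by a one-past-the-end pointer, i.e. the while (ptr < end) pattern. The sketch below shows that pattern on an invented record layout (1-byte type, 1-byte payload length); it is not the btrfs inline-ref format, only an illustration of the bounds checks:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct record {
        uint8_t type;
        uint8_t len;          /* payload bytes that follow the header */
        uint8_t payload[];
    };

    static int walk(const uint8_t *buf, size_t item_size)
    {
        const uint8_t *ptr = buf;
        const uint8_t *end = buf + item_size;     /* one past the last byte */

        while (ptr < end) {
            const struct record *r = (const struct record *)ptr;

            if (ptr + sizeof(*r) > end)           /* header must fit in the item */
                return -1;
            if (ptr + sizeof(*r) + r->len > end)  /* so must the payload */
                return -1;
            printf("record type %u, %u payload bytes\n", r->type, r->len);
            ptr += sizeof(*r) + r->len;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t buf[] = { 1, 2, 0xaa, 0xbb, 3, 1, 0xcc };   /* two records */

        return walk(buf, sizeof(buf));
    }
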
/linux-master/tools/testing/selftests/timers/ |
posix_timers.c:
  62    static int check_diff(struct timeval start, struct timeval end)  [argument]
  66    diff = end.tv_usec - start.tv_usec;
  67    diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
  81    struct timeval start, end;  [local]
  123   err = gettimeofday(&end, NULL);
  129   ksft_test_result(check_diff(start, end) == 0, "%s\n", name);
  139   struct timeval start, end;  [local]
  175   err = gettimeofday(&end, NULL);
  181   ksft_test_result(check_diff(start, end) == 0,

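check_diff() in this selftest folds two gettimeofday() samples into a microsecond delta and compares it against the expected timer interval. A standalone sketch of just that conversion; USECS_PER_SEC and the surrounding program are the only assumptions:

    #include <stdio.h>
    #include <sys/time.h>

    #define USECS_PER_SEC 1000000LL

    static long long diff_usec(struct timeval start, struct timeval end)
    {
        long long diff = end.tv_usec - start.tv_usec;

        diff += (long long)(end.tv_sec - start.tv_sec) * USECS_PER_SEC;
        return diff;
    }

    int main(void)
    {
        struct timeval start, end;

        gettimeofday(&start, NULL);
        /* ... the timed work would run here ... */
        gettimeofday(&end, NULL);
        printf("elapsed: %lld us\n", diff_usec(start, end));
        return 0;
    }
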
/linux-master/fs/bcachefs/ |
fs-io.c:
  191   int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)  [argument]
  197   ret = file_write_and_wait_range(file, start, end);
  212   struct bpos end)
  225   for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
  241   pgoff_t index, loff_t start, loff_t end)
  257   * XXX: we're doing two index lookups when we end up reading the
  275   BUG_ON(end <= folio_pos(folio));
  278   end_offset = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);
  361   loff_t start, loff_t end)
  364   start, end);
  210   range_has_data(struct bch_fs *c, u32 subvol, struct bpos start, struct bpos end)  [argument]
  240   __bch2_truncate_folio(struct bch_inode_info *inode, pgoff_t index, loff_t start, loff_t end)  [argument]
  360   bch2_truncate_folios(struct bch_inode_info *inode, loff_t start, loff_t end)  [argument]
  511   u64 end = offset + len;  [local]
  708   u64 end = offset + len;  [local]
  801   quota_reserve_range(struct bch_inode_info *inode, struct quota_res *res, u64 start, u64 end)  [argument]
  ...

fs-io-direct.c:
  108   * end:
  244   u64 end = offset + size;  [local]
  258   if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))

ec.c:
  523   unsigned end = offset + size;  [local]
  526   BUG_ON(end > le16_to_cpu(v->sectors));
  529   end = min_t(unsigned, le16_to_cpu(v->sectors),
  530   round_up(end, csum_granularity));
  533   buf->size = end - offset;
  556   unsigned end = buf->offset + buf->size;  [local]
  557   unsigned len = min(csum_granularity, end - offset);
  559   BUG_ON(offset >= end);
  599   unsigned end = buf->offset + buf->size;  [local]
  604   while (offset < end) {
  ...

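The ec.c matches at 523-533 widen a requested sector window so it lands on checksum-granularity boundaries, clamping the end to the stripe's total sector count. A small sketch of that arithmetic with made-up numbers; the round-down of the start is added here for symmetry and is an assumption, as are the helper names:

    #include <stdio.h>

    static unsigned round_down_u(unsigned x, unsigned a) { return x - (x % a); }
    static unsigned round_up_u(unsigned x, unsigned a)   { return ((x + a - 1) / a) * a; }
    static unsigned min_u(unsigned x, unsigned y)        { return x < y ? x : y; }

    int main(void)
    {
        unsigned sectors = 128, csum_granularity = 8;   /* sample geometry  */
        unsigned offset = 13, size = 20;                /* requested window */
        unsigned end = offset + size;

        offset = round_down_u(offset, csum_granularity);            /* 8  */
        end    = min_u(sectors, round_up_u(end, csum_granularity)); /* 40 */
        printf("checksum window: [%u, %u), %u sectors\n",
               offset, end, end - offset);
        return 0;
    }
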
extents.c:
  254   while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
  262   if (en_l < l_ptrs.end || en_r < r_ptrs.end)
  270   while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
  271   __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
  325   while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
  342   while (en_l < l_ptrs.end) {
  554   union bch_extent_crc *crc = (void *) ptrs.end;
  714   union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));  [local]
  ...

buckets.h:
  382   u64 end = offset + (1 << layout->sb_max_size_bits);
  384   if (!(offset >= b_end || end <= b_offset))

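Line 384 is the usual half-open interval overlap test: [offset, end) and [b_offset, b_end) intersect unless one range ends at or before the other begins. A tiny self-contained version of that predicate; the function name and sample values are invented:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool ranges_overlap(uint64_t a_start, uint64_t a_end,
                               uint64_t b_start, uint64_t b_end)
    {
        return !(a_start >= b_end || a_end <= b_start);
    }

    int main(void)
    {
        printf("%d\n", ranges_overlap(0, 10, 5, 15));   /* 1: overlap       */
        printf("%d\n", ranges_overlap(0, 10, 10, 20));  /* 0: only touching */
        return 0;
    }
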
btree_iter.h:
  634   struct bpos end,
  638   return bch2_btree_iter_peek_upto(iter, end);
  640   if (bkey_gt(iter->pos, end))
  633   bch2_btree_iter_peek_upto_type(struct btree_iter *iter, struct bpos end, unsigned flags)  [argument]

bcachefs.h:
  29    * "cached" data is always dirty. The end result is that we get thin
  667   u64 end;  [member in struct:journal_seq_blacklist_table::journal_seq_blacklist_table_entry]
  686   * end of the buffer - from @nr to @size - the empty space is at @gap.

/linux-master/drivers/vhost/ |
vhost.c:
  2167  u64 start, end, l, min;  [local]
  2181  end = min(u->addr - 1 + u->size, hva - 1 + len);
  2182  l = end - start + 1;
  2379  * or -1U if we're at the end. */
  2388  /* Check they're not leading us off end of descriptors. */

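Lines 2181-2182 compute the length of the intersection of two ranges that are each described by a base and a size, using inclusive last-byte bounds (base - 1 + size). A sketch of that calculation; the function name and sample values are invented:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }
    static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    static uint64_t overlap_len(uint64_t addr, uint64_t size,
                                uint64_t hva, uint64_t len)
    {
        uint64_t start = max_u64(addr, hva);
        uint64_t end   = min_u64(addr - 1 + size, hva - 1 + len); /* inclusive */

        return end >= start ? end - start + 1 : 0;
    }

    int main(void)
    {
        /* [0x1000, 0x2fff] vs [0x1800, 0x27ff] -> 0x1000 bytes of overlap */
        printf("%llu\n",
               (unsigned long long)overlap_len(0x1000, 0x2000, 0x1800, 0x1000));
        return 0;
    }
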
/linux-master/arch/x86/kernel/cpu/ |
topology.c:
  163   unsigned int id, end, cnt = 0;  [local]
  165   /* Calculate the exclusive end */
  166   end = lvlid + (1U << x86_topo_system.dom_shifts[at_level]);
  169   for (id = find_next_bit(map, end, lvlid); id < end; id = find_next_bit(map, end, ++id))

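Lines 163-169 derive an exclusive end for a topology domain's ID range from a per-level shift, then count the set bits inside [lvlid, end) with find_next_bit(). A userspace sketch of the same loop over a single 64-bit word; the helper below stands in for find_next_bit() and is not the kernel's implementation:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int find_next_bit64(uint64_t map, unsigned int size,
                                        unsigned int off)
    {
        for (; off < size; off++)
            if (map & (UINT64_C(1) << off))
                return off;
        return size;              /* nothing found: return the exclusive end */
    }

    int main(void)
    {
        uint64_t map = 0xf0f0;    /* bits 4-7 and 12-15 set */
        unsigned int lvlid = 4, shift = 3;
        unsigned int end = lvlid + (1U << shift);   /* exclusive end: 12 */
        unsigned int id, cnt = 0;

        for (id = find_next_bit64(map, end, lvlid); id < end;
             id = find_next_bit64(map, end, id + 1))
            cnt++;
        printf("%u IDs set in the domain\n", cnt);  /* prints 4 */
        return 0;
    }
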
/linux-master/fs/zonefs/ |
super.c:
  285   * a write operation partially failed and data was writen at the end
  395   * end up in a deadlock on the inode truncate mutex, while the latter
  941   struct blk_zone *zone, *next, *end;  [local]
  960   end = zd->zones + bdev_nr_zones(sb->s_bdev);
  961   for (zone = &zd->zones[1]; zone < end; zone = next) {
  980   for (; next < end; next++) {

/linux-master/fs/ceph/ |
mds_client.c:
  75    static int parse_reply_info_quota(void **p, void *end,  [argument]
  81    ceph_decode_8_safe(p, end, struct_v, bad);
  82    ceph_decode_8_safe(p, end, struct_compat, bad);
  87    ceph_decode_32_safe(p, end, struct_len, bad);
  88    ceph_decode_need(p, end, struct_len, bad);
  89    end = *p + struct_len;
  90    ceph_decode_64_safe(p, end, info->max_bytes, bad);
  91    ceph_decode_64_safe(p, end, info->max_files, bad);
  92    *p = end;
  101   static int parse_reply_info_in(void **p, void *end,  [argument]
  282   parse_reply_info_dir(void **p, void *end, struct ceph_mds_reply_dirfrag **dirfrag, u64 features)  [argument]
  312   parse_reply_info_lease(void **p, void *end, struct ceph_mds_reply_lease **lease, u64 features, u32 *altname_len, u8 **altname)  [argument]
  364   parse_reply_info_trace(void **p, void *end, struct ceph_mds_reply_info_parsed *info, u64 features)  [argument]
  410   parse_reply_info_readdir(void **p, void *end, struct ceph_mds_request *req, u64 features)  [argument]
  551   parse_reply_info_filelock(void **p, void *end, struct ceph_mds_reply_info_parsed *info, u64 features)  [argument]
  572   ceph_parse_deleg_inos(void **p, void *end, struct ceph_mds_session *s)  [argument]
  637   ceph_parse_deleg_inos(void **p, void *end, struct ceph_mds_session *s)  [argument]
  664   parse_reply_info_create(void **p, void *end, struct ceph_mds_reply_info_parsed *info, u64 features, struct ceph_mds_session *s)  [argument]
  700   parse_reply_info_getvxattr(void **p, void *end, struct ceph_mds_reply_info_parsed *info, u64 features)  [argument]
  725   parse_reply_info_extra(void **p, void *end, struct ceph_mds_request *req, u64 features, struct ceph_mds_session *s)  [argument]
  752   void *p, *end;  [local]
  1462  encode_supported_features(void **p, void *end)  [argument]
  1493  encode_metric_spec(void **p, void *end)  [argument]
  1549  void *p, *end;  [local]
  2985  void *p, *end;  [local]
  4016  void *end = p + msg->front.iov_len;  [local]
  4074  __decode_session_metadata(void **p, void *end, bool *blocklisted)  [argument]
  4113  void *end = p + msg->front.iov_len;  [local]
  5775  void *end = p + msg->front.iov_len;  [local]
  5849  void *end = p + msg->front.iov_len;  [local]
  ...

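Nearly every parser above takes a cursor (void **p) and a hard limit (void *end), and the ceph_decode_*_safe() macros refuse to read past it, jumping to an error label instead. The sketch below mimics that shape with an invented DECODE_NEED macro and a host-endian memcpy; it is not the real ceph decode machinery, only an illustration of the bounds-checked cursor:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bail out to 'bad' if fewer than n bytes remain between *p and end. */
    #define DECODE_NEED(p, end, n, bad)                                   \
        do { if ((char *)*(p) + (n) > (char *)(end)) goto bad; } while (0)

    static int parse_quota(void **p, void *end,
                           uint64_t *max_bytes, uint64_t *max_files)
    {
        DECODE_NEED(p, end, 16, bad);
        memcpy(max_bytes, *p, 8);  *p = (char *)*p + 8;  /* host-endian for brevity */
        memcpy(max_files, *p, 8);  *p = (char *)*p + 8;
        return 0;
    bad:
        return -1;                 /* truncated or corrupt message */
    }

    int main(void)
    {
        uint64_t buf[2] = { 1024, 10 }, max_bytes, max_files;
        void *p = buf, *end = buf + 2;

        if (!parse_quota(&p, end, &max_bytes, &max_files))
            printf("max_bytes=%llu max_files=%llu\n",
                   (unsigned long long)max_bytes, (unsigned long long)max_files);
        return 0;
    }
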
caps.c:
  509   * (Re)queue cap at the end of the delayed cap release list.
  864   * Move a cap to the end of the LRU (oldest caps at list head, newest
  2076  * the exclusive cap. So that MDS does not end up
  2481  int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)  [argument]
  2493  ret = file_write_and_wait_range(file, start, end);
  4269  static int parse_fscrypt_fields(void **p, void *end,  [argument]
  4274  ceph_decode_32_safe(p, end, extra->fscrypt_auth_len, bad);
  4276  ceph_decode_need(p, end, extra->fscrypt_auth_len, bad);
  4281  ceph_decode_copy_safe(p, end, extra->fscrypt_auth,
  4285  ceph_decode_32_safe(p, end, le
  4296  parse_fscrypt_fields(void **p, void *end, struct cap_extra_info *extra)  [argument]
  4337  void *p, *end;  [local]
  ...

addr.c:
  171   loff_t end = rreq->start + rreq->len, new_end;  [local]
  196   new_end = min(round_up(end, lo->stripe_unit), rreq->i_size);
  197   if (new_end > end && new_end <= rreq->start + max_len)
  216   /* Truncate the extent at the end of the current block */
  609   u64 end = i_size_read(inode);  [local]
  619   end = capsnap->size;
  627   if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
  628   end = ceph_fscrypt_page_offset(page) + thp_size(page);
  629   ret = end > start ? end
  930   pgoff_t index, start_index, end = -1;  [local]
  ...

/linux-master/drivers/pci/ |
quirks.c:
  152   struct pci_fixup *end)
  156   for (; f < end; f++)
  196   struct pci_fixup *start, *end;  [local]
  201   end = __end_pci_fixups_early;
  206   end = __end_pci_fixups_header;
  213   end = __end_pci_fixups_final;
  218   end = __end_pci_fixups_enable;
  223   end = __end_pci_fixups_resume;
  228   end = __end_pci_fixups_resume_early;
  233   end
  151   pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)  [argument]
  2992  int end = 0;  [local]
  ...

/linux-master/drivers/iommu/ |
mtk_iommu.c:
  834   size_t length = gather->end - gather->start + 1;

/linux-master/drivers/iommu/intel/ |
svm.c:
  199   unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);  [local]
  201   while (start < end) {
  242   unsigned long start, unsigned long end)
  246   if (start == 0 && end == -1UL) {
  252   (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
  260   /* This might end up being called from exit_mmap(), *before* the page
  269   * page) so that we end up taking a fault that the hardware really
  240   intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end)  [argument]

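Line 252 converts a byte range into the number of pages to invalidate by rounding the length up to a whole page. A sketch of that round-up division, assuming a page-aligned start and a 4 KiB page; the constants are illustrative, not the VT-d values:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    static uint64_t pages_covering(uint64_t start, uint64_t end)
    {
        return (end - start + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)pages_covering(0, 4096));           /* 1 */
        printf("%llu\n", (unsigned long long)pages_covering(0, 4097));           /* 2 */
        printf("%llu\n", (unsigned long long)pages_covering(8192, 8192 + 5000)); /* 2 */
        return 0;
    }
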
iommu.c:
  187   u64 end_address; /* reserved end address */
  581   pbridge->subordinate->busn_res.end >= pdev->bus->number)
  2157  * end of the mapping, if the trailing size is not enough to
  2302  unsigned long long end = rmrr->end_address;  [local]
  2304  if (WARN_ON(end < start ||
  2305  end >> agaw_to_width(si_domain->agaw)))
  2310  mm_to_dma_pfn_end(end >> PAGE_SHIFT));
  4035  u64 end;  [local]
  4038  end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
  4039  if (end < max_add
  4800  unsigned long end = iova + size - 1;  [local]
  ...

/linux-master/drivers/iommu/amd/ |
init.c:
  487   static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)  [argument]
  489   if (!request_mem_region(address, end, "amd_iommu")) {
  491   address, end);
  496   return (u8 __iomem *)ioremap(address, end);
  553   u8 *p = (void *)h, *end = (void *)h;  [local]
  565   end += h->length;
  567   while (p < end) {
  587   WARN_ON(p != end);
  615   u8 *p = (u8 *)table, *end = (u8 *)table;  [local]
  621   end
  1346  u8 *end = p, flags = 0;  [local]
  1924  u8 *p = (u8 *)table, *end = (u8 *)table;  [local]
  2591  u8 *p = (u8 *)table, *end = (u8 *)table;  [local]
  ...

/linux-master/drivers/ata/ |
libata-core.c:
  5742  void **end = (void **)&ops->inherits;  [local]
  5753  for (pp = begin; pp < end; pp++, inherit++)
  5758  for (pp = begin; pp < end; pp++)

ahci.c:
  696   char *param, *end, *str, *mask_s;  [local]
  717   end = param + strlen(param);
  718   while (param && param < end && *param) {
  726   if (param >= end)

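The ahci.c matches compute end once from strlen() and then walk the mutable parameter string until the cursor reaches it, one comma-separated token at a time. A rough userspace sketch of that loop; the token splitting with strchr() and the sample option string are assumptions, not the driver's actual mask syntax:

    #include <stdio.h>
    #include <string.h>

    static void parse_param(char *param)
    {
        char *end = param + strlen(param);

        while (param && param < end && *param) {
            char *comma = strchr(param, ',');

            if (comma)
                *comma = '\0';             /* terminate the current token */
            printf("token: %s\n", param);
            param = comma ? comma + 1 : end;
        }
    }

    int main(void)
    {
        char buf[] = "60sec,noncq,3:0x1";   /* made-up option string */

        parse_param(buf);
        return 0;
    }
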
/linux-master/block/ |
blk-core.c:
  541   * Check whether this bio extends beyond the end of the device or partition.
  553   pr_info_ratelimited("%s: attempt to access beyond end of device\n"
  985   void update_io_ticks(struct block_device *part, unsigned long now, bool end)  [argument]
  992   __part_stat_add(part, io_ticks, end ? now - stamp : 1);
  1130  * pending I/O should the task end up blocking between blk_start_plug() and
  1133  * for a memory allocation, memory reclaim could end up wanting to free a
  1205  * blk_finish_plug - mark the end of a batch of submitted I/O

/linux-master/arch/arm64/include/asm/ |
tlbflush.h:
  203   * flush_tlb_range(vma, start, end)
  204   * Invalidate the virtual-address range '[start, end)' on all
  210   * flush_tlb_kernel_range(start, end)
  211   * Same as flush_tlb_range(..., start, end), but applies to
  234   * __flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
  235   * Invalidate the virtual-address range '[start, end)' on all
  338   * completion at the end in arch_tlbbatch_flush(). Since we've already issued
  428   unsigned long start, unsigned long end,
  435   end = round_up(end, strid
  427   __flush_tlb_range_nosync(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long stride, bool last_level, int tlb_level)  [argument]
  464   __flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long stride, bool last_level, int tlb_level)  [argument]
  474   flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)  [argument]
  486   flush_tlb_kernel_range(unsigned long start, unsigned long end)  [argument]
  ...

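The documentation block and the __flush_tlb_range_nosync() signature above describe flushing a half-open range [start, end) in steps of stride, and the truncated line 435 shows the end being rounded up to that stride (the start is typically rounded down the same way before the walk). A sketch of that alignment, assuming a power-of-two stride; the values and helper names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Both helpers require a power-of-two alignment. */
    static uint64_t round_down_p2(uint64_t x, uint64_t a) { return x & ~(a - 1); }
    static uint64_t round_up_p2(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }

    int main(void)
    {
        uint64_t stride = 1UL << 12;              /* 4 KiB pages               */
        uint64_t start  = 0x1234, end = 0x5678;   /* arbitrary unaligned range */

        start = round_down_p2(start, stride);     /* 0x1000 */
        end   = round_up_p2(end, stride);         /* 0x6000 */
        for (uint64_t addr = start; addr < end; addr += stride)
            printf("invalidate VA 0x%llx\n", (unsigned long long)addr);
        return 0;
    }
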