Searched refs:end (Results 1 - 25 of 3740) sorted by last modified time

/linux-master/drivers/acpi/x86/
s2idle.c
186 acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
296 acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
646 .end = acpi_s2idle_end,
/linux-master/drivers/acpi/
cppc_acpi.c
269 goto end;
304 goto end;
325 goto end;
339 end:
394 goto end;
406 goto end;
411 goto end;
416 goto end;
423 goto end;
427 end
[all...]
/linux-master/arch/loongarch/include/asm/
tlb.h
151 flush_tlb_range(&vma, tlb->start, tlb->end);
/linux-master/arch/arc/mm/
tlb.c
202 * @start is inclusive, while @end is exclusive
209 unsigned long end)
214 /* If range @start to @end is more than 32 TLB entries deep,
221 if (unlikely((end - start) >= PAGE_SIZE * 32)) {
228 * loop end condition below, w/o need for aligning @end to end
236 while (start < end) {
246 * @start, @end interpreted as kvaddr
248 * @start,@end alon
208 local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
251 local_flush_tlb_kernel_range(unsigned long start, unsigned long end) argument
354 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
367 flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
380 flush_tlb_kernel_range(unsigned long start, unsigned long end) argument
537 local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
[all...]
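The arc/mm/tlb.c hits above sketch a range-flush policy: @start is inclusive, @end is exclusive, and a range deeper than 32 TLB entries falls back to a full flush rather than per-page invalidations. Below is a minimal user-space model of that decision logic, with stubbed flush primitives and an assumed 4 KiB page size; it is an illustration of the heuristic, not the kernel's implementation.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stubs standing in for the architecture's real invalidation primitives. */
static void flush_one_page(unsigned long vaddr)
{
        printf("flush page at 0x%lx\n", vaddr);
}

static void flush_entire_tlb(void)
{
        printf("flush entire TLB\n");
}

/*
 * Model of the heuristic described in the tlb.c comments: if the range
 * covers 32 or more pages, one full flush beats dozens of single-entry
 * invalidations; otherwise walk the range page by page. Because the loop
 * condition is start < end, @end itself needs no alignment.
 */
static void flush_tlb_range_model(unsigned long start, unsigned long end)
{
        if (end - start >= PAGE_SIZE * 32) {
                flush_entire_tlb();
                return;
        }

        start &= ~(PAGE_SIZE - 1);
        while (start < end) {
                flush_one_page(start);
                start += PAGE_SIZE;
        }
}

int main(void)
{
        flush_tlb_range_model(0x1000, 0x4000);       /* three pages, flushed individually */
        flush_tlb_range_model(0, PAGE_SIZE * 100);   /* large range, full flush */
        return 0;
}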
/linux-master/fs/btrfs/tests/
extent-map-tests.c
595 u64 start, end; local
645 end = (3 * SZ_4K) - 1;
646 btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false); local
653 end = SZ_16K + SZ_4K - 1;
654 btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false); local
661 end = SZ_32K - 1;
662 btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false); local
669 end = SZ_64K - 1;
670 btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false); local
734 * true would mess up the start/end calculation
[all...]
/linux-master/fs/btrfs/
scrub.c
2313 /* Calculate the logical end of the stripe */
2457 struct btrfs_device *scrub_dev, u64 start, u64 end)
2517 if (found_key.offset >= end)
2893 u64 end, struct btrfs_scrub_progress *progress,
3016 ret = scrub_enumerate_chunks(sctx, dev, start, end);
2456 scrub_enumerate_chunks(struct scrub_ctx *sctx, struct btrfs_device *scrub_dev, u64 start, u64 end) argument
2892 btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, u64 end, struct btrfs_scrub_progress *progress, int readonly, int is_dev_replace) argument
inode.c
138 u64 end, struct writeback_control *wbc,
601 * Otherwise we could end up racing with unlink.
659 drop_args.end = fs_info->sectorsize;
718 u64 end; member in struct:async_chunk
758 u64 end)
796 !PAGE_ALIGNED(end + 1))
812 return btrfs_compress_heuristic(&inode->vfs_inode, start, end);
817 u64 start, u64 end, u64 num_bytes, u32 small_write)
821 (start > 0 || end + 1 < inode->disk_i_size))
847 u64 end local
757 inode_need_compress(struct btrfs_inode *inode, u64 start, u64 end) argument
816 inode_should_defrag(struct btrfs_inode *inode, u64 start, u64 end, u64 num_bytes, u32 small_write) argument
1077 u64 end = async_extent->start + async_extent->ram_size - 1; local
1119 u64 end = async_extent->start + async_extent->ram_size - 1; local
1283 cow_file_range(struct btrfs_inode *inode, struct page *locked_page, u64 start, u64 end, u64 *done_offset, bool keep_locked, bool no_inline) argument
1622 run_delalloc_compressed(struct btrfs_inode *inode, struct page *locked_page, u64 start, u64 end, struct writeback_control *wbc) argument
1715 run_delalloc_cow(struct btrfs_inode *inode, struct page *locked_page, u64 start, u64 end, struct writeback_control *wbc, bool pages_dirty) argument
1759 fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page, const u64 start, const u64 end) argument
1837 u64 end; member in struct:can_nocow_file_extent_args
1975 run_delalloc_nocow(struct btrfs_inode *inode, struct page *locked_page, const u64 start, const u64 end) argument
2247 should_nocow(struct btrfs_inode *inode, u64 start, u64 end) argument
2262 btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page, u64 start, u64 end, struct writeback_control *wbc) argument
2646 const u64 end = start + len - 1; local
2679 btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, unsigned int extra_bits, struct extent_state **cached_state) argument
3051 u64 start, end; local
3330 u64 end = file_offset + bv->bv_len - 1; local
5184 u64 end; local
8210 u64 end; local
9692 u64 end = start + num_bytes - 1; local
9891 btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end) argument
10317 u64 start, end; local
10986 btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end) argument
[all...]
extent_map.c
71 /* Do the math around the end of an extent, handling wrapping. */
85 u64 end = range_end(em->start, em->len); local
108 if (end > entry->start && em->start < extent_map_end(entry))
118 if (end > entry->start && em->start < extent_map_end(entry))
411 u64 end = range_end(start, len); local
423 if (strict && !(end > em->start && start < extent_map_end(em)))
537 u64 end; local
553 end = next ? next->start : extent_map_end(em);
554 end = min_t(u64, end, extent_map_en
684 btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, bool skip_pinned) argument
913 const u64 end = new_em->start + new_em->len - 1; local
[all...]
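The extent_map.c lines point at two related idioms: an exclusive range end computed "handling wrapping" (the comment at line 71) and a strict overlap test of the form end > entry->start && start < extent_map_end(entry). Here is a small stand-alone sketch of both, using illustrative names rather than the btrfs helpers:

#include <stdint.h>
#include <stdio.h>

/*
 * Exclusive end of [start, start + len): if the addition wraps past
 * U64_MAX, saturate instead of wrapping around to a small value.
 */
static uint64_t range_end(uint64_t start, uint64_t len)
{
        if (start + len < start)
                return UINT64_MAX;
        return start + len;
}

/* Two half-open ranges overlap iff each one starts before the other ends. */
static int ranges_overlap(uint64_t a_start, uint64_t a_len,
                          uint64_t b_start, uint64_t b_len)
{
        uint64_t a_end = range_end(a_start, a_len);
        uint64_t b_end = range_end(b_start, b_len);

        return a_end > b_start && a_start < b_end;
}

int main(void)
{
        printf("%d\n", ranges_overlap(0, 8192, 4096, 8192));    /* 1: 4 KiB shared */
        printf("%d\n", ranges_overlap(0, 4096, 4096, 4096));    /* 0: merely touching */
        printf("%llu\n", (unsigned long long)range_end(UINT64_MAX - 10, 100)); /* saturates */
        return 0;
}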
backref.c
1024 unsigned long end; local
1048 end = (unsigned long)ei + item_size;
1057 BUG_ON(ptr > end);
1064 while (ptr < end) {
1733 * the current while iterating. The process stops when we reach the end of the
2293 unsigned long end; local
2319 end = (unsigned long)ei + item_size;
2327 WARN_ON(*ptr > end);
2328 if (*ptr == end)
2795 * the returned pointer must be freed with free_ipath() in the end
[all...]
/linux-master/virt/kvm/
pfncache.c
26 unsigned long end)
36 gpc->uhva >= start && gpc->uhva < end) {
49 gpc->uhva >= start && gpc->uhva < end)
25 gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, unsigned long end) argument
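The pfncache.c fragments test whether a cached host virtual address sits inside a range being invalidated, using the half-open convention uhva >= start && uhva < end. A tiny illustrative check (hypothetical names, not KVM's API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Half-open range [start, end): an address equal to @end is not affected. */
static bool addr_in_invalidated_range(uint64_t uhva, uint64_t start, uint64_t end)
{
        return uhva >= start && uhva < end;
}

int main(void)
{
        printf("%d\n", addr_in_invalidated_range(0x2000, 0x1000, 0x3000));      /* 1 */
        printf("%d\n", addr_in_invalidated_range(0x3000, 0x1000, 0x3000));      /* 0 */
        return 0;
}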
kvm_mm.h
29 unsigned long end);
33 unsigned long end)
31 gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start, unsigned long end) argument
kvm_main.c
354 * and smp_mb in walk_shadow_page_lockless_begin/end.
552 u64 end; member in struct:kvm_mmu_notifier_range
606 if (WARN_ON_ONCE(range->end <= range->start))
621 range->start, range->end - 1) {
626 hva_end = min_t(unsigned long, range->end,
643 gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
672 unsigned long end,
679 .end = end,
692 unsigned long end,
670 kvm_handle_hva_range(struct mmu_notifier *mn, unsigned long start, unsigned long end, union kvm_mmu_notifier_arg arg, gfn_handler_t handler) argument
690 kvm_handle_hva_range_no_flush(struct mmu_notifier *mn, unsigned long start, unsigned long end, gfn_handler_t handler) argument
766 kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end) argument
905 kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) argument
916 kvm_mmu_notifier_clear_young(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) argument
1996 kvm_check_memslot_overlap(struct kvm_memslots *slots, int id, gfn_t start, gfn_t end) argument
2438 kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end, unsigned long attrs) argument
2542 kvm_vm_set_mem_attributes(struct kvm *kvm, gfn_t start, gfn_t end, unsigned long attributes) argument
2601 gfn_t start, end; local
3793 update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start, ktime_t end, bool success) argument
[all...]
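The kvm_main.c lines show an hva-range notifier being clamped to a memslot and then converted into a gfn range, with PAGE_SIZE - 1 added to the exclusive hva end so a partially covered last page is not dropped. Below is a simplified model of that conversion; the struct and helper names are made up, and the real KVM code handles many more cases.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Simplified memslot: gfns starting at base_gfn map to host VA userspace_addr. */
struct memslot_model {
        uint64_t base_gfn;
        unsigned long userspace_addr;
        unsigned long npages;
};

static uint64_t hva_to_gfn_model(unsigned long hva, const struct memslot_model *slot)
{
        return slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
}

/*
 * Clamp [start, end) to the slot, then convert to an exclusive gfn range.
 * Adding PAGE_SIZE - 1 to the exclusive hva end before the shift keeps a
 * partially covered final page inside the resulting gfn range.
 */
static void hva_range_to_gfn_range(unsigned long start, unsigned long end,
                                   const struct memslot_model *slot,
                                   uint64_t *gfn_start, uint64_t *gfn_end)
{
        unsigned long slot_end = slot->userspace_addr + slot->npages * PAGE_SIZE;
        unsigned long hva_start = start > slot->userspace_addr ? start : slot->userspace_addr;
        unsigned long hva_end = end < slot_end ? end : slot_end;

        *gfn_start = hva_to_gfn_model(hva_start, slot);
        *gfn_end = hva_to_gfn_model(hva_end + PAGE_SIZE - 1, slot);
}

int main(void)
{
        struct memslot_model slot = {
                .base_gfn = 0x100,
                .userspace_addr = 0x7f0000000000UL,
                .npages = 512,
        };
        uint64_t s, e;

        /* Invalidate 1.5 pages at the slot base: covers gfns 0x100 and 0x101. */
        hva_range_to_gfn_range(slot.userspace_addr,
                               slot.userspace_addr + PAGE_SIZE + PAGE_SIZE / 2,
                               &slot, &s, &e);
        printf("gfn range [0x%llx, 0x%llx)\n", (unsigned long long)s, (unsigned long long)e);
        return 0;
}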
/linux-master/kernel/sched/
sched.h
1937 * by attaching extra space to the end of the structure,
2648 * acquire rq lock instead of rq_lock(). So at the end of these two functions
3360 goto end;
3395 end:
/linux-master/fs/smb/server/
vfs.c
324 * @end: lock end byte offset
329 static int check_lock_range(struct file *filp, loff_t start, loff_t end, argument
342 if (flock->fl_end >= start && end >= flock->fl_start) {
1009 loff_t maxbytes = (u64)inode->i_sb->s_maxbytes, end; local
1029 end = start + length;
1030 while (start < end && *out_count < in_count) {
1038 if (extent_start >= end)
1052 cpu_to_le64(min(extent_end, end) - extent_start);
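The ksmbd vfs.c hit at line 342 is the classic inclusive-range conflict test for byte-range locks: with [fl_start, fl_end] both inclusive, two locks collide when each one's end is at or past the other's start (contrast with the half-open checks earlier). A stand-alone sketch with illustrative types, not struct file_lock:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct byte_range {
        int64_t start;
        int64_t end;    /* inclusive */
};

/* Inclusive ranges conflict iff each end is at or beyond the other's start. */
static bool lock_ranges_conflict(const struct byte_range *a, const struct byte_range *b)
{
        return a->end >= b->start && b->end >= a->start;
}

int main(void)
{
        struct byte_range held = { .start = 0, .end = 4095 };
        struct byte_range want = { .start = 4095, .end = 8191 };

        /* Sharing the single byte 4095 is already a conflict with inclusive ends. */
        printf("conflict: %d\n", lock_ranges_conflict(&held, &want));
        return 0;
}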
smb2pdu.c
7126 lock->end = flock->fl_end;
7128 if (lock->start == lock->end)
7229 "the end offset(%llx) is smaller than the start offset(%llx)\n",
7296 cmp_lock->end == smb_lock->end &&
7322 cmp_lock->start < smb_lock->end) {
7331 smb_lock->start < cmp_lock->end) {
7339 cmp_lock->end > smb_lock->start) ||
7340 (cmp_lock->start < smb_lock->end &&
7341 cmp_lock->end >
[all...]
/linux-master/fs/smb/client/
smb2pdu.c
927 /* check that offset is not beyond end of SMB */
2283 u8 *end = beg + le32_to_cpu(cc->DataLength); local
2293 sid_len = posix_info_sid_size(sid, end);
2301 sid_len = posix_info_sid_size(sid, end);
4429 * smbd_buffer_descriptor_v1 to the end of read request
4883 * smbd_buffer_descriptor_v1 to the end of write request
5077 int posix_info_sid_size(const void *beg, const void *end) argument
5082 if (beg + 1 > end)
5090 if (beg + total > end)
5096 int posix_info_parse(const void *beg, const void *end, argument
5161 posix_info_extra_size(const void *beg, const void *end) argument
[all...]
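The client-side smb2pdu.c lines (posix_info_sid_size and friends) show defensive parsing of variable-length data: every field is checked against the end pointer before it is read. The sketch below reuses that beg/end pattern on a made-up record layout; it is not the SMB POSIX info wire format.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Parse one variable-length record out of [beg, end): validate that each
 * field fits before dereferencing it, and return the record size, or -1 if
 * the buffer is truncated.
 */
static int parse_record_size(const uint8_t *beg, const uint8_t *end)
{
        size_t total;

        /* Need at least the one-byte count header. */
        if (beg + 1 > end)
                return -1;

        /* The header byte says how many 4-byte elements follow. */
        total = 1 + (size_t)beg[0] * 4;
        if (beg + total > end)
                return -1;

        return (int)total;
}

int main(void)
{
        uint8_t buf[] = { 2, 0, 0, 0, 0, 0, 0, 0, 0 };  /* header + two elements */

        printf("%d\n", parse_record_size(buf, buf + sizeof(buf)));      /* 9 */
        printf("%d\n", parse_record_size(buf, buf + 4));                /* -1: truncated */
        return 0;
}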
misc.c
633 * break handler. The matching put is done at the end of the
1111 const char *end; local
1117 end = unc;
1119 while (*end && !(*end == '\\' || *end == '/'))
1120 end++;
1123 *len = end - unc;
1337 * end up filling all the DFS cache and thus potentially
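The misc.c lines scan a UNC string for the next path separator and derive the component length from the pointer difference. A minimal stand-alone version of that scan (simplified: the real helper also deals with the leading \\ of the UNC path and rejects empty hosts):

#include <stddef.h>
#include <stdio.h>

/* Length of the component before the first '\\', '/', or the terminating NUL. */
static size_t unc_component_len(const char *unc)
{
        const char *end = unc;

        while (*end && !(*end == '\\' || *end == '/'))
                end++;

        return (size_t)(end - unc);
}

int main(void)
{
        printf("%zu\n", unc_component_len("server\\share"));    /* 6 */
        printf("%zu\n", unc_component_len("server"));           /* 6: hit the NUL */
        return 0;
}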
cifsfs.c
1071 * way so we must seek to end on non-oplocked files by
1238 * pos resides or the folio that overlaps the end of a range unless that folio
1321 * Advance the EOF marker after the flush above to the end of the range
1333 /* Flush the folios at either end of the destination range to prevent
1431 * Advance the EOF marker after the flush above to the end of the range
1442 /* Flush the folios at either end of the destination range to prevent
1497 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync) argument
/linux-master/fs/bcachefs/
H A Dsuper.c327 * Block new foreground-end write operations from starting - any new
466 * don't read the journal, so the first journal write may end up
1594 struct bpos end = POS(ca->dev_idx, U64_MAX); local
1601 ret = bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
1603 bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
1605 bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
1607 bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
1609 bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
1611 bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
sb-members.c
453 u64 end = start + sectors; local
455 int resize = ilog2(roundup_pow_of_two(end)) - (m->btree_bitmap_shift + 6);
467 (u64) bit << m->btree_bitmap_shift < end;
sb-members.h
236 u64 end = start + sectors; local
238 if (end > 64ULL << ca->mi.btree_bitmap_shift)
242 (u64) bit << ca->mi.btree_bitmap_shift < end;
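Both sb-members hits compute end = start + sectors and then compare bit << btree_bitmap_shift against it, which suggests a coarse 64-bit bitmap where bit i summarizes the sector range [i << shift, (i + 1) << shift). The following is a simplified model of that idea with invented names, not bcachefs's actual members code; a range beyond what 64 bits can describe is treated conservatively as marked.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sector_bitmap {
        uint64_t bits;
        unsigned shift;         /* log2 of sectors covered per bit */
};

/* Set every bit whose chunk intersects [start, start + sectors). */
static void bitmap_mark_range(struct sector_bitmap *bm, uint64_t start, uint64_t sectors)
{
        uint64_t end = start + sectors;

        for (uint64_t bit = start >> bm->shift;
             bit < 64 && (bit << bm->shift) < end;
             bit++)
                bm->bits |= 1ULL << bit;
}

/* True if any chunk intersecting [start, start + sectors) is marked. */
static bool bitmap_range_marked(const struct sector_bitmap *bm, uint64_t start, uint64_t sectors)
{
        uint64_t end = start + sectors;

        /* Ranges past what 64 bits can describe are conservatively "marked". */
        if (end > 64ULL << bm->shift)
                return true;

        for (uint64_t bit = start >> bm->shift;
             (bit << bm->shift) < end;
             bit++)
                if (bm->bits & (1ULL << bit))
                        return true;

        return false;
}

int main(void)
{
        struct sector_bitmap bm = { .bits = 0, .shift = 10 };   /* 1024 sectors per bit */

        bitmap_mark_range(&bm, 2048, 512);                      /* marks bit 2 */
        printf("%d\n", bitmap_range_marked(&bm, 2048, 16));     /* 1 */
        printf("%d\n", bitmap_range_marked(&bm, 0, 1024));      /* 0 */
        return 0;
}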
journal_io.c
228 static void journal_entry_null_range(void *start, void *end) argument
232 for (entry = start; entry != end; entry = vstruct_next(entry))
324 "extends past end of journal entry")) {
516 le64_to_cpu(bl_entry->end),
519 "invalid journal seq blacklist entry: start > end")) {
533 prt_printf(out, "start=%llu end=%llu",
535 le64_to_cpu(bl->end));
849 "journal entry extends past end of jset")) {
991 end = offset + ca->mi.bucket_size; local
998 while (offset < end) {
1812 struct jset_entry *start, *end; local
[all...]
recovery.c
390 le64_to_cpu(bl_entry->end) + 1);
sb-clean.c
33 bch_err(c, "journal entry (u64s %u) overran end of superblock clean section (u64s %u) by %zu",
57 struct jset_entry *entry, *start, *end; local
61 end = vstruct_end(&clean->field);
64 end = vstruct_last(j);
67 for (entry = start; entry < end; entry = vstruct_next(entry))
183 struct jset_entry **end,
197 container_of(jset_entry_init(end, sizeof(*u)),
207 container_of(jset_entry_init(end, sizeof(*u)),
217 container_of(jset_entry_init(end, sizeof(*u)),
230 container_of(jset_entry_init(end, sizeo
182 bch2_journal_super_entries_add_common(struct bch_fs *c, struct jset_entry **end, u64 journal_seq) argument
[all...]
fs.c
967 struct bpos end = POS(ei->v.i_ino, (start + len) >> 9); local
996 (k = bch2_btree_iter_peek_upto(&iter, end)).k &&

Completed in 868 milliseconds
