/linux-master/net/core/skbuff.c
    2952  goto fault;
    3006  goto fault;
    3018  fault:
    3361  goto fault;
    3415  goto fault;
    3426  fault:
/linux-master/drivers/nvme/host/nvme.h
    15    #include <linux/fault-inject.h>
    721   /* inject error when permitted by fault injection framework */
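Both this nvme.h hit and the powerpc iommu.c hit further down pull in <linux/fault-inject.h>. As a hedged sketch of that framework's usual pattern (not nvme's actual wrapper; the demo_* names are invented, and the debugfs knobs assume CONFIG_FAULT_INJECTION_DEBUG_FS): a driver declares a struct fault_attr, exposes its probability/interval controls through debugfs, and asks should_fail() at the point where an error may be injected.

#include <linux/fault-inject.h>
#include <linux/debugfs.h>
#include <linux/err.h>

/* Invented example; real drivers usually embed the attr in their own state. */
static DECLARE_FAULT_ATTR(demo_fault_attr);
static struct dentry *demo_fault_dir;

static int demo_fault_init(void)
{
	/* Creates the standard probability/interval/times knobs in debugfs. */
	demo_fault_dir = fault_create_debugfs_attr("demo_fault", NULL,
						   &demo_fault_attr);
	return PTR_ERR_OR_ZERO(demo_fault_dir);
}

static int demo_submit_io(size_t len)
{
	/* Inject an error when permitted by the fault injection framework. */
	if (should_fail(&demo_fault_attr, len))
		return -EIO;
	return 0;	/* would submit the real I/O here */
}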
/linux-master/drivers/dma/idxd/irq.c
    224   struct idxd_evl_fault *fault = container_of(work, struct idxd_evl_fault, work); local
    225   struct idxd_wq *wq = fault->wq;
    229   struct __evl_entry *entry_head = fault->entry;
    237   switch (fault->status) {
    262   dev_dbg_ratelimited(dev, "Unrecognized error code: %#x\n", fault->status);
    276   * The task that triggered the page fault is unknown currently
    278   * space or the task exits already before this fault.
    284   switch (fault->status) {
    308   kmem_cache_free(idxd->evl_cache, fault);
    325   struct idxd_evl_fault *fault; local [all...]
/linux-master/tools/testing/selftests/mm/run_vmtests.sh
    310   CATEGORY="mlock" run_test sudo -u nobody ./on-fault-limit
    312   echo "# SKIP ./on-fault-limit"
/linux-master/mm/hugetlb.c
    2866  * As subsequent fault on such a range will not use reserves.
    5293  .fault = hugetlb_vm_op_fault,
    5908  * They get marked to be SIGKILLed if they fault in these
    5909  * areas. This is because a future no-page fault on this VMA
    6274  * sent SIGBUS. The hugetlb fault mutex prevents two
    6275  * tasks from racing to fault in the same page which
    6277  * Page migration does not take the fault mutex, but
    6279  * lock. Page fault code could race with migration,
    6300  * fault mutex is held when add a hugetlb page
    6316  * If memory error occurs between mmap() and fault, som [all...]
/linux-master/mm/shmem.c
    2188  * hole-punch begins, so that one fault then races with the punch:
    2192  * standard mutex or completion: but we cannot take i_rwsem in fault,
    2650  * process B thread 1 takes page fault, read lock on own mmap lock
    4613  .fault = shmem_fault,
    4622  .fault = shmem_fault,
/linux-master/mm/gup.c
    515   (vma_is_anonymous(vma) || !vma->vm_ops->fault))
    562   /* ... or already writable ones that just need to take a write fault */
    573   /* ... and a write-fault isn't required for other reasons. */
    802   * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
    963   * mmap lock in the page fault handler. Sanity check this.
    972   * fault, with the mmap lock released. Use -EAGAIN to show
    1150  * -- 0 return value is possible when the fault would need to be retried.
    1369  * fixup_user_fault() - manually resolve a user page fault
    1379  * section), this returns -EFAULT, and we want to resolve the user fault before
    1678  * We want to touch writable mappings with a write fault i [all...]
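The gup.c comments around line 1369 describe fixup_user_fault(), which resolves a single user page fault by hand. A minimal, hypothetical caller might look like the sketch below (demo_make_writable() is made up); the point is the locking contract: the mmap lock is held for read, may be dropped internally to wait for I/O (reported via *unlocked), and -EFAULT comes back when the address cannot be resolved.

#include <linux/mm.h>

static int demo_make_writable(struct mm_struct *mm, unsigned long addr)
{
	bool unlocked = false;
	int ret;

	mmap_read_lock(mm);
	/*
	 * Fault the page in as writable now, so a later access that must not
	 * sleep will not fault. Returns 0 on success or -EFAULT on failure;
	 * "unlocked" reports whether the mmap lock was dropped and retaken.
	 */
	ret = fixup_user_fault(mm, addr, FAULT_FLAG_WRITE, &unlocked);
	mmap_read_unlock(mm);
	return ret;
}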
/linux-master/include/linux/mm.h
    116   * a zero page mapping on a read fault.
    471   * The default fault flags that should be used by most of the
    472   * arch-specific page fault handlers.
    488   * Return: true if the page fault allows retry and this is the first
    489   * attempt of the fault handling; false otherwise.
    512   * ->fault function. The vma's ->fault is responsible for returning a bitmask
    513   * of VM_FAULT_xxx flags that give details about how the fault was handled.
    515   * MM layer fills up gfp_mask for page allocations but fault handler might
    536   pte_t orig_pte; /* Value of PTE at the time of fault */
    588   vm_fault_t (*fault)(struct vm_fault *vmf); member in struct:vm_operations_struct [all...]
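The mm.h comments at lines 512-513 and the ->fault member at line 588 spell out the contract that every ".fault = ..." assignment elsewhere in these results implements. A minimal sketch, not taken from any real driver (the demo_* names and demo_pages[] backing store are invented): the handler either reports failure with a VM_FAULT_xxx bit or hands a referenced page back in vmf->page for the core MM to map.

#include <linux/mm.h>

#define DEMO_NPAGES 16
static struct page *demo_pages[DEMO_NPAGES];	/* assumed pre-allocated */

static vm_fault_t demo_vm_fault(struct vm_fault *vmf)
{
	if (vmf->pgoff >= DEMO_NPAGES)
		return VM_FAULT_SIGBUS;		/* fault beyond the backing object */

	get_page(demo_pages[vmf->pgoff]);	/* core MM consumes one reference */
	vmf->page = demo_pages[vmf->pgoff];	/* core MM installs the PTE for us */
	return 0;				/* no special VM_FAULT_* flags needed */
}

static const struct vm_operations_struct demo_vm_ops = {
	.fault = demo_vm_fault,
};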
/linux-master/drivers/video/fbdev/core/fb_defio.c
    186   * @vmf: The VM fault
    216   .fault = fb_deferred_io_fault,
/linux-master/arch/loongarch/mm/fault.c
    39    /* Are we prepared to handle this kernel fault? */
    64    * (which will retry the fault, or kill us if we got oom-killed).
    142   vm_fault_t fault; local
    148   * We fault-in kernel-space virtual memory on-demand. The
    166   * context, we must not take the fault..
    212   * If for any reason at all we couldn't handle the fault,
    214   * the fault.
    216   fault = handle_mm_fault(vma, address, flags, regs);
    218   if (fault_signal_pending(fault, regs)) {
    224   /* The fault i [all...]
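The loongarch fault.c hits (handle_mm_fault(), fault_signal_pending(), the retry comments) all come from the arch page-fault loop that every architecture implements in roughly the same shape. The following is a condensed, hypothetical sketch of that loop, not the loongarch code itself; demo_do_page_fault() is a made-up name, and the kernel-address, SIGSEGV/SIGBUS and OOM paths are omitted.

#include <linux/mm.h>
#include <linux/sched.h>

static void demo_do_page_fault(struct pt_regs *regs, unsigned long address,
			       bool is_write)
{
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	struct vm_area_struct *vma;
	vm_fault_t fault;

	if (is_write)
		flags |= FAULT_FLAG_WRITE;
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		return;				/* would raise SIGSEGV / run exception fixup */

	fault = handle_mm_fault(vma, address, flags, regs);

	/* If a signal cut the fault short, the mmap lock is already released. */
	if (fault_signal_pending(fault, regs))
		return;

	/* The fault is fully completed, including releasing the mmap lock. */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;	/* record that we already retried */
		goto retry;			/* lock was dropped; look the VMA up again */
	}

	mmap_read_unlock(mm);			/* error handling (VM_FAULT_ERROR) omitted */
}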
/linux-master/virt/kvm/kvm_main.c
    853   * This sequence increase will notify the kvm page fault that
    1617  * read-only memslots have emulated MMIO, not page fault, semantics,
    2803  * Fast pin a writable pfn only if it is a write fault request
    2804  * or the caller allows to map a writable pfn for a read fault
    2859  /* map read fault as writable if possible */
    2908  * not call the fault handler, so do it here.
    4166  .fault = kvm_vcpu_fault,
/linux-master/fs/bcachefs/fs.c
    1063  .fault = bch2_page_fault,
/linux-master/drivers/android/binder.c
    5594  .fault = binder_vm_fault,
/linux-master/arch/x86/kvm/x86.c
    507   * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
    509   * Hardware virtualization extension instructions may fault if a reboot turns
    511   * fault we just panic; during reboot instead the instruction is ignored.
    557   * #DBs can be trap-like or fault-like, the caller must check other CPU
    558   * state, e.g. DR6, to determine whether a #DB is a trap or fault.
    569   /* Reserved exceptions will result in fault */
    707   /* triple fault -> shutdown */
    779   void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) argument
    787   if (is_guest_mode(vcpu) && fault->async_page_fault)
    789   true, fault
    796   kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) argument
    13340 struct x86_exception fault; local
    13564 struct x86_exception fault; local [all...]
/linux-master/arch/x86/kvm/vmx/vmx.c
    445   noinstr void vmread_error_trampoline2(unsigned long field, bool fault) argument
    447   if (fault) {
    747   _ASM_EXTABLE(1b, %l[fault])
    748   ::: "cc", "memory" : fault);
    753   fault:
    1591  * case a #GP fault.
    1617  * utilize encodings marked reserved will cause a #GP fault.
    1638  * cause a #GP fault.
    2791  _ASM_EXTABLE(1b, %l[fault])
    2793  : : fault); [all...]
/linux-master/arch/x86/kvm/mmu/tdp_mmu.c
    363   * handling a page fault could overwrite it, so
    409   * sufficient as a fast page fault could read the upper
    777   * in order to mimic the page fault path, which can replace a 1GiB page
    990   * Installs a last-level SPTE to handle a TDP page fault.
    994   struct kvm_page_fault *fault,
    1002  if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
    1005  if (unlikely(!fault->slot))
    1008  wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
    1009  fault->pfn, iter->old_spte, fault
    993   tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, struct tdp_iter *iter) argument
    1082  kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument [all...]
/linux-master/arch/x86/kvm/mmu/mmu.c
    2730  * page fault paths.
    2881  * fault.
    2908  kvm_pfn_t pfn, struct kvm_page_fault *fault)
    2919  bool host_writable = !fault || fault->map_writable;
    2920  bool prefetch = !fault || fault->prefetch;
    2921  bool write_fault = fault && fault->write;
    3098  * page fault step
    2906  mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, u64 *sptep, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, struct kvm_page_fault *fault) argument
    3180  kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    3216  disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level) argument
    3237  direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    3288  kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    3311  kvm_handle_noslot_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, unsigned int access) argument
    3341  page_fault_can_be_fast(struct kvm_page_fault *fault) argument
    3381  fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, u64 *sptep, u64 old_spte, u64 new_spte) argument
    3406  is_access_allowed(struct kvm_page_fault *fault, u64 spte) argument
    3444  fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    4221  page_fault_handle_page_track(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    4312  kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    4320  kvm_faultin_pfn_private(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    4344  __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    4413  kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, unsigned int access) argument
    4475  is_page_fault_stale(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    4504  direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    4545  nonpaging_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    4587  kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument
    4636  kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) argument [all...]
/linux-master/arch/x86/include/asm/kvm_host.h
    449   int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
    451   struct x86_exception *fault);
    463   * consists of 16 domains indexed by page fault error code bits [4:1],
    472   * Bitmap; bit set = permission fault
    473   * Byte index: page fault error code [4:1]
    1998  * EMULTYPE_WRITE_PF_TO_SP - Set when emulating an intercepted page fault that
    2014  * !PRESENT fault, which results in a new shadow page
    2086  void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
    2088  struct x86_exception *fault);
/linux-master/arch/powerpc/kernel/iommu.c
    24    #include <linux/fault-inject.h>
    166   pr_warn("Unable to create IOMMU fault injection sysfs "
/linux-master/fs/fuse/file.c
    2528  .fault = filemap_fault,
/linux-master/fs/9p/vfs_file.c
    507   .fault = filemap_fault,
/linux-master/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
    95    .fault = vmw_bo_vm_fault,
/linux-master/arch/s390/kernel/entry.S
    324   j 3f # -> fault in user space
    327   jnz 2f # -> enabled, can't be a double fault
/linux-master/fs/kernfs/file.c
    382   if (of->vm_ops->fault)
    383   ret = of->vm_ops->fault(vmf);
    434   .fault = kernfs_vma_fault,
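The kernfs hits show a common wrapping pattern: kernfs installs its own .fault handler and forwards the fault to the vm_operations_struct of the underlying file once its own checks pass. A hedged sketch of that delegation follows (struct demo_open_file and demo_wrapper_fault() are invented; the real kernfs code additionally pins the node with an active reference before forwarding):

#include <linux/mm.h>

struct demo_open_file {
	const struct vm_operations_struct *vm_ops;	/* inner implementation */
};

static vm_fault_t demo_wrapper_fault(struct vm_fault *vmf)
{
	struct demo_open_file *of = vmf->vma->vm_private_data;
	vm_fault_t ret = VM_FAULT_SIGBUS;		/* default: no backing */

	if (of->vm_ops && of->vm_ops->fault)
		ret = of->vm_ops->fault(vmf);		/* delegate to the inner ops */

	return ret;
}

static const struct vm_operations_struct demo_wrapper_vm_ops = {
	.fault = demo_wrapper_fault,
};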
/linux-master/fs/ceph/addr.c
    1968  .fault = ceph_filemap_fault,