Lines Matching refs:ctx

60 struct userfaultfd_ctx *ctx;
69 struct userfaultfd_ctx *ctx;
81 static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
83 return ctx->features & UFFD_FEATURE_INITIALIZED;
86 static bool userfaultfd_wp_async_ctx(struct userfaultfd_ctx *ctx)
88 return ctx && (ctx->features & UFFD_FEATURE_WP_ASYNC);
98 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
100 if (!ctx)
103 return ctx->features & UFFD_FEATURE_WP_UNPOPULATED;
164 * @ctx: [in] Pointer to the userfaultfd context.
166 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
168 refcount_inc(&ctx->refcount);
174 * @ctx: [in] Pointer to userfaultfd context.
179 static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
181 if (refcount_dec_and_test(&ctx->refcount)) {
182 VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
183 VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
184 VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
185 VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
186 VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
187 VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
188 VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
189 VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
190 mmdrop(ctx->mm);
191 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
244 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
274 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
289 static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
293 struct mm_struct *mm = ctx->mm;
381 struct userfaultfd_ctx *ctx;
403 ctx = vma->vm_userfaultfd_ctx.ctx;
404 if (!ctx)
407 BUG_ON(ctx->mm != mm);
414 if (ctx->features & UFFD_FEATURE_SIGBUS)
416 if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
424 if (unlikely(READ_ONCE(ctx->released))) {
482 userfaultfd_ctx_get(ctx);
487 reason, ctx->features);
488 uwq.ctx = ctx;
502 spin_lock_irq(&ctx->fault_pending_wqh.lock);
507 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
514 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
517 must_wait = userfaultfd_must_wait(ctx, vmf, reason);
519 must_wait = userfaultfd_huge_must_wait(ctx, vmf, reason);
524 if (likely(must_wait && !READ_ONCE(ctx->released))) {
525 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
545 spin_lock_irq(&ctx->fault_pending_wqh.lock);
551 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
555 * ctx may go away after this if the userfault pseudo fd is
558 userfaultfd_ctx_put(ctx);
564 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
572 ewq->ctx = ctx;
576 spin_lock_irq(&ctx->event_wqh.lock);
581 __add_wait_queue(&ctx->event_wqh, &ewq->wq);
586 if (READ_ONCE(ctx->released) ||
594 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
606 spin_unlock_irq(&ctx->event_wqh.lock);
608 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
611 spin_lock_irq(&ctx->event_wqh.lock);
614 spin_unlock_irq(&ctx->event_wqh.lock);
624 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
637 * ctx may go away after this if the userfault pseudo fd is
641 atomic_dec(&ctx->mmap_changing);
642 VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
643 userfaultfd_ctx_put(ctx);
646 static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
650 wake_up_locked(&ctx->event_wqh);
651 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
656 struct userfaultfd_ctx *ctx = NULL, *octx;
659 octx = vma->vm_userfaultfd_ctx.ctx;
669 ctx = fctx->new;
673 if (!ctx) {
678 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
679 if (!ctx) {
684 refcount_set(&ctx->refcount, 1);
685 ctx->flags = octx->flags;
686 ctx->features = octx->features;
687 ctx->released = false;
688 init_rwsem(&ctx->map_changing_lock);
689 atomic_set(&ctx->mmap_changing, 0);
690 ctx->mm = vma->vm_mm;
691 mmgrab(ctx->mm);
698 fctx->new = ctx;
702 vma->vm_userfaultfd_ctx.ctx = ctx;
708 struct userfaultfd_ctx *ctx = fctx->orig;
716 userfaultfd_event_wait_completion(ctx, &ewq);
733 struct userfaultfd_ctx *ctx;
735 ctx = vma->vm_userfaultfd_ctx.ctx;
737 if (!ctx)
740 if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
741 vm_ctx->ctx = ctx;
742 userfaultfd_ctx_get(ctx);
743 down_write(&ctx->map_changing_lock);
744 atomic_inc(&ctx->mmap_changing);
745 up_write(&ctx->map_changing_lock);
758 struct userfaultfd_ctx *ctx = vm_ctx->ctx;
761 if (!ctx)
765 userfaultfd_ctx_put(ctx);
776 userfaultfd_event_wait_completion(ctx, &ewq);
783 struct userfaultfd_ctx *ctx;
786 ctx = vma->vm_userfaultfd_ctx.ctx;
787 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
790 userfaultfd_ctx_get(ctx);
791 down_write(&ctx->map_changing_lock);
792 atomic_inc(&ctx->mmap_changing);
793 up_write(&ctx->map_changing_lock);
802 userfaultfd_event_wait_completion(ctx, &ewq);
807 static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
813 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
824 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
826 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
827 has_unmap_ctx(ctx, unmaps, start, end))
834 userfaultfd_ctx_get(ctx);
835 down_write(&ctx->map_changing_lock);
836 atomic_inc(&ctx->mmap_changing);
837 up_write(&ctx->map_changing_lock);
838 unmap_ctx->ctx = ctx;
848 struct userfaultfd_unmap_ctx *ctx, *n;
851 list_for_each_entry_safe(ctx, n, uf, list) {
855 ewq.msg.arg.remove.start = ctx->start;
856 ewq.msg.arg.remove.end = ctx->end;
858 userfaultfd_event_wait_completion(ctx->ctx, &ewq);
860 list_del(&ctx->list);
861 kfree(ctx);
867 struct userfaultfd_ctx *ctx = file->private_data;
868 struct mm_struct *mm = ctx->mm;
875 WRITE_ONCE(ctx->released, true);
892 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
894 if (vma->vm_userfaultfd_ctx.ctx != ctx) {
917 spin_lock_irq(&ctx->fault_pending_wqh.lock);
918 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
919 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
920 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
923 wake_up_all(&ctx->event_wqh);
925 wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
926 userfaultfd_ctx_put(ctx);
950 struct userfaultfd_ctx *ctx)
952 return find_userfault_in(&ctx->fault_pending_wqh);
956 struct userfaultfd_ctx *ctx)
958 return find_userfault_in(&ctx->event_wqh);
963 struct userfaultfd_ctx *ctx = file->private_data;
966 poll_wait(file, &ctx->fd_wqh, wait);
968 if (!userfaultfd_is_initialized(ctx))
989 if (waitqueue_active(&ctx->fault_pending_wqh))
991 else if (waitqueue_active(&ctx->event_wqh))
1015 static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
1032 spin_lock_irq(&ctx->fd_wqh.lock);
1033 __add_wait_queue(&ctx->fd_wqh, &wait);
1036 spin_lock(&ctx->fault_pending_wqh.lock);
1037 uwq = find_userfault(ctx);
1046 write_seqcount_begin(&ctx->refile_seq);
1070 add_wait_queue(&ctx->fault_wqh, &uwq->wq);
1072 write_seqcount_end(&ctx->refile_seq);
1076 spin_unlock(&ctx->fault_pending_wqh.lock);
1080 spin_unlock(&ctx->fault_pending_wqh.lock);
1082 spin_lock(&ctx->event_wqh.lock);
1083 uwq = find_userfault_evt(ctx);
1098 spin_unlock(&ctx->event_wqh.lock);
1103 userfaultfd_event_complete(ctx, uwq);
1104 spin_unlock(&ctx->event_wqh.lock);
1108 spin_unlock(&ctx->event_wqh.lock);
1118 spin_unlock_irq(&ctx->fd_wqh.lock);
1120 spin_lock_irq(&ctx->fd_wqh.lock);
1122 __remove_wait_queue(&ctx->fd_wqh, &wait);
1124 spin_unlock_irq(&ctx->fd_wqh.lock);
1128 spin_lock_irq(&ctx->event_wqh.lock);
1150 __add_wait_queue(&ctx->event_wqh, &uwq->wq);
1158 userfaultfd_event_complete(ctx, uwq);
1174 spin_unlock_irq(&ctx->event_wqh.lock);
1183 struct userfaultfd_ctx *ctx = file->private_data;
1189 if (!userfaultfd_is_initialized(ctx))
1195 _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
1211 static void __wake_userfault(struct userfaultfd_ctx *ctx,
1214 spin_lock_irq(&ctx->fault_pending_wqh.lock);
1216 if (waitqueue_active(&ctx->fault_pending_wqh))
1217 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
1219 if (waitqueue_active(&ctx->fault_wqh))
1220 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
1221 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
1224 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
1246 seq = read_seqcount_begin(&ctx->refile_seq);
1247 need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
1248 waitqueue_active(&ctx->fault_wqh);
1250 } while (read_seqcount_retry(&ctx->refile_seq, seq));
1252 __wake_userfault(ctx, range);
1284 static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1287 struct mm_struct *mm = ctx->mm;
1297 bool wp_async = userfaultfd_wp_async_ctx(ctx);
1366 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1409 if (cur->vm_userfaultfd_ctx.ctx &&
1410 cur->vm_userfaultfd_ctx.ctx != ctx)
1433 BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
1434 vma->vm_userfaultfd_ctx.ctx != ctx);
1441 if (vma->vm_userfaultfd_ctx.ctx == ctx &&
1452 (struct vm_userfaultfd_ctx){ctx});
1465 vma->vm_userfaultfd_ctx.ctx = ctx;
1507 static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1510 struct mm_struct *mm = ctx->mm;
1519 bool wp_async = userfaultfd_wp_async_ctx(ctx);
1563 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
1595 if (!vma->vm_userfaultfd_ctx.ctx)
1614 wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
1654 static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
1666 ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
1679 wake_userfault(ctx, &range);
1686 static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1698 if (atomic_read(&ctx->mmap_changing))
1707 ret = validate_unaligned_range(ctx->mm, uffdio_copy.src,
1711 ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
1720 if (mmget_not_zero(ctx->mm)) {
1721 ret = mfill_atomic_copy(ctx, uffdio_copy.dst, uffdio_copy.src,
1723 mmput(ctx->mm);
1736 wake_userfault(ctx, &range);
1743 static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1754 if (atomic_read(&ctx->mmap_changing))
1763 ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
1771 if (mmget_not_zero(ctx->mm)) {
1772 ret = mfill_atomic_zeropage(ctx, uffdio_zeropage.range.start,
1774 mmput(ctx->mm);
1787 wake_userfault(ctx, &range);
1794 static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
1803 if (atomic_read(&ctx->mmap_changing))
1812 ret = validate_range(ctx->mm, uffdio_wp.range.start,
1827 if (mmget_not_zero(ctx->mm)) {
1828 ret = mwriteprotect_range(ctx, uffdio_wp.range.start,
1830 mmput(ctx->mm);
1841 wake_userfault(ctx, &range);
1846 static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
1857 if (atomic_read(&ctx->mmap_changing))
1866 ret = validate_range(ctx->mm, uffdio_continue.range.start,
1878 if (mmget_not_zero(ctx->mm)) {
1879 ret = mfill_atomic_continue(ctx, uffdio_continue.range.start,
1881 mmput(ctx->mm);
1896 wake_userfault(ctx, &range);
1904 static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long arg)
1914 if (atomic_read(&ctx->mmap_changing))
1923 ret = validate_range(ctx->mm, uffdio_poison.range.start,
1932 if (mmget_not_zero(ctx->mm)) {
1933 ret = mfill_atomic_poison(ctx, uffdio_poison.range.start,
1935 mmput(ctx->mm);
1950 wake_userfault(ctx, &range);
1960 return userfaultfd_wp_async_ctx(vma->vm_userfaultfd_ctx.ctx);
1972 static int userfaultfd_move(struct userfaultfd_ctx *ctx,
1979 struct mm_struct *mm = ctx->mm;
1983 if (atomic_read(&ctx->mmap_changing))
2008 ret = move_pages(ctx, uffdio_move.dst, uffdio_move.src,
2025 wake_userfault(ctx, &range);
2038 static int userfaultfd_api(struct userfaultfd_ctx *ctx,
2084 if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
2101 struct userfaultfd_ctx *ctx = file->private_data;
2103 if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
2108 ret = userfaultfd_api(ctx, arg);
2111 ret = userfaultfd_register(ctx, arg);
2114 ret = userfaultfd_unregister(ctx, arg);
2117 ret = userfaultfd_wake(ctx, arg);
2120 ret = userfaultfd_copy(ctx, arg);
2123 ret = userfaultfd_zeropage(ctx, arg);
2126 ret = userfaultfd_move(ctx, arg);
2129 ret = userfaultfd_writeprotect(ctx, arg);
2132 ret = userfaultfd_continue(ctx, arg);
2135 ret = userfaultfd_poison(ctx, arg);
2144 struct userfaultfd_ctx *ctx = f->private_data;
2148 spin_lock_irq(&ctx->fault_pending_wqh.lock);
2149 list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
2153 list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
2156 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
2164 pending, total, UFFD_API, ctx->features,
2183 struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
2185 init_waitqueue_head(&ctx->fault_pending_wqh);
2186 init_waitqueue_head(&ctx->fault_wqh);
2187 init_waitqueue_head(&ctx->event_wqh);
2188 init_waitqueue_head(&ctx->fd_wqh);
2189 seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
2194 struct userfaultfd_ctx *ctx;
2207 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
2208 if (!ctx)
2211 refcount_set(&ctx->refcount, 1);
2212 ctx->flags = flags;
2213 ctx->features = 0;
2214 ctx->released = false;
2215 init_rwsem(&ctx->map_changing_lock);
2216 atomic_set(&ctx->mmap_changing, 0);
2217 ctx->mm = current->mm;
2219 mmgrab(ctx->mm);
2222 fd = anon_inode_create_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
2225 mmdrop(ctx->mm);
2226 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
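
The references above are all against the userfaultfd context object (struct userfaultfd_ctx): its allocation and refcounting (userfaultfd_ctx_get/put), the fault and event wait queues, and the UFFDIO_* ioctl handlers; the function names are those of the kernel's fs/userfaultfd.c. As a reading aid only, the sketch below shows a minimal userspace ioctl sequence that exercises those handlers. It is illustrative, not authoritative: error and event checking are omitted, and addr, len and src_page are assumptions (an existing anonymous mapping, its length, and a pre-filled source page), as is a second thread that actually faults on the range.

/*
 * Minimal userspace sketch of the ioctl flow served by the handlers listed
 * above (UFFDIO_API -> userfaultfd_api, UFFDIO_REGISTER ->
 * userfaultfd_register, read() -> userfaultfd_ctx_read, UFFDIO_COPY ->
 * userfaultfd_copy).  addr/len/src_page are assumed to exist, and some
 * other thread is expected to touch addr and block in the kernel's
 * handle_userfault() until the UFFDIO_COPY below wakes it.
 */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static void serve_one_fault(void *addr, size_t len, void *src_page)
{
	long page_size = sysconf(_SC_PAGESIZE);

	/* New context; the kernel allocates a struct userfaultfd_ctx. */
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);

	/* Feature handshake; sets ctx->features (see userfaultfd_api). */
	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
	ioctl(uffd, UFFDIO_API, &api);

	/* Attach the context to the VMA range (see userfaultfd_register). */
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)addr, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/* A blocked fault arrives as a uffd_msg (see userfaultfd_ctx_read). */
	struct uffd_msg msg;
	read(uffd, &msg, sizeof(msg));

	/* Install a page and wake the faulting thread (see userfaultfd_copy). */
	struct uffdio_copy copy = {
		.dst  = msg.arg.pagefault.address & ~(page_size - 1),
		.src  = (unsigned long)src_page,
		.len  = page_size,
		.mode = 0,
	};
	ioctl(uffd, UFFDIO_COPY, &copy);

	close(uffd);
}

In real use the read()/UFFDIO_COPY loop runs in a dedicated handler thread, usually driven by poll() on the fd (serviced by userfaultfd_poll above), because the thread that touched the missing page stays blocked in handle_userfault() until the copy-side wakeup.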