Lines matching refs: range

19 * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap() that
20 * moves the range
25 * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the range,
26 * i.e. using the vma access permission (vm_page_prot) to update the whole range
31 * pages in the range, so to mirror those changes the user must inspect the CPU
40 * that the mm refcount is zero and the range is no longer accessible.
48 * exclusive range, the owner will be initialised to the value provided by the
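
A hedged sketch (not from the header) of how a notifier might branch on the event types described above; my_mirror_drop() and my_mirror_invalidate() are hypothetical driver helpers:

    #include <linux/mmu_notifier.h>

    /* Called from a driver's invalidate_range_start callback. */
    static void my_dispatch_event(const struct mmu_notifier_range *range)
    {
            switch (range->event) {
            case MMU_NOTIFY_UNMAP:
                    /* munmap()/mremap(): the mapping itself goes away */
                    my_mirror_drop(range->start, range->end);
                    break;
            case MMU_NOTIFY_PROTECTION_VMA:
                    /* vm_page_prot changed: re-fault for new permissions */
                    my_mirror_invalidate(range->start, range->end);
                    break;
            default:
                    my_mirror_invalidate(range->start, range->end);
                    break;
            }
    }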
139 * the pages in the range, it has to implement the
145 * establishment of sptes is forbidden in the range passed to
150 * range are still mapped and have at least a refcount of one.
153 * range have been unmapped and the pages have been freed by
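
Lines 139-153 state the contract for the start/end pair: at invalidate_range_start the pages in the range are still mapped with at least one refcount, while by invalidate_range_end they may already be unmapped and freed. A minimal sketch of implementing that pair, with all my_* names assumed:

    static int my_range_start(struct mmu_notifier *mn,
                              const struct mmu_notifier_range *range)
    {
            if (!mmu_notifier_range_blockable(range))
                    return -EAGAIN; /* only permitted failure: we would sleep */
            /* Pages in [range->start, range->end) are still mapped here. */
            my_block_secondary_faults(range->start, range->end);
            return 0;
    }

    static void my_range_end(struct mmu_notifier *mn,
                             const struct mmu_notifier_range *range)
    {
            /* The pages may already be unmapped and freed by now. */
            my_allow_secondary_faults(range->start, range->end);
    }

    static const struct mmu_notifier_ops my_notifier_ops = {
            .invalidate_range_start = my_range_start,
            .invalidate_range_end   = my_range_end,
    };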
185 const struct mmu_notifier_range *range);
187 const struct mmu_notifier_range *range);
248 * range. This function can sleep. Return false only if sleeping
249 * was required but mmu_notifier_range_blockable(range) is false.
253 const struct mmu_notifier_range *range,
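
Lines 248-253 belong to the mmu_interval_notifier_ops invalidate callback. A sketch in the style of the in-tree users; the lock and the teardown helper are assumptions:

    static DEFINE_MUTEX(my_lock);

    static bool my_invalidate(struct mmu_interval_notifier *sub,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq)
    {
            if (mmu_notifier_range_blockable(range))
                    mutex_lock(&my_lock);
            else if (!mutex_trylock(&my_lock))
                    return false;   /* sleeping required but not allowed */

            mmu_interval_set_seq(sub, cur_seq);     /* force readers to retry */
            my_teardown_sptes(range->start, range->end);
            mutex_unlock(&my_lock);
            return true;
    }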
340 * mmu_interval_read_retry - End a read side critical section against a VA range
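
The read side that line 340 refers to pairs mmu_interval_read_begin() with mmu_interval_read_retry(); a sketch of the retry loop, with my_lock and my_program_device_ptes() assumed:

    static void my_map_range(struct mmu_interval_notifier *sub,
                             unsigned long start, unsigned long end)
    {
            unsigned long seq;

    again:
            seq = mmu_interval_read_begin(sub);
            /* fault/collect the CPU pages for [start, end), unlocked */
            mutex_lock(&my_lock);
            if (mmu_interval_read_retry(sub, seq)) {
                    /* an invalidation ran since read_begin: start over */
                    mutex_unlock(&my_lock);
                    goto again;
            }
            my_program_device_ptes(start, end);     /* publish under my_lock */
            mutex_unlock(&my_lock);
    }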
402 mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
405 mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
407 return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
450 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
455 if (mm_has_notifiers(range->mm)) {
456 range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
457 __mmu_notifier_invalidate_range_start(range);
465 * case you're not allowed to modify PTEs in the specified range.
470 mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
475 if (mm_has_notifiers(range->mm)) {
476 range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
477 ret = __mmu_notifier_invalidate_range_start(range);
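
On the caller side, a nonzero return from the nonblock variant means some notifier would have had to sleep, and per the comment at line 465 the PTEs in the range must then be left untouched. A fragment of such a caller, with the error value illustrative:

    if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
            /* A notifier could not proceed without blocking: abort
             * and leave every PTE in the range unmodified. */
            return -EBUSY;
    }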
484 mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
486 if (mmu_notifier_range_blockable(range))
489 if (mm_has_notifiers(range->mm))
490 __mmu_notifier_invalidate_range_end(range);
512 static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
519 range->event = event;
520 range->mm = mm;
521 range->start = start;
522 range->end = end;
523 range->flags = flags;
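
Putting 450-523 together, a hedged sketch of the caller-side pairing; MMU_NOTIFY_CLEAR is a real event value but chosen here only for illustration:

    static void my_clear_range(struct mm_struct *mm,
                               unsigned long start, unsigned long end)
    {
            struct mmu_notifier_range range;

            mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
                                    start, end);
            mmu_notifier_invalidate_range_start(&range);
            /* ... modify the CPU page tables for [start, end) ... */
            mmu_notifier_invalidate_range_end(&range);
    }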
527 struct mmu_notifier_range *range,
532 mmu_notifier_range_init(range, event, flags, mm, start, end);
533 range->owner = owner;
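
The owner variant (527-533) stashes a cookie so consumers can recognise invalidations they triggered themselves, as the comment at line 48 suggests. A sketch with an assumed my_device pointer:

    mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, mm,
                                  start, end, my_device);

    /* ... and in the notifier's invalidate callback: */
    if (range->event == MMU_NOTIFY_EXCLUSIVE && range->owner == my_device)
            return true;    /* our own exclusive grab: mappings stay valid */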
611 static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
615 range->start = start;
616 range->end = end;
619 #define mmu_notifier_range_init(range, event, flags, mm, start, end) \
620 _mmu_notifier_range_init(range, start, end)
621 #define mmu_notifier_range_init_owner(range, event, flags, mm, start, \
623 _mmu_notifier_range_init(range, start, end)
626 mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
659 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
664 mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
670 void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)