Lines Matching defs:range

19  * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap() that
20 * moves the range
25 * @MMU_NOTIFY_PROTECTION_VMA: update is due to protection change for the range
26 * i.e. using the vma access permission (vm_page_prot) to update the whole range
31 * pages in the range, so to mirror those changes the user must inspect the CPU
40 * that the mm refcount is zero and the range is no longer accessible.
48 * exclusive range, the owner will be initialised to the value provided by the
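
The event values described above arrive in range->event, so a subscriber can tailor its response per event. A minimal sketch of that dispatch follows; struct my_mirror and the my_mirror_*() helpers are hypothetical, while the callback signature, enum values, and range fields are the real ones from this header (all sketches below assume #include <linux/mmu_notifier.h>):

	struct my_mirror {			/* hypothetical device-mirror state */
		struct mmu_notifier notifier;
		/* ... device page-table handle, locks, ... */
	};

	static int my_invalidate_range_start(struct mmu_notifier *subscription,
					     const struct mmu_notifier_range *range)
	{
		struct my_mirror *mirror =
			container_of(subscription, struct my_mirror, notifier);

		/* the hypothetical my_mirror_*() helpers must not sleep
		 * unless mmu_notifier_range_blockable(range) is true */
		switch (range->event) {
		case MMU_NOTIFY_UNMAP:
			/* munmap()/mremap(): drop the device mapping outright */
			my_mirror_unmap(mirror, range->start, range->end);
			break;
		case MMU_NOTIFY_PROTECTION_VMA:
			/* whole-range permission change via vm_page_prot */
			my_mirror_update_prot(mirror, range->start, range->end);
			break;
		case MMU_NOTIFY_EXCLUSIVE:
			if (range->owner == mirror)
				break;	/* our own marking; nothing to mirror */
			fallthrough;
		default:
			my_mirror_invalidate(mirror, range->start, range->end);
			break;
		}
		return 0;
	}
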
130 * the pages in the range, it has to implement the
136 * establishment of sptes is forbidden in the range passed to
141 * range are still mapped and have at least a refcount of one.
144 * range have been unmapped and the pages have been freed by
176 const struct mmu_notifier_range *range);
178 const struct mmu_notifier_range *range);
239 * range. This function can sleep. Return false only if sleeping
240 * was required but mmu_notifier_range_blockable(range) is false.
244 const struct mmu_notifier_range *range,
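
The comment above gives the sleeping rule for the interval-notifier invalidate callback. A sketch of an implementation that honours it, assuming a hypothetical struct my_obj guarded by a mutex; mmu_notifier_range_blockable() and mmu_interval_set_seq() are the real helpers:

	struct my_obj {				/* hypothetical */
		struct mmu_interval_notifier notifier;
		struct mutex lock;
	};

	static bool my_interval_invalidate(struct mmu_interval_notifier *mni,
					   const struct mmu_notifier_range *range,
					   unsigned long cur_seq)
	{
		struct my_obj *obj = container_of(mni, struct my_obj, notifier);

		/* sleep for the lock only when the range allows it, so that
		 * false is returned exactly when sleeping would be required */
		if (mmu_notifier_range_blockable(range))
			mutex_lock(&obj->lock);
		else if (!mutex_trylock(&obj->lock))
			return false;

		mmu_interval_set_seq(mni, cur_seq);	/* makes readers retry */
		/* ... tear down device PTEs for [range->start, range->end) ... */
		mutex_unlock(&obj->lock);
		return true;
	}
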
331 * mmu_interval_read_retry - End a read side critical section against a VA range
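
mmu_interval_read_retry() pairs with mmu_interval_read_begin() in the driver's fault path: sample a sequence count, do the sleepable work, then re-check under the same lock the invalidate callback takes. A sketch reusing the hypothetical struct my_obj from the previous example; my_program_ptes() is likewise invented:

	static int my_fault(struct my_obj *obj)
	{
		unsigned long seq;

	again:
		seq = mmu_interval_read_begin(&obj->notifier);
		/* ... faulting work that may sleep, e.g. hmm_range_fault() ... */
		mutex_lock(&obj->lock);
		if (mmu_interval_read_retry(&obj->notifier, seq)) {
			mutex_unlock(&obj->lock);
			goto again;	/* an invalidation raced in; restart */
		}
		/* no invalidation since 'seq': safe to establish device PTEs */
		my_program_ptes(obj);		/* hypothetical */
		mutex_unlock(&obj->lock);
		return 0;
	}
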
391 mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
394 mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
396 return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
432 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
437 if (mm_has_notifiers(range->mm)) {
438 range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
439 __mmu_notifier_invalidate_range_start(range);
447 * case you're not allowed to modify PTEs in the specified range.
452 mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
457 if (mm_has_notifiers(range->mm)) {
458 range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
459 ret = __mmu_notifier_invalidate_range_start(range);
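
A sketch of the non-blocking caller pattern this variant exists for; the surrounding function and the MMU_NOTIFY_CLEAR event choice are illustrative, while the three mmu_notifier_* calls are the real API:

	static int my_zap_nonblock(struct mm_struct *mm, unsigned long start,
				   unsigned long end)
	{
		struct mmu_notifier_range range;

		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
					start, end);
		if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
			/* a subscriber would have had to sleep: back off
			 * without touching any PTE in the range */
			return -EAGAIN;
		}
		/* ... clear PTEs; nothing in here may sleep ... */
		mmu_notifier_invalidate_range_end(&range);
		return 0;
	}
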
466 mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
468 if (mmu_notifier_range_blockable(range))
471 if (mm_has_notifiers(range->mm))
472 __mmu_notifier_invalidate_range_end(range);
494 static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
501 range->event = event;
502 range->mm = mm;
503 range->start = start;
504 range->end = end;
505 range->flags = flags;
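
With the initialiser in view, the common blocking call pattern is init, start, PTE update, end. A sketch (the function and event choice are illustrative):

	static void my_write_protect(struct mm_struct *mm, unsigned long start,
				     unsigned long end)
	{
		struct mmu_notifier_range range;

		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0,
					mm, start, end);
		mmu_notifier_invalidate_range_start(&range);	/* may sleep */
		/* ... downgrade the CPU PTEs for [start, end) ... */
		mmu_notifier_invalidate_range_end(&range);
	}
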
509 struct mmu_notifier_range *range,
514 mmu_notifier_range_init(range, event, flags, mm, start, end);
515 range->owner = owner;
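
The owner variant lets the initiator tag the range so its own notifier callback can recognise, and skip, invalidations it caused itself; the range->owner check in the first sketch above is the matching consumer side. A sketch with the hypothetical my_mirror as owner:

	static void my_make_exclusive(struct my_mirror *mirror,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
	{
		struct mmu_notifier_range range;

		/* tag the invalidation so our own callback can identify it
		 * via range->owner and skip itself */
		mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
					      mm, start, end, mirror);
		mmu_notifier_invalidate_range_start(&range);
		/* ... convert CPU PTEs to device-exclusive entries ... */
		mmu_notifier_invalidate_range_end(&range);
	}
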
573 static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
577 range->start = start;
578 range->end = end;
581 #define mmu_notifier_range_init(range,event,flags,mm,start,end) \
582 _mmu_notifier_range_init(range, start, end)
583 #define mmu_notifier_range_init_owner(range, event, flags, mm, start, \
585 _mmu_notifier_range_init(range, start, end)
588 mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
616 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
621 mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
627 void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)