Lines matching defs:mm in mm/ksm.c

17 #include <linux/mm.h>
22 #include <linux/sched/mm.h>
123 * struct ksm_mm_slot - ksm information per mm that is being scanned
124 * @slot: hash lookup from mm to mm_slot
191 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
193 * @mm: the memory structure this rmap_item is pointing into
210 struct mm_struct *mm;
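The mm_slot machinery above is essentially a hash table keyed by the mm_struct pointer (see mm_slot_insert()/mm_slot_lookup() further down in this listing). A minimal userspace analogue of that pointer-keyed lookup, with illustrative names that are not kernel API:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-ins, not the kernel's types. */
    struct fake_mm { int id; };
    struct fake_slot {
            struct fake_mm *mm;
            struct fake_slot *next;        /* bucket chaining */
    };

    #define NBUCKETS 64
    static struct fake_slot *buckets[NBUCKETS];

    /* Hash the mm pointer itself, as the kernel's mm_slots_hash does. */
    static unsigned bucket_of(struct fake_mm *mm)
    {
            return ((uintptr_t)mm >> 4) % NBUCKETS;
    }

    static void slot_insert(struct fake_mm *mm)
    {
            struct fake_slot *s = calloc(1, sizeof(*s));
            s->mm = mm;
            s->next = buckets[bucket_of(mm)];
            buckets[bucket_of(mm)] = s;
    }

    static struct fake_slot *slot_lookup(struct fake_mm *mm)
    {
            struct fake_slot *s;
            for (s = buckets[bucket_of(mm)]; s; s = s->next)
                    if (s->mm == mm)
                            return s;
            return NULL;
    }

    int main(void)
    {
            struct fake_mm mm = { .id = 1 };
            slot_insert(&mm);
            printf("slot found: %s\n", slot_lookup(&mm) ? "yes" : "no");
            return 0;
    }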
580 rmap_item->mm->ksm_rmap_items--;
581 rmap_item->mm = NULL; /* debug safety */
603 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
610 static inline bool ksm_test_exit(struct mm_struct *mm)
612 return atomic_read(&mm->mm_users) == 0;
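ksm_test_exit() treats an mm as already exited once its mm_users refcount has dropped to zero, which is why the scan loops below bail out right after taking mmap_read_lock. A minimal userspace sketch of that guard, using C11 atomics and made-up names in place of the kernel's types:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Made-up stand-in for struct mm_struct's mm_users refcount. */
    struct fake_mm {
            atomic_int mm_users;
    };

    /* Mirrors ksm_test_exit(): an mm with no users left is "exiting". */
    static bool fake_test_exit(struct fake_mm *mm)
    {
            return atomic_load(&mm->mm_users) == 0;
    }

    int main(void)
    {
            struct fake_mm mm = { .mm_users = 1 };

            printf("exiting? %d\n", fake_test_exit(&mm));   /* 0: alive */
            atomic_fetch_sub(&mm.mm_users, 1);              /* last user gone */
            printf("exiting? %d\n", fake_test_exit(&mm));   /* 1: exited */
            return 0;
    }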
624 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
661 * in case the application has unmapped and remapped mm,addr meanwhile.
700 * to user; and ksmd, having no mm, would never be chosen for that.
702 * But if the mm is in a limited mem_cgroup, then the fault may fail
738 static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
742 if (ksm_test_exit(mm))
744 vma = vma_lookup(mm, addr);
752 struct mm_struct *mm = rmap_item->mm;
762 mmap_read_lock(mm);
763 vma = find_mergeable_vma(mm, addr);
766 mmap_read_unlock(mm);
771 struct mm_struct *mm = rmap_item->mm;
776 mmap_read_lock(mm);
777 vma = find_mergeable_vma(mm, addr);
795 mmap_read_unlock(mm);
861 trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
866 rmap_item->mm->ksm_merging_pages--;
1017 rmap_item->mm->ksm_merging_pages--;
1031 * But be careful when an mm is exiting: do the rb_erase
1201 struct mm_struct *mm;
1213 VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);
1215 mm = mm_slot->slot.mm;
1216 mmap_read_lock(mm);
1219 * Exit right away if mm is exiting to avoid lockdep issue in
1222 if (ksm_test_exit(mm))
1236 mmap_read_unlock(mm);
1242 if (ksm_test_exit(mm)) {
1248 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1249 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
1250 mmdrop(mm);
1261 mmap_read_unlock(mm);
1281 struct mm_struct *mm = vma->vm_mm;
1296 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
1308 anon_exclusive || mm_tlb_flush_pending(mm)) {
1323 * See Documentation/mm/mmu_notifier.rst
1331 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1338 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1349 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1375 struct mm_struct *mm = vma->vm_mm;
1390 pmd = mm_find_pmd(mm, addr);
1402 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
1406 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
1433 mm->ksm_zero_pages++;
1438 * when tearing down the mm.
1440 dec_mm_counter(mm, MM_ANONPAGES);
1448 * See Documentation/mm/mmu_notifier.rst
1451 set_pte_at(mm, addr, ptep, newpte);
1544 struct mm_struct *mm = rmap_item->mm;
1548 mmap_read_lock(mm);
1549 vma = find_mergeable_vma(mm, rmap_item->address);
1564 mmap_read_unlock(mm);
1566 rmap_item, mm, err);
2296 rmap_item->mm->ksm_merging_pages++;
2310 struct mm_struct *mm = rmap_item->mm;
2386 mmap_read_lock(mm);
2387 vma = find_mergeable_vma(mm, rmap_item->address);
2393 rmap_item, mm, err);
2401 mmap_read_unlock(mm);
2492 rmap_item->mm = mm_slot->slot.mm;
2493 rmap_item->mm->ksm_rmap_items++;
2571 struct mm_struct *mm;
2630 * of the last mm on the list may have removed it since then.
2640 mm = slot->mm;
2641 vma_iter_init(&vmi, mm, ksm_scan.address);
2643 mmap_read_lock(mm);
2644 if (ksm_test_exit(mm))
2656 if (ksm_test_exit(mm))
2681 mmap_read_unlock(mm);
2691 if (ksm_test_exit(mm)) {
2710 * __ksm_exit does to remove this mm from all our lists now.
2721 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2722 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
2723 mmap_read_unlock(mm);
2724 mmdrop(mm);
2726 mmap_read_unlock(mm);
2728 * mmap_read_unlock(mm) first because after
2729 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
2837 struct mm_struct *mm = vma->vm_mm;
2839 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2843 static void ksm_add_vmas(struct mm_struct *mm)
2847 VMA_ITERATOR(vmi, mm, 0);
2852 static int ksm_del_vmas(struct mm_struct *mm)
2857 VMA_ITERATOR(vmi, mm, 0);
2867 * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
2870 * @mm: Pointer to mm
2874 int ksm_enable_merge_any(struct mm_struct *mm)
2878 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2881 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2882 err = __ksm_enter(mm);
2887 set_bit(MMF_VM_MERGE_ANY, &mm->flags);
2888 ksm_add_vmas(mm);
2894 * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm,
2901 * @mm: Pointer to mm
2905 int ksm_disable_merge_any(struct mm_struct *mm)
2909 if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2912 err = ksm_del_vmas(mm);
2914 ksm_add_vmas(mm);
2918 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
2922 int ksm_disable(struct mm_struct *mm)
2924 mmap_assert_write_locked(mm);
2926 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
2928 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2929 return ksm_disable_merge_any(mm);
2930 return ksm_del_vmas(mm);
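ksm_enable_merge_any()/ksm_disable_merge_any() are the backends of the PR_SET_MEMORY_MERGE prctl (Linux 6.4+). A minimal userspace sketch toggling process-wide merging; note the kernel may additionally require CAP_SYS_RESOURCE, so enabling can fail with EPERM:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_MEMORY_MERGE          /* from linux/prctl.h, Linux >= 6.4 */
    #define PR_SET_MEMORY_MERGE 67
    #endif
    #ifndef PR_GET_MEMORY_MERGE
    #define PR_GET_MEMORY_MERGE 68
    #endif

    int main(void)
    {
            /* Sets MMF_VM_MERGE_ANY via ksm_enable_merge_any(). */
            if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
                    perror("PR_SET_MEMORY_MERGE on");

            printf("merge-any: %ld\n",
                   (long)prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0));

            /* Mirrors ksm_disable_merge_any(): unmerge, then clear the flag. */
            if (prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0))
                    perror("PR_SET_MEMORY_MERGE off");
            return 0;
    }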
2936 struct mm_struct *mm = vma->vm_mm;
2946 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2947 err = __ksm_enter(mm);
2973 int __ksm_enter(struct mm_struct *mm)
2989 mm_slot_insert(mm_slots_hash, mm, slot);
3006 set_bit(MMF_VM_MERGEABLE, &mm->flags);
3007 mmgrab(mm);
3012 trace_ksm_enter(mm);
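The older, per-VMA route into __ksm_enter() is madvise(MADV_MERGEABLE). A small sketch marking one anonymous mapping as mergeable; actual merging also needs CONFIG_KSM and ksmd running (/sys/kernel/mm/ksm/run set to 1):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2 * 1024 * 1024;
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (buf == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            memset(buf, 0x5a, len);  /* identical pages: prime KSM candidates */

            /* The first MADV_MERGEABLE in a process reaches __ksm_enter(),
             * which mmgrab()s the mm and queues it for ksmd's scan list. */
            if (madvise(buf, len, MADV_MERGEABLE))
                    perror("madvise(MADV_MERGEABLE)");
            return 0;
    }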
3016 void __ksm_exit(struct mm_struct *mm)
3032 slot = mm_slot_lookup(mm_slots_hash, mm);
3048 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
3049 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
3050 mmdrop(mm);
3052 mmap_write_lock(mm);
3053 mmap_write_unlock(mm);
3056 trace_ksm_exit(mm);
3157 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
3209 if (vma->vm_mm == t->mm) {
3377 long ksm_process_profit(struct mm_struct *mm)
3379 return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
3380 mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
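ksm_process_profit() is bytes saved by merged pages and KSM-placed zero pages, minus the metadata cost of the rmap items. A quick sanity check with made-up counts; the 64-byte sizeof(struct ksm_rmap_item) is an assumption for a typical x86-64 build, not a quoted value:

    #include <stdio.h>

    int main(void)
    {
            long page_size = 4096;     /* typical PAGE_SIZE on x86-64 */
            long merging_pages = 1000; /* hypothetical mm->ksm_merging_pages */
            long zero_pages = 200;     /* hypothetical mm->ksm_zero_pages */
            long rmap_items = 1500;    /* hypothetical mm->ksm_rmap_items */
            long item_size = 64;       /* assumed sizeof(struct ksm_rmap_item) */

            long profit = (merging_pages + zero_pages) * page_size
                        - rmap_items * item_size;

            printf("process profit: %ld bytes (~%ld KiB)\n",
                   profit, profit / 1024);
            return 0;
    }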