Lines Matching refs:mm in arch/x86/kernel/ldt.c

19 #include <linux/mm.h>
42 void load_mm_ldt(struct mm_struct *mm)
47 ldt = READ_ONCE(mm->context.ldt);
50 * Any change to mm->context.ldt is followed by an IPI to all
51 * CPUs with the mm active. The LDT will not be freed until
93 * Load the LDT if either the old or new mm had an LDT.
95 * An mm will never go from having an LDT to not having an LDT. Two
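
Lines 42-51 above are the lockless read side: load_mm_ldt() snapshots mm->context.ldt with READ_ONCE() and either loads the table into the LDT register or clears it, and the comment at lines 93-95 explains when switch_ldt() has to reload it. A simplified sketch of that shape, assuming the usual x86 helpers set_ldt() and clear_LDT() (error handling and PTI slot selection are elided, so this is not the verbatim kernel code):

    void load_mm_ldt(struct mm_struct *mm)
    {
        struct ldt_struct *ldt;

        /* Pairs with the smp_store_release() in install_ldt() (line 426). */
        ldt = READ_ONCE(mm->context.ldt);

        /*
         * No lock needed: any change to mm->context.ldt is followed by
         * an IPI to all CPUs with this mm active, and the old table is
         * not freed until those IPIs have completed.
         */
        if (ldt)
            set_ldt(ldt->entries, ldt->nr_entries);
        else
            clear_LDT();
    }
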
138 struct mm_struct *mm = __mm;
140 if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
143 load_mm_ldt(mm);
189 static void do_sanity_check(struct mm_struct *mm,
193 if (mm->context.ldt) {
234 static void map_ldt_struct_to_user(struct mm_struct *mm)
236 pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
243 if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
247 static void sanity_check_ldt_mapping(struct mm_struct *mm)
249 pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
259 do_sanity_check(mm, had_kernel, had_user);
264 static void map_ldt_struct_to_user(struct mm_struct *mm)
266 pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
268 if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
272 static void sanity_check_ldt_mapping(struct mm_struct *mm)
274 pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);
278 do_sanity_check(mm, had_kernel, had_user);
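
Lines 234-278 are two configuration-dependent variants of the same idea: with page table isolation (PTI), each mm carries both a kernel PGD and a shadow user PGD, and the mapping at LDT_BASE_ADDR must appear in both. The simpler variant (lines 264-278) reduces to roughly the sketch below; kernel_to_user_pgdp() selects the user half of the PGD pair, and the copy only happens on the first installation, while mm->context.ldt is still NULL:

    static void map_ldt_struct_to_user(struct mm_struct *mm)
    {
        pgd_t *pgd = pgd_offset(mm, LDT_BASE_ADDR);

        /* Mirror the kernel PGD entry into the user page tables. */
        if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
            set_pgd(kernel_to_user_pgdp(pgd), *pgd);
    }
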
285 * usermode tables for the given mm.
288 map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
305 sanity_check_ldt_mapping(mm);
324 * and account for them in this mm.
326 ptep = get_locked_pte(mm, va, &ptl);
338 set_pte_at(mm, va, ptep, pte);
343 map_ldt_struct_to_user(mm);
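
Lines 288-343 come from map_ldt_struct(), which maps the new table's pages read-only into one of two fixed per-mm slots so user mode can reach the LDT under PTI. The heart of it is a per-page loop; a condensed sketch, with the pfn lookup and the protection-bit computation simplified relative to the real code:

    for (i = 0; i < nr_pages; i++) {
        unsigned long offset = i << PAGE_SHIFT;
        unsigned long va = (unsigned long)ldt_slot_va(slot) + offset;
        unsigned long pfn = page_to_pfn(virt_to_page(
                                (char *)ldt->entries + offset));
        spinlock_t *ptl;
        pte_t *ptep, pte;

        /* Allocates intermediate page tables and takes the PTE lock. */
        ptep = get_locked_pte(mm, va, &ptl);
        if (!ptep)
            return -ENOMEM;

        /* Read-only and non-global: user mode may read, never write. */
        pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL));
        set_pte_at(mm, va, ptep, pte);
        pte_unmap_unlock(ptep, ptl);
    }
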
349 static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
369 ptep = get_locked_pte(mm, va, &ptl);
371 pte_clear(mm, va, ptep);
377 flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
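
unmap_ldt_struct() (lines 349-377) is the mirror image: clear each slot PTE under the PTE lock, then issue a single ranged TLB flush for the whole slot instead of one per page. Sketched, with base standing for ldt_slot_va(ldt->slot):

    for (i = 0; i < nr_pages; i++) {
        unsigned long va = base + i * PAGE_SIZE;
        spinlock_t *ptl;
        pte_t *ptep = get_locked_pte(mm, va, &ptl);

        if (!WARN_ON_ONCE(!ptep)) {
            pte_clear(mm, va, ptep);
            pte_unmap_unlock(ptep, ptl);
        }
    }
    /* One ranged flush covers every page just cleared (line 377). */
    flush_tlb_mm_range(mm, base, base + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
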
383 map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
388 static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
393 static void free_ldt_pgtables(struct mm_struct *mm)
409 tlb_gather_mmu_fullmm(&tlb, mm);
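
free_ldt_pgtables() (lines 393-409) goes one level deeper and frees the slot's page tables themselves. It only does work under PTI, and it reconstructs to roughly the following; the full-mm mmu_gather is used because the LDT slot is a kernel-address range, which the normal mmu_gather range tracking is not built for:

    static void free_ldt_pgtables(struct mm_struct *mm)
    {
    #ifdef CONFIG_PAGE_TABLE_ISOLATION
        struct mmu_gather tlb;

        if (!boot_cpu_has(X86_FEATURE_PTI))
            return;

        /* Full-mm gather, then free everything between the LDT bounds. */
        tlb_gather_mmu_fullmm(&tlb, mm);
        free_pgd_range(&tlb, LDT_BASE_ADDR, LDT_END_ADDR,
                       LDT_BASE_ADDR, LDT_END_ADDR);
        tlb_finish_mmu(&tlb);
    #endif
    }
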
421 static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
423 mutex_lock(&mm->context.lock);
426 smp_store_release(&mm->context.ldt, ldt);
428 /* Activate the LDT for all CPUs using current's mm. */
429 on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
431 mutex_unlock(&mm->context.lock);
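
Lines 421-431 are the write side of the protocol the comment at lines 50-51 depends on: writers serialize on context.lock, publish the new pointer with smp_store_release() (paired with the READ_ONCE() at line 47), then IPI every CPU in mm_cpumask() so each reloads its LDT (flush_ldt, lines 138-143) before returning to user mode. The pointer-publication half maps directly onto C11 atomics; here is a minimal, runnable userspace analogue (the names are illustrative, not kernel API, and acquire stands in for the kernel's dependency-ordered READ_ONCE):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct table {
        int nr_entries;
        int entries[4];
    };

    /* Analogue of mm->context.ldt: a pointer published by writers. */
    static _Atomic(struct table *) current_table;

    /* Reader side, like load_mm_ldt(): snapshot the pointer once. */
    static void *reader(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 1000000; i++) {
            struct table *t = atomic_load_explicit(&current_table,
                                                   memory_order_acquire);
            if (t && t->nr_entries != 4)
                abort();    /* would mean we saw a half-built table */
        }
        return NULL;
    }

    /* Writer side, like install_ldt(): build fully, then publish. */
    static void install(struct table *t)
    {
        atomic_store_explicit(&current_table, t, memory_order_release);
    }

    int main(void)
    {
        pthread_t r;
        struct table *t = malloc(sizeof(*t));

        t->nr_entries = 4;
        for (int i = 0; i < 4; i++)
            t->entries[i] = i;

        pthread_create(&r, NULL, reader, NULL);
        install(t);     /* everything written above is visible to readers */
        pthread_join(r, NULL);
        puts("ok");
        return 0;
    }

The part C11 cannot express this simply is reclamation: the kernel may only free the old table after the IPI broadcast proves no CPU still uses it, which this demo sidesteps by never freeing.
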
451 int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm)
473 retval = map_ldt_struct(mm, new_ldt, 0);
475 free_ldt_pgtables(mm);
479 mm->context.ldt = new_ldt;
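
Lines 451-479 are the fork path: ldt_dup_context() copies the parent's table into a fresh ldt_struct and maps it into slot 0 of the child. Note the plain assignment at line 479 where install_ldt() needs smp_store_release(): the child mm is not running on any CPU yet, so neither ordering nor an IPI is required. Condensed (alloc_ldt_struct()/free_ldt_struct() are the real helpers; locking and error labels are elided):

    new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries);
    if (!new_ldt)
        return -ENOMEM;

    memcpy(new_ldt->entries, old_mm->context.ldt->entries,
           new_ldt->nr_entries * LDT_ENTRY_SIZE);

    retval = map_ldt_struct(mm, new_ldt, 0);
    if (retval) {
        /* The child never had an LDT; drop the slot page tables too. */
        free_ldt_pgtables(mm);
        free_ldt_struct(new_ldt);
        return retval;
    }
    mm->context.ldt = new_ldt;  /* no release needed: mm not live yet */
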
491 void destroy_context_ldt(struct mm_struct *mm)
493 free_ldt_struct(mm->context.ldt);
494 mm->context.ldt = NULL;
497 void ldt_arch_exit_mmap(struct mm_struct *mm)
499 free_ldt_pgtables(mm);
504 struct mm_struct *mm = current->mm;
508 down_read(&mm->context.ldt_usr_sem);
510 if (!mm->context.ldt) {
518 entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
522 if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
537 up_read(&mm->context.ldt_usr_sem);
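
Lines 504-537 belong to the read half of modify_ldt(): take ldt_usr_sem shared, size the copy from nr_entries (line 518), copy out (line 522), and return the byte count; the real function also zero-fills the user buffer past the table when the caller asked for more than exists. Condensed:

    down_read(&mm->context.ldt_usr_sem);

    if (!mm->context.ldt) {
        retval = 0;                 /* no LDT installed: zero bytes */
        goto out_unlock;
    }

    entries_size = mm->context.ldt->nr_entries * LDT_ENTRY_SIZE;
    if (entries_size > bytecount)
        entries_size = bytecount;   /* never copy more than asked for */

    if (copy_to_user(ptr, mm->context.ldt->entries, entries_size)) {
        retval = -EFAULT;
        goto out_unlock;
    }
    retval = bytecount;

    out_unlock:
        up_read(&mm->context.ldt_usr_sem);
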
580 struct mm_struct *mm = current->mm;
619 if (down_write_killable(&mm->context.ldt_usr_sem))
622 old_ldt = mm->context.ldt;
643 error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
651 free_ldt_pgtables(mm);
656 install_ldt(mm, new_ldt);
657 unmap_ldt_struct(mm, old_ldt);
662 up_write(&mm->context.ldt_usr_sem);
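
The tail of write_ldt() (lines 643-657) is where the two user slots pay off: the new table is mapped into whichever slot the old table does not occupy (old_ldt ? !old_ldt->slot : 0), the pointer is published with install_ldt(), and only then is the old slot torn down. That order keeps the old LDT usable on every CPU right up to the moment the IPI switches it over. Lightly condensed:

    /* Map the new table into the slot the old table does not use. */
    error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0);
    if (error) {
        /* Only free slot page tables if this mm never had an LDT. */
        if (!old_ldt)
            free_ldt_pgtables(mm);
        free_ldt_struct(new_ldt);
        goto out_unlock;
    }

    /* Publish and IPI (line 429); CPUs reload before user mode. */
    install_ldt(mm, new_ldt);

    /* Nothing references the old slot any more; unmap and free it. */
    unmap_ldt_struct(mm, old_ldt);
    free_ldt_struct(old_ldt);
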