// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/sched/user.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/pagewalk.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/secretmem.h>

#include "internal.h"

struct mlock_fbatch {
	local_lock_t lock;
	struct folio_batch fbatch;
};

static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

bool can_do_mlock(void)
{
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return true;
	if (capable(CAP_IPC_LOCK))
		return true;
	return false;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked folios are marked with the PG_mlocked flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked folio [folio_test_mlocked(folio)] is unevictable.  As such, it
 * will be ostensibly placed on the LRU "unevictable" list (actually no such
 * list exists), rather than the [in]active lists. PG_unevictable is set to
 * indicate the unevictable state.
 */
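
/*
 * Rough lifecycle sketch (illustration only, mirroring the helpers below;
 * not a definitive state machine):
 *
 *	mlock_folio()	sets PG_mlocked, moves the folio to the unevictable
 *			LRU and raises folio->mlock_count
 *	munlock_folio()	drops mlock_count; on the last munlock PG_mlocked is
 *			cleared and the folio is rescued back to the
 *			[in]active LRU if it is otherwise evictable
 */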

static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
{
	/* There is nothing more we can do while it's off LRU */
	if (!folio_test_clear_lru(folio))
		return lruvec;

	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	if (unlikely(folio_evictable(folio))) {
		/*
		 * This is a little surprising, but quite possible: PG_mlocked
		 * must have got cleared already by another CPU.  Could this
		 * folio be unevictable?  I'm not sure, but move it now if so.
		 */
		if (folio_test_unevictable(folio)) {
			lruvec_del_folio(lruvec, folio);
			folio_clear_unevictable(folio);
			lruvec_add_folio(lruvec, folio);

			__count_vm_events(UNEVICTABLE_PGRESCUED,
					  folio_nr_pages(folio));
		}
		goto out;
	}

	if (folio_test_unevictable(folio)) {
		if (folio_test_mlocked(folio))
			folio->mlock_count++;
		goto out;
	}

	lruvec_del_folio(lruvec, folio);
	folio_clear_active(folio);
	folio_set_unevictable(folio);
	folio->mlock_count = !!folio_test_mlocked(folio);
	lruvec_add_folio(lruvec, folio);
	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
	folio_set_lru(folio);
	return lruvec;
}

static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)
{
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	/* As above, this is a little surprising, but possible */
	if (unlikely(folio_evictable(folio)))
		goto out;

	folio_set_unevictable(folio);
	folio->mlock_count = !!folio_test_mlocked(folio);
	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
out:
	lruvec_add_folio(lruvec, folio);
	folio_set_lru(folio);
	return lruvec;
}

static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
{
	int nr_pages = folio_nr_pages(folio);
	bool isolated = false;

	if (!folio_test_clear_lru(folio))
		goto munlock;

	isolated = true;
	lruvec = folio_lruvec_relock_irq(folio, lruvec);

	if (folio_test_unevictable(folio)) {
		/* Then mlock_count is maintained, but might undercount */
		if (folio->mlock_count)
			folio->mlock_count--;
		if (folio->mlock_count)
			goto out;
	}
	/* else assume that was the last mlock: reclaim will fix it if not */

munlock:
	if (folio_test_clear_mlocked(folio)) {
		__zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
		if (isolated || !folio_test_unevictable(folio))
			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
		else
			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
	}

	/* folio_evictable() has to be checked *after* clearing Mlocked */
	if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_unevictable(folio);
		lruvec_add_folio(lruvec, folio);
		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	}
out:
	if (isolated)
		folio_set_lru(folio);
	return lruvec;
}

/*
 * Flags held in the low bits of a struct folio pointer on the mlock_fbatch.
 */
#define LRU_FOLIO 0x1
#define NEW_FOLIO 0x2
static inline struct folio *mlock_lru(struct folio *folio)
{
	return (struct folio *)((unsigned long)folio + LRU_FOLIO);
}

static inline struct folio *mlock_new(struct folio *folio)
{
	return (struct folio *)((unsigned long)folio + NEW_FOLIO);
}
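
/*
 * Tagging example (illustrative only): struct folio pointers are at least
 * word-aligned, so bits 0-1 are free to carry LRU_FOLIO/NEW_FOLIO while the
 * folio sits on the batch:
 *
 *	entry = mlock_lru(folio);			// folio | LRU_FOLIO
 *	mlock = (unsigned long)entry & (LRU_FOLIO | NEW_FOLIO);
 *	folio = (struct folio *)((unsigned long)entry - mlock);
 *
 * mlock_folio_batch() below performs exactly this decode before dispatching
 * to __mlock_folio(), __mlock_new_folio() or __munlock_folio().
 */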

/*
 * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can
 * make use of such folio pointer flags in future, but for now just keep it for
 * mlock.  We could use three separate folio batches instead, but one feels
 * better (munlocking a full folio batch does not need to drain mlocking folio
 * batches first).
 */
static void mlock_folio_batch(struct folio_batch *fbatch)
{
	struct lruvec *lruvec = NULL;
	unsigned long mlock;
	struct folio *folio;
	int i;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		folio = fbatch->folios[i];
		mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
		folio = (struct folio *)((unsigned long)folio - mlock);
		fbatch->folios[i] = folio;

		if (mlock & LRU_FOLIO)
			lruvec = __mlock_folio(folio, lruvec);
		else if (mlock & NEW_FOLIO)
			lruvec = __mlock_new_folio(folio, lruvec);
		else
			lruvec = __munlock_folio(folio, lruvec);
	}

	if (lruvec)
		unlock_page_lruvec_irq(lruvec);
	folios_put(fbatch);
}

void mlock_drain_local(void)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	if (folio_batch_count(fbatch))
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

void mlock_drain_remote(int cpu)
{
	struct folio_batch *fbatch;

	WARN_ON_ONCE(cpu_online(cpu));
	fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
	if (folio_batch_count(fbatch))
		mlock_folio_batch(fbatch);
}

bool need_mlock_drain(int cpu)
{
	return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
}

/**
 * mlock_folio - mlock a folio already on (or temporarily off) LRU
 * @folio: folio to be mlocked.
 */
void mlock_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);

	if (!folio_test_set_mlocked(folio)) {
		int nr_pages = folio_nr_pages(folio);

		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

/**
 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
 * @folio: folio to be mlocked, either normal or a THP head.
 */
void mlock_new_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	int nr_pages = folio_nr_pages(folio);

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	folio_set_mlocked(folio);

	zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);

	folio_get(folio);
	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

/**
 * munlock_folio - munlock a folio
 * @folio: folio to be munlocked, either normal or a THP head.
 */
void munlock_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	/*
	 * folio_test_clear_mlocked(folio) must be left to __munlock_folio(),
	 * which will check whether the folio is multiply mlocked.
	 */
	folio_get(folio);
	if (!folio_batch_add(fbatch, folio) ||
	    folio_test_large(folio) || lru_cache_disabled())
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);
}

static inline unsigned int folio_mlock_step(struct folio *folio,
		pte_t *pte, unsigned long addr, unsigned long end)
{
	unsigned int count, i, nr = folio_nr_pages(folio);
	unsigned long pfn = folio_pfn(folio);
	pte_t ptent = ptep_get(pte);

	if (!folio_test_large(folio))
		return 1;

	count = pfn + nr - pte_pfn(ptent);
	count = min_t(unsigned int, count, (end - addr) >> PAGE_SHIFT);

	for (i = 0; i < count; i++, pte++) {
		pte_t entry = ptep_get(pte);

		if (!pte_present(entry))
			break;
		if (pte_pfn(entry) - pfn >= nr)
			break;
	}

	return i;
}
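
/*
 * Worked example for folio_mlock_step() (hypothetical numbers): a 16-page
 * folio with pfn 0x1000, first seen at a pte mapping pfn 0x1004, has
 * 0x1000 + 16 - 0x1004 = 12 of its pages left; if only 8 ptes remain before
 * 'end', count is clamped to 8, and the scan stops earlier still at the
 * first non-present pte or one mapping outside the folio.  The return value
 * is how many ptes the caller may advance in one step.
 */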

static inline bool allow_mlock_munlock(struct folio *folio,
		struct vm_area_struct *vma, unsigned long start,
		unsigned long end, unsigned int step)
{
	/*
	 * For munlock, allow munlocking a large folio that is only partially
	 * mapped to the VMA, since the folio may have been mlocked before
	 * the VMA was split.
	 *
	 * Under memory pressure such a large folio can be split, and the
	 * pages that no longer sit in a VM_LOCKED VMA can then be reclaimed.
	 */
	if (!(vma->vm_flags & VM_LOCKED))
		return true;

	/* folio_within_range() cannot take KSM, but any small folio is OK */
	if (!folio_test_large(folio))
		return true;

	/* folio not in range [start, end), skip mlock */
	if (!folio_within_range(folio, vma, start, end))
		return false;

	/* folio is not fully mapped, skip mlock */
	if (step != folio_nr_pages(folio))
		return false;

	return true;
}

static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
			   unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte;
	pte_t ptent;
	struct folio *folio;
	unsigned int step = 1;
	unsigned long start = addr;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (!pmd_present(*pmd))
			goto out;
		if (is_huge_zero_pmd(*pmd))
			goto out;
		folio = page_folio(pmd_page(*pmd));
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);
		goto out;
	}

	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!start_pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}

	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);
		if (!pte_present(ptent))
			continue;
		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		step = folio_mlock_step(folio, pte, addr, end);
		if (!allow_mlock_munlock(folio, vma, start, end, step))
			goto next_entry;

		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(folio);
		else
			munlock_folio(folio);

next_entry:
		pte += step - 1;
		addr += (step - 1) << PAGE_SHIFT;
	}
	pte_unmap(start_pte);
out:
	spin_unlock(ptl);
	cond_resched();
	return 0;
}

/*
 * mlock_vma_pages_range() - mlock any pages already in the range,
 *                           or munlock all pages in the range.
 * @vma - vma containing range to be mlock()ed or munlock()ed
 * @start - start address in @vma of the range
 * @end - end of range in @vma
 * @newflags - the new set of flags for @vma.
 *
 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
 * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
 */
static void mlock_vma_pages_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end, vm_flags_t newflags)
{
	static const struct mm_walk_ops mlock_walk_ops = {
		.pmd_entry = mlock_pte_range,
		.walk_lock = PGWALK_WRLOCK_VERIFY,
	};

	/*
	 * There is a slight chance that concurrent page migration,
	 * or page reclaim finding a page of this now-VM_LOCKED vma,
	 * will call mlock_vma_folio() and raise page's mlock_count:
	 * double counting, leaving the page unevictable indefinitely.
	 * Communicate this danger to mlock_vma_folio() with VM_IO,
	 * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
	 * mmap_lock is held in write mode here, so this weird
	 * combination should not be visible to other mmap_lock users;
	 * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
	 */
	if (newflags & VM_LOCKED)
		newflags |= VM_IO;
	vma_start_write(vma);
	vm_flags_reset_once(vma, newflags);

	lru_add_drain();
	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
	lru_add_drain();

	if (newflags & VM_IO) {
		newflags &= ~VM_IO;
		vm_flags_reset_once(vma, newflags);
	}
}
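
/*
 * Sketch of why the temporary VM_IO works (assumption about the helper in
 * internal.h: mlock_vma_folio() only mlocks when VM_LOCKED is set and no
 * VM_SPECIAL bit is, roughly):
 *
 *	if ((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED)
 *		mlock_folio(folio);
 *
 * so rmap walkers racing with the page walk above see VM_LOCKED | VM_IO and
 * skip the mlock, avoiding a double-counted mlock_count.
 */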

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes.
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
	       struct vm_area_struct **prev, unsigned long start,
	       unsigned long end, vm_flags_t newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	int nr_pages;
	int ret = 0;
	vm_flags_t oldflags = vma->vm_flags;

	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
	    vma_is_dax(vma) || vma_is_secretmem(vma))
		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
		goto out;

	vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!(newflags & VM_LOCKED))
		nr_pages = -nr_pages;
	else if (oldflags & VM_LOCKED)
		nr_pages = 0;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, populate_vma_page_range will bring it back.
	 */
	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
		/* No work to do, and mlocking twice would be wrong */
		vma_start_write(vma);
		vm_flags_reset(vma, newflags);
	} else {
		mlock_vma_pages_range(vma, start, end, newflags);
	}
out:
	*prev = vma;
	return ret;
}
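
/*
 * locked_vm accounting example (hypothetical numbers): mlock()ing 16 pages
 * of a previously unlocked range adds 16 to mm->locked_vm; munlock()ing the
 * same range (newflags without VM_LOCKED) subtracts 16; mlock()ing a range
 * that already had VM_LOCKED adds 0, because oldflags & VM_LOCKED forces
 * nr_pages to 0 above.
 */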

static int apply_vma_lock_flags(unsigned long start, size_t len,
				vm_flags_t flags)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *prev;
	VMA_ITERATOR(vmi, current->mm, start);

	VM_BUG_ON(offset_in_page(start));
	VM_BUG_ON(len != PAGE_ALIGN(len));
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = vma_iter_load(&vmi);
	if (!vma)
		return -ENOMEM;

	prev = vma_prev(&vmi);
	if (start > vma->vm_start)
		prev = vma;

	nstart = start;
	tmp = vma->vm_start;
	for_each_vma_range(vmi, vma, end) {
		int error;
		vm_flags_t newflags;

		if (vma->vm_start != tmp)
			return -ENOMEM;

		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
		newflags |= flags;
		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
		if (error)
			return error;
		tmp = vma_iter_end(&vmi);
		nstart = tmp;
	}

	if (tmp < end)
		return -ENOMEM;

	return 0;
}

/*
 * Walk the vmas overlapping [start, start + len) and return the number of
 * pages in that range that lie in VM_LOCKED vmas, i.e. that have already
 * been counted towards the mlock limit.  Pages locked on fault via
 * mlock2(,,MLOCK_ONFAULT) are included as well.
 */
static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
		unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long count = 0;
	unsigned long end;
	VMA_ITERATOR(vmi, mm, start);

	/* Don't overflow past ULONG_MAX */
	if (unlikely(ULONG_MAX - len < start))
		end = ULONG_MAX;
	else
		end = start + len;

	for_each_vma_range(vmi, vma, end) {
		if (vma->vm_flags & VM_LOCKED) {
			if (start > vma->vm_start)
				count -= (start - vma->vm_start);
			if (end < vma->vm_end) {
				count += end - vma->vm_start;
				break;
			}
			count += vma->vm_end - vma->vm_start;
		}
	}

	return count >> PAGE_SHIFT;
}
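
/*
 * Worked example (hypothetical addresses): with one VM_LOCKED vma covering
 * [0x10000, 0x20000) and a request for [0x18000, 0x30000), the loop adds
 * 0x20000 - 0x10000 = 0x10000 and subtracts start - vm_start = 0x8000,
 * returning 0x8000 >> PAGE_SHIFT = 8 already-locked pages (with 4K pages),
 * which do_mlock() then deducts from its prospective locked total.
 */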

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	start = untagged_addr(start);

	if (!can_do_mlock())
		return -EPERM;

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = len >> PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	locked += current->mm->locked_vm;
	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
		/*
		 * The requested region may intersect areas that are already
		 * mlocked and hence already accounted in mm->locked_vm, so
		 * those pages must not be counted again towards the new
		 * total.  Subtract them before checking the limit.
		 */
		locked -= count_mm_mlocked_page_nr(current->mm,
				start, len);
	}

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = apply_vma_lock_flags(start, len, flags);

	mmap_write_unlock(current->mm);
	if (error)
		return error;

	error = __mm_populate(start, len, 0);
	if (error)
		return __mlock_posix_error_return(error);
	return 0;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	return do_mlock(start, len, VM_LOCKED);
}

SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
{
	vm_flags_t vm_flags = VM_LOCKED;

	if (flags & ~MLOCK_ONFAULT)
		return -EINVAL;

	if (flags & MLOCK_ONFAULT)
		vm_flags |= VM_LOCKONFAULT;

	return do_mlock(start, len, vm_flags);
}
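
/*
 * Userspace view (illustrative only, not part of this file): a caller that
 * wants pages locked lazily might do
 *
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mlock2(buf, len, MLOCK_ONFAULT))
 *		perror("mlock2");
 *
 * With MLOCK_ONFAULT the VMA gets VM_LOCKED | VM_LOCKONFAULT and pages are
 * mlocked as they are faulted in, rather than being populated up front.
 */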

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	start = untagged_addr(start);

	len = PAGE_ALIGN(len + (offset_in_page(start)));
	start &= PAGE_MASK;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_vma_lock_flags(start, len, 0);
	mmap_write_unlock(current->mm);

	return ret;
}

/*
 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
 * and translate into the appropriate modifications to mm->def_flags and/or the
 * flags for all current VMAs.
 *
 * There are a couple of subtleties with this.  If mlockall() is called multiple
 * times with different flags, the values do not necessarily stack.  If mlockall
 * is called once including the MCL_FUTURE flag and then a second time without
 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
 */
static int apply_mlockall_flags(int flags)
{
	VMA_ITERATOR(vmi, current->mm, 0);
	struct vm_area_struct *vma, *prev = NULL;
	vm_flags_t to_add = 0;

	current->mm->def_flags &= ~VM_LOCKED_MASK;
	if (flags & MCL_FUTURE) {
		current->mm->def_flags |= VM_LOCKED;

		if (flags & MCL_ONFAULT)
			current->mm->def_flags |= VM_LOCKONFAULT;

		if (!(flags & MCL_CURRENT))
			goto out;
	}

	if (flags & MCL_CURRENT) {
		to_add |= VM_LOCKED;
		if (flags & MCL_ONFAULT)
			to_add |= VM_LOCKONFAULT;
	}

	for_each_vma(vmi, vma) {
		vm_flags_t newflags;

		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
		newflags |= to_add;

		/* Ignore errors */
		mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
			    newflags);
		cond_resched();
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
	    flags == MCL_ONFAULT)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = apply_mlockall_flags(flags);
	mmap_write_unlock(current->mm);
	if (!ret && (flags & MCL_CURRENT))
		mm_populate(0, TASK_SIZE);

	return ret;
}
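
/*
 * Userspace view (illustrative only): a latency-sensitive process typically
 * calls
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE))
 *		perror("mlockall");
 *
 * MCL_CURRENT locks every existing VMA via apply_mlockall_flags() and then
 * populates them with mm_populate(); MCL_FUTURE only sets VM_LOCKED in
 * mm->def_flags so that later mappings inherit it.
 */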

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	ret = apply_mlockall_flags(0);
	mmap_write_unlock(current->mm);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct ucounts *ucounts)
{
	unsigned long lock_limit, locked;
	long memlock;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit != RLIM_INFINITY)
		lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);

	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		goto out;
	}
	if (!get_ucounts(ucounts)) {
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
		allowed = 0;
		goto out;
	}
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct ucounts *ucounts)
{
	spin_lock(&shmlock_user_lock);
	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	spin_unlock(&shmlock_user_lock);
	put_ucounts(ucounts);
}