/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2015 Intel Corporation
 */

#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include "gt/intel_gt_requests.h"
#include "gt/intel_gt.h"

#include "i915_trace.h"

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

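/* Can the object's backing pages be returned to the system right now? */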
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/*
	 * We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

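/*
 * Translate the shrinker control flags into unbind flags and try to release
 * the object's VMA bindings, so that its backing pages can then be dropped.
 */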
static bool drop_pages(struct drm_i915_gem_object *obj,
		       unsigned long shrink, bool trylock_vm)
{
	unsigned long flags;

	flags = 0;
	if (shrink & I915_SHRINK_ACTIVE)
		flags |= I915_GEM_OBJECT_UNBIND_ACTIVE;
	if (!(shrink & I915_SHRINK_BOUND))
		flags |= I915_GEM_OBJECT_UNBIND_TEST;
	if (trylock_vm)
		flags |= I915_GEM_OBJECT_UNBIND_VM_TRYLOCK;

	if (i915_gem_object_unbind(obj, flags) == 0)
		return true;

	return false;
}

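/*
 * Ask the backend to shrink the object, optionally writing its contents out
 * to swap. Backends without a shrink() op are left untouched and report
 * success.
 */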
static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags)
{
	if (obj->ops->shrink) {
		unsigned int shrink_flags = 0;

		if (!(flags & I915_SHRINK_ACTIVE))
			shrink_flags |= I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT;

		if (flags & I915_SHRINK_WRITEBACK)
			shrink_flags |= I915_GEM_OBJECT_SHRINK_WRITEBACK;

		return obj->ops->shrink(obj, shrink_flags);
	}

	return 0;
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @ww: i915 gem ww acquire ctx, or NULL
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @shrink. This is e.g.
 * useful when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use for other reasons
 * (like cpu mmaps), or the mm core may have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct i915_gem_ww_ctx *ww,
		struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned int shrink)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &i915->mm.purge_list, ~0u },
		{
			&i915->mm.shrink_list,
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
		},
		{ NULL, 0 },
	}, *phase;
	intel_wakeref_t wakeref = 0;
	unsigned long count = 0;
	unsigned long scanned = 0;
	int err = 0, i = 0;
	struct intel_gt *gt;

	/* CHV + VTD workaround uses stop_machine(); need to trylock vm->mutex */
	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);

	trace_i915_gem_shrink(i915, target, shrink);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if (shrink & I915_SHRINK_BOUND) {
		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
		if (!wakeref)
			shrink &= ~I915_SHRINK_BOUND;
	}

	/*
	 * When shrinking the active list, we should also consider active
	 * contexts. Active contexts are pinned until they are retired, and
	 * so cannot simply be unbound to retire and unpin their pages. To
	 * shrink the contexts, we must wait until the gpu is idle and has
	 * completed its switch to the kernel context. In short, we do
	 * not have a good mechanism for idling a specific context, but
	 * what we can do is give them a kick so that we do not keep idle
	 * contexts around longer than is necessary.
	 */
	if (shrink & I915_SHRINK_ACTIVE) {
		for_each_gt(gt, i915, i)
			/* Retire requests to unpin all idle contexts */
			intel_gt_retire_requests(gt);
	}

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count of 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;
		unsigned long flags;

		if ((shrink & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (shrink & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(shrink & I915_SHRINK_ACTIVE) &&
			    i915_gem_object_is_framebuffer(obj))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			/* May arrive from get_pages on another bo */
			if (!ww) {
				if (!i915_gem_object_trylock(obj, NULL))
					goto skip;
			} else {
				err = i915_gem_object_lock(obj, ww);
				if (err)
					goto skip;
			}

			if (drop_pages(obj, shrink, trylock_vm) &&
			    !__i915_gem_object_put_pages(obj) &&
			    !try_to_writeback(obj, shrink))
				count += obj->base.size >> PAGE_SHIFT;

			if (!ww)
				i915_gem_object_unlock(obj);

			scanned += obj->base.size >> PAGE_SHIFT;
skip:
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
			if (err)
				break;
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		if (err)
			break;
	}

	if (shrink & I915_SHRINK_BOUND)
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	if (err)
		return err;

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the gpu or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		freed = i915_gem_shrink(NULL, i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND);
	}

	return freed;
}

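/*
 * Core-MM callback: report how many pages we could plausibly reclaim, and
 * tune the preferred scan batch size for the next pass.
 */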
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 = shrinker->private_data;
	unsigned long num_objects;
	unsigned long count;

	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
	num_objects = READ_ONCE(i915->mm.shrink_count);

	/*
	 * Update our preferred vmscan batch size for the next pass.
	 * Our rough guess for an effective batch size is two available GEM
	 * objects' worth of pages. That is, we don't want the shrinker to
	 * fire until it is worth the cost of freeing an entire GEM object.
	 */
	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

		i915->mm.shrinker->batch =
			max((i915->mm.shrinker->batch + avg) >> 1,
			    128ul /* default SHRINK_BATCH */);
	}

	return count;
}

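/*
 * Core-MM callback: try to release sc->nr_to_scan pages. If we fall short
 * while running on behalf of kswapd, retry more aggressively by waking the
 * device and including active objects and writeback.
 */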
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 = shrinker->private_data;
	unsigned long freed;

	sc->nr_scanned = 0;

	freed = i915_gem_shrink(NULL, i915,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND);
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			freed += i915_gem_shrink(NULL, i915,
						 sc->nr_to_scan - sc->nr_scanned,
						 &sc->nr_scanned,
						 I915_SHRINK_ACTIVE |
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND |
						 I915_SHRINK_WRITEBACK);
		}
	}

	return sc->nr_scanned ? freed : SHRINK_STOP;
}

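/*
 * OOM notifier: purge as much backing storage as we can and report how many
 * pages remain pinned versus still available for reclaim.
 */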
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, available, freed_pages;
	intel_wakeref_t wakeref;
	unsigned long flags;

	freed_pages = 0;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_WRITEBACK);

	/*
	 * Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	available = unevictable = 0;
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			available += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	if (freed_pages || available)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned, %lu pages left available.\n",
			freed_pages, unevictable, available);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

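/*
 * vmap purge notifier: release vmap()ed object backing store and unbind any
 * idle GGTT vmas with cached iomaps, since both consume vmap address space.
 */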
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	intel_wakeref_t wakeref;
	struct intel_gt *gt;
	int i;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_VMAPS);

	/* We also want to clear any cached iomaps as they wrap vmap */
	for_each_gt(gt, i915, i) {
		mutex_lock(&gt->ggtt->vm.mutex);
		list_for_each_entry_safe(vma, next,
					 &gt->ggtt->vm.bound_list, vm_link) {
			unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
			struct drm_i915_gem_object *obj = vma->obj;

			if (!vma->iomap || i915_vma_is_active(vma))
				continue;

			if (!i915_gem_object_trylock(obj, NULL))
				continue;

			if (__i915_vma_unbind(vma) == 0)
				freed_pages += count;

			i915_gem_object_unlock(obj);
		}
		mutex_unlock(&gt->ggtt->vm.mutex);
	}

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

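/*
 * Register the shrinker with the core MM, together with the oom and
 * vmap-purge notifiers that let us react to wider memory pressure.
 */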
void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
	i915->mm.shrinker = shrinker_alloc(0, "drm-i915_gem");
	if (!i915->mm.shrinker) {
		drm_WARN_ON(&i915->drm, 1);
	} else {
		i915->mm.shrinker->scan_objects = i915_gem_shrinker_scan;
		i915->mm.shrinker->count_objects = i915_gem_shrinker_count;
		i915->mm.shrinker->batch = 4096;
		i915->mm.shrinker->private_data = i915;

		shrinker_register(i915->mm.shrinker);
	}

	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));

	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	drm_WARN_ON(&i915->drm,
		    register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

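/* Unwind i915_gem_driver_register__shrinker() in reverse order. */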
void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
	drm_WARN_ON(&i915->drm,
		    unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
	drm_WARN_ON(&i915->drm,
		    unregister_oom_notifier(&i915->mm.oom_notifier));
	shrinker_free(i915->mm.shrinker);
}

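/*
 * Teach lockdep that the given mutex may be taken underneath the shrinker,
 * i.e. inside fs_reclaim, so that inversions against direct reclaim are
 * reported even before they can happen for real.
 */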
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, _RET_IP_);

	fs_reclaim_release(GFP_KERNEL);
}

/**
 * i915_gem_object_make_unshrinkable - Hide the object from the shrinker
 * @obj: The GEM object.
 *
 * By default all object types that support shrinking (see IS_SHRINKABLE) will
 * also make the object visible to the shrinker after allocating the system
 * memory pages.
 *
 * This is typically used for special kernel internal objects that can't be
 * easily processed by the shrinker, like if they are perma-pinned.
 */
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	/*
	 * We can only be called while the pages are pinned or when
	 * the pages are released. If pinned, we should only be called
	 * from a single caller under controlled conditions; and on release
	 * only one caller may release us. Neither of the two may cross.
	 */
	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
	    !list_empty(&obj->mm.link)) {
		list_del_init(&obj->mm.link);
		i915->mm.shrink_count--;
		i915->mm.shrink_memory -= obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

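/*
 * Drop a shrink_pin reference and, once the last pin is gone, place the
 * object on @head so that the shrinker can find it again.
 */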
static void ___i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
					       struct list_head *head)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	if (!i915_gem_object_is_shrinkable(obj))
		return;

	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	GEM_BUG_ON(!kref_read(&obj->base.refcount));
	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
		GEM_BUG_ON(!list_empty(&obj->mm.link));

		list_add_tail(&obj->mm.link, head);
		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

/**
 * __i915_gem_object_make_shrinkable - Move the object to the tail of the
 * shrinkable list
 * @obj: The GEM object.
 *
 * Objects on this list might be swapped out. Used with WILLNEED objects.
 *
 * DO NOT USE. This is intended to be called on very special objects that don't
 * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
 * underneath.
 */
void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	___i915_gem_object_make_shrinkable(obj,
					   &obj_to_i915(obj)->mm.shrink_list);
}

/**
 * __i915_gem_object_make_purgeable - Move the object to the tail of the
 * purgeable list
 * @obj: The GEM object.
 *
 * Objects on this list might be swapped out. Used with DONTNEED objects.
 *
 * DO NOT USE. This is intended to be called on very special objects that don't
 * yet have mm.pages, but are guaranteed to have potentially reclaimable pages
 * underneath.
 */
void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	___i915_gem_object_make_shrinkable(obj,
					   &obj_to_i915(obj)->mm.purge_list);
}

/**
 * i915_gem_object_make_shrinkable - Move the object to the tail of the
 * shrinkable list
 * @obj: The GEM object.
 *
 * Objects on this list might be swapped out. Used with WILLNEED objects.
 *
 * MUST only be called on objects which have backing pages.
 *
 * MUST be balanced with a previous call to i915_gem_object_make_unshrinkable().
 */
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	__i915_gem_object_make_shrinkable(obj);
}

/**
 * i915_gem_object_make_purgeable - Move the object to the tail of the
 * purgeable list
 * @obj: The GEM object.
 *
 * Used with DONTNEED objects. Unlike with shrinkable objects, the shrinker
 * will attempt to discard the backing pages, instead of trying to swap them
 * out.
 *
 * MUST only be called on objects which have backing pages.
 *
 * MUST be balanced with a previous call to i915_gem_object_make_unshrinkable().
 */
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	__i915_gem_object_make_purgeable(obj);
}