/*	$NetBSD: i915_gem_pm.c,v 1.3 2021/12/19 11:33:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_pm.c,v 1.3 2021/12/19 11:33:30 riastradh Exp $");

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"

#include <linux/nbsd-namespace.h>

/*
 * First phase of GEM suspend: quiesce in-flight work before the device
 * is powered down.  Cancels the mmap-fault auto-wakeref (timeout 0 ==
 * release now), flushes the driver workqueue, asks the GT to prepare
 * for suspend, and drains objects queued for deferred freeing.
 */
void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("%s\n", dev_name(i915->drm.dev));

	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the
	 * last context image is coherent, we have to switch away from it.
	 * That leaves the i915->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	intel_gt_suspend_prepare(&i915->gt);

	i915_gem_drain_freed_objects(i915);
}

/*
 * Return the first object on @list (linked through obj->mm.link), or
 * NULL if the list is empty.
 */
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
{
	return list_first_entry_or_null(list,
					struct drm_i915_gem_object,
					mm.link);
}

/*
 * Final phase of GEM suspend.  Performs the late GT suspend step and
 * then flushes every object on the shrink and purge lists out of the
 * GTT domain so that main memory holds a coherent copy of its contents
 * for the hibernation image.
 */
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.shrink_list,
		&i915->mm.purge_list,
		NULL
	}, **phase;
	unsigned long flags;

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that is of the upmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	intel_gt_suspend_late(&i915->gt);

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	for (phase = phases; *phase; phase++) {
		LIST_HEAD(keep);

		while ((obj = first_mm_object(*phase))) {
			/*
			 * Move the object onto a private list before we
			 * drop the lock, so first_mm_object() never hands
			 * us the same object twice and the loop makes
			 * forward progress; everything is spliced back
			 * onto the phase list below.
			 */
			list_move_tail(&obj->mm.link, &keep);

			/* Beware the background _i915_gem_free_objects */
			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			/*
			 * Drop the irq-safe spinlock before taking the
			 * object lock; reacquired after the flush.
			 */
			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			i915_gem_object_lock(obj);
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
		}

		list_splice_tail(&keep, *phase);
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

/*
 * Resume GEM after system suspend/hibernation: bring the GT back up.
 */
void i915_gem_resume(struct drm_i915_private *i915)
{
	GEM_TRACE("%s\n", dev_name(i915->drm.dev));

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	intel_gt_resume(&i915->gt);
}