intel_wakeref.c revision 1.2
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"

static void rpm_get(struct intel_wakeref *wf)
{
	wf->wakeref = intel_runtime_pm_get(wf->rpm);
}

static void rpm_put(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

	intel_runtime_pm_put(wf->rpm, wakeref);
	INTEL_WAKEREF_BUG_ON(!wakeref);
}

int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		rpm_get(wf);

		err = wf->ops->get(wf);
		if (unlikely(err)) {
			rpm_put(wf);
			mutex_unlock(&wf->mutex);
			return err;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	return 0;
}

static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_dec_and_test(&wf->count)))
		goto unlock;

	/* ops->put() must reschedule its own release on error/deferral */
	if (likely(!wf->ops->put(wf))) {
		rpm_put(wf);
		wake_up_var(&wf->wakeref);
	}

unlock:
	mutex_unlock(&wf->mutex);
}

void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
		mod_delayed_work(system_wq, &wf->work,
				 FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
		return;
	}

	____intel_wakeref_put_last(wf);
}

static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);

	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}
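/*
 * The functions above are only the slow paths. Callers normally go
 * through lock-free inlines that adjust wf->count and fall back here
 * only when the count may cross the 0 <-> 1 boundary. A minimal
 * sketch of that pairing (the real inlines live in intel_wakeref.h;
 * treat this as illustrative rather than authoritative):
 *
 *	static inline int intel_wakeref_get(struct intel_wakeref *wf)
 *	{
 *		might_sleep();
 *		if (unlikely(!atomic_inc_not_zero(&wf->count)))
 *			return __intel_wakeref_get_first(wf);
 *		return 0;
 *	}
 *
 *	static inline void __intel_wakeref_put(struct intel_wakeref *wf,
 *					       unsigned long flags)
 *	{
 *		INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
 *		if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
 *			__intel_wakeref_put_last(wf, flags);
 *	}
 *
 * atomic_inc_not_zero() succeeds only while another reference already
 * holds the hardware awake, and atomic_add_unless(.., -1, 1) refuses
 * to drop the final reference, so the mutex and the rpm/ops callbacks
 * are only paid on the first get and the last put.
 */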
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key)
{
	wf->rpm = rpm;
	wf->ops = ops;

#ifdef __linux__
	__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
#else
	rw_init(&wf->mutex, "wakeref.mutex");
#endif
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;

	INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
	lockdep_init_map(&wf->work.work.lockdep_map,
			 "wakeref.work", &key->work, 0);
}

int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	int err;

	might_sleep();

	err = wait_var_event_killable(&wf->wakeref,
				      !intel_wakeref_is_active(wf));
	if (err)
		return err;

	intel_wakeref_unlock_wait(wf);
	return 0;
}
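/*
 * Illustrative teardown ordering (the caller below is hypothetical,
 * not part of this file): drop the final reference, which may defer
 * the release to wf->work, then wait for the wakeref to become idle
 * before tearing down anything ops->put() might still touch.
 *
 *	intel_wakeref_put(wf);			// may defer via wf->work
 *	err = intel_wakeref_wait_for_idle(wf);	// killable wait, above
 *	if (err)
 *		return err;			// fatal signal received
 */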
#ifdef __linux__
static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->rpm, wakeref);
}
#else
static void wakeref_auto_timeout(void *arg)
{
	struct intel_wakeref_auto *wf = arg;
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->rpm, wakeref);
}
#endif

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm)
{
	mtx_init(&wf->lock, IPL_TTY);
#ifdef __linux__
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
#else
	timeout_set(&wf->timer, wakeref_auto_timeout, wf);
#endif
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->rpm = rpm;
}

void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* Our mission is only to extend an already active wakeref */
	assert_rpm_wakelock_held(wf->rpm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}

void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
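/*
 * Illustrative use of the auto wakeref (the names below are
 * hypothetical, not defined in this file): refresh the timer on every
 * user access so the device stays awake for a grace period after the
 * last one, and pass a zero timeout to drop the reference immediately.
 *
 *	intel_wakeref_auto_init(&wf_auto, rpm);
 *
 *	// on each access: hold the device awake for another ~100ms
 *	intel_wakeref_auto(&wf_auto, msecs_to_jiffies(100));
 *
 *	// teardown: cancel the timer and release any pending wakeref
 *	intel_wakeref_auto_fini(&wf_auto);
 */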