/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_gt_pm.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"
#include "i915_trace.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)

#define INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS (1)

struct i915_gem_ww_ctx;

void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

void i915_context_module_exit(void);
int i915_context_module_init(void);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

int intel_context_reconfigure_sseu(struct intel_context *ce,
				   const struct intel_sseu sseu);

#define PARENT_SCRATCH_SIZE PAGE_SIZE

static inline bool intel_context_is_child(struct intel_context *ce)
{
	return !!ce->parallel.parent;
}

static inline bool intel_context_is_parent(struct intel_context *ce)
{
	return !!ce->parallel.number_children;
}

static inline bool intel_context_is_pinned(struct intel_context *ce);

static inline struct intel_context *
intel_context_to_parent(struct intel_context *ce)
{
	if (intel_context_is_child(ce)) {
		/*
		 * The parent holds a ref count on the child, so it is always
		 * safe for the parent to access the child; but the child has
		 * a pointer to the parent without a ref. To ensure this is
		 * safe, the child should only access the parent pointer while
		 * the parent is pinned.
		 */
		GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));

		return ce->parallel.parent;
	} else {
		return ce;
	}
}

static inline bool intel_context_is_parallel(struct intel_context *ce)
{
	return intel_context_is_child(ce) || intel_context_is_parent(ce);
}

void intel_context_bind_parent_child(struct intel_context *parent,
				     struct intel_context *child);

#define for_each_child(parent, ce)\
	list_for_each_entry(ce, &(parent)->parallel.child_list,\
			    parallel.child_link)
#define for_each_child_safe(parent, ce, cn)\
	list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list,\
				 parallel.child_link)

/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}
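/*
 * Illustrative sketch (not part of the API; the caller code below is
 * hypothetical): the pinned status is only stable while pin_mutex is held,
 * so a query that must not race against bind/unbind looks like:
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *	pinned = intel_context_is_pinned(ce);
 *	intel_context_unlock_pinned(ce);
 */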
static inline void intel_context_cancel_request(struct intel_context *ce,
						struct i915_request *rq)
{
	GEM_BUG_ON(!ce->ops->cancel_request);
	return ce->ops->cancel_request(ce, rq);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}

int __intel_context_do_pin(struct intel_context *ce);
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline int intel_context_pin_ww(struct intel_context *ce,
				       struct i915_gem_ww_ctx *ww)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin_ww(ce, ww);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}
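/*
 * Illustrative pinning pattern (a sketch only; error handling and any
 * surrounding locking are the caller's responsibility): the first pin binds
 * the context state, later pins are plain reference bumps via pin_if_active,
 * and every successful pin must be balanced by intel_context_unpin().
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *	... build and submit requests on ce ...
 *	intel_context_unpin(ce);
 */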
void __intel_context_do_unpin(struct intel_context *ce, int sub);

static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
{
	__intel_context_do_unpin(ce, 2);
}

static inline void intel_context_unpin(struct intel_context *ce)
{
	if (!ce->ops->sched_disable) {
		__intel_context_do_unpin(ce, 1);
	} else {
		/*
		 * Move ownership of this pin to the scheduling disable which is
		 * an async operation. When that operation completes the above
		 * intel_context_sched_disable_unpin is called potentially
		 * unpinning the context.
		 */
		while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
			if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
				ce->ops->sched_disable(ce);
				break;
			}
		}
	}
}

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (ce->active_count++)
		return;

	ce->ops->enter(ce);
	ce->wakeref = intel_gt_pm_get(ce->vm->gt);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
		       test_bit(CONTEXT_IS_PARKING, &ce->flags));
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (--ce->active_count)
		return;

	intel_gt_pm_put_async(ce->vm->gt, ce->wakeref);
	ce->ops->exit(ce);
}

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}

static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	if (intel_context_is_parent(ce))
		err = mutex_lock_interruptible_nested(&tl->mutex, 0);
	else if (intel_context_is_child(ce))
		err = mutex_lock_interruptible_nested(&tl->mutex,
						      ce->parallel.child_index + 1);
	else
		err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}
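/*
 * Illustrative sketch of the timeline locking rule (hypothetical caller, not
 * a definitive recipe): request construction on a context is serialised by
 * its timeline mutex, and parent/child contexts of a parallel submit take
 * distinct lockdep nesting levels so that all siblings may be held at once.
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *	... emit the request on ce ...
 *	intel_context_timeline_unlock(tl);
 */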
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

struct i915_request *intel_context_get_active_request(struct intel_context *ce);

static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline void intel_context_close(struct intel_context *ce)
{
	set_bit(CONTEXT_CLOSED_BIT, &ce->flags);

	if (ce->ops->close)
		ce->ops->close(ce);
}

static inline bool intel_context_is_closed(const struct intel_context *ce)
{
	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}

static inline bool intel_context_has_inflight(const struct intel_context *ce)
{
	return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

bool intel_context_ban(struct intel_context *ce, struct i915_request *rq);

static inline bool intel_context_is_schedulable(const struct intel_context *ce)
{
	return !test_bit(CONTEXT_EXITING, &ce->flags) &&
	       !test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_is_exiting(const struct intel_context *ce)
{
	return test_bit(CONTEXT_EXITING, &ce->flags);
}

static inline bool intel_context_set_exiting(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
}

bool intel_context_revoke(struct intel_context *ce);

static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

u64 intel_context_get_total_runtime_ns(struct intel_context *ce);
u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);

static inline u64 intel_context_clock(void)
{
	/* As we mix CS cycles with CPU clocks, use the raw monotonic clock. */
	return ktime_get_raw_fast_ns();
}

#endif /* __INTEL_CONTEXT_H__ */