/*	$NetBSD: intel_context.h,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)
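
/*
 * CE_TRACE() tags an ENGINE_TRACE() message with the context's fence
 * context id. A minimal usage sketch (the format string and arguments
 * are illustrative only):
 *
 *	CE_TRACE(ce, "ring:{head:%04x, tail:%04x}\n",
 *		 ce->ring->head, ce->ring->tail);
 */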

void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);
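
/*
 * Lifecycle sketch (illustrative): a context is created against an engine
 * and released by dropping the final reference with intel_context_put();
 * intel_context_free() is the backing release and is not normally called
 * directly.
 *
 *	struct intel_context *ce;
 *
 *	ce = intel_context_create(engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *	... pin, submit requests, unpin ...
 *	intel_context_put(ce);
 */
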
/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}

/**
 * intel_context_is_pinned - Reports the 'pinned' status
 * @ce: the context
 *
 * While in use by the GPU, the context, along with its ring and page
 * tables, is pinned into memory and the GTT.
 *
 * Returns: true if the context is currently pinned for use by the GPU.
 */
static inline bool
intel_context_is_pinned(struct intel_context *ce)
{
	return atomic_read(&ce->pin_count);
}

/**
 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
 * @ce: the context
 *
 * Releases the lock earlier acquired by intel_context_lock_pinned().
 */
static inline void intel_context_unlock_pinned(struct intel_context *ce)
	__releases(ce->pin_mutex)
{
	mutex_unlock(&ce->pin_mutex);
}
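
/*
 * A minimal sketch of the pin-status lock in use (illustrative only):
 *
 *	int err;
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *	if (intel_context_is_pinned(ce))
 *		... inspect the pinned HW state safely ...
 *	intel_context_unlock_pinned(ce);
 */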

int __intel_context_do_pin(struct intel_context *ce);

/* Only take a pin if the context is already pinned; never allocates. */
static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	/* Fast path: piggy-back on an existing pin. */
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	/* Slow path: allocate and bind the HW state as required. */
	return __intel_context_do_pin(ce);
}

/* Take an extra pin on a context that is known to be already pinned. */
static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);
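
/*
 * Pin/unpin sketch (illustrative; submission details elided). While
 * pinned, the context's state, ring and page tables are resident:
 *
 *	int err;
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *	... build and submit requests ...
 *	intel_context_unpin(ce);
 */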

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

/*
 * A context is considered active while requests are in flight on its
 * timeline: the first intel_context_enter() invokes ce->ops->enter() and
 * the last intel_context_exit() invokes ce->ops->exit(). All three helpers
 * below require ce->timeline->mutex to be held.
 */
static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

/* Bump the active count without invoking the enter callback. */
static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}
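
/*
 * Schematic enter/exit pairing (illustrative only; real callers split
 * these across request creation and retirement):
 *
 *	mutex_lock(&ce->timeline->mutex);
 *	intel_context_enter(ce);
 *	mutex_unlock(&ce->timeline->mutex);
 *	...
 *	mutex_lock(&ce->timeline->mutex);
 *	intel_context_exit(ce);
 *	mutex_unlock(&ce->timeline->mutex);
 */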

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}
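
/*
 * Reference counting sketch (illustrative): every get must be balanced by
 * a put; the final put invokes ce->ops->destroy via kref_put().
 *
 *	struct intel_context *ref = intel_context_get(ce);
 *	... use ref independently of the caller's reference ...
 *	intel_context_put(ref);
 */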

static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}
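
/*
 * Timeline locking sketch (illustrative): the lock is interruptible, so
 * the returned pointer must be checked with IS_ERR() before use.
 *
 *	struct intel_timeline *tl;
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *	... emit onto the timeline ...
 *	intel_context_timeline_unlock(tl);
 */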

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

static inline struct intel_ring *__intel_context_ring_size(u64 sz)
{
	return u64_to_ptr(struct intel_ring, sz);
}
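
/*
 * __intel_context_ring_size() smuggles a requested ring size through the
 * ring pointer, deferring the real ring allocation until the context is
 * first pinned. A hedged sketch of the idiom (the size shown is an
 * assumption for illustration):
 *
 *	ce->ring = __intel_context_ring_size(SZ_16K);
 */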

static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

#endif /* __INTEL_CONTEXT_H__ */