// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

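/*
 * encaps_handle_do_release - common release flow for an encaps signals handle
 *
 * @handle: pointer to the encaps signals handle
 * @put_hw_sob: true if the handle's HW SOB reference should be dropped
 * @put_ctx: true if the handle's context reference should be dropped
 *
 * Removes the handle from the context's encaps signals IDR and frees it.
 */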
static void encaps_handle_do_release(struct hl_cs_encaps_sig_handle *handle, bool put_hw_sob,
					bool put_ctx)
{
	struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;

	if (put_hw_sob)
		hw_sob_put(handle->hw_sob);

	spin_lock(&mgr->lock);
	idr_remove(&mgr->handles, handle->id);
	spin_unlock(&mgr->lock);

	if (put_ctx)
		hl_ctx_put(handle->ctx);

	kfree(handle);
}

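/*
 * hl_encaps_release_handle_and_put_ctx - refcount release callback that drops
 * the handle's context reference but keeps its HW SOB reference.
 */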
void hl_encaps_release_handle_and_put_ctx(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, false, true);
}

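/*
 * hl_encaps_release_handle_and_put_sob - refcount release callback that drops
 * the handle's HW SOB reference but keeps its context reference.
 */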
static void hl_encaps_release_handle_and_put_sob(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, true, false);
}

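/*
 * hl_encaps_release_handle_and_put_sob_ctx - refcount release callback that
 * drops both the handle's HW SOB and context references.
 */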
void hl_encaps_release_handle_and_put_sob_ctx(struct kref *ref)
{
	struct hl_cs_encaps_sig_handle *handle =
			container_of(ref, struct hl_cs_encaps_sig_handle, refcount);

	encaps_handle_do_release(handle, true, true);
}

static void hl_encaps_sig_mgr_init(struct hl_encaps_signals_mgr *mgr)
{
	spin_lock_init(&mgr->lock);
	idr_init(&mgr->handles);
}

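/*
 * hl_encaps_sig_mgr_fini - tear down the encaps signals manager
 *
 * @hdev: pointer to device structure
 * @mgr: pointer to the encaps signals manager
 *
 * Releases any handles that are still allocated and destroys the IDR.
 */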
static void hl_encaps_sig_mgr_fini(struct hl_device *hdev, struct hl_encaps_signals_mgr *mgr)
{
	struct hl_cs_encaps_sig_handle *handle;
	struct idr *idp;
	u32 id;

	idp = &mgr->handles;

	/* The IDR is expected to be empty at this stage, because any leftover signal should have
	 * been released as part of CS roll-back.
	 */
	if (!idr_is_empty(idp)) {
		dev_warn(hdev->dev,
			"device released while some encaps signals handles are still allocated\n");
		idr_for_each_entry(idp, handle, id)
			kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob);
	}

	idr_destroy(&mgr->handles);
}

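/*
 * hl_ctx_fini - release all resources held by a context
 *
 * @ctx: pointer to the context structure
 *
 * Drops the pending CS fences and releases the memory, ASID and
 * ASIC-specific resources of the context. Called only once the context
 * refcount has reached 0.
 */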
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int i;

	/* Release all allocated HW block mapped list entries and destroy
	 * the mutex.
	 */
	hl_hw_block_mem_fini(ctx);

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues so we can safely remove it.
	 * This is because for each CS we increment the ref count, and for
	 * every CS that finishes we decrement it, so we won't get to this
	 * function unless the ref count is 0.
	 */

	for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
		hl_fence_put(ctx->cs_pending[i]);

	kfree(ctx->cs_pending);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		dev_dbg(hdev->dev, "closing user context, asid=%u\n", ctx->asid);

		/* The engines are stopped as there is no executing CS, but the
		 * Coresight might still be working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 */
		if (hdev->in_debug)
			hl_device_set_debug_mode(hdev, ctx, false);

		hdev->asic_funcs->ctx_fini(ctx);

		hl_dec_ctx_fini(ctx);

		hl_cb_va_pool_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
		hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
		mutex_destroy(&ctx->ts_reg_lock);
	} else {
		dev_dbg(hdev->dev, "closing kernel context\n");
		hdev->asic_funcs->ctx_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_mmu_ctx_fini(ctx);
	}
}

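/*
 * hl_ctx_do_release - final release of a context
 *
 * @ref: pointer to the context refcount
 *
 * Called when the context refcount reaches 0. Finalizes the context,
 * detaches it from its hpriv (if any) and frees it.
 */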
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv) {
		struct hl_fpriv *hpriv = ctx->hpriv;

		mutex_lock(&hpriv->ctx_lock);
		hpriv->ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		hl_hpriv_put(hpriv);
	}

	kfree(ctx);
}

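/*
 * hl_ctx_create - create a new user context
 *
 * @hdev: pointer to device structure
 * @hpriv: pointer to the file private data of the calling process
 *
 * Allocates a context, registers it in the process context manager IDR,
 * initializes it and attaches it to @hpriv while taking a reference on it.
 *
 * @return 0 on success, otherwise a negative error code
 */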
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *ctx_mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

	mutex_lock(&ctx_mgr->lock);
	rc = idr_alloc(&ctx_mgr->handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&ctx_mgr->lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		goto free_ctx;
	}

	ctx->handle = rc;

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto remove_from_idr;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts per process */
	hpriv->ctx = ctx;

	/* TODO: remove the following line for multiple process support */
	hdev->is_compute_ctx_active = true;

	return 0;

remove_from_idr:
	mutex_lock(&ctx_mgr->lock);
	idr_remove(&ctx_mgr->handles, ctx->handle);
	mutex_unlock(&ctx_mgr->lock);
free_ctx:
	kfree(ctx);
out_err:
	return rc;
}

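/*
 * hl_ctx_init - initialize a context structure
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 * @is_kernel_ctx: true if this is the kernel (driver) context
 *
 * Initializes the refcount, CS tracking structures, outcome store and
 * HW block memory list, then initializes the memory module and the
 * ASIC-specific state. For a user context it also allocates an ASID and
 * initializes the CB VA pool and the encaps signals manager.
 *
 * @return 0 on success, otherwise a negative error code
 */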
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	char task_comm[TASK_COMM_LEN];
	int rc = 0, i;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;
	ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
				sizeof(struct hl_fence *),
				GFP_KERNEL);
	if (!ctx->cs_pending)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->outcome_store.used_list);
	INIT_LIST_HEAD(&ctx->outcome_store.free_list);
	hash_init(ctx->outcome_store.outcome_map);
	for (i = 0; i < ARRAY_SIZE(ctx->outcome_store.nodes_pool); ++i)
		list_add(&ctx->outcome_store.nodes_pool[i].list_link,
			 &ctx->outcome_store.free_list);

	hl_hw_block_mem_init(ctx);

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_vm_ctx_fini;
		}
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			rc = -ENOMEM;
			goto err_hw_block_mem_fini;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto err_asid_free;
		}

		rc = hl_cb_va_pool_init(ctx);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to init VA pool for mapped CB\n");
			goto err_vm_ctx_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
			goto err_cb_va_pool_fini;
		}

		hl_encaps_sig_mgr_init(&ctx->sig_mgr);

		mutex_init(&ctx->ts_reg_lock);

		dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",
			get_task_comm(task_comm, current), ctx->asid);
	}

	return 0;

err_cb_va_pool_fini:
	hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
	hl_vm_ctx_fini(ctx);
err_asid_free:
	if (ctx->asid != HL_KERNEL_ASID_ID)
		hl_asid_free(hdev, ctx->asid);
err_hw_block_mem_fini:
	hl_hw_block_mem_fini(ctx);
	kfree(ctx->cs_pending);

	return rc;
}

static int hl_ctx_get_unless_zero(struct hl_ctx *ctx)
{
	return kref_get_unless_zero(&ctx->refcount);
}

void hl_ctx_get(struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

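/*
 * hl_ctx_put - drop a context reference; the context is released via
 * hl_ctx_do_release() once the last reference is dropped.
 */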
int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}

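/*
 * hl_get_compute_ctx - get the current compute (user) context
 *
 * @hdev: pointer to device structure
 *
 * @return the compute context with its refcount incremented, or NULL if
 *         there is no such context or it is already being released.
 *         The caller must release the context with hl_ctx_put().
 */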
struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
{
	struct hl_ctx *ctx = NULL;
	struct hl_fpriv *hpriv;

	mutex_lock(&hdev->fpriv_list_lock);

	list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
		mutex_lock(&hpriv->ctx_lock);
		ctx = hpriv->ctx;
		if (ctx && !hl_ctx_get_unless_zero(ctx))
			ctx = NULL;
		mutex_unlock(&hpriv->ctx_lock);

		/* Only a single user can have the compute device open, so exit
		 * immediately once we find its context or see that it has
		 * already been released.
		 */
		break;
	}

	mutex_unlock(&hdev->fpriv_list_lock);

	return ctx;
}

/*
 * hl_ctx_get_fence_locked - get CS fence under CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * @return valid fence pointer on success, NULL if fence is gone, otherwise
 *         error pointer.
 *
 * NOTE: this function must be called with cs_lock held
 */
static struct hl_fence *hl_ctx_get_fence_locked(struct hl_ctx *ctx, u64 seq)
{
	struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
	struct hl_fence *fence;

	if (seq >= ctx->cs_sequence)
		return ERR_PTR(-EINVAL);

	if (seq + asic_prop->max_pending_cs < ctx->cs_sequence)
		return NULL;

	fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
	hl_fence_get(fence);
	return fence;
}

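/*
 * hl_ctx_get_fence - get the CS fence of a given sequence number
 *
 * @ctx: pointer to the context structure.
 * @seq: CS sequence number
 *
 * Same as hl_ctx_get_fence_locked() but takes the CS lock internally.
 * The caller must release the fence with hl_fence_put().
 */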
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_fence *fence;

	spin_lock(&ctx->cs_lock);

	fence = hl_ctx_get_fence_locked(ctx, seq);

	spin_unlock(&ctx->cs_lock);

	return fence;
}

/*
 * hl_ctx_get_fences - get multiple CS fences under the same CS lock
 *
 * @ctx: pointer to the context structure.
 * @seq_arr: array of CS sequences to wait for
 * @fence: fence array to store the CS fences
 * @arr_len: length of both arrays
 *
 * @return 0 on success, otherwise a non-zero error code
 */
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
				struct hl_fence **fence, u32 arr_len)
{
	struct hl_fence **fence_arr_base = fence;
	int i, rc = 0;

	spin_lock(&ctx->cs_lock);

	for (i = 0; i < arr_len; i++, fence++) {
		u64 seq = seq_arr[i];

		*fence = hl_ctx_get_fence_locked(ctx, seq);

		if (IS_ERR(*fence)) {
			dev_err(ctx->hdev->dev,
				"Failed to get fence for CS with seq 0x%llx\n",
					seq);
			rc = PTR_ERR(*fence);
			break;
		}
	}

	spin_unlock(&ctx->cs_lock);

	if (rc)
		hl_fences_put(fence_arr_base, i);

	return rc;
}

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @ctx_mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *ctx_mgr)
{
	mutex_init(&ctx_mgr->lock);
	idr_init(&ctx_mgr->handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @ctx_mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *ctx_mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &ctx_mgr->handles;

	idr_for_each_entry(idp, ctx, id)
		kref_put(&ctx->refcount, hl_ctx_do_release);

	idr_destroy(&ctx_mgr->handles);
	mutex_destroy(&ctx_mgr->lock);
}