// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/**
 * struct hl_eqe_work - This structure is used to schedule work of EQ
 *                      entry and cpucp_reset event
 *
 * @eq_work:          workqueue object to run when EQ entry is received
 * @hdev:             pointer to device structure
 * @eq_entry:         copy of the EQ entry
 */
struct hl_eqe_work {
	struct work_struct	eq_work;
	struct hl_device	*hdev;
	struct hl_eq_entry	eq_entry;
};

/**
 * hl_cq_inc_ptr - increment ci or pi of cq
 *
 * @ptr: the current ci or pi value of the completion queue
 *
 * Increment ptr by 1. If it reaches the number of completion queue
 * entries, set it to 0
 */
inline u32 hl_cq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_CQ_LENGTH))
		ptr = 0;
	return ptr;
}

/**
 * hl_eq_inc_ptr - increment ci of eq
 *
 * @ptr: the current ci value of the event queue
 *
 * Increment ptr by 1. If it reaches the number of event queue
 * entries, set it to 0
 */
static inline u32 hl_eq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_EQ_LENGTH))
		ptr = 0;
	return ptr;
}

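/*
 * irq_handle_eqe - handle a single EQ entry in work queue context
 * @work: work object embedded in the hl_eqe_work wrapper
 *
 * Runs from the EQ workqueue, outside interrupt context, so the ASIC-specific
 * EQ handler is allowed to sleep. Frees the wrapper object that was allocated
 * by the EQ interrupt handler.
 */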
static void irq_handle_eqe(struct work_struct *work)
{
	struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
							eq_work);
	struct hl_device *hdev = eqe_work->hdev;

	hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);

	kfree(eqe_work);
}

/**
 * job_finish - queue job finish work
 *
 * @hdev: pointer to device structure
 * @cs_seq: command submission sequence
 * @cq: completion queue
 * @timestamp: interrupt timestamp
 *
 */
static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq, ktime_t timestamp)
{
	struct hl_hw_queue *queue;
	struct hl_cs_job *job;

	queue = &hdev->kernel_queues[cq->hw_queue_id];
	job = queue->shadow_queue[hl_pi_2_offset(cs_seq)];
	job->timestamp = timestamp;
	queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);

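	/* The job has completed, so advance the queue's CI to free its slot */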
	atomic_inc(&queue->ci);
}

/**
 * cs_finish - queue all cs jobs finish work
 *
 * @hdev: pointer to device structure
 * @cs_seq: command submission sequence
 * @timestamp: interrupt timestamp
 *
 */
static void cs_finish(struct hl_device *hdev, u16 cs_seq, ktime_t timestamp)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_hw_queue *queue;
	struct hl_cs *cs;
	struct hl_cs_job *job;

	cs = hdev->shadow_cs_queue[cs_seq & (prop->max_pending_cs - 1)];
	if (!cs) {
		dev_warn(hdev->dev,
			"No pointer to CS in shadow array at index %d\n",
			cs_seq);
		return;
	}

	list_for_each_entry(job, &cs->job_list, cs_node) {
		queue = &hdev->kernel_queues[job->hw_queue_id];
		atomic_inc(&queue->ci);
	}

	cs->completion_timestamp = timestamp;
	queue_work(hdev->cs_cmplt_wq, &cs->finish_work);
}

/**
 * hl_irq_handler_cq - irq handler for completion queue
 *
 * @irq: irq number
 * @arg: pointer to completion queue structure
 *
 */
irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
	struct hl_cq *cq = arg;
	struct hl_device *hdev = cq->hdev;
	bool shadow_index_valid, entry_ready;
	u16 shadow_index;
	struct hl_cq_entry *cq_entry, *cq_base;
	ktime_t timestamp = ktime_get();

	if (hdev->disabled) {
		dev_dbg(hdev->dev,
			"Device disabled but received IRQ %d for CQ %d\n",
			irq, cq->hw_queue_id);
		return IRQ_HANDLED;
	}

	cq_base = cq->kernel_address;

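	/*
	 * Drain all entries that the H/W has marked as ready. The loop stops
	 * at the first entry whose ready bit is still clear.
	 */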
	while (1) {
		cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];

		entry_ready = !!FIELD_GET(CQ_ENTRY_READY_MASK,
				le32_to_cpu(cq_entry->data));
		if (!entry_ready)
			break;

		/* Make sure we read CQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		shadow_index_valid =
			!!FIELD_GET(CQ_ENTRY_SHADOW_INDEX_VALID_MASK,
					le32_to_cpu(cq_entry->data));

		shadow_index = FIELD_GET(CQ_ENTRY_SHADOW_INDEX_MASK,
				le32_to_cpu(cq_entry->data));

		/*
		 * CQ interrupt handler has 2 modes of operation:
		 * 1. Interrupt per CS completion: (Single CQ for all queues)
		 *    CQ entry represents a completed CS
		 *
		 * 2. Interrupt per CS job completion in queue: (CQ per queue)
		 *    CQ entry represents a completed job in a certain queue
		 */
		if (shadow_index_valid && !hdev->disabled) {
			if (hdev->asic_prop.completion_mode ==
					HL_COMPLETION_MODE_CS)
				cs_finish(hdev, shadow_index, timestamp);
			else
				job_finish(hdev, shadow_index, cq, timestamp);
		}

		/* Clear CQ entry ready bit */
		cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
						~CQ_ENTRY_READY_MASK);

		cq->ci = hl_cq_inc_ptr(cq->ci);

		/* Increment free slots */
		atomic_inc(&cq->free_slots_cnt);
	}

	return IRQ_HANDLED;
}

/*
 * hl_ts_free_objects - handler of the free objects workqueue.
 * This function puts the refcounts that the registration nodes have taken
 * on their objects.
 * @work: workqueue object pointer
 */
static void hl_ts_free_objects(struct work_struct *work)
{
	struct timestamp_reg_work_obj *job =
			container_of(work, struct timestamp_reg_work_obj, free_obj);
	struct list_head *dynamic_alloc_free_list_head = job->dynamic_alloc_free_obj_head;
	struct timestamp_reg_free_node *free_obj, *temp_free_obj;
	struct list_head *free_list_head = job->free_obj_head;

	struct hl_device *hdev = job->hdev;

	list_for_each_entry_safe(free_obj, temp_free_obj, free_list_head, free_objects_node) {
		dev_dbg(hdev->dev, "About to put refcount to buf (%p) cq_cb(%p)\n",
					free_obj->buf,
					free_obj->cq_cb);

		hl_mmap_mem_buf_put(free_obj->buf);
		hl_cb_put(free_obj->cq_cb);
		atomic_set(&free_obj->in_use, 0);
	}

	kfree(free_list_head);

	if (dynamic_alloc_free_list_head) {
		list_for_each_entry_safe(free_obj, temp_free_obj, dynamic_alloc_free_list_head,
								free_objects_node) {
			dev_dbg(hdev->dev,
				"Dynamic_Alloc list: About to put refcount to buf (%p) cq_cb(%p)\n",
						free_obj->buf,
						free_obj->cq_cb);

			hl_mmap_mem_buf_put(free_obj->buf);
			hl_cb_put(free_obj->cq_cb);
			list_del(&free_obj->free_objects_node);
			kfree(free_obj);
		}

		kfree(dynamic_alloc_free_list_head);
	}

	kfree(job);
}

/*
 * This function is called with the interrupt's ts_list_lock spinlock held.
 * It sets the timestamp and deletes the registration node from the list.
 * Because we hold a spinlock here, we cannot simply put the refcounts of
 * the objects at this point: their release functions may be invoked, and
 * they contain long logic (which might also sleep) that cannot run in IRQ
 * context. Instead, we fill a list with "put" job nodes and hand that list
 * to a dedicated workqueue which performs the actual puts.
 */
static int handle_registration_node(struct hl_device *hdev, struct hl_user_pending_interrupt *pend,
						struct list_head **free_list,
						struct list_head **dynamic_alloc_list,
						struct hl_user_interrupt *intr)
{
	struct hl_ts_free_jobs *ts_free_jobs_data;
	struct timestamp_reg_free_node *free_node;
	u32 free_node_index;
	u64 timestamp;

	ts_free_jobs_data = &intr->ts_free_jobs_data;
	free_node_index = ts_free_jobs_data->next_avail_free_node_idx;

	if (!(*free_list)) {
		/* Alloc/Init the timestamp registration free objects list */
		*free_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
		if (!(*free_list))
			return -ENOMEM;

		INIT_LIST_HEAD(*free_list);
	}

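	/*
	 * Try to claim the next pre-allocated pool node atomically. If it is
	 * already in use, the pool is exhausted and we fall back to a dynamic
	 * GFP_ATOMIC allocation below.
	 */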
	free_node = &ts_free_jobs_data->free_nodes_pool[free_node_index];
	if (atomic_cmpxchg(&free_node->in_use, 0, 1)) {
		dev_dbg(hdev->dev,
			"Timestamp free node pool is full, buff: %p, record: %p, irq: %u\n",
				pend->ts_reg_info.buf,
				pend,
				intr->interrupt_id);

		if (!(*dynamic_alloc_list)) {
			*dynamic_alloc_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
			if (!(*dynamic_alloc_list))
				return -ENOMEM;

			INIT_LIST_HEAD(*dynamic_alloc_list);
		}

		free_node = kmalloc(sizeof(struct timestamp_reg_free_node), GFP_ATOMIC);
		if (!free_node)
			return -ENOMEM;

		free_node->dynamic_alloc = 1;
	}

	timestamp = ktime_to_ns(intr->timestamp);

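	/* Write the interrupt timestamp into the registered timestamp record */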
	*pend->ts_reg_info.timestamp_kernel_addr = timestamp;

	dev_dbg(hdev->dev, "Irq handle: Timestamp record (%p) ts cb address (%p), interrupt_id: %u\n",
			pend, pend->ts_reg_info.timestamp_kernel_addr, intr->interrupt_id);

	list_del(&pend->list_node);

	/* Putting the refcount for ts_buff and cq_cb objects will be handled
	 * in workqueue context, just add job to free_list.
	 */
	free_node->buf = pend->ts_reg_info.buf;
	free_node->cq_cb = pend->ts_reg_info.cq_cb;

	if (free_node->dynamic_alloc) {
		list_add(&free_node->free_objects_node, *dynamic_alloc_list);
	} else {
		ts_free_jobs_data->next_avail_free_node_idx =
				(++free_node_index) % ts_free_jobs_data->free_nodes_length;
		list_add(&free_node->free_objects_node, *free_list);
	}

	/* Mark TS record as free */
	pend->ts_reg_info.in_use = false;

	return 0;
}

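/*
 * handle_user_interrupt_ts_list - scan the interrupt's timestamp registration
 * list, record the interrupt timestamp for every node whose target value was
 * reached and hand the refcount "put" work over to a dedicated workqueue.
 */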
static void handle_user_interrupt_ts_list(struct hl_device *hdev, struct hl_user_interrupt *intr)
{
	struct list_head *ts_reg_free_list_head = NULL, *dynamic_alloc_list_head = NULL;
	struct hl_user_pending_interrupt *pend, *temp_pend;
	struct timestamp_reg_work_obj *job;
	bool reg_node_handle_fail = false;
	unsigned long flags;
	int rc;

	/* For registration nodes:
	 * As part of handling the registration nodes, we need to put the
	 * refcounts of some objects. The problem is that we cannot do that
	 * under a spinlock, or in IRQ handler context at all (since the
	 * release functions are long and might sleep), so that part must be
	 * handled in workqueue context.
	 * To avoid having to handle a kmalloc failure, which would compel us
	 * to roll back and move nodes from the free list back to the
	 * interrupt's ts list, we always allocate the WQ job at the beginning.
	 */
	job = kmalloc(sizeof(*job), GFP_ATOMIC);
	if (!job)
		return;

	spin_lock_irqsave(&intr->ts_list_lock, flags);
	list_for_each_entry_safe(pend, temp_pend, &intr->ts_list_head, list_node) {
		if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
				!pend->cq_kernel_addr) {
			if (!reg_node_handle_fail) {
				rc = handle_registration_node(hdev, pend,
						&ts_reg_free_list_head,
						&dynamic_alloc_list_head, intr);
				if (rc)
					reg_node_handle_fail = true;
			}
		}
	}
	spin_unlock_irqrestore(&intr->ts_list_lock, flags);

	if (ts_reg_free_list_head) {
		INIT_WORK(&job->free_obj, hl_ts_free_objects);
		job->free_obj_head = ts_reg_free_list_head;
		job->dynamic_alloc_free_obj_head = dynamic_alloc_list_head;
		job->hdev = hdev;
		queue_work(hdev->ts_free_obj_wq, &job->free_obj);
	} else {
		kfree(job);
	}
}

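/*
 * handle_user_interrupt_wait_list - complete the fences of all waiters on this
 * interrupt whose CQ counter has reached its target value, or that registered
 * with no CQ counter at all.
 */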
static void handle_user_interrupt_wait_list(struct hl_device *hdev, struct hl_user_interrupt *intr)
{
	struct hl_user_pending_interrupt *pend, *temp_pend;
	unsigned long flags;

	spin_lock_irqsave(&intr->wait_list_lock, flags);
	list_for_each_entry_safe(pend, temp_pend, &intr->wait_list_head, list_node) {
		if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
				!pend->cq_kernel_addr) {
			/* Handle wait target value node */
			pend->fence.timestamp = intr->timestamp;
			complete_all(&pend->fence.completion);
		}
	}
	spin_unlock_irqrestore(&intr->wait_list_lock, flags);
}

static void handle_tpc_interrupt(struct hl_device *hdev)
{
	u64 event_mask;
	u32 flags;

	event_mask = HL_NOTIFIER_EVENT_TPC_ASSERT |
		HL_NOTIFIER_EVENT_USER_ENGINE_ERR |
		HL_NOTIFIER_EVENT_DEVICE_RESET;

	flags = HL_DRV_RESET_DELAY;

	dev_err_ratelimited(hdev->dev, "Received TPC assert\n");
	hl_device_cond_reset(hdev, flags, event_mask);
}

static void handle_unexpected_user_interrupt(struct hl_device *hdev)
{
	dev_err_ratelimited(hdev->dev, "Received unexpected user error interrupt\n");
}

/**
 * hl_irq_user_interrupt_handler - irq handler for user interrupts.
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 */
irqreturn_t hl_irq_user_interrupt_handler(int irq, void *arg)
{
	struct hl_user_interrupt *user_int = arg;
	struct hl_device *hdev = user_int->hdev;

	user_int->timestamp = ktime_get();
	switch (user_int->type) {
	case HL_USR_INTERRUPT_CQ:
		/* First handle user waiters threads */
		handle_user_interrupt_wait_list(hdev, &hdev->common_user_cq_interrupt);
		handle_user_interrupt_wait_list(hdev, user_int);

		/* Second handle user timestamp registrations */
		handle_user_interrupt_ts_list(hdev, &hdev->common_user_cq_interrupt);
		handle_user_interrupt_ts_list(hdev, user_int);
		break;
	case HL_USR_INTERRUPT_DECODER:
		handle_user_interrupt_wait_list(hdev, &hdev->common_decoder_interrupt);

		/* Handle decoder interrupt registered on this specific irq */
		handle_user_interrupt_wait_list(hdev, user_int);
		break;
	default:
		break;
	}

	return IRQ_HANDLED;
}

/**
 * hl_irq_user_interrupt_thread_handler - irq thread handler for user interrupts.
 * This function is invoked by the threaded irq mechanism
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_user_interrupt_thread_handler(int irq, void *arg)
{
	struct hl_user_interrupt *user_int = arg;
	struct hl_device *hdev = user_int->hdev;

	user_int->timestamp = ktime_get();
	switch (user_int->type) {
	case HL_USR_INTERRUPT_TPC:
		handle_tpc_interrupt(hdev);
		break;
	case HL_USR_INTERRUPT_UNEXPECTED:
		handle_unexpected_user_interrupt(hdev);
		break;
	default:
		break;
	}

	return IRQ_HANDLED;
}

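/*
 * hl_irq_eq_error_interrupt_thread_handler - irq thread handler for EQ error
 * interrupts. Requests a conditional hard reset of the device.
 * @irq: irq number
 * @arg: pointer to device structure
 */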
irqreturn_t hl_irq_eq_error_interrupt_thread_handler(int irq, void *arg)
{
	u64 event_mask = HL_NOTIFIER_EVENT_DEVICE_RESET | HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
	struct hl_device *hdev = arg;

	dev_err(hdev->dev, "EQ error interrupt received\n");

	hl_device_cond_reset(hdev, HL_DRV_RESET_HARD, event_mask);

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_eq - irq handler for event queue
 *
 * @irq: irq number
 * @arg: pointer to event queue structure
 *
 */
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{
	struct hl_eq *eq = arg;
	struct hl_device *hdev = eq->hdev;
	struct hl_eq_entry *eq_entry;
	struct hl_eq_entry *eq_base;
	struct hl_eqe_work *handle_eqe_work;
	bool entry_ready;
	u32 cur_eqe, ctl;
	u16 cur_eqe_index, event_type;

	eq_base = eq->kernel_address;

	while (1) {
		cur_eqe = le32_to_cpu(eq_base[eq->ci].hdr.ctl);
		entry_ready = !!FIELD_GET(EQ_CTL_READY_MASK, cur_eqe);

		if (!entry_ready)
			break;

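		/*
		 * When index checking is enabled, verify that this entry's index is
		 * consecutive to the previously handled one; a mismatch indicates a
		 * lost or out-of-order EQ entry, so stop processing.
		 */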
		cur_eqe_index = FIELD_GET(EQ_CTL_INDEX_MASK, cur_eqe);
		if ((hdev->event_queue.check_eqe_index) &&
				(((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK) != cur_eqe_index)) {
			dev_err(hdev->dev,
				"EQE %#x in queue is ready but index does not match %d!=%d",
				cur_eqe,
				((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),
				cur_eqe_index);
			break;
		}

		eq->prev_eqe_index++;

		eq_entry = &eq_base[eq->ci];

		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
			ctl = le32_to_cpu(eq_entry->hdr.ctl);
			event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT);
			dev_warn(hdev->dev,
				"Device disabled but received an EQ event (%u)\n", event_type);
			goto skip_irq;
		}

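		/*
		 * Defer the actual event handling to the EQ workqueue. If the
		 * allocation fails, the event is dropped, but the entry is still
		 * consumed below so the queue keeps advancing.
		 */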
		handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
		if (handle_eqe_work) {
			INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
			handle_eqe_work->hdev = hdev;

			memcpy(&handle_eqe_work->eq_entry, eq_entry,
					sizeof(*eq_entry));

			queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
		}
skip_irq:
		/* Clear EQ entry ready bit */
		eq_entry->hdr.ctl =
			cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
							~EQ_CTL_READY_MASK);

		eq->ci = hl_eq_inc_ptr(eq->ci);

		hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
	}

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_dec_abnrm - Decoder error interrupt handler
 * @irq: IRQ number
 * @arg: pointer to decoder structure.
 */
irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg)
{
	struct hl_dec *dec = arg;

	schedule_work(&dec->abnrm_intr_work);

	return IRQ_HANDLED;
}

/**
 * hl_cq_init - main initialization function for a cq object
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 * @hw_queue_id: The H/W queue ID this completion queue belongs to
 *               HL_INVALID_QUEUE if cq is not attached to any specific queue
 *
 * Allocate dma-able memory for the completion queue and initialize fields
 * Returns 0 on success
 */
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
	void *p;

	p = hl_asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES, &q->bus_address,
					GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->hw_queue_id = hw_queue_id;
	q->ci = 0;
	q->pi = 0;

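	/* Initially, all CQ slots are free */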
	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	return 0;
}

/**
 * hl_cq_fini - destroy completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Free the completion queue memory
 */
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
	hl_asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES, q->kernel_address, q->bus_address);
}

void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
{
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}

/**
 * hl_eq_init - main initialization function for an event queue object
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Allocate dma-able memory for the event queue and initialize fields
 * Returns 0 on success
 */
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
	void *p;

	p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_EQ_SIZE_IN_BYTES, &q->bus_address);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->ci = 0;
	q->prev_eqe_index = 0;

	return 0;
}

/**
 * hl_eq_fini - destroy event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Free the event queue memory
 */
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
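	/*
	 * Wait for all EQ events that are still being handled in workqueue
	 * context to complete before the queue memory is released.
	 */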
	flush_workqueue(hdev->eq_wq);

	hl_cpu_accessible_dma_pool_free(hdev, HL_EQ_SIZE_IN_BYTES, q->kernel_address);
}

void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
	q->ci = 0;
	q->prev_eqe_index = 0;

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}