/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_BREADCRUMBS_TYPES__
#define __INTEL_BREADCRUMBS_TYPES__

#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "intel_engine_types.h"
/*
 * Rather than have every client wait upon all user interrupts,
 * with the herd waking after every interrupt and each doing the
 * heavyweight seqno dance, we delegate the task (of being the
 * bottom-half of the user interrupt) to the first client. After
 * every interrupt, we wake up one client, who does the heavyweight
 * coherent seqno read and either goes back to sleep (if incomplete),
 * or wakes up all the completed clients in parallel, before then
 * transferring the bottom-half status to the next client in the queue.
 *
 * Compared to walking the entire list of waiters in a single dedicated
 * bottom-half, we reduce the latency of the first waiter by avoiding
 * a context switch, but incur additional coherent seqno reads when
 * following the chain of request breadcrumbs. Since it is most likely
 * that we have a single client waiting on each seqno, reducing
 * the overhead of waking that client is much preferred.
 */
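
/*
 * As an illustration of the scheme above, a minimal sketch rather than
 * the driver's code: struct waiter, first_waiter(), seqno_passed(),
 * remove_waiter() and transfer_bottom_half() are hypothetical names.
 * One woken client performs the single coherent seqno read, wakes every
 * completed waiter, and hands the bottom-half duty onwards.
 *
 *	static void bottom_half(u32 seqno)
 *	{
 *		struct waiter *w;
 *
 *		// wake every completed client; waiters are seqno-ordered
 *		while ((w = first_waiter()) && seqno_passed(seqno, w->seqno)) {
 *			remove_waiter(w);
 *			wake_up_process(w->task);
 *		}
 *
 *		// the first incomplete waiter inherits the bottom-half duty
 *		if (w)
 *			transfer_bottom_half(w);
 *	}
 */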
struct intel_breadcrumbs {
	struct kref ref;
	atomic_t active;

	spinlock_t signalers_lock; /* protects the list of signalers */
	struct list_head signalers; /* contexts with breadcrumbs to signal */
	struct llist_head signaled_requests; /* completed, awaiting the worker */
	atomic_t signaler_active;

	spinlock_t irq_lock; /* protects the interrupt from hardirq context */
	struct irq_work irq_work; /* for use from inside irq_lock */
	unsigned int irq_enabled; /* nested enable count for the interrupt */
	bool irq_armed; /* interrupt currently armed for signaling */

	/* Not all breadcrumbs are attached to physical HW */
	intel_engine_mask_t	engine_mask;
	struct intel_engine_cs *irq_engine;
	bool	(*irq_enable)(struct intel_breadcrumbs *b);
	void	(*irq_disable)(struct intel_breadcrumbs *b);
};
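
/*
 * A minimal sketch (with hypothetical helper names, and assuming the
 * caller already holds irq_lock) of how the enable/disable vfuncs pair
 * with the irq_enabled nesting count: the hardware interrupt is only
 * touched on the 0 <-> 1 transitions.
 *
 *	static void breadcrumbs_irq_get(struct intel_breadcrumbs *b)
 *	{
 *		lockdep_assert_held(&b->irq_lock);
 *		if (!b->irq_enabled++ && b->irq_enable)
 *			b->irq_enable(b);	// 0 -> 1: unmask in HW
 *	}
 *
 *	static void breadcrumbs_irq_put(struct intel_breadcrumbs *b)
 *	{
 *		lockdep_assert_held(&b->irq_lock);
 *		if (!--b->irq_enabled && b->irq_disable)
 *			b->irq_disable(b);	// 1 -> 0: mask again
 *	}
 */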

#endif /* __INTEL_BREADCRUMBS_TYPES__ */