/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_QUEUE_H
#define PVR_QUEUE_H

#include <drm/gpu_scheduler.h>

#include "pvr_cccb.h"
#include "pvr_device.h"

struct pvr_context;
struct pvr_queue;

/**
 * struct pvr_queue_fence_ctx - Queue fence context
 *
 * Used to implement dma_fence_ops for pvr_job::{done,cccb}_fence.
 */
struct pvr_queue_fence_ctx {
	/** @id: Fence context ID allocated with dma_fence_context_alloc(). */
	u64 id;

	/** @seqno: Sequence number incremented each time a fence is created. */
	atomic_t seqno;

	/** @lock: Lock used to synchronize access to fences allocated by this context. */
	spinlock_t lock;
};
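
/*
 * A fence context like this is typically consumed through dma_fence_init():
 * the context ID plus a fresh seqno seed a new fence, and @lock serializes
 * signaling. A minimal sketch, assuming a hypothetical pvr_queue_fence_ops
 * (the real ops live in the implementation, not this header):
 *
 *	static void pvr_queue_fence_arm(struct pvr_queue_fence *f,
 *					struct pvr_queue_fence_ctx *ctx)
 *	{
 *		dma_fence_init(&f->base, &pvr_queue_fence_ops, &ctx->lock,
 *			       ctx->id, atomic_inc_return(&ctx->seqno));
 *	}
 */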

/**
 * struct pvr_queue_cccb_fence_ctx - CCCB fence context
 *
 * Context used to manage fences controlling access to the CCCB. No fences are
 * issued if there's enough space in the CCCB to push job commands.
 */
struct pvr_queue_cccb_fence_ctx {
	/** @base: Base queue fence context. */
	struct pvr_queue_fence_ctx base;

	/**
	 * @job: Job waiting for CCCB space.
	 *
	 * Thanks to the serialization done at the drm_sched_entity level,
	 * there's never more than one job waiting for CCCB space at a given time.
	 *
	 * This field is NULL if no jobs are currently waiting for CCCB space.
	 *
	 * Must be accessed with @job_lock held.
	 */
	struct pvr_job *job;

	/** @job_lock: Lock protecting access to the job object. */
	struct mutex job_lock;
};
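
/*
 * The single-pending-job invariant above allows a simple gating pattern at
 * submission time. A minimal sketch, assuming hypothetical helpers
 * pvr_cccb_has_space() and pvr_queue_submit_job() (illustrative names, not
 * part of this header): when the CCCB is too full, the job is parked in
 * @job until space frees up.
 *
 *	mutex_lock(&queue->cccb_fence_ctx.job_lock);
 *	if (pvr_cccb_has_space(&queue->cccb, job))
 *		pvr_queue_submit_job(queue, job);
 *	else
 *		queue->cccb_fence_ctx.job = job;
 *	mutex_unlock(&queue->cccb_fence_ctx.job_lock);
 */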

/**
 * struct pvr_queue_fence - Queue fence object
 */
struct pvr_queue_fence {
	/** @base: Base dma_fence. */
	struct dma_fence base;

	/** @queue: Queue that created this fence. */
	struct pvr_queue *queue;
};
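
/*
 * Code holding a plain &struct dma_fence can recover the wrapping queue
 * fence with container_of(). A minimal sketch of such a helper (a common
 * kernel pattern; the name here is illustrative):
 *
 *	static struct pvr_queue_fence *to_pvr_queue_fence(struct dma_fence *f)
 *	{
 *		return container_of(f, struct pvr_queue_fence, base);
 *	}
 */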

/**
 * struct pvr_queue - Job queue
 *
 * Used to queue and track execution of pvr_job objects.
 */
struct pvr_queue {
	/** @scheduler: Single-entity scheduler used to push jobs to this queue. */
	struct drm_gpu_scheduler scheduler;

	/** @entity: Scheduling entity backing this queue. */
	struct drm_sched_entity entity;

	/** @type: Type of jobs queued to this queue. */
	enum drm_pvr_job_type type;

	/** @ctx: Context object this queue is bound to. */
	struct pvr_context *ctx;

	/** @node: Used to add the queue to the active/idle queue list. */
	struct list_head node;

	/**
	 * @in_flight_job_count: Number of jobs submitted to the CCCB that
	 * have not been processed yet.
	 */
	atomic_t in_flight_job_count;

	/**
	 * @cccb_fence_ctx: CCCB fence context.
	 *
	 * Used to gate access to the CCCB when it is full, so that we don't
	 * end up trying to push commands to the CCCB when there's not enough
	 * space to receive all commands needed for a job to complete.
	 */
	struct pvr_queue_cccb_fence_ctx cccb_fence_ctx;

	/** @job_fence_ctx: Job fence context object. */
	struct pvr_queue_fence_ctx job_fence_ctx;

	/** @timeline_ufo: Timeline UFO for the context queue. */
	struct {
		/** @fw_obj: FW object representing the UFO value. */
		struct pvr_fw_object *fw_obj;

		/** @value: CPU mapping of the UFO value. */
		u32 *value;
	} timeline_ufo;

	/**
	 * @last_queued_job_scheduled_fence: The scheduled fence of the last
	 * job queued to this queue.
	 *
	 * We use it to insert frag -> geom dependencies when issuing combined
	 * geom+frag jobs, to guarantee that the fragment job that's part of
	 * the combined operation comes after all fragment jobs that were queued
	 * before it.
	 */
	struct dma_fence *last_queued_job_scheduled_fence;

	/** @cccb: Client Circular Command Buffer. */
	struct pvr_cccb cccb;

	/** @reg_state_obj: FW object representing the register state of this queue. */
	struct pvr_fw_object *reg_state_obj;

	/** @ctx_offset: Offset of the queue context in the FW context object. */
	u32 ctx_offset;

	/** @callstack_addr: Initial call stack address for register state object. */
	u64 callstack_addr;
};
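
/*
 * The timeline UFO is how the firmware reports progress: it updates the
 * value behind @timeline_ufo.fw_obj, and the CPU mapping in
 * @timeline_ufo.value lets the driver check how far the queue has advanced.
 * A minimal sketch of such a check, assuming fence seqnos are compared
 * against the UFO value (illustrative only, not the driver's actual logic):
 *
 *	static bool pvr_queue_seqno_reached(struct pvr_queue *queue, u32 seqno)
 *	{
 *		return READ_ONCE(*queue->timeline_ufo.value) >= seqno;
 *	}
 */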

bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f);

int pvr_queue_job_init(struct pvr_job *job);

void pvr_queue_job_cleanup(struct pvr_job *job);

void pvr_queue_job_push(struct pvr_job *job);

struct dma_fence *pvr_queue_job_arm(struct pvr_job *job);

struct pvr_queue *pvr_queue_create(struct pvr_context *ctx,
				   enum drm_pvr_job_type type,
				   struct drm_pvr_ioctl_create_context_args *args,
				   void *fw_ctx_map);
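
/*
 * Typical job flow through this API, as suggested by the declarations above
 * (a sketch with error handling omitted; the exact ordering lives in the
 * submit path, not in this header):
 *
 *	err = pvr_queue_job_init(job);
 *	done_fence = pvr_queue_job_arm(job);
 *	pvr_queue_job_push(job);
 *	...
 *	pvr_queue_job_cleanup(job);
 */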

void pvr_queue_kill(struct pvr_queue *queue);

void pvr_queue_destroy(struct pvr_queue *queue);

void pvr_queue_process(struct pvr_queue *queue);

void pvr_queue_device_pre_reset(struct pvr_device *pvr_dev);

void pvr_queue_device_post_reset(struct pvr_device *pvr_dev);
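
/*
 * The pre/post reset hooks are meant to bracket a device reset. A minimal
 * sketch, with a hypothetical pvr_do_gpu_reset() standing in for the actual
 * reset sequence:
 *
 *	pvr_queue_device_pre_reset(pvr_dev);
 *	pvr_do_gpu_reset(pvr_dev);
 *	pvr_queue_device_post_reset(pvr_dev);
 */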

int pvr_queue_device_init(struct pvr_device *pvr_dev);

void pvr_queue_device_fini(struct pvr_device *pvr_dev);

#endif /* PVR_QUEUE_H */