/* SPDX-License-Identifier: GPL-2.0 or MIT */
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */
5
6#ifndef __PANTHOR_DEVICE_H__
7#define __PANTHOR_DEVICE_H__
8
9#include <linux/atomic.h>
10#include <linux/io-pgtable.h>
11#include <linux/regulator/consumer.h>
12#include <linux/sched.h>
13#include <linux/spinlock.h>
14
15#include <drm/drm_device.h>
16#include <drm/drm_mm.h>
17#include <drm/gpu_scheduler.h>
18#include <drm/panthor_drm.h>
19
/* Forward declarations: this header only manipulates pointers to these
 * driver-internal types, so their full definitions are not needed here.
 */
struct panthor_csf;
struct panthor_csf_ctx;
struct panthor_device;
struct panthor_gpu;
struct panthor_group_pool;
struct panthor_heap_pool;
struct panthor_job;
struct panthor_mmu;
struct panthor_fw;
struct panthor_perfcnt;
struct panthor_vm;
struct panthor_vm_pool;
32
/**
 * enum panthor_device_pm_state - PM state
 *
 * Tracked in &panthor_device.pm.state as an atomic_t. The transitional
 * RESUMING/SUSPENDING states exist so code racing with suspend/resume
 * (e.g. reset scheduling, MMIO mapping) can tell an in-flight transition
 * apart from a settled state.
 */
enum panthor_device_pm_state {
	/** @PANTHOR_DEVICE_PM_STATE_SUSPENDED: Device is suspended. */
	PANTHOR_DEVICE_PM_STATE_SUSPENDED = 0,

	/** @PANTHOR_DEVICE_PM_STATE_RESUMING: Device is being resumed. */
	PANTHOR_DEVICE_PM_STATE_RESUMING,

	/** @PANTHOR_DEVICE_PM_STATE_ACTIVE: Device is active. */
	PANTHOR_DEVICE_PM_STATE_ACTIVE,

	/** @PANTHOR_DEVICE_PM_STATE_SUSPENDING: Device is being suspended. */
	PANTHOR_DEVICE_PM_STATE_SUSPENDING,
};
49
/**
 * struct panthor_irq - IRQ data
 *
 * Used to automate IRQ handling for the 3 different IRQs we have in this driver.
 * Instances are driven by the functions generated by PANTHOR_IRQ_HANDLER().
 */
struct panthor_irq {
	/** @ptdev: Panthor device */
	struct panthor_device *ptdev;

	/** @irq: IRQ number. */
	int irq;

	/** @mask: Current mask being applied to xxx_INT_MASK. */
	u32 mask;

	/**
	 * @suspended: Set to true when the IRQ is suspended.
	 *
	 * Checked by the raw/threaded handlers so they don't touch the
	 * interrupt registers once the IRQ has been suspended.
	 */
	atomic_t suspended;
};
68
/**
 * struct panthor_device - Panthor device
 */
struct panthor_device {
	/** @base: Base drm_device. */
	struct drm_device base;

	/** @phys_addr: Physical address of the iomem region. */
	phys_addr_t phys_addr;

	/** @iomem: CPU mapping of the IOMEM region. */
	void __iomem *iomem;

	/** @clks: GPU clocks. */
	struct {
		/** @core: Core clock. */
		struct clk *core;

		/** @stacks: Stacks clock. This clock is optional. */
		struct clk *stacks;

		/** @coregroup: Core group clock. This clock is optional. */
		struct clk *coregroup;
	} clks;

	/** @coherent: True if the CPU/GPU are memory coherent. */
	bool coherent;

	/** @gpu_info: GPU information. */
	struct drm_panthor_gpu_info gpu_info;

	/** @csif_info: Command stream interface information. */
	struct drm_panthor_csif_info csif_info;

	/** @gpu: GPU management data. */
	struct panthor_gpu *gpu;

	/** @fw: FW management data. */
	struct panthor_fw *fw;

	/** @mmu: MMU management data. */
	struct panthor_mmu *mmu;

	/** @scheduler: Scheduler management data. */
	struct panthor_scheduler *scheduler;

	/** @devfreq: Device frequency scaling management data. */
	struct panthor_devfreq *devfreq;

	/** @unplug: Device unplug related fields. */
	struct {
		/** @lock: Lock used to serialize unplug operations. */
		struct mutex lock;

		/**
		 * @done: Completion object signaled when the unplug
		 * operation is done.
		 */
		struct completion done;
	} unplug;

	/** @reset: Reset related fields. */
	struct {
		/** @wq: Ordered workqueue used to schedule reset operations. */
		struct workqueue_struct *wq;

		/** @work: Reset work. */
		struct work_struct work;

		/** @pending: Set to true if a reset is pending. */
		atomic_t pending;
	} reset;

	/** @pm: Power management related data. */
	struct {
		/** @state: Power state. */
		atomic_t state;

		/**
		 * @mmio_lock: Lock protecting MMIO userspace CPU mappings.
		 *
		 * This is needed to ensure we map the dummy IO pages when
		 * the device is being suspended, and the real IO pages when
		 * the device is being resumed. We can't rely on the @state
		 * atomicity alone to deal with this race.
		 */
		struct mutex mmio_lock;

		/**
		 * @dummy_latest_flush: Dummy LATEST_FLUSH page.
		 *
		 * Used to replace the real LATEST_FLUSH page when the GPU
		 * is suspended.
		 */
		struct page *dummy_latest_flush;
	} pm;
};
166
/**
 * struct panthor_file - Panthor file
 *
 * Per-open-file driver state: each DRM file gets its own VM pool and
 * scheduling group pool.
 */
struct panthor_file {
	/** @ptdev: Device attached to this file. */
	struct panthor_device *ptdev;

	/** @vms: VM pool attached to this file. */
	struct panthor_vm_pool *vms;

	/** @groups: Scheduling group pool attached to this file. */
	struct panthor_group_pool *groups;
};
180
/* Device lifecycle entry points. */
int panthor_device_init(struct panthor_device *ptdev);
void panthor_device_unplug(struct panthor_device *ptdev);
183
/**
 * panthor_device_schedule_reset() - Schedules a reset operation
 * @ptdev: Device to schedule a reset for.
 *
 * The cmpxchg() transitions reset.pending from 0 to 1 exactly once, so
 * the reset work is queued at most once per pending reset. The work is
 * only queued while the device is active; if the device is not active,
 * the pending flag stays set — presumably picked up on resume (TODO
 * confirm against panthor_device_resume()).
 */
static inline void panthor_device_schedule_reset(struct panthor_device *ptdev)
{
	if (!atomic_cmpxchg(&ptdev->reset.pending, 0, 1) &&
	    atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE)
		queue_work(ptdev->reset.wq, &ptdev->reset.work);
}
193
194/**
195 * panthor_device_reset_is_pending() - Checks if a reset is pending.
196 *
197 * Return: true if a reset is pending, false otherwise.
198 */
199static inline bool panthor_device_reset_is_pending(struct panthor_device *ptdev)
200{
201	return atomic_read(&ptdev->reset.pending) != 0;
202}
203
/* mmap handler for the IO region(s) exposed to userspace. */
int panthor_device_mmap_io(struct panthor_device *ptdev,
			   struct vm_area_struct *vma);

/* PM callbacks (signatures match dev_pm_ops-style callbacks). */
int panthor_device_resume(struct device *dev);
int panthor_device_suspend(struct device *dev);
209
/**
 * enum drm_panthor_exception_type - Exception types
 *
 * Raw exception codes as reported by the hardware/FW in fault status
 * fields. Codes at or below DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT are
 * non-fault notifications; anything above is a fault (see
 * panthor_exception_is_fault()). Use panthor_exception_name() to get a
 * human-readable string for a code.
 *
 * NOTE(review): the numeric values must match the Mali CSF exception
 * encoding — do not renumber.
 */
enum drm_panthor_exception_type {
	DRM_PANTHOR_EXCEPTION_OK = 0x00,
	DRM_PANTHOR_EXCEPTION_TERMINATED = 0x04,
	DRM_PANTHOR_EXCEPTION_KABOOM = 0x05,
	DRM_PANTHOR_EXCEPTION_EUREKA = 0x06,
	DRM_PANTHOR_EXCEPTION_ACTIVE = 0x08,
	DRM_PANTHOR_EXCEPTION_CS_RES_TERM = 0x0f,
	DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT = 0x3f,
	DRM_PANTHOR_EXCEPTION_CS_CONFIG_FAULT = 0x40,
	DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE = 0x41,
	DRM_PANTHOR_EXCEPTION_CS_ENDPOINT_FAULT = 0x44,
	DRM_PANTHOR_EXCEPTION_CS_BUS_FAULT = 0x48,
	DRM_PANTHOR_EXCEPTION_CS_INSTR_INVALID = 0x49,
	DRM_PANTHOR_EXCEPTION_CS_CALL_STACK_OVERFLOW = 0x4a,
	DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT = 0x4b,
	DRM_PANTHOR_EXCEPTION_INSTR_INVALID_PC = 0x50,
	DRM_PANTHOR_EXCEPTION_INSTR_INVALID_ENC = 0x51,
	DRM_PANTHOR_EXCEPTION_INSTR_BARRIER_FAULT = 0x55,
	DRM_PANTHOR_EXCEPTION_DATA_INVALID_FAULT = 0x58,
	DRM_PANTHOR_EXCEPTION_TILE_RANGE_FAULT = 0x59,
	DRM_PANTHOR_EXCEPTION_ADDR_RANGE_FAULT = 0x5a,
	DRM_PANTHOR_EXCEPTION_IMPRECISE_FAULT = 0x5b,
	DRM_PANTHOR_EXCEPTION_OOM = 0x60,
	DRM_PANTHOR_EXCEPTION_CSF_FW_INTERNAL_ERROR = 0x68,
	DRM_PANTHOR_EXCEPTION_CSF_RES_EVICTION_TIMEOUT = 0x69,
	DRM_PANTHOR_EXCEPTION_GPU_BUS_FAULT = 0x80,
	DRM_PANTHOR_EXCEPTION_GPU_SHAREABILITY_FAULT = 0x88,
	DRM_PANTHOR_EXCEPTION_SYS_SHAREABILITY_FAULT = 0x89,
	DRM_PANTHOR_EXCEPTION_GPU_CACHEABILITY_FAULT = 0x8a,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_0 = 0xc0,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_1 = 0xc1,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_2 = 0xc2,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_3 = 0xc3,
	DRM_PANTHOR_EXCEPTION_TRANSLATION_FAULT_4 = 0xc4,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_0 = 0xc8,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_1 = 0xc9,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_2 = 0xca,
	DRM_PANTHOR_EXCEPTION_PERM_FAULT_3 = 0xcb,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_1 = 0xd9,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_2 = 0xda,
	DRM_PANTHOR_EXCEPTION_ACCESS_FLAG_3 = 0xdb,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_IN = 0xe0,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT0 = 0xe4,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT1 = 0xe5,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT2 = 0xe6,
	DRM_PANTHOR_EXCEPTION_ADDR_SIZE_FAULT_OUT3 = 0xe7,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_0 = 0xe8,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_1 = 0xe9,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_2 = 0xea,
	DRM_PANTHOR_EXCEPTION_MEM_ATTR_FAULT_3 = 0xeb,
};
261
262/**
263 * panthor_exception_is_fault() - Checks if an exception is a fault.
264 *
265 * Return: true if the exception is a fault, false otherwise.
266 */
267static inline bool
268panthor_exception_is_fault(u32 exception_code)
269{
270	return exception_code > DRM_PANTHOR_EXCEPTION_MAX_NON_FAULT;
271}
272
/* Translates an exception code into a human-readable name. */
const char *panthor_exception_name(struct panthor_device *ptdev,
				   u32 exception_code);
275
/**
 * PANTHOR_IRQ_HANDLER() - Define interrupt handlers and the interrupt
 * registration function.
 * @__name: Token used to derive the names of the generated functions.
 * @__reg_prefix: Register-name prefix; the macro accesses the
 * &lt;prefix&gt;_INT_STAT, _INT_RAWSTAT, _INT_CLEAR and _INT_MASK registers.
 * @__handler: Function called from the threaded handler for each
 * non-zero masked status, as void (*)(struct panthor_device *, u32).
 *
 * The boiler-plate to gracefully deal with shared interrupts is
 * auto-generated. All you have to do is call PANTHOR_IRQ_HANDLER()
 * just after the actual handler. The handler prototype is:
 *
 * void (*handler)(struct panthor_device *, u32 status);
 *
 * Generated functions:
 * - panthor_&lt;name&gt;_irq_raw_handler(): hard-IRQ part; returns IRQ_NONE when
 *   the IRQ is suspended or not asserted (shared-IRQ case), otherwise masks
 *   all interrupts and wakes the threaded handler.
 * - panthor_&lt;name&gt;_irq_threaded_handler(): drains RAWSTAT (filtered by the
 *   current mask), clearing and dispatching each status to @__handler, then
 *   restores the mask unless the IRQ was suspended meanwhile.
 * - panthor_&lt;name&gt;_irq_suspend(): masks everything, waits for in-flight
 *   handlers with synchronize_irq(), then flags the IRQ as suspended.
 * - panthor_&lt;name&gt;_irq_resume(): clears the suspended flag, then clears and
 *   unmasks the requested interrupts.
 * - panthor_request_&lt;name&gt;_irq(): registers both handlers with
 *   devm_request_threaded_irq() as a shared IRQ and resumes with @mask.
 */
#define PANTHOR_IRQ_HANDLER(__name, __reg_prefix, __handler)					\
static irqreturn_t panthor_ ## __name ## _irq_raw_handler(int irq, void *data)			\
{												\
	struct panthor_irq *pirq = data;							\
	struct panthor_device *ptdev = pirq->ptdev;						\
												\
	if (atomic_read(&pirq->suspended))							\
		return IRQ_NONE;								\
	if (!gpu_read(ptdev, __reg_prefix ## _INT_STAT))					\
		return IRQ_NONE;								\
												\
	gpu_write(ptdev, __reg_prefix ## _INT_MASK, 0);						\
	return IRQ_WAKE_THREAD;									\
}												\
												\
static irqreturn_t panthor_ ## __name ## _irq_threaded_handler(int irq, void *data)		\
{												\
	struct panthor_irq *pirq = data;							\
	struct panthor_device *ptdev = pirq->ptdev;						\
	irqreturn_t ret = IRQ_NONE;								\
												\
	while (true) {										\
		u32 status = gpu_read(ptdev, __reg_prefix ## _INT_RAWSTAT) & pirq->mask;	\
												\
		if (!status)									\
			break;									\
												\
		gpu_write(ptdev, __reg_prefix ## _INT_CLEAR, status);				\
												\
		__handler(ptdev, status);							\
		ret = IRQ_HANDLED;								\
	}											\
												\
	if (!atomic_read(&pirq->suspended))							\
		gpu_write(ptdev, __reg_prefix ## _INT_MASK, pirq->mask);			\
												\
	return ret;										\
}												\
												\
static inline void panthor_ ## __name ## _irq_suspend(struct panthor_irq *pirq)			\
{												\
	pirq->mask = 0;										\
	gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, 0);					\
	synchronize_irq(pirq->irq);								\
	atomic_set(&pirq->suspended, true);							\
}												\
												\
static inline void panthor_ ## __name ## _irq_resume(struct panthor_irq *pirq, u32 mask)	\
{												\
	atomic_set(&pirq->suspended, false);							\
	pirq->mask = mask;									\
	gpu_write(pirq->ptdev, __reg_prefix ## _INT_CLEAR, mask);				\
	gpu_write(pirq->ptdev, __reg_prefix ## _INT_MASK, mask);				\
}												\
												\
static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev,			\
					      struct panthor_irq *pirq,				\
					      int irq, u32 mask)				\
{												\
	pirq->ptdev = ptdev;									\
	pirq->irq = irq;									\
	panthor_ ## __name ## _irq_resume(pirq, mask);						\
												\
	return devm_request_threaded_irq(ptdev->base.dev, irq,					\
					 panthor_ ## __name ## _irq_raw_handler,		\
					 panthor_ ## __name ## _irq_threaded_handler,		\
					 IRQF_SHARED, KBUILD_MODNAME "-" # __name,		\
					 pirq);							\
}
355
/* Driver-wide workqueue used to schedule deferred cleanup work. */
extern struct workqueue_struct *panthor_cleanup_wq;
357
358#endif
359