/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014,2017, 2019 The Linux Foundation. All rights reserved.
 */

#ifndef __ADRENO_GPU_H__
#define __ADRENO_GPU_H__

#include <linux/firmware.h>
#include <linux/iopoll.h>

#include "msm_gpu.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

extern bool snapshot_debugbus;
extern bool allow_vram_carveout;

enum {
	ADRENO_FW_PM4 = 0,
	ADRENO_FW_SQE = 0, /* a6xx */
	ADRENO_FW_PFP = 1,
	ADRENO_FW_GMU = 1, /* a6xx */
	ADRENO_FW_GPMU = 2,
	ADRENO_FW_MAX,
};

/**
 * enum adreno_family - identify generation and possibly sub-generation
 *
 * In some cases there are distinct sub-generations within a major revision,
 * so it helps to be able to group the GPU devices by generation and, if
 * necessary, sub-generation.
 */
enum adreno_family {
	ADRENO_2XX_GEN1,  /* a20x */
	ADRENO_2XX_GEN2,  /* a22x */
	ADRENO_3XX,
	ADRENO_4XX,
	ADRENO_5XX,
	ADRENO_6XX_GEN1,  /* a630 family */
	ADRENO_6XX_GEN2,  /* a640 family */
	ADRENO_6XX_GEN3,  /* a650 family */
	ADRENO_6XX_GEN4,  /* a660 family */
	ADRENO_7XX_GEN1,  /* a730 family */
	ADRENO_7XX_GEN2,  /* a740 family */
	ADRENO_7XX_GEN3,  /* a750 family */
};

#define ADRENO_QUIRK_TWO_PASS_USE_WFI		BIT(0)
#define ADRENO_QUIRK_FAULT_DETECT_MASK		BIT(1)
#define ADRENO_QUIRK_LMLOADKILL_DISABLE		BIT(2)
#define ADRENO_QUIRK_HAS_HW_APRIV		BIT(3)
#define ADRENO_QUIRK_HAS_CACHED_COHERENT	BIT(4)

/* Helper for formatting the chip_id in the way that userspace tools like
 * crashdec expect.
 */
#define ADRENO_CHIPID_FMT "%u.%u.%u.%u"
#define ADRENO_CHIPID_ARGS(_c) \
	(((_c) >> 24) & 0xff), \
	(((_c) >> 16) & 0xff), \
	(((_c) >> 8)  & 0xff), \
	((_c) & 0xff)
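
/*
 * A minimal usage sketch (illustrative, not part of the driver): with a
 * chip_id of 0x07030001 this would print "chip id: 7.3.0.1":
 *
 *	printk("chip id: " ADRENO_CHIPID_FMT "\n", ADRENO_CHIPID_ARGS(chip_id));
 */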

struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};

struct adreno_reglist {
	u32 offset;
	u32 value;
};

extern const struct adreno_reglist a612_hwcg[], a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[];
extern const struct adreno_reglist a660_hwcg[], a690_hwcg[], a702_hwcg[], a730_hwcg[], a740_hwcg[];

struct adreno_speedbin {
	uint16_t fuse;
	uint16_t speedbin;
};

struct adreno_info {
	const char *machine;
	/**
	 * @chip_ids: Table of matching chip-ids
	 *
	 * Terminated with a 0 sentinel
	 */
	uint32_t *chip_ids;
	enum adreno_family family;
	uint32_t revn;
	const char *fw[ADRENO_FW_MAX];
	uint32_t gmem;
	u64 quirks;
	struct msm_gpu *(*init)(struct drm_device *dev);
	const char *zapfw;
	u32 inactive_period;
	const struct adreno_reglist *hwcg;
	u64 address_space_size;
	/**
	 * @speedbins: Optional table of fuse to speedbin mappings
	 *
	 * Consists of pairs of fuse, index mappings, terminated with
	 * a {SHRT_MAX, 0} sentinel.
	 */
	struct adreno_speedbin *speedbins;
};

#define ADRENO_CHIP_IDS(tbl...) (uint32_t[]) { tbl, 0 }

/*
 * Helper to build a speedbin table, i.e. the table:
 *      fuse | speedbin
 *      -----+---------
 *        0  |   0
 *       169 |   1
 *       174 |   2
 *
 * would be declared as:
 *
 *     .speedbins = ADRENO_SPEEDBINS(
 *                      { 0,   0 },
 *                      { 169, 1 },
 *                      { 174, 2 },
 *     ),
 *
 * Note that the trailing comma after the last entry is required: the macro
 * relies on it to separate the caller's entries from the {SHRT_MAX, 0}
 * sentinel it appends.
 */
#define ADRENO_SPEEDBINS(tbl...) (struct adreno_speedbin[]) { tbl {SHRT_MAX, 0} }

struct adreno_gpu {
	struct msm_gpu base;
	const struct adreno_info *info;
	uint32_t chip_id;
	uint16_t speedbin;
	const struct adreno_gpu_funcs *funcs;

	/* interesting register offsets to dump: */
	const unsigned int *registers;

	/*
	 * Are we loading fw from the legacy path?  Prior to the addition
	 * of GPU firmware to linux-firmware, the fw files were placed in
	 * the toplevel firmware directory, following qcom's android
	 * kernel.  But linux-firmware preferred they be placed in a
	 * 'qcom' subdirectory.
	 *
	 * For backwards compatibility, we try first to load from
	 * the new path, using request_firmware_direct() to avoid
	 * any potential timeout waiting for the usermode helper, then
	 * fall back to the old path (also with direct load).  And
	 * finally fall back to request_firmware() with the new
	 * path to allow the usermode helper.
	 */
	enum {
		FW_LOCATION_UNKNOWN = 0,
		FW_LOCATION_NEW,       /* /lib/firmware/qcom/$fwfile */
		FW_LOCATION_LEGACY,    /* /lib/firmware/$fwfile */
		FW_LOCATION_HELPER,
	} fwloc;

	/* firmware: */
	const struct firmware *fw[ADRENO_FW_MAX];

	/* UBWC (universal bandwidth compression) configuration: */
	struct {
		u32 rgb565_predicator;
		u32 uavflagprd_inv;
		u32 min_acc_len;
		u32 ubwc_mode;
		u32 highest_bank_bit;
		u32 amsbc;
	} ubwc_config;

	/*
	 * Register offsets are different between some GPUs.
	 * GPU specific offsets will be exported by GPU specific
	 * code (a3xx_gpu.c) and stored in this common location.
	 */
	const unsigned int *reg_offsets;
	bool gmu_is_wrapper;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

struct adreno_ocmem {
	struct ocmem *ocmem;
	unsigned long base;
	void *hdl;
};

/* platform config data (i.e. from DT, or pdata) */
struct adreno_platform_config {
	uint32_t chip_id;
	const struct adreno_info *info;
};

#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

#define spin_until(X) ({                                   \
	int __ret = -ETIMEDOUT;                            \
	unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
	do {                                               \
		if (X) {                                   \
			__ret = 0;                         \
			break;                             \
		}                                          \
	} while (time_before(jiffies, __t));               \
	__ret;                                             \
})
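
/*
 * Illustrative sketch (REG_STATUS and the busy bit are placeholders, not
 * real register names): busy-wait up to ADRENO_IDLE_TIMEOUT for a status
 * bit to clear, getting 0 on success or -ETIMEDOUT otherwise:
 *
 *	if (spin_until(!(gpu_read(gpu, REG_STATUS) & BIT(0))))
 *		DRM_ERROR("timed out waiting for GPU to idle\n");
 */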

static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu)
{
	/* It is probably ok to assume legacy "adreno_rev" format
	 * for all a6xx devices, but probably best to limit this
	 * to older things.
	 */
	WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1);
	return gpu->chip_id & 0xff;
}

static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->revn == revn;
}

static inline bool adreno_has_gmu_wrapper(const struct adreno_gpu *gpu)
{
	return gpu->gmu_is_wrapper;
}

static inline bool adreno_is_a2xx(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family <= ADRENO_2XX_GEN2;
}

static inline bool adreno_is_a20x(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_2XX_GEN1;
}

static inline bool adreno_is_a225(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 225);
}

static inline bool adreno_is_a305(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 305);
}

static inline bool adreno_is_a305b(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x03000512;
}

static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
{
	/* yes, 307, because a305c is 306 */
	return adreno_is_revn(gpu, 307);
}

static inline bool adreno_is_a320(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 320);
}

static inline bool adreno_is_a330(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 330);
}

static inline bool adreno_is_a330v2(const struct adreno_gpu *gpu)
{
	return adreno_is_a330(gpu) && (adreno_patchid(gpu) > 0);
}

static inline int adreno_is_a405(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 405);
}

static inline int adreno_is_a420(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 420);
}

static inline int adreno_is_a430(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 430);
}

static inline int adreno_is_a506(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 506);
}

static inline int adreno_is_a508(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 508);
}

static inline int adreno_is_a509(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 509);
}

static inline int adreno_is_a510(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 510);
}

static inline int adreno_is_a512(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 512);
}

static inline int adreno_is_a530(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 530);
}

static inline int adreno_is_a540(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 540);
}

static inline int adreno_is_a610(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 610);
}

static inline int adreno_is_a618(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 618);
}

static inline int adreno_is_a619(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 619);
}

static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
{
	return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
}

static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 630);
}

static inline int adreno_is_a640(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 640);
}

static inline int adreno_is_a650(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 650);
}

static inline int adreno_is_7c3(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x06030500;
}

static inline int adreno_is_a660(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 660);
}

static inline int adreno_is_a680(const struct adreno_gpu *gpu)
{
	return adreno_is_revn(gpu, 680);
}

static inline int adreno_is_a690(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x06090000;
}

static inline int adreno_is_a702(const struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x07000200;
}

static inline int adreno_is_a610_family(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;

	/* TODO: A612 */
	return adreno_is_a610(gpu) || adreno_is_a702(gpu);
}

/* check for a615, a616, a618, a619 or any a630 derivatives */
static inline int adreno_is_a630_family(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_6XX_GEN1;
}

static inline int adreno_is_a660_family(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_6XX_GEN4;
}

/* check for a650, a660, or any derivatives */
static inline int adreno_is_a650_family(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_6XX_GEN3 ||
	       gpu->info->family == ADRENO_6XX_GEN4;
}

static inline int adreno_is_a640_family(const struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_6XX_GEN2;
}

static inline int adreno_is_a730(struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x07030001;
}

static inline int adreno_is_a740(struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x43050a01;
}

static inline int adreno_is_a750(struct adreno_gpu *gpu)
{
	return gpu->info->chip_ids[0] == 0x43051401;
}

static inline int adreno_is_a740_family(struct adreno_gpu *gpu)
{
	if (WARN_ON_ONCE(!gpu->info))
		return false;
	return gpu->info->family == ADRENO_7XX_GEN2 ||
	       gpu->info->family == ADRENO_7XX_GEN3;
}

static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
{
	/* Update with non-fake (i.e. non-A702) Gen 7 GPUs */
	return gpu->info->family == ADRENO_7XX_GEN1 ||
	       adreno_is_a740_family(gpu);
}

u64 adreno_private_address_space_size(struct msm_gpu *gpu);
int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
		     uint32_t param, uint64_t *value, uint32_t *len);
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
		     uint32_t param, uint64_t value, uint32_t len);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
		const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);

int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
			  struct adreno_ocmem *ocmem);
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
int adreno_load_fw(struct adreno_gpu *adreno_gpu);

void adreno_gpu_state_destroy(struct msm_gpu_state *state);

int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
void adreno_show_object(struct drm_printer *p, void **ptr, int len,
		bool *encoded);

/*
 * Common helper function to initialize the default address space for arm-smmu
 * attached targets
 */
struct msm_gem_address_space *
adreno_create_address_space(struct msm_gpu *gpu,
			    struct platform_device *pdev);

struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
				  struct platform_device *pdev,
				  unsigned long quirks);

int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
			 struct adreno_smmu_fault_info *info, const char *block,
			 u32 scratch[4]);

int adreno_read_speedbin(struct device *dev, u32 *speedbin);

/*
 * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
 * out of secure mode
 */
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);

/* ringbuffer helpers (the parts that are adreno specific) */

static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}

/* no-op packet: */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}

static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
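
/*
 * Illustrative sketch (REG_EXAMPLE is a placeholder): a type-0 packet
 * writes 'cnt' values to consecutive registers starting at 'regindx',
 * so a single register write looks like:
 *
 *	OUT_PKT0(ring, REG_EXAMPLE, 1);
 *	OUT_RING(ring, value);
 */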

/*
 * Compute the parity bit of @val: XOR-fold the value down to a nibble,
 * then use 0x9669 as a 16-entry lookup table; returns 1 when @val has an
 * even number of bits set.
 */
static inline u32 PM4_PARITY(u32 val)
{
	return (0x9669 >> (0xF & (val ^
		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
		(val >> 28)))) & 1;
}

/* Maximum number of values that can be executed for one opcode */
#define TYPE4_MAX_PAYLOAD 127

#define PKT4(_reg, _cnt) \
	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))

static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, PKT4(regindx, cnt));
}

static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
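
/*
 * Illustrative sketch: a type-7 packet carries an opcode plus 'cnt'
 * payload dwords, e.g. emitting a CP_EVENT_WRITE with a single payload
 * dword (the event value shown is only an example):
 *
 *	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
 *	OUT_RING(ring, CACHE_FLUSH_TS);
 */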

struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);

/* Current write pointer offset into the ring, in dwords: */
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}

/*
 * Given a register and a count, return a value to program into
 * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
 * registers starting at _reg.
 *
 * The register base needs to be a multiple of the length. If it is not, the
 * hardware will quietly mask off the bits for you and shift the size. For
 * example, if you intend the protection to start at 0x07 for a length of 4
 * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
 * expose registers you intended to protect!
 */
#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

/*
 * Same as above, but allow reads over the range. For areas of mixed use (such
 * as performance counters) this allows us to protect a much larger range with a
 * single register
 */
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
	((1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
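
/*
 * Illustrative sketch (the protect-table index 'n' and the range are
 * placeholders): block both reads and writes of 0x40 registers starting
 * at 0x800:
 *
 *	gpu_write(gpu, REG_CP_PROTECT_REG(n), ADRENO_PROTECT_RW(0x800, 0x40));
 */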

#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)
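
/*
 * Illustrative sketch (REG_STATUS and the busy bit are placeholders):
 * sample a register roughly every 100us until a bit clears, for at most
 * 10ms (interval and timeout are in microseconds, per readl_poll_timeout):
 *
 *	u32 status;
 *
 *	if (gpu_poll_timeout(gpu, REG_STATUS, status,
 *			!(status & BIT(0)), 100, 10000))
 *		return -ETIMEDOUT;
 */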

#endif /* __ADRENO_GPU_H__ */