amdgpu_atom.c revision 1.2
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Stanislaw Skowronek
23 */
24
25#include <linux/module.h>
26#include <linux/sched.h>
27#include <linux/slab.h>
28#include <asm/unaligned.h>
29
30#define ATOM_DEBUG
31
32#include "atom.h"
33#include "atom-names.h"
34#include "atom-bits.h"
35#include "amdgpu.h"
36
37#define ATOM_COND_ABOVE		0
38#define ATOM_COND_ABOVEOREQUAL	1
39#define ATOM_COND_ALWAYS	2
40#define ATOM_COND_BELOW		3
41#define ATOM_COND_BELOWOREQUAL	4
42#define ATOM_COND_EQUAL		5
43#define ATOM_COND_NOTEQUAL	6
44
45#define ATOM_PORT_ATI	0
46#define ATOM_PORT_PCI	1
47#define ATOM_PORT_SYSIO	2
48
49#define ATOM_UNIT_MICROSEC	0
50#define ATOM_UNIT_MILLISEC	1
51
52#define PLL_INDEX	2
53#define PLL_DATA	3
54
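/*
 * Per-invocation interpreter state: parameter space (ps), workspace (ws),
 * the start offset of the table being executed, and the bookkeeping used by
 * atom_op_jump() to detect tables stuck in a loop.
 */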
55typedef struct {
56	struct atom_context *ctx;
57	uint32_t *ps, *ws;
58	int ps_shift;
59	uint16_t start;
60	unsigned last_jump;
61	unsigned long last_jump_jiffies;
62	bool abort;
63} atom_exec_context;
64
65int amdgpu_atom_debug = 0;
66static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
67int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
68
69static uint32_t atom_arg_mask[8] =
70    { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
710xFF000000 };
72static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
73
74static int atom_dst_to_src[8][4] = {
75	/* translate destination alignment field to the source alignment encoding */
76	{0, 0, 0, 0},
77	{1, 2, 3, 0},
78	{1, 2, 3, 0},
79	{1, 2, 3, 0},
80	{4, 5, 6, 7},
81	{4, 5, 6, 7},
82	{4, 5, 6, 7},
83	{4, 5, 6, 7},
84};
85static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
86
87static int debug_depth = 0;
88#ifdef ATOM_DEBUG
89static void debug_print_spaces(int n)
90{
91	while (n--)
92		printk("   ");
93}
94
95#ifdef DEBUG
96#undef DEBUG
97#endif
98
99#define DEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
100#define SDEBUG(...) do if (amdgpu_atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
101#else
102#define DEBUG(...) do { } while (0)
103#define SDEBUG(...) do { } while (0)
104#endif
105
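/*
 * Interpret one indirect-IO (IIO) program at 'base' in the BIOS image: each
 * opcode updates the accumulator 'temp' from the card's IO registers or from
 * bitfields of 'index', 'data' and the saved IO attributes, until
 * ATOM_IIO_END returns the result.
 */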
106static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
107				 uint32_t index, uint32_t data)
108{
109	uint32_t temp = 0xCDCDCDCD;
110
111	while (1)
112		switch (CU8(base)) {
113		case ATOM_IIO_NOP:
114			base++;
115			break;
116		case ATOM_IIO_READ:
117			temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
118			base += 3;
119			break;
120		case ATOM_IIO_WRITE:
121			ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
122			base += 3;
123			break;
124		case ATOM_IIO_CLEAR:
125			temp &=
126			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
127			      CU8(base + 2));
128			base += 3;
129			break;
130		case ATOM_IIO_SET:
131			temp |=
132			    (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base +
133									2);
134			base += 3;
135			break;
136		case ATOM_IIO_MOVE_INDEX:
137			temp &=
138			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
139			      CU8(base + 3));
140			temp |=
141			    ((index >> CU8(base + 2)) &
142			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
143									  3);
144			base += 4;
145			break;
146		case ATOM_IIO_MOVE_DATA:
147			temp &=
148			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
149			      CU8(base + 3));
150			temp |=
151			    ((data >> CU8(base + 2)) &
152			     (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
153									  3);
154			base += 4;
155			break;
156		case ATOM_IIO_MOVE_ATTR:
157			temp &=
158			    ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
159			      CU8(base + 3));
160			temp |=
161			    ((ctx->
162			      io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
163									  CU8
164									  (base
165									   +
166									   1))))
167			    << CU8(base + 3);
168			base += 4;
169			break;
170		case ATOM_IIO_END:
171			return temp;
172		default:
173			pr_info("Unknown IIO opcode\n");
174			return 0;
175		}
176}
177
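/*
 * Decode and fetch one source operand.  The low three bits of 'attr' select
 * the operand space (register, parameter space, workspace, data table, frame
 * buffer scratch, immediate, PLL or MC), bits 3-5 the byte/word/dword
 * alignment.  *ptr is advanced past the operand, the raw 32-bit value is
 * optionally stored in *saved, and the masked, shifted field is returned.
 */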
178static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
179				 int *ptr, uint32_t *saved, int print)
180{
181	uint32_t idx, val = 0xCDCDCDCD, align, arg;
182	struct atom_context *gctx = ctx->ctx;
183	arg = attr & 7;
184	align = (attr >> 3) & 7;
185	switch (arg) {
186	case ATOM_ARG_REG:
187		idx = U16(*ptr);
188		(*ptr) += 2;
189		if (print)
190			DEBUG("REG[0x%04X]", idx);
191		idx += gctx->reg_block;
192		switch (gctx->io_mode) {
193		case ATOM_IO_MM:
194			val = gctx->card->reg_read(gctx->card, idx);
195			break;
196		case ATOM_IO_PCI:
197			pr_info("PCI registers are not implemented\n");
198			return 0;
199		case ATOM_IO_SYSIO:
200			pr_info("SYSIO registers are not implemented\n");
201			return 0;
202		default:
203			if (!(gctx->io_mode & 0x80)) {
204				pr_info("Bad IO mode\n");
205				return 0;
206			}
207			if (!gctx->iio[gctx->io_mode & 0x7F]) {
208				pr_info("Undefined indirect IO read method %d\n",
209					gctx->io_mode & 0x7F);
210				return 0;
211			}
212			val =
213			    atom_iio_execute(gctx,
214					     gctx->iio[gctx->io_mode & 0x7F],
215					     idx, 0);
216		}
217		break;
218	case ATOM_ARG_PS:
219		idx = U8(*ptr);
220		(*ptr)++;
221		/* get_unaligned_le32 avoids unaligned accesses from atombios
222		 * tables, noticed on a DEC Alpha. */
223		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
224		if (print)
225			DEBUG("PS[0x%02X,0x%04X]", idx, val);
226		break;
227	case ATOM_ARG_WS:
228		idx = U8(*ptr);
229		(*ptr)++;
230		if (print)
231			DEBUG("WS[0x%02X]", idx);
232		switch (idx) {
233		case ATOM_WS_QUOTIENT:
234			val = gctx->divmul[0];
235			break;
236		case ATOM_WS_REMAINDER:
237			val = gctx->divmul[1];
238			break;
239		case ATOM_WS_DATAPTR:
240			val = gctx->data_block;
241			break;
242		case ATOM_WS_SHIFT:
243			val = gctx->shift;
244			break;
245		case ATOM_WS_OR_MASK:
246			val = 1 << gctx->shift;
247			break;
248		case ATOM_WS_AND_MASK:
249			val = ~(1 << gctx->shift);
250			break;
251		case ATOM_WS_FB_WINDOW:
252			val = gctx->fb_base;
253			break;
254		case ATOM_WS_ATTRIBUTES:
255			val = gctx->io_attr;
256			break;
257		case ATOM_WS_REGPTR:
258			val = gctx->reg_block;
259			break;
260		default:
261			val = ctx->ws[idx];
262		}
263		break;
264	case ATOM_ARG_ID:
265		idx = U16(*ptr);
266		(*ptr) += 2;
267		if (print) {
268			if (gctx->data_block)
269				DEBUG("ID[0x%04X+%04X]", idx, gctx->data_block);
270			else
271				DEBUG("ID[0x%04X]", idx);
272		}
273		val = U32(idx + gctx->data_block);
274		break;
275	case ATOM_ARG_FB:
276		idx = U8(*ptr);
277		(*ptr)++;
278		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
279			DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
280				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
281			val = 0;
282		} else
283			val = gctx->scratch[(gctx->fb_base / 4) + idx];
284		if (print)
285			DEBUG("FB[0x%02X]", idx);
286		break;
287	case ATOM_ARG_IMM:
288		switch (align) {
289		case ATOM_SRC_DWORD:
290			val = U32(*ptr);
291			(*ptr) += 4;
292			if (print)
293				DEBUG("IMM 0x%08X\n", val);
294			return val;
295		case ATOM_SRC_WORD0:
296		case ATOM_SRC_WORD8:
297		case ATOM_SRC_WORD16:
298			val = U16(*ptr);
299			(*ptr) += 2;
300			if (print)
301				DEBUG("IMM 0x%04X\n", val);
302			return val;
303		case ATOM_SRC_BYTE0:
304		case ATOM_SRC_BYTE8:
305		case ATOM_SRC_BYTE16:
306		case ATOM_SRC_BYTE24:
307			val = U8(*ptr);
308			(*ptr)++;
309			if (print)
310				DEBUG("IMM 0x%02X\n", val);
311			return val;
312		}
313		return 0;
314	case ATOM_ARG_PLL:
315		idx = U8(*ptr);
316		(*ptr)++;
317		if (print)
318			DEBUG("PLL[0x%02X]", idx);
319		val = gctx->card->pll_read(gctx->card, idx);
320		break;
321	case ATOM_ARG_MC:
322		idx = U8(*ptr);
323		(*ptr)++;
324		if (print)
325			DEBUG("MC[0x%02X]", idx);
326		val = gctx->card->mc_read(gctx->card, idx);
327		break;
328	}
329	if (saved)
330		*saved = val;
331	val &= atom_arg_mask[align];
332	val >>= atom_arg_shift[align];
333	if (print)
334		switch (align) {
335		case ATOM_SRC_DWORD:
336			DEBUG(".[31:0] -> 0x%08X\n", val);
337			break;
338		case ATOM_SRC_WORD0:
339			DEBUG(".[15:0] -> 0x%04X\n", val);
340			break;
341		case ATOM_SRC_WORD8:
342			DEBUG(".[23:8] -> 0x%04X\n", val);
343			break;
344		case ATOM_SRC_WORD16:
345			DEBUG(".[31:16] -> 0x%04X\n", val);
346			break;
347		case ATOM_SRC_BYTE0:
348			DEBUG(".[7:0] -> 0x%02X\n", val);
349			break;
350		case ATOM_SRC_BYTE8:
351			DEBUG(".[15:8] -> 0x%02X\n", val);
352			break;
353		case ATOM_SRC_BYTE16:
354			DEBUG(".[23:16] -> 0x%02X\n", val);
355			break;
356		case ATOM_SRC_BYTE24:
357			DEBUG(".[31:24] -> 0x%02X\n", val);
358			break;
359		}
360	return val;
361}
362
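/* Advance *ptr past a source operand without fetching it. */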
363static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
364{
365	uint32_t align = (attr >> 3) & 7, arg = attr & 7;
366	switch (arg) {
367	case ATOM_ARG_REG:
368	case ATOM_ARG_ID:
369		(*ptr) += 2;
370		break;
371	case ATOM_ARG_PLL:
372	case ATOM_ARG_MC:
373	case ATOM_ARG_PS:
374	case ATOM_ARG_WS:
375	case ATOM_ARG_FB:
376		(*ptr)++;
377		break;
378	case ATOM_ARG_IMM:
379		switch (align) {
380		case ATOM_SRC_DWORD:
381			(*ptr) += 4;
382			return;
383		case ATOM_SRC_WORD0:
384		case ATOM_SRC_WORD8:
385		case ATOM_SRC_WORD16:
386			(*ptr) += 2;
387			return;
388		case ATOM_SRC_BYTE0:
389		case ATOM_SRC_BYTE8:
390		case ATOM_SRC_BYTE16:
391		case ATOM_SRC_BYTE24:
392			(*ptr)++;
393			return;
394		}
395		return;
396	}
397}
398
399static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
400{
401	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
402}
403
404static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
405{
406	uint32_t val = 0xCDCDCDCD;
407
408	switch (align) {
409	case ATOM_SRC_DWORD:
410		val = U32(*ptr);
411		(*ptr) += 4;
412		break;
413	case ATOM_SRC_WORD0:
414	case ATOM_SRC_WORD8:
415	case ATOM_SRC_WORD16:
416		val = U16(*ptr);
417		(*ptr) += 2;
418		break;
419	case ATOM_SRC_BYTE0:
420	case ATOM_SRC_BYTE8:
421	case ATOM_SRC_BYTE16:
422	case ATOM_SRC_BYTE24:
423		val = U8(*ptr);
424		(*ptr)++;
425		break;
426	}
427	return val;
428}
429
430static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
431			     int *ptr, uint32_t *saved, int print)
432{
433	return atom_get_src_int(ctx,
434				arg | atom_dst_to_src[(attr >> 3) &
435						      7][(attr >> 6) & 3] << 3,
436				ptr, saved, print);
437}
438
439static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr)
440{
441	atom_skip_src_int(ctx,
442			  arg | atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) &
443								 3] << 3, ptr);
444}
445
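/*
 * Store 'val' into a destination operand: the field is shifted and masked
 * according to the destination alignment, merged with the bits of 'saved'
 * outside that field, and written back to the register, parameter space,
 * workspace, frame buffer scratch, PLL or MC location.
 */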
446static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
447			 int *ptr, uint32_t val, uint32_t saved)
448{
449	uint32_t align =
450	    atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3], old_val =
451	    val, idx;
452	struct atom_context *gctx = ctx->ctx;
453	old_val &= atom_arg_mask[align] >> atom_arg_shift[align];
454	val <<= atom_arg_shift[align];
455	val &= atom_arg_mask[align];
456	saved &= ~atom_arg_mask[align];
457	val |= saved;
458	switch (arg) {
459	case ATOM_ARG_REG:
460		idx = U16(*ptr);
461		(*ptr) += 2;
462		DEBUG("REG[0x%04X]", idx);
463		idx += gctx->reg_block;
464		switch (gctx->io_mode) {
465		case ATOM_IO_MM:
466			if (idx == 0)
467				gctx->card->reg_write(gctx->card, idx,
468						      val << 2);
469			else
470				gctx->card->reg_write(gctx->card, idx, val);
471			break;
472		case ATOM_IO_PCI:
473			pr_info("PCI registers are not implemented\n");
474			return;
475		case ATOM_IO_SYSIO:
476			pr_info("SYSIO registers are not implemented\n");
477			return;
478		default:
479			if (!(gctx->io_mode & 0x80)) {
480				pr_info("Bad IO mode\n");
481				return;
482			}
483			if (!gctx->iio[gctx->io_mode & 0xFF]) {
484				pr_info("Undefined indirect IO write method %d\n",
485					gctx->io_mode & 0x7F);
486				return;
487			}
488			atom_iio_execute(gctx, gctx->iio[gctx->io_mode & 0xFF],
489					 idx, val);
490		}
491		break;
492	case ATOM_ARG_PS:
493		idx = U8(*ptr);
494		(*ptr)++;
495		DEBUG("PS[0x%02X]", idx);
496		ctx->ps[idx] = cpu_to_le32(val);
497		break;
498	case ATOM_ARG_WS:
499		idx = U8(*ptr);
500		(*ptr)++;
501		DEBUG("WS[0x%02X]", idx);
502		switch (idx) {
503		case ATOM_WS_QUOTIENT:
504			gctx->divmul[0] = val;
505			break;
506		case ATOM_WS_REMAINDER:
507			gctx->divmul[1] = val;
508			break;
509		case ATOM_WS_DATAPTR:
510			gctx->data_block = val;
511			break;
512		case ATOM_WS_SHIFT:
513			gctx->shift = val;
514			break;
515		case ATOM_WS_OR_MASK:
516		case ATOM_WS_AND_MASK:
517			break;
518		case ATOM_WS_FB_WINDOW:
519			gctx->fb_base = val;
520			break;
521		case ATOM_WS_ATTRIBUTES:
522			gctx->io_attr = val;
523			break;
524		case ATOM_WS_REGPTR:
525			gctx->reg_block = val;
526			break;
527		default:
528			ctx->ws[idx] = val;
529		}
530		break;
531	case ATOM_ARG_FB:
532		idx = U8(*ptr);
533		(*ptr)++;
534		if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
535			DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
536				  gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
537		} else
538			gctx->scratch[(gctx->fb_base / 4) + idx] = val;
539		DEBUG("FB[0x%02X]", idx);
540		break;
541	case ATOM_ARG_PLL:
542		idx = U8(*ptr);
543		(*ptr)++;
544		DEBUG("PLL[0x%02X]", idx);
545		gctx->card->pll_write(gctx->card, idx, val);
546		break;
547	case ATOM_ARG_MC:
548		idx = U8(*ptr);
549		(*ptr)++;
550		DEBUG("MC[0x%02X]", idx);
551		gctx->card->mc_write(gctx->card, idx, val);
552		return;
553	}
554	switch (align) {
555	case ATOM_SRC_DWORD:
556		DEBUG(".[31:0] <- 0x%08X\n", old_val);
557		break;
558	case ATOM_SRC_WORD0:
559		DEBUG(".[15:0] <- 0x%04X\n", old_val);
560		break;
561	case ATOM_SRC_WORD8:
562		DEBUG(".[23:8] <- 0x%04X\n", old_val);
563		break;
564	case ATOM_SRC_WORD16:
565		DEBUG(".[31:16] <- 0x%04X\n", old_val);
566		break;
567	case ATOM_SRC_BYTE0:
568		DEBUG(".[7:0] <- 0x%02X\n", old_val);
569		break;
570	case ATOM_SRC_BYTE8:
571		DEBUG(".[15:8] <- 0x%02X\n", old_val);
572		break;
573	case ATOM_SRC_BYTE16:
574		DEBUG(".[23:16] <- 0x%02X\n", old_val);
575		break;
576	case ATOM_SRC_BYTE24:
577		DEBUG(".[31:24] <- 0x%02X\n", old_val);
578		break;
579	}
580}
581
582static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg)
583{
584	uint8_t attr = U8((*ptr)++);
585	uint32_t dst, src, saved;
586	int dptr = *ptr;
587	SDEBUG("   dst: ");
588	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
589	SDEBUG("   src: ");
590	src = atom_get_src(ctx, attr, ptr);
591	dst += src;
592	SDEBUG("   dst: ");
593	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
594}
595
596static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg)
597{
598	uint8_t attr = U8((*ptr)++);
599	uint32_t dst, src, saved;
600	int dptr = *ptr;
601	SDEBUG("   dst: ");
602	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
603	SDEBUG("   src: ");
604	src = atom_get_src(ctx, attr, ptr);
605	dst &= src;
606	SDEBUG("   dst: ");
607	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
608}
609
610static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
611{
612	printk("ATOM BIOS beeped!\n");
613}
614
615static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
616{
617	int idx = U8((*ptr)++);
618	int r = 0;
619
620	if (idx < ATOM_TABLE_NAMES_CNT)
621		SDEBUG("   table: %d (%s)\n", idx, atom_table_names[idx]);
622	else
623		SDEBUG("   table: %d\n", idx);
624	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
625		r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
626	if (r) {
627		ctx->abort = true;
628	}
629}
630
631static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
632{
633	uint8_t attr = U8((*ptr)++);
634	uint32_t saved;
635	int dptr = *ptr;
636	attr &= 0x38;
637	attr |= atom_def_dst[attr >> 3] << 6;
638	atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
639	SDEBUG("   dst: ");
640	atom_put_dst(ctx, arg, attr, &dptr, 0, saved);
641}
642
643static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg)
644{
645	uint8_t attr = U8((*ptr)++);
646	uint32_t dst, src;
647	SDEBUG("   src1: ");
648	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
649	SDEBUG("   src2: ");
650	src = atom_get_src(ctx, attr, ptr);
651	ctx->ctx->cs_equal = (dst == src);
652	ctx->ctx->cs_above = (dst > src);
653	SDEBUG("   result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE",
654	       ctx->ctx->cs_above ? "GT" : "LE");
655}
656
657static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
658{
659	unsigned count = U8((*ptr)++);
660	SDEBUG("   count: %d\n", count);
661	if (arg == ATOM_UNIT_MICROSEC)
662		udelay(count);
663	else if (!drm_can_sleep())
664		mdelay(count);
665	else
666		drm_msleep(count);
667}
668
669static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg)
670{
671	uint8_t attr = U8((*ptr)++);
672	uint32_t dst, src;
673	SDEBUG("   src1: ");
674	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
675	SDEBUG("   src2: ");
676	src = atom_get_src(ctx, attr, ptr);
677	if (src != 0) {
678		ctx->ctx->divmul[0] = dst / src;
679		ctx->ctx->divmul[1] = dst % src;
680	} else {
681		ctx->ctx->divmul[0] = 0;
682		ctx->ctx->divmul[1] = 0;
683	}
684}
685
686static void atom_op_div32(atom_exec_context *ctx, int *ptr, int arg)
687{
688	uint64_t val64;
689	uint8_t attr = U8((*ptr)++);
690	uint32_t dst, src;
691	SDEBUG("   src1: ");
692	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
693	SDEBUG("   src2: ");
694	src = atom_get_src(ctx, attr, ptr);
695	if (src != 0) {
696		val64 = dst;
697		val64 |= ((uint64_t)ctx->ctx->divmul[1]) << 32;
698		do_div(val64, src);
699		ctx->ctx->divmul[0] = lower_32_bits(val64);
700		ctx->ctx->divmul[1] = upper_32_bits(val64);
701	} else {
702		ctx->ctx->divmul[0] = 0;
703		ctx->ctx->divmul[1] = 0;
704	}
705}
706
707static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
708{
709	/* functionally, a nop */
710}
711
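/*
 * Conditional jump within the current table.  Taking the same jump target
 * repeatedly for more than five seconds (tracked via jiffies) is treated as
 * a stuck loop and flags the context to abort.
 */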
712static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
713{
714	int execute = 0, target = U16(*ptr);
715	unsigned long cjiffies;
716
717	(*ptr) += 2;
718	switch (arg) {
719	case ATOM_COND_ABOVE:
720		execute = ctx->ctx->cs_above;
721		break;
722	case ATOM_COND_ABOVEOREQUAL:
723		execute = ctx->ctx->cs_above || ctx->ctx->cs_equal;
724		break;
725	case ATOM_COND_ALWAYS:
726		execute = 1;
727		break;
728	case ATOM_COND_BELOW:
729		execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal);
730		break;
731	case ATOM_COND_BELOWOREQUAL:
732		execute = !ctx->ctx->cs_above;
733		break;
734	case ATOM_COND_EQUAL:
735		execute = ctx->ctx->cs_equal;
736		break;
737	case ATOM_COND_NOTEQUAL:
738		execute = !ctx->ctx->cs_equal;
739		break;
740	}
741	if (arg != ATOM_COND_ALWAYS)
742		SDEBUG("   taken: %s\n", execute ? "yes" : "no");
743	SDEBUG("   target: 0x%04X\n", target);
744	if (execute) {
745		if (ctx->last_jump == (ctx->start + target)) {
746			cjiffies = jiffies;
747			if (time_after(cjiffies, ctx->last_jump_jiffies)) {
748				cjiffies -= ctx->last_jump_jiffies;
749				if (jiffies_to_msecs(cjiffies) > 5000) {
750					DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
751					ctx->abort = true;
752				}
753			} else {
754				/* jiffies wrapped around; just wait a little longer */
755				ctx->last_jump_jiffies = jiffies;
756			}
757		} else {
758			ctx->last_jump = ctx->start + target;
759			ctx->last_jump_jiffies = jiffies;
760		}
761		*ptr = ctx->start + target;
762	}
763}
764
765static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
766{
767	uint8_t attr = U8((*ptr)++);
768	uint32_t dst, mask, src, saved;
769	int dptr = *ptr;
770	SDEBUG("   dst: ");
771	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
772	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
773	SDEBUG("   mask: 0x%08x", mask);
774	SDEBUG("   src: ");
775	src = atom_get_src(ctx, attr, ptr);
776	dst &= mask;
777	dst |= src;
778	SDEBUG("   dst: ");
779	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
780}
781
782static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg)
783{
784	uint8_t attr = U8((*ptr)++);
785	uint32_t src, saved;
786	int dptr = *ptr;
787	if (((attr >> 3) & 7) != ATOM_SRC_DWORD)
788		atom_get_dst(ctx, arg, attr, ptr, &saved, 0);
789	else {
790		atom_skip_dst(ctx, arg, attr, ptr);
791		saved = 0xCDCDCDCD;
792	}
793	SDEBUG("   src: ");
794	src = atom_get_src(ctx, attr, ptr);
795	SDEBUG("   dst: ");
796	atom_put_dst(ctx, arg, attr, &dptr, src, saved);
797}
798
799static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg)
800{
801	uint8_t attr = U8((*ptr)++);
802	uint32_t dst, src;
803	SDEBUG("   src1: ");
804	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
805	SDEBUG("   src2: ");
806	src = atom_get_src(ctx, attr, ptr);
807	ctx->ctx->divmul[0] = dst * src;
808}
809
810static void atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg)
811{
812	uint64_t val64;
813	uint8_t attr = U8((*ptr)++);
814	uint32_t dst, src;
815	SDEBUG("   src1: ");
816	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
817	SDEBUG("   src2: ");
818	src = atom_get_src(ctx, attr, ptr);
819	val64 = (uint64_t)dst * (uint64_t)src;
820	ctx->ctx->divmul[0] = lower_32_bits(val64);
821	ctx->ctx->divmul[1] = upper_32_bits(val64);
822}
823
824static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg)
825{
826	/* nothing */
827}
828
829static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg)
830{
831	uint8_t attr = U8((*ptr)++);
832	uint32_t dst, src, saved;
833	int dptr = *ptr;
834	SDEBUG("   dst: ");
835	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
836	SDEBUG("   src: ");
837	src = atom_get_src(ctx, attr, ptr);
838	dst |= src;
839	SDEBUG("   dst: ");
840	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
841}
842
843static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg)
844{
845	uint8_t val = U8((*ptr)++);
846	SDEBUG("POST card output: 0x%02X\n", val);
847}
848
849static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg)
850{
851	pr_info("unimplemented!\n");
852}
853
854static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg)
855{
856	pr_info("unimplemented!\n");
857}
858
859static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg)
860{
861	pr_info("unimplemented!\n");
862}
863
864static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg)
865{
866	int idx = U8(*ptr);
867	(*ptr)++;
868	SDEBUG("   block: %d\n", idx);
869	if (!idx)
870		ctx->ctx->data_block = 0;
871	else if (idx == 255)
872		ctx->ctx->data_block = ctx->start;
873	else
874		ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx);
875	SDEBUG("   base: 0x%04X\n", ctx->ctx->data_block);
876}
877
878static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg)
879{
880	uint8_t attr = U8((*ptr)++);
881	SDEBUG("   fb_base: ");
882	ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr);
883}
884
885static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg)
886{
887	int port;
888	switch (arg) {
889	case ATOM_PORT_ATI:
890		port = U16(*ptr);
891		if (port < ATOM_IO_NAMES_CNT)
892			SDEBUG("   port: %d (%s)\n", port, atom_io_names[port]);
893		else
894			SDEBUG("   port: %d\n", port);
895		if (!port)
896			ctx->ctx->io_mode = ATOM_IO_MM;
897		else
898			ctx->ctx->io_mode = ATOM_IO_IIO | port;
899		(*ptr) += 2;
900		break;
901	case ATOM_PORT_PCI:
902		ctx->ctx->io_mode = ATOM_IO_PCI;
903		(*ptr)++;
904		break;
905	case ATOM_PORT_SYSIO:
906		ctx->ctx->io_mode = ATOM_IO_SYSIO;
907		(*ptr)++;
908		break;
909	}
910}
911
912static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
913{
914	ctx->ctx->reg_block = U16(*ptr);
915	(*ptr) += 2;
916	SDEBUG("   base: 0x%04X\n", ctx->ctx->reg_block);
917}
918
919static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
920{
921	uint8_t attr = U8((*ptr)++), shift;
922	uint32_t saved, dst;
923	int dptr = *ptr;
924	attr &= 0x38;
925	attr |= atom_def_dst[attr >> 3] << 6;
926	SDEBUG("   dst: ");
927	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
928	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
929	SDEBUG("   shift: %d\n", shift);
930	dst <<= shift;
931	SDEBUG("   dst: ");
932	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
933}
934
935static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
936{
937	uint8_t attr = U8((*ptr)++), shift;
938	uint32_t saved, dst;
939	int dptr = *ptr;
940	attr &= 0x38;
941	attr |= atom_def_dst[attr >> 3] << 6;
942	SDEBUG("   dst: ");
943	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
944	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
945	SDEBUG("   shift: %d\n", shift);
946	dst >>= shift;
947	SDEBUG("   dst: ");
948	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
949}
950
951static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
952{
953	uint8_t attr = U8((*ptr)++), shift;
954	uint32_t saved, dst;
955	int dptr = *ptr;
956	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
957	SDEBUG("   dst: ");
958	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
959	/* op needs the full dst value */
960	dst = saved;
961	shift = atom_get_src(ctx, attr, ptr);
962	SDEBUG("   shift: %d\n", shift);
963	dst <<= shift;
964	dst &= atom_arg_mask[dst_align];
965	dst >>= atom_arg_shift[dst_align];
966	SDEBUG("   dst: ");
967	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
968}
969
970static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
971{
972	uint8_t attr = U8((*ptr)++), shift;
973	uint32_t saved, dst;
974	int dptr = *ptr;
975	uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
976	SDEBUG("   dst: ");
977	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
978	/* op needs the full dst value */
979	dst = saved;
980	shift = atom_get_src(ctx, attr, ptr);
981	SDEBUG("   shift: %d\n", shift);
982	dst >>= shift;
983	dst &= atom_arg_mask[dst_align];
984	dst >>= atom_arg_shift[dst_align];
985	SDEBUG("   dst: ");
986	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
987}
988
989static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg)
990{
991	uint8_t attr = U8((*ptr)++);
992	uint32_t dst, src, saved;
993	int dptr = *ptr;
994	SDEBUG("   dst: ");
995	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
996	SDEBUG("   src: ");
997	src = atom_get_src(ctx, attr, ptr);
998	dst -= src;
999	SDEBUG("   dst: ");
1000	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1001}
1002
1003static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg)
1004{
1005	uint8_t attr = U8((*ptr)++);
1006	uint32_t src, val, target;
1007	SDEBUG("   switch: ");
1008	src = atom_get_src(ctx, attr, ptr);
1009	while (U16(*ptr) != ATOM_CASE_END)
1010		if (U8(*ptr) == ATOM_CASE_MAGIC) {
1011			(*ptr)++;
1012			SDEBUG("   case: ");
1013			val =
1014			    atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM,
1015					 ptr);
1016			target = U16(*ptr);
1017			if (val == src) {
1018				SDEBUG("   target: %04X\n", target);
1019				*ptr = ctx->start + target;
1020				return;
1021			}
1022			(*ptr) += 2;
1023		} else {
1024			pr_info("Bad case\n");
1025			return;
1026		}
1027	(*ptr) += 2;
1028}
1029
1030static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg)
1031{
1032	uint8_t attr = U8((*ptr)++);
1033	uint32_t dst, src;
1034	SDEBUG("   src1: ");
1035	dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1);
1036	SDEBUG("   src2: ");
1037	src = atom_get_src(ctx, attr, ptr);
1038	ctx->ctx->cs_equal = ((dst & src) == 0);
1039	SDEBUG("   result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE");
1040}
1041
1042static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg)
1043{
1044	uint8_t attr = U8((*ptr)++);
1045	uint32_t dst, src, saved;
1046	int dptr = *ptr;
1047	SDEBUG("   dst: ");
1048	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
1049	SDEBUG("   src: ");
1050	src = atom_get_src(ctx, attr, ptr);
1051	dst ^= src;
1052	SDEBUG("   dst: ");
1053	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
1054}
1055
1056static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg)
1057{
1058	uint8_t val = U8((*ptr)++);
1059	SDEBUG("DEBUG output: 0x%02X\n", val);
1060}
1061
1062static void atom_op_processds(atom_exec_context *ctx, int *ptr, int arg)
1063{
1064	uint16_t val = U16(*ptr);
1065	(*ptr) += val + 2;
1066	SDEBUG("PROCESSDS output: 0x%02X\n", val);
1067}
1068
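/*
 * Opcode dispatch table, indexed by the ATOM opcode byte.  For most opcodes
 * 'arg' selects the destination operand space; for the rest it carries an
 * opcode-specific constant such as the IO port, jump condition or delay unit.
 */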
1069static struct {
1070	void (*func) (atom_exec_context *, int *, int);
1071	int arg;
1072} opcode_table[ATOM_OP_CNT] = {
1073	{
1074	NULL, 0}, {
1075	atom_op_move, ATOM_ARG_REG}, {
1076	atom_op_move, ATOM_ARG_PS}, {
1077	atom_op_move, ATOM_ARG_WS}, {
1078	atom_op_move, ATOM_ARG_FB}, {
1079	atom_op_move, ATOM_ARG_PLL}, {
1080	atom_op_move, ATOM_ARG_MC}, {
1081	atom_op_and, ATOM_ARG_REG}, {
1082	atom_op_and, ATOM_ARG_PS}, {
1083	atom_op_and, ATOM_ARG_WS}, {
1084	atom_op_and, ATOM_ARG_FB}, {
1085	atom_op_and, ATOM_ARG_PLL}, {
1086	atom_op_and, ATOM_ARG_MC}, {
1087	atom_op_or, ATOM_ARG_REG}, {
1088	atom_op_or, ATOM_ARG_PS}, {
1089	atom_op_or, ATOM_ARG_WS}, {
1090	atom_op_or, ATOM_ARG_FB}, {
1091	atom_op_or, ATOM_ARG_PLL}, {
1092	atom_op_or, ATOM_ARG_MC}, {
1093	atom_op_shift_left, ATOM_ARG_REG}, {
1094	atom_op_shift_left, ATOM_ARG_PS}, {
1095	atom_op_shift_left, ATOM_ARG_WS}, {
1096	atom_op_shift_left, ATOM_ARG_FB}, {
1097	atom_op_shift_left, ATOM_ARG_PLL}, {
1098	atom_op_shift_left, ATOM_ARG_MC}, {
1099	atom_op_shift_right, ATOM_ARG_REG}, {
1100	atom_op_shift_right, ATOM_ARG_PS}, {
1101	atom_op_shift_right, ATOM_ARG_WS}, {
1102	atom_op_shift_right, ATOM_ARG_FB}, {
1103	atom_op_shift_right, ATOM_ARG_PLL}, {
1104	atom_op_shift_right, ATOM_ARG_MC}, {
1105	atom_op_mul, ATOM_ARG_REG}, {
1106	atom_op_mul, ATOM_ARG_PS}, {
1107	atom_op_mul, ATOM_ARG_WS}, {
1108	atom_op_mul, ATOM_ARG_FB}, {
1109	atom_op_mul, ATOM_ARG_PLL}, {
1110	atom_op_mul, ATOM_ARG_MC}, {
1111	atom_op_div, ATOM_ARG_REG}, {
1112	atom_op_div, ATOM_ARG_PS}, {
1113	atom_op_div, ATOM_ARG_WS}, {
1114	atom_op_div, ATOM_ARG_FB}, {
1115	atom_op_div, ATOM_ARG_PLL}, {
1116	atom_op_div, ATOM_ARG_MC}, {
1117	atom_op_add, ATOM_ARG_REG}, {
1118	atom_op_add, ATOM_ARG_PS}, {
1119	atom_op_add, ATOM_ARG_WS}, {
1120	atom_op_add, ATOM_ARG_FB}, {
1121	atom_op_add, ATOM_ARG_PLL}, {
1122	atom_op_add, ATOM_ARG_MC}, {
1123	atom_op_sub, ATOM_ARG_REG}, {
1124	atom_op_sub, ATOM_ARG_PS}, {
1125	atom_op_sub, ATOM_ARG_WS}, {
1126	atom_op_sub, ATOM_ARG_FB}, {
1127	atom_op_sub, ATOM_ARG_PLL}, {
1128	atom_op_sub, ATOM_ARG_MC}, {
1129	atom_op_setport, ATOM_PORT_ATI}, {
1130	atom_op_setport, ATOM_PORT_PCI}, {
1131	atom_op_setport, ATOM_PORT_SYSIO}, {
1132	atom_op_setregblock, 0}, {
1133	atom_op_setfbbase, 0}, {
1134	atom_op_compare, ATOM_ARG_REG}, {
1135	atom_op_compare, ATOM_ARG_PS}, {
1136	atom_op_compare, ATOM_ARG_WS}, {
1137	atom_op_compare, ATOM_ARG_FB}, {
1138	atom_op_compare, ATOM_ARG_PLL}, {
1139	atom_op_compare, ATOM_ARG_MC}, {
1140	atom_op_switch, 0}, {
1141	atom_op_jump, ATOM_COND_ALWAYS}, {
1142	atom_op_jump, ATOM_COND_EQUAL}, {
1143	atom_op_jump, ATOM_COND_BELOW}, {
1144	atom_op_jump, ATOM_COND_ABOVE}, {
1145	atom_op_jump, ATOM_COND_BELOWOREQUAL}, {
1146	atom_op_jump, ATOM_COND_ABOVEOREQUAL}, {
1147	atom_op_jump, ATOM_COND_NOTEQUAL}, {
1148	atom_op_test, ATOM_ARG_REG}, {
1149	atom_op_test, ATOM_ARG_PS}, {
1150	atom_op_test, ATOM_ARG_WS}, {
1151	atom_op_test, ATOM_ARG_FB}, {
1152	atom_op_test, ATOM_ARG_PLL}, {
1153	atom_op_test, ATOM_ARG_MC}, {
1154	atom_op_delay, ATOM_UNIT_MILLISEC}, {
1155	atom_op_delay, ATOM_UNIT_MICROSEC}, {
1156	atom_op_calltable, 0}, {
1157	atom_op_repeat, 0}, {
1158	atom_op_clear, ATOM_ARG_REG}, {
1159	atom_op_clear, ATOM_ARG_PS}, {
1160	atom_op_clear, ATOM_ARG_WS}, {
1161	atom_op_clear, ATOM_ARG_FB}, {
1162	atom_op_clear, ATOM_ARG_PLL}, {
1163	atom_op_clear, ATOM_ARG_MC}, {
1164	atom_op_nop, 0}, {
1165	atom_op_eot, 0}, {
1166	atom_op_mask, ATOM_ARG_REG}, {
1167	atom_op_mask, ATOM_ARG_PS}, {
1168	atom_op_mask, ATOM_ARG_WS}, {
1169	atom_op_mask, ATOM_ARG_FB}, {
1170	atom_op_mask, ATOM_ARG_PLL}, {
1171	atom_op_mask, ATOM_ARG_MC}, {
1172	atom_op_postcard, 0}, {
1173	atom_op_beep, 0}, {
1174	atom_op_savereg, 0}, {
1175	atom_op_restorereg, 0}, {
1176	atom_op_setdatablock, 0}, {
1177	atom_op_xor, ATOM_ARG_REG}, {
1178	atom_op_xor, ATOM_ARG_PS}, {
1179	atom_op_xor, ATOM_ARG_WS}, {
1180	atom_op_xor, ATOM_ARG_FB}, {
1181	atom_op_xor, ATOM_ARG_PLL}, {
1182	atom_op_xor, ATOM_ARG_MC}, {
1183	atom_op_shl, ATOM_ARG_REG}, {
1184	atom_op_shl, ATOM_ARG_PS}, {
1185	atom_op_shl, ATOM_ARG_WS}, {
1186	atom_op_shl, ATOM_ARG_FB}, {
1187	atom_op_shl, ATOM_ARG_PLL}, {
1188	atom_op_shl, ATOM_ARG_MC}, {
1189	atom_op_shr, ATOM_ARG_REG}, {
1190	atom_op_shr, ATOM_ARG_PS}, {
1191	atom_op_shr, ATOM_ARG_WS}, {
1192	atom_op_shr, ATOM_ARG_FB}, {
1193	atom_op_shr, ATOM_ARG_PLL}, {
1194	atom_op_shr, ATOM_ARG_MC}, {
1195	atom_op_debug, 0}, {
1196	atom_op_processds, 0}, {
1197	atom_op_mul32, ATOM_ARG_PS}, {
1198	atom_op_mul32, ATOM_ARG_WS}, {
1199	atom_op_div32, ATOM_ARG_PS}, {
1200	atom_op_div32, ATOM_ARG_WS},
1201};
1202
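/*
 * Execute one command table: read its header (code length, workspace and
 * parameter-space sizes), allocate the workspace, then interpret opcodes
 * until ATOM_OP_EOT, an unknown opcode or an abort request terminates it.
 */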
1203static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
1204{
1205	int base = CU16(ctx->cmd_table + 4 + 2 * index);
1206	int len, ws, ps, ptr;
1207	unsigned char op;
1208	atom_exec_context ectx;
1209	int ret = 0;
1210
1211	if (!base)
1212		return -EINVAL;
1213
1214	len = CU16(base + ATOM_CT_SIZE_PTR);
1215	ws = CU8(base + ATOM_CT_WS_PTR);
1216	ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK;
1217	ptr = base + ATOM_CT_CODE_PTR;
1218
1219	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
1220
1221	ectx.ctx = ctx;
1222	ectx.ps_shift = ps / 4;
1223	ectx.start = base;
1224	ectx.ps = params;
1225	ectx.abort = false;
1226	ectx.last_jump = 0;
1227	if (ws)
1228		ectx.ws = kcalloc(4, ws, GFP_KERNEL);
1229	else
1230		ectx.ws = NULL;
1231
1232	debug_depth++;
1233	while (1) {
1234		op = CU8(ptr++);
1235		if (op < ATOM_OP_NAMES_CNT)
1236			SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1237		else
1238			SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1239		if (ectx.abort) {
1240			DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1241				base, len, ws, ps, ptr - 1);
1242			ret = -EINVAL;
1243			goto free;
1244		}
1245
1246		if (op < ATOM_OP_CNT && op > 0)
1247			opcode_table[op].func(&ectx, &ptr,
1248					      opcode_table[op].arg);
1249		else
1250			break;
1251
1252		if (op == ATOM_OP_EOT)
1253			break;
1254	}
1255	debug_depth--;
1256	SDEBUG("<<\n");
1257
1258free:
1259	if (ws)
1260		kfree(ectx.ws);
1261	return ret;
1262}
1263
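/*
 * Public entry point: serialize execution on ctx->mutex and reset the
 * per-context data/register blocks, fb window, IO mode and divmul state
 * before running the requested table.
 */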
1264int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1265{
1266	int r;
1267
1268	mutex_lock(&ctx->mutex);
1269	/* reset data block */
1270	ctx->data_block = 0;
1271	/* reset reg block */
1272	ctx->reg_block = 0;
1273	/* reset fb window */
1274	ctx->fb_base = 0;
1275	/* reset io mode */
1276	ctx->io_mode = ATOM_IO_MM;
1277	/* reset divmul */
1278	ctx->divmul[0] = 0;
1279	ctx->divmul[1] = 0;
1280	r = amdgpu_atom_execute_table_locked(ctx, index, params);
1281	mutex_unlock(&ctx->mutex);
1282	return r;
1283}
1284
1285static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1286
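/*
 * Build the indirect-IO lookup table: for each ATOM_IIO_START block, record
 * the offset of its program under its port id so atom_iio_execute() can be
 * dispatched from io_mode later on.
 */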
1287static void atom_index_iio(struct atom_context *ctx, int base)
1288{
1289	ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
1290	if (!ctx->iio)
1291		return;
1292	while (CU8(base) == ATOM_IIO_START) {
1293		ctx->iio[CU8(base + 1)] = base + 2;
1294		base += 2;
1295		while (CU8(base) != ATOM_IIO_END)
1296			base += atom_iio_len[CU8(base)];
1297		base += 3;
1298	}
1299}
1300
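/*
 * Create an atom_context for a BIOS image: verify the BIOS, ATI and ATOM ROM
 * signatures, record the command and data table offsets, index the indirect
 * IO programs and log the VBIOS part number string.
 */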
1301struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
1302{
1303	int base;
1304	struct atom_context *ctx =
1305	    kzalloc(sizeof(struct atom_context), GFP_KERNEL);
1306	char *str;
1307	u16 idx;
1308
1309	if (!ctx)
1310		return NULL;
1311
1312	ctx->card = card;
1313	ctx->bios = bios;
1314
1315	if (CU16(0) != ATOM_BIOS_MAGIC) {
1316		pr_info("Invalid BIOS magic\n");
1317		kfree(ctx);
1318		return NULL;
1319	}
1320	if (strncmp
1321	    (CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC,
1322	     strlen(ATOM_ATI_MAGIC))) {
1323		pr_info("Invalid ATI magic\n");
1324		kfree(ctx);
1325		return NULL;
1326	}
1327
1328	base = CU16(ATOM_ROM_TABLE_PTR);
1329	if (strncmp
1330	    (CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC,
1331	     strlen(ATOM_ROM_MAGIC))) {
1332		pr_info("Invalid ATOM magic\n");
1333		kfree(ctx);
1334		return NULL;
1335	}
1336
1337	ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
1338	ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
1339	atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
1340	if (!ctx->iio) {
1341		amdgpu_atom_destroy(ctx);
1342		return NULL;
1343	}
1344
1345	idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
1346	if (idx == 0)
1347		idx = 0x80;
1348
1349	str = CSTR(idx);
1350	if (*str != '\0') {
1351		pr_info("ATOM BIOS: %s\n", str);
1352		strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
1353	}
1354
1355
1356	return ctx;
1357}
1358
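/*
 * Run the ASIC_Init command table, passing the default engine and memory
 * clocks from the firmware info data table as its parameters.
 */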
1359int amdgpu_atom_asic_init(struct atom_context *ctx)
1360{
1361	int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
1362	uint32_t ps[16];
1363	int ret;
1364
1365	memset(ps, 0, 64);
1366
1367	ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1368	ps[1] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR));
1369	if (!ps[0] || !ps[1])
1370		return 1;
1371
1372	if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1373		return 1;
1374	ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1375	if (ret)
1376		return ret;
1377
1378	memset(ps, 0, 64);
1379
1380	return ret;
1381}
1382
1383void amdgpu_atom_destroy(struct atom_context *ctx)
1384{
1385	kfree(ctx->iio);
1386	kfree(ctx);
1387}
1388
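/*
 * Look up a data table by index and return its offset together with the
 * size, format revision and content revision from its common header;
 * returns false if the table is not present.
 */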
1389bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
1390			    uint16_t * size, uint8_t * frev, uint8_t * crev,
1391			    uint16_t * data_start)
1392{
1393	int offset = index * 2 + 4;
1394	int idx = CU16(ctx->data_table + offset);
1395	u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1396
1397	if (!mdt[index])
1398		return false;
1399
1400	if (size)
1401		*size = CU16(idx);
1402	if (frev)
1403		*frev = CU8(idx + 2);
1404	if (crev)
1405		*crev = CU8(idx + 3);
1406	*data_start = idx;
1407	return true;
1408}
1409
1410bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1411			   uint8_t * crev)
1412{
1413	int offset = index * 2 + 4;
1414	int idx = CU16(ctx->cmd_table + offset);
1415	u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1416
1417	if (!mct[index])
1418		return false;
1419
1420	if (frev)
1421		*frev = CU8(idx + 2);
1422	if (crev)
1423		*crev = CU8(idx + 3);
1424	return true;
1425}
1426
1427