// SPDX-License-Identifier: GPL-2.0-only
/*
 * cp1emu.c: a MIPS coprocessor 1 (FPU) instruction emulator
 *
 * MIPS floating point support
 * Copyright (C) 1994-2000 Algorithmics Ltd.
 *
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000  MIPS Technologies, Inc.
 *
 * A complete emulator for MIPS coprocessor 1 instructions.  This is
 * required for #float(switch) or #float(trap), where it catches all
 * COP1 instructions via the "CoProcessor Unusable" exception.
 *
 * More surprisingly it is also required for #float(ieee), to help out
 * the hardware FPU at the boundaries of the IEEE-754 representation
 * (denormalised values, infinities, underflow, etc).  It is made
 * quite nasty because emulation of some non-COP1 instructions is
 * required, e.g. in branch delay slots.
 *
 * Note if you know that you won't have an FPU, then you'll get much
 * better performance by compiling with -msoft-float!
 */
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/percpu-defs.h>
#include <linux/perf_event.h>

#include <asm/branch.h>
#include <asm/inst.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <linux/uaccess.h>

#include <asm/cpu-info.h>
#include <asm/processor.h>
#include <asm/fpu_emulator.h>
#include <asm/fpu.h>
#include <asm/mips-r2-to-r6-emul.h>

#include "ieee754.h"

/* Function which emulates a floating point instruction. */

static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
	mips_instruction);

static int fpux_emu(struct pt_regs *,
	struct mips_fpu_struct *, mips_instruction, void __user **);

/* Control registers */

#define FPCREG_RID	0	/* $0  = revision id */
#define FPCREG_FCCR	25	/* $25 = fccr */
#define FPCREG_FEXR	26	/* $26 = fexr */
#define FPCREG_FENR	28	/* $28 = fenr */
#define FPCREG_CSR	31	/* $31 = csr */

/* convert condition code register number to csr bit */
const unsigned int fpucondbit[8] = {
	FPU_CSR_COND,
	FPU_CSR_COND1,
	FPU_CSR_COND2,
	FPU_CSR_COND3,
	FPU_CSR_COND4,
	FPU_CSR_COND5,
	FPU_CSR_COND6,
	FPU_CSR_COND7
};

/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */
static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0};
static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0};
static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0};
static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0};

/*
 * This function translates a 32-bit microMIPS instruction
 * into a 32-bit MIPS32 instruction. Returns 0 on success
 * and SIGILL otherwise.
 */
82static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
83{
84	union mips_instruction insn = *insn_ptr;
85	union mips_instruction mips32_insn = insn;
86	int func, fmt, op;
87
88	switch (insn.mm_i_format.opcode) {
89	case mm_ldc132_op:
90		mips32_insn.mm_i_format.opcode = ldc1_op;
91		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
92		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
93		break;
94	case mm_lwc132_op:
95		mips32_insn.mm_i_format.opcode = lwc1_op;
96		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
97		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
98		break;
99	case mm_sdc132_op:
100		mips32_insn.mm_i_format.opcode = sdc1_op;
101		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
102		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
103		break;
104	case mm_swc132_op:
105		mips32_insn.mm_i_format.opcode = swc1_op;
106		mips32_insn.mm_i_format.rt = insn.mm_i_format.rs;
107		mips32_insn.mm_i_format.rs = insn.mm_i_format.rt;
108		break;
109	case mm_pool32i_op:
110		/* NOTE: offset is << by 1 if in microMIPS mode. */
111		if ((insn.mm_i_format.rt == mm_bc1f_op) ||
112		    (insn.mm_i_format.rt == mm_bc1t_op)) {
113			mips32_insn.fb_format.opcode = cop1_op;
114			mips32_insn.fb_format.bc = bc_op;
115			mips32_insn.fb_format.flag =
116				(insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
117		} else
118			return SIGILL;
119		break;
120	case mm_pool32f_op:
121		switch (insn.mm_fp0_format.func) {
122		case mm_32f_01_op:
123		case mm_32f_11_op:
124		case mm_32f_02_op:
125		case mm_32f_12_op:
126		case mm_32f_41_op:
127		case mm_32f_51_op:
128		case mm_32f_42_op:
129		case mm_32f_52_op:
130			op = insn.mm_fp0_format.func;
131			if (op == mm_32f_01_op)
132				func = madd_s_op;
133			else if (op == mm_32f_11_op)
134				func = madd_d_op;
135			else if (op == mm_32f_02_op)
136				func = nmadd_s_op;
137			else if (op == mm_32f_12_op)
138				func = nmadd_d_op;
139			else if (op == mm_32f_41_op)
140				func = msub_s_op;
141			else if (op == mm_32f_51_op)
142				func = msub_d_op;
143			else if (op == mm_32f_42_op)
144				func = nmsub_s_op;
145			else
146				func = nmsub_d_op;
147			mips32_insn.fp6_format.opcode = cop1x_op;
148			mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr;
149			mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft;
150			mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs;
151			mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd;
152			mips32_insn.fp6_format.func = func;
153			break;
154		case mm_32f_10_op:
155			func = -1;	/* Invalid */
156			op = insn.mm_fp5_format.op & 0x7;
157			if (op == mm_ldxc1_op)
158				func = ldxc1_op;
159			else if (op == mm_sdxc1_op)
160				func = sdxc1_op;
161			else if (op == mm_lwxc1_op)
162				func = lwxc1_op;
163			else if (op == mm_swxc1_op)
164				func = swxc1_op;
165
166			if (func != -1) {
167				mips32_insn.r_format.opcode = cop1x_op;
168				mips32_insn.r_format.rs =
169					insn.mm_fp5_format.base;
170				mips32_insn.r_format.rt =
171					insn.mm_fp5_format.index;
172				mips32_insn.r_format.rd = 0;
173				mips32_insn.r_format.re = insn.mm_fp5_format.fd;
174				mips32_insn.r_format.func = func;
175			} else
176				return SIGILL;
177			break;
178		case mm_32f_40_op:
179			op = -1;	/* Invalid */
180			if (insn.mm_fp2_format.op == mm_fmovt_op)
181				op = 1;
182			else if (insn.mm_fp2_format.op == mm_fmovf_op)
183				op = 0;
184			if (op != -1) {
185				mips32_insn.fp0_format.opcode = cop1_op;
186				mips32_insn.fp0_format.fmt =
187					sdps_format[insn.mm_fp2_format.fmt];
188				mips32_insn.fp0_format.ft =
189					(insn.mm_fp2_format.cc<<2) + op;
190				mips32_insn.fp0_format.fs =
191					insn.mm_fp2_format.fs;
192				mips32_insn.fp0_format.fd =
193					insn.mm_fp2_format.fd;
194				mips32_insn.fp0_format.func = fmovc_op;
195			} else
196				return SIGILL;
197			break;
198		case mm_32f_60_op:
199			func = -1;	/* Invalid */
200			if (insn.mm_fp0_format.op == mm_fadd_op)
201				func = fadd_op;
202			else if (insn.mm_fp0_format.op == mm_fsub_op)
203				func = fsub_op;
204			else if (insn.mm_fp0_format.op == mm_fmul_op)
205				func = fmul_op;
206			else if (insn.mm_fp0_format.op == mm_fdiv_op)
207				func = fdiv_op;
208			if (func != -1) {
209				mips32_insn.fp0_format.opcode = cop1_op;
210				mips32_insn.fp0_format.fmt =
211					sdps_format[insn.mm_fp0_format.fmt];
212				mips32_insn.fp0_format.ft =
213					insn.mm_fp0_format.ft;
214				mips32_insn.fp0_format.fs =
215					insn.mm_fp0_format.fs;
216				mips32_insn.fp0_format.fd =
217					insn.mm_fp0_format.fd;
218				mips32_insn.fp0_format.func = func;
219			} else
220				return SIGILL;
221			break;
222		case mm_32f_70_op:
223			func = -1;	/* Invalid */
224			if (insn.mm_fp0_format.op == mm_fmovn_op)
225				func = fmovn_op;
226			else if (insn.mm_fp0_format.op == mm_fmovz_op)
227				func = fmovz_op;
228			if (func != -1) {
229				mips32_insn.fp0_format.opcode = cop1_op;
230				mips32_insn.fp0_format.fmt =
231					sdps_format[insn.mm_fp0_format.fmt];
232				mips32_insn.fp0_format.ft =
233					insn.mm_fp0_format.ft;
234				mips32_insn.fp0_format.fs =
235					insn.mm_fp0_format.fs;
236				mips32_insn.fp0_format.fd =
237					insn.mm_fp0_format.fd;
238				mips32_insn.fp0_format.func = func;
239			} else
240				return SIGILL;
241			break;
242		case mm_32f_73_op:    /* POOL32FXF */
243			switch (insn.mm_fp1_format.op) {
244			case mm_movf0_op:
245			case mm_movf1_op:
246			case mm_movt0_op:
247			case mm_movt1_op:
248				if ((insn.mm_fp1_format.op & 0x7f) ==
249				    mm_movf0_op)
250					op = 0;
251				else
252					op = 1;
253				mips32_insn.r_format.opcode = spec_op;
254				mips32_insn.r_format.rs = insn.mm_fp4_format.fs;
255				mips32_insn.r_format.rt =
256					(insn.mm_fp4_format.cc << 2) + op;
257				mips32_insn.r_format.rd = insn.mm_fp4_format.rt;
258				mips32_insn.r_format.re = 0;
259				mips32_insn.r_format.func = movc_op;
260				break;
261			case mm_fcvtd0_op:
262			case mm_fcvtd1_op:
263			case mm_fcvts0_op:
264			case mm_fcvts1_op:
265				if ((insn.mm_fp1_format.op & 0x7f) ==
266				    mm_fcvtd0_op) {
267					func = fcvtd_op;
268					fmt = swl_format[insn.mm_fp3_format.fmt];
269				} else {
270					func = fcvts_op;
271					fmt = dwl_format[insn.mm_fp3_format.fmt];
272				}
273				mips32_insn.fp0_format.opcode = cop1_op;
274				mips32_insn.fp0_format.fmt = fmt;
275				mips32_insn.fp0_format.ft = 0;
276				mips32_insn.fp0_format.fs =
277					insn.mm_fp3_format.fs;
278				mips32_insn.fp0_format.fd =
279					insn.mm_fp3_format.rt;
280				mips32_insn.fp0_format.func = func;
281				break;
282			case mm_fmov0_op:
283			case mm_fmov1_op:
284			case mm_fabs0_op:
285			case mm_fabs1_op:
286			case mm_fneg0_op:
287			case mm_fneg1_op:
288				if ((insn.mm_fp1_format.op & 0x7f) ==
289				    mm_fmov0_op)
290					func = fmov_op;
291				else if ((insn.mm_fp1_format.op & 0x7f) ==
292					 mm_fabs0_op)
293					func = fabs_op;
294				else
295					func = fneg_op;
296				mips32_insn.fp0_format.opcode = cop1_op;
297				mips32_insn.fp0_format.fmt =
298					sdps_format[insn.mm_fp3_format.fmt];
299				mips32_insn.fp0_format.ft = 0;
300				mips32_insn.fp0_format.fs =
301					insn.mm_fp3_format.fs;
302				mips32_insn.fp0_format.fd =
303					insn.mm_fp3_format.rt;
304				mips32_insn.fp0_format.func = func;
305				break;
306			case mm_ffloorl_op:
307			case mm_ffloorw_op:
308			case mm_fceill_op:
309			case mm_fceilw_op:
310			case mm_ftruncl_op:
311			case mm_ftruncw_op:
312			case mm_froundl_op:
313			case mm_froundw_op:
314			case mm_fcvtl_op:
315			case mm_fcvtw_op:
316				if (insn.mm_fp1_format.op == mm_ffloorl_op)
317					func = ffloorl_op;
318				else if (insn.mm_fp1_format.op == mm_ffloorw_op)
319					func = ffloor_op;
320				else if (insn.mm_fp1_format.op == mm_fceill_op)
321					func = fceill_op;
322				else if (insn.mm_fp1_format.op == mm_fceilw_op)
323					func = fceil_op;
324				else if (insn.mm_fp1_format.op == mm_ftruncl_op)
325					func = ftruncl_op;
326				else if (insn.mm_fp1_format.op == mm_ftruncw_op)
327					func = ftrunc_op;
328				else if (insn.mm_fp1_format.op == mm_froundl_op)
329					func = froundl_op;
330				else if (insn.mm_fp1_format.op == mm_froundw_op)
331					func = fround_op;
332				else if (insn.mm_fp1_format.op == mm_fcvtl_op)
333					func = fcvtl_op;
334				else
335					func = fcvtw_op;
336				mips32_insn.fp0_format.opcode = cop1_op;
337				mips32_insn.fp0_format.fmt =
338					sd_format[insn.mm_fp1_format.fmt];
339				mips32_insn.fp0_format.ft = 0;
340				mips32_insn.fp0_format.fs =
341					insn.mm_fp1_format.fs;
342				mips32_insn.fp0_format.fd =
343					insn.mm_fp1_format.rt;
344				mips32_insn.fp0_format.func = func;
345				break;
346			case mm_frsqrt_op:
347			case mm_fsqrt_op:
348			case mm_frecip_op:
349				if (insn.mm_fp1_format.op == mm_frsqrt_op)
350					func = frsqrt_op;
351				else if (insn.mm_fp1_format.op == mm_fsqrt_op)
352					func = fsqrt_op;
353				else
354					func = frecip_op;
355				mips32_insn.fp0_format.opcode = cop1_op;
356				mips32_insn.fp0_format.fmt =
357					sdps_format[insn.mm_fp1_format.fmt];
358				mips32_insn.fp0_format.ft = 0;
359				mips32_insn.fp0_format.fs =
360					insn.mm_fp1_format.fs;
361				mips32_insn.fp0_format.fd =
362					insn.mm_fp1_format.rt;
363				mips32_insn.fp0_format.func = func;
364				break;
365			case mm_mfc1_op:
366			case mm_mtc1_op:
367			case mm_cfc1_op:
368			case mm_ctc1_op:
369			case mm_mfhc1_op:
370			case mm_mthc1_op:
371				if (insn.mm_fp1_format.op == mm_mfc1_op)
372					op = mfc_op;
373				else if (insn.mm_fp1_format.op == mm_mtc1_op)
374					op = mtc_op;
375				else if (insn.mm_fp1_format.op == mm_cfc1_op)
376					op = cfc_op;
377				else if (insn.mm_fp1_format.op == mm_ctc1_op)
378					op = ctc_op;
379				else if (insn.mm_fp1_format.op == mm_mfhc1_op)
380					op = mfhc_op;
381				else
382					op = mthc_op;
383				mips32_insn.fp1_format.opcode = cop1_op;
384				mips32_insn.fp1_format.op = op;
385				mips32_insn.fp1_format.rt =
386					insn.mm_fp1_format.rt;
387				mips32_insn.fp1_format.fs =
388					insn.mm_fp1_format.fs;
389				mips32_insn.fp1_format.fd = 0;
390				mips32_insn.fp1_format.func = 0;
391				break;
392			default:
393				return SIGILL;
394			}
395			break;
396		case mm_32f_74_op:	/* c.cond.fmt */
397			mips32_insn.fp0_format.opcode = cop1_op;
398			mips32_insn.fp0_format.fmt =
399				sdps_format[insn.mm_fp4_format.fmt];
400			mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt;
401			mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs;
402			mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2;
403			mips32_insn.fp0_format.func =
404				insn.mm_fp4_format.cond | MM_MIPS32_COND_FC;
405			break;
406		default:
407			return SIGILL;
408		}
409		break;
410	default:
411		return SIGILL;
412	}
413
414	*insn_ptr = mips32_insn;
415	return 0;
416}
417
418/*
419 * Redundant with logic already in kernel/branch.c,
420 * embedded in compute_return_epc.  At some point,
421 * a single subroutine should be used across both
422 * modules.
423 */
424int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
425		  unsigned long *contpc)
426{
427	union mips_instruction insn = (union mips_instruction)dec_insn.insn;
428	unsigned int fcr31;
429	unsigned int bit = 0;
430	unsigned int bit0;
431	union fpureg *fpr;
432
433	switch (insn.i_format.opcode) {
434	case spec_op:
435		switch (insn.r_format.func) {
436		case jalr_op:
437			if (insn.r_format.rd != 0) {
438				regs->regs[insn.r_format.rd] =
439					regs->cp0_epc + dec_insn.pc_inc +
440					dec_insn.next_pc_inc;
441			}
442			fallthrough;
443		case jr_op:
444			/* For R6, JR already emulated in jalr_op */
445			if (NO_R6EMU && insn.r_format.func == jr_op)
446				break;
447			*contpc = regs->regs[insn.r_format.rs];
448			return 1;
449		}
450		break;
451	case bcond_op:
452		switch (insn.i_format.rt) {
453		case bltzal_op:
454		case bltzall_op:
455			if (NO_R6EMU && (insn.i_format.rs ||
456			    insn.i_format.rt == bltzall_op))
457				break;
458
459			regs->regs[31] = regs->cp0_epc +
460				dec_insn.pc_inc +
461				dec_insn.next_pc_inc;
462			fallthrough;
463		case bltzl_op:
464			if (NO_R6EMU)
465				break;
466			fallthrough;
467		case bltz_op:
468			if ((long)regs->regs[insn.i_format.rs] < 0)
469				*contpc = regs->cp0_epc +
470					dec_insn.pc_inc +
471					(insn.i_format.simmediate << 2);
472			else
473				*contpc = regs->cp0_epc +
474					dec_insn.pc_inc +
475					dec_insn.next_pc_inc;
476			return 1;
477		case bgezal_op:
478		case bgezall_op:
479			if (NO_R6EMU && (insn.i_format.rs ||
480			    insn.i_format.rt == bgezall_op))
481				break;
482
483			regs->regs[31] = regs->cp0_epc +
484				dec_insn.pc_inc +
485				dec_insn.next_pc_inc;
486			fallthrough;
487		case bgezl_op:
488			if (NO_R6EMU)
489				break;
490			fallthrough;
491		case bgez_op:
492			if ((long)regs->regs[insn.i_format.rs] >= 0)
493				*contpc = regs->cp0_epc +
494					dec_insn.pc_inc +
495					(insn.i_format.simmediate << 2);
496			else
497				*contpc = regs->cp0_epc +
498					dec_insn.pc_inc +
499					dec_insn.next_pc_inc;
500			return 1;
501		}
502		break;
503	case jalx_op:
504		set_isa16_mode(bit);
505		fallthrough;
506	case jal_op:
507		regs->regs[31] = regs->cp0_epc +
508			dec_insn.pc_inc +
509			dec_insn.next_pc_inc;
510		fallthrough;
511	case j_op:
512		*contpc = regs->cp0_epc + dec_insn.pc_inc;
513		*contpc >>= 28;
514		*contpc <<= 28;
515		*contpc |= (insn.j_format.target << 2);
516		/* Set microMIPS mode bit: XOR for jalx. */
517		*contpc ^= bit;
518		return 1;
519	case beql_op:
520		if (NO_R6EMU)
521			break;
522		fallthrough;
523	case beq_op:
524		if (regs->regs[insn.i_format.rs] ==
525		    regs->regs[insn.i_format.rt])
526			*contpc = regs->cp0_epc +
527				dec_insn.pc_inc +
528				(insn.i_format.simmediate << 2);
529		else
530			*contpc = regs->cp0_epc +
531				dec_insn.pc_inc +
532				dec_insn.next_pc_inc;
533		return 1;
534	case bnel_op:
535		if (NO_R6EMU)
536			break;
537		fallthrough;
538	case bne_op:
539		if (regs->regs[insn.i_format.rs] !=
540		    regs->regs[insn.i_format.rt])
541			*contpc = regs->cp0_epc +
542				dec_insn.pc_inc +
543				(insn.i_format.simmediate << 2);
544		else
545			*contpc = regs->cp0_epc +
546				dec_insn.pc_inc +
547				dec_insn.next_pc_inc;
548		return 1;
549	case blezl_op:
550		if (!insn.i_format.rt && NO_R6EMU)
551			break;
552		fallthrough;
553	case blez_op:
554
555		/*
556		 * Compact branches for R6 for the
557		 * blez and blezl opcodes.
558		 * BLEZ  | rs = 0 | rt != 0  == BLEZALC
559		 * BLEZ  | rs = rt != 0      == BGEZALC
560		 * BLEZ  | rs != 0 | rt != 0 == BGEUC
561		 * BLEZL | rs = 0 | rt != 0  == BLEZC
562		 * BLEZL | rs = rt != 0      == BGEZC
563		 * BLEZL | rs != 0 | rt != 0 == BGEC
564		 *
565		 * For real BLEZ{,L}, rt is always 0.
566		 */
567		if (cpu_has_mips_r6 && insn.i_format.rt) {
568			if ((insn.i_format.opcode == blez_op) &&
569			    ((!insn.i_format.rs && insn.i_format.rt) ||
570			     (insn.i_format.rs == insn.i_format.rt)))
571				regs->regs[31] = regs->cp0_epc +
572					dec_insn.pc_inc;
573			*contpc = regs->cp0_epc + dec_insn.pc_inc +
574				dec_insn.next_pc_inc;
575
576			return 1;
577		}
578		if ((long)regs->regs[insn.i_format.rs] <= 0)
579			*contpc = regs->cp0_epc +
580				dec_insn.pc_inc +
581				(insn.i_format.simmediate << 2);
582		else
583			*contpc = regs->cp0_epc +
584				dec_insn.pc_inc +
585				dec_insn.next_pc_inc;
586		return 1;
587	case bgtzl_op:
588		if (!insn.i_format.rt && NO_R6EMU)
589			break;
590		fallthrough;
591	case bgtz_op:
592		/*
593		 * Compact branches for R6 for the
594		 * bgtz and bgtzl opcodes.
595		 * BGTZ  | rs = 0 | rt != 0  == BGTZALC
596		 * BGTZ  | rs = rt != 0      == BLTZALC
597		 * BGTZ  | rs != 0 | rt != 0 == BLTUC
598		 * BGTZL | rs = 0 | rt != 0  == BGTZC
599		 * BGTZL | rs = rt != 0      == BLTZC
600		 * BGTZL | rs != 0 | rt != 0 == BLTC
601		 *
		 * The *ZALC (linking) variants apply to BGTZ with rt != 0.
		 * For real BGTZ{,L}, rt is always 0.
604		 */
605		if (cpu_has_mips_r6 && insn.i_format.rt) {
			if ((insn.i_format.opcode == bgtz_op) &&
607			    ((!insn.i_format.rs && insn.i_format.rt) ||
608			     (insn.i_format.rs == insn.i_format.rt)))
609				regs->regs[31] = regs->cp0_epc +
610					dec_insn.pc_inc;
611			*contpc = regs->cp0_epc + dec_insn.pc_inc +
612				dec_insn.next_pc_inc;
613
614			return 1;
615		}
616
617		if ((long)regs->regs[insn.i_format.rs] > 0)
618			*contpc = regs->cp0_epc +
619				dec_insn.pc_inc +
620				(insn.i_format.simmediate << 2);
621		else
622			*contpc = regs->cp0_epc +
623				dec_insn.pc_inc +
624				dec_insn.next_pc_inc;
625		return 1;
626	case pop10_op:
627	case pop30_op:
628		if (!cpu_has_mips_r6)
629			break;
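		/*
		 * R6 compact branches: pop10 is BOVC/BEQZALC/BEQC and
		 * pop30 is BNVC/BNEZALC/BNEC.  Only the *ZALC forms
		 * (rs == 0, rt != 0) link to $31.
		 */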
630		if (insn.i_format.rt && !insn.i_format.rs)
631			regs->regs[31] = regs->cp0_epc + 4;
632		*contpc = regs->cp0_epc + dec_insn.pc_inc +
633			dec_insn.next_pc_inc;
634
635		return 1;
636#ifdef CONFIG_CPU_CAVIUM_OCTEON
637	case lwc2_op: /* This is bbit0 on Octeon */
638		if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
639			*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
640		else
641			*contpc = regs->cp0_epc + 8;
642		return 1;
643	case ldc2_op: /* This is bbit032 on Octeon */
644		if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
645			*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
646		else
647			*contpc = regs->cp0_epc + 8;
648		return 1;
649	case swc2_op: /* This is bbit1 on Octeon */
650		if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
651			*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
652		else
653			*contpc = regs->cp0_epc + 8;
654		return 1;
655	case sdc2_op: /* This is bbit132 on Octeon */
656		if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
657			*contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
658		else
659			*contpc = regs->cp0_epc + 8;
660		return 1;
661#else
662	case bc6_op:
663		/*
664		 * Only valid for MIPS R6 but we can still end up
665		 * here from a broken userland so just tell emulator
666		 * this is not a branch and let it break later on.
667		 */
668		if  (!cpu_has_mips_r6)
669			break;
670		*contpc = regs->cp0_epc + dec_insn.pc_inc +
671			dec_insn.next_pc_inc;
672
673		return 1;
674	case balc6_op:
675		if (!cpu_has_mips_r6)
676			break;
677		regs->regs[31] = regs->cp0_epc + 4;
678		*contpc = regs->cp0_epc + dec_insn.pc_inc +
679			dec_insn.next_pc_inc;
680
681		return 1;
682	case pop66_op:
683		if (!cpu_has_mips_r6)
684			break;
685		*contpc = regs->cp0_epc + dec_insn.pc_inc +
686			dec_insn.next_pc_inc;
687
688		return 1;
689	case pop76_op:
690		if (!cpu_has_mips_r6)
691			break;
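		/* R6 pop76 is BNEZC/JIALC; only JIALC (rs == 0) links to $31. */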
692		if (!insn.i_format.rs)
693			regs->regs[31] = regs->cp0_epc + 4;
694		*contpc = regs->cp0_epc + dec_insn.pc_inc +
695			dec_insn.next_pc_inc;
696
697		return 1;
698#endif
699	case cop0_op:
700	case cop1_op:
701		/* Need to check for R6 bc1nez and bc1eqz branches */
702		if (cpu_has_mips_r6 &&
703		    ((insn.i_format.rs == bc1eqz_op) ||
704		     (insn.i_format.rs == bc1nez_op))) {
705			bit = 0;
706			fpr = &current->thread.fpu.fpr[insn.i_format.rt];
707			bit0 = get_fpr32(fpr, 0) & 0x1;
708			switch (insn.i_format.rs) {
709			case bc1eqz_op:
710				bit = bit0 == 0;
711				break;
712			case bc1nez_op:
713				bit = bit0 != 0;
714				break;
715			}
716			if (bit)
717				*contpc = regs->cp0_epc +
718					dec_insn.pc_inc +
719					(insn.i_format.simmediate << 2);
720			else
721				*contpc = regs->cp0_epc +
722					dec_insn.pc_inc +
723					dec_insn.next_pc_inc;
724
725			return 1;
726		}
727		/* R2/R6 compatible cop1 instruction */
728		fallthrough;
729	case cop2_op:
730	case cop1x_op:
731		if (insn.i_format.rs == bc_op) {
732			preempt_disable();
733			if (is_fpu_owner())
				fcr31 = read_32bit_cp1_register(CP1_STATUS);
735			else
736				fcr31 = current->thread.fpu.fcr31;
737			preempt_enable();
738
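			/*
			 * Map the condition code number onto its FCSR bit:
			 * cc0 is bit 23, cc1..cc7 are bits 25..31.
			 */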
739			bit = (insn.i_format.rt >> 2);
740			bit += (bit != 0);
741			bit += 23;
742			switch (insn.i_format.rt & 3) {
743			case 0:	/* bc1f */
744			case 2:	/* bc1fl */
745				if (~fcr31 & (1 << bit))
746					*contpc = regs->cp0_epc +
747						dec_insn.pc_inc +
748						(insn.i_format.simmediate << 2);
749				else
750					*contpc = regs->cp0_epc +
751						dec_insn.pc_inc +
752						dec_insn.next_pc_inc;
753				return 1;
754			case 1:	/* bc1t */
755			case 3:	/* bc1tl */
756				if (fcr31 & (1 << bit))
757					*contpc = regs->cp0_epc +
758						dec_insn.pc_inc +
759						(insn.i_format.simmediate << 2);
760				else
761					*contpc = regs->cp0_epc +
762						dec_insn.pc_inc +
763						dec_insn.next_pc_inc;
764				return 1;
765			}
766		}
767		break;
768	}
769	return 0;
770}
771
/*
 * In the Linux kernel, we support selection of FPR format on the
 * basis of the Status.FR bit.  If an FPU is not present, the FR bit
 * is hardwired to zero, which would imply a 32-bit FPU even for
 * 64-bit CPUs, so we look at TIF_32BIT_FPREGS instead.
 * FPU emulation is slow and bulky, and optimizing this function offers
 * fairly sizeable benefits, so we try to be clever and make it return
 * a constant whenever possible, that is on 64-bit kernels without O32
 * compatibility enabled and on 32-bit kernels without 64-bit FPU support.
 */
782static inline int cop1_64bit(struct pt_regs *xcp)
783{
784	if (IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_MIPS32_O32))
785		return 1;
786	else if (IS_ENABLED(CONFIG_32BIT) &&
787		 !IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
788		return 0;
789
790	return !test_thread_flag(TIF_32BIT_FPREGS);
791}
792
793static inline bool hybrid_fprs(void)
794{
795	return test_thread_flag(TIF_HYBRID_FPREGS);
796}
797
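/*
 * The FPR access macros below handle both register file layouts: with
 * 64-bit FPRs each register is used directly, while with 32-bit FPRs a
 * single sits in one half of an even/odd pair and a double must use an
 * even-numbered register (hence the "& ~1" / "& 1" index arithmetic).
 */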
798#define SIFROMREG(si, x)						\
799do {									\
800	if (cop1_64bit(xcp) && !hybrid_fprs())				\
801		(si) = (int)get_fpr32(&ctx->fpr[x], 0);			\
802	else								\
803		(si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1);	\
804} while (0)
805
806#define SITOREG(si, x)							\
807do {									\
808	if (cop1_64bit(xcp) && !hybrid_fprs()) {			\
809		unsigned int i;						\
810		set_fpr32(&ctx->fpr[x], 0, si);				\
811		for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val32); i++)	\
812			set_fpr32(&ctx->fpr[x], i, 0);			\
813	} else {							\
814		set_fpr32(&ctx->fpr[(x) & ~1], (x) & 1, si);		\
815	}								\
816} while (0)
817
818#define SIFROMHREG(si, x)	((si) = (int)get_fpr32(&ctx->fpr[x], 1))
819
820#define SITOHREG(si, x)							\
821do {									\
822	unsigned int i;							\
823	set_fpr32(&ctx->fpr[x], 1, si);					\
824	for (i = 2; i < ARRAY_SIZE(ctx->fpr[x].val32); i++)		\
825		set_fpr32(&ctx->fpr[x], i, 0);				\
826} while (0)
827
828#define DIFROMREG(di, x)						\
829	((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) ^ 1)], 0))
830
831#define DITOREG(di, x)							\
832do {									\
833	unsigned int fpr, i;						\
834	fpr = (x) & ~(cop1_64bit(xcp) ^ 1);				\
835	set_fpr64(&ctx->fpr[fpr], 0, di);				\
836	for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val64); i++)		\
837		set_fpr64(&ctx->fpr[fpr], i, 0);			\
838} while (0)
839
840#define SPFROMREG(sp, x) SIFROMREG((sp).bits, x)
841#define SPTOREG(sp, x)	SITOREG((sp).bits, x)
842#define DPFROMREG(dp, x)	DIFROMREG((dp).bits, x)
843#define DPTOREG(dp, x)	DITOREG((dp).bits, x)
844
845/*
846 * Emulate a CFC1 instruction.
847 */
848static inline void cop1_cfc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
849			    mips_instruction ir)
850{
851	u32 fcr31 = ctx->fcr31;
852	u32 value = 0;
853
854	switch (MIPSInst_RD(ir)) {
855	case FPCREG_CSR:
856		value = fcr31;
857		pr_debug("%p gpr[%d]<-csr=%08x\n",
858			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
859		break;
860
861	case FPCREG_FENR:
862		if (!cpu_has_mips_r)
863			break;
864		value = (fcr31 >> (FPU_CSR_FS_S - MIPS_FENR_FS_S)) &
865			MIPS_FENR_FS;
866		value |= fcr31 & (FPU_CSR_ALL_E | FPU_CSR_RM);
867		pr_debug("%p gpr[%d]<-enr=%08x\n",
868			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
869		break;
870
871	case FPCREG_FEXR:
872		if (!cpu_has_mips_r)
873			break;
874		value = fcr31 & (FPU_CSR_ALL_X | FPU_CSR_ALL_S);
875		pr_debug("%p gpr[%d]<-exr=%08x\n",
876			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
877		break;
878
879	case FPCREG_FCCR:
880		if (!cpu_has_mips_r)
881			break;
882		value = (fcr31 >> (FPU_CSR_COND_S - MIPS_FCCR_COND0_S)) &
883			MIPS_FCCR_COND0;
884		value |= (fcr31 >> (FPU_CSR_COND1_S - MIPS_FCCR_COND1_S)) &
885			 (MIPS_FCCR_CONDX & ~MIPS_FCCR_COND0);
886		pr_debug("%p gpr[%d]<-ccr=%08x\n",
887			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
888		break;
889
890	case FPCREG_RID:
891		value = boot_cpu_data.fpu_id;
892		break;
893
894	default:
895		break;
896	}
897
898	if (MIPSInst_RT(ir))
899		xcp->regs[MIPSInst_RT(ir)] = value;
900}
901
902/*
903 * Emulate a CTC1 instruction.
904 */
905static inline void cop1_ctc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
906			    mips_instruction ir)
907{
908	u32 fcr31 = ctx->fcr31;
909	u32 value;
910	u32 mask;
911
912	if (MIPSInst_RT(ir) == 0)
913		value = 0;
914	else
915		value = xcp->regs[MIPSInst_RT(ir)];
916
917	switch (MIPSInst_RD(ir)) {
918	case FPCREG_CSR:
919		pr_debug("%p gpr[%d]->csr=%08x\n",
920			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
921
922		/* Preserve read-only bits.  */
923		mask = boot_cpu_data.fpu_msk31;
924		fcr31 = (value & ~mask) | (fcr31 & mask);
925		break;
926
927	case FPCREG_FENR:
928		if (!cpu_has_mips_r)
929			break;
930		pr_debug("%p gpr[%d]->enr=%08x\n",
931			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
932		fcr31 &= ~(FPU_CSR_FS | FPU_CSR_ALL_E | FPU_CSR_RM);
933		fcr31 |= (value << (FPU_CSR_FS_S - MIPS_FENR_FS_S)) &
934			 FPU_CSR_FS;
935		fcr31 |= value & (FPU_CSR_ALL_E | FPU_CSR_RM);
936		break;
937
938	case FPCREG_FEXR:
939		if (!cpu_has_mips_r)
940			break;
941		pr_debug("%p gpr[%d]->exr=%08x\n",
942			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
943		fcr31 &= ~(FPU_CSR_ALL_X | FPU_CSR_ALL_S);
944		fcr31 |= value & (FPU_CSR_ALL_X | FPU_CSR_ALL_S);
945		break;
946
947	case FPCREG_FCCR:
948		if (!cpu_has_mips_r)
949			break;
950		pr_debug("%p gpr[%d]->ccr=%08x\n",
951			 (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
952		fcr31 &= ~(FPU_CSR_CONDX | FPU_CSR_COND);
953		fcr31 |= (value << (FPU_CSR_COND_S - MIPS_FCCR_COND0_S)) &
954			 FPU_CSR_COND;
955		fcr31 |= (value << (FPU_CSR_COND1_S - MIPS_FCCR_COND1_S)) &
956			 FPU_CSR_CONDX;
957		break;
958
959	default:
960		break;
961	}
962
963	ctx->fcr31 = fcr31;
964}
965
966/*
967 * Emulate the single floating point instruction pointed at by EPC.
968 * Two instructions if the instruction is in a branch delay slot.
969 */
970
971static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
972		struct mm_decoded_insn dec_insn, void __user **fault_addr)
973{
974	unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
975	unsigned int cond, cbit, bit0;
976	mips_instruction ir;
977	int likely, pc_inc;
978	union fpureg *fpr;
979	u32 __user *wva;
980	u64 __user *dva;
981	u32 wval;
982	u64 dval;
983	int sig;
984
	/*
	 * This gives gcc a gentle hint about what to expect in
	 * dec_insn in order to do better optimization.
	 */
989	if (!cpu_has_mmips && dec_insn.micro_mips_mode)
990		unreachable();
991
992	/* XXX NEC Vr54xx bug workaround */
993	if (delay_slot(xcp)) {
994		if (dec_insn.micro_mips_mode) {
995			if (!mm_isBranchInstr(xcp, dec_insn, &contpc))
996				clear_delay_slot(xcp);
997		} else {
998			if (!isBranchInstr(xcp, dec_insn, &contpc))
999				clear_delay_slot(xcp);
1000		}
1001	}
1002
1003	if (delay_slot(xcp)) {
		/*
		 * The instruction to be emulated is in a branch delay slot,
		 * which means that we have to emulate the branch instruction
		 * BEFORE we do the cop1 instruction.
		 *
		 * This branch could be a COP1 branch, but in that case we
		 * would have had a trap for that instruction, and would not
		 * come through this route.
		 *
		 * The Linux MIPS branch emulator operates on context,
		 * updating the cp0_epc.
		 */
1016		ir = dec_insn.next_insn;  /* process delay slot instr */
1017		pc_inc = dec_insn.next_pc_inc;
1018	} else {
1019		ir = dec_insn.insn;       /* process current instr */
1020		pc_inc = dec_insn.pc_inc;
1021	}
1022
	/*
	 * Since microMIPS FPU instructions are a subset of MIPS32 FPU
	 * instructions, we want to convert microMIPS FPU instructions
	 * into MIPS32 instructions so that we can reuse all of the
	 * FPU emulation code.
	 *
	 * NOTE: We cannot do this for branch instructions since they
	 *       are not a subset. Example: Cannot emulate a 16-bit
	 *       aligned target address with a MIPS32 instruction.
	 */
1033	if (dec_insn.micro_mips_mode) {
1034		/*
1035		 * If next instruction is a 16-bit instruction, then
1036		 * it cannot be a FPU instruction. This could happen
1037		 * since we can be called for non-FPU instructions.
1038		 */
1039		if ((pc_inc == 2) ||
1040			(microMIPS32_to_MIPS32((union mips_instruction *)&ir)
1041			 == SIGILL))
1042			return SIGILL;
1043	}
1044
1045emul:
1046	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
1047	MIPS_FPU_EMU_INC_STATS(emulated);
1048	switch (MIPSInst_OPCODE(ir)) {
1049	case ldc1_op:
1050		dva = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
1051				     MIPSInst_SIMM(ir));
1052		MIPS_FPU_EMU_INC_STATS(loads);
1053
1054		if (!access_ok(dva, sizeof(u64))) {
1055			MIPS_FPU_EMU_INC_STATS(errors);
1056			*fault_addr = dva;
1057			return SIGBUS;
1058		}
1059		if (__get_user(dval, dva)) {
1060			MIPS_FPU_EMU_INC_STATS(errors);
1061			*fault_addr = dva;
1062			return SIGSEGV;
1063		}
1064		DITOREG(dval, MIPSInst_RT(ir));
1065		break;
1066
1067	case sdc1_op:
1068		dva = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
1069				      MIPSInst_SIMM(ir));
1070		MIPS_FPU_EMU_INC_STATS(stores);
1071		DIFROMREG(dval, MIPSInst_RT(ir));
1072		if (!access_ok(dva, sizeof(u64))) {
1073			MIPS_FPU_EMU_INC_STATS(errors);
1074			*fault_addr = dva;
1075			return SIGBUS;
1076		}
1077		if (__put_user(dval, dva)) {
1078			MIPS_FPU_EMU_INC_STATS(errors);
1079			*fault_addr = dva;
1080			return SIGSEGV;
1081		}
1082		break;
1083
1084	case lwc1_op:
1085		wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
1086				      MIPSInst_SIMM(ir));
1087		MIPS_FPU_EMU_INC_STATS(loads);
1088		if (!access_ok(wva, sizeof(u32))) {
1089			MIPS_FPU_EMU_INC_STATS(errors);
1090			*fault_addr = wva;
1091			return SIGBUS;
1092		}
1093		if (__get_user(wval, wva)) {
1094			MIPS_FPU_EMU_INC_STATS(errors);
1095			*fault_addr = wva;
1096			return SIGSEGV;
1097		}
1098		SITOREG(wval, MIPSInst_RT(ir));
1099		break;
1100
1101	case swc1_op:
1102		wva = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
1103				      MIPSInst_SIMM(ir));
1104		MIPS_FPU_EMU_INC_STATS(stores);
1105		SIFROMREG(wval, MIPSInst_RT(ir));
1106		if (!access_ok(wva, sizeof(u32))) {
1107			MIPS_FPU_EMU_INC_STATS(errors);
1108			*fault_addr = wva;
1109			return SIGBUS;
1110		}
1111		if (__put_user(wval, wva)) {
1112			MIPS_FPU_EMU_INC_STATS(errors);
1113			*fault_addr = wva;
1114			return SIGSEGV;
1115		}
1116		break;
1117
1118	case cop1_op:
1119		switch (MIPSInst_RS(ir)) {
1120		case dmfc_op:
1121			if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
1122				return SIGILL;
1123
1124			/* copregister fs -> gpr[rt] */
1125			if (MIPSInst_RT(ir) != 0) {
1126				DIFROMREG(xcp->regs[MIPSInst_RT(ir)],
1127					MIPSInst_RD(ir));
1128			}
1129			break;
1130
1131		case dmtc_op:
1132			if (!cpu_has_mips_3_4_5 && !cpu_has_mips64)
1133				return SIGILL;
1134
1135			/* copregister fs <- rt */
1136			DITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
1137			break;
1138
1139		case mfhc_op:
1140			if (!cpu_has_mips_r2_r6)
1141				return SIGILL;
1142
1143			/* copregister rd -> gpr[rt] */
1144			if (MIPSInst_RT(ir) != 0) {
1145				SIFROMHREG(xcp->regs[MIPSInst_RT(ir)],
1146					MIPSInst_RD(ir));
1147			}
1148			break;
1149
1150		case mthc_op:
1151			if (!cpu_has_mips_r2_r6)
1152				return SIGILL;
1153
1154			/* copregister rd <- gpr[rt] */
1155			SITOHREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
1156			break;
1157
1158		case mfc_op:
1159			/* copregister rd -> gpr[rt] */
1160			if (MIPSInst_RT(ir) != 0) {
1161				SIFROMREG(xcp->regs[MIPSInst_RT(ir)],
1162					MIPSInst_RD(ir));
1163			}
1164			break;
1165
1166		case mtc_op:
1167			/* copregister rd <- rt */
1168			SITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
1169			break;
1170
1171		case cfc_op:
1172			/* cop control register rd -> gpr[rt] */
1173			cop1_cfc(xcp, ctx, ir);
1174			break;
1175
1176		case ctc_op:
1177			/* copregister rd <- rt */
1178			cop1_ctc(xcp, ctx, ir);
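			/*
			 * FCSR cause bits sit 5 bits above the matching
			 * enable bits, so this checks whether the CTC1
			 * raised any enabled exception.
			 */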
1179			if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
1180				return SIGFPE;
1181			}
1182			break;
1183
1184		case bc1eqz_op:
1185		case bc1nez_op:
1186			if (!cpu_has_mips_r6 || delay_slot(xcp))
1187				return SIGILL;
1188
1189			likely = 0;
1190			cond = 0;
1191			fpr = &current->thread.fpu.fpr[MIPSInst_RT(ir)];
1192			bit0 = get_fpr32(fpr, 0) & 0x1;
1193			switch (MIPSInst_RS(ir)) {
1194			case bc1eqz_op:
1195				MIPS_FPU_EMU_INC_STATS(bc1eqz);
1196				cond = bit0 == 0;
1197				break;
1198			case bc1nez_op:
1199				MIPS_FPU_EMU_INC_STATS(bc1nez);
1200				cond = bit0 != 0;
1201				break;
1202			}
1203			goto branch_common;
1204
1205		case bc_op:
1206			if (delay_slot(xcp))
1207				return SIGILL;
1208
1209			if (cpu_has_mips_4_5_r)
1210				cbit = fpucondbit[MIPSInst_RT(ir) >> 2];
1211			else
1212				cbit = FPU_CSR_COND;
1213			cond = ctx->fcr31 & cbit;
1214
1215			likely = 0;
1216			switch (MIPSInst_RT(ir) & 3) {
1217			case bcfl_op:
1218				if (cpu_has_mips_2_3_4_5_r)
1219					likely = 1;
1220				fallthrough;
1221			case bcf_op:
1222				cond = !cond;
1223				break;
1224			case bctl_op:
1225				if (cpu_has_mips_2_3_4_5_r)
1226					likely = 1;
1227				fallthrough;
1228			case bct_op:
1229				break;
1230			}
1231branch_common:
1232			MIPS_FPU_EMU_INC_STATS(branches);
1233			set_delay_slot(xcp);
1234			if (cond) {
1235				/*
1236				 * Branch taken: emulate dslot instruction
1237				 */
1238				unsigned long bcpc;
1239
1240				/*
1241				 * Remember EPC at the branch to point back
1242				 * at so that any delay-slot instruction
1243				 * signal is not silently ignored.
1244				 */
1245				bcpc = xcp->cp0_epc;
1246				xcp->cp0_epc += dec_insn.pc_inc;
1247
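				/*
				 * The branch offset is a signed instruction
				 * count; scale it by 2 (microMIPS) or 4
				 * (MIPS32) below.
				 */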
1248				contpc = MIPSInst_SIMM(ir);
1249				ir = dec_insn.next_insn;
1250				if (dec_insn.micro_mips_mode) {
1251					contpc = (xcp->cp0_epc + (contpc << 1));
1252
1253					/* If 16-bit instruction, not FPU. */
1254					if ((dec_insn.next_pc_inc == 2) ||
1255						(microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) {
1256
1257						/*
1258						 * Since this instruction will
1259						 * be put on the stack with
1260						 * 32-bit words, get around
1261						 * this problem by putting a
1262						 * NOP16 as the second one.
1263						 */
1264						if (dec_insn.next_pc_inc == 2)
1265							ir = (ir & (~0xffff)) | MM_NOP16;
1266
1267						/*
1268						 * Single step the non-CP1
1269						 * instruction in the dslot.
1270						 */
1271						sig = mips_dsemul(xcp, ir,
1272								  bcpc, contpc);
1273						if (sig < 0)
1274							break;
1275						if (sig)
1276							xcp->cp0_epc = bcpc;
1277						/*
1278						 * SIGILL forces out of
1279						 * the emulation loop.
1280						 */
1281						return sig ? sig : SIGILL;
1282					}
1283				} else
1284					contpc = (xcp->cp0_epc + (contpc << 2));
1285
1286				switch (MIPSInst_OPCODE(ir)) {
1287				case lwc1_op:
1288				case swc1_op:
1289					goto emul;
1290
1291				case ldc1_op:
1292				case sdc1_op:
1293					if (cpu_has_mips_2_3_4_5_r)
1294						goto emul;
1295
1296					goto bc_sigill;
1297
1298				case cop1_op:
1299					goto emul;
1300
1301				case cop1x_op:
1302					if (cpu_has_mips_4_5_64_r2_r6)
						/* it's one of ours */
1304						goto emul;
1305
1306					goto bc_sigill;
1307
1308				case spec_op:
1309					switch (MIPSInst_FUNC(ir)) {
1310					case movc_op:
1311						if (cpu_has_mips_4_5_r)
1312							goto emul;
1313
1314						goto bc_sigill;
1315					}
1316					break;
1317
1318				bc_sigill:
1319					xcp->cp0_epc = bcpc;
1320					return SIGILL;
1321				}
1322
1323				/*
1324				 * Single step the non-cp1
1325				 * instruction in the dslot
1326				 */
1327				sig = mips_dsemul(xcp, ir, bcpc, contpc);
1328				if (sig < 0)
1329					break;
1330				if (sig)
1331					xcp->cp0_epc = bcpc;
1332				/* SIGILL forces out of the emulation loop.  */
1333				return sig ? sig : SIGILL;
1334			} else if (likely) {	/* branch not taken */
1335				/*
1336				 * branch likely nullifies
1337				 * dslot if not taken
1338				 */
1339				xcp->cp0_epc += dec_insn.pc_inc;
1340				contpc += dec_insn.pc_inc;
1341				/*
1342				 * else continue & execute
1343				 * dslot as normal insn
1344				 */
1345			}
1346			break;
1347
1348		default:
1349			if (!(MIPSInst_RS(ir) & 0x10))
1350				return SIGILL;
1351
1352			/* a real fpu computation instruction */
1353			sig = fpu_emu(xcp, ctx, ir);
1354			if (sig)
1355				return sig;
1356		}
1357		break;
1358
1359	case cop1x_op:
1360		if (!cpu_has_mips_4_5_64_r2_r6)
1361			return SIGILL;
1362
1363		sig = fpux_emu(xcp, ctx, ir, fault_addr);
1364		if (sig)
1365			return sig;
1366		break;
1367
1368	case spec_op:
1369		if (!cpu_has_mips_4_5_r)
1370			return SIGILL;
1371
1372		if (MIPSInst_FUNC(ir) != movc_op)
1373			return SIGILL;
1374		cond = fpucondbit[MIPSInst_RT(ir) >> 2];
1375		if (((ctx->fcr31 & cond) != 0) == ((MIPSInst_RT(ir) & 1) != 0))
1376			xcp->regs[MIPSInst_RD(ir)] =
1377				xcp->regs[MIPSInst_RS(ir)];
1378		break;
1379	default:
1380		return SIGILL;
1381	}
1382
1383	/* we did it !! */
1384	xcp->cp0_epc = contpc;
1385	clear_delay_slot(xcp);
1386
1387	return 0;
1388}
1389
1390/*
1391 * Conversion table from MIPS compare ops 48-63
1392 * cond = ieee754dp_cmp(x,y,IEEE754_UN,sig);
1393 */
1394static const unsigned char cmptab[8] = {
1395	0,			/* cmp_0 (sig) cmp_sf */
1396	IEEE754_CUN,		/* cmp_un (sig) cmp_ngle */
1397	IEEE754_CEQ,		/* cmp_eq (sig) cmp_seq */
1398	IEEE754_CEQ | IEEE754_CUN,	/* cmp_ueq (sig) cmp_ngl  */
1399	IEEE754_CLT,		/* cmp_olt (sig) cmp_lt */
1400	IEEE754_CLT | IEEE754_CUN,	/* cmp_ult (sig) cmp_nge */
1401	IEEE754_CLT | IEEE754_CEQ,	/* cmp_ole (sig) cmp_le */
1402	IEEE754_CLT | IEEE754_CEQ | IEEE754_CUN,	/* cmp_ule (sig) cmp_ngt */
1403};
1404
1405static const unsigned char negative_cmptab[8] = {
1406	0, /* Reserved */
1407	IEEE754_CLT | IEEE754_CGT | IEEE754_CEQ,
1408	IEEE754_CLT | IEEE754_CGT | IEEE754_CUN,
1409	IEEE754_CLT | IEEE754_CGT,
1410	/* Reserved */
1411};
1412
1413
1414/*
1415 * Additional MIPS4 instructions
1416 */
1417
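/*
 * DEF3OP builds the unfused multiply-accumulate helpers (MADD, MSUB,
 * NMADD, NMSUB): each constituent operation rounds separately, while the
 * IEEE exception flags raised along the way are accumulated.
 */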
1418#define DEF3OP(name, p, f1, f2, f3)					\
1419static union ieee754##p fpemu_##p##_##name(union ieee754##p r,		\
1420	union ieee754##p s, union ieee754##p t)				\
1421{									\
1422	struct _ieee754_csr ieee754_csr_save;				\
1423	s = f1(s, t);							\
1424	ieee754_csr_save = ieee754_csr;					\
1425	s = f2(s, r);							\
1426	ieee754_csr_save.cx |= ieee754_csr.cx;				\
1427	ieee754_csr_save.sx |= ieee754_csr.sx;				\
1428	s = f3(s);							\
1429	ieee754_csr.cx |= ieee754_csr_save.cx;				\
1430	ieee754_csr.sx |= ieee754_csr_save.sx;				\
1431	return s;							\
1432}
1433
1434static union ieee754dp fpemu_dp_recip(union ieee754dp d)
1435{
1436	return ieee754dp_div(ieee754dp_one(0), d);
1437}
1438
1439static union ieee754dp fpemu_dp_rsqrt(union ieee754dp d)
1440{
1441	return ieee754dp_div(ieee754dp_one(0), ieee754dp_sqrt(d));
1442}
1443
1444static union ieee754sp fpemu_sp_recip(union ieee754sp s)
1445{
1446	return ieee754sp_div(ieee754sp_one(0), s);
1447}
1448
1449static union ieee754sp fpemu_sp_rsqrt(union ieee754sp s)
1450{
1451	return ieee754sp_div(ieee754sp_one(0), ieee754sp_sqrt(s));
1452}
1453
1454DEF3OP(madd, sp, ieee754sp_mul, ieee754sp_add, );
1455DEF3OP(msub, sp, ieee754sp_mul, ieee754sp_sub, );
1456DEF3OP(nmadd, sp, ieee754sp_mul, ieee754sp_add, ieee754sp_neg);
1457DEF3OP(nmsub, sp, ieee754sp_mul, ieee754sp_sub, ieee754sp_neg);
1458DEF3OP(madd, dp, ieee754dp_mul, ieee754dp_add, );
1459DEF3OP(msub, dp, ieee754dp_mul, ieee754dp_sub, );
1460DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg);
1461DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg);
1462
1463static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1464	mips_instruction ir, void __user **fault_addr)
1465{
1466	unsigned int rcsr = 0;	/* resulting csr */
1467
1468	MIPS_FPU_EMU_INC_STATS(cp1xops);
1469
1470	switch (MIPSInst_FMA_FFMT(ir)) {
1471	case s_fmt:{		/* 0 */
1472
1473		union ieee754sp(*handler) (union ieee754sp, union ieee754sp, union ieee754sp);
1474		union ieee754sp fd, fr, fs, ft;
1475		u32 __user *va;
1476		u32 val;
1477
1478		switch (MIPSInst_FUNC(ir)) {
1479		case lwxc1_op:
1480			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
1481				xcp->regs[MIPSInst_FT(ir)]);
1482
1483			MIPS_FPU_EMU_INC_STATS(loads);
1484			if (!access_ok(va, sizeof(u32))) {
1485				MIPS_FPU_EMU_INC_STATS(errors);
1486				*fault_addr = va;
1487				return SIGBUS;
1488			}
1489			if (__get_user(val, va)) {
1490				MIPS_FPU_EMU_INC_STATS(errors);
1491				*fault_addr = va;
1492				return SIGSEGV;
1493			}
1494			SITOREG(val, MIPSInst_FD(ir));
1495			break;
1496
1497		case swxc1_op:
1498			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
1499				xcp->regs[MIPSInst_FT(ir)]);
1500
1501			MIPS_FPU_EMU_INC_STATS(stores);
1502
1503			SIFROMREG(val, MIPSInst_FS(ir));
1504			if (!access_ok(va, sizeof(u32))) {
1505				MIPS_FPU_EMU_INC_STATS(errors);
1506				*fault_addr = va;
1507				return SIGBUS;
1508			}
1509			if (put_user(val, va)) {
1510				MIPS_FPU_EMU_INC_STATS(errors);
1511				*fault_addr = va;
1512				return SIGSEGV;
1513			}
1514			break;
1515
1516		case madd_s_op:
1517			if (cpu_has_mac2008_only)
1518				handler = ieee754sp_madd;
1519			else
1520				handler = fpemu_sp_madd;
1521			goto scoptop;
1522		case msub_s_op:
1523			if (cpu_has_mac2008_only)
1524				handler = ieee754sp_msub;
1525			else
1526				handler = fpemu_sp_msub;
1527			goto scoptop;
1528		case nmadd_s_op:
1529			if (cpu_has_mac2008_only)
1530				handler = ieee754sp_nmadd;
1531			else
1532				handler = fpemu_sp_nmadd;
1533			goto scoptop;
1534		case nmsub_s_op:
1535			if (cpu_has_mac2008_only)
1536				handler = ieee754sp_nmsub;
1537			else
1538				handler = fpemu_sp_nmsub;
1539			goto scoptop;
1540
1541		      scoptop:
1542			SPFROMREG(fr, MIPSInst_FR(ir));
1543			SPFROMREG(fs, MIPSInst_FS(ir));
1544			SPFROMREG(ft, MIPSInst_FT(ir));
1545			fd = (*handler) (fr, fs, ft);
1546			SPTOREG(fd, MIPSInst_FD(ir));
1547
1548		      copcsr:
1549			if (ieee754_cxtest(IEEE754_INEXACT)) {
1550				MIPS_FPU_EMU_INC_STATS(ieee754_inexact);
1551				rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
1552			}
1553			if (ieee754_cxtest(IEEE754_UNDERFLOW)) {
1554				MIPS_FPU_EMU_INC_STATS(ieee754_underflow);
1555				rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
1556			}
1557			if (ieee754_cxtest(IEEE754_OVERFLOW)) {
1558				MIPS_FPU_EMU_INC_STATS(ieee754_overflow);
1559				rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
1560			}
1561			if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) {
1562				MIPS_FPU_EMU_INC_STATS(ieee754_invalidop);
1563				rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
1564			}
1565
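			/*
			 * Cause bits reflect only this operation, flag
			 * (sticky) bits accumulate; signal if an enabled
			 * exception was raised.
			 */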
1566			ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
1567			if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
1568				/*printk ("SIGFPE: FPU csr = %08x\n",
1569				   ctx->fcr31); */
1570				return SIGFPE;
1571			}
1572
1573			break;
1574
1575		default:
1576			return SIGILL;
1577		}
1578		break;
1579	}
1580
1581	case d_fmt:{		/* 1 */
1582		union ieee754dp(*handler) (union ieee754dp, union ieee754dp, union ieee754dp);
1583		union ieee754dp fd, fr, fs, ft;
1584		u64 __user *va;
1585		u64 val;
1586
1587		switch (MIPSInst_FUNC(ir)) {
1588		case ldxc1_op:
1589			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
1590				xcp->regs[MIPSInst_FT(ir)]);
1591
1592			MIPS_FPU_EMU_INC_STATS(loads);
1593			if (!access_ok(va, sizeof(u64))) {
1594				MIPS_FPU_EMU_INC_STATS(errors);
1595				*fault_addr = va;
1596				return SIGBUS;
1597			}
1598			if (__get_user(val, va)) {
1599				MIPS_FPU_EMU_INC_STATS(errors);
1600				*fault_addr = va;
1601				return SIGSEGV;
1602			}
1603			DITOREG(val, MIPSInst_FD(ir));
1604			break;
1605
1606		case sdxc1_op:
1607			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
1608				xcp->regs[MIPSInst_FT(ir)]);
1609
1610			MIPS_FPU_EMU_INC_STATS(stores);
1611			DIFROMREG(val, MIPSInst_FS(ir));
1612			if (!access_ok(va, sizeof(u64))) {
1613				MIPS_FPU_EMU_INC_STATS(errors);
1614				*fault_addr = va;
1615				return SIGBUS;
1616			}
1617			if (__put_user(val, va)) {
1618				MIPS_FPU_EMU_INC_STATS(errors);
1619				*fault_addr = va;
1620				return SIGSEGV;
1621			}
1622			break;
1623
1624		case madd_d_op:
1625			if (cpu_has_mac2008_only)
1626				handler = ieee754dp_madd;
1627			else
1628				handler = fpemu_dp_madd;
1629			goto dcoptop;
1630		case msub_d_op:
1631			if (cpu_has_mac2008_only)
1632				handler = ieee754dp_msub;
1633			else
1634				handler = fpemu_dp_msub;
1635			goto dcoptop;
1636		case nmadd_d_op:
1637			if (cpu_has_mac2008_only)
1638				handler = ieee754dp_nmadd;
1639			else
1640				handler = fpemu_dp_nmadd;
1641			goto dcoptop;
1642		case nmsub_d_op:
1643			if (cpu_has_mac2008_only)
1644				handler = ieee754dp_nmsub;
1645			else
				handler = fpemu_dp_nmsub;
1647			goto dcoptop;
1648
1649		      dcoptop:
1650			DPFROMREG(fr, MIPSInst_FR(ir));
1651			DPFROMREG(fs, MIPSInst_FS(ir));
1652			DPFROMREG(ft, MIPSInst_FT(ir));
1653			fd = (*handler) (fr, fs, ft);
1654			DPTOREG(fd, MIPSInst_FD(ir));
1655			goto copcsr;
1656
1657		default:
1658			return SIGILL;
1659		}
1660		break;
1661	}
1662
1663	case 0x3:
1664		if (MIPSInst_FUNC(ir) != pfetch_op)
1665			return SIGILL;
1666
1667		/* ignore prefx operation */
1668		break;
1669
1670	default:
1671		return SIGILL;
1672	}
1673
1674	return 0;
1675}
1676
1677
1678
1679/*
1680 * Emulate a single COP1 arithmetic instruction.
1681 */
1682static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1683	mips_instruction ir)
1684{
1685	int rfmt;		/* resulting format */
1686	unsigned int rcsr = 0;	/* resulting csr */
1687	unsigned int oldrm;
1688	unsigned int cbit;
1689	unsigned int cond;
1690	union {
1691		union ieee754dp d;
1692		union ieee754sp s;
1693		int w;
1694		s64 l;
1695	} rv;			/* resulting value */
1696	u64 bits;
1697
1698	MIPS_FPU_EMU_INC_STATS(cp1ops);
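	/*
	 * The format field (16..22) is reduced modulo 16 so that it lines
	 * up with the s_fmt/d_fmt/w_fmt/l_fmt case values below.
	 */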
1699	switch (rfmt = (MIPSInst_FFMT(ir) & 0xf)) {
1700	case s_fmt: {		/* 0 */
1701		union {
1702			union ieee754sp(*b) (union ieee754sp, union ieee754sp);
1703			union ieee754sp(*u) (union ieee754sp);
1704		} handler;
1705		union ieee754sp fd, fs, ft;
1706
1707		switch (MIPSInst_FUNC(ir)) {
1708			/* binary ops */
1709		case fadd_op:
1710			MIPS_FPU_EMU_INC_STATS(add_s);
1711			handler.b = ieee754sp_add;
1712			goto scopbop;
1713		case fsub_op:
1714			MIPS_FPU_EMU_INC_STATS(sub_s);
1715			handler.b = ieee754sp_sub;
1716			goto scopbop;
1717		case fmul_op:
1718			MIPS_FPU_EMU_INC_STATS(mul_s);
1719			handler.b = ieee754sp_mul;
1720			goto scopbop;
1721		case fdiv_op:
1722			MIPS_FPU_EMU_INC_STATS(div_s);
1723			handler.b = ieee754sp_div;
1724			goto scopbop;
1725
1726			/* unary  ops */
1727		case fsqrt_op:
1728			if (!cpu_has_mips_2_3_4_5_r)
1729				return SIGILL;
1730
1731			MIPS_FPU_EMU_INC_STATS(sqrt_s);
1732			handler.u = ieee754sp_sqrt;
1733			goto scopuop;
1734
1735		/*
1736		 * Note that on some MIPS IV implementations such as the
1737		 * R5000 and R8000 the FSQRT and FRECIP instructions do not
1738		 * achieve full IEEE-754 accuracy - however this emulator does.
1739		 */
1740		case frsqrt_op:
1741			if (!cpu_has_mips_4_5_64_r2_r6)
1742				return SIGILL;
1743
1744			MIPS_FPU_EMU_INC_STATS(rsqrt_s);
1745			handler.u = fpemu_sp_rsqrt;
1746			goto scopuop;
1747
1748		case frecip_op:
1749			if (!cpu_has_mips_4_5_64_r2_r6)
1750				return SIGILL;
1751
1752			MIPS_FPU_EMU_INC_STATS(recip_s);
1753			handler.u = fpemu_sp_recip;
1754			goto scopuop;
1755
1756		case fmovc_op:
1757			if (!cpu_has_mips_4_5_r)
1758				return SIGILL;
1759
1760			cond = fpucondbit[MIPSInst_FT(ir) >> 2];
1761			if (((ctx->fcr31 & cond) != 0) !=
1762				((MIPSInst_FT(ir) & 1) != 0))
1763				return 0;
1764			SPFROMREG(rv.s, MIPSInst_FS(ir));
1765			break;
1766
1767		case fmovz_op:
1768			if (!cpu_has_mips_4_5_r)
1769				return SIGILL;
1770
1771			if (xcp->regs[MIPSInst_FT(ir)] != 0)
1772				return 0;
1773			SPFROMREG(rv.s, MIPSInst_FS(ir));
1774			break;
1775
1776		case fmovn_op:
1777			if (!cpu_has_mips_4_5_r)
1778				return SIGILL;
1779
1780			if (xcp->regs[MIPSInst_FT(ir)] == 0)
1781				return 0;
1782			SPFROMREG(rv.s, MIPSInst_FS(ir));
1783			break;
1784
1785		case fseleqz_op:
1786			if (!cpu_has_mips_r6)
1787				return SIGILL;
1788
1789			MIPS_FPU_EMU_INC_STATS(seleqz_s);
1790			SPFROMREG(rv.s, MIPSInst_FT(ir));
1791			if (rv.w & 0x1)
1792				rv.w = 0;
1793			else
1794				SPFROMREG(rv.s, MIPSInst_FS(ir));
1795			break;
1796
1797		case fselnez_op:
1798			if (!cpu_has_mips_r6)
1799				return SIGILL;
1800
1801			MIPS_FPU_EMU_INC_STATS(selnez_s);
1802			SPFROMREG(rv.s, MIPSInst_FT(ir));
1803			if (rv.w & 0x1)
1804				SPFROMREG(rv.s, MIPSInst_FS(ir));
1805			else
1806				rv.w = 0;
1807			break;
1808
1809		case fmaddf_op: {
1810			union ieee754sp ft, fs, fd;
1811
1812			if (!cpu_has_mips_r6)
1813				return SIGILL;
1814
1815			MIPS_FPU_EMU_INC_STATS(maddf_s);
1816			SPFROMREG(ft, MIPSInst_FT(ir));
1817			SPFROMREG(fs, MIPSInst_FS(ir));
1818			SPFROMREG(fd, MIPSInst_FD(ir));
1819			rv.s = ieee754sp_maddf(fd, fs, ft);
1820			goto copcsr;
1821		}
1822
1823		case fmsubf_op: {
1824			union ieee754sp ft, fs, fd;
1825
1826			if (!cpu_has_mips_r6)
1827				return SIGILL;
1828
1829			MIPS_FPU_EMU_INC_STATS(msubf_s);
1830			SPFROMREG(ft, MIPSInst_FT(ir));
1831			SPFROMREG(fs, MIPSInst_FS(ir));
1832			SPFROMREG(fd, MIPSInst_FD(ir));
1833			rv.s = ieee754sp_msubf(fd, fs, ft);
1834			goto copcsr;
1835		}
1836
1837		case frint_op: {
1838			union ieee754sp fs;
1839
1840			if (!cpu_has_mips_r6)
1841				return SIGILL;
1842
1843			MIPS_FPU_EMU_INC_STATS(rint_s);
1844			SPFROMREG(fs, MIPSInst_FS(ir));
1845			rv.s = ieee754sp_rint(fs);
1846			goto copcsr;
1847		}
1848
1849		case fclass_op: {
1850			union ieee754sp fs;
1851
1852			if (!cpu_has_mips_r6)
1853				return SIGILL;
1854
1855			MIPS_FPU_EMU_INC_STATS(class_s);
1856			SPFROMREG(fs, MIPSInst_FS(ir));
1857			rv.w = ieee754sp_2008class(fs);
1858			rfmt = w_fmt;
1859			goto copcsr;
1860		}
1861
1862		case fmin_op: {
1863			union ieee754sp fs, ft;
1864
1865			if (!cpu_has_mips_r6)
1866				return SIGILL;
1867
1868			MIPS_FPU_EMU_INC_STATS(min_s);
1869			SPFROMREG(ft, MIPSInst_FT(ir));
1870			SPFROMREG(fs, MIPSInst_FS(ir));
1871			rv.s = ieee754sp_fmin(fs, ft);
1872			goto copcsr;
1873		}
1874
1875		case fmina_op: {
1876			union ieee754sp fs, ft;
1877
1878			if (!cpu_has_mips_r6)
1879				return SIGILL;
1880
1881			MIPS_FPU_EMU_INC_STATS(mina_s);
1882			SPFROMREG(ft, MIPSInst_FT(ir));
1883			SPFROMREG(fs, MIPSInst_FS(ir));
1884			rv.s = ieee754sp_fmina(fs, ft);
1885			goto copcsr;
1886		}
1887
1888		case fmax_op: {
1889			union ieee754sp fs, ft;
1890
1891			if (!cpu_has_mips_r6)
1892				return SIGILL;
1893
1894			MIPS_FPU_EMU_INC_STATS(max_s);
1895			SPFROMREG(ft, MIPSInst_FT(ir));
1896			SPFROMREG(fs, MIPSInst_FS(ir));
1897			rv.s = ieee754sp_fmax(fs, ft);
1898			goto copcsr;
1899		}
1900
1901		case fmaxa_op: {
1902			union ieee754sp fs, ft;
1903
1904			if (!cpu_has_mips_r6)
1905				return SIGILL;
1906
1907			MIPS_FPU_EMU_INC_STATS(maxa_s);
1908			SPFROMREG(ft, MIPSInst_FT(ir));
1909			SPFROMREG(fs, MIPSInst_FS(ir));
1910			rv.s = ieee754sp_fmaxa(fs, ft);
1911			goto copcsr;
1912		}
1913
1914		case fabs_op:
1915			MIPS_FPU_EMU_INC_STATS(abs_s);
1916			handler.u = ieee754sp_abs;
1917			goto scopuop;
1918
1919		case fneg_op:
1920			MIPS_FPU_EMU_INC_STATS(neg_s);
1921			handler.u = ieee754sp_neg;
1922			goto scopuop;
1923
1924		case fmov_op:
1925			/* an easy one */
1926			MIPS_FPU_EMU_INC_STATS(mov_s);
1927			SPFROMREG(rv.s, MIPSInst_FS(ir));
1928			goto copcsr;
1929
1930			/* binary op on handler */
1931scopbop:
1932			SPFROMREG(fs, MIPSInst_FS(ir));
1933			SPFROMREG(ft, MIPSInst_FT(ir));
1934
1935			rv.s = (*handler.b) (fs, ft);
1936			goto copcsr;
1937scopuop:
1938			SPFROMREG(fs, MIPSInst_FS(ir));
1939			rv.s = (*handler.u) (fs);
1940			goto copcsr;
1941copcsr:
1942			if (ieee754_cxtest(IEEE754_INEXACT)) {
1943				MIPS_FPU_EMU_INC_STATS(ieee754_inexact);
1944				rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
1945			}
1946			if (ieee754_cxtest(IEEE754_UNDERFLOW)) {
1947				MIPS_FPU_EMU_INC_STATS(ieee754_underflow);
1948				rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
1949			}
1950			if (ieee754_cxtest(IEEE754_OVERFLOW)) {
1951				MIPS_FPU_EMU_INC_STATS(ieee754_overflow);
1952				rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
1953			}
1954			if (ieee754_cxtest(IEEE754_ZERO_DIVIDE)) {
1955				MIPS_FPU_EMU_INC_STATS(ieee754_zerodiv);
1956				rcsr |= FPU_CSR_DIV_X | FPU_CSR_DIV_S;
1957			}
1958			if (ieee754_cxtest(IEEE754_INVALID_OPERATION)) {
1959				MIPS_FPU_EMU_INC_STATS(ieee754_invalidop);
1960				rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
1961			}
1962			break;
1963
1964			/* unary conv ops */
1965		case fcvts_op:
1966			return SIGILL;	/* not defined */
1967
1968		case fcvtd_op:
1969			MIPS_FPU_EMU_INC_STATS(cvt_d_s);
1970			SPFROMREG(fs, MIPSInst_FS(ir));
1971			rv.d = ieee754dp_fsp(fs);
1972			rfmt = d_fmt;
1973			goto copcsr;
1974
1975		case fcvtw_op:
1976			MIPS_FPU_EMU_INC_STATS(cvt_w_s);
1977			SPFROMREG(fs, MIPSInst_FS(ir));
1978			rv.w = ieee754sp_tint(fs);
1979			rfmt = w_fmt;
1980			goto copcsr;
1981
1982		case fround_op:
1983		case ftrunc_op:
1984		case fceil_op:
1985		case ffloor_op:
1986			if (!cpu_has_mips_2_3_4_5_r)
1987				return SIGILL;
1988
1989			if (MIPSInst_FUNC(ir) == fceil_op)
1990				MIPS_FPU_EMU_INC_STATS(ceil_w_s);
1991			if (MIPSInst_FUNC(ir) == ffloor_op)
1992				MIPS_FPU_EMU_INC_STATS(floor_w_s);
1993			if (MIPSInst_FUNC(ir) == fround_op)
1994				MIPS_FPU_EMU_INC_STATS(round_w_s);
1995			if (MIPSInst_FUNC(ir) == ftrunc_op)
1996				MIPS_FPU_EMU_INC_STATS(trunc_w_s);
1997
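			/*
			 * The low two bits of the ROUND/TRUNC/CEIL/FLOOR
			 * function codes match the FCSR rounding modes, so
			 * the function code selects the rounding direction
			 * here.
			 */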
1998			oldrm = ieee754_csr.rm;
1999			SPFROMREG(fs, MIPSInst_FS(ir));
2000			ieee754_csr.rm = MIPSInst_FUNC(ir);
2001			rv.w = ieee754sp_tint(fs);
2002			ieee754_csr.rm = oldrm;
2003			rfmt = w_fmt;
2004			goto copcsr;
2005
2006		case fsel_op:
2007			if (!cpu_has_mips_r6)
2008				return SIGILL;
2009
2010			MIPS_FPU_EMU_INC_STATS(sel_s);
2011			SPFROMREG(fd, MIPSInst_FD(ir));
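			/* SEL.S: bit 0 of fd selects ft (1) or fs (0) */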
2012			if (fd.bits & 0x1)
2013				SPFROMREG(rv.s, MIPSInst_FT(ir));
2014			else
2015				SPFROMREG(rv.s, MIPSInst_FS(ir));
2016			break;
2017
2018		case fcvtl_op:
2019			if (!cpu_has_mips_3_4_5_64_r2_r6)
2020				return SIGILL;
2021
2022			MIPS_FPU_EMU_INC_STATS(cvt_l_s);
2023			SPFROMREG(fs, MIPSInst_FS(ir));
2024			rv.l = ieee754sp_tlong(fs);
2025			rfmt = l_fmt;
2026			goto copcsr;
2027
2028		case froundl_op:
2029		case ftruncl_op:
2030		case fceill_op:
2031		case ffloorl_op:
2032			if (!cpu_has_mips_3_4_5_64_r2_r6)
2033				return SIGILL;
2034
2035			if (MIPSInst_FUNC(ir) == fceill_op)
2036				MIPS_FPU_EMU_INC_STATS(ceil_l_s);
2037			if (MIPSInst_FUNC(ir) == ffloorl_op)
2038				MIPS_FPU_EMU_INC_STATS(floor_l_s);
2039			if (MIPSInst_FUNC(ir) == froundl_op)
2040				MIPS_FPU_EMU_INC_STATS(round_l_s);
2041			if (MIPSInst_FUNC(ir) == ftruncl_op)
2042				MIPS_FPU_EMU_INC_STATS(trunc_l_s);
2043
2044			oldrm = ieee754_csr.rm;
2045			SPFROMREG(fs, MIPSInst_FS(ir));
2046			ieee754_csr.rm = MIPSInst_FUNC(ir);
2047			rv.l = ieee754sp_tlong(fs);
2048			ieee754_csr.rm = oldrm;
2049			rfmt = l_fmt;
2050			goto copcsr;
2051
2052		default:
2053			if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
2054				unsigned int cmpop;
2055				union ieee754sp fs, ft;
2056
2057				cmpop = MIPSInst_FUNC(ir) - fcmp_op;
2058				SPFROMREG(fs, MIPSInst_FS(ir));
2059				SPFROMREG(ft, MIPSInst_FT(ir));
2060				rv.w = ieee754sp_cmp(fs, ft,
2061					cmptab[cmpop & 0x7], cmpop & 0x8);
2062				rfmt = -1;
				if ((cmpop & 0x8) &&
				    ieee754_cxtest(IEEE754_INVALID_OPERATION))
2065					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
2066				else
2067					goto copcsr;
2068
			} else {
				return SIGILL;
			}
2071			break;
2072		}
2073		break;
2074	}
2075
2076	case d_fmt: {
2077		union ieee754dp fd, fs, ft;
2078		union {
2079			union ieee754dp(*b) (union ieee754dp, union ieee754dp);
2080			union ieee754dp(*u) (union ieee754dp);
2081		} handler;
2082
2083		switch (MIPSInst_FUNC(ir)) {
2084			/* binary ops */
2085		case fadd_op:
2086			MIPS_FPU_EMU_INC_STATS(add_d);
2087			handler.b = ieee754dp_add;
2088			goto dcopbop;
2089		case fsub_op:
2090			MIPS_FPU_EMU_INC_STATS(sub_d);
2091			handler.b = ieee754dp_sub;
2092			goto dcopbop;
2093		case fmul_op:
2094			MIPS_FPU_EMU_INC_STATS(mul_d);
2095			handler.b = ieee754dp_mul;
2096			goto dcopbop;
2097		case fdiv_op:
2098			MIPS_FPU_EMU_INC_STATS(div_d);
2099			handler.b = ieee754dp_div;
2100			goto dcopbop;
2101
2102			/* unary  ops */
2103		case fsqrt_op:
2104			if (!cpu_has_mips_2_3_4_5_r)
2105				return SIGILL;
2106
2107			MIPS_FPU_EMU_INC_STATS(sqrt_d);
2108			handler.u = ieee754dp_sqrt;
2109			goto dcopuop;
2110		/*
2111		 * Note that on some MIPS IV implementations such as the
2112		 * R5000 and R8000 the FSQRT and FRECIP instructions do not
		 * achieve full IEEE-754 accuracy; this emulator, however, does.
2114		 */
2115		case frsqrt_op:
2116			if (!cpu_has_mips_4_5_64_r2_r6)
2117				return SIGILL;
2118
2119			MIPS_FPU_EMU_INC_STATS(rsqrt_d);
2120			handler.u = fpemu_dp_rsqrt;
2121			goto dcopuop;
2122		case frecip_op:
2123			if (!cpu_has_mips_4_5_64_r2_r6)
2124				return SIGILL;
2125
2126			MIPS_FPU_EMU_INC_STATS(recip_d);
2127			handler.u = fpemu_dp_recip;
2128			goto dcopuop;
2129		case fmovc_op:
2130			if (!cpu_has_mips_4_5_r)
2131				return SIGILL;
2132
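			/*
			 * MOVF.D/MOVT.D: bit 0 of the ft field selects
			 * whether the move happens on a false (0) or true
			 * (1) condition code; on a mismatch fd is left
			 * untouched.
			 */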
2133			cond = fpucondbit[MIPSInst_FT(ir) >> 2];
2134			if (((ctx->fcr31 & cond) != 0) !=
2135				((MIPSInst_FT(ir) & 1) != 0))
2136				return 0;
2137			DPFROMREG(rv.d, MIPSInst_FS(ir));
2138			break;
2139		case fmovz_op:
2140			if (!cpu_has_mips_4_5_r)
2141				return SIGILL;
2142
2143			if (xcp->regs[MIPSInst_FT(ir)] != 0)
2144				return 0;
2145			DPFROMREG(rv.d, MIPSInst_FS(ir));
2146			break;
2147		case fmovn_op:
2148			if (!cpu_has_mips_4_5_r)
2149				return SIGILL;
2150
2151			if (xcp->regs[MIPSInst_FT(ir)] == 0)
2152				return 0;
2153			DPFROMREG(rv.d, MIPSInst_FS(ir));
2154			break;
2155
2156		case fseleqz_op:
2157			if (!cpu_has_mips_r6)
2158				return SIGILL;
2159
2160			MIPS_FPU_EMU_INC_STATS(seleqz_d);
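			/*
			 * SELEQZ.D: the result is fs if bit 0 of ft is
			 * clear, and zero otherwise.
			 */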
2161			DPFROMREG(rv.d, MIPSInst_FT(ir));
2162			if (rv.l & 0x1)
2163				rv.l = 0;
2164			else
2165				DPFROMREG(rv.d, MIPSInst_FS(ir));
2166			break;
2167
2168		case fselnez_op:
2169			if (!cpu_has_mips_r6)
2170				return SIGILL;
2171
2172			MIPS_FPU_EMU_INC_STATS(selnez_d);
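			/*
			 * SELNEZ.D: the result is fs if bit 0 of ft is
			 * set, and zero otherwise.
			 */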
2173			DPFROMREG(rv.d, MIPSInst_FT(ir));
2174			if (rv.l & 0x1)
2175				DPFROMREG(rv.d, MIPSInst_FS(ir));
2176			else
2177				rv.l = 0;
2178			break;
2179
2180		case fmaddf_op: {
2181			union ieee754dp ft, fs, fd;
2182
2183			if (!cpu_has_mips_r6)
2184				return SIGILL;
2185
2186			MIPS_FPU_EMU_INC_STATS(maddf_d);
2187			DPFROMREG(ft, MIPSInst_FT(ir));
2188			DPFROMREG(fs, MIPSInst_FS(ir));
2189			DPFROMREG(fd, MIPSInst_FD(ir));
2190			rv.d = ieee754dp_maddf(fd, fs, ft);
2191			goto copcsr;
2192		}
2193
2194		case fmsubf_op: {
2195			union ieee754dp ft, fs, fd;
2196
2197			if (!cpu_has_mips_r6)
2198				return SIGILL;
2199
2200			MIPS_FPU_EMU_INC_STATS(msubf_d);
2201			DPFROMREG(ft, MIPSInst_FT(ir));
2202			DPFROMREG(fs, MIPSInst_FS(ir));
2203			DPFROMREG(fd, MIPSInst_FD(ir));
2204			rv.d = ieee754dp_msubf(fd, fs, ft);
2205			goto copcsr;
2206		}
2207
2208		case frint_op: {
2209			union ieee754dp fs;
2210
2211			if (!cpu_has_mips_r6)
2212				return SIGILL;
2213
2214			MIPS_FPU_EMU_INC_STATS(rint_d);
2215			DPFROMREG(fs, MIPSInst_FS(ir));
2216			rv.d = ieee754dp_rint(fs);
2217			goto copcsr;
2218		}
2219
2220		case fclass_op: {
2221			union ieee754dp fs;
2222
2223			if (!cpu_has_mips_r6)
2224				return SIGILL;
2225
2226			MIPS_FPU_EMU_INC_STATS(class_d);
2227			DPFROMREG(fs, MIPSInst_FS(ir));
2228			rv.l = ieee754dp_2008class(fs);
2229			rfmt = l_fmt;
2230			goto copcsr;
2231		}
2232
2233		case fmin_op: {
2234			union ieee754dp fs, ft;
2235
2236			if (!cpu_has_mips_r6)
2237				return SIGILL;
2238
2239			MIPS_FPU_EMU_INC_STATS(min_d);
2240			DPFROMREG(ft, MIPSInst_FT(ir));
2241			DPFROMREG(fs, MIPSInst_FS(ir));
2242			rv.d = ieee754dp_fmin(fs, ft);
2243			goto copcsr;
2244		}
2245
2246		case fmina_op: {
2247			union ieee754dp fs, ft;
2248
2249			if (!cpu_has_mips_r6)
2250				return SIGILL;
2251
2252			MIPS_FPU_EMU_INC_STATS(mina_d);
2253			DPFROMREG(ft, MIPSInst_FT(ir));
2254			DPFROMREG(fs, MIPSInst_FS(ir));
2255			rv.d = ieee754dp_fmina(fs, ft);
2256			goto copcsr;
2257		}
2258
2259		case fmax_op: {
2260			union ieee754dp fs, ft;
2261
2262			if (!cpu_has_mips_r6)
2263				return SIGILL;
2264
2265			MIPS_FPU_EMU_INC_STATS(max_d);
2266			DPFROMREG(ft, MIPSInst_FT(ir));
2267			DPFROMREG(fs, MIPSInst_FS(ir));
2268			rv.d = ieee754dp_fmax(fs, ft);
2269			goto copcsr;
2270		}
2271
2272		case fmaxa_op: {
2273			union ieee754dp fs, ft;
2274
2275			if (!cpu_has_mips_r6)
2276				return SIGILL;
2277
2278			MIPS_FPU_EMU_INC_STATS(maxa_d);
2279			DPFROMREG(ft, MIPSInst_FT(ir));
2280			DPFROMREG(fs, MIPSInst_FS(ir));
2281			rv.d = ieee754dp_fmaxa(fs, ft);
2282			goto copcsr;
2283		}
2284
2285		case fabs_op:
2286			MIPS_FPU_EMU_INC_STATS(abs_d);
2287			handler.u = ieee754dp_abs;
2288			goto dcopuop;
2289
2290		case fneg_op:
2291			MIPS_FPU_EMU_INC_STATS(neg_d);
2292			handler.u = ieee754dp_neg;
2293			goto dcopuop;
2294
2295		case fmov_op:
2296			/* an easy one */
2297			MIPS_FPU_EMU_INC_STATS(mov_d);
2298			DPFROMREG(rv.d, MIPSInst_FS(ir));
2299			goto copcsr;
2300
2301			/* binary op on handler */
2302dcopbop:
2303			DPFROMREG(fs, MIPSInst_FS(ir));
2304			DPFROMREG(ft, MIPSInst_FT(ir));
2305
2306			rv.d = (*handler.b) (fs, ft);
2307			goto copcsr;
2308dcopuop:
2309			DPFROMREG(fs, MIPSInst_FS(ir));
2310			rv.d = (*handler.u) (fs);
2311			goto copcsr;
2312
2313		/*
2314		 * unary conv ops
2315		 */
2316		case fcvts_op:
2317			MIPS_FPU_EMU_INC_STATS(cvt_s_d);
2318			DPFROMREG(fs, MIPSInst_FS(ir));
2319			rv.s = ieee754sp_fdp(fs);
2320			rfmt = s_fmt;
2321			goto copcsr;
2322
2323		case fcvtd_op:
2324			return SIGILL;	/* not defined */
2325
2326		case fcvtw_op:
2327			MIPS_FPU_EMU_INC_STATS(cvt_w_d);
2328			DPFROMREG(fs, MIPSInst_FS(ir));
2329			rv.w = ieee754dp_tint(fs);	/* wrong */
2330			rfmt = w_fmt;
2331			goto copcsr;
2332
2333		case fround_op:
2334		case ftrunc_op:
2335		case fceil_op:
2336		case ffloor_op:
2337			if (!cpu_has_mips_2_3_4_5_r)
2338				return SIGILL;
2339
2340			if (MIPSInst_FUNC(ir) == fceil_op)
2341				MIPS_FPU_EMU_INC_STATS(ceil_w_d);
2342			if (MIPSInst_FUNC(ir) == ffloor_op)
2343				MIPS_FPU_EMU_INC_STATS(floor_w_d);
2344			if (MIPSInst_FUNC(ir) == fround_op)
2345				MIPS_FPU_EMU_INC_STATS(round_w_d);
2346			if (MIPSInst_FUNC(ir) == ftrunc_op)
2347				MIPS_FPU_EMU_INC_STATS(trunc_w_d);
2348
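			/*
			 * As in the single-precision case above, the
			 * function code doubles as the FCSR rounding mode.
			 */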
2349			oldrm = ieee754_csr.rm;
2350			DPFROMREG(fs, MIPSInst_FS(ir));
2351			ieee754_csr.rm = MIPSInst_FUNC(ir);
2352			rv.w = ieee754dp_tint(fs);
2353			ieee754_csr.rm = oldrm;
2354			rfmt = w_fmt;
2355			goto copcsr;
2356
2357		case fsel_op:
2358			if (!cpu_has_mips_r6)
2359				return SIGILL;
2360
2361			MIPS_FPU_EMU_INC_STATS(sel_d);
2362			DPFROMREG(fd, MIPSInst_FD(ir));
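			/* SEL.D: bit 0 of fd selects ft (1) or fs (0) */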
2363			if (fd.bits & 0x1)
2364				DPFROMREG(rv.d, MIPSInst_FT(ir));
2365			else
2366				DPFROMREG(rv.d, MIPSInst_FS(ir));
2367			break;
2368
2369		case fcvtl_op:
2370			if (!cpu_has_mips_3_4_5_64_r2_r6)
2371				return SIGILL;
2372
2373			MIPS_FPU_EMU_INC_STATS(cvt_l_d);
2374			DPFROMREG(fs, MIPSInst_FS(ir));
2375			rv.l = ieee754dp_tlong(fs);
2376			rfmt = l_fmt;
2377			goto copcsr;
2378
2379		case froundl_op:
2380		case ftruncl_op:
2381		case fceill_op:
2382		case ffloorl_op:
2383			if (!cpu_has_mips_3_4_5_64_r2_r6)
2384				return SIGILL;
2385
2386			if (MIPSInst_FUNC(ir) == fceill_op)
2387				MIPS_FPU_EMU_INC_STATS(ceil_l_d);
2388			if (MIPSInst_FUNC(ir) == ffloorl_op)
2389				MIPS_FPU_EMU_INC_STATS(floor_l_d);
2390			if (MIPSInst_FUNC(ir) == froundl_op)
2391				MIPS_FPU_EMU_INC_STATS(round_l_d);
2392			if (MIPSInst_FUNC(ir) == ftruncl_op)
2393				MIPS_FPU_EMU_INC_STATS(trunc_l_d);
2394
2395			oldrm = ieee754_csr.rm;
2396			DPFROMREG(fs, MIPSInst_FS(ir));
2397			ieee754_csr.rm = MIPSInst_FUNC(ir);
2398			rv.l = ieee754dp_tlong(fs);
2399			ieee754_csr.rm = oldrm;
2400			rfmt = l_fmt;
2401			goto copcsr;
2402
2403		default:
2404			if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
2405				unsigned int cmpop;
2406				union ieee754dp fs, ft;
2407
2408				cmpop = MIPSInst_FUNC(ir) - fcmp_op;
2409				DPFROMREG(fs, MIPSInst_FS(ir));
2410				DPFROMREG(ft, MIPSInst_FT(ir));
2411				rv.w = ieee754dp_cmp(fs, ft,
2412					cmptab[cmpop & 0x7], cmpop & 0x8);
2413				rfmt = -1;
				if ((cmpop & 0x8) &&
				    ieee754_cxtest(IEEE754_INVALID_OPERATION))
2418					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
2419				else
2420					goto copcsr;
2421
			} else {
				return SIGILL;
			}
2426			break;
2427		}
2428		break;
2429	}
2430
2431	case w_fmt: {
2432		union ieee754dp fs;
2433
2434		switch (MIPSInst_FUNC(ir)) {
2435		case fcvts_op:
2436			/* convert word to single precision real */
2437			MIPS_FPU_EMU_INC_STATS(cvt_s_w);
2438			SPFROMREG(fs, MIPSInst_FS(ir));
2439			rv.s = ieee754sp_fint(fs.bits);
2440			rfmt = s_fmt;
2441			goto copcsr;
2442		case fcvtd_op:
2443			/* convert word to double precision real */
2444			MIPS_FPU_EMU_INC_STATS(cvt_d_w);
2445			SPFROMREG(fs, MIPSInst_FS(ir));
2446			rv.d = ieee754dp_fint(fs.bits);
2447			rfmt = d_fmt;
2448			goto copcsr;
2449		default: {
2450			/* Emulating the new CMP.condn.fmt R6 instruction */
2451#define CMPOP_MASK	0x7
2452#define SIGN_BIT	(0x1 << 3)
2453#define PREDICATE_BIT	(0x1 << 4)
2454
2455			int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
2456			int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
2457			union ieee754sp fs, ft;
2458
			/*
			 * This is an R6-only instruction; bit 5 of the
			 * function field must be clear in the CMP.condn
			 * encodings.
			 */
2460			if (!cpu_has_mips_r6 ||
2461			    (MIPSInst_FUNC(ir) & 0x20))
2462				return SIGILL;
2463
2464			if (!sig) {
2465				if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
2466					switch (cmpop) {
2467					case 0:
2468					MIPS_FPU_EMU_INC_STATS(cmp_af_s);
2469					break;
2470					case 1:
2471					MIPS_FPU_EMU_INC_STATS(cmp_un_s);
2472					break;
2473					case 2:
2474					MIPS_FPU_EMU_INC_STATS(cmp_eq_s);
2475					break;
2476					case 3:
2477					MIPS_FPU_EMU_INC_STATS(cmp_ueq_s);
2478					break;
2479					case 4:
2480					MIPS_FPU_EMU_INC_STATS(cmp_lt_s);
2481					break;
2482					case 5:
2483					MIPS_FPU_EMU_INC_STATS(cmp_ult_s);
2484					break;
2485					case 6:
2486					MIPS_FPU_EMU_INC_STATS(cmp_le_s);
2487					break;
2488					case 7:
2489					MIPS_FPU_EMU_INC_STATS(cmp_ule_s);
2490					break;
2491					}
2492				} else {
2493					switch (cmpop) {
2494					case 1:
2495					MIPS_FPU_EMU_INC_STATS(cmp_or_s);
2496					break;
2497					case 2:
2498					MIPS_FPU_EMU_INC_STATS(cmp_une_s);
2499					break;
2500					case 3:
2501					MIPS_FPU_EMU_INC_STATS(cmp_ne_s);
2502					break;
2503					}
2504				}
2505			} else {
2506				if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
2507					switch (cmpop) {
2508					case 0:
2509					MIPS_FPU_EMU_INC_STATS(cmp_saf_s);
2510					break;
2511					case 1:
2512					MIPS_FPU_EMU_INC_STATS(cmp_sun_s);
2513					break;
2514					case 2:
2515					MIPS_FPU_EMU_INC_STATS(cmp_seq_s);
2516					break;
2517					case 3:
2518					MIPS_FPU_EMU_INC_STATS(cmp_sueq_s);
2519					break;
2520					case 4:
2521					MIPS_FPU_EMU_INC_STATS(cmp_slt_s);
2522					break;
2523					case 5:
2524					MIPS_FPU_EMU_INC_STATS(cmp_sult_s);
2525					break;
2526					case 6:
2527					MIPS_FPU_EMU_INC_STATS(cmp_sle_s);
2528					break;
2529					case 7:
2530					MIPS_FPU_EMU_INC_STATS(cmp_sule_s);
2531					break;
2532					}
2533				} else {
2534					switch (cmpop) {
2535					case 1:
2536					MIPS_FPU_EMU_INC_STATS(cmp_sor_s);
2537					break;
2538					case 2:
2539					MIPS_FPU_EMU_INC_STATS(cmp_sune_s);
2540					break;
2541					case 3:
2542					MIPS_FPU_EMU_INC_STATS(cmp_sne_s);
2543					break;
2544					}
2545				}
2546			}
2547
			/* CMP.condn.S is encoded as w_fmt, so fix up rfmt */
2549			rfmt = s_fmt;
2550			/* default to false */
2551			rv.w = 0;
2552
2553			/* CMP.condn.S */
2554			SPFROMREG(fs, MIPSInst_FS(ir));
2555			SPFROMREG(ft, MIPSInst_FT(ir));
2556
2557			/* positive predicates */
2558			if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
2559				if (ieee754sp_cmp(fs, ft, cmptab[cmpop],
2560						  sig))
2561				    rv.w = -1; /* true, all 1s */
2562				if ((sig) &&
2563				    ieee754_cxtest(IEEE754_INVALID_OPERATION))
2564					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
2565				else
2566					goto copcsr;
2567			} else {
2568				/* negative predicates */
2569				switch (cmpop) {
2570				case 1:
2571				case 2:
2572				case 3:
2573					if (ieee754sp_cmp(fs, ft,
2574							  negative_cmptab[cmpop],
2575							  sig))
2576						rv.w = -1; /* true, all 1s */
2577					if (sig &&
2578					    ieee754_cxtest(IEEE754_INVALID_OPERATION))
2579						rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
2580					else
2581						goto copcsr;
2582					break;
2583				default:
2584					/* Reserved R6 ops */
2585					return SIGILL;
2586				}
2587			}
2588			break;
2589			}
2590		}
2591		break;
2592	}
2593
2594	case l_fmt:
2595
2596		if (!cpu_has_mips_3_4_5_64_r2_r6)
2597			return SIGILL;
2598
2599		DIFROMREG(bits, MIPSInst_FS(ir));
2600
2601		switch (MIPSInst_FUNC(ir)) {
2602		case fcvts_op:
2603			/* convert long to single precision real */
2604			MIPS_FPU_EMU_INC_STATS(cvt_s_l);
2605			rv.s = ieee754sp_flong(bits);
2606			rfmt = s_fmt;
2607			goto copcsr;
2608		case fcvtd_op:
2609			/* convert long to double precision real */
2610			MIPS_FPU_EMU_INC_STATS(cvt_d_l);
2611			rv.d = ieee754dp_flong(bits);
2612			rfmt = d_fmt;
2613			goto copcsr;
2614		default: {
2615			/* Emulating the new CMP.condn.fmt R6 instruction */
2616			int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
2617			int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
2618			union ieee754dp fs, ft;
2619
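			/*
			 * R6 only; bit 5 of the function field must be
			 * clear in the CMP.condn encodings.
			 */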
2620			if (!cpu_has_mips_r6 ||
2621			    (MIPSInst_FUNC(ir) & 0x20))
2622				return SIGILL;
2623
2624			if (!sig) {
2625				if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
2626					switch (cmpop) {
2627					case 0:
2628					MIPS_FPU_EMU_INC_STATS(cmp_af_d);
2629					break;
2630					case 1:
2631					MIPS_FPU_EMU_INC_STATS(cmp_un_d);
2632					break;
2633					case 2:
2634					MIPS_FPU_EMU_INC_STATS(cmp_eq_d);
2635					break;
2636					case 3:
2637					MIPS_FPU_EMU_INC_STATS(cmp_ueq_d);
2638					break;
2639					case 4:
2640					MIPS_FPU_EMU_INC_STATS(cmp_lt_d);
2641					break;
2642					case 5:
2643					MIPS_FPU_EMU_INC_STATS(cmp_ult_d);
2644					break;
2645					case 6:
2646					MIPS_FPU_EMU_INC_STATS(cmp_le_d);
2647					break;
2648					case 7:
2649					MIPS_FPU_EMU_INC_STATS(cmp_ule_d);
2650					break;
2651					}
2652				} else {
2653					switch (cmpop) {
2654					case 1:
2655					MIPS_FPU_EMU_INC_STATS(cmp_or_d);
2656					break;
2657					case 2:
2658					MIPS_FPU_EMU_INC_STATS(cmp_une_d);
2659					break;
2660					case 3:
2661					MIPS_FPU_EMU_INC_STATS(cmp_ne_d);
2662					break;
2663					}
2664				}
2665			} else {
2666				if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
2667					switch (cmpop) {
2668					case 0:
2669					MIPS_FPU_EMU_INC_STATS(cmp_saf_d);
2670					break;
2671					case 1:
2672					MIPS_FPU_EMU_INC_STATS(cmp_sun_d);
2673					break;
2674					case 2:
2675					MIPS_FPU_EMU_INC_STATS(cmp_seq_d);
2676					break;
2677					case 3:
2678					MIPS_FPU_EMU_INC_STATS(cmp_sueq_d);
2679					break;
2680					case 4:
2681					MIPS_FPU_EMU_INC_STATS(cmp_slt_d);
2682					break;
2683					case 5:
2684					MIPS_FPU_EMU_INC_STATS(cmp_sult_d);
2685					break;
2686					case 6:
2687					MIPS_FPU_EMU_INC_STATS(cmp_sle_d);
2688					break;
2689					case 7:
2690					MIPS_FPU_EMU_INC_STATS(cmp_sule_d);
2691					break;
2692					}
2693				} else {
2694					switch (cmpop) {
2695					case 1:
2696					MIPS_FPU_EMU_INC_STATS(cmp_sor_d);
2697					break;
2698					case 2:
2699					MIPS_FPU_EMU_INC_STATS(cmp_sune_d);
2700					break;
2701					case 3:
2702					MIPS_FPU_EMU_INC_STATS(cmp_sne_d);
2703					break;
2704					}
2705				}
2706			}
2707
			/* CMP.condn.D is encoded as l_fmt, so fix up rfmt */
2709			rfmt = d_fmt;
2710			/* default to false */
2711			rv.l = 0;
2712
2713			/* CMP.condn.D */
2714			DPFROMREG(fs, MIPSInst_FS(ir));
2715			DPFROMREG(ft, MIPSInst_FT(ir));
2716
2717			/* positive predicates */
2718			if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
2719				if (ieee754dp_cmp(fs, ft,
2720						  cmptab[cmpop], sig))
2721				    rv.l = -1LL; /* true, all 1s */
2722				if (sig &&
2723				    ieee754_cxtest(IEEE754_INVALID_OPERATION))
2724					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
2725				else
2726					goto copcsr;
2727			} else {
2728				/* negative predicates */
2729				switch (cmpop) {
2730				case 1:
2731				case 2:
2732				case 3:
2733					if (ieee754dp_cmp(fs, ft,
2734							  negative_cmptab[cmpop],
2735							  sig))
2736						rv.l = -1LL; /* true, all 1s */
2737					if (sig &&
2738					    ieee754_cxtest(IEEE754_INVALID_OPERATION))
2739						rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
2740					else
2741						goto copcsr;
2742					break;
2743				default:
2744					/* Reserved R6 ops */
2745					return SIGILL;
2746				}
2747			}
2748			break;
2749			}
2750		}
2751		break;
2752
2753	default:
2754		return SIGILL;
2755	}
2756
2757	/*
	 * Update the FPU CSR (FCSR) for this operation.
2759	 * If an exception is required, generate a tidy SIGFPE exception,
2760	 * without updating the result register.
2761	 * Note: cause exception bits do not accumulate, they are rewritten
2762	 * for each op; only the flag/sticky bits accumulate.
2763	 */
2764	ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
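	/*
	 * The Cause bits sit five bits above the corresponding Enable bits
	 * in the FCSR, so shifting them down by 5 lines them up and the AND
	 * tells us whether any newly raised exception is actually enabled.
	 */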
2765	if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
2766		/*printk ("SIGFPE: FPU csr = %08x\n",ctx->fcr31); */
2767		return SIGFPE;
2768	}
2769
2770	/*
2771	 * Now we can safely write the result back to the register file.
2772	 */
2773	switch (rfmt) {
2774	case -1:
2775
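		/*
		 * Compare result: on MIPS IV and later the condition code
		 * number comes from bits 4:2 of the fd field, while older
		 * ISAs only have the single FCSR condition bit.
		 */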
2776		if (cpu_has_mips_4_5_r)
2777			cbit = fpucondbit[MIPSInst_FD(ir) >> 2];
2778		else
2779			cbit = FPU_CSR_COND;
2780		if (rv.w)
2781			ctx->fcr31 |= cbit;
2782		else
2783			ctx->fcr31 &= ~cbit;
2784		break;
2785
2786	case d_fmt:
2787		DPTOREG(rv.d, MIPSInst_FD(ir));
2788		break;
2789	case s_fmt:
2790		SPTOREG(rv.s, MIPSInst_FD(ir));
2791		break;
2792	case w_fmt:
2793		SITOREG(rv.w, MIPSInst_FD(ir));
2794		break;
2795	case l_fmt:
2796		if (!cpu_has_mips_3_4_5_64_r2_r6)
2797			return SIGILL;
2798
2799		DITOREG(rv.l, MIPSInst_FD(ir));
2800		break;
2801	default:
2802		return SIGILL;
2803	}
2804
2805	return 0;
2806}
2807
2808/*
2809 * Emulate FPU instructions.
2810 *
 * If we use FPU hardware, then we have typically been called to handle
2812 * an unimplemented operation, such as where an operand is a NaN or
2813 * denormalized.  In that case exit the emulation loop after a single
2814 * iteration so as to let hardware execute any subsequent instructions.
2815 *
2816 * If we have no FPU hardware or it has been disabled, then continue
2817 * emulating floating-point instructions until one of these conditions
2818 * has occurred:
2819 *
2820 * - a non-FPU instruction has been encountered,
2821 *
2822 * - an attempt to emulate has ended with a signal,
2823 *
2824 * - the ISA mode has been switched.
2825 *
2826 * We need to terminate the emulation loop if we got switched to the
2827 * MIPS16 mode, whether supported or not, so that we do not attempt
2828 * to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
 * Likewise we must terminate if we got switched to the microMIPS mode
 * while only the regular MIPS mode is supported, so that we do not
 * attempt to emulate a microMIPS instruction as a regular MIPS FPU
 * instruction, and if we got switched to the regular MIPS mode while
 * only the microMIPS mode is supported, so that we do not attempt to
 * emulate a regular MIPS instruction that should cause an Address
 * Error exception instead.
2835 * For simplicity we always terminate upon an ISA mode switch.
2836 */
2837int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
2838	int has_fpu, void __user **fault_addr)
2839{
2840	unsigned long oldepc, prevepc;
2841	struct mm_decoded_insn dec_insn;
2842	u16 instr[4];
2843	u16 *instr_ptr;
2844	int sig = 0;
2845
2846	/*
2847	 * Initialize context if it hasn't been used already, otherwise ensure
2848	 * it has been saved to struct thread_struct.
2849	 */
2850	if (!init_fp_ctx(current))
2851		lose_fpu(1);
2852
2853	oldepc = xcp->cp0_epc;
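	/*
	 * Keep emulating for as long as the EPC advances.  The checks at
	 * the bottom of the loop terminate earlier: after one instruction
	 * when an FPU is present, when emulation raised a signal, or when
	 * the ISA mode was switched.
	 */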
2854	do {
2855		prevepc = xcp->cp0_epc;
2856
2857		if (get_isa16_mode(prevepc) && cpu_has_mmips) {
2858			/*
2859			 * Get next 2 microMIPS instructions and convert them
2860			 * into 32-bit instructions.
2861			 */
2862			if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) ||
2863			    (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) ||
2864			    (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) ||
2865			    (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) {
2866				MIPS_FPU_EMU_INC_STATS(errors);
2867				return SIGBUS;
2868			}
2869			instr_ptr = instr;
2870
2871			/* Get first instruction. */
2872			if (mm_insn_16bit(*instr_ptr)) {
2873				/* Duplicate the half-word. */
2874				dec_insn.insn = (*instr_ptr << 16) |
2875					(*instr_ptr);
2876				/* 16-bit instruction. */
2877				dec_insn.pc_inc = 2;
2878				instr_ptr += 1;
2879			} else {
2880				dec_insn.insn = (*instr_ptr << 16) |
2881					*(instr_ptr+1);
2882				/* 32-bit instruction. */
2883				dec_insn.pc_inc = 4;
2884				instr_ptr += 2;
2885			}
2886			/* Get second instruction. */
2887			if (mm_insn_16bit(*instr_ptr)) {
2888				/* Duplicate the half-word. */
2889				dec_insn.next_insn = (*instr_ptr << 16) |
2890					(*instr_ptr);
2891				/* 16-bit instruction. */
2892				dec_insn.next_pc_inc = 2;
2893			} else {
2894				dec_insn.next_insn = (*instr_ptr << 16) |
2895					*(instr_ptr+1);
2896				/* 32-bit instruction. */
2897				dec_insn.next_pc_inc = 4;
2898			}
2899			dec_insn.micro_mips_mode = 1;
2900		} else {
2901			if ((get_user(dec_insn.insn,
2902			    (mips_instruction __user *) xcp->cp0_epc)) ||
2903			    (get_user(dec_insn.next_insn,
2904			    (mips_instruction __user *)(xcp->cp0_epc+4)))) {
2905				MIPS_FPU_EMU_INC_STATS(errors);
2906				return SIGBUS;
2907			}
2908			dec_insn.pc_inc = 4;
2909			dec_insn.next_pc_inc = 4;
2910			dec_insn.micro_mips_mode = 0;
2911		}
2912
2913		if ((dec_insn.insn == 0) ||
2914		   ((dec_insn.pc_inc == 2) &&
2915		   ((dec_insn.insn & 0xffff) == MM_NOP16)))
2916			xcp->cp0_epc += dec_insn.pc_inc;	/* Skip NOPs */
2917		else {
2918			/*
2919			 * The 'ieee754_csr' is an alias of ctx->fcr31.
2920			 * No need to copy ctx->fcr31 to ieee754_csr.
2921			 */
2922			sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr);
2923		}
2924
2925		if (has_fpu)
2926			break;
2927		if (sig)
2928			break;
2929		/*
2930		 * We have to check for the ISA bit explicitly here,
2931		 * because `get_isa16_mode' may return 0 if support
		 * for code compression has been globally disabled;
		 * without this check we might produce the wrong signal
		 * or even proceed successfully where we must not.
2935		 */
2936		if ((xcp->cp0_epc ^ prevepc) & 0x1)
2937			break;
2938
2939		cond_resched();
2940	} while (xcp->cp0_epc > prevepc);
2941
2942	/* SIGILL indicates a non-fpu instruction */
2943	if (sig == SIGILL && xcp->cp0_epc != oldepc)
2944		/* but if EPC has advanced, then ignore it */
2945		sig = 0;
2946
2947	return sig;
2948}
2949