/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <linux/module.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/sigcontext.h>
#include <asm/processor.h>
#include <asm/math_emu.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/i387.h>
#include <asm/user.h>

#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
#else
# define save_i387_xstate_ia32		save_i387_xstate
# define restore_i387_xstate_ia32	restore_i387_xstate
# define _fpstate_ia32		_fpstate
# define _xstate_ia32		_xstate
# define sig_xstate_ia32_size   sig_xstate_size
# define fx_sw_reserved_ia32	fx_sw_reserved
# define user_i387_ia32_struct	user_i387_struct
# define user32_fxsr_struct	user_fxsr_struct
#endif

#ifdef CONFIG_MATH_EMULATION
# define HAVE_HWFP		(boot_cpu_data.hard_math)
#else
# define HAVE_HWFP		1
#endif

static unsigned int		mxcsr_feature_mask __read_mostly = 0xffffffffu;
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);
unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32);
static struct i387_fxsave_struct fx_scratch __cpuinitdata;

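/*
 * Probe which MXCSR bits the CPU actually supports: FXSAVE fills in
 * mxcsr_mask, and a value of zero means the CPU predates that field,
 * in which case the architectural default mask 0x0000ffbf applies.
 */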
void __cpuinit mxcsr_feature_mask_init(void)
{
	unsigned long mask = 0;

	clts();
	if (cpu_has_fxsr) {
		memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
		asm volatile("fxsave %0" : : "m" (fx_scratch));
		mask = fx_scratch.mxcsr_mask;
		if (mask == 0)
			mask = 0x0000ffbf;
	}
	mxcsr_feature_mask &= mask;
	stts();
}

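/*
 * Select the size of the per-thread FPU state area according to what the
 * hardware provides: soft-FPU emulation, FXSAVE, or legacy FSAVE layout.
 */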
static void __cpuinit init_thread_xstate(void)
{
	/*
	 * Note that xstate_size might be overwritten later during
	 * xsave_init().
	 */

	if (!HAVE_HWFP) {
		xstate_size = sizeof(struct i387_soft_struct);
		return;
	}

	if (cpu_has_fxsr)
		xstate_size = sizeof(struct i387_fxsave_struct);
#ifdef CONFIG_X86_32
	else
		xstate_size = sizeof(struct i387_fsave_struct);
#endif
}

#ifdef CONFIG_X86_64
/*
 * Called at bootup to set up the initial FPU state that is later cloned
 * into all processes.
 */

void __cpuinit fpu_init(void)
{
	unsigned long oldcr0 = read_cr0();

	set_in_cr4(X86_CR4_OSFXSR);
	set_in_cr4(X86_CR4_OSXMMEXCPT);

	write_cr0(oldcr0 & ~(X86_CR0_TS|X86_CR0_EM)); /* clear TS and EM */

	if (!smp_processor_id())
		init_thread_xstate();

	mxcsr_feature_mask_init();
	/* clean state in init */
	current_thread_info()->status = 0;
	clear_used_math();
}

#else	/* CONFIG_X86_64 */

void __cpuinit fpu_init(void)
{
	if (!smp_processor_id())
		init_thread_xstate();
}

#endif	/* CONFIG_X86_64 */

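/*
 * Reset an already-allocated FPU state area to power-on defaults:
 * default control word, all tags empty, and the default MXCSR when
 * SSE is available.
 */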
void fpu_finit(struct fpu *fpu)
{
#ifdef CONFIG_X86_32
	if (!HAVE_HWFP) {
		finit_soft_fpu(&fpu->state->soft);
		return;
	}
#endif

	if (cpu_has_fxsr) {
		struct i387_fxsave_struct *fx = &fpu->state->fxsave;

		memset(fx, 0, xstate_size);
		fx->cwd = 0x37f;
		if (cpu_has_xmm)
			fx->mxcsr = MXCSR_DEFAULT;
	} else {
		struct i387_fsave_struct *fp = &fpu->state->fsave;
		memset(fp, 0, xstate_size);
		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpu_finit);

/*
 * The _current_ task is using the FPU for the first time, so initialize
 * it, set the mxcsr to its default value at reset if we support XMM
 * instructions, and then remember that the current task has used the FPU.
 */
int init_fpu(struct task_struct *tsk)
{
	int ret;

	if (tsk_used_math(tsk)) {
		if (HAVE_HWFP && tsk == current)
			unlazy_fpu(tsk);
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpu_alloc(&tsk->thread.fpu);
	if (ret)
		return ret;

	fpu_finit(&tsk->thread.fpu);

	set_stopped_child_used_math(tsk);
	return 0;
}

/*
 * The xstateregs_active() routine is the same as the fpregs_active()
 * routine, as the "regset->n" for the xstate regset will be updated based
 * on the feature capabilities supported by xsave.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}

int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
}

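/*
 * ptrace/regset read of the target's full FXSAVE image.
 */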
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpu.state->fxsave, 0, -1);
}

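/*
 * ptrace/regset write of the FXSAVE image.  Reserved MXCSR bits are
 * cleared, and with xsave the FP/SSE bits are set in xstate_bv so the
 * new contents are not treated as being in the init state.
 */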
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	return ret;
}

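/*
 * ptrace/regset read of the complete xsave area.
 */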
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	/*
	 * Copy the 48 bytes defined by software first into the xstate
	 * memory layout in the thread struct, so that we can copy the
	 * entire xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&target->thread.fpu.state->fxsave.sw_reserved,
	       xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));

	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.state->xsave, 0, -1);
	return ret;
}

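/*
 * ptrace/regset write of the complete xsave area.  xstate_bv is limited
 * to the features known to the kernel and the reserved words of the
 * xsave header are cleared.
 */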
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct xsave_hdr_struct *xsave_hdr;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->xsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	xsave_hdr = &target->thread.fpu.state->xsave.xsave_hdr;

	xsave_hdr->xstate_bv &= pcntxt_mask;
	/*
	 * These bits must be zero.
	 */
	xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;

	return ret;
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16);
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

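/*
 * Expand the 8-bit FXSR tag word into the 16-bit i387 form by
 * classifying each in-use register (valid, zero, special or empty)
 * from its saved contents.
 */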
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}

/*
 * FXSR floating point environment conversions.
 */

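/*
 * Translate the in-kernel FXSAVE image into the legacy 32-bit
 * user_i387_ia32_struct layout expected by ptrace and signal frames.
 */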
static void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	if (tsk == current) {
		/*
		 * These should actually be ds/cs at FPU exception time, but
		 * that information is not available in 64-bit mode.
		 */
		asm("mov %%ds, %[fos]" : [fos] "=r" (env->fos));
		asm("mov %%cs, %[fcs]" : [fcs] "=r" (env->fcs));
	} else {
		struct pt_regs *regs = task_pt_regs(tsk);

		env->fos = 0xffff0000 | tsk->thread.ds;
		env->fcs = regs->cs;
	}
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}

static void convert_to_fxsr(struct task_struct *tsk,
			    const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}

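/*
 * ptrace/regset read of the legacy i387 state, converted from the
 * FXSAVE image when the CPU has FXSR, or copied directly from the
 * FSAVE/soft-FPU state otherwise.
 */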
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if (!HAVE_HWFP)
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr) {
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu.state->fsave, 0,
					   -1);
	}

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}

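/*
 * ptrace/regset write of the legacy i387 state, converted into the
 * FXSAVE image when the CPU has FXSR.
 */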
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	if (!HAVE_HWFP)
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr) {
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu.state->fsave, 0, -1);
	}

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
	return ret;
}

/*
 * Signal frame handlers.
 */

static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf)
{
	struct task_struct *tsk = current;
	struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave;

	fp->status = fp->swd;
	if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct)))
		return -1;
	return 1;
}

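/*
 * Write the legacy i387 environment followed by the raw FXSAVE image to
 * the 32-bit signal frame; X86_FXSR_MAGIC marks the extended area as
 * present.
 */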
static int save_i387_fxsave(struct _fpstate_ia32 __user *buf)
{
	struct task_struct *tsk = current;
	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
	struct user_i387_ia32_struct env;
	int err = 0;

	convert_from_fxsr(&env, tsk);
	if (__copy_to_user(buf, &env, sizeof(env)))
		return -1;

	err |= __put_user(fx->swd, &buf->status);
	err |= __put_user(X86_FXSR_MAGIC, &buf->magic);
	if (err)
		return -1;

	if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size))
		return -1;
	return 1;
}

static int save_i387_xsave(void __user *buf)
{
	struct task_struct *tsk = current;
	struct _fpstate_ia32 __user *fx = buf;
	int err = 0;

	sanitize_i387_state(tsk);

	/*
	 * For legacy compatibility, we always set the FP/SSE bits in the
	 * bit vector while saving the state to the user context.
	 * This lets us capture any changes (made during sigreturn) to the
	 * FP/SSE bits by legacy applications which don't touch xstate_bv
	 * in the xsave header.
	 *
	 * xsave-aware applications can change xstate_bv in the xsave
	 * header as well as any contents in the memory layout.
	 * xrestore, as part of sigreturn, will capture all the changes.
	 */
	tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	if (save_i387_fxsave(fx) < 0)
		return -1;

	err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32,
			     sizeof(struct _fpx_sw_bytes));
	err |= __put_user(FP_XSTATE_MAGIC2,
			  (__u32 __user *) (buf + sig_xstate_ia32_size
					    - FP_XSTATE_MAGIC2_SIZE));
	if (err)
		return -1;

	return 1;
}

int save_i387_xstate_ia32(void __user *buf)
{
	struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;
	struct task_struct *tsk = current;

	if (!used_math())
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size))
		return -EACCES;
	/*
	 * This will cause a "finit" to be triggered by the next
	 * attempted FPU operation by the 'current' process.
	 */
	clear_used_math();

	if (!HAVE_HWFP) {
		return fpregs_soft_get(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, fp) ? -1 : 1;
	}

	unlazy_fpu(tsk);

	if (cpu_has_xsave)
		return save_i387_xsave(fp);
	if (cpu_has_fxsr)
		return save_i387_fxsave(fp);
	else
		return save_i387_fsave(fp);
}

static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
{
	struct task_struct *tsk = current;

	return __copy_from_user(&tsk->thread.fpu.state->fsave, buf,
				sizeof(struct i387_fsave_struct));
}

static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
			       unsigned int size)
{
	struct task_struct *tsk = current;
	struct user_i387_ia32_struct env;
	int err;

	err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0],
			       size);
	/* mxcsr reserved bits must be masked to zero for security reasons */
	tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
	if (err || __copy_from_user(&env, buf, sizeof(env)))
		return 1;
	convert_to_fxsr(tsk, &env);

	return 0;
}

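/*
 * Restore from a signal frame that may carry extended state: validate
 * the software reserved bytes, restore the FXSAVE portion, and fix up
 * xstate_bv.  Fall back to FP/SSE only if the extended state
 * information is missing or inconsistent.
 */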
static int restore_i387_xsave(void __user *buf)
{
	struct _fpx_sw_bytes fx_sw_user;
	struct _fpstate_ia32 __user *fx_user =
			((struct _fpstate_ia32 __user *) buf);
	struct i387_fxsave_struct __user *fx =
		(struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0];
	struct xsave_hdr_struct *xsave_hdr =
				&current->thread.fpu.state->xsave.xsave_hdr;
	u64 mask;
	int err;

	if (check_for_xstate(fx, buf, &fx_sw_user))
		goto fx_only;

	mask = fx_sw_user.xstate_bv;

	err = restore_i387_fxsave(buf, fx_sw_user.xstate_size);

	xsave_hdr->xstate_bv &= pcntxt_mask;
	/*
	 * These bits must be zero.
	 */
	xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0;

	/*
	 * Init the state that is enabled by the OS but not present in
	 * the memory layout.
	 */
	mask = ~(pcntxt_mask & ~mask);
	xsave_hdr->xstate_bv &= mask;

	return err;
fx_only:
	/*
	 * Couldn't find the extended state information in the memory
	 * layout. Restore the FP/SSE and init the other extended state
	 * enabled by the OS.
	 */
	xsave_hdr->xstate_bv = XSTATE_FPSSE;
	return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct));
}

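/*
 * Restore the FPU state from a 32-bit signal frame (or drop it entirely
 * when no frame is supplied), picking the xsave, fxsave, fsave or
 * soft-FPU path to match the hardware.
 */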
int restore_i387_xstate_ia32(void __user *buf)
{
	int err;
	struct task_struct *tsk = current;
	struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf;

	if (HAVE_HWFP)
		clear_fpu(tsk);

	if (!buf) {
		if (used_math()) {
			clear_fpu(tsk);
			clear_used_math();
		}

		return 0;
	} else
		if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size))
			return -EACCES;

	if (!used_math()) {
		err = init_fpu(tsk);
		if (err)
			return err;
	}

	if (HAVE_HWFP) {
		if (cpu_has_xsave)
			err = restore_i387_xsave(buf);
		else if (cpu_has_fxsr)
			err = restore_i387_fxsave(fp, sizeof(struct
							   i387_fxsave_struct));
		else
			err = restore_i387_fsave(fp);
	} else {
		err = fpregs_soft_set(current, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      NULL, fp) != 0;
	}
	set_used_math();

	return err;
}

/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
	struct task_struct *tsk = current;
	int fpvalid;

	fpvalid = !!used_math();
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      fpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */