/*
 * NOTE(review): the lines below are code-browser navigation chrome that
 * was captured along with the source; kept as a comment so the file
 * stays valid C.
 *   Home / History / Annotate / Line# / Navigate / Raw / Download
 *   only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/
 *   linux-2.6.36/arch/tile/include/asm/
 */
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 *   This program is free software; you can redistribute it and/or
5 *   modify it under the terms of the GNU General Public License
6 *   as published by the Free Software Foundation, version 2.
7 *
8 *   This program is distributed in the hope that it will be useful, but
9 *   WITHOUT ANY WARRANTY; without even the implied warranty of
10 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 *   NON INFRINGEMENT.  See the GNU General Public License for
12 *   more details.
13 */
14
15#ifndef _ASM_TILE_UACCESS_H
16#define _ASM_TILE_UACCESS_H
17
18/*
19 * User space memory access functions
20 */
21#include <linux/sched.h>
22#include <linux/mm.h>
23#include <asm-generic/uaccess-unaligned.h>
24#include <asm/processor.h>
25#include <asm/page.h>
26
/* Access-type arguments for access_ok(). */
#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
/* Build an mm_segment_t from a raw limit value (compound literal). */
#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })

/* KERNEL_DS covers the whole address space; USER_DS stops at PAGE_OFFSET. */
#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
/* The current task's address limit lives in its thread_info. */
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/* Compare two mm_segment_t values for equality. */
#define segment_eq(a, b) ((a).seg == (b).seg)
47
48#ifndef __tilegx__
49/*
50 * We could allow mapping all 16 MB at 0xfc000000, but we set up a
51 * special hack in arch_setup_additional_pages() to auto-create a mapping
52 * for the first 16 KB, and it would seem strange to have different
53 * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
54 */
55static inline int is_arch_mappable_range(unsigned long addr,
56					 unsigned long size)
57{
58	return (addr >= MEM_USER_INTRPT &&
59		addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
60		size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
61}
62#define is_arch_mappable_range is_arch_mappable_range
63#else
64#define is_arch_mappable_range(addr, size) 0
65#endif
66
67/*
68 * Test whether a block of memory is a valid user space address.
69 * Returns 0 if the range is valid, nonzero otherwise.
70 */
71int __range_ok(unsigned long addr, unsigned long size);
72
73/**
74 * access_ok: - Checks if a user space pointer is valid
75 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
76 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
77 *        to write to a block, it is always safe to read from it.
78 * @addr: User space pointer to start of block to check
79 * @size: Size of block to check
80 *
81 * Context: User context only.  This function may sleep.
82 *
83 * Checks if a pointer to a block of memory in user space is valid.
84 *
85 * Returns true (nonzero) if the memory block may be valid, false (zero)
86 * if it is definitely invalid.
87 *
88 * Note that, depending on architecture, this function probably just
89 * checks that the pointer is in the user space range - after calling
90 * this function, memory access functions may still return -EFAULT.
91 */
/*
 * Expands to nonzero iff [addr, addr + size) passes __range_ok().
 * The "type" argument is accepted for API compatibility but unused here.
 */
#define access_ok(type, addr, size) ({ \
	__chk_user_ptr(addr); \
	likely(__range_ok((unsigned long)(addr), (size)) == 0);	\
})
96
97/*
98 * The exception table consists of pairs of addresses: the first is the
99 * address of an instruction that is allowed to fault, and the second is
100 * the address at which the program should continue.  No registers are
101 * modified, so it is entirely up to the continuation code to figure out
102 * what to do.
103 *
104 * All the routines below use bits of fixup code that are out of line
105 * with the main instruction path.  This means when everything is well,
106 * we don't even have to jump over them.  Further, they do not intrude
107 * on our cache or tlb entries.
108 */
109
/* One fixup pair: faulting instruction address -> continuation address. */
struct exception_table_entry {
	unsigned long insn, fixup;
};
113
114extern int fixup_exception(struct pt_regs *regs);
115
116/*
117 * We return the __get_user_N function results in a structure,
118 * thus in r0 and r1.  If "err" is zero, "val" is the result
119 * of the read; otherwise, "err" is -EFAULT.
120 *
121 * We rarely need 8-byte values on a 32-bit architecture, but
122 * we size the structure to accommodate.  In practice, for the
123 * the smaller reads, we can zero the high word for free, and
124 * the caller will ignore it by virtue of casting anyway.
125 */
/* Two-register return of the __get_user_N helpers (r0/r1, see above). */
struct __get_user {
	unsigned long long val;	/* value read; high word zeroed for small reads */
	int err;		/* 0 on success, -EFAULT on fault */
};
130
131extern struct __get_user __get_user_1(const void __user *);
132extern struct __get_user __get_user_2(const void __user *);
133extern struct __get_user __get_user_4(const void __user *);
134extern struct __get_user __get_user_8(const void __user *);
135extern int __put_user_1(long, void __user *);
136extern int __put_user_2(long, void __user *);
137extern int __put_user_4(long, void __user *);
138extern int __put_user_8(long long, void __user *);
139
140/* Unimplemented routines to cause linker failures */
141extern struct __get_user __get_user_bad(void);
142extern int __put_user_bad(void);
143
144/*
145 * Careful: we have to cast the result to the type of the pointer
146 * for sign reasons.
147 */
148/**
149 * __get_user: - Get a simple variable from user space, with less checking.
150 * @x:   Variable to store result.
151 * @ptr: Source address, in user space.
152 *
153 * Context: User context only.  This function may sleep.
154 *
155 * This macro copies a single simple variable from user space to kernel
156 * space.  It supports simple types like char and int, but not larger
157 * data types like structures or arrays.
158 *
159 * @ptr must have pointer-to-simple-variable type, and the result of
160 * dereferencing @ptr must be assignable to @x without a cast.
161 *
162 * Returns zero on success, or -EFAULT on error.
163 * On error, the variable @x is set to zero.
164 *
165 * Caller must check the pointer with access_ok() before calling this
166 * function.
167 */
/*
 * Dispatch on the access size to the out-of-line __get_user_N helper,
 * which hands back a { val, err } pair.  The final double cast works
 * because "*__gu_addr - *__gu_addr" has integer type even when
 * *__gu_addr is a pointer (cf. the __put_user case-8 note below), so
 * the compiler never sees a questionable integer-to-pointer cast.
 */
#define __get_user(x, ptr)						\
({	struct __get_user __ret;					\
	__typeof__(*(ptr)) const __user *__gu_addr = (ptr);		\
	__chk_user_ptr(__gu_addr);					\
	switch (sizeof(*(__gu_addr))) {					\
	case 1:								\
		__ret = __get_user_1(__gu_addr);			\
		break;							\
	case 2:								\
		__ret = __get_user_2(__gu_addr);			\
		break;							\
	case 4:								\
		__ret = __get_user_4(__gu_addr);			\
		break;							\
	case 8:								\
		__ret = __get_user_8(__gu_addr);			\
		break;							\
	default:							\
		__ret = __get_user_bad();				\
		break;							\
	}								\
	(x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \
	  __ret.val;			                                \
	__ret.err;							\
})
193
194/**
195 * __put_user: - Write a simple value into user space, with less checking.
196 * @x:   Value to copy to user space.
197 * @ptr: Destination address, in user space.
198 *
199 * Context: User context only.  This function may sleep.
200 *
201 * This macro copies a single simple value from kernel space to user
202 * space.  It supports simple types like char and int, but not larger
203 * data types like structures or arrays.
204 *
205 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
206 * to the result of dereferencing @ptr.
207 *
208 * Caller must check the pointer with access_ok() before calling this
209 * function.
210 *
211 * Returns zero on success, or -EFAULT on error.
212 *
213 * Implementation note: The "case 8" logic of casting to the type of
214 * the result of subtracting the value from itself is basically a way
215 * of keeping all integer types the same, but casting any pointers to
216 * ptrdiff_t, i.e. also an integer type.  This way there are no
217 * questionable casts seen by the compiler on an ILP32 platform.
218 */
/*
 * Dispatch on the value size to the out-of-line __put_user_N helper.
 * Sizes 1/2/4 simply widen the value to long; size 8 uses the
 * self-subtraction cast described in the comment above to coerce any
 * pointer value to an integer type first.
 */
#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	typeof(*__pu_addr) __pu_val = (x);				\
	__chk_user_ptr(__pu_addr);					\
	switch (sizeof(__pu_val)) {					\
	case 1:								\
		__pu_err = __put_user_1((long)__pu_val, __pu_addr);	\
		break;							\
	case 2:								\
		__pu_err = __put_user_2((long)__pu_val, __pu_addr);	\
		break;							\
	case 4:								\
		__pu_err = __put_user_4((long)__pu_val, __pu_addr);	\
		break;							\
	case 8:								\
		__pu_err =						\
		  __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\
			__pu_addr);					\
		break;							\
	default:							\
		__pu_err = __put_user_bad();				\
		break;							\
	}								\
	__pu_err;							\
})
246
247/*
248 * The versions of get_user and put_user without initial underscores
249 * check the address of their arguments to make sure they are not
250 * in kernel space.
251 */
/* Checked write: verify the destination range, then __put_user(). */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__Pu_addr = (ptr);			\
	access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?	\
		__put_user((x), (__Pu_addr)) :				\
		-EFAULT;						\
})
259
/* Checked read: on a bad range, zero @x and return -EFAULT. */
#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) const __user *__Gu_addr = (ptr);		\
	access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?	\
		__get_user((x), (__Gu_addr)) :				\
		((x) = 0, -EFAULT);					\
})
267
268/**
269 * __copy_to_user() - copy data into user space, with less checking.
270 * @to:   Destination address, in user space.
271 * @from: Source address, in kernel space.
272 * @n:    Number of bytes to copy.
273 *
274 * Context: User context only.  This function may sleep.
275 *
276 * Copy data from kernel space to user space.  Caller must check
277 * the specified block with access_ok() before calling this function.
278 *
279 * Returns number of bytes that could not be copied.
280 * On success, this will be zero.
281 *
282 * An alternate version - __copy_to_user_inatomic() - is designed
283 * to be called from atomic context, typically bracketed by calls
284 * to pagefault_disable() and pagefault_enable().
285 */
286extern unsigned long __must_check __copy_to_user_inatomic(
287	void __user *to, const void *from, unsigned long n);
288
289static inline unsigned long __must_check
290__copy_to_user(void __user *to, const void *from, unsigned long n)
291{
292	might_fault();
293	return __copy_to_user_inatomic(to, from, n);
294}
295
296static inline unsigned long __must_check
297copy_to_user(void __user *to, const void *from, unsigned long n)
298{
299	if (access_ok(VERIFY_WRITE, to, n))
300		n = __copy_to_user(to, from, n);
301	return n;
302}
303
304/**
305 * __copy_from_user() - copy data from user space, with less checking.
306 * @to:   Destination address, in kernel space.
307 * @from: Source address, in user space.
308 * @n:    Number of bytes to copy.
309 *
310 * Context: User context only.  This function may sleep.
311 *
312 * Copy data from user space to kernel space.  Caller must check
313 * the specified block with access_ok() before calling this function.
314 *
315 * Returns number of bytes that could not be copied.
316 * On success, this will be zero.
317 *
318 * If some data could not be copied, this function will pad the copied
319 * data to the requested size using zero bytes.
320 *
321 * An alternate version - __copy_from_user_inatomic() - is designed
322 * to be called from atomic context, typically bracketed by calls
323 * to pagefault_disable() and pagefault_enable().  This version
324 * does *NOT* pad with zeros.
325 */
326extern unsigned long __must_check __copy_from_user_inatomic(
327	void *to, const void __user *from, unsigned long n);
328extern unsigned long __must_check __copy_from_user_zeroing(
329	void *to, const void __user *from, unsigned long n);
330
331static inline unsigned long __must_check
332__copy_from_user(void *to, const void __user *from, unsigned long n)
333{
334       might_fault();
335       return __copy_from_user_zeroing(to, from, n);
336}
337
338static inline unsigned long __must_check
339_copy_from_user(void *to, const void __user *from, unsigned long n)
340{
341	if (access_ok(VERIFY_READ, from, n))
342		n = __copy_from_user(to, from, n);
343	else
344		memset(to, 0, n);
345	return n;
346}
347
#ifdef CONFIG_DEBUG_COPY_FROM_USER
/* Reached (and warns) only when the compiler proves the size is bad. */
extern void copy_from_user_overflow(void)
	__compiletime_warning("copy_from_user() size is not provably correct");

/*
 * copy_from_user() with a compile-time destination-size check: when the
 * compiler knows the object size of @to (sz != -1) and it is smaller
 * than @n, emit a warning instead of copying.
 */
static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);	/* -1 when size unknown */

	/*
	 * sz == -1 is tested first, so the signed/unsigned comparison
	 * "sz >= n" only ever sees sz >= 0.
	 */
	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
	else
		copy_from_user_overflow();

	return n;
}
#else
#define copy_from_user _copy_from_user
#endif
368
369#ifdef __tilegx__
370/**
371 * __copy_in_user() - copy data within user space, with less checking.
372 * @to:   Destination address, in user space.
373 * @from: Source address, in kernel space.
374 * @n:    Number of bytes to copy.
375 *
376 * Context: User context only.  This function may sleep.
377 *
378 * Copy data from user space to user space.  Caller must check
379 * the specified blocks with access_ok() before calling this function.
380 *
381 * Returns number of bytes that could not be copied.
382 * On success, this will be zero.
383 */
384extern unsigned long __copy_in_user_inatomic(
385	void __user *to, const void __user *from, unsigned long n);
386
387static inline unsigned long __must_check
388__copy_in_user(void __user *to, const void __user *from, unsigned long n)
389{
390	might_sleep();
391	return __copy_in_user_inatomic(to, from, n);
392}
393
394static inline unsigned long __must_check
395copy_in_user(void __user *to, const void __user *from, unsigned long n)
396{
397	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
398		n = __copy_in_user(to, from, n);
399	return n;
400}
401#endif
402
403
404/**
405 * strlen_user: - Get the size of a string in user space.
406 * @str: The string to measure.
407 *
408 * Context: User context only.  This function may sleep.
409 *
410 * Get the size of a NUL-terminated string in user space.
411 *
412 * Returns the size of the string INCLUDING the terminating NUL.
413 * On exception, returns 0.
414 *
415 * If there is a limit on the length of a valid string, you may wish to
416 * consider using strnlen_user() instead.
417 */
extern long strnlen_user_asm(const char __user *str, long n);
/*
 * Bounded length of a user-space string.  Per the kernel-doc above:
 * returns the length including the NUL, or 0 on exception.
 */
static inline long __must_check strnlen_user(const char __user *str, long n)
{
	might_fault();
	return strnlen_user_asm(str, n);
}
/* Unbounded variant: cap the scan at LONG_MAX. */
#define strlen_user(str) strnlen_user(str, LONG_MAX)
425
426/**
427 * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
428 * @dst:   Destination address, in kernel space.  This buffer must be at
429 *         least @count bytes long.
430 * @src:   Source address, in user space.
431 * @count: Maximum number of bytes to copy, including the trailing NUL.
432 *
433 * Copies a NUL-terminated string from userspace to kernel space.
434 * Caller must check the specified block with access_ok() before calling
435 * this function.
436 *
437 * On success, returns the length of the string (not including the trailing
438 * NUL).
439 *
440 * If access to userspace fails, returns -EFAULT (some data may have been
441 * copied).
442 *
443 * If @count is smaller than the length of the string, copies @count bytes
444 * and returns @count.
445 */
446extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
447static inline long __must_check __strncpy_from_user(
448	char *dst, const char __user *src, long count)
449{
450	might_fault();
451	return strncpy_from_user_asm(dst, src, count);
452}
453static inline long __must_check strncpy_from_user(
454	char *dst, const char __user *src, long count)
455{
456	if (access_ok(VERIFY_READ, src, 1))
457		return __strncpy_from_user(dst, src, count);
458	return -EFAULT;
459}
460
461/**
462 * clear_user: - Zero a block of memory in user space.
463 * @mem:   Destination address, in user space.
464 * @len:   Number of bytes to zero.
465 *
466 * Zero a block of memory in user space.
467 *
468 * Returns number of bytes that could not be cleared.
469 * On success, this will be zero.
470 */
471extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
472static inline unsigned long __must_check __clear_user(
473	void __user *mem, unsigned long len)
474{
475	might_fault();
476	return clear_user_asm(mem, len);
477}
478static inline unsigned long __must_check clear_user(
479	void __user *mem, unsigned long len)
480{
481	if (access_ok(VERIFY_WRITE, mem, len))
482		return __clear_user(mem, len);
483	return len;
484}
485
486/**
487 * flush_user: - Flush a block of memory in user space from cache.
488 * @mem:   Destination address, in user space.
489 * @len:   Number of bytes to flush.
490 *
491 * Returns number of bytes that could not be flushed.
492 * On success, this will be zero.
493 */
494extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
495static inline unsigned long __must_check __flush_user(
496	void __user *mem, unsigned long len)
497{
498	int retval;
499
500	might_fault();
501	retval = flush_user_asm(mem, len);
502	mb_incoherent();
503	return retval;
504}
505
506static inline unsigned long __must_check flush_user(
507	void __user *mem, unsigned long len)
508{
509	if (access_ok(VERIFY_WRITE, mem, len))
510		return __flush_user(mem, len);
511	return len;
512}
513
514/**
515 * inv_user: - Invalidate a block of memory in user space from cache.
516 * @mem:   Destination address, in user space.
517 * @len:   Number of bytes to invalidate.
518 *
519 * Returns number of bytes that could not be invalidated.
520 * On success, this will be zero.
521 *
522 * Note that on Tile64, the "inv" operation is in fact a
523 * "flush and invalidate", so cache write-backs will occur prior
524 * to the cache being marked invalid.
525 */
526extern unsigned long inv_user_asm(void __user *mem, unsigned long len);
527static inline unsigned long __must_check __inv_user(
528	void __user *mem, unsigned long len)
529{
530	int retval;
531
532	might_fault();
533	retval = inv_user_asm(mem, len);
534	mb_incoherent();
535	return retval;
536}
537static inline unsigned long __must_check inv_user(
538	void __user *mem, unsigned long len)
539{
540	if (access_ok(VERIFY_WRITE, mem, len))
541		return __inv_user(mem, len);
542	return len;
543}
544
545/**
546 * finv_user: - Flush-inval a block of memory in user space from cache.
547 * @mem:   Destination address, in user space.
548 * @len:   Number of bytes to invalidate.
549 *
550 * Returns number of bytes that could not be flush-invalidated.
551 * On success, this will be zero.
552 */
553extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
554static inline unsigned long __must_check __finv_user(
555	void __user *mem, unsigned long len)
556{
557	int retval;
558
559	might_fault();
560	retval = finv_user_asm(mem, len);
561	mb_incoherent();
562	return retval;
563}
564static inline unsigned long __must_check finv_user(
565	void __user *mem, unsigned long len)
566{
567	if (access_ok(VERIFY_WRITE, mem, len))
568		return __finv_user(mem, len);
569	return len;
570}
571
572#endif /* _ASM_TILE_UACCESS_H */
573