#define __SYSCALL_LL_E(x) (x)
#define __SYSCALL_LL_O(x) (x)

/* __scc zero-extends pointer arguments via unsigned long and passes all
 * other arguments as long long. */
#define __scc(X) sizeof(1?(X):0ULL) < 8 ? (unsigned long) (X) : (long long) (X)
typedef long long syscall_arg_t;
/* Userspace timespec on x32: tv_nsec is a 32-bit long. */
struct __timespec { long long tv_sec; long tv_nsec; };
/* Layout the 64-bit kernel reads: both fields are 64-bit. */
struct __timespec_kernel { long long tv_sec; long long tv_nsec; };
#define __tsc(X) ((struct __timespec*)(unsigned long)(X))
/* Copy a userspace timespec argument into the on-stack kernel layout and
 * redirect the syscall argument to that copy. */
#define __fixup(X) do { if(X) { \
	ts->tv_sec = __tsc(X)->tv_sec; \
	ts->tv_nsec = __tsc(X)->tv_nsec; \
	(X) = (unsigned long)ts; } } while(0)
#define __fixup_case_2 \
	case SYS_nanosleep: \
		__fixup(a1); break; \
	case SYS_clock_settime: \
		__fixup(a2); break;
#define __fixup_case_3 \
	case SYS_clock_nanosleep: case SYS_rt_sigtimedwait: case SYS_ppoll: \
		__fixup(a3); break; \
	case SYS_utimensat: \
		if(a3) { \
			ts[0].tv_sec = __tsc(a3)[0].tv_sec; \
			ts[0].tv_nsec = __tsc(a3)[0].tv_nsec; \
			ts[1].tv_sec = __tsc(a3)[1].tv_sec; \
			ts[1].tv_nsec = __tsc(a3)[1].tv_nsec; \
			a3 = (unsigned long)ts; \
		} break;
/* Only FUTEX_WAIT (with or without FUTEX_PRIVATE_FLAG) passes a timespec
 * as the fourth futex argument. */
#define __fixup_case_4 \
	case SYS_futex: \
		if((a2 & (~128 /* FUTEX_PRIVATE_FLAG */)) == 0 /* FUTEX_WAIT */) __fixup(a4); break;
#define __fixup_case_5 \
	case SYS_mq_timedsend: case SYS_mq_timedreceive: case SYS_pselect6: \
		__fixup(a5); break;
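
/*
 * Illustration of the fixup mechanism: in __syscall2 below, the
 * SYS_nanosleep case invokes __fixup(a1), which expands (roughly) to
 *
 *	if (a1) {
 *		ts->tv_sec = ((struct __timespec *)(unsigned long)a1)->tv_sec;
 *		ts->tv_nsec = ((struct __timespec *)(unsigned long)a1)->tv_nsec;
 *		a1 = (unsigned long)ts;
 *	}
 *
 * The caller's timespec is copied into the on-stack struct
 * __timespec_kernel array ts, widening tv_nsec from 32 to 64 bits, and
 * the syscall argument is repointed at that copy before the 64-bit
 * kernel sees it.
 */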

static __inline long __syscall0(long long n)
{
	unsigned long ret;
	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n) : "rcx", "r11", "memory");
	return ret;
}
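
/*
 * The register assignments below follow the x86_64 kernel syscall ABI:
 * the syscall number goes in rax ("a"), arguments in rdi ("D"), rsi
 * ("S"), rdx ("d"), r10, r8 and r9, and the result comes back in rax.
 * The syscall instruction itself clobbers rcx and r11, hence the clobber
 * list on every asm statement.
 */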

static __inline long __syscall1(long long n, long long a1)
{
	unsigned long ret;
	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1) : "rcx", "r11", "memory");
	return ret;
}

static __inline long __syscall2(long long n, long long a1, long long a2)
{
	unsigned long ret;
	struct __timespec_kernel ts[1]; /* scratch copy used by __fixup */
	switch (n) {
		__fixup_case_2;
	}
	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2)
					: "rcx", "r11", "memory");
	return ret;
}

static __inline long __syscall3(long long n, long long a1, long long a2, long long a3)
{
	unsigned long ret;
	struct __timespec_kernel ts[2]; /* two entries: utimensat rewrites a timespec[2] */
	switch (n) {
		__fixup_case_2;
		__fixup_case_3;
	}
	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
						  "d"(a3) : "rcx", "r11", "memory");
	return ret;
}

static __inline long __syscall4(long long n, long long a1, long long a2, long long a3,
                                     long long a4_)
{
	unsigned long ret;
	register long long a4 __asm__("r10") = a4_;
	struct __timespec_kernel ts[2];
	switch (n) {
		__fixup_case_2;
		__fixup_case_3;
		__fixup_case_4;
	}
	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
					  "d"(a3), "r"(a4): "rcx", "r11", "memory");
	return ret;
}
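
/*
 * r10, r8 and r9 have no GCC constraint letters of their own, so the
 * 4th, 5th and 6th arguments are bound to them with explicit register
 * variables and handed to the asm through generic "r" constraints.
 */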

static __inline long __syscall5(long long n, long long a1, long long a2, long long a3,
                                     long long a4_, long long a5_)
{
	unsigned long ret;
	register long long a4 __asm__("r10") = a4_;
	register long long a5 __asm__("r8") = a5_;
	struct __timespec_kernel ts[2];
	switch (n) {
		__fixup_case_2;
		__fixup_case_3;
		__fixup_case_4;
		__fixup_case_5;
	}
	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
					  "d"(a3), "r"(a4), "r"(a5) : "rcx", "r11", "memory");
	return ret;
}

static __inline long __syscall6(long long n, long long a1, long long a2, long long a3,
                                     long long a4_, long long a5_, long long a6_)
{
	unsigned long ret;
	register long long a4 __asm__("r10") = a4_;
	register long long a5 __asm__("r8") = a5_;
	register long long a6 __asm__("r9") = a6_;
	struct __timespec_kernel ts[2];
	switch (n) {
		__fixup_case_2;
		__fixup_case_3;
		__fixup_case_4;
		__fixup_case_5;
	}
	__asm__ __volatile__ ("syscall" : "=a"(ret) : "a"(n), "D"(a1), "S"(a2),
					  "d"(a3), "r"(a4), "r"(a5), "r"(a6) : "rcx", "r11", "memory");
	return ret;
}
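
/*
 * Illustrative sketch only (not part of this header), assuming SYS_write
 * is available from the syscall number definitions: a wrapper calling
 * __syscall3 directly for write(2). In normal use, a syscall() macro
 * layer presumably wraps these functions and applies __scc() to each
 * argument; the casts below mimic what __scc() would produce (pointer
 * zero-extended via unsigned long, length passed as long long).
 *
 *	static long my_write(int fd, const void *buf, unsigned long len)
 *	{
 *		return __syscall3(SYS_write, fd, (unsigned long)buf, (long long)len);
 *	}
 */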