/* Copyright (C) 2021 Free Software Foundation, Inc.
   Contributed by Oracle.

   This file is part of GNU Binutils.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#ifndef _LIBCOL_UTIL_H
#define _LIBCOL_UTIL_H

#include <stdarg.h>
#include <stddef.h>	/* size_t */
#include <stdint.h>	/* uint32_t, uint64_t */
#include <pthread.h>
#include <signal.h>

// LIBCOLLECTOR NOT I18N
#define NTXT(x) x
#define STXT(x) x

extern int __collector_tracelevel;

/* Initialization functions */
extern int  __collector_util_init ();
extern void __collector_libkstat_funcs_init ();
extern void __collector_libscf_funcs_init ();

/* -------  functions from libcol_util.c ----------------- */
extern void * __collector_memcpy (void *s1, const void *s2, size_t n);
extern int (*__collector_sscanfp)(const char *restrict s, const char *restrict fmt, ...);
extern char * __collector_strcat (char *s1, const char *s2);
extern char * __collector_strchr (const char *s1, int chr);
extern size_t __collector_strlcpy (char *dst, const char *src, size_t dstsize);
extern char * __collector_strrchr (const char *str, int chr);
extern size_t __collector_strlen (const char *s);
extern size_t __collector_strlcat (char *dst, const char *src, size_t dstsize);
extern int __collector_strcmp (const char *s1, const char *s2);
extern int __collector_strncmp (const char *s1, const char *s2, size_t n);
extern char * __collector_strstr (const char *s1, const char *s2);
extern size_t __collector_strncpy (char *dst, const char *src, size_t dstsize);
extern size_t __collector_strncat (char *dst, const char *src, size_t dstsize);
extern void * __collector_malloc (size_t size);
extern void * __collector_calloc (size_t nelem, size_t elsize);
extern char * __collector_strdup (const char * str);
extern int __collector_strStartWith (const char *s1, const char *s2);
extern int __collector_xml_snprintf (char *s, size_t n, const char *format, ...) __attribute__ ((format (printf, 3, 4)));
extern int __collector_xml_vsnprintf (char *s, size_t n, const char *format, va_list args);
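
/* Usage sketch (illustrative only, not part of this header): assuming the
 * strlcpy/strlcat replacements above follow the usual BSD semantics suggested
 * by their names and size_t return value (always NUL-terminate, return the
 * length that was attempted), truncation can be detected by comparing the
 * result against the buffer size.  The buffer and path strings below are
 * hypothetical.
 *
 *   char buf[64];
 *   size_t n = __collector_strlcpy (buf, "/tmp/experiment.1.er", sizeof (buf));
 *   n = __collector_strlcat (buf, "/log.xml", sizeof (buf));
 *   if (n >= sizeof (buf))
 *     { } // the concatenated path did not fit and was truncated
 */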

/* -------  collector_thread ----------------- */
pid_t __collector_gettid ();
extern void __collector_ext_gettid_tsd_create_key ();
#define collector_thread_t pthread_t            // not using pid_t, since tid is defined as pthread_t in package structures, and other code assumes this type
#define statvfs_t  struct statvfs
#define __collector_lwp_self() (collector_thread_t)__collector_gettid() // not using pthread_self()
#define __collector_thr_self() (collector_thread_t)__collector_gettid() // not using pthread_self()
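
/* Usage sketch (illustrative): callers obtain a thread id through the macros
 * above rather than calling pthread_self() directly, e.g.
 *
 *   collector_thread_t tid = __collector_thr_self ();
 *
 * which expands to __collector_gettid() cast to collector_thread_t
 * (i.e. pthread_t), the tid type assumed elsewhere in the collector.
 */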

/* -------  collector_mutex ----------------- */
/*
 * mutex_init is defined in libthread.  If we don't want to interact
 * with libthread, we should use memset to initialize mutexes.
 */

typedef volatile int collector_mutex_t;
#define COLLECTOR_MUTEX_INITIALIZER 0
extern int __collector_mutex_lock (collector_mutex_t *mp);
extern int __collector_mutex_unlock (collector_mutex_t *mp);
extern int __collector_mutex_trylock (collector_mutex_t *mp);

#define __collector_mutex_init(xx) \
  do { collector_mutex_t tmp=COLLECTOR_MUTEX_INITIALIZER; *(xx)=tmp; } while(0)
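
/* Usage sketch (illustrative): a collector_mutex_t is either statically
 * initialized with COLLECTOR_MUTEX_INITIALIZER or reset at run time with
 * __collector_mutex_init(); a guarded section then looks like the code
 * below.  The variable name dump_lock is hypothetical.
 *
 *   static collector_mutex_t dump_lock = COLLECTOR_MUTEX_INITIALIZER;
 *
 *   __collector_mutex_lock (&dump_lock);
 *   // ... critical section ...
 *   __collector_mutex_unlock (&dump_lock);
 *
 * __collector_mutex_trylock() can be used instead of the lock call when the
 * caller must not block on a lock that is already held.
 */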

void __collector_sample (char *name);
void __collector_terminate_expt ();
void __collector_pause ();
void __collector_pause_m ();
void __collector_resume ();

struct DT_lineno;

typedef enum
{
  DFUNC_API = 1, /* dynamic function declared with API */
  DFUNC_JAVA,    /* dynamically compiled Java method */
  DFUNC_KERNEL   /* dynamic code mapped by the kernel (Linux) */
} dfunc_mode_t;

extern void __collector_int_func_load (dfunc_mode_t mode, char *name,
				       char *sourcename, void *vaddr,
				       int size, int lntsize,
				       struct DT_lineno *lntable);
extern void __collector_int_func_unload (dfunc_mode_t mode, void *vaddr);
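
/* Usage sketch (illustrative, with hypothetical values): a producer of
 * dynamically generated code reports a new function so profile hits can be
 * attributed to it, and unreports it when the code region is unmapped.
 * code_addr, code_size and the function name are placeholders, and no line
 * number table is supplied here (lntsize 0, lntable NULL).
 *
 *   __collector_int_func_load (DFUNC_API, "generated_stub", NULL,
 *                              (void *) code_addr, code_size, 0, NULL);
 *   ...
 *   __collector_int_func_unload (DFUNC_API, (void *) code_addr);
 */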

extern int __collector_sigaction (int sig, const struct sigaction *nact,
				  struct sigaction *oact);
extern void __collector_SIGDFL_handler (int sig);
extern int __collector_ext_itimer_set (int period);

#if ARCH(Intel)
/* Atomic functions on x86/x64 */

/**
 * This function atomically increments (by one) the value stored in target.
 */
static __attribute__ ((always_inline)) inline void
__collector_inc_32 (volatile uint32_t *ptr)
{
  __asm__ __volatile__("lock; incl %0"
		       : "+m" (*ptr)); // the memory operand is both read and written
}
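
/* Usage sketch (illustrative): bumping a shared event counter without taking
 * a lock; the counter variable is hypothetical.
 *
 *   static volatile uint32_t event_count;
 *   ...
 *   __collector_inc_32 (&event_count);
 */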

/**
 * This function atomically decrements (by one) the value stored in target.
 */
static __attribute__ ((always_inline)) inline void
__collector_dec_32 (volatile uint32_t *ptr)
{
  __asm__ __volatile__("lock; decl %0"
		       : "+m" (*ptr)); // the memory operand is both read and written
}

/**
 * This function atomically subtracts the value "off" from the value stored
 * in target and returns the new value stored in target.
 */
static __attribute__ ((always_inline)) inline uint32_t
__collector_subget_32 (volatile uint32_t *ptr, uint32_t off)
{
  uint32_t r;
  uint32_t offset = off;
  __asm__ __volatile__("movl %2, %0; negl %0; lock; xaddl %0, %1"
		       : "=r" (r), "+m" (*ptr) /* output */
		       : "a" (off)             /* input */
		       );
  return (r - offset);
}

/**
 * This function returns the value of the stack pointer register.
 */
static __attribute__ ((always_inline)) inline void *
__collector_getsp ()
{
  void *r;
#if WSIZE(32) || defined(__ILP32__)
  __asm__ __volatile__("movl %%esp, %0"
#else
  __asm__ __volatile__("movq %%rsp, %0"
#endif
	  : "=r" (r)); // output
  return r;
}

/**
 * This function returns the value of the frame pointer register.
 */
static __attribute__ ((always_inline)) inline void *
__collector_getfp ()
{
  void *r;
#if WSIZE(32) || defined(__ILP32__)
  __asm__ __volatile__("movl %%ebp, %0"
#else
  __asm__ __volatile__("movq %%rbp, %0"
#endif
	  : "=r" (r)); // output
  return r;
}

/**
 * This function returns the value of the program counter register.
 */
static __attribute__ ((always_inline)) inline void *
__collector_getpc ()
{
  void *r;
#if defined(__x86_64)
  __asm__ __volatile__("lea (%%rip), %0" : "=r" (r));
#else
  __asm__ __volatile__("call  1f \n"
		       "1: popl  %0" : "=r" (r));
#endif
  return r;
}

/**
 * This function enables a compare and swap operation to occur atomically.
 * The 32-bit value stored in target is compared with "old".  If these values
 * are equal, the value stored in target is replaced with "new".  The old
 * 32-bit value stored in target is returned by the function whether or not
 * the replacement occurred.
 */
static __attribute__ ((always_inline)) inline uint32_t
__collector_cas_32 (volatile uint32_t *pdata, uint32_t old, uint32_t new)
{
  uint32_t r;
  __asm__ __volatile__("lock; cmpxchgl %2, %1"
		       : "=a" (r), "=m" (*pdata) : "r" (new),
		       "a" (old), "m" (*pdata));
  return r;
}
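
/* Usage sketch (illustrative): the standard compare-and-swap retry loop for a
 * lock-free read-modify-write.  The shared variable state and the flag value
 * FLAG_BIT are hypothetical; the update succeeds when the value returned by
 * the CAS equals the value the new contents were computed from.
 *
 *   static volatile uint32_t state;
 *
 *   uint32_t oldv, newv;
 *   do
 *     {
 *       oldv = state;
 *       newv = oldv | FLAG_BIT;
 *     }
 *   while (__collector_cas_32 (&state, oldv, newv) != oldv);
 */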

/**
 * This function enables a compare and swap operation to occur atomically.
 * The 64-bit value stored in target is compared with "old".  If these values
 * are equal, the value stored in target is replaced with "new".  The old
 * 64-bit value stored in target is returned by the function whether or not
 * the replacement occurred.
 */
static __attribute__ ((always_inline)) inline uint64_t
__collector_cas_64p (volatile uint64_t *mem, uint64_t *old, uint64_t *new)
{
  uint64_t r;
#if WSIZE(32)
  uint32_t old1 = (uint32_t) (*old & 0xFFFFFFFFL);
  uint32_t old2 = (uint32_t) ((*old >> 32) & 0xFFFFFFFFL);
  uint32_t new1 = (uint32_t) (*new & 0xFFFFFFFFL);
  uint32_t new2 = (uint32_t) ((*new >> 32) & 0xFFFFFFFFL);
  uint32_t res1 = 0;
  uint32_t res2 = 0;
  __asm__ __volatile__(
      "movl %3, %%esi; lock; cmpxchg8b (%%esi); movl %%edx, %2; movl %%eax, %1"
      : "=m" (r), "=m" (res1), "=m" (res2) /* output */
      : "m" (mem), "a" (old1), "d" (old2), "b" (new1), "c" (new2) /* input */
      : "memory", "cc", "esi" //, "edx", "ecx", "ebx", "eax" /* clobbered registers */
		       );
  r = (((uint64_t) res2) << 32) | ((uint64_t) res1);
#else
  __asm__ __volatile__("lock; cmpxchgq %2, %1"
		       : "=a" (r), "=m" (*mem) /* output */
		       : "r" (*new), "a" (*old), "m" (*mem) /* input */
		       : "%rcx", "rdx" /* clobbered registers */
		       );
#endif
  return r;
}

/**
 * This function enables a compare and swap operation to occur atomically.
 * The 32-/64-bit value stored in target is compared with "cmp".  If these
 * values are equal, the value stored in target is replaced with "new".
 * The old value stored in target is returned by the function whether or not
 * the replacement occurred.
 */
static __attribute__ ((always_inline)) inline void *
__collector_cas_ptr (void *mem, void *cmp, void *new)
{
  void *r;
#if WSIZE(32) || defined(__ILP32__)
  r = (void *) __collector_cas_32 ((volatile uint32_t *) mem, (uint32_t) cmp, (uint32_t) new);
#else
  __asm__ __volatile__("lock; cmpxchgq %2, (%1)"
		       : "=a" (r), "=b" (mem) /* output */
		       : "r" (new), "a" (cmp), "b" (mem) /* input */
		       );
#endif
  return r;
}
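
/* Usage sketch (illustrative): pushing onto a lock-free singly linked list
 * with __collector_cas_ptr.  The node type and list head are hypothetical;
 * the push succeeds when the returned old head equals the head the new node
 * was linked against, otherwise the loop retries with the fresher head.
 *
 *   typedef struct Node { struct Node *next; } Node;
 *   static Node *list_head;
 *
 *   void
 *   push (Node *n)
 *   {
 *     Node *old_head;
 *     do
 *       {
 *         old_head = list_head;
 *         n->next = old_head;
 *       }
 *     while (__collector_cas_ptr (&list_head, old_head, n) != old_head);
 *   }
 */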

#elif ARCH(Aarch64)
static __attribute__ ((always_inline)) inline uint32_t
__collector_inc_32 (volatile uint32_t *ptr)
{
  return __sync_add_and_fetch (ptr, 1);
}

static __attribute__ ((always_inline)) inline uint32_t
__collector_dec_32 (volatile uint32_t *ptr)
{
  return __sync_sub_and_fetch (ptr, 1);
}

static __attribute__ ((always_inline)) inline uint32_t
__collector_subget_32 (volatile uint32_t *ptr, uint32_t off)
{
  return __sync_sub_and_fetch (ptr, off);
}

static __attribute__ ((always_inline)) inline uint32_t
__collector_cas_32 (volatile uint32_t *ptr, uint32_t old, uint32_t new)
{
  return __sync_val_compare_and_swap (ptr, old, new);
}

static __attribute__ ((always_inline)) inline uint64_t
__collector_cas_64p (volatile uint64_t *ptr, uint64_t *old, uint64_t *new)
{
  return __sync_val_compare_and_swap (ptr, *old, *new);
}

static __attribute__ ((always_inline)) inline void *
__collector_cas_ptr (void *ptr, void *old, void *new)
{
  return (void *) __sync_val_compare_and_swap ((unsigned long *) ptr, (unsigned long) old, (unsigned long) new);
}

#else
extern void __collector_flushw (); /* defined for SPARC only */
extern void *__collector_getpc ();
extern void *__collector_getsp ();
extern void *__collector_getfp ();
extern void __collector_inc_32 (volatile uint32_t *);
extern void __collector_dec_32 (volatile uint32_t *);
extern void *__collector_cas_ptr (volatile void *, void *, void *);
extern uint32_t __collector_cas_32 (volatile uint32_t *, uint32_t, uint32_t);
extern uint32_t __collector_subget_32 (volatile uint32_t *, uint32_t);
extern uint64_t __collector_cas_64p (volatile uint64_t *, uint64_t *, uint64_t *);
#endif /* ARCH() */
#endif /* _LIBCOL_UTIL_H */