os_linux.hpp revision 8575:5916110131c4
/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_LINUX_VM_OS_LINUX_HPP
#define OS_LINUX_VM_OS_LINUX_HPP

// Linux_OS defines the interface to Linux operating systems

// Information about the protection of the page at address '0' on this os.
static bool zero_page_read_protected() { return true; }

class Linux {
  friend class os;
  friend class TestReserveMemorySpecial;

  // For signal-chaining
#define MAXSIGNUM 32
  static struct sigaction sigact[MAXSIGNUM]; // saved preinstalled sigactions
  static unsigned int sigs;             // mask of signals that have
                                        // preinstalled signal handlers
  static bool libjsig_is_loaded;        // libjsig that interposes sigaction(),
                                        // __sigaction(), signal() is loaded
  static struct sigaction *(*get_signal_action)(int);
  static struct sigaction *get_preinstalled_handler(int);
  static void save_preinstalled_handler(int, struct sigaction&);

  static void check_signal_handler(int sig);

  // For signal flags diagnostics
  static int sigflags[MAXSIGNUM];

  static int (*_clock_gettime)(clockid_t, struct timespec *);
  static int (*_pthread_getcpuclockid)(pthread_t, clockid_t *);
  static int (*_pthread_setname_np)(pthread_t, const char*);

  static address   _initial_thread_stack_bottom;
  static uintptr_t _initial_thread_stack_size;

  static const char *_glibc_version;
  static const char *_libpthread_version;

  static bool _supports_fast_thread_cpu_time;

  static GrowableArray<int>* _cpu_to_node;

 protected:

  static julong _physical_memory;
  static pthread_t _main_thread;
  static Mutex* _createThread_lock;
  static int _page_size;
  static const int _vm_default_page_size;

  static julong available_memory();
  static julong physical_memory() { return _physical_memory; }
  static void initialize_system_info();

  static int commit_memory_impl(char* addr, size_t bytes, bool exec);
  static int commit_memory_impl(char* addr, size_t bytes,
                                size_t alignment_hint, bool exec);

  static void set_glibc_version(const char *s)      { _glibc_version = s; }
  static void set_libpthread_version(const char *s) { _libpthread_version = s; }

  static bool supports_variable_stack_size();

  static void rebuild_cpu_to_node_map();
  static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }

  static size_t find_large_page_size();
  static size_t setup_large_page_size();

  static bool setup_large_page_type(size_t page_size);
  static bool transparent_huge_pages_sanity_check(bool warn, size_t pages_size);
  static bool hugetlbfs_sanity_check(bool warn, size_t page_size);

  static char* reserve_memory_special_shm(size_t bytes, size_t alignment, char* req_addr, bool exec);
  static char* reserve_memory_special_huge_tlbfs(size_t bytes, size_t alignment, char* req_addr, bool exec);
  static char* reserve_memory_special_huge_tlbfs_only(size_t bytes, char* req_addr, bool exec);
  static char* reserve_memory_special_huge_tlbfs_mixed(size_t bytes, size_t alignment, char* req_addr, bool exec);
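
  // Rough shape of the two large-page reservation paths above (an illustrative
  // sketch, not the actual implementation; 'prot' stands for protection flags
  // derived from 'exec'):
  //
  //   // SHM path: SysV shared memory backed by huge pages
  //   int shmid  = shmget(IPC_PRIVATE, bytes, SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
  //   char* addr = (char*) shmat(shmid, req_addr, 0);
  //
  //   // hugetlbfs path: anonymous mmap with MAP_HUGETLB
  //   char* addr = (char*) ::mmap(req_addr, bytes, prot,
  //                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
  //
  // The "_mixed" variant reserves an aligned range first and then commits large
  // pages inside it, so alignment and req_addr constraints can still be honored.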

  static bool release_memory_special_impl(char* base, size_t bytes);
  static bool release_memory_special_shm(char* base, size_t bytes);
  static bool release_memory_special_huge_tlbfs(char* base, size_t bytes);

  static void print_full_memory_info(outputStream* st);
  static void print_distro_info(outputStream* st);
  static void print_libversion_info(outputStream* st);

 public:
  static bool _stack_is_executable;
  static void *dlopen_helper(const char *name, char *ebuf, int ebuflen);
  static void *dll_load_in_vmthread(const char *name, char *ebuf, int ebuflen);

  static void init_thread_fpu_state();
  static int  get_fpu_control_word();
  static void set_fpu_control_word(int fpu_control);
  static pthread_t main_thread(void)                                { return _main_thread; }
  // returns kernel thread id (similar to LWP id on Solaris), which can be
  // used to access /proc
  static pid_t gettid();
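  // Illustrative only: the id returned by gettid() (typically obtained via
  // syscall(SYS_gettid)) names this thread's entry under /proc, e.g.
  //
  //   pid_t tid = os::Linux::gettid();
  //   // -> /proc/self/task/<tid>/stat can then be read for per-thread data
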
  static void set_createThread_lock(Mutex* lk)                      { _createThread_lock = lk; }
  static Mutex* createThread_lock(void)                             { return _createThread_lock; }
  static void hotspot_sigmask(Thread* thread);

  static address   initial_thread_stack_bottom(void)                { return _initial_thread_stack_bottom; }
  static uintptr_t initial_thread_stack_size(void)                  { return _initial_thread_stack_size; }
  static bool is_initial_thread(void);

  static int page_size(void)                                        { return _page_size; }
  static void set_page_size(int val)                                { _page_size = val; }

  static int vm_default_page_size(void)                             { return _vm_default_page_size; }

  static address   ucontext_get_pc(ucontext_t* uc);
  static void ucontext_set_pc(ucontext_t* uc, address pc);
  static intptr_t* ucontext_get_sp(ucontext_t* uc);
  static intptr_t* ucontext_get_fp(ucontext_t* uc);

  // For Forte Analyzer AsyncGetCallTrace profiling support:
  //
  // This interface should be declared in os_linux_i486.hpp, but
  // that file provides extensions to the os class and not the
  // Linux class.
  static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc,
                                              intptr_t** ret_sp, intptr_t** ret_fp);

  // This boolean allows users to forward their own non-matching signals
  // to JVM_handle_linux_signal, harmlessly.
  static bool signal_handlers_are_installed;

  static int get_our_sigflags(int);
  static void set_our_sigflags(int, int);
  static void signal_sets_init();
  static void install_signal_handlers();
  static void set_signal_handler(int, bool);
  static bool is_sig_ignored(int sig);

  static sigset_t* unblocked_signals();
  static sigset_t* vm_signals();
  static sigset_t* allowdebug_blocked_signals();

  // For signal-chaining
  static struct sigaction *get_chained_signal_action(int sig);
  static bool chained_handler(int sig, siginfo_t* siginfo, void* context);
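
  // Rough shape of the chaining protocol (illustrative sketch only): the VM's
  // handler first tries to handle the signal itself and otherwise forwards it
  // to whatever handler was installed before the VM, e.g.
  //
  //   if (!handled_by_vm) {
  //     if (os::Linux::chained_handler(sig, siginfo, context)) {
  //       return;   // a preinstalled handler dealt with the signal
  //     }
  //     // otherwise report a fatal error / fall back to the default action
  //   }
  //
  // get_chained_signal_action() consults libjsig when it is loaded, and the
  // saved preinstalled sigactions otherwise.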

  // GNU libc and libpthread version strings
  static const char *glibc_version()          { return _glibc_version; }
  static const char *libpthread_version()     { return _libpthread_version; }

  static void libpthread_init();
  static bool libnuma_init();
  static void* libnuma_dlsym(void* handle, const char* name);
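
  // libnuma is bound lazily; a minimal sketch of what libnuma_init() amounts
  // to (illustrative only):
  //
  //   void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
  //   if (handle != NULL) {
  //     set_numa_max_node((numa_max_node_func_t) libnuma_dlsym(handle, "numa_max_node"));
  //     // ... the remaining numa_* entry points are resolved the same way
  //   }
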
  // Minimum stack size a thread can be created with (allowing
  // the VM to completely create the thread and enter user code)
  static size_t min_stack_allowed;

  // Return default stack size or guard size for the specified thread type
  static size_t default_stack_size(os::ThreadType thr_type);
  static size_t default_guard_size(os::ThreadType thr_type);

  static void capture_initial_stack(size_t max_size);

  // Stack overflow handling
  static bool manually_expand_stack(JavaThread * t, address addr);
  static int max_register_window_saves_before_flushing();

  // Real-time clock functions
  static void clock_init(void);

  // fast POSIX clocks support
  static void fast_thread_clock_init(void);

  static int clock_gettime(clockid_t clock_id, struct timespec *tp) {
    return _clock_gettime ? _clock_gettime(clock_id, tp) : -1;
  }
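
  // Typical use (illustrative only): monotonic time in nanoseconds, with the
  // caller falling back to a slower path when the entry point was not resolved:
  //
  //   struct timespec tp;
  //   if (os::Linux::clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
  //     jlong now_ns = jlong(tp.tv_sec) * (1000 * 1000 * 1000) + jlong(tp.tv_nsec);
  //   }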

  static int pthread_getcpuclockid(pthread_t tid, clockid_t *clock_id) {
    return _pthread_getcpuclockid ? _pthread_getcpuclockid(tid, clock_id) : -1;
  }

  static bool supports_fast_thread_cpu_time() {
    return _supports_fast_thread_cpu_time;
  }

  static jlong fast_thread_cpu_time(clockid_t clockid);
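
  // The fast per-thread CPU time path combines the hooks above; an illustrative
  // sketch (not the actual caller):
  //
  //   clockid_t cid;
  //   if (os::Linux::supports_fast_thread_cpu_time() &&
  //       os::Linux::pthread_getcpuclockid(pthread_self(), &cid) == 0) {
  //     jlong cpu_time_ns = os::Linux::fast_thread_cpu_time(cid);
  //   }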

  // pthread_cond clock support
 private:
  static pthread_condattr_t _condattr[1];

 public:
  static pthread_condattr_t* condAttr() { return _condattr; }
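
  // _condattr is the attribute relative timed waits are created with; where the
  // platform supports it, it is presumably set up roughly as follows (sketch
  // only, assuming pthread_condattr_setclock() is available):
  //
  //   pthread_condattr_init(&_condattr[0]);
  //   pthread_condattr_setclock(&_condattr[0], CLOCK_MONOTONIC);
  //
  // so that pthread_cond_timedwait() is not disturbed by wall-clock adjustments.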

  // Stack repair handling

  // none present

 private:
  typedef int (*sched_getcpu_func_t)(void);
  typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
  typedef int (*numa_max_node_func_t)(void);
  typedef int (*numa_available_func_t)(void);
  typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
  typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
  typedef void (*numa_set_bind_policy_func_t)(int policy);

  static sched_getcpu_func_t _sched_getcpu;
  static numa_node_to_cpus_func_t _numa_node_to_cpus;
  static numa_max_node_func_t _numa_max_node;
  static numa_available_func_t _numa_available;
  static numa_tonode_memory_func_t _numa_tonode_memory;
  static numa_interleave_memory_func_t _numa_interleave_memory;
  static numa_set_bind_policy_func_t _numa_set_bind_policy;
  static unsigned long* _numa_all_nodes;

  static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
  static void set_numa_node_to_cpus(numa_node_to_cpus_func_t func) { _numa_node_to_cpus = func; }
  static void set_numa_max_node(numa_max_node_func_t func) { _numa_max_node = func; }
  static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
  static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
  static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
  static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
  static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
  static int sched_getcpu_syscall(void);
 public:
  static int sched_getcpu()  { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
  static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
    return _numa_node_to_cpus != NULL ? _numa_node_to_cpus(node, buffer, bufferlen) : -1;
  }
  static int numa_max_node() { return _numa_max_node != NULL ? _numa_max_node() : -1; }
  static int numa_available() { return _numa_available != NULL ? _numa_available() : -1; }
  static int numa_tonode_memory(void *start, size_t size, int node) {
    return _numa_tonode_memory != NULL ? _numa_tonode_memory(start, size, node) : -1;
  }
  static void numa_interleave_memory(void *start, size_t size) {
    if (_numa_interleave_memory != NULL && _numa_all_nodes != NULL) {
      _numa_interleave_memory(start, size, _numa_all_nodes);
    }
  }
  static void numa_set_bind_policy(int policy) {
    if (_numa_set_bind_policy != NULL) {
      _numa_set_bind_policy(policy);
    }
  }
  static int get_node_by_cpu(int cpu_id);
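
  // A minimal NUMA usage sketch built from the wrappers above (illustrative
  // only; assumes libnuma_init() succeeded and 'addr'/'bytes' describe a
  // committed range):
  //
  //   if (os::Linux::numa_available() != -1) {
  //     int node = os::Linux::get_node_by_cpu(os::Linux::sched_getcpu());
  //     os::Linux::numa_tonode_memory(addr, bytes, node);   // bind the range to 'node'
  //   }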
};


class PlatformEvent : public CHeapObj<mtInternal> {
 private:
  double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
  volatile int _Event;
  volatile int _nParked;
  pthread_mutex_t _mutex[1];
  pthread_cond_t  _cond[1];
  double PostPad[2];
  Thread * _Assoc;

 public:       // TODO-FIXME: make dtor private
  ~PlatformEvent() { guarantee(0, "invariant"); }

 public:
  PlatformEvent() {
    int status;
    status = pthread_cond_init(_cond, os::Linux::condAttr());
    assert_status(status == 0, status, "cond_init");
    status = pthread_mutex_init(_mutex, NULL);
    assert_status(status == 0, status, "mutex_init");
    _Event   = 0;
    _nParked = 0;
    _Assoc   = NULL;
  }

  // Use caution with reset() and fired() -- they may require MEMBARs
  void reset() { _Event = 0; }
  int  fired() { return _Event; }
  void park();
  void unpark();
  int  park(jlong millis); // relative timed-wait only
  void SetAssociation(Thread * a) { _Assoc = a; }
};
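
// Illustrative only: a PlatformEvent behaves like a one-shot permit.  unpark()
// posts the permit and wakes a parked thread; park() consumes it, blocking on
// the internal mutex/condvar pair when no permit is available:
//
//   // waiting thread                  // waking thread
//   ev->park();                        ev->unpark();
//
// reset() and fired() touch _Event directly and, as the comment above notes,
// may need explicit memory barriers around them.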

class PlatformParker : public CHeapObj<mtInternal> {
 protected:
  enum {
    REL_INDEX = 0,
    ABS_INDEX = 1
  };
  int _cur_index;  // which cond is in use: -1, 0, 1
  pthread_mutex_t _mutex[1];
  pthread_cond_t  _cond[2]; // one for relative times and one for abs.

 public:       // TODO-FIXME: make dtor private
  ~PlatformParker() { guarantee(0, "invariant"); }

 public:
  PlatformParker() {
    int status;
    status = pthread_cond_init(&_cond[REL_INDEX], os::Linux::condAttr());
    assert_status(status == 0, status, "cond_init rel");
    status = pthread_cond_init(&_cond[ABS_INDEX], NULL);
    assert_status(status == 0, status, "cond_init abs");
    status = pthread_mutex_init(_mutex, NULL);
    assert_status(status == 0, status, "mutex_init");
    _cur_index = -1; // mark as unused
  }
};
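
// Illustrative only: the two condvars let the platform-specific Parker code
// pick the right wait flavor without re-initializing anything, roughly
// ('is_absolute' and 'deadline' are placeholder names):
//
//   int index = is_absolute ? ABS_INDEX : REL_INDEX;
//   _cur_index = index;
//   pthread_cond_timedwait(&_cond[index], _mutex, &deadline);
//   _cur_index = -1;
//
// _cond[REL_INDEX] is created with os::Linux::condAttr() (a monotonic clock
// where available), while _cond[ABS_INDEX] keeps the default attributes so
// absolute deadlines track the wall clock.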

#endif // OS_LINUX_VM_OS_LINUX_HPP