1/* Low level interface to ptrace, for the remote server for GDB.
2   Copyright (C) 1995-2023 Free Software Foundation, Inc.
3
4   This file is part of GDB.
5
6   This program is free software; you can redistribute it and/or modify
7   it under the terms of the GNU General Public License as published by
8   the Free Software Foundation; either version 3 of the License, or
9   (at your option) any later version.
10
11   This program is distributed in the hope that it will be useful,
12   but WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14   GNU General Public License for more details.
15
16   You should have received a copy of the GNU General Public License
17   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
18
19#include "server.h"
20#include "linux-low.h"
21#include "nat/linux-osdata.h"
22#include "gdbsupport/agent.h"
23#include "tdesc.h"
24#include "gdbsupport/event-loop.h"
25#include "gdbsupport/event-pipe.h"
26#include "gdbsupport/rsp-low.h"
27#include "gdbsupport/signals-state-save-restore.h"
28#include "nat/linux-nat.h"
29#include "nat/linux-waitpid.h"
30#include "gdbsupport/gdb_wait.h"
31#include "nat/gdb_ptrace.h"
32#include "nat/linux-ptrace.h"
33#include "nat/linux-procfs.h"
34#include "nat/linux-personality.h"
35#include <signal.h>
36#include <sys/ioctl.h>
37#include <fcntl.h>
38#include <unistd.h>
39#include <sys/syscall.h>
40#include <sched.h>
41#include <ctype.h>
42#include <pwd.h>
43#include <sys/types.h>
44#include <dirent.h>
45#include <sys/stat.h>
46#include <sys/vfs.h>
47#include <sys/uio.h>
48#include "gdbsupport/filestuff.h"
49#include "tracepoint.h"
50#include <inttypes.h>
51#include "gdbsupport/common-inferior.h"
52#include "nat/fork-inferior.h"
53#include "gdbsupport/environ.h"
54#include "gdbsupport/gdb-sigmask.h"
55#include "gdbsupport/scoped_restore.h"
56#ifndef ELFMAG0
57/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
58   then ELFMAG0 will have been defined.  If it didn't get included by
59   gdb_proc_service.h then including it will likely introduce a duplicate
60   definition of elf_fpregset_t.  */
61#include <elf.h>
62#endif
63#include "nat/linux-namespaces.h"
64
65#ifndef O_LARGEFILE
66#define O_LARGEFILE 0
67#endif
68
69#ifndef AT_HWCAP2
70#define AT_HWCAP2 26
71#endif
72
73/* Some targets did not define these ptrace constants from the start,
74   so gdbserver defines them locally here.  In the future, these may
75   be removed after they are added to asm/ptrace.h.  */
76#if !(defined(PT_TEXT_ADDR) \
77      || defined(PT_DATA_ADDR) \
78      || defined(PT_TEXT_END_ADDR))
79#if defined(__mcoldfire__)
80/* These are still undefined in 3.10 kernels.  */
81#define PT_TEXT_ADDR 49*4
82#define PT_DATA_ADDR 50*4
83#define PT_TEXT_END_ADDR  51*4
84/* These are still undefined in 3.10 kernels.  */
85#elif defined(__TMS320C6X__)
86#define PT_TEXT_ADDR     (0x10000*4)
87#define PT_DATA_ADDR     (0x10004*4)
88#define PT_TEXT_END_ADDR (0x10008*4)
89#endif
90#endif
91
92#if (defined(__UCLIBC__)		\
93     && defined(HAS_NOMMU)		\
94     && defined(PT_TEXT_ADDR)		\
95     && defined(PT_DATA_ADDR)		\
96     && defined(PT_TEXT_END_ADDR))
97#define SUPPORTS_READ_OFFSETS
98#endif
99
100#ifdef HAVE_LINUX_BTRACE
101# include "nat/linux-btrace.h"
102# include "gdbsupport/btrace-common.h"
103#endif
104
105#ifndef HAVE_ELF32_AUXV_T
106/* Copied from glibc's elf.h.  */
107typedef struct
108{
109  uint32_t a_type;		/* Entry type */
110  union
111    {
112      uint32_t a_val;		/* Integer value */
113      /* We use to have pointer elements added here.  We cannot do that,
114	 though, since it does not work when using 32-bit definitions
115	 on 64-bit platforms and vice versa.  */
116    } a_un;
117} Elf32_auxv_t;
118#endif
119
120#ifndef HAVE_ELF64_AUXV_T
121/* Copied from glibc's elf.h.  */
122typedef struct
123{
124  uint64_t a_type;		/* Entry type */
125  union
126    {
127      uint64_t a_val;		/* Integer value */
128      /* We use to have pointer elements added here.  We cannot do that,
129	 though, since it does not work when using 32-bit definitions
130	 on 64-bit platforms and vice versa.  */
131    } a_un;
132} Elf64_auxv_t;
133#endif
134
135/* Does the current host support PTRACE_GETREGSET?  */
136int have_ptrace_getregset = -1;
137
138/* Return TRUE if THREAD is the leader thread of the process.  */
139
140static bool
141is_leader (thread_info *thread)
142{
143  ptid_t ptid = ptid_of (thread);
144  return ptid.pid () == ptid.lwp ();
145}
146
147/* LWP accessors.  */
148
149/* See nat/linux-nat.h.  */
150
151ptid_t
152ptid_of_lwp (struct lwp_info *lwp)
153{
154  return ptid_of (get_lwp_thread (lwp));
155}
156
157/* See nat/linux-nat.h.  */
158
159void
160lwp_set_arch_private_info (struct lwp_info *lwp,
161			   struct arch_lwp_info *info)
162{
163  lwp->arch_private = info;
164}
165
166/* See nat/linux-nat.h.  */
167
168struct arch_lwp_info *
169lwp_arch_private_info (struct lwp_info *lwp)
170{
171  return lwp->arch_private;
172}
173
174/* See nat/linux-nat.h.  */
175
176int
177lwp_is_stopped (struct lwp_info *lwp)
178{
179  return lwp->stopped;
180}
181
182/* See nat/linux-nat.h.  */
183
184enum target_stop_reason
185lwp_stop_reason (struct lwp_info *lwp)
186{
187  return lwp->stop_reason;
188}
189
190/* See nat/linux-nat.h.  */
191
192int
193lwp_is_stepping (struct lwp_info *lwp)
194{
195  return lwp->stepping;
196}
197
198/* A list of all unknown processes which receive stop signals.  Some
199   other process will presumably claim each of these as forked
200   children momentarily.  */
201
202struct simple_pid_list
203{
204  /* The process ID.  */
205  int pid;
206
207  /* The status as reported by waitpid.  */
208  int status;
209
210  /* Next in chain.  */
211  struct simple_pid_list *next;
212};
213static struct simple_pid_list *stopped_pids;
214
215/* Trivial list manipulation functions to keep track of a list of new
216   stopped processes.  */
217
218static void
219add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
220{
221  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
222
223  new_pid->pid = pid;
224  new_pid->status = status;
225  new_pid->next = *listp;
226  *listp = new_pid;
227}
228
229static int
230pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
231{
232  struct simple_pid_list **p;
233
234  for (p = listp; *p != NULL; p = &(*p)->next)
235    if ((*p)->pid == pid)
236      {
237	struct simple_pid_list *next = (*p)->next;
238
239	*statusp = (*p)->status;
240	xfree (*p);
241	*p = next;
242	return 1;
243      }
244  return 0;
245}
246
247enum stopping_threads_kind
248  {
249    /* Not stopping threads presently.  */
250    NOT_STOPPING_THREADS,
251
252    /* Stopping threads.  */
253    STOPPING_THREADS,
254
255    /* Stopping and suspending threads.  */
256    STOPPING_AND_SUSPENDING_THREADS
257  };
258
259/* This is set while stop_all_lwps is in effect.  */
260static stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
261
262/* FIXME make into a target method?  */
263int using_threads = 1;
264
265/* True if we're presently stabilizing threads (moving them out of
266   jump pads).  */
267static int stabilizing_threads;
268
269static void unsuspend_all_lwps (struct lwp_info *except);
270static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
271static int lwp_is_marked_dead (struct lwp_info *lwp);
272static int kill_lwp (unsigned long lwpid, int signo);
273static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
274static int linux_low_ptrace_options (int attached);
275static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
276
277/* When the event-loop is doing a step-over, this points at the thread
278   being stepped.  */
279static ptid_t step_over_bkpt;
280
281bool
282linux_process_target::low_supports_breakpoints ()
283{
284  return false;
285}
286
287CORE_ADDR
288linux_process_target::low_get_pc (regcache *regcache)
289{
290  return 0;
291}
292
293void
294linux_process_target::low_set_pc (regcache *regcache, CORE_ADDR newpc)
295{
296  gdb_assert_not_reached ("linux target op low_set_pc is not implemented");
297}
298
299std::vector<CORE_ADDR>
300linux_process_target::low_get_next_pcs (regcache *regcache)
301{
302  gdb_assert_not_reached ("linux target op low_get_next_pcs is not "
303			  "implemented");
304}
305
306int
307linux_process_target::low_decr_pc_after_break ()
308{
309  return 0;
310}
311
312/* True if LWP is stopped in its stepping range.  */
313
314static int
315lwp_in_step_range (struct lwp_info *lwp)
316{
317  CORE_ADDR pc = lwp->stop_pc;
318
319  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
320}
321
322/* The event pipe registered as a waitable file in the event loop.  */
323static event_pipe linux_event_pipe;
324
325/* True if we're currently in async mode.  */
326#define target_is_async_p() (linux_event_pipe.is_open ())
327
328static void send_sigstop (struct lwp_info *lwp);
329
330/* Return non-zero if HEADER is a 64-bit ELF file.  */
331
332static int
333elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
334{
335  if (header->e_ident[EI_MAG0] == ELFMAG0
336      && header->e_ident[EI_MAG1] == ELFMAG1
337      && header->e_ident[EI_MAG2] == ELFMAG2
338      && header->e_ident[EI_MAG3] == ELFMAG3)
339    {
340      *machine = header->e_machine;
341      return header->e_ident[EI_CLASS] == ELFCLASS64;
342
343    }
344  *machine = EM_NONE;
345  return -1;
346}
347
348/* Return non-zero if FILE is a 64-bit ELF file,
349   zero if the file is not a 64-bit ELF file,
350   and -1 if the file is not accessible or doesn't exist.  */
351
352static int
353elf_64_file_p (const char *file, unsigned int *machine)
354{
355  Elf64_Ehdr header;
356  int fd;
357
358  fd = open (file, O_RDONLY);
359  if (fd < 0)
360    return -1;
361
362  if (read (fd, &header, sizeof (header)) != sizeof (header))
363    {
364      close (fd);
365      return 0;
366    }
367  close (fd);
368
369  return elf_64_header_p (&header, machine);
370}
371
372/* Accepts an integer PID; Returns true if the executable PID is
373   running is a 64-bit ELF file..  */
374
375int
376linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
377{
378  char file[PATH_MAX];
379
380  sprintf (file, "/proc/%d/exe", pid);
381  return elf_64_file_p (file, machine);
382}
383
384void
385linux_process_target::delete_lwp (lwp_info *lwp)
386{
387  struct thread_info *thr = get_lwp_thread (lwp);
388
389  threads_debug_printf ("deleting %ld", lwpid_of (thr));
390
391  remove_thread (thr);
392
393  low_delete_thread (lwp->arch_private);
394
395  delete lwp;
396}
397
398void
399linux_process_target::low_delete_thread (arch_lwp_info *info)
400{
401  /* Default implementation should be overridden if architecture-specific
402     info is being used.  */
403  gdb_assert (info == nullptr);
404}
405
406/* Open the /proc/PID/mem file for PROC.  */
407
408static void
409open_proc_mem_file (process_info *proc)
410{
411  gdb_assert (proc->priv->mem_fd == -1);
412
413  char filename[64];
414  xsnprintf (filename, sizeof filename, "/proc/%d/mem", proc->pid);
415
416  proc->priv->mem_fd
417    = gdb_open_cloexec (filename, O_RDWR | O_LARGEFILE, 0).release ();
418}
419
420process_info *
421linux_process_target::add_linux_process_no_mem_file (int pid, int attached)
422{
423  struct process_info *proc;
424
425  proc = add_process (pid, attached);
426  proc->priv = XCNEW (struct process_info_private);
427
428  proc->priv->arch_private = low_new_process ();
429  proc->priv->mem_fd = -1;
430
431  return proc;
432}
433
434
435process_info *
436linux_process_target::add_linux_process (int pid, int attached)
437{
438  process_info *proc = add_linux_process_no_mem_file (pid, attached);
439  open_proc_mem_file (proc);
440  return proc;
441}
442
443void
444linux_process_target::remove_linux_process (process_info *proc)
445{
446  if (proc->priv->mem_fd >= 0)
447    close (proc->priv->mem_fd);
448
449  this->low_delete_process (proc->priv->arch_private);
450
451  xfree (proc->priv);
452  proc->priv = nullptr;
453
454  remove_process (proc);
455}
456
457arch_process_info *
458linux_process_target::low_new_process ()
459{
460  return nullptr;
461}
462
463void
464linux_process_target::low_delete_process (arch_process_info *info)
465{
466  /* Default implementation must be overridden if architecture-specific
467     info exists.  */
468  gdb_assert (info == nullptr);
469}
470
471void
472linux_process_target::low_new_fork (process_info *parent, process_info *child)
473{
474  /* Nop.  */
475}
476
477void
478linux_process_target::arch_setup_thread (thread_info *thread)
479{
480  scoped_restore_current_thread restore_thread;
481  switch_to_thread (thread);
482
483  low_arch_setup ();
484}
485
486int
487linux_process_target::handle_extended_wait (lwp_info **orig_event_lwp,
488					    int wstat)
489{
490  client_state &cs = get_client_state ();
491  struct lwp_info *event_lwp = *orig_event_lwp;
492  int event = linux_ptrace_get_extended_event (wstat);
493  struct thread_info *event_thr = get_lwp_thread (event_lwp);
494  struct lwp_info *new_lwp;
495
496  gdb_assert (event_lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
497
498  /* All extended events we currently use are mid-syscall.  Only
499     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
500     you have to be using PTRACE_SEIZE to get that.  */
501  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
502
503  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
504      || (event == PTRACE_EVENT_CLONE))
505    {
506      ptid_t ptid;
507      unsigned long new_pid;
508      int ret, status;
509
510      /* Get the pid of the new lwp.  */
511      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
512	      &new_pid);
513
514      /* If we haven't already seen the new PID stop, wait for it now.  */
515      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
516	{
517	  /* The new child has a pending SIGSTOP.  We can't affect it until it
518	     hits the SIGSTOP, but we're already attached.  */
519
520	  ret = my_waitpid (new_pid, &status, __WALL);
521
522	  if (ret == -1)
523	    perror_with_name ("waiting for new child");
524	  else if (ret != new_pid)
525	    warning ("wait returned unexpected PID %d", ret);
526	  else if (!WIFSTOPPED (status))
527	    warning ("wait returned unexpected status 0x%x", status);
528	}
529
530      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
531	{
532	  struct process_info *parent_proc;
533	  struct process_info *child_proc;
534	  struct lwp_info *child_lwp;
535	  struct thread_info *child_thr;
536
537	  ptid = ptid_t (new_pid, new_pid);
538
539	  threads_debug_printf ("Got fork event from LWP %ld, "
540				"new child is %d",
541				ptid_of (event_thr).lwp (),
542				ptid.pid ());
543
544	  /* Add the new process to the tables and clone the breakpoint
545	     lists of the parent.  We need to do this even if the new process
546	     will be detached, since we will need the process object and the
547	     breakpoints to remove any breakpoints from memory when we
548	     detach, and the client side will access registers.  */
549	  child_proc = add_linux_process (new_pid, 0);
550	  gdb_assert (child_proc != NULL);
551	  child_lwp = add_lwp (ptid);
552	  gdb_assert (child_lwp != NULL);
553	  child_lwp->stopped = 1;
554	  child_lwp->must_set_ptrace_flags = 1;
555	  child_lwp->status_pending_p = 0;
556	  child_thr = get_lwp_thread (child_lwp);
557	  child_thr->last_resume_kind = resume_stop;
558	  child_thr->last_status.set_stopped (GDB_SIGNAL_0);
559
560	  /* If we're suspending all threads, leave this one suspended
561	     too.  If the fork/clone parent is stepping over a breakpoint,
562	     all other threads have been suspended already.  Leave the
563	     child suspended too.  */
564	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
565	      || event_lwp->bp_reinsert != 0)
566	    {
567	      threads_debug_printf ("leaving child suspended");
568	      child_lwp->suspended = 1;
569	    }
570
571	  parent_proc = get_thread_process (event_thr);
572	  child_proc->attached = parent_proc->attached;
573
574	  if (event_lwp->bp_reinsert != 0
575	      && supports_software_single_step ()
576	      && event == PTRACE_EVENT_VFORK)
577	    {
578	      /* If we leave single-step breakpoints there, child will
579		 hit it, so uninsert single-step breakpoints from parent
580		 (and child).  Once vfork child is done, reinsert
581		 them back to parent.  */
582	      uninsert_single_step_breakpoints (event_thr);
583	    }
584
585	  clone_all_breakpoints (child_thr, event_thr);
586
587	  target_desc_up tdesc = allocate_target_description ();
588	  copy_target_description (tdesc.get (), parent_proc->tdesc);
589	  child_proc->tdesc = tdesc.release ();
590
591	  /* Clone arch-specific process data.  */
592	  low_new_fork (parent_proc, child_proc);
593
594	  /* Save fork info in the parent thread.  */
595	  if (event == PTRACE_EVENT_FORK)
596	    event_lwp->waitstatus.set_forked (ptid);
597	  else if (event == PTRACE_EVENT_VFORK)
598	    event_lwp->waitstatus.set_vforked (ptid);
599
600	  /* The status_pending field contains bits denoting the
601	     extended event, so when the pending event is handled,
602	     the handler will look at lwp->waitstatus.  */
603	  event_lwp->status_pending_p = 1;
604	  event_lwp->status_pending = wstat;
605
606	  /* Link the threads until the parent event is passed on to
607	     higher layers.  */
608	  event_lwp->fork_relative = child_lwp;
609	  child_lwp->fork_relative = event_lwp;
610
611	  /* If the parent thread is doing step-over with single-step
612	     breakpoints, the list of single-step breakpoints are cloned
613	     from the parent's.  Remove them from the child process.
614	     In case of vfork, we'll reinsert them back once vforked
615	     child is done.  */
616	  if (event_lwp->bp_reinsert != 0
617	      && supports_software_single_step ())
618	    {
619	      /* The child process is forked and stopped, so it is safe
620		 to access its memory without stopping all other threads
621		 from other processes.  */
622	      delete_single_step_breakpoints (child_thr);
623
624	      gdb_assert (has_single_step_breakpoints (event_thr));
625	      gdb_assert (!has_single_step_breakpoints (child_thr));
626	    }
627
628	  /* Report the event.  */
629	  return 0;
630	}
631
632      threads_debug_printf
633	("Got clone event from LWP %ld, new child is LWP %ld",
634	 lwpid_of (event_thr), new_pid);
635
636      ptid = ptid_t (pid_of (event_thr), new_pid);
637      new_lwp = add_lwp (ptid);
638
639      /* Either we're going to immediately resume the new thread
640	 or leave it stopped.  resume_one_lwp is a nop if it
641	 thinks the thread is currently running, so set this first
642	 before calling resume_one_lwp.  */
643      new_lwp->stopped = 1;
644
645      /* If we're suspending all threads, leave this one suspended
646	 too.  If the fork/clone parent is stepping over a breakpoint,
647	 all other threads have been suspended already.  Leave the
648	 child suspended too.  */
649      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
650	  || event_lwp->bp_reinsert != 0)
651	new_lwp->suspended = 1;
652
653      /* Normally we will get the pending SIGSTOP.  But in some cases
654	 we might get another signal delivered to the group first.
655	 If we do get another signal, be sure not to lose it.  */
656      if (WSTOPSIG (status) != SIGSTOP)
657	{
658	  new_lwp->stop_expected = 1;
659	  new_lwp->status_pending_p = 1;
660	  new_lwp->status_pending = status;
661	}
662      else if (cs.report_thread_events)
663	{
664	  new_lwp->waitstatus.set_thread_created ();
665	  new_lwp->status_pending_p = 1;
666	  new_lwp->status_pending = status;
667	}
668
669#ifdef USE_THREAD_DB
670      thread_db_notice_clone (event_thr, ptid);
671#endif
672
673      /* Don't report the event.  */
674      return 1;
675    }
676  else if (event == PTRACE_EVENT_VFORK_DONE)
677    {
678      event_lwp->waitstatus.set_vfork_done ();
679
680      if (event_lwp->bp_reinsert != 0 && supports_software_single_step ())
681	{
682	  reinsert_single_step_breakpoints (event_thr);
683
684	  gdb_assert (has_single_step_breakpoints (event_thr));
685	}
686
687      /* Report the event.  */
688      return 0;
689    }
690  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
691    {
692      struct process_info *proc;
693      std::vector<int> syscalls_to_catch;
694      ptid_t event_ptid;
695      pid_t event_pid;
696
697      threads_debug_printf ("Got exec event from LWP %ld",
698			    lwpid_of (event_thr));
699
700      /* Get the event ptid.  */
701      event_ptid = ptid_of (event_thr);
702      event_pid = event_ptid.pid ();
703
704      /* Save the syscall list from the execing process.  */
705      proc = get_thread_process (event_thr);
706      syscalls_to_catch = std::move (proc->syscalls_to_catch);
707
708      /* Delete the execing process and all its threads.  */
709      mourn (proc);
710      switch_to_thread (nullptr);
711
712      /* Create a new process/lwp/thread.  */
713      proc = add_linux_process (event_pid, 0);
714      event_lwp = add_lwp (event_ptid);
715      event_thr = get_lwp_thread (event_lwp);
716      gdb_assert (current_thread == event_thr);
717      arch_setup_thread (event_thr);
718
719      /* Set the event status.  */
720      event_lwp->waitstatus.set_execd
721	(make_unique_xstrdup
722	   (linux_proc_pid_to_exec_file (lwpid_of (event_thr))));
723
724      /* Mark the exec status as pending.  */
725      event_lwp->stopped = 1;
726      event_lwp->status_pending_p = 1;
727      event_lwp->status_pending = wstat;
728      event_thr->last_resume_kind = resume_continue;
729      event_thr->last_status.set_ignore ();
730
731      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
732      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
733
734      /* Restore the list to catch.  Don't rely on the client, which is free
735	 to avoid sending a new list when the architecture doesn't change.
736	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
737      proc->syscalls_to_catch = std::move (syscalls_to_catch);
738
739      /* Report the event.  */
740      *orig_event_lwp = event_lwp;
741      return 0;
742    }
743
744  internal_error (_("unknown ptrace event %d"), event);
745}
746
747CORE_ADDR
748linux_process_target::get_pc (lwp_info *lwp)
749{
750  process_info *proc = get_thread_process (get_lwp_thread (lwp));
751  gdb_assert (!proc->starting_up);
752
753  if (!low_supports_breakpoints ())
754    return 0;
755
756  scoped_restore_current_thread restore_thread;
757  switch_to_thread (get_lwp_thread (lwp));
758
759  struct regcache *regcache = get_thread_regcache (current_thread, 1);
760  CORE_ADDR pc = low_get_pc (regcache);
761
762  threads_debug_printf ("pc is 0x%lx", (long) pc);
763
764  return pc;
765}
766
767void
768linux_process_target::get_syscall_trapinfo (lwp_info *lwp, int *sysno)
769{
770  struct regcache *regcache;
771
772  scoped_restore_current_thread restore_thread;
773  switch_to_thread (get_lwp_thread (lwp));
774
775  regcache = get_thread_regcache (current_thread, 1);
776  low_get_syscall_trapinfo (regcache, sysno);
777
778  threads_debug_printf ("get_syscall_trapinfo sysno %d", *sysno);
779}
780
781void
782linux_process_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
783{
784  /* By default, report an unknown system call number.  */
785  *sysno = UNKNOWN_SYSCALL;
786}
787
788bool
789linux_process_target::save_stop_reason (lwp_info *lwp)
790{
791  CORE_ADDR pc;
792  CORE_ADDR sw_breakpoint_pc;
793#if USE_SIGTRAP_SIGINFO
794  siginfo_t siginfo;
795#endif
796
797  if (!low_supports_breakpoints ())
798    return false;
799
800  process_info *proc = get_thread_process (get_lwp_thread (lwp));
801  if (proc->starting_up)
802    {
803      /* Claim we have the stop PC so that the caller doesn't try to
804	 fetch it itself.  */
805      return true;
806    }
807
808  pc = get_pc (lwp);
809  sw_breakpoint_pc = pc - low_decr_pc_after_break ();
810
811  /* breakpoint_at reads from the current thread.  */
812  scoped_restore_current_thread restore_thread;
813  switch_to_thread (get_lwp_thread (lwp));
814
815#if USE_SIGTRAP_SIGINFO
816  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
817	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
818    {
819      if (siginfo.si_signo == SIGTRAP)
820	{
821	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
822	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
823	    {
824	      /* The si_code is ambiguous on this arch -- check debug
825		 registers.  */
826	      if (!check_stopped_by_watchpoint (lwp))
827		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
828	    }
829	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
830	    {
831	      /* If we determine the LWP stopped for a SW breakpoint,
832		 trust it.  Particularly don't check watchpoint
833		 registers, because at least on s390, we'd find
834		 stopped-by-watchpoint as long as there's a watchpoint
835		 set.  */
836	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
837	    }
838	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
839	    {
840	      /* This can indicate either a hardware breakpoint or
841		 hardware watchpoint.  Check debug registers.  */
842	      if (!check_stopped_by_watchpoint (lwp))
843		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
844	    }
845	  else if (siginfo.si_code == TRAP_TRACE)
846	    {
847	      /* We may have single stepped an instruction that
848		 triggered a watchpoint.  In that case, on some
849		 architectures (such as x86), instead of TRAP_HWBKPT,
850		 si_code indicates TRAP_TRACE, and we need to check
851		 the debug registers separately.  */
852	      if (!check_stopped_by_watchpoint (lwp))
853		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
854	    }
855	}
856    }
857#else
858  /* We may have just stepped a breakpoint instruction.  E.g., in
859     non-stop mode, GDB first tells the thread A to step a range, and
860     then the user inserts a breakpoint inside the range.  In that
861     case we need to report the breakpoint PC.  */
862  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
863      && low_breakpoint_at (sw_breakpoint_pc))
864    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
865
866  if (hardware_breakpoint_inserted_here (pc))
867    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
868
869  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
870    check_stopped_by_watchpoint (lwp);
871#endif
872
873  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
874    {
875      threads_debug_printf
876	("%s stopped by software breakpoint",
877	 target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
878
879      /* Back up the PC if necessary.  */
880      if (pc != sw_breakpoint_pc)
881	{
882	  struct regcache *regcache
883	    = get_thread_regcache (current_thread, 1);
884	  low_set_pc (regcache, sw_breakpoint_pc);
885	}
886
887      /* Update this so we record the correct stop PC below.  */
888      pc = sw_breakpoint_pc;
889    }
890  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
891    threads_debug_printf
892      ("%s stopped by hardware breakpoint",
893       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
894  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
895    threads_debug_printf
896      ("%s stopped by hardware watchpoint",
897       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
898  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
899    threads_debug_printf
900      ("%s stopped by trace",
901       target_pid_to_str (ptid_of (get_lwp_thread (lwp))).c_str ());
902
903  lwp->stop_pc = pc;
904  return true;
905}
906
907lwp_info *
908linux_process_target::add_lwp (ptid_t ptid)
909{
910  lwp_info *lwp = new lwp_info;
911
912  lwp->thread = add_thread (ptid, lwp);
913
914  low_new_thread (lwp);
915
916  return lwp;
917}
918
919void
920linux_process_target::low_new_thread (lwp_info *info)
921{
922  /* Nop.  */
923}
924
925/* Callback to be used when calling fork_inferior, responsible for
926   actually initiating the tracing of the inferior.  */
927
928static void
929linux_ptrace_fun ()
930{
931  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
932	      (PTRACE_TYPE_ARG4) 0) < 0)
933    trace_start_error_with_name ("ptrace");
934
935  if (setpgid (0, 0) < 0)
936    trace_start_error_with_name ("setpgid");
937
938  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
939     stdout to stderr so that inferior i/o doesn't corrupt the connection.
940     Also, redirect stdin to /dev/null.  */
941  if (remote_connection_is_stdio ())
942    {
943      if (close (0) < 0)
944	trace_start_error_with_name ("close");
945      if (open ("/dev/null", O_RDONLY) < 0)
946	trace_start_error_with_name ("open");
947      if (dup2 (2, 1) < 0)
948	trace_start_error_with_name ("dup2");
949      if (write (2, "stdin/stdout redirected\n",
950		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
951	{
952	  /* Errors ignored.  */;
953	}
954    }
955}
956
957/* Start an inferior process and returns its pid.
958   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
959   are its arguments.  */
960
961int
962linux_process_target::create_inferior (const char *program,
963				       const std::vector<char *> &program_args)
964{
965  client_state &cs = get_client_state ();
966  struct lwp_info *new_lwp;
967  int pid;
968  ptid_t ptid;
969
970  {
971    maybe_disable_address_space_randomization restore_personality
972      (cs.disable_randomization);
973    std::string str_program_args = construct_inferior_arguments (program_args);
974
975    pid = fork_inferior (program,
976			 str_program_args.c_str (),
977			 get_environ ()->envp (), linux_ptrace_fun,
978			 NULL, NULL, NULL, NULL);
979  }
980
981  /* When spawning a new process, we can't open the mem file yet.  We
982     still have to nurse the process through the shell, and that execs
983     a couple times.  The address space a /proc/PID/mem file is
984     accessing is destroyed on exec.  */
985  process_info *proc = add_linux_process_no_mem_file (pid, 0);
986
987  ptid = ptid_t (pid, pid);
988  new_lwp = add_lwp (ptid);
989  new_lwp->must_set_ptrace_flags = 1;
990
991  post_fork_inferior (pid, program);
992
993  /* PROC is now past the shell running the program we want, so we can
994     open the /proc/PID/mem file.  */
995  open_proc_mem_file (proc);
996
997  return pid;
998}
999
1000/* Implement the post_create_inferior target_ops method.  */
1001
1002void
1003linux_process_target::post_create_inferior ()
1004{
1005  struct lwp_info *lwp = get_thread_lwp (current_thread);
1006
1007  low_arch_setup ();
1008
1009  if (lwp->must_set_ptrace_flags)
1010    {
1011      struct process_info *proc = current_process ();
1012      int options = linux_low_ptrace_options (proc->attached);
1013
1014      linux_enable_event_reporting (lwpid_of (current_thread), options);
1015      lwp->must_set_ptrace_flags = 0;
1016    }
1017}
1018
1019int
1020linux_process_target::attach_lwp (ptid_t ptid)
1021{
1022  struct lwp_info *new_lwp;
1023  int lwpid = ptid.lwp ();
1024
1025  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1026      != 0)
1027    return errno;
1028
1029  new_lwp = add_lwp (ptid);
1030
1031  /* We need to wait for SIGSTOP before being able to make the next
1032     ptrace call on this LWP.  */
1033  new_lwp->must_set_ptrace_flags = 1;
1034
1035  if (linux_proc_pid_is_stopped (lwpid))
1036    {
1037      threads_debug_printf ("Attached to a stopped process");
1038
1039      /* The process is definitely stopped.  It is in a job control
1040	 stop, unless the kernel predates the TASK_STOPPED /
1041	 TASK_TRACED distinction, in which case it might be in a
1042	 ptrace stop.  Make sure it is in a ptrace stop; from there we
1043	 can kill it, signal it, et cetera.
1044
1045	 First make sure there is a pending SIGSTOP.  Since we are
1046	 already attached, the process can not transition from stopped
1047	 to running without a PTRACE_CONT; so we know this signal will
1048	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
1049	 probably already in the queue (unless this kernel is old
1050	 enough to use TASK_STOPPED for ptrace stops); but since
1051	 SIGSTOP is not an RT signal, it can only be queued once.  */
1052      kill_lwp (lwpid, SIGSTOP);
1053
1054      /* Finally, resume the stopped process.  This will deliver the
1055	 SIGSTOP (or a higher priority signal, just like normal
1056	 PTRACE_ATTACH), which we'll catch later on.  */
1057      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1058    }
1059
1060  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1061     brings it to a halt.
1062
1063     There are several cases to consider here:
1064
1065     1) gdbserver has already attached to the process and is being notified
1066	of a new thread that is being created.
1067	In this case we should ignore that SIGSTOP and resume the
1068	process.  This is handled below by setting stop_expected = 1,
1069	and the fact that add_thread sets last_resume_kind ==
1070	resume_continue.
1071
1072     2) This is the first thread (the process thread), and we're attaching
1073	to it via attach_inferior.
1074	In this case we want the process thread to stop.
1075	This is handled by having linux_attach set last_resume_kind ==
1076	resume_stop after we return.
1077
1078	If the pid we are attaching to is also the tgid, we attach to and
1079	stop all the existing threads.  Otherwise, we attach to pid and
1080	ignore any other threads in the same group as this pid.
1081
1082     3) GDB is connecting to gdbserver and is requesting an enumeration of all
1083	existing threads.
1084	In this case we want the thread to stop.
1085	FIXME: This case is currently not properly handled.
1086	We should wait for the SIGSTOP but don't.  Things work apparently
1087	because enough time passes between when we ptrace (ATTACH) and when
1088	gdb makes the next ptrace call on the thread.
1089
1090     On the other hand, if we are currently trying to stop all threads, we
1091     should treat the new thread as if we had sent it a SIGSTOP.  This works
1092     because we are guaranteed that the add_lwp call above added us to the
1093     end of the list, and so the new thread has not yet reached
1094     wait_for_sigstop (but will).  */
1095  new_lwp->stop_expected = 1;
1096
1097  return 0;
1098}
1099
1100/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
1101   already attached.  Returns true if a new LWP is found, false
1102   otherwise.  */
1103
1104static int
1105attach_proc_task_lwp_callback (ptid_t ptid)
1106{
1107  /* Is this a new thread?  */
1108  if (find_thread_ptid (ptid) == NULL)
1109    {
1110      int lwpid = ptid.lwp ();
1111      int err;
1112
1113      threads_debug_printf ("Found new lwp %d", lwpid);
1114
1115      err = the_linux_target->attach_lwp (ptid);
1116
1117      /* Be quiet if we simply raced with the thread exiting.  EPERM
1118	 is returned if the thread's task still exists, and is marked
1119	 as exited or zombie, as well as other conditions, so in that
1120	 case, confirm the status in /proc/PID/status.  */
1121      if (err == ESRCH
1122	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1123	threads_debug_printf
1124	  ("Cannot attach to lwp %d: thread is gone (%d: %s)",
1125	   lwpid, err, safe_strerror (err));
1126      else if (err != 0)
1127	{
1128	  std::string reason
1129	    = linux_ptrace_attach_fail_reason_string (ptid, err);
1130
1131	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
1132	}
1133
1134      return 1;
1135    }
1136  return 0;
1137}
1138
1139static void async_file_mark (void);
1140
1141/* Attach to PID.  If PID is the tgid, attach to it and all
1142   of its threads.  */
1143
1144int
1145linux_process_target::attach (unsigned long pid)
1146{
1147  struct process_info *proc;
1148  struct thread_info *initial_thread;
1149  ptid_t ptid = ptid_t (pid, pid);
1150  int err;
1151
1152  /* Delay opening the /proc/PID/mem file until we've successfully
1153     attached.  */
1154  proc = add_linux_process_no_mem_file (pid, 1);
1155
1156  /* Attach to PID.  We will check for other threads
1157     soon.  */
1158  err = attach_lwp (ptid);
1159  if (err != 0)
1160    {
1161      this->remove_linux_process (proc);
1162
1163      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
1164      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
1165    }
1166
1167  open_proc_mem_file (proc);
1168
1169  /* Don't ignore the initial SIGSTOP if we just attached to this
1170     process.  It will be collected by wait shortly.  */
1171  initial_thread = find_thread_ptid (ptid_t (pid, pid));
1172  initial_thread->last_resume_kind = resume_stop;
1173
1174  /* We must attach to every LWP.  If /proc is mounted, use that to
1175     find them now.  On the one hand, the inferior may be using raw
1176     clone instead of using pthreads.  On the other hand, even if it
1177     is using pthreads, GDB may not be connected yet (thread_db needs
1178     to do symbol lookups, through qSymbol).  Also, thread_db walks
1179     structures in the inferior's address space to find the list of
1180     threads/LWPs, and those structures may well be corrupted.  Note
1181     that once thread_db is loaded, we'll still use it to list threads
1182     and associate pthread info with each LWP.  */
1183  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1184
1185  /* GDB will shortly read the xml target description for this
1186     process, to figure out the process' architecture.  But the target
1187     description is only filled in when the first process/thread in
1188     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
1189     that now, otherwise, if GDB is fast enough, it could read the
1190     target description _before_ that initial stop.  */
1191  if (non_stop)
1192    {
1193      struct lwp_info *lwp;
1194      int wstat, lwpid;
1195      ptid_t pid_ptid = ptid_t (pid);
1196
1197      lwpid = wait_for_event_filtered (pid_ptid, pid_ptid, &wstat, __WALL);
1198      gdb_assert (lwpid > 0);
1199
1200      lwp = find_lwp_pid (ptid_t (lwpid));
1201
1202      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1203	{
1204	  lwp->status_pending_p = 1;
1205	  lwp->status_pending = wstat;
1206	}
1207
1208      initial_thread->last_resume_kind = resume_continue;
1209
1210      async_file_mark ();
1211
1212      gdb_assert (proc->tdesc != NULL);
1213    }
1214
1215  return 0;
1216}
1217
1218static int
1219last_thread_of_process_p (int pid)
1220{
1221  bool seen_one = false;
1222
1223  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
1224    {
1225      if (!seen_one)
1226	{
1227	  /* This is the first thread of this process we see.  */
1228	  seen_one = true;
1229	  return false;
1230	}
1231      else
1232	{
1233	  /* This is the second thread of this process we see.  */
1234	  return true;
1235	}
1236    });
1237
1238  return thread == NULL;
1239}
1240
1241/* Kill LWP.  */
1242
1243static void
1244linux_kill_one_lwp (struct lwp_info *lwp)
1245{
1246  struct thread_info *thr = get_lwp_thread (lwp);
1247  int pid = lwpid_of (thr);
1248
1249  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
1250     there is no signal context, and ptrace(PTRACE_KILL) (or
1251     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1252     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
1253     alternative is to kill with SIGKILL.  We only need one SIGKILL
1254     per process, not one for each thread.  But since we still support
1255     support debugging programs using raw clone without CLONE_THREAD,
1256     we send one for each thread.  For years, we used PTRACE_KILL
1257     only, so we're being a bit paranoid about some old kernels where
1258     PTRACE_KILL might work better (dubious if there are any such, but
1259     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1260     second, and so we're fine everywhere.  */
1261
1262  errno = 0;
1263  kill_lwp (pid, SIGKILL);
1264  if (debug_threads)
1265    {
1266      int save_errno = errno;
1267
1268      threads_debug_printf ("kill_lwp (SIGKILL) %s, 0, 0 (%s)",
1269			    target_pid_to_str (ptid_of (thr)).c_str (),
1270			    save_errno ? safe_strerror (save_errno) : "OK");
1271    }
1272
1273  errno = 0;
1274  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1275  if (debug_threads)
1276    {
1277      int save_errno = errno;
1278
1279      threads_debug_printf ("PTRACE_KILL %s, 0, 0 (%s)",
1280			    target_pid_to_str (ptid_of (thr)).c_str (),
1281			    save_errno ? safe_strerror (save_errno) : "OK");
1282    }
1283}
1284
1285/* Kill LWP and wait for it to die.  */
1286
1287static void
1288kill_wait_lwp (struct lwp_info *lwp)
1289{
1290  struct thread_info *thr = get_lwp_thread (lwp);
1291  int pid = ptid_of (thr).pid ();
1292  int lwpid = ptid_of (thr).lwp ();
1293  int wstat;
1294  int res;
1295
1296  threads_debug_printf ("killing lwp %d, for pid: %d", lwpid, pid);
1297
1298  do
1299    {
1300      linux_kill_one_lwp (lwp);
1301
1302      /* Make sure it died.  Notes:
1303
1304	 - The loop is most likely unnecessary.
1305
1306	 - We don't use wait_for_event as that could delete lwps
1307	   while we're iterating over them.  We're not interested in
1308	   any pending status at this point, only in making sure all
1309	   wait status on the kernel side are collected until the
1310	   process is reaped.
1311
1312	 - We don't use __WALL here as the __WALL emulation relies on
1313	   SIGCHLD, and killing a stopped process doesn't generate
1314	   one, nor an exit status.
1315      */
1316      res = my_waitpid (lwpid, &wstat, 0);
1317      if (res == -1 && errno == ECHILD)
1318	res = my_waitpid (lwpid, &wstat, __WCLONE);
1319    } while (res > 0 && WIFSTOPPED (wstat));
1320
1321  /* Even if it was stopped, the child may have already disappeared.
1322     E.g., if it was killed by SIGKILL.  */
1323  if (res < 0 && errno != ECHILD)
1324    perror_with_name ("kill_wait_lwp");
1325}
1326
1327/* Callback for `for_each_thread'.  Kills an lwp of a given process,
1328   except the leader.  */
1329
1330static void
1331kill_one_lwp_callback (thread_info *thread, int pid)
1332{
1333  struct lwp_info *lwp = get_thread_lwp (thread);
1334
1335  /* We avoid killing the first thread here, because of a Linux kernel (at
1336     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1337     the children get a chance to be reaped, it will remain a zombie
1338     forever.  */
1339
1340  if (lwpid_of (thread) == pid)
1341    {
1342      threads_debug_printf ("is last of process %s",
1343			    target_pid_to_str (thread->id).c_str ());
1344      return;
1345    }
1346
1347  kill_wait_lwp (lwp);
1348}
1349
1350int
1351linux_process_target::kill (process_info *process)
1352{
1353  int pid = process->pid;
1354
1355  /* If we're killing a running inferior, make sure it is stopped
1356     first, as PTRACE_KILL will not work otherwise.  */
1357  stop_all_lwps (0, NULL);
1358
1359  for_each_thread (pid, [&] (thread_info *thread)
1360    {
1361      kill_one_lwp_callback (thread, pid);
1362    });
1363
1364  /* See the comment in linux_kill_one_lwp.  We did not kill the first
1365     thread in the list, so do so now.  */
1366  lwp_info *lwp = find_lwp_pid (ptid_t (pid));
1367
1368  if (lwp == NULL)
1369    threads_debug_printf ("cannot find lwp for pid: %d", pid);
1370  else
1371    kill_wait_lwp (lwp);
1372
1373  mourn (process);
1374
1375  /* Since we presently can only stop all lwps of all processes, we
1376     need to unstop lwps of other processes.  */
1377  unstop_all_lwps (0, NULL);
1378  return 0;
1379}
1380
1381/* Get pending signal of THREAD, for detaching purposes.  This is the
1382   signal the thread last stopped for, which we need to deliver to the
1383   thread when detaching, otherwise, it'd be suppressed/lost.  */
1384
1385static int
1386get_detach_signal (struct thread_info *thread)
1387{
1388  client_state &cs = get_client_state ();
1389  enum gdb_signal signo = GDB_SIGNAL_0;
1390  int status;
1391  struct lwp_info *lp = get_thread_lwp (thread);
1392
1393  if (lp->status_pending_p)
1394    status = lp->status_pending;
1395  else
1396    {
1397      /* If the thread had been suspended by gdbserver, and it stopped
1398	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
1399	 want to deliver that SIGSTOP.  */
1400      if (thread->last_status.kind () != TARGET_WAITKIND_STOPPED
1401	  || thread->last_status.sig () == GDB_SIGNAL_0)
1402	return 0;
1403
1404      /* Otherwise, we may need to deliver the signal we
1405	 intercepted.  */
1406      status = lp->last_status;
1407    }
1408
1409  if (!WIFSTOPPED (status))
1410    {
1411      threads_debug_printf ("lwp %s hasn't stopped: no pending signal",
1412			    target_pid_to_str (ptid_of (thread)).c_str ());
1413      return 0;
1414    }
1415
1416  /* Extended wait statuses aren't real SIGTRAPs.  */
1417  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1418    {
1419      threads_debug_printf ("lwp %s had stopped with extended "
1420			    "status: no pending signal",
1421			    target_pid_to_str (ptid_of (thread)).c_str ());
1422      return 0;
1423    }
1424
1425  signo = gdb_signal_from_host (WSTOPSIG (status));
1426
1427  if (cs.program_signals_p && !cs.program_signals[signo])
1428    {
1429      threads_debug_printf ("lwp %s had signal %s, but it is in nopass state",
1430			    target_pid_to_str (ptid_of (thread)).c_str (),
1431			    gdb_signal_to_string (signo));
1432      return 0;
1433    }
1434  else if (!cs.program_signals_p
1435	   /* If we have no way to know which signals GDB does not
1436	      want to have passed to the program, assume
1437	      SIGTRAP/SIGINT, which is GDB's default.  */
1438	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1439    {
1440      threads_debug_printf ("lwp %s had signal %s, "
1441			    "but we don't know if we should pass it. "
1442			    "Default to not.",
1443			    target_pid_to_str (ptid_of (thread)).c_str (),
1444			    gdb_signal_to_string (signo));
1445      return 0;
1446    }
1447  else
1448    {
1449      threads_debug_printf ("lwp %s has pending signal %s: delivering it",
1450			    target_pid_to_str (ptid_of (thread)).c_str (),
1451			    gdb_signal_to_string (signo));
1452
1453      return WSTOPSIG (status);
1454    }
1455}
1456
1457void
1458linux_process_target::detach_one_lwp (lwp_info *lwp)
1459{
1460  struct thread_info *thread = get_lwp_thread (lwp);
1461  int sig;
1462  int lwpid;
1463
1464  /* If there is a pending SIGSTOP, get rid of it.  */
1465  if (lwp->stop_expected)
1466    {
1467      threads_debug_printf ("Sending SIGCONT to %s",
1468			    target_pid_to_str (ptid_of (thread)).c_str ());
1469
1470      kill_lwp (lwpid_of (thread), SIGCONT);
1471      lwp->stop_expected = 0;
1472    }
1473
1474  /* Pass on any pending signal for this thread.  */
1475  sig = get_detach_signal (thread);
1476
1477  /* Preparing to resume may try to write registers, and fail if the
1478     lwp is zombie.  If that happens, ignore the error.  We'll handle
1479     it below, when detach fails with ESRCH.  */
1480  try
1481    {
1482      /* Flush any pending changes to the process's registers.  */
1483      regcache_invalidate_thread (thread);
1484
1485      /* Finally, let it resume.  */
1486      low_prepare_to_resume (lwp);
1487    }
1488  catch (const gdb_exception_error &ex)
1489    {
1490      if (!check_ptrace_stopped_lwp_gone (lwp))
1491	throw;
1492    }
1493
1494  lwpid = lwpid_of (thread);
1495  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1496	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
1497    {
1498      int save_errno = errno;
1499
1500      /* We know the thread exists, so ESRCH must mean the lwp is
1501	 zombie.  This can happen if one of the already-detached
1502	 threads exits the whole thread group.  In that case we're
1503	 still attached, and must reap the lwp.  */
1504      if (save_errno == ESRCH)
1505	{
1506	  int ret, status;
1507
1508	  ret = my_waitpid (lwpid, &status, __WALL);
1509	  if (ret == -1)
1510	    {
1511	      warning (_("Couldn't reap LWP %d while detaching: %s"),
1512		       lwpid, safe_strerror (errno));
1513	    }
1514	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1515	    {
1516	      warning (_("Reaping LWP %d while detaching "
1517			 "returned unexpected status 0x%x"),
1518		       lwpid, status);
1519	    }
1520	}
1521      else
1522	{
1523	  error (_("Can't detach %s: %s"),
1524		 target_pid_to_str (ptid_of (thread)).c_str (),
1525		 safe_strerror (save_errno));
1526	}
1527    }
1528  else
1529    threads_debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)",
1530			  target_pid_to_str (ptid_of (thread)).c_str (),
1531			  strsignal (sig));
1532
1533  delete_lwp (lwp);
1534}
1535
1536int
1537linux_process_target::detach (process_info *process)
1538{
1539  struct lwp_info *main_lwp;
1540
1541  /* As there's a step over already in progress, let it finish first,
1542     otherwise nesting a stabilize_threads operation on top gets real
1543     messy.  */
1544  complete_ongoing_step_over ();
1545
1546  /* Stop all threads before detaching.  First, ptrace requires that
1547     the thread is stopped to successfully detach.  Second, thread_db
1548     may need to uninstall thread event breakpoints from memory, which
1549     only works with a stopped process anyway.  */
1550  stop_all_lwps (0, NULL);
1551
1552#ifdef USE_THREAD_DB
1553  thread_db_detach (process);
1554#endif
1555
1556  /* Stabilize threads (move out of jump pads).  */
1557  target_stabilize_threads ();
1558
1559  /* Detach from the clone lwps first.  If the thread group exits just
1560     while we're detaching, we must reap the clone lwps before we're
1561     able to reap the leader.  */
1562  for_each_thread (process->pid, [this] (thread_info *thread)
1563    {
1564      /* We don't actually detach from the thread group leader just yet.
1565	 If the thread group exits, we must reap the zombie clone lwps
1566	 before we're able to reap the leader.  */
1567      if (thread->id.pid () == thread->id.lwp ())
1568	return;
1569
1570      lwp_info *lwp = get_thread_lwp (thread);
1571      detach_one_lwp (lwp);
1572    });
1573
1574  main_lwp = find_lwp_pid (ptid_t (process->pid));
1575  detach_one_lwp (main_lwp);
1576
1577  mourn (process);
1578
1579  /* Since we presently can only stop all lwps of all processes, we
1580     need to unstop lwps of other processes.  */
1581  unstop_all_lwps (0, NULL);
1582  return 0;
1583}
1584
1585/* Remove all LWPs that belong to process PROC from the lwp list.  */
1586
1587void
1588linux_process_target::mourn (process_info *process)
1589{
1590#ifdef USE_THREAD_DB
1591  thread_db_mourn (process);
1592#endif
1593
1594  for_each_thread (process->pid, [this] (thread_info *thread)
1595    {
1596      delete_lwp (get_thread_lwp (thread));
1597    });
1598
1599  this->remove_linux_process (process);
1600}
1601
1602void
1603linux_process_target::join (int pid)
1604{
1605  int status, ret;
1606
1607  do {
1608    ret = my_waitpid (pid, &status, 0);
1609    if (WIFEXITED (status) || WIFSIGNALED (status))
1610      break;
1611  } while (ret != -1 || errno != ECHILD);
1612}
1613
1614/* Return true if the given thread is still alive.  */
1615
1616bool
1617linux_process_target::thread_alive (ptid_t ptid)
1618{
1619  struct lwp_info *lwp = find_lwp_pid (ptid);
1620
1621  /* We assume we always know if a thread exits.  If a whole process
1622     exited but we still haven't been able to report it to GDB, we'll
1623     hold on to the last lwp of the dead process.  */
1624  if (lwp != NULL)
1625    return !lwp_is_marked_dead (lwp);
1626  else
1627    return 0;
1628}
1629
1630bool
1631linux_process_target::thread_still_has_status_pending (thread_info *thread)
1632{
1633  struct lwp_info *lp = get_thread_lwp (thread);
1634
1635  if (!lp->status_pending_p)
1636    return 0;
1637
1638  if (thread->last_resume_kind != resume_stop
1639      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1640	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1641    {
1642      CORE_ADDR pc;
1643      int discard = 0;
1644
1645      gdb_assert (lp->last_status != 0);
1646
1647      pc = get_pc (lp);
1648
1649      scoped_restore_current_thread restore_thread;
1650      switch_to_thread (thread);
1651
1652      if (pc != lp->stop_pc)
1653	{
1654	  threads_debug_printf ("PC of %ld changed",
1655				lwpid_of (thread));
1656	  discard = 1;
1657	}
1658
1659#if !USE_SIGTRAP_SIGINFO
1660      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1661	       && !low_breakpoint_at (pc))
1662	{
1663	  threads_debug_printf ("previous SW breakpoint of %ld gone",
1664				lwpid_of (thread));
1665	  discard = 1;
1666	}
1667      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1668	       && !hardware_breakpoint_inserted_here (pc))
1669	{
1670	  threads_debug_printf ("previous HW breakpoint of %ld gone",
1671				lwpid_of (thread));
1672	  discard = 1;
1673	}
1674#endif
1675
1676      if (discard)
1677	{
1678	  threads_debug_printf ("discarding pending breakpoint status");
1679	  lp->status_pending_p = 0;
1680	  return 0;
1681	}
1682    }
1683
1684  return 1;
1685}
1686
1687/* Returns true if LWP is resumed from the client's perspective.  */
1688
1689static int
1690lwp_resumed (struct lwp_info *lwp)
1691{
1692  struct thread_info *thread = get_lwp_thread (lwp);
1693
1694  if (thread->last_resume_kind != resume_stop)
1695    return 1;
1696
1697  /* Did gdb send us a `vCont;t', but we haven't reported the
1698     corresponding stop to gdb yet?  If so, the thread is still
1699     resumed/running from gdb's perspective.  */
1700  if (thread->last_resume_kind == resume_stop
1701      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
1702    return 1;
1703
1704  return 0;
1705}
1706
1707bool
1708linux_process_target::status_pending_p_callback (thread_info *thread,
1709						 ptid_t ptid)
1710{
1711  struct lwp_info *lp = get_thread_lwp (thread);
1712
1713  /* Check if we're only interested in events from a specific process
1714     or a specific LWP.  */
1715  if (!thread->id.matches (ptid))
1716    return 0;
1717
1718  if (!lwp_resumed (lp))
1719    return 0;
1720
1721  if (lp->status_pending_p
1722      && !thread_still_has_status_pending (thread))
1723    {
1724      resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1725      return 0;
1726    }
1727
1728  return lp->status_pending_p;
1729}
1730
1731struct lwp_info *
1732find_lwp_pid (ptid_t ptid)
1733{
1734  long lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1735  thread_info *thread = find_thread ([lwp] (thread_info *thr_arg)
1736    {
1737      return thr_arg->id.lwp () == lwp;
1738    });
1739
1740  if (thread == NULL)
1741    return NULL;
1742
1743  return get_thread_lwp (thread);
1744}
1745
1746/* Return the number of known LWPs in the tgid given by PID.  */
1747
1748static int
1749num_lwps (int pid)
1750{
1751  int count = 0;
1752
1753  for_each_thread (pid, [&] (thread_info *thread)
1754    {
1755      count++;
1756    });
1757
1758  return count;
1759}
1760
1761/* See nat/linux-nat.h.  */
1762
1763struct lwp_info *
1764iterate_over_lwps (ptid_t filter,
1765		   gdb::function_view<iterate_over_lwps_ftype> callback)
1766{
1767  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1768    {
1769      lwp_info *lwp = get_thread_lwp (thr_arg);
1770
1771      return callback (lwp);
1772    });
1773
1774  if (thread == NULL)
1775    return NULL;
1776
1777  return get_thread_lwp (thread);
1778}
1779
1780void
1781linux_process_target::check_zombie_leaders ()
1782{
1783  for_each_process ([this] (process_info *proc)
1784    {
1785      pid_t leader_pid = pid_of (proc);
1786      lwp_info *leader_lp = find_lwp_pid (ptid_t (leader_pid));
1787
1788      threads_debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1789			    "num_lwps=%d, zombie=%d",
1790			    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1791			    linux_proc_pid_is_zombie (leader_pid));
1792
1793      if (leader_lp != NULL && !leader_lp->stopped
1794	  /* Check if there are other threads in the group, as we may
1795	     have raced with the inferior simply exiting.  Note this
1796	     isn't a watertight check.  If the inferior is
1797	     multi-threaded and is exiting, it may be we see the
1798	     leader as zombie before we reap all the non-leader
1799	     threads.  See comments below.  */
1800	  && !last_thread_of_process_p (leader_pid)
1801	  && linux_proc_pid_is_zombie (leader_pid))
1802	{
1803	  /* A zombie leader in a multi-threaded program can mean one
1804	     of three things:
1805
1806	     #1 - Only the leader exited, not the whole program, e.g.,
1807	     with pthread_exit.  Since we can't reap the leader's exit
1808	     status until all other threads are gone and reaped too,
1809	     we want to delete the zombie leader right away, as it
1810	     can't be debugged, we can't read its registers, etc.
1811	     This is the main reason we check for zombie leaders
1812	     disappearing.
1813
1814	     #2 - The whole thread-group/process exited (a group exit,
1815	     via e.g. exit(3), and there is (or will be shortly) an
1816	     exit reported for each thread in the process, and then
1817	     finally an exit for the leader once the non-leaders are
1818	     reaped.
1819
1820	     #3 - There are 3 or more threads in the group, and a
1821	     thread other than the leader exec'd.  See comments on
1822	     exec events at the top of the file.
1823
1824	     Ideally we would never delete the leader for case #2.
1825	     Instead, we want to collect the exit status of each
1826	     non-leader thread, and then finally collect the exit
1827	     status of the leader as normal and use its exit code as
1828	     whole-process exit code.  Unfortunately, there's no
1829	     race-free way to distinguish cases #1 and #2.  We can't
1830	     assume the exit events for the non-leaders threads are
1831	     already pending in the kernel, nor can we assume the
1832	     non-leader threads are in zombie state already.  Between
1833	     the leader becoming zombie and the non-leaders exiting
1834	     and becoming zombie themselves, there's a small time
1835	     window, so such a check would be racy.  Temporarily
1836	     pausing all threads and checking to see if all threads
1837	     exit or not before re-resuming them would work in the
1838	     case that all threads are running right now, but it
1839	     wouldn't work if some thread is currently already
1840	     ptrace-stopped, e.g., due to scheduler-locking.
1841
1842	     So what we do is we delete the leader anyhow, and then
1843	     later on when we see its exit status, we re-add it back.
1844	     We also make sure that we only report a whole-process
1845	     exit when we see the leader exiting, as opposed to when
1846	     the last LWP in the LWP list exits, which can be a
1847	     non-leader if we deleted the leader here.  */
1848	  threads_debug_printf ("Thread group leader %d zombie "
1849				"(it exited, or another thread execd), "
1850				"deleting it.",
1851				leader_pid);
1852	  delete_lwp (leader_lp);
1853	}
1854    });
1855}
1856
1857/* Callback for `find_thread'.  Returns the first LWP that is not
1858   stopped.  */
1859
1860static bool
1861not_stopped_callback (thread_info *thread, ptid_t filter)
1862{
1863  if (!thread->id.matches (filter))
1864    return false;
1865
1866  lwp_info *lwp = get_thread_lwp (thread);
1867
1868  return !lwp->stopped;
1869}
1870
1871/* Increment LWP's suspend count.  */
1872
1873static void
1874lwp_suspended_inc (struct lwp_info *lwp)
1875{
1876  lwp->suspended++;
1877
1878  if (lwp->suspended > 4)
1879    threads_debug_printf
1880      ("LWP %ld has a suspiciously high suspend count, suspended=%d",
1881       lwpid_of (get_lwp_thread (lwp)), lwp->suspended);
1882}
1883
1884/* Decrement LWP's suspend count.  */
1885
1886static void
1887lwp_suspended_decr (struct lwp_info *lwp)
1888{
1889  lwp->suspended--;
1890
1891  if (lwp->suspended < 0)
1892    {
1893      struct thread_info *thread = get_lwp_thread (lwp);
1894
1895      internal_error ("unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1896		      lwp->suspended);
1897    }
1898}
1899
1900/* This function should only be called if the LWP got a SIGTRAP.
1901
1902   Handle any tracepoint steps or hits.  Return true if a tracepoint
1903   event was handled, 0 otherwise.  */
1904
1905static int
1906handle_tracepoints (struct lwp_info *lwp)
1907{
1908  struct thread_info *tinfo = get_lwp_thread (lwp);
1909  int tpoint_related_event = 0;
1910
1911  gdb_assert (lwp->suspended == 0);
1912
1913  /* If this tracepoint hit causes a tracing stop, we'll immediately
1914     uninsert tracepoints.  To do this, we temporarily pause all
1915     threads, unpatch away, and then unpause threads.  We need to make
1916     sure the unpausing doesn't resume LWP too.  */
1917  lwp_suspended_inc (lwp);
1918
1919  /* And we need to be sure that any all-threads-stopping doesn't try
1920     to move threads out of the jump pads, as it could deadlock the
1921     inferior (LWP could be in the jump pad, maybe even holding the
1922     lock.)  */
1923
1924  /* Do any necessary step collect actions.  */
1925  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1926
1927  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1928
1929  /* See if we just hit a tracepoint and do its main collect
1930     actions.  */
1931  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1932
1933  lwp_suspended_decr (lwp);
1934
1935  gdb_assert (lwp->suspended == 0);
1936  gdb_assert (!stabilizing_threads
1937	      || (lwp->collecting_fast_tracepoint
1938		  != fast_tpoint_collect_result::not_collecting));
1939
1940  if (tpoint_related_event)
1941    {
1942      threads_debug_printf ("got a tracepoint event");
1943      return 1;
1944    }
1945
1946  return 0;
1947}
1948
1949fast_tpoint_collect_result
1950linux_process_target::linux_fast_tracepoint_collecting
1951  (lwp_info *lwp, fast_tpoint_collect_status *status)
1952{
1953  CORE_ADDR thread_area;
1954  struct thread_info *thread = get_lwp_thread (lwp);
1955
1956  /* Get the thread area address.  This is used to recognize which
1957     thread is which when tracing with the in-process agent library.
1958     We don't read anything from the address, and treat it as opaque;
1959     it's the address itself that we assume is unique per-thread.  */
1960  if (low_get_thread_area (lwpid_of (thread), &thread_area) == -1)
1961    return fast_tpoint_collect_result::not_collecting;
1962
1963  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1964}
1965
1966int
1967linux_process_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
1968{
1969  return -1;
1970}
1971
1972bool
1973linux_process_target::maybe_move_out_of_jump_pad (lwp_info *lwp, int *wstat)
1974{
1975  scoped_restore_current_thread restore_thread;
1976  switch_to_thread (get_lwp_thread (lwp));
1977
1978  if ((wstat == NULL
1979       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1980      && supports_fast_tracepoints ()
1981      && agent_loaded_p ())
1982    {
1983      struct fast_tpoint_collect_status status;
1984
1985      threads_debug_printf
1986	("Checking whether LWP %ld needs to move out of the jump pad.",
1987	 lwpid_of (current_thread));
1988
1989      fast_tpoint_collect_result r
1990	= linux_fast_tracepoint_collecting (lwp, &status);
1991
1992      if (wstat == NULL
1993	  || (WSTOPSIG (*wstat) != SIGILL
1994	      && WSTOPSIG (*wstat) != SIGFPE
1995	      && WSTOPSIG (*wstat) != SIGSEGV
1996	      && WSTOPSIG (*wstat) != SIGBUS))
1997	{
1998	  lwp->collecting_fast_tracepoint = r;
1999
2000	  if (r != fast_tpoint_collect_result::not_collecting)
2001	    {
2002	      if (r == fast_tpoint_collect_result::before_insn
2003		  && lwp->exit_jump_pad_bkpt == NULL)
2004		{
2005		  /* Haven't executed the original instruction yet.
2006		     Set breakpoint there, and wait till it's hit,
2007		     then single-step until exiting the jump pad.  */
2008		  lwp->exit_jump_pad_bkpt
2009		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2010		}
2011
2012	      threads_debug_printf
2013		("Checking whether LWP %ld needs to move out of the jump pad..."
2014		 " it does", lwpid_of (current_thread));
2015
2016	      return true;
2017	    }
2018	}
2019      else
2020	{
2021	  /* If we get a synchronous signal while collecting, *and*
2022	     while executing the (relocated) original instruction,
2023	     reset the PC to point at the tpoint address, before
2024	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
2025	     report the signal to GDB, and pray for the best.  */
2026
2027	  lwp->collecting_fast_tracepoint
2028	    = fast_tpoint_collect_result::not_collecting;
2029
2030	  if (r != fast_tpoint_collect_result::not_collecting
2031	      && (status.adjusted_insn_addr <= lwp->stop_pc
2032		  && lwp->stop_pc < status.adjusted_insn_addr_end))
2033	    {
2034	      siginfo_t info;
2035	      struct regcache *regcache;
2036
2037	      /* The si_addr on a few signals references the address
2038		 of the faulting instruction.  Adjust that as
2039		 well.  */
2040	      if ((WSTOPSIG (*wstat) == SIGILL
2041		   || WSTOPSIG (*wstat) == SIGFPE
2042		   || WSTOPSIG (*wstat) == SIGBUS
2043		   || WSTOPSIG (*wstat) == SIGSEGV)
2044		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2045			     (PTRACE_TYPE_ARG3) 0, &info) == 0
2046		  /* Final check just to make sure we don't clobber
2047		     the siginfo of non-kernel-sent signals.  */
2048		  && (uintptr_t) info.si_addr == lwp->stop_pc)
2049		{
2050		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2051		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2052			  (PTRACE_TYPE_ARG3) 0, &info);
2053		}
2054
2055	      regcache = get_thread_regcache (current_thread, 1);
2056	      low_set_pc (regcache, status.tpoint_addr);
2057	      lwp->stop_pc = status.tpoint_addr;
2058
2059	      /* Cancel any fast tracepoint lock this thread was
2060		 holding.  */
2061	      force_unlock_trace_buffer ();
2062	    }
2063
2064	  if (lwp->exit_jump_pad_bkpt != NULL)
2065	    {
2066	      threads_debug_printf
2067		("Cancelling fast exit-jump-pad: removing bkpt."
2068		 "stopping all threads momentarily.");
2069
2070	      stop_all_lwps (1, lwp);
2071
2072	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
2073	      lwp->exit_jump_pad_bkpt = NULL;
2074
2075	      unstop_all_lwps (1, lwp);
2076
2077	      gdb_assert (lwp->suspended >= 0);
2078	    }
2079	}
2080    }
2081
2082  threads_debug_printf
2083    ("Checking whether LWP %ld needs to move out of the jump pad... no",
2084     lwpid_of (current_thread));
2085
2086  return false;
2087}
2088
2089/* Enqueue one signal in the "signals to report later when out of the
2090   jump pad" list.  */
2091
2092static void
2093enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2094{
2095  struct thread_info *thread = get_lwp_thread (lwp);
2096
2097  threads_debug_printf ("Deferring signal %d for LWP %ld.",
2098			WSTOPSIG (*wstat), lwpid_of (thread));
2099
2100  if (debug_threads)
2101    {
2102      for (const auto &sig : lwp->pending_signals_to_report)
2103	threads_debug_printf ("   Already queued %d", sig.signal);
2104
2105      threads_debug_printf ("   (no more currently queued signals)");
2106    }
2107
2108  /* Don't enqueue non-RT signals if they are already in the deferred
2109     queue.  (SIGSTOP being the easiest signal to see ending up here
2110     twice)  */
2111  if (WSTOPSIG (*wstat) < __SIGRTMIN)
2112    {
2113      for (const auto &sig : lwp->pending_signals_to_report)
2114	{
2115	  if (sig.signal == WSTOPSIG (*wstat))
2116	    {
2117	      threads_debug_printf
2118		("Not requeuing already queued non-RT signal %d for LWP %ld",
2119		 sig.signal, lwpid_of (thread));
2120	      return;
2121	    }
2122	}
2123    }
2124
2125  lwp->pending_signals_to_report.emplace_back (WSTOPSIG (*wstat));
2126
2127  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2128	  &lwp->pending_signals_to_report.back ().info);
2129}
2130
2131/* Dequeue one signal from the "signals to report later when out of
2132   the jump pad" list.  */
2133
2134static int
2135dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2136{
2137  struct thread_info *thread = get_lwp_thread (lwp);
2138
2139  if (!lwp->pending_signals_to_report.empty ())
2140    {
2141      const pending_signal &p_sig = lwp->pending_signals_to_report.front ();
2142
2143      *wstat = W_STOPCODE (p_sig.signal);
2144      if (p_sig.info.si_signo != 0)
2145	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2146		&p_sig.info);
2147
2148      lwp->pending_signals_to_report.pop_front ();
2149
2150      threads_debug_printf ("Reporting deferred signal %d for LWP %ld.",
2151			    WSTOPSIG (*wstat), lwpid_of (thread));
2152
2153      if (debug_threads)
2154	{
2155	  for (const auto &sig : lwp->pending_signals_to_report)
2156	    threads_debug_printf ("   Still queued %d", sig.signal);
2157
2158	  threads_debug_printf ("   (no more queued signals)");
2159	}
2160
2161      return 1;
2162    }
2163
2164  return 0;
2165}
2166
2167bool
2168linux_process_target::check_stopped_by_watchpoint (lwp_info *child)
2169{
2170  scoped_restore_current_thread restore_thread;
2171  switch_to_thread (get_lwp_thread (child));
2172
2173  if (low_stopped_by_watchpoint ())
2174    {
2175      child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2176      child->stopped_data_address = low_stopped_data_address ();
2177    }
2178
2179  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2180}
2181
2182bool
2183linux_process_target::low_stopped_by_watchpoint ()
2184{
2185  return false;
2186}
2187
2188CORE_ADDR
2189linux_process_target::low_stopped_data_address ()
2190{
2191  return 0;
2192}
2193
2194/* Return the ptrace options that we want to try to enable.  */
2195
2196static int
2197linux_low_ptrace_options (int attached)
2198{
2199  client_state &cs = get_client_state ();
2200  int options = 0;
2201
2202  if (!attached)
2203    options |= PTRACE_O_EXITKILL;
2204
2205  if (cs.report_fork_events)
2206    options |= PTRACE_O_TRACEFORK;
2207
2208  if (cs.report_vfork_events)
2209    options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2210
2211  if (cs.report_exec_events)
2212    options |= PTRACE_O_TRACEEXEC;
2213
2214  options |= PTRACE_O_TRACESYSGOOD;
2215
2216  return options;
2217}
2218
2219void
2220linux_process_target::filter_event (int lwpid, int wstat)
2221{
2222  client_state &cs = get_client_state ();
2223  struct lwp_info *child;
2224  struct thread_info *thread;
2225  int have_stop_pc = 0;
2226
2227  child = find_lwp_pid (ptid_t (lwpid));
2228
2229  /* Check for events reported by anything not in our LWP list.  */
2230  if (child == nullptr)
2231    {
2232      if (WIFSTOPPED (wstat))
2233	{
2234	  if (WSTOPSIG (wstat) == SIGTRAP
2235	      && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2236	    {
2237	      /* A non-leader thread exec'ed after we've seen the
2238		 leader zombie, and removed it from our lists (in
2239		 check_zombie_leaders).  The non-leader thread changes
2240		 its tid to the tgid.  */
2241	      threads_debug_printf
2242		("Re-adding thread group leader LWP %d after exec.",
2243		 lwpid);
2244
2245	      child = add_lwp (ptid_t (lwpid, lwpid));
2246	      child->stopped = 1;
2247	      switch_to_thread (child->thread);
2248	    }
2249	  else
2250	    {
2251	      /* A process we are controlling has forked and the new
2252		 child's stop was reported to us by the kernel.  Save
2253		 its PID and go back to waiting for the fork event to
2254		 be reported - the stopped process might be returned
2255		 from waitpid before or after the fork event is.  */
2256	      threads_debug_printf
2257		("Saving LWP %d status %s in stopped_pids list",
2258		 lwpid, status_to_str (wstat).c_str ());
2259	      add_to_pid_list (&stopped_pids, lwpid, wstat);
2260	    }
2261	}
2262      else
2263	{
2264	  /* Don't report an event for the exit of an LWP not in our
2265	     list, i.e. not part of any inferior we're debugging.
2266	     This can happen if we detach from a program we originally
2267	     forked and then it exits.  However, note that we may have
2268	     earlier deleted a leader of an inferior we're debugging,
2269	     in check_zombie_leaders.  Re-add it back here if so.  */
2270	  find_process ([&] (process_info *proc)
2271	    {
2272	      if (proc->pid == lwpid)
2273		{
2274		  threads_debug_printf
2275		    ("Re-adding thread group leader LWP %d after exit.",
2276		     lwpid);
2277
2278		  child = add_lwp (ptid_t (lwpid, lwpid));
2279		  return true;
2280		}
2281	      return false;
2282	    });
2283	}
2284
2285      if (child == nullptr)
2286	return;
2287    }
2288
2289  thread = get_lwp_thread (child);
2290
2291  child->stopped = 1;
2292
2293  child->last_status = wstat;
2294
2295  /* Check if the thread has exited.  */
2296  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2297    {
2298      threads_debug_printf ("%d exited", lwpid);
2299
2300      if (finish_step_over (child))
2301	{
2302	  /* Unsuspend all other LWPs, and set them back running again.  */
2303	  unsuspend_all_lwps (child);
2304	}
2305
2306      /* If this is not the leader LWP, then the exit signal was not
2307	 the end of the debugged application and should be ignored,
2308	 unless GDB wants to hear about thread exits.  */
2309      if (cs.report_thread_events || is_leader (thread))
2310	{
2311	  /* Since events are serialized to GDB core, and we can't
2312	     report this one right now.  Leave the status pending for
2313	     the next time we're able to report it.  */
2314	  mark_lwp_dead (child, wstat);
2315	  return;
2316	}
2317      else
2318	{
2319	  delete_lwp (child);
2320	  return;
2321	}
2322    }
2323
2324  gdb_assert (WIFSTOPPED (wstat));
2325
2326  if (WIFSTOPPED (wstat))
2327    {
2328      struct process_info *proc;
2329
2330      /* Architecture-specific setup after inferior is running.  */
2331      proc = find_process_pid (pid_of (thread));
2332      if (proc->tdesc == NULL)
2333	{
2334	  if (proc->attached)
2335	    {
2336	      /* This needs to happen after we have attached to the
2337		 inferior and it is stopped for the first time, but
2338		 before we access any inferior registers.  */
2339	      arch_setup_thread (thread);
2340	    }
2341	  else
2342	    {
2343	      /* The process is started, but GDBserver will do
2344		 architecture-specific setup after the program stops at
2345		 the first instruction.  */
2346	      child->status_pending_p = 1;
2347	      child->status_pending = wstat;
2348	      return;
2349	    }
2350	}
2351    }
2352
2353  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2354    {
2355      struct process_info *proc = find_process_pid (pid_of (thread));
2356      int options = linux_low_ptrace_options (proc->attached);
2357
2358      linux_enable_event_reporting (lwpid, options);
2359      child->must_set_ptrace_flags = 0;
2360    }
2361
2362  /* Always update syscall_state, even if it will be filtered later.  */
2363  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2364    {
2365      child->syscall_state
2366	= (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2367	   ? TARGET_WAITKIND_SYSCALL_RETURN
2368	   : TARGET_WAITKIND_SYSCALL_ENTRY);
2369    }
2370  else
2371    {
2372      /* Almost all other ptrace-stops are known to be outside of system
2373	 calls, with further exceptions in handle_extended_wait.  */
2374      child->syscall_state = TARGET_WAITKIND_IGNORE;
2375    }
2376
2377  /* Be careful to not overwrite stop_pc until save_stop_reason is
2378     called.  */
2379  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2380      && linux_is_extended_waitstatus (wstat))
2381    {
2382      child->stop_pc = get_pc (child);
2383      if (handle_extended_wait (&child, wstat))
2384	{
2385	  /* The event has been handled, so just return without
2386	     reporting it.  */
2387	  return;
2388	}
2389    }
2390
2391  if (linux_wstatus_maybe_breakpoint (wstat))
2392    {
2393      if (save_stop_reason (child))
2394	have_stop_pc = 1;
2395    }
2396
2397  if (!have_stop_pc)
2398    child->stop_pc = get_pc (child);
2399
2400  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2401      && child->stop_expected)
2402    {
2403      threads_debug_printf ("Expected stop.");
2404
2405      child->stop_expected = 0;
2406
2407      if (thread->last_resume_kind == resume_stop)
2408	{
2409	  /* We want to report the stop to the core.  Treat the
2410	     SIGSTOP as a normal event.  */
2411	  threads_debug_printf ("resume_stop SIGSTOP caught for %s.",
2412				target_pid_to_str (ptid_of (thread)).c_str ());
2413	}
2414      else if (stopping_threads != NOT_STOPPING_THREADS)
2415	{
2416	  /* Stopping threads.  We don't want this SIGSTOP to end up
2417	     pending.  */
2418	  threads_debug_printf ("SIGSTOP caught for %s while stopping threads.",
2419				target_pid_to_str (ptid_of (thread)).c_str ());
2420	  return;
2421	}
2422      else
2423	{
2424	  /* This is a delayed SIGSTOP.  Filter out the event.  */
2425	  threads_debug_printf ("%s %s, 0, 0 (discard delayed SIGSTOP)",
2426			  child->stepping ? "step" : "continue",
2427			  target_pid_to_str (ptid_of (thread)).c_str ());
2428
2429	  resume_one_lwp (child, child->stepping, 0, NULL);
2430	  return;
2431	}
2432    }
2433
2434  child->status_pending_p = 1;
2435  child->status_pending = wstat;
2436  return;
2437}
2438
2439bool
2440linux_process_target::maybe_hw_step (thread_info *thread)
2441{
2442  if (supports_hardware_single_step ())
2443    return true;
2444  else
2445    {
2446      /* GDBserver must insert single-step breakpoint for software
2447	 single step.  */
2448      gdb_assert (has_single_step_breakpoints (thread));
2449      return false;
2450    }
2451}
2452
2453void
2454linux_process_target::resume_stopped_resumed_lwps (thread_info *thread)
2455{
2456  struct lwp_info *lp = get_thread_lwp (thread);
2457
2458  if (lp->stopped
2459      && !lp->suspended
2460      && !lp->status_pending_p
2461      && thread->last_status.kind () == TARGET_WAITKIND_IGNORE)
2462    {
2463      int step = 0;
2464
2465      if (thread->last_resume_kind == resume_step)
2466	step = maybe_hw_step (thread);
2467
2468      threads_debug_printf ("resuming stopped-resumed LWP %s at %s: step=%d",
2469			    target_pid_to_str (ptid_of (thread)).c_str (),
2470			    paddress (lp->stop_pc), step);
2471
2472      resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2473    }
2474}
2475
2476int
2477linux_process_target::wait_for_event_filtered (ptid_t wait_ptid,
2478					       ptid_t filter_ptid,
2479					       int *wstatp, int options)
2480{
2481  struct thread_info *event_thread;
2482  struct lwp_info *event_child, *requested_child;
2483  sigset_t block_mask, prev_mask;
2484
2485 retry:
2486  /* N.B. event_thread points to the thread_info struct that contains
2487     event_child.  Keep them in sync.  */
2488  event_thread = NULL;
2489  event_child = NULL;
2490  requested_child = NULL;
2491
2492  /* Check for a lwp with a pending status.  */
2493
2494  if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2495    {
2496      event_thread = find_thread_in_random ([&] (thread_info *thread)
2497	{
2498	  return status_pending_p_callback (thread, filter_ptid);
2499	});
2500
2501      if (event_thread != NULL)
2502	{
2503	  event_child = get_thread_lwp (event_thread);
2504	  threads_debug_printf ("Got a pending child %ld", lwpid_of (event_thread));
2505	}
2506    }
2507  else if (filter_ptid != null_ptid)
2508    {
2509      requested_child = find_lwp_pid (filter_ptid);
2510
2511      if (stopping_threads == NOT_STOPPING_THREADS
2512	  && requested_child->status_pending_p
2513	  && (requested_child->collecting_fast_tracepoint
2514	      != fast_tpoint_collect_result::not_collecting))
2515	{
2516	  enqueue_one_deferred_signal (requested_child,
2517				       &requested_child->status_pending);
2518	  requested_child->status_pending_p = 0;
2519	  requested_child->status_pending = 0;
2520	  resume_one_lwp (requested_child, 0, 0, NULL);
2521	}
2522
2523      if (requested_child->suspended
2524	  && requested_child->status_pending_p)
2525	{
2526	  internal_error ("requesting an event out of a"
2527			  " suspended child?");
2528	}
2529
2530      if (requested_child->status_pending_p)
2531	{
2532	  event_child = requested_child;
2533	  event_thread = get_lwp_thread (event_child);
2534	}
2535    }
2536
2537  if (event_child != NULL)
2538    {
2539      threads_debug_printf ("Got an event from pending child %ld (%04x)",
2540			    lwpid_of (event_thread),
2541			    event_child->status_pending);
2542
2543      *wstatp = event_child->status_pending;
2544      event_child->status_pending_p = 0;
2545      event_child->status_pending = 0;
2546      switch_to_thread (event_thread);
2547      return lwpid_of (event_thread);
2548    }
2549
2550  /* But if we don't find a pending event, we'll have to wait.
2551
2552     We only enter this loop if no process has a pending wait status.
2553     Thus any action taken in response to a wait status inside this
2554     loop is responding as soon as we detect the status, not after any
2555     pending events.  */
2556
2557  /* Make sure SIGCHLD is blocked until the sigsuspend below.  Block
2558     all signals while here.  */
2559  sigfillset (&block_mask);
2560  gdb_sigmask (SIG_BLOCK, &block_mask, &prev_mask);
2561
2562  /* Always pull all events out of the kernel.  We'll randomly select
2563     an event LWP out of all that have events, to prevent
2564     starvation.  */
2565  while (event_child == NULL)
2566    {
2567      pid_t ret = 0;
2568
2569      /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2570	 quirks:
2571
2572	 - If the thread group leader exits while other threads in the
2573	   thread group still exist, waitpid(TGID, ...) hangs.  That
2574	   waitpid won't return an exit status until the other threads
2575	   in the group are reaped.
2576
2577	 - When a non-leader thread execs, that thread just vanishes
2578	   without reporting an exit (so we'd hang if we waited for it
2579	   explicitly in that case).  The exec event is reported to
2580	   the TGID pid.  */
2581      errno = 0;
2582      ret = my_waitpid (-1, wstatp, options | WNOHANG);
2583
2584      threads_debug_printf ("waitpid(-1, ...) returned %d, %s",
2585			    ret, errno ? safe_strerror (errno) : "ERRNO-OK");
2586
2587      if (ret > 0)
2588	{
2589	  threads_debug_printf ("waitpid %ld received %s",
2590				(long) ret, status_to_str (*wstatp).c_str ());
2591
2592	  /* Filter all events.  IOW, leave all events pending.  We'll
2593	     randomly select an event LWP out of all that have events
2594	     below.  */
2595	  filter_event (ret, *wstatp);
2596	  /* Retry until nothing comes out of waitpid.  A single
2597	     SIGCHLD can indicate more than one child stopped.  */
2598	  continue;
2599	}
2600
2601      /* Now that we've pulled all events out of the kernel, resume
2602	 LWPs that don't have an interesting event to report.  */
2603      if (stopping_threads == NOT_STOPPING_THREADS)
2604	for_each_thread ([this] (thread_info *thread)
2605	  {
2606	    resume_stopped_resumed_lwps (thread);
2607	  });
2608
2609      /* ... and find an LWP with a status to report to the core, if
2610	 any.  */
2611      event_thread = find_thread_in_random ([&] (thread_info *thread)
2612	{
2613	  return status_pending_p_callback (thread, filter_ptid);
2614	});
2615
2616      if (event_thread != NULL)
2617	{
2618	  event_child = get_thread_lwp (event_thread);
2619	  *wstatp = event_child->status_pending;
2620	  event_child->status_pending_p = 0;
2621	  event_child->status_pending = 0;
2622	  break;
2623	}
2624
2625      /* Check for zombie thread group leaders.  Those can't be reaped
2626	 until all other threads in the thread group are.  */
2627      check_zombie_leaders ();
2628
2629      auto not_stopped = [&] (thread_info *thread)
2630	{
2631	  return not_stopped_callback (thread, wait_ptid);
2632	};
2633
2634      /* If there are no resumed children left in the set of LWPs we
2635	 want to wait for, bail.  We can't just block in
2636	 waitpid/sigsuspend, because lwps might have been left stopped
2637	 in trace-stop state, and we'd be stuck forever waiting for
2638	 their status to change (which would only happen if we resumed
2639	 them).  Even if WNOHANG is set, this return code is preferred
2640	 over 0 (below), as it is more detailed.  */
2641      if (find_thread (not_stopped) == NULL)
2642	{
2643	  threads_debug_printf ("exit (no unwaited-for LWP)");
2644
2645	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2646	  return -1;
2647	}
2648
2649      /* No interesting event to report to the caller.  */
2650      if ((options & WNOHANG))
2651	{
2652	  threads_debug_printf ("WNOHANG set, no event found");
2653
2654	  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2655	  return 0;
2656	}
2657
2658      /* Block until we get an event reported with SIGCHLD.  */
2659      threads_debug_printf ("sigsuspend'ing");
2660
2661      sigsuspend (&prev_mask);
2662      gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2663      goto retry;
2664    }
2665
2666  gdb_sigmask (SIG_SETMASK, &prev_mask, NULL);
2667
2668  switch_to_thread (event_thread);
2669
2670  return lwpid_of (event_thread);
2671}
2672
2673int
2674linux_process_target::wait_for_event (ptid_t ptid, int *wstatp, int options)
2675{
2676  return wait_for_event_filtered (ptid, ptid, wstatp, options);
2677}
2678
2679/* Select one LWP out of those that have events pending.  */
2680
2681static void
2682select_event_lwp (struct lwp_info **orig_lp)
2683{
2684  struct thread_info *event_thread = NULL;
2685
2686  /* In all-stop, give preference to the LWP that is being
2687     single-stepped.  There will be at most one, and it's the LWP that
2688     the core is most interested in.  If we didn't do this, then we'd
2689     have to handle pending step SIGTRAPs somehow in case the core
2690     later continues the previously-stepped thread, otherwise we'd
2691     report the pending SIGTRAP, and the core, not having stepped the
2692     thread, wouldn't understand what the trap was for, and therefore
2693     would report it to the user as a random signal.  */
2694  if (!non_stop)
2695    {
2696      event_thread = find_thread ([] (thread_info *thread)
2697	{
2698	  lwp_info *lp = get_thread_lwp (thread);
2699
2700	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2701		  && thread->last_resume_kind == resume_step
2702		  && lp->status_pending_p);
2703	});
2704
2705      if (event_thread != NULL)
2706	threads_debug_printf
2707	  ("Select single-step %s",
2708	   target_pid_to_str (ptid_of (event_thread)).c_str ());
2709    }
2710  if (event_thread == NULL)
2711    {
2712      /* No single-stepping LWP.  Select one at random, out of those
2713	 which have had events.  */
2714
2715      event_thread = find_thread_in_random ([&] (thread_info *thread)
2716	{
2717	  lwp_info *lp = get_thread_lwp (thread);
2718
2719	  /* Only resumed LWPs that have an event pending. */
2720	  return (thread->last_status.kind () == TARGET_WAITKIND_IGNORE
2721		  && lp->status_pending_p);
2722	});
2723    }
2724
2725  if (event_thread != NULL)
2726    {
2727      struct lwp_info *event_lp = get_thread_lwp (event_thread);
2728
2729      /* Switch the event LWP.  */
2730      *orig_lp = event_lp;
2731    }
2732}
2733
2734/* Decrement the suspend count of all LWPs, except EXCEPT, if non
2735   NULL.  */
2736
2737static void
2738unsuspend_all_lwps (struct lwp_info *except)
2739{
2740  for_each_thread ([&] (thread_info *thread)
2741    {
2742      lwp_info *lwp = get_thread_lwp (thread);
2743
2744      if (lwp != except)
2745	lwp_suspended_decr (lwp);
2746    });
2747}
2748
2749static bool lwp_running (thread_info *thread);
2750
2751/* Stabilize threads (move out of jump pads).
2752
2753   If a thread is midway collecting a fast tracepoint, we need to
2754   finish the collection and move it out of the jump pad before
2755   reporting the signal.
2756
2757   This avoids recursion while collecting (when a signal arrives
2758   midway, and the signal handler itself collects), which would trash
2759   the trace buffer.  In case the user set a breakpoint in a signal
2760   handler, this avoids the backtrace showing the jump pad, etc..
2761   Most importantly, there are certain things we can't do safely if
2762   threads are stopped in a jump pad (or in its callee's).  For
2763   example:
2764
2765     - starting a new trace run.  A thread still collecting the
2766   previous run, could trash the trace buffer when resumed.  The trace
2767   buffer control structures would have been reset but the thread had
2768   no way to tell.  The thread could even midway memcpy'ing to the
2769   buffer, which would mean that when resumed, it would clobber the
2770   trace buffer that had been set for a new run.
2771
2772     - we can't rewrite/reuse the jump pads for new tracepoints
2773   safely.  Say you do tstart while a thread is stopped midway while
2774   collecting.  When the thread is later resumed, it finishes the
2775   collection, and returns to the jump pad, to execute the original
2776   instruction that was under the tracepoint jump at the time the
2777   older run had been started.  If the jump pad had been rewritten
2778   since for something else in the new run, the thread would now
2779   execute the wrong / random instructions.  */
2780
2781void
2782linux_process_target::stabilize_threads ()
2783{
2784  thread_info *thread_stuck = find_thread ([this] (thread_info *thread)
2785				{
2786				  return stuck_in_jump_pad (thread);
2787				});
2788
2789  if (thread_stuck != NULL)
2790    {
2791      threads_debug_printf ("can't stabilize, LWP %ld is stuck in jump pad",
2792			    lwpid_of (thread_stuck));
2793      return;
2794    }
2795
2796  scoped_restore_current_thread restore_thread;
2797
2798  stabilizing_threads = 1;
2799
2800  /* Kick 'em all.  */
2801  for_each_thread ([this] (thread_info *thread)
2802    {
2803      move_out_of_jump_pad (thread);
2804    });
2805
2806  /* Loop until all are stopped out of the jump pads.  */
2807  while (find_thread (lwp_running) != NULL)
2808    {
2809      struct target_waitstatus ourstatus;
2810      struct lwp_info *lwp;
2811      int wstat;
2812
2813      /* Note that we go through the full wait even loop.  While
2814	 moving threads out of jump pad, we need to be able to step
2815	 over internal breakpoints and such.  */
2816      wait_1 (minus_one_ptid, &ourstatus, 0);
2817
2818      if (ourstatus.kind () == TARGET_WAITKIND_STOPPED)
2819	{
2820	  lwp = get_thread_lwp (current_thread);
2821
2822	  /* Lock it.  */
2823	  lwp_suspended_inc (lwp);
2824
2825	  if (ourstatus.sig () != GDB_SIGNAL_0
2826	      || current_thread->last_resume_kind == resume_stop)
2827	    {
2828	      wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.sig ()));
2829	      enqueue_one_deferred_signal (lwp, &wstat);
2830	    }
2831	}
2832    }
2833
2834  unsuspend_all_lwps (NULL);
2835
2836  stabilizing_threads = 0;
2837
2838  if (debug_threads)
2839    {
2840      thread_stuck = find_thread ([this] (thread_info *thread)
2841		       {
2842			 return stuck_in_jump_pad (thread);
2843		       });
2844
2845      if (thread_stuck != NULL)
2846	threads_debug_printf
2847	  ("couldn't stabilize, LWP %ld got stuck in jump pad",
2848	   lwpid_of (thread_stuck));
2849    }
2850}
2851
2852/* Convenience function that is called when the kernel reports an
2853   event that is not passed out to GDB.  */
2854
2855static ptid_t
2856ignore_event (struct target_waitstatus *ourstatus)
2857{
2858  /* If we got an event, there may still be others, as a single
2859     SIGCHLD can indicate more than one child stopped.  This forces
2860     another target_wait call.  */
2861  async_file_mark ();
2862
2863  ourstatus->set_ignore ();
2864  return null_ptid;
2865}
2866
2867ptid_t
2868linux_process_target::filter_exit_event (lwp_info *event_child,
2869					 target_waitstatus *ourstatus)
2870{
2871  client_state &cs = get_client_state ();
2872  struct thread_info *thread = get_lwp_thread (event_child);
2873  ptid_t ptid = ptid_of (thread);
2874
2875  if (!is_leader (thread))
2876    {
2877      if (cs.report_thread_events)
2878	ourstatus->set_thread_exited (0);
2879      else
2880	ourstatus->set_ignore ();
2881
2882      delete_lwp (event_child);
2883    }
2884  return ptid;
2885}
2886
2887/* Returns 1 if GDB is interested in any event_child syscalls.  */
2888
2889static int
2890gdb_catching_syscalls_p (struct lwp_info *event_child)
2891{
2892  struct thread_info *thread = get_lwp_thread (event_child);
2893  struct process_info *proc = get_thread_process (thread);
2894
2895  return !proc->syscalls_to_catch.empty ();
2896}
2897
2898bool
2899linux_process_target::gdb_catch_this_syscall (lwp_info *event_child)
2900{
2901  int sysno;
2902  struct thread_info *thread = get_lwp_thread (event_child);
2903  struct process_info *proc = get_thread_process (thread);
2904
2905  if (proc->syscalls_to_catch.empty ())
2906    return false;
2907
2908  if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
2909    return true;
2910
2911  get_syscall_trapinfo (event_child, &sysno);
2912
2913  for (int iter : proc->syscalls_to_catch)
2914    if (iter == sysno)
2915      return true;
2916
2917  return false;
2918}
2919
2920ptid_t
2921linux_process_target::wait_1 (ptid_t ptid, target_waitstatus *ourstatus,
2922			      target_wait_flags target_options)
2923{
2924  THREADS_SCOPED_DEBUG_ENTER_EXIT;
2925
2926  client_state &cs = get_client_state ();
2927  int w;
2928  struct lwp_info *event_child;
2929  int options;
2930  int pid;
2931  int step_over_finished;
2932  int bp_explains_trap;
2933  int maybe_internal_trap;
2934  int report_to_gdb;
2935  int trace_event;
2936  int in_step_range;
2937  int any_resumed;
2938
2939  threads_debug_printf ("[%s]", target_pid_to_str (ptid).c_str ());
2940
2941  /* Translate generic target options into linux options.  */
2942  options = __WALL;
2943  if (target_options & TARGET_WNOHANG)
2944    options |= WNOHANG;
2945
2946  bp_explains_trap = 0;
2947  trace_event = 0;
2948  in_step_range = 0;
2949  ourstatus->set_ignore ();
2950
2951  auto status_pending_p_any = [&] (thread_info *thread)
2952    {
2953      return status_pending_p_callback (thread, minus_one_ptid);
2954    };
2955
2956  auto not_stopped = [&] (thread_info *thread)
2957    {
2958      return not_stopped_callback (thread, minus_one_ptid);
2959    };
2960
2961  /* Find a resumed LWP, if any.  */
2962  if (find_thread (status_pending_p_any) != NULL)
2963    any_resumed = 1;
2964  else if (find_thread (not_stopped) != NULL)
2965    any_resumed = 1;
2966  else
2967    any_resumed = 0;
2968
2969  if (step_over_bkpt == null_ptid)
2970    pid = wait_for_event (ptid, &w, options);
2971  else
2972    {
2973      threads_debug_printf ("step_over_bkpt set [%s], doing a blocking wait",
2974			    target_pid_to_str (step_over_bkpt).c_str ());
2975      pid = wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2976    }
2977
2978  if (pid == 0 || (pid == -1 && !any_resumed))
2979    {
2980      gdb_assert (target_options & TARGET_WNOHANG);
2981
2982      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_IGNORE");
2983
2984      ourstatus->set_ignore ();
2985      return null_ptid;
2986    }
2987  else if (pid == -1)
2988    {
2989      threads_debug_printf ("ret = null_ptid, TARGET_WAITKIND_NO_RESUMED");
2990
2991      ourstatus->set_no_resumed ();
2992      return null_ptid;
2993    }
2994
2995  event_child = get_thread_lwp (current_thread);
2996
2997  /* wait_for_event only returns an exit status for the last
2998     child of a process.  Report it.  */
2999  if (WIFEXITED (w) || WIFSIGNALED (w))
3000    {
3001      if (WIFEXITED (w))
3002	{
3003	  ourstatus->set_exited (WEXITSTATUS (w));
3004
3005	  threads_debug_printf
3006	    ("ret = %s, exited with retcode %d",
3007	     target_pid_to_str (ptid_of (current_thread)).c_str (),
3008	     WEXITSTATUS (w));
3009	}
3010      else
3011	{
3012	  ourstatus->set_signalled (gdb_signal_from_host (WTERMSIG (w)));
3013
3014	  threads_debug_printf
3015	    ("ret = %s, terminated with signal %d",
3016	     target_pid_to_str (ptid_of (current_thread)).c_str (),
3017	     WTERMSIG (w));
3018	}
3019
3020      if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3021	return filter_exit_event (event_child, ourstatus);
3022
3023      return ptid_of (current_thread);
3024    }
3025
3026  /* If step-over executes a breakpoint instruction, in the case of a
3027     hardware single step it means a gdb/gdbserver breakpoint had been
3028     planted on top of a permanent breakpoint, in the case of a software
3029     single step it may just mean that gdbserver hit the reinsert breakpoint.
3030     The PC has been adjusted by save_stop_reason to point at
3031     the breakpoint address.
3032     So in the case of the hardware single step advance the PC manually
3033     past the breakpoint and in the case of software single step advance only
3034     if it's not the single_step_breakpoint we are hitting.
3035     This avoids that a program would keep trapping a permanent breakpoint
3036     forever.  */
3037  if (step_over_bkpt != null_ptid
3038      && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3039      && (event_child->stepping
3040	  || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3041    {
3042      int increment_pc = 0;
3043      int breakpoint_kind = 0;
3044      CORE_ADDR stop_pc = event_child->stop_pc;
3045
3046      breakpoint_kind = breakpoint_kind_from_current_state (&stop_pc);
3047      sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3048
3049      threads_debug_printf
3050	("step-over for %s executed software breakpoint",
3051	 target_pid_to_str (ptid_of (current_thread)).c_str ());
3052
3053      if (increment_pc != 0)
3054	{
3055	  struct regcache *regcache
3056	    = get_thread_regcache (current_thread, 1);
3057
3058	  event_child->stop_pc += increment_pc;
3059	  low_set_pc (regcache, event_child->stop_pc);
3060
3061	  if (!low_breakpoint_at (event_child->stop_pc))
3062	    event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3063	}
3064    }
3065
3066  /* If this event was not handled before, and is not a SIGTRAP, we
3067     report it.  SIGILL and SIGSEGV are also treated as traps in case
3068     a breakpoint is inserted at the current PC.  If this target does
3069     not support internal breakpoints at all, we also report the
3070     SIGTRAP without further processing; it's of no concern to us.  */
3071  maybe_internal_trap
3072    = (low_supports_breakpoints ()
3073       && (WSTOPSIG (w) == SIGTRAP
3074	   || ((WSTOPSIG (w) == SIGILL
3075		|| WSTOPSIG (w) == SIGSEGV)
3076	       && low_breakpoint_at (event_child->stop_pc))));
3077
3078  if (maybe_internal_trap)
3079    {
3080      /* Handle anything that requires bookkeeping before deciding to
3081	 report the event or continue waiting.  */
3082
3083      /* First check if we can explain the SIGTRAP with an internal
3084	 breakpoint, or if we should possibly report the event to GDB.
3085	 Do this before anything that may remove or insert a
3086	 breakpoint.  */
3087      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3088
3089      /* We have a SIGTRAP, possibly a step-over dance has just
3090	 finished.  If so, tweak the state machine accordingly,
3091	 reinsert breakpoints and delete any single-step
3092	 breakpoints.  */
3093      step_over_finished = finish_step_over (event_child);
3094
3095      /* Now invoke the callbacks of any internal breakpoints there.  */
3096      check_breakpoints (event_child->stop_pc);
3097
3098      /* Handle tracepoint data collecting.  This may overflow the
3099	 trace buffer, and cause a tracing stop, removing
3100	 breakpoints.  */
3101      trace_event = handle_tracepoints (event_child);
3102
3103      if (bp_explains_trap)
3104	threads_debug_printf ("Hit a gdbserver breakpoint.");
3105    }
3106  else
3107    {
3108      /* We have some other signal, possibly a step-over dance was in
3109	 progress, and it should be cancelled too.  */
3110      step_over_finished = finish_step_over (event_child);
3111    }
3112
3113  /* We have all the data we need.  Either report the event to GDB, or
3114     resume threads and keep waiting for more.  */
3115
3116  /* If we're collecting a fast tracepoint, finish the collection and
3117     move out of the jump pad before delivering a signal.  See
3118     linux_stabilize_threads.  */
3119
3120  if (WIFSTOPPED (w)
3121      && WSTOPSIG (w) != SIGTRAP
3122      && supports_fast_tracepoints ()
3123      && agent_loaded_p ())
3124    {
3125      threads_debug_printf ("Got signal %d for LWP %ld.  Check if we need "
3126			    "to defer or adjust it.",
3127			    WSTOPSIG (w), lwpid_of (current_thread));
3128
3129      /* Allow debugging the jump pad itself.  */
3130      if (current_thread->last_resume_kind != resume_step
3131	  && maybe_move_out_of_jump_pad (event_child, &w))
3132	{
3133	  enqueue_one_deferred_signal (event_child, &w);
3134
3135	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad)",
3136				WSTOPSIG (w), lwpid_of (current_thread));
3137
3138	  resume_one_lwp (event_child, 0, 0, NULL);
3139
3140	  return ignore_event (ourstatus);
3141	}
3142    }
3143
3144  if (event_child->collecting_fast_tracepoint
3145      != fast_tpoint_collect_result::not_collecting)
3146    {
3147      threads_debug_printf
3148	("LWP %ld was trying to move out of the jump pad (%d). "
3149	 "Check if we're already there.",
3150	 lwpid_of (current_thread),
3151	 (int) event_child->collecting_fast_tracepoint);
3152
3153      trace_event = 1;
3154
3155      event_child->collecting_fast_tracepoint
3156	= linux_fast_tracepoint_collecting (event_child, NULL);
3157
3158      if (event_child->collecting_fast_tracepoint
3159	  != fast_tpoint_collect_result::before_insn)
3160	{
3161	  /* No longer need this breakpoint.  */
3162	  if (event_child->exit_jump_pad_bkpt != NULL)
3163	    {
3164	      threads_debug_printf
3165		("No longer need exit-jump-pad bkpt; removing it."
3166		 "stopping all threads momentarily.");
3167
3168	      /* Other running threads could hit this breakpoint.
3169		 We don't handle moribund locations like GDB does,
3170		 instead we always pause all threads when removing
3171		 breakpoints, so that any step-over or
3172		 decr_pc_after_break adjustment is always taken
3173		 care of while the breakpoint is still
3174		 inserted.  */
3175	      stop_all_lwps (1, event_child);
3176
3177	      delete_breakpoint (event_child->exit_jump_pad_bkpt);
3178	      event_child->exit_jump_pad_bkpt = NULL;
3179
3180	      unstop_all_lwps (1, event_child);
3181
3182	      gdb_assert (event_child->suspended >= 0);
3183	    }
3184	}
3185
3186      if (event_child->collecting_fast_tracepoint
3187	  == fast_tpoint_collect_result::not_collecting)
3188	{
3189	  threads_debug_printf
3190	    ("fast tracepoint finished collecting successfully.");
3191
3192	  /* We may have a deferred signal to report.  */
3193	  if (dequeue_one_deferred_signal (event_child, &w))
3194	    threads_debug_printf ("dequeued one signal.");
3195	  else
3196	    {
3197	      threads_debug_printf ("no deferred signals.");
3198
3199	      if (stabilizing_threads)
3200		{
3201		  ourstatus->set_stopped (GDB_SIGNAL_0);
3202
3203		  threads_debug_printf
3204		    ("ret = %s, stopped while stabilizing threads",
3205		     target_pid_to_str (ptid_of (current_thread)).c_str ());
3206
3207		  return ptid_of (current_thread);
3208		}
3209	    }
3210	}
3211    }
3212
3213  /* Check whether GDB would be interested in this event.  */
3214
3215  /* Check if GDB is interested in this syscall.  */
3216  if (WIFSTOPPED (w)
3217      && WSTOPSIG (w) == SYSCALL_SIGTRAP
3218      && !gdb_catch_this_syscall (event_child))
3219    {
3220      threads_debug_printf ("Ignored syscall for LWP %ld.",
3221			    lwpid_of (current_thread));
3222
3223      resume_one_lwp (event_child, event_child->stepping, 0, NULL);
3224
3225      return ignore_event (ourstatus);
3226    }
3227
3228  /* If GDB is not interested in this signal, don't stop other
3229     threads, and don't report it to GDB.  Just resume the inferior
3230     right away.  We do this for threading-related signals as well as
3231     any that GDB specifically requested we ignore.  But never ignore
3232     SIGSTOP if we sent it ourselves, and do not ignore signals when
3233     stepping - they may require special handling to skip the signal
3234     handler. Also never ignore signals that could be caused by a
3235     breakpoint.  */
3236  if (WIFSTOPPED (w)
3237      && current_thread->last_resume_kind != resume_step
3238      && (
3239#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3240	  (current_process ()->priv->thread_db != NULL
3241	   && (WSTOPSIG (w) == __SIGRTMIN
3242	       || WSTOPSIG (w) == __SIGRTMIN + 1))
3243	  ||
3244#endif
3245	  (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3246	   && !(WSTOPSIG (w) == SIGSTOP
3247		&& current_thread->last_resume_kind == resume_stop)
3248	   && !linux_wstatus_maybe_breakpoint (w))))
3249    {
3250      siginfo_t info, *info_p;
3251
3252      threads_debug_printf ("Ignored signal %d for LWP %ld.",
3253			    WSTOPSIG (w), lwpid_of (current_thread));
3254
3255      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3256		  (PTRACE_TYPE_ARG3) 0, &info) == 0)
3257	info_p = &info;
3258      else
3259	info_p = NULL;
3260
3261      if (step_over_finished)
3262	{
3263	  /* We cancelled this thread's step-over above.  We still
3264	     need to unsuspend all other LWPs, and set them back
3265	     running again while the signal handler runs.  */
3266	  unsuspend_all_lwps (event_child);
3267
3268	  /* Enqueue the pending signal info so that proceed_all_lwps
3269	     doesn't lose it.  */
3270	  enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3271
3272	  proceed_all_lwps ();
3273	}
3274      else
3275	{
3276	  resume_one_lwp (event_child, event_child->stepping,
3277			  WSTOPSIG (w), info_p);
3278	}
3279
3280      return ignore_event (ourstatus);
3281    }
3282
3283  /* Note that all addresses are always "out of the step range" when
3284     there's no range to begin with.  */
3285  in_step_range = lwp_in_step_range (event_child);
3286
3287  /* If GDB wanted this thread to single step, and the thread is out
3288     of the step range, we always want to report the SIGTRAP, and let
3289     GDB handle it.  Watchpoints should always be reported.  So should
3290     signals we can't explain.  A SIGTRAP we can't explain could be a
3291     GDB breakpoint --- we may or not support Z0 breakpoints.  If we
3292     do, we're be able to handle GDB breakpoints on top of internal
3293     breakpoints, by handling the internal breakpoint and still
3294     reporting the event to GDB.  If we don't, we're out of luck, GDB
3295     won't see the breakpoint hit.  If we see a single-step event but
3296     the thread should be continuing, don't pass the trap to gdb.
3297     That indicates that we had previously finished a single-step but
3298     left the single-step pending -- see
3299     complete_ongoing_step_over.  */
3300  report_to_gdb = (!maybe_internal_trap
3301		   || (current_thread->last_resume_kind == resume_step
3302		       && !in_step_range)
3303		   || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3304		   || (!in_step_range
3305		       && !bp_explains_trap
3306		       && !trace_event
3307		       && !step_over_finished
3308		       && !(current_thread->last_resume_kind == resume_continue
3309			    && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3310		   || (gdb_breakpoint_here (event_child->stop_pc)
3311		       && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3312		       && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3313		   || event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE);
3314
3315  run_breakpoint_commands (event_child->stop_pc);
3316
3317  /* We found no reason GDB would want us to stop.  We either hit one
3318     of our own breakpoints, or finished an internal step GDB
3319     shouldn't know about.  */
3320  if (!report_to_gdb)
3321    {
3322      if (bp_explains_trap)
3323	threads_debug_printf ("Hit a gdbserver breakpoint.");
3324
3325      if (step_over_finished)
3326	threads_debug_printf ("Step-over finished.");
3327
3328      if (trace_event)
3329	threads_debug_printf ("Tracepoint event.");
3330
3331      if (lwp_in_step_range (event_child))
3332	threads_debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).",
3333			      paddress (event_child->stop_pc),
3334			      paddress (event_child->step_range_start),
3335			      paddress (event_child->step_range_end));
3336
3337      /* We're not reporting this breakpoint to GDB, so apply the
3338	 decr_pc_after_break adjustment to the inferior's regcache
3339	 ourselves.  */
3340
3341      if (low_supports_breakpoints ())
3342	{
3343	  struct regcache *regcache
3344	    = get_thread_regcache (current_thread, 1);
3345	  low_set_pc (regcache, event_child->stop_pc);
3346	}
3347
3348      if (step_over_finished)
3349	{
3350	  /* If we have finished stepping over a breakpoint, we've
3351	     stopped and suspended all LWPs momentarily except the
3352	     stepping one.  This is where we resume them all again.
3353	     We're going to keep waiting, so use proceed, which
3354	     handles stepping over the next breakpoint.  */
3355	  unsuspend_all_lwps (event_child);
3356	}
3357      else
3358	{
3359	  /* Remove the single-step breakpoints if any.  Note that
3360	     there isn't single-step breakpoint if we finished stepping
3361	     over.  */
3362	  if (supports_software_single_step ()
3363	      && has_single_step_breakpoints (current_thread))
3364	    {
3365	      stop_all_lwps (0, event_child);
3366	      delete_single_step_breakpoints (current_thread);
3367	      unstop_all_lwps (0, event_child);
3368	    }
3369	}
3370
3371      threads_debug_printf ("proceeding all threads.");
3372
3373      proceed_all_lwps ();
3374
3375      return ignore_event (ourstatus);
3376    }
3377
3378    if (debug_threads)
3379      {
3380	if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3381	  threads_debug_printf ("LWP %ld: extended event with waitstatus %s",
3382				lwpid_of (get_lwp_thread (event_child)),
3383				event_child->waitstatus.to_string ().c_str ());
3384
3385	if (current_thread->last_resume_kind == resume_step)
3386	  {
3387	    if (event_child->step_range_start == event_child->step_range_end)
3388	      threads_debug_printf
3389		("GDB wanted to single-step, reporting event.");
3390	    else if (!lwp_in_step_range (event_child))
3391	      threads_debug_printf ("Out of step range, reporting event.");
3392	  }
3393
3394	if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3395	  threads_debug_printf ("Stopped by watchpoint.");
3396	else if (gdb_breakpoint_here (event_child->stop_pc))
3397	  threads_debug_printf ("Stopped by GDB breakpoint.");
3398      }
3399
3400    threads_debug_printf ("Hit a non-gdbserver trap event.");
3401
3402  /* Alright, we're going to report a stop.  */
3403
3404  /* Remove single-step breakpoints.  */
3405  if (supports_software_single_step ())
3406    {
3407      /* Remove single-step breakpoints or not.  It it is true, stop all
3408	 lwps, so that other threads won't hit the breakpoint in the
3409	 staled memory.  */
3410      int remove_single_step_breakpoints_p = 0;
3411
3412      if (non_stop)
3413	{
3414	  remove_single_step_breakpoints_p
3415	    = has_single_step_breakpoints (current_thread);
3416	}
3417      else
3418	{
3419	  /* In all-stop, a stop reply cancels all previous resume
3420	     requests.  Delete all single-step breakpoints.  */
3421
3422	  find_thread ([&] (thread_info *thread) {
3423	    if (has_single_step_breakpoints (thread))
3424	      {
3425		remove_single_step_breakpoints_p = 1;
3426		return true;
3427	      }
3428
3429	    return false;
3430	  });
3431	}
3432
3433      if (remove_single_step_breakpoints_p)
3434	{
3435	  /* If we remove single-step breakpoints from memory, stop all lwps,
3436	     so that other threads won't hit the breakpoint in the staled
3437	     memory.  */
3438	  stop_all_lwps (0, event_child);
3439
3440	  if (non_stop)
3441	    {
3442	      gdb_assert (has_single_step_breakpoints (current_thread));
3443	      delete_single_step_breakpoints (current_thread);
3444	    }
3445	  else
3446	    {
3447	      for_each_thread ([] (thread_info *thread){
3448		if (has_single_step_breakpoints (thread))
3449		  delete_single_step_breakpoints (thread);
3450	      });
3451	    }
3452
3453	  unstop_all_lwps (0, event_child);
3454	}
3455    }
3456
3457  if (!stabilizing_threads)
3458    {
3459      /* In all-stop, stop all threads.  */
3460      if (!non_stop)
3461	stop_all_lwps (0, NULL);
3462
3463      if (step_over_finished)
3464	{
3465	  if (!non_stop)
3466	    {
3467	      /* If we were doing a step-over, all other threads but
3468		 the stepping one had been paused in start_step_over,
3469		 with their suspend counts incremented.  We don't want
3470		 to do a full unstop/unpause, because we're in
3471		 all-stop mode (so we want threads stopped), but we
3472		 still need to unsuspend the other threads, to
3473		 decrement their `suspended' count back.  */
3474	      unsuspend_all_lwps (event_child);
3475	    }
3476	  else
3477	    {
3478	      /* If we just finished a step-over, then all threads had
3479		 been momentarily paused.  In all-stop, that's fine,
3480		 we want threads stopped by now anyway.  In non-stop,
3481		 we need to re-resume threads that GDB wanted to be
3482		 running.  */
3483	      unstop_all_lwps (1, event_child);
3484	    }
3485	}
3486
3487      /* If we're not waiting for a specific LWP, choose an event LWP
3488	 from among those that have had events.  Giving equal priority
3489	 to all LWPs that have had events helps prevent
3490	 starvation.  */
3491      if (ptid == minus_one_ptid)
3492	{
3493	  event_child->status_pending_p = 1;
3494	  event_child->status_pending = w;
3495
3496	  select_event_lwp (&event_child);
3497
3498	  /* current_thread and event_child must stay in sync.  */
3499	  switch_to_thread (get_lwp_thread (event_child));
3500
3501	  event_child->status_pending_p = 0;
3502	  w = event_child->status_pending;
3503	}
3504
3505
3506      /* Stabilize threads (move out of jump pads).  */
3507      if (!non_stop)
3508	target_stabilize_threads ();
3509    }
3510  else
3511    {
3512      /* If we just finished a step-over, then all threads had been
3513	 momentarily paused.  In all-stop, that's fine, we want
3514	 threads stopped by now anyway.  In non-stop, we need to
3515	 re-resume threads that GDB wanted to be running.  */
3516      if (step_over_finished)
3517	unstop_all_lwps (1, event_child);
3518    }
3519
3520  /* At this point, we haven't set OURSTATUS.  This is where we do it.  */
3521  gdb_assert (ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3522
3523  if (event_child->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
3524    {
3525      /* If the reported event is an exit, fork, vfork or exec, let
3526	 GDB know.  */
3527
3528      /* Break the unreported fork relationship chain.  */
3529      if (event_child->waitstatus.kind () == TARGET_WAITKIND_FORKED
3530	  || event_child->waitstatus.kind () == TARGET_WAITKIND_VFORKED)
3531	{
3532	  event_child->fork_relative->fork_relative = NULL;
3533	  event_child->fork_relative = NULL;
3534	}
3535
3536      *ourstatus = event_child->waitstatus;
3537      /* Clear the event lwp's waitstatus since we handled it already.  */
3538      event_child->waitstatus.set_ignore ();
3539    }
3540  else
3541    {
3542      /* The LWP stopped due to a plain signal or a syscall signal.  Either way,
3543         event_chid->waitstatus wasn't filled in with the details, so look at
3544	 the wait status W.  */
3545      if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3546	{
3547	  int syscall_number;
3548
3549	  get_syscall_trapinfo (event_child, &syscall_number);
3550	  if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
3551	    ourstatus->set_syscall_entry (syscall_number);
3552	  else if (event_child->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
3553	    ourstatus->set_syscall_return (syscall_number);
3554	  else
3555	    gdb_assert_not_reached ("unexpected syscall state");
3556	}
3557      else if (current_thread->last_resume_kind == resume_stop
3558	       && WSTOPSIG (w) == SIGSTOP)
3559	{
3560	  /* A thread that has been requested to stop by GDB with vCont;t,
3561	     and it stopped cleanly, so report as SIG0.  The use of
3562	     SIGSTOP is an implementation detail.  */
3563	  ourstatus->set_stopped (GDB_SIGNAL_0);
3564	}
3565      else
3566	ourstatus->set_stopped (gdb_signal_from_host (WSTOPSIG (w)));
3567    }
3568
3569  /* Now that we've selected our final event LWP, un-adjust its PC if
3570     it was a software breakpoint, and the client doesn't know we can
3571     adjust the breakpoint ourselves.  */
3572  if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3573      && !cs.swbreak_feature)
3574    {
3575      int decr_pc = low_decr_pc_after_break ();
3576
3577      if (decr_pc != 0)
3578	{
3579	  struct regcache *regcache
3580	    = get_thread_regcache (current_thread, 1);
3581	  low_set_pc (regcache, event_child->stop_pc + decr_pc);
3582	}
3583    }
3584
3585  gdb_assert (step_over_bkpt == null_ptid);
3586
3587  threads_debug_printf ("ret = %s, %s",
3588			target_pid_to_str (ptid_of (current_thread)).c_str (),
3589			ourstatus->to_string ().c_str ());
3590
3591  if (ourstatus->kind () == TARGET_WAITKIND_EXITED)
3592    return filter_exit_event (event_child, ourstatus);
3593
3594  return ptid_of (current_thread);
3595}
3596
3597/* Get rid of any pending event in the pipe.  */
3598static void
3599async_file_flush (void)
3600{
3601  linux_event_pipe.flush ();
3602}
3603
3604/* Put something in the pipe, so the event loop wakes up.  */
3605static void
3606async_file_mark (void)
3607{
3608  linux_event_pipe.mark ();
3609}
3610
3611ptid_t
3612linux_process_target::wait (ptid_t ptid,
3613			    target_waitstatus *ourstatus,
3614			    target_wait_flags target_options)
3615{
3616  ptid_t event_ptid;
3617
3618  /* Flush the async file first.  */
3619  if (target_is_async_p ())
3620    async_file_flush ();
3621
3622  do
3623    {
3624      event_ptid = wait_1 (ptid, ourstatus, target_options);
3625    }
3626  while ((target_options & TARGET_WNOHANG) == 0
3627	 && event_ptid == null_ptid
3628	 && ourstatus->kind () == TARGET_WAITKIND_IGNORE);
3629
3630  /* If at least one stop was reported, there may be more.  A single
3631     SIGCHLD can signal more than one child stop.  */
3632  if (target_is_async_p ()
3633      && (target_options & TARGET_WNOHANG) != 0
3634      && event_ptid != null_ptid)
3635    async_file_mark ();
3636
3637  return event_ptid;
3638}
3639
3640/* Send a signal to an LWP.  */
3641
3642static int
3643kill_lwp (unsigned long lwpid, int signo)
3644{
3645  int ret;
3646
3647  errno = 0;
3648  ret = syscall (__NR_tkill, lwpid, signo);
3649  if (errno == ENOSYS)
3650    {
3651      /* If tkill fails, then we are not using nptl threads, a
3652	 configuration we no longer support.  */
3653      perror_with_name (("tkill"));
3654    }
3655  return ret;
3656}
3657
3658void
3659linux_stop_lwp (struct lwp_info *lwp)
3660{
3661  send_sigstop (lwp);
3662}
3663
3664static void
3665send_sigstop (struct lwp_info *lwp)
3666{
3667  int pid;
3668
3669  pid = lwpid_of (get_lwp_thread (lwp));
3670
3671  /* If we already have a pending stop signal for this process, don't
3672     send another.  */
3673  if (lwp->stop_expected)
3674    {
3675      threads_debug_printf ("Have pending sigstop for lwp %d", pid);
3676
3677      return;
3678    }
3679
3680  threads_debug_printf ("Sending sigstop to lwp %d", pid);
3681
3682  lwp->stop_expected = 1;
3683  kill_lwp (pid, SIGSTOP);
3684}
3685
3686static void
3687send_sigstop (thread_info *thread, lwp_info *except)
3688{
3689  struct lwp_info *lwp = get_thread_lwp (thread);
3690
3691  /* Ignore EXCEPT.  */
3692  if (lwp == except)
3693    return;
3694
3695  if (lwp->stopped)
3696    return;
3697
3698  send_sigstop (lwp);
3699}
3700
3701/* Increment the suspend count of an LWP, and stop it, if not stopped
3702   yet.  */
3703static void
3704suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3705{
3706  struct lwp_info *lwp = get_thread_lwp (thread);
3707
3708  /* Ignore EXCEPT.  */
3709  if (lwp == except)
3710    return;
3711
3712  lwp_suspended_inc (lwp);
3713
3714  send_sigstop (thread, except);
3715}
3716
3717static void
3718mark_lwp_dead (struct lwp_info *lwp, int wstat)
3719{
3720  /* Store the exit status for later.  */
3721  lwp->status_pending_p = 1;
3722  lwp->status_pending = wstat;
3723
3724  /* Store in waitstatus as well, as there's nothing else to process
3725     for this event.  */
3726  if (WIFEXITED (wstat))
3727    lwp->waitstatus.set_exited (WEXITSTATUS (wstat));
3728  else if (WIFSIGNALED (wstat))
3729    lwp->waitstatus.set_signalled (gdb_signal_from_host (WTERMSIG (wstat)));
3730
3731  /* Prevent trying to stop it.  */
3732  lwp->stopped = 1;
3733
3734  /* No further stops are expected from a dead lwp.  */
3735  lwp->stop_expected = 0;
3736}
3737
3738/* Return true if LWP has exited already, and has a pending exit event
3739   to report to GDB.  */
3740
3741static int
3742lwp_is_marked_dead (struct lwp_info *lwp)
3743{
3744  return (lwp->status_pending_p
3745	  && (WIFEXITED (lwp->status_pending)
3746	      || WIFSIGNALED (lwp->status_pending)));
3747}
3748
3749void
3750linux_process_target::wait_for_sigstop ()
3751{
3752  struct thread_info *saved_thread;
3753  ptid_t saved_tid;
3754  int wstat;
3755  int ret;
3756
3757  saved_thread = current_thread;
3758  if (saved_thread != NULL)
3759    saved_tid = saved_thread->id;
3760  else
3761    saved_tid = null_ptid; /* avoid bogus unused warning */
3762
3763  scoped_restore_current_thread restore_thread;
3764
3765  threads_debug_printf ("pulling events");
3766
3767  /* Passing NULL_PTID as filter indicates we want all events to be
3768     left pending.  Eventually this returns when there are no
3769     unwaited-for children left.  */
3770  ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat, __WALL);
3771  gdb_assert (ret == -1);
3772
3773  if (saved_thread == NULL || mythread_alive (saved_tid))
3774    return;
3775  else
3776    {
3777      threads_debug_printf ("Previously current thread died.");
3778
3779      /* We can't change the current inferior behind GDB's back,
3780	 otherwise, a subsequent command may apply to the wrong
3781	 process.  */
3782      restore_thread.dont_restore ();
3783      switch_to_thread (nullptr);
3784    }
3785}
3786
3787bool
3788linux_process_target::stuck_in_jump_pad (thread_info *thread)
3789{
3790  struct lwp_info *lwp = get_thread_lwp (thread);
3791
3792  if (lwp->suspended != 0)
3793    {
3794      internal_error ("LWP %ld is suspended, suspended=%d\n",
3795		      lwpid_of (thread), lwp->suspended);
3796    }
3797  gdb_assert (lwp->stopped);
3798
3799  /* Allow debugging the jump pad, gdb_collect, etc..  */
3800  return (supports_fast_tracepoints ()
3801	  && agent_loaded_p ()
3802	  && (gdb_breakpoint_here (lwp->stop_pc)
3803	      || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3804	      || thread->last_resume_kind == resume_step)
3805	  && (linux_fast_tracepoint_collecting (lwp, NULL)
3806	      != fast_tpoint_collect_result::not_collecting));
3807}
3808
3809void
3810linux_process_target::move_out_of_jump_pad (thread_info *thread)
3811{
3812  struct lwp_info *lwp = get_thread_lwp (thread);
3813  int *wstat;
3814
3815  if (lwp->suspended != 0)
3816    {
3817      internal_error ("LWP %ld is suspended, suspended=%d\n",
3818		      lwpid_of (thread), lwp->suspended);
3819    }
3820  gdb_assert (lwp->stopped);
3821
3822  /* For gdb_breakpoint_here.  */
3823  scoped_restore_current_thread restore_thread;
3824  switch_to_thread (thread);
3825
3826  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3827
3828  /* Allow debugging the jump pad, gdb_collect, etc.  */
3829  if (!gdb_breakpoint_here (lwp->stop_pc)
3830      && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3831      && thread->last_resume_kind != resume_step
3832      && maybe_move_out_of_jump_pad (lwp, wstat))
3833    {
3834      threads_debug_printf ("LWP %ld needs stabilizing (in jump pad)",
3835			    lwpid_of (thread));
3836
3837      if (wstat)
3838	{
3839	  lwp->status_pending_p = 0;
3840	  enqueue_one_deferred_signal (lwp, wstat);
3841
3842	  threads_debug_printf ("Signal %d for LWP %ld deferred (in jump pad",
3843				WSTOPSIG (*wstat), lwpid_of (thread));
3844	}
3845
3846      resume_one_lwp (lwp, 0, 0, NULL);
3847    }
3848  else
3849    lwp_suspended_inc (lwp);
3850}
3851
3852static bool
3853lwp_running (thread_info *thread)
3854{
3855  struct lwp_info *lwp = get_thread_lwp (thread);
3856
3857  if (lwp_is_marked_dead (lwp))
3858    return false;
3859
3860  return !lwp->stopped;
3861}
3862
3863void
3864linux_process_target::stop_all_lwps (int suspend, lwp_info *except)
3865{
3866  /* Should not be called recursively.  */
3867  gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3868
3869  THREADS_SCOPED_DEBUG_ENTER_EXIT;
3870
3871  threads_debug_printf
3872    ("%s, except=%s", suspend ? "stop-and-suspend" : "stop",
3873     (except != NULL
3874      ? target_pid_to_str (ptid_of (get_lwp_thread (except))).c_str ()
3875      : "none"));
3876
3877  stopping_threads = (suspend
3878		      ? STOPPING_AND_SUSPENDING_THREADS
3879		      : STOPPING_THREADS);
3880
3881  if (suspend)
3882    for_each_thread ([&] (thread_info *thread)
3883      {
3884	suspend_and_send_sigstop (thread, except);
3885      });
3886  else
3887    for_each_thread ([&] (thread_info *thread)
3888      {
3889	 send_sigstop (thread, except);
3890      });
3891
3892  wait_for_sigstop ();
3893  stopping_threads = NOT_STOPPING_THREADS;
3894
3895  threads_debug_printf ("setting stopping_threads back to !stopping");
3896}
3897
3898/* Enqueue one signal in the chain of signals which need to be
3899   delivered to this process on next resume.  */
3900
3901static void
3902enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3903{
3904  lwp->pending_signals.emplace_back (signal);
3905  if (info == nullptr)
3906    memset (&lwp->pending_signals.back ().info, 0, sizeof (siginfo_t));
3907  else
3908    lwp->pending_signals.back ().info = *info;
3909}
3910
3911void
3912linux_process_target::install_software_single_step_breakpoints (lwp_info *lwp)
3913{
3914  struct thread_info *thread = get_lwp_thread (lwp);
3915  struct regcache *regcache = get_thread_regcache (thread, 1);
3916
3917  scoped_restore_current_thread restore_thread;
3918
3919  switch_to_thread (thread);
3920  std::vector<CORE_ADDR> next_pcs = low_get_next_pcs (regcache);
3921
3922  for (CORE_ADDR pc : next_pcs)
3923    set_single_step_breakpoint (pc, current_ptid);
3924}
3925
3926int
3927linux_process_target::single_step (lwp_info* lwp)
3928{
3929  int step = 0;
3930
3931  if (supports_hardware_single_step ())
3932    {
3933      step = 1;
3934    }
3935  else if (supports_software_single_step ())
3936    {
3937      install_software_single_step_breakpoints (lwp);
3938      step = 0;
3939    }
3940  else
3941    threads_debug_printf ("stepping is not implemented on this target");
3942
3943  return step;
3944}
3945
3946/* The signal can be delivered to the inferior if we are not trying to
3947   finish a fast tracepoint collect.  Since signal can be delivered in
3948   the step-over, the program may go to signal handler and trap again
3949   after return from the signal handler.  We can live with the spurious
3950   double traps.  */
3951
3952static int
3953lwp_signal_can_be_delivered (struct lwp_info *lwp)
3954{
3955  return (lwp->collecting_fast_tracepoint
3956	  == fast_tpoint_collect_result::not_collecting);
3957}
3958
3959void
3960linux_process_target::resume_one_lwp_throw (lwp_info *lwp, int step,
3961					    int signal, siginfo_t *info)
3962{
3963  struct thread_info *thread = get_lwp_thread (lwp);
3964  int ptrace_request;
3965  struct process_info *proc = get_thread_process (thread);
3966
3967  /* Note that target description may not be initialised
3968     (proc->tdesc == NULL) at this point because the program hasn't
3969     stopped at the first instruction yet.  It means GDBserver skips
3970     the extra traps from the wrapper program (see option --wrapper).
3971     Code in this function that requires register access should be
3972     guarded by proc->tdesc == NULL or something else.  */
3973
3974  if (lwp->stopped == 0)
3975    return;
3976
3977  gdb_assert (lwp->waitstatus.kind () == TARGET_WAITKIND_IGNORE);
3978
3979  fast_tpoint_collect_result fast_tp_collecting
3980    = lwp->collecting_fast_tracepoint;
3981
3982  gdb_assert (!stabilizing_threads
3983	      || (fast_tp_collecting
3984		  != fast_tpoint_collect_result::not_collecting));
3985
3986  /* Cancel actions that rely on GDB not changing the PC (e.g., the
3987     user used the "jump" command, or "set $pc = foo").  */
3988  if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3989    {
3990      /* Collecting 'while-stepping' actions doesn't make sense
3991	 anymore.  */
3992      release_while_stepping_state_list (thread);
3993    }
3994
3995  /* If we have pending signals or status, and a new signal, enqueue the
3996     signal.  Also enqueue the signal if it can't be delivered to the
3997     inferior right now.  */
3998  if (signal != 0
3999      && (lwp->status_pending_p
4000	  || !lwp->pending_signals.empty ()
4001	  || !lwp_signal_can_be_delivered (lwp)))
4002    {
4003      enqueue_pending_signal (lwp, signal, info);
4004
4005      /* Postpone any pending signal.  It was enqueued above.  */
4006      signal = 0;
4007    }
4008
4009  if (lwp->status_pending_p)
4010    {
4011      threads_debug_printf
4012	("Not resuming lwp %ld (%s, stop %s); has pending status",
4013	 lwpid_of (thread), step ? "step" : "continue",
4014	 lwp->stop_expected ? "expected" : "not expected");
4015      return;
4016    }
4017
4018  scoped_restore_current_thread restore_thread;
4019  switch_to_thread (thread);
4020
4021  /* This bit needs some thinking about.  If we get a signal that
4022     we must report while a single-step reinsert is still pending,
4023     we often end up resuming the thread.  It might be better to
4024     (ew) allow a stack of pending events; then we could be sure that
4025     the reinsert happened right away and not lose any signals.
4026
4027     Making this stack would also shrink the window in which breakpoints are
4028     uninserted (see comment in linux_wait_for_lwp) but not enough for
4029     complete correctness, so it won't solve that problem.  It may be
4030     worthwhile just to solve this one, however.  */
4031  if (lwp->bp_reinsert != 0)
4032    {
4033      threads_debug_printf ("  pending reinsert at 0x%s",
4034			    paddress (lwp->bp_reinsert));
4035
4036      if (supports_hardware_single_step ())
4037	{
4038	  if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4039	    {
4040	      if (step == 0)
4041		warning ("BAD - reinserting but not stepping.");
4042	      if (lwp->suspended)
4043		warning ("BAD - reinserting and suspended(%d).",
4044				 lwp->suspended);
4045	    }
4046	}
4047
4048      step = maybe_hw_step (thread);
4049    }
4050
4051  if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4052    threads_debug_printf
4053      ("lwp %ld wants to get out of fast tracepoint jump pad "
4054       "(exit-jump-pad-bkpt)", lwpid_of (thread));
4055
4056  else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4057    {
4058      threads_debug_printf
4059	("lwp %ld wants to get out of fast tracepoint jump pad single-stepping",
4060	 lwpid_of (thread));
4061
4062      if (supports_hardware_single_step ())
4063	step = 1;
4064      else
4065	{
4066	  internal_error ("moving out of jump pad single-stepping"
4067			  " not implemented on this target");
4068	}
4069    }
4070
4071  /* If we have while-stepping actions in this thread set it stepping.
4072     If we have a signal to deliver, it may or may not be set to
4073     SIG_IGN, we don't know.  Assume so, and allow collecting
4074     while-stepping into a signal handler.  A possible smart thing to
4075     do would be to set an internal breakpoint at the signal return
4076     address, continue, and carry on catching this while-stepping
4077     action only when that breakpoint is hit.  A future
4078     enhancement.  */
4079  if (thread->while_stepping != NULL)
4080    {
4081      threads_debug_printf
4082	("lwp %ld has a while-stepping action -> forcing step.",
4083	 lwpid_of (thread));
4084
4085      step = single_step (lwp);
4086    }
4087
4088  if (proc->tdesc != NULL && low_supports_breakpoints ())
4089    {
4090      struct regcache *regcache = get_thread_regcache (current_thread, 1);
4091
4092      lwp->stop_pc = low_get_pc (regcache);
4093
4094      threads_debug_printf ("  %s from pc 0x%lx", step ? "step" : "continue",
4095			    (long) lwp->stop_pc);
4096    }
4097
4098  /* If we have pending signals, consume one if it can be delivered to
4099     the inferior.  */
4100  if (!lwp->pending_signals.empty () && lwp_signal_can_be_delivered (lwp))
4101    {
4102      const pending_signal &p_sig = lwp->pending_signals.front ();
4103
4104      signal = p_sig.signal;
4105      if (p_sig.info.si_signo != 0)
4106	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4107		&p_sig.info);
4108
4109      lwp->pending_signals.pop_front ();
4110    }
4111
4112  threads_debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)",
4113			lwpid_of (thread), step ? "step" : "continue", signal,
4114			lwp->stop_expected ? "expected" : "not expected");
4115
4116  low_prepare_to_resume (lwp);
4117
4118  regcache_invalidate_thread (thread);
4119  errno = 0;
4120  lwp->stepping = step;
4121  if (step)
4122    ptrace_request = PTRACE_SINGLESTEP;
4123  else if (gdb_catching_syscalls_p (lwp))
4124    ptrace_request = PTRACE_SYSCALL;
4125  else
4126    ptrace_request = PTRACE_CONT;
4127  ptrace (ptrace_request,
4128	  lwpid_of (thread),
4129	  (PTRACE_TYPE_ARG3) 0,
4130	  /* Coerce to a uintptr_t first to avoid potential gcc warning
4131	     of coercing an 8 byte integer to a 4 byte pointer.  */
4132	  (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4133
4134  if (errno)
4135    {
4136      int saved_errno = errno;
4137
4138      threads_debug_printf ("ptrace errno = %d (%s)",
4139			    saved_errno, strerror (saved_errno));
4140
4141      errno = saved_errno;
4142      perror_with_name ("resuming thread");
4143    }
4144
4145  /* Successfully resumed.  Clear state that no longer makes sense,
4146     and mark the LWP as running.  Must not do this before resuming
4147     otherwise if that fails other code will be confused.  E.g., we'd
4148     later try to stop the LWP and hang forever waiting for a stop
4149     status.  Note that we must not throw after this is cleared,
4150     otherwise handle_zombie_lwp_error would get confused.  */
4151  lwp->stopped = 0;
4152  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4153}
4154
4155void
4156linux_process_target::low_prepare_to_resume (lwp_info *lwp)
4157{
4158  /* Nop.  */
4159}
4160
4161/* Called when we try to resume a stopped LWP and that errors out.  If
4162   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4163   or about to become), discard the error, clear any pending status
4164   the LWP may have, and return true (we'll collect the exit status
4165   soon enough).  Otherwise, return false.  */
4166
4167static int
4168check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4169{
4170  struct thread_info *thread = get_lwp_thread (lp);
4171
4172  /* If we get an error after resuming the LWP successfully, we'd
4173     confuse !T state for the LWP being gone.  */
4174  gdb_assert (lp->stopped);
4175
4176  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4177     because even if ptrace failed with ESRCH, the tracee may be "not
4178     yet fully dead", but already refusing ptrace requests.  In that
4179     case the tracee has 'R (Running)' state for a little bit
4180     (observed in Linux 3.18).  See also the note on ESRCH in the
4181     ptrace(2) man page.  Instead, check whether the LWP has any state
4182     other than ptrace-stopped.  */
4183
4184  /* Don't assume anything if /proc/PID/status can't be read.  */
4185  if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4186    {
4187      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4188      lp->status_pending_p = 0;
4189      return 1;
4190    }
4191  return 0;
4192}
4193
4194void
4195linux_process_target::resume_one_lwp (lwp_info *lwp, int step, int signal,
4196				      siginfo_t *info)
4197{
4198  try
4199    {
4200      resume_one_lwp_throw (lwp, step, signal, info);
4201    }
4202  catch (const gdb_exception_error &ex)
4203    {
4204      if (check_ptrace_stopped_lwp_gone (lwp))
4205	{
4206	  /* This could because we tried to resume an LWP after its leader
4207	     exited.  Mark it as resumed, so we can collect an exit event
4208	     from it.  */
4209	  lwp->stopped = 0;
4210	  lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4211	}
4212      else
4213	throw;
4214    }
4215}
4216
4217/* This function is called once per thread via for_each_thread.
4218   We look up which resume request applies to THREAD and mark it with a
4219   pointer to the appropriate resume request.
4220
4221   This algorithm is O(threads * resume elements), but resume elements
4222   is small (and will remain small at least until GDB supports thread
4223   suspension).  */
4224
4225static void
4226linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4227{
4228  struct lwp_info *lwp = get_thread_lwp (thread);
4229
4230  for (int ndx = 0; ndx < n; ndx++)
4231    {
4232      ptid_t ptid = resume[ndx].thread;
4233      if (ptid == minus_one_ptid
4234	  || ptid == thread->id
4235	  /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4236	     of PID'.  */
4237	  || (ptid.pid () == pid_of (thread)
4238	      && (ptid.is_pid ()
4239		  || ptid.lwp () == -1)))
4240	{
4241	  if (resume[ndx].kind == resume_stop
4242	      && thread->last_resume_kind == resume_stop)
4243	    {
4244	      threads_debug_printf
4245		("already %s LWP %ld at GDB's request",
4246		 (thread->last_status.kind () == TARGET_WAITKIND_STOPPED
4247		  ? "stopped" : "stopping"),
4248		  lwpid_of (thread));
4249
4250	      continue;
4251	    }
4252
4253	  /* Ignore (wildcard) resume requests for already-resumed
4254	     threads.  */
4255	  if (resume[ndx].kind != resume_stop
4256	      && thread->last_resume_kind != resume_stop)
4257	    {
4258	      threads_debug_printf
4259		("already %s LWP %ld at GDB's request",
4260		 (thread->last_resume_kind == resume_step
4261		  ? "stepping" : "continuing"),
4262		 lwpid_of (thread));
4263	      continue;
4264	    }
4265
4266	  /* Don't let wildcard resumes resume fork children that GDB
4267	     does not yet know are new fork children.  */
4268	  if (lwp->fork_relative != NULL)
4269	    {
4270	      struct lwp_info *rel = lwp->fork_relative;
4271
4272	      if (rel->status_pending_p
4273		  && (rel->waitstatus.kind () == TARGET_WAITKIND_FORKED
4274		      || rel->waitstatus.kind () == TARGET_WAITKIND_VFORKED))
4275		{
4276		  threads_debug_printf
4277		    ("not resuming LWP %ld: has queued stop reply",
4278		     lwpid_of (thread));
4279		  continue;
4280		}
4281	    }
4282
4283	  /* If the thread has a pending event that has already been
4284	     reported to GDBserver core, but GDB has not pulled the
4285	     event out of the vStopped queue yet, likewise, ignore the
4286	     (wildcard) resume request.  */
4287	  if (in_queued_stop_replies (thread->id))
4288	    {
4289	      threads_debug_printf
4290		("not resuming LWP %ld: has queued stop reply",
4291		 lwpid_of (thread));
4292	      continue;
4293	    }
4294
4295	  lwp->resume = &resume[ndx];
4296	  thread->last_resume_kind = lwp->resume->kind;
4297
4298	  lwp->step_range_start = lwp->resume->step_range_start;
4299	  lwp->step_range_end = lwp->resume->step_range_end;
4300
4301	  /* If we had a deferred signal to report, dequeue one now.
4302	     This can happen if LWP gets more than one signal while
4303	     trying to get out of a jump pad.  */
4304	  if (lwp->stopped
4305	      && !lwp->status_pending_p
4306	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4307	    {
4308	      lwp->status_pending_p = 1;
4309
4310	      threads_debug_printf
4311		("Dequeueing deferred signal %d for LWP %ld, "
4312		 "leaving status pending.",
4313		 WSTOPSIG (lwp->status_pending),
4314		 lwpid_of (thread));
4315	    }
4316
4317	  return;
4318	}
4319    }
4320
4321  /* No resume action for this thread.  */
4322  lwp->resume = NULL;
4323}
4324
4325bool
4326linux_process_target::resume_status_pending (thread_info *thread)
4327{
4328  struct lwp_info *lwp = get_thread_lwp (thread);
4329
4330  /* LWPs which will not be resumed are not interesting, because
4331     we might not wait for them next time through linux_wait.  */
4332  if (lwp->resume == NULL)
4333    return false;
4334
4335  return thread_still_has_status_pending (thread);
4336}
4337
4338bool
4339linux_process_target::thread_needs_step_over (thread_info *thread)
4340{
4341  struct lwp_info *lwp = get_thread_lwp (thread);
4342  CORE_ADDR pc;
4343  struct process_info *proc = get_thread_process (thread);
4344
4345  /* GDBserver is skipping the extra traps from the wrapper program,
4346     don't have to do step over.  */
4347  if (proc->tdesc == NULL)
4348    return false;
4349
4350  /* LWPs which will not be resumed are not interesting, because we
4351     might not wait for them next time through linux_wait.  */
4352
4353  if (!lwp->stopped)
4354    {
4355      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped",
4356			    lwpid_of (thread));
4357      return false;
4358    }
4359
4360  if (thread->last_resume_kind == resume_stop)
4361    {
4362      threads_debug_printf
4363	("Need step over [LWP %ld]? Ignoring, should remain stopped",
4364	 lwpid_of (thread));
4365      return false;
4366    }
4367
4368  gdb_assert (lwp->suspended >= 0);
4369
4370  if (lwp->suspended)
4371    {
4372      threads_debug_printf ("Need step over [LWP %ld]? Ignoring, suspended",
4373			    lwpid_of (thread));
4374      return false;
4375    }
4376
4377  if (lwp->status_pending_p)
4378    {
4379      threads_debug_printf
4380	("Need step over [LWP %ld]? Ignoring, has pending status.",
4381	 lwpid_of (thread));
4382      return false;
4383    }
4384
4385  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
4386     or we have.  */
4387  pc = get_pc (lwp);
4388
4389  /* If the PC has changed since we stopped, then don't do anything,
4390     and let the breakpoint/tracepoint be hit.  This happens if, for
4391     instance, GDB handled the decr_pc_after_break subtraction itself,
4392     GDB is OOL stepping this thread, or the user has issued a "jump"
4393     command, or poked thread's registers herself.  */
4394  if (pc != lwp->stop_pc)
4395    {
4396      threads_debug_printf
4397	("Need step over [LWP %ld]? Cancelling, PC was changed. "
4398	 "Old stop_pc was 0x%s, PC is now 0x%s", lwpid_of (thread),
4399	 paddress (lwp->stop_pc), paddress (pc));
4400      return false;
4401    }
4402
4403  /* On software single step target, resume the inferior with signal
4404     rather than stepping over.  */
4405  if (supports_software_single_step ()
4406      && !lwp->pending_signals.empty ()
4407      && lwp_signal_can_be_delivered (lwp))
4408    {
4409      threads_debug_printf
4410	("Need step over [LWP %ld]? Ignoring, has pending signals.",
4411	 lwpid_of (thread));
4412
4413      return false;
4414    }
4415
4416  scoped_restore_current_thread restore_thread;
4417  switch_to_thread (thread);
4418
4419  /* We can only step over breakpoints we know about.  */
4420  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4421    {
4422      /* Don't step over a breakpoint that GDB expects to hit
4423	 though.  If the condition is being evaluated on the target's side
4424	 and it evaluate to false, step over this breakpoint as well.  */
4425      if (gdb_breakpoint_here (pc)
4426	  && gdb_condition_true_at_breakpoint (pc)
4427	  && gdb_no_commands_at_breakpoint (pc))
4428	{
4429	  threads_debug_printf ("Need step over [LWP %ld]? yes, but found"
4430				" GDB breakpoint at 0x%s; skipping step over",
4431				lwpid_of (thread), paddress (pc));
4432
4433	  return false;
4434	}
4435      else
4436	{
4437	  threads_debug_printf ("Need step over [LWP %ld]? yes, "
4438				"found breakpoint at 0x%s",
4439				lwpid_of (thread), paddress (pc));
4440
4441	  /* We've found an lwp that needs stepping over --- return 1 so
4442	     that find_thread stops looking.  */
4443	  return true;
4444	}
4445    }
4446
4447  threads_debug_printf
4448    ("Need step over [LWP %ld]? No, no breakpoint found at 0x%s",
4449     lwpid_of (thread), paddress (pc));
4450
4451  return false;
4452}
4453
4454void
4455linux_process_target::start_step_over (lwp_info *lwp)
4456{
4457  struct thread_info *thread = get_lwp_thread (lwp);
4458  CORE_ADDR pc;
4459
4460  threads_debug_printf ("Starting step-over on LWP %ld.  Stopping all threads",
4461			lwpid_of (thread));
4462
4463  stop_all_lwps (1, lwp);
4464
4465  if (lwp->suspended != 0)
4466    {
4467      internal_error ("LWP %ld suspended=%d\n", lwpid_of (thread),
4468		      lwp->suspended);
4469    }
4470
4471  threads_debug_printf ("Done stopping all threads for step-over.");
4472
4473  /* Note, we should always reach here with an already adjusted PC,
4474     either by GDB (if we're resuming due to GDB's request), or by our
4475     caller, if we just finished handling an internal breakpoint GDB
4476     shouldn't care about.  */
4477  pc = get_pc (lwp);
4478
4479  bool step = false;
4480  {
4481    scoped_restore_current_thread restore_thread;
4482    switch_to_thread (thread);
4483
4484    lwp->bp_reinsert = pc;
4485    uninsert_breakpoints_at (pc);
4486    uninsert_fast_tracepoint_jumps_at (pc);
4487
4488    step = single_step (lwp);
4489  }
4490
4491  resume_one_lwp (lwp, step, 0, NULL);
4492
4493  /* Require next event from this LWP.  */
4494  step_over_bkpt = thread->id;
4495}
4496
4497bool
4498linux_process_target::finish_step_over (lwp_info *lwp)
4499{
4500  if (lwp->bp_reinsert != 0)
4501    {
4502      scoped_restore_current_thread restore_thread;
4503
4504      threads_debug_printf ("Finished step over.");
4505
4506      switch_to_thread (get_lwp_thread (lwp));
4507
4508      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
4509	 may be no breakpoint to reinsert there by now.  */
4510      reinsert_breakpoints_at (lwp->bp_reinsert);
4511      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4512
4513      lwp->bp_reinsert = 0;
4514
4515      /* Delete any single-step breakpoints.  No longer needed.  We
4516	 don't have to worry about other threads hitting this trap,
4517	 and later not being able to explain it, because we were
4518	 stepping over a breakpoint, and we hold all threads but
4519	 LWP stopped while doing that.  */
4520      if (!supports_hardware_single_step ())
4521	{
4522	  gdb_assert (has_single_step_breakpoints (current_thread));
4523	  delete_single_step_breakpoints (current_thread);
4524	}
4525
4526      step_over_bkpt = null_ptid;
4527      return true;
4528    }
4529  else
4530    return false;
4531}
4532
4533void
4534linux_process_target::complete_ongoing_step_over ()
4535{
4536  if (step_over_bkpt != null_ptid)
4537    {
4538      struct lwp_info *lwp;
4539      int wstat;
4540      int ret;
4541
4542      threads_debug_printf ("detach: step over in progress, finish it first");
4543
4544      /* Passing NULL_PTID as filter indicates we want all events to
4545	 be left pending.  Eventually this returns when there are no
4546	 unwaited-for children left.  */
4547      ret = wait_for_event_filtered (minus_one_ptid, null_ptid, &wstat,
4548				     __WALL);
4549      gdb_assert (ret == -1);
4550
4551      lwp = find_lwp_pid (step_over_bkpt);
4552      if (lwp != NULL)
4553	{
4554	  finish_step_over (lwp);
4555
4556	  /* If we got our step SIGTRAP, don't leave it pending,
4557	     otherwise we would report it to GDB as a spurious
4558	     SIGTRAP.  */
4559	  gdb_assert (lwp->status_pending_p);
4560	  if (WIFSTOPPED (lwp->status_pending)
4561	      && WSTOPSIG (lwp->status_pending) == SIGTRAP)
4562	    {
4563	      thread_info *thread = get_lwp_thread (lwp);
4564	      if (thread->last_resume_kind != resume_step)
4565		{
4566		  threads_debug_printf ("detach: discard step-over SIGTRAP");
4567
4568		  lwp->status_pending_p = 0;
4569		  lwp->status_pending = 0;
4570		  resume_one_lwp (lwp, lwp->stepping, 0, NULL);
4571		}
4572	      else
4573		threads_debug_printf
4574		  ("detach: resume_step, not discarding step-over SIGTRAP");
4575	    }
4576	}
4577      step_over_bkpt = null_ptid;
4578      unsuspend_all_lwps (lwp);
4579    }
4580}
4581
4582void
4583linux_process_target::resume_one_thread (thread_info *thread,
4584					 bool leave_all_stopped)
4585{
4586  struct lwp_info *lwp = get_thread_lwp (thread);
4587  int leave_pending;
4588
4589  if (lwp->resume == NULL)
4590    return;
4591
4592  if (lwp->resume->kind == resume_stop)
4593    {
4594      threads_debug_printf ("resume_stop request for LWP %ld",
4595			    lwpid_of (thread));
4596
4597      if (!lwp->stopped)
4598	{
4599	  threads_debug_printf ("stopping LWP %ld", lwpid_of (thread));
4600
4601	  /* Stop the thread, and wait for the event asynchronously,
4602	     through the event loop.  */
4603	  send_sigstop (lwp);
4604	}
4605      else
4606	{
4607	  threads_debug_printf ("already stopped LWP %ld", lwpid_of (thread));
4608
4609	  /* The LWP may have been stopped in an internal event that
4610	     was not meant to be notified back to GDB (e.g., gdbserver
4611	     breakpoint), so we should be reporting a stop event in
4612	     this case too.  */
4613
4614	  /* If the thread already has a pending SIGSTOP, this is a
4615	     no-op.  Otherwise, something later will presumably resume
4616	     the thread and this will cause it to cancel any pending
4617	     operation, due to last_resume_kind == resume_stop.  If
4618	     the thread already has a pending status to report, we
4619	     will still report it the next time we wait - see
4620	     status_pending_p_callback.  */
4621
4622	  /* If we already have a pending signal to report, then
4623	     there's no need to queue a SIGSTOP, as this means we're
4624	     midway through moving the LWP out of the jumppad, and we
4625	     will report the pending signal as soon as that is
4626	     finished.  */
4627	  if (lwp->pending_signals_to_report.empty ())
4628	    send_sigstop (lwp);
4629	}
4630
4631      /* For stop requests, we're done.  */
4632      lwp->resume = NULL;
4633      thread->last_status.set_ignore ();
4634      return;
4635    }
4636
4637  /* If this thread which is about to be resumed has a pending status,
4638     then don't resume it - we can just report the pending status.
4639     Likewise if it is suspended, because e.g., another thread is
4640     stepping past a breakpoint.  Make sure to queue any signals that
4641     would otherwise be sent.  In all-stop mode, we do this decision
4642     based on if *any* thread has a pending status.  If there's a
4643     thread that needs the step-over-breakpoint dance, then don't
4644     resume any other thread but that particular one.  */
4645  leave_pending = (lwp->suspended
4646		   || lwp->status_pending_p
4647		   || leave_all_stopped);
4648
4649  /* If we have a new signal, enqueue the signal.  */
4650  if (lwp->resume->sig != 0)
4651    {
4652      siginfo_t info, *info_p;
4653
4654      /* If this is the same signal we were previously stopped by,
4655	 make sure to queue its siginfo.  */
4656      if (WIFSTOPPED (lwp->last_status)
4657	  && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4658	  && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4659		     (PTRACE_TYPE_ARG3) 0, &info) == 0)
4660	info_p = &info;
4661      else
4662	info_p = NULL;
4663
4664      enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4665    }
4666
4667  if (!leave_pending)
4668    {
4669      threads_debug_printf ("resuming LWP %ld", lwpid_of (thread));
4670
4671      proceed_one_lwp (thread, NULL);
4672    }
4673  else
4674    threads_debug_printf ("leaving LWP %ld stopped", lwpid_of (thread));
4675
4676  thread->last_status.set_ignore ();
4677  lwp->resume = NULL;
4678}
4679
4680void
4681linux_process_target::resume (thread_resume *resume_info, size_t n)
4682{
4683  struct thread_info *need_step_over = NULL;
4684
4685 THREADS_SCOPED_DEBUG_ENTER_EXIT;
4686
4687  for_each_thread ([&] (thread_info *thread)
4688    {
4689      linux_set_resume_request (thread, resume_info, n);
4690    });
4691
4692  /* If there is a thread which would otherwise be resumed, which has
4693     a pending status, then don't resume any threads - we can just
4694     report the pending status.  Make sure to queue any signals that
4695     would otherwise be sent.  In non-stop mode, we'll apply this
4696     logic to each thread individually.  We consume all pending events
4697     before considering to start a step-over (in all-stop).  */
4698  bool any_pending = false;
4699  if (!non_stop)
4700    any_pending = find_thread ([this] (thread_info *thread)
4701		    {
4702		      return resume_status_pending (thread);
4703		    }) != nullptr;
4704
4705  /* If there is a thread which would otherwise be resumed, which is
4706     stopped at a breakpoint that needs stepping over, then don't
4707     resume any threads - have it step over the breakpoint with all
4708     other threads stopped, then resume all threads again.  Make sure
4709     to queue any signals that would otherwise be delivered or
4710     queued.  */
4711  if (!any_pending && low_supports_breakpoints ())
4712    need_step_over = find_thread ([this] (thread_info *thread)
4713		       {
4714			 return thread_needs_step_over (thread);
4715		       });
4716
4717  bool leave_all_stopped = (need_step_over != NULL || any_pending);
4718
4719  if (need_step_over != NULL)
4720    threads_debug_printf ("Not resuming all, need step over");
4721  else if (any_pending)
4722    threads_debug_printf ("Not resuming, all-stop and found "
4723			  "an LWP with pending status");
4724  else
4725    threads_debug_printf ("Resuming, no pending status or step over needed");
4726
4727  /* Even if we're leaving threads stopped, queue all signals we'd
4728     otherwise deliver.  */
4729  for_each_thread ([&] (thread_info *thread)
4730    {
4731      resume_one_thread (thread, leave_all_stopped);
4732    });
4733
4734  if (need_step_over)
4735    start_step_over (get_thread_lwp (need_step_over));
4736
4737  /* We may have events that were pending that can/should be sent to
4738     the client now.  Trigger a linux_wait call.  */
4739  if (target_is_async_p ())
4740    async_file_mark ();
4741}
4742
4743void
4744linux_process_target::proceed_one_lwp (thread_info *thread, lwp_info *except)
4745{
4746  struct lwp_info *lwp = get_thread_lwp (thread);
4747  int step;
4748
4749  if (lwp == except)
4750    return;
4751
4752  threads_debug_printf ("lwp %ld", lwpid_of (thread));
4753
4754  if (!lwp->stopped)
4755    {
4756      threads_debug_printf ("   LWP %ld already running", lwpid_of (thread));
4757      return;
4758    }
4759
4760  if (thread->last_resume_kind == resume_stop
4761      && thread->last_status.kind () != TARGET_WAITKIND_IGNORE)
4762    {
4763      threads_debug_printf ("   client wants LWP to remain %ld stopped",
4764			    lwpid_of (thread));
4765      return;
4766    }
4767
4768  if (lwp->status_pending_p)
4769    {
4770      threads_debug_printf ("   LWP %ld has pending status, leaving stopped",
4771			    lwpid_of (thread));
4772      return;
4773    }
4774
4775  gdb_assert (lwp->suspended >= 0);
4776
4777  if (lwp->suspended)
4778    {
4779      threads_debug_printf ("   LWP %ld is suspended", lwpid_of (thread));
4780      return;
4781    }
4782
4783  if (thread->last_resume_kind == resume_stop
4784      && lwp->pending_signals_to_report.empty ()
4785      && (lwp->collecting_fast_tracepoint
4786	  == fast_tpoint_collect_result::not_collecting))
4787    {
4788      /* We haven't reported this LWP as stopped yet (otherwise, the
4789	 last_status.kind check above would catch it, and we wouldn't
4790	 reach here.  This LWP may have been momentarily paused by a
4791	 stop_all_lwps call while handling for example, another LWP's
4792	 step-over.  In that case, the pending expected SIGSTOP signal
4793	 that was queued at vCont;t handling time will have already
4794	 been consumed by wait_for_sigstop, and so we need to requeue
4795	 another one here.  Note that if the LWP already has a SIGSTOP
4796	 pending, this is a no-op.  */
4797
4798      threads_debug_printf
4799	("Client wants LWP %ld to stop.  Making sure it has a SIGSTOP pending",
4800	 lwpid_of (thread));
4801
4802      send_sigstop (lwp);
4803    }
4804
4805  if (thread->last_resume_kind == resume_step)
4806    {
4807      threads_debug_printf ("   stepping LWP %ld, client wants it stepping",
4808			    lwpid_of (thread));
4809
4810      /* If resume_step is requested by GDB, install single-step
4811	 breakpoints when the thread is about to be actually resumed if
4812	 the single-step breakpoints weren't removed.  */
4813      if (supports_software_single_step ()
4814	  && !has_single_step_breakpoints (thread))
4815	install_software_single_step_breakpoints (lwp);
4816
4817      step = maybe_hw_step (thread);
4818    }
4819  else if (lwp->bp_reinsert != 0)
4820    {
4821      threads_debug_printf ("   stepping LWP %ld, reinsert set",
4822			    lwpid_of (thread));
4823
4824      step = maybe_hw_step (thread);
4825    }
4826  else
4827    step = 0;
4828
4829  resume_one_lwp (lwp, step, 0, NULL);
4830}
4831
4832void
4833linux_process_target::unsuspend_and_proceed_one_lwp (thread_info *thread,
4834						     lwp_info *except)
4835{
4836  struct lwp_info *lwp = get_thread_lwp (thread);
4837
4838  if (lwp == except)
4839    return;
4840
4841  lwp_suspended_decr (lwp);
4842
4843  proceed_one_lwp (thread, except);
4844}
4845
4846void
4847linux_process_target::proceed_all_lwps ()
4848{
4849  struct thread_info *need_step_over;
4850
4851  /* If there is a thread which would otherwise be resumed, which is
4852     stopped at a breakpoint that needs stepping over, then don't
4853     resume any threads - have it step over the breakpoint with all
4854     other threads stopped, then resume all threads again.  */
4855
4856  if (low_supports_breakpoints ())
4857    {
4858      need_step_over = find_thread ([this] (thread_info *thread)
4859			 {
4860			   return thread_needs_step_over (thread);
4861			 });
4862
4863      if (need_step_over != NULL)
4864	{
4865	  threads_debug_printf ("found thread %ld needing a step-over",
4866				lwpid_of (need_step_over));
4867
4868	  start_step_over (get_thread_lwp (need_step_over));
4869	  return;
4870	}
4871    }
4872
4873  threads_debug_printf ("Proceeding, no step-over needed");
4874
4875  for_each_thread ([this] (thread_info *thread)
4876    {
4877      proceed_one_lwp (thread, NULL);
4878    });
4879}
4880
4881void
4882linux_process_target::unstop_all_lwps (int unsuspend, lwp_info *except)
4883{
4884  THREADS_SCOPED_DEBUG_ENTER_EXIT;
4885
4886  if (except)
4887    threads_debug_printf ("except=(LWP %ld)",
4888		  lwpid_of (get_lwp_thread (except)));
4889  else
4890    threads_debug_printf ("except=nullptr");
4891
4892  if (unsuspend)
4893    for_each_thread ([&] (thread_info *thread)
4894      {
4895	unsuspend_and_proceed_one_lwp (thread, except);
4896      });
4897  else
4898    for_each_thread ([&] (thread_info *thread)
4899      {
4900	proceed_one_lwp (thread, except);
4901      });
4902}
4903
4904
4905#ifdef HAVE_LINUX_REGSETS
4906
4907#define use_linux_regsets 1
4908
4909/* Returns true if REGSET has been disabled.  */
4910
4911static int
4912regset_disabled (struct regsets_info *info, struct regset_info *regset)
4913{
4914  return (info->disabled_regsets != NULL
4915	  && info->disabled_regsets[regset - info->regsets]);
4916}
4917
4918/* Disable REGSET.  */
4919
4920static void
4921disable_regset (struct regsets_info *info, struct regset_info *regset)
4922{
4923  int dr_offset;
4924
4925  dr_offset = regset - info->regsets;
4926  if (info->disabled_regsets == NULL)
4927    info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4928  info->disabled_regsets[dr_offset] = 1;
4929}
4930
4931static int
4932regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4933				  struct regcache *regcache)
4934{
4935  struct regset_info *regset;
4936  int saw_general_regs = 0;
4937  int pid;
4938  struct iovec iov;
4939
4940  pid = lwpid_of (current_thread);
4941  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4942    {
4943      void *buf, *data;
4944      int nt_type, res;
4945
4946      if (regset->size == 0 || regset_disabled (regsets_info, regset))
4947	continue;
4948
4949      buf = xmalloc (regset->size);
4950
4951      nt_type = regset->nt_type;
4952      if (nt_type)
4953	{
4954	  iov.iov_base = buf;
4955	  iov.iov_len = regset->size;
4956	  data = (void *) &iov;
4957	}
4958      else
4959	data = buf;
4960
4961#ifndef __sparc__
4962      res = ptrace (regset->get_request, pid,
4963		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
4964#else
4965      res = ptrace (regset->get_request, pid, data, nt_type);
4966#endif
4967      if (res < 0)
4968	{
4969	  if (errno == EIO
4970	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
4971	    {
4972	      /* If we get EIO on a regset, or an EINVAL and the regset is
4973		 optional, do not try it again for this process mode.  */
4974	      disable_regset (regsets_info, regset);
4975	    }
4976	  else if (errno == ENODATA)
4977	    {
4978	      /* ENODATA may be returned if the regset is currently
4979		 not "active".  This can happen in normal operation,
4980		 so suppress the warning in this case.  */
4981	    }
4982	  else if (errno == ESRCH)
4983	    {
4984	      /* At this point, ESRCH should mean the process is
4985		 already gone, in which case we simply ignore attempts
4986		 to read its registers.  */
4987	    }
4988	  else
4989	    {
4990	      char s[256];
4991	      sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4992		       pid);
4993	      perror (s);
4994	    }
4995	}
4996      else
4997	{
4998	  if (regset->type == GENERAL_REGS)
4999	    saw_general_regs = 1;
5000	  regset->store_function (regcache, buf);
5001	}
5002      free (buf);
5003    }
5004  if (saw_general_regs)
5005    return 0;
5006  else
5007    return 1;
5008}
5009
5010static int
5011regsets_store_inferior_registers (struct regsets_info *regsets_info,
5012				  struct regcache *regcache)
5013{
5014  struct regset_info *regset;
5015  int saw_general_regs = 0;
5016  int pid;
5017  struct iovec iov;
5018
5019  pid = lwpid_of (current_thread);
5020  for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5021    {
5022      void *buf, *data;
5023      int nt_type, res;
5024
5025      if (regset->size == 0 || regset_disabled (regsets_info, regset)
5026	  || regset->fill_function == NULL)
5027	continue;
5028
5029      buf = xmalloc (regset->size);
5030
5031      /* First fill the buffer with the current register set contents,
5032	 in case there are any items in the kernel's regset that are
5033	 not in gdbserver's regcache.  */
5034
5035      nt_type = regset->nt_type;
5036      if (nt_type)
5037	{
5038	  iov.iov_base = buf;
5039	  iov.iov_len = regset->size;
5040	  data = (void *) &iov;
5041	}
5042      else
5043	data = buf;
5044
5045#ifndef __sparc__
5046      res = ptrace (regset->get_request, pid,
5047		    (PTRACE_TYPE_ARG3) (long) nt_type, data);
5048#else
5049      res = ptrace (regset->get_request, pid, data, nt_type);
5050#endif
5051
5052      if (res == 0)
5053	{
5054	  /* Then overlay our cached registers on that.  */
5055	  regset->fill_function (regcache, buf);
5056
5057	  /* Only now do we write the register set.  */
5058#ifndef __sparc__
5059	  res = ptrace (regset->set_request, pid,
5060			(PTRACE_TYPE_ARG3) (long) nt_type, data);
5061#else
5062	  res = ptrace (regset->set_request, pid, data, nt_type);
5063#endif
5064	}
5065
5066      if (res < 0)
5067	{
5068	  if (errno == EIO
5069	      || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5070	    {
5071	      /* If we get EIO on a regset, or an EINVAL and the regset is
5072		 optional, do not try it again for this process mode.  */
5073	      disable_regset (regsets_info, regset);
5074	    }
5075	  else if (errno == ESRCH)
5076	    {
5077	      /* At this point, ESRCH should mean the process is
5078		 already gone, in which case we simply ignore attempts
5079		 to change its registers.  See also the related
5080		 comment in resume_one_lwp.  */
5081	      free (buf);
5082	      return 0;
5083	    }
5084	  else
5085	    {
5086	      perror ("Warning: ptrace(regsets_store_inferior_registers)");
5087	    }
5088	}
5089      else if (regset->type == GENERAL_REGS)
5090	saw_general_regs = 1;
5091      free (buf);
5092    }
5093  if (saw_general_regs)
5094    return 0;
5095  else
5096    return 1;
5097}
5098
5099#else /* !HAVE_LINUX_REGSETS */
5100
5101#define use_linux_regsets 0
5102#define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5103#define regsets_store_inferior_registers(regsets_info, regcache) 1
5104
5105#endif
5106
5107/* Return 1 if register REGNO is supported by one of the regset ptrace
5108   calls or 0 if it has to be transferred individually.  */
5109
5110static int
5111linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5112{
5113  unsigned char mask = 1 << (regno % 8);
5114  size_t index = regno / 8;
5115
5116  return (use_linux_regsets
5117	  && (regs_info->regset_bitmap == NULL
5118	      || (regs_info->regset_bitmap[index] & mask) != 0));
5119}
5120
5121#ifdef HAVE_LINUX_USRREGS
5122
5123static int
5124register_addr (const struct usrregs_info *usrregs, int regnum)
5125{
5126  int addr;
5127
5128  if (regnum < 0 || regnum >= usrregs->num_regs)
5129    error ("Invalid register number %d.", regnum);
5130
5131  addr = usrregs->regmap[regnum];
5132
5133  return addr;
5134}
5135
5136
5137void
5138linux_process_target::fetch_register (const usrregs_info *usrregs,
5139				      regcache *regcache, int regno)
5140{
5141  CORE_ADDR regaddr;
5142  int i, size;
5143  char *buf;
5144  int pid;
5145
5146  if (regno >= usrregs->num_regs)
5147    return;
5148  if (low_cannot_fetch_register (regno))
5149    return;
5150
5151  regaddr = register_addr (usrregs, regno);
5152  if (regaddr == -1)
5153    return;
5154
5155  size = ((register_size (regcache->tdesc, regno)
5156	   + sizeof (PTRACE_XFER_TYPE) - 1)
5157	  & -sizeof (PTRACE_XFER_TYPE));
5158  buf = (char *) alloca (size);
5159
5160  pid = lwpid_of (current_thread);
5161  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5162    {
5163      errno = 0;
5164      *(PTRACE_XFER_TYPE *) (buf + i) =
5165	ptrace (PTRACE_PEEKUSER, pid,
5166		/* Coerce to a uintptr_t first to avoid potential gcc warning
5167		   of coercing an 8 byte integer to a 4 byte pointer.  */
5168		(PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5169      regaddr += sizeof (PTRACE_XFER_TYPE);
5170      if (errno != 0)
5171	{
5172	  /* Mark register REGNO unavailable.  */
5173	  supply_register (regcache, regno, NULL);
5174	  return;
5175	}
5176    }
5177
5178  low_supply_ptrace_register (regcache, regno, buf);
5179}
5180
5181void
5182linux_process_target::store_register (const usrregs_info *usrregs,
5183				      regcache *regcache, int regno)
5184{
5185  CORE_ADDR regaddr;
5186  int i, size;
5187  char *buf;
5188  int pid;
5189
5190  if (regno >= usrregs->num_regs)
5191    return;
5192  if (low_cannot_store_register (regno))
5193    return;
5194
5195  regaddr = register_addr (usrregs, regno);
5196  if (regaddr == -1)
5197    return;
5198
5199  size = ((register_size (regcache->tdesc, regno)
5200	   + sizeof (PTRACE_XFER_TYPE) - 1)
5201	  & -sizeof (PTRACE_XFER_TYPE));
5202  buf = (char *) alloca (size);
5203  memset (buf, 0, size);
5204
5205  low_collect_ptrace_register (regcache, regno, buf);
5206
5207  pid = lwpid_of (current_thread);
5208  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5209    {
5210      errno = 0;
5211      ptrace (PTRACE_POKEUSER, pid,
5212	    /* Coerce to a uintptr_t first to avoid potential gcc warning
5213	       about coercing an 8 byte integer to a 4 byte pointer.  */
5214	      (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5215	      (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5216      if (errno != 0)
5217	{
5218	  /* At this point, ESRCH should mean the process is
5219	     already gone, in which case we simply ignore attempts
5220	     to change its registers.  See also the related
5221	     comment in resume_one_lwp.  */
5222	  if (errno == ESRCH)
5223	    return;
5224
5225
5226	  if (!low_cannot_store_register (regno))
5227	    error ("writing register %d: %s", regno, safe_strerror (errno));
5228	}
5229      regaddr += sizeof (PTRACE_XFER_TYPE);
5230    }
5231}
5232#endif /* HAVE_LINUX_USRREGS */
5233
5234void
5235linux_process_target::low_collect_ptrace_register (regcache *regcache,
5236						   int regno, char *buf)
5237{
5238  collect_register (regcache, regno, buf);
5239}
5240
5241void
5242linux_process_target::low_supply_ptrace_register (regcache *regcache,
5243						  int regno, const char *buf)
5244{
5245  supply_register (regcache, regno, buf);
5246}
5247
5248void
5249linux_process_target::usr_fetch_inferior_registers (const regs_info *regs_info,
5250						    regcache *regcache,
5251						    int regno, int all)
5252{
5253#ifdef HAVE_LINUX_USRREGS
5254  struct usrregs_info *usr = regs_info->usrregs;
5255
5256  if (regno == -1)
5257    {
5258      for (regno = 0; regno < usr->num_regs; regno++)
5259	if (all || !linux_register_in_regsets (regs_info, regno))
5260	  fetch_register (usr, regcache, regno);
5261    }
5262  else
5263    fetch_register (usr, regcache, regno);
5264#endif
5265}
5266
5267void
5268linux_process_target::usr_store_inferior_registers (const regs_info *regs_info,
5269						    regcache *regcache,
5270						    int regno, int all)
5271{
5272#ifdef HAVE_LINUX_USRREGS
5273  struct usrregs_info *usr = regs_info->usrregs;
5274
5275  if (regno == -1)
5276    {
5277      for (regno = 0; regno < usr->num_regs; regno++)
5278	if (all || !linux_register_in_regsets (regs_info, regno))
5279	  store_register (usr, regcache, regno);
5280    }
5281  else
5282    store_register (usr, regcache, regno);
5283#endif
5284}
5285
5286void
5287linux_process_target::fetch_registers (regcache *regcache, int regno)
5288{
5289  int use_regsets;
5290  int all = 0;
5291  const regs_info *regs_info = get_regs_info ();
5292
5293  if (regno == -1)
5294    {
5295      if (regs_info->usrregs != NULL)
5296	for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5297	  low_fetch_register (regcache, regno);
5298
5299      all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5300      if (regs_info->usrregs != NULL)
5301	usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5302    }
5303  else
5304    {
5305      if (low_fetch_register (regcache, regno))
5306	return;
5307
5308      use_regsets = linux_register_in_regsets (regs_info, regno);
5309      if (use_regsets)
5310	all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5311						regcache);
5312      if ((!use_regsets || all) && regs_info->usrregs != NULL)
5313	usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5314    }
5315}
5316
5317void
5318linux_process_target::store_registers (regcache *regcache, int regno)
5319{
5320  int use_regsets;
5321  int all = 0;
5322  const regs_info *regs_info = get_regs_info ();
5323
5324  if (regno == -1)
5325    {
5326      all = regsets_store_inferior_registers (regs_info->regsets_info,
5327					      regcache);
5328      if (regs_info->usrregs != NULL)
5329	usr_store_inferior_registers (regs_info, regcache, regno, all);
5330    }
5331  else
5332    {
5333      use_regsets = linux_register_in_regsets (regs_info, regno);
5334      if (use_regsets)
5335	all = regsets_store_inferior_registers (regs_info->regsets_info,
5336						regcache);
5337      if ((!use_regsets || all) && regs_info->usrregs != NULL)
5338	usr_store_inferior_registers (regs_info, regcache, regno, 1);
5339    }
5340}
5341
5342bool
5343linux_process_target::low_fetch_register (regcache *regcache, int regno)
5344{
5345  return false;
5346}
5347
5348/* A wrapper for the read_memory target op.  */
5349
5350static int
5351linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5352{
5353  return the_target->read_memory (memaddr, myaddr, len);
5354}
5355
5356
5357/* Helper for read_memory/write_memory using /proc/PID/mem.  Because
5358   we can use a single read/write call, this can be much more
5359   efficient than banging away at PTRACE_PEEKTEXT.  Also, unlike
5360   PTRACE_PEEKTEXT/PTRACE_POKETEXT, this works with running threads.
5361   One an only one of READBUF and WRITEBUF is non-null.  If READBUF is
5362   not null, then we're reading, otherwise we're writing.  */
5363
5364static int
5365proc_xfer_memory (CORE_ADDR memaddr, unsigned char *readbuf,
5366		  const gdb_byte *writebuf, int len)
5367{
5368  gdb_assert ((readbuf == nullptr) != (writebuf == nullptr));
5369
5370  process_info *proc = current_process ();
5371
5372  int fd = proc->priv->mem_fd;
5373  if (fd == -1)
5374    return EIO;
5375
5376  while (len > 0)
5377    {
5378      int bytes;
5379
5380      /* If pread64 is available, use it.  It's faster if the kernel
5381	 supports it (only one syscall), and it's 64-bit safe even on
5382	 32-bit platforms (for instance, SPARC debugging a SPARC64
5383	 application).  */
5384#ifdef HAVE_PREAD64
5385      bytes = (readbuf != nullptr
5386	       ? pread64 (fd, readbuf, len, memaddr)
5387	       : pwrite64 (fd, writebuf, len, memaddr));
5388#else
5389      bytes = -1;
5390      if (lseek (fd, memaddr, SEEK_SET) != -1)
5391	bytes = (readbuf != nullptr
5392		 ? read (fd, readbuf, len)
5393		 : write (fd, writebuf, len));
5394#endif
5395
5396      if (bytes < 0)
5397	return errno;
5398      else if (bytes == 0)
5399	{
5400	  /* EOF means the address space is gone, the whole process
5401	     exited or execed.  */
5402	  return EIO;
5403	}
5404
5405      memaddr += bytes;
5406      if (readbuf != nullptr)
5407	readbuf += bytes;
5408      else
5409	writebuf += bytes;
5410      len -= bytes;
5411    }
5412
5413  return 0;
5414}
5415
5416int
5417linux_process_target::read_memory (CORE_ADDR memaddr,
5418				   unsigned char *myaddr, int len)
5419{
5420  return proc_xfer_memory (memaddr, myaddr, nullptr, len);
5421}
5422
5423/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5424   memory at MEMADDR.  On failure (cannot write to the inferior)
5425   returns the value of errno.  Always succeeds if LEN is zero.  */
5426
5427int
5428linux_process_target::write_memory (CORE_ADDR memaddr,
5429				    const unsigned char *myaddr, int len)
5430{
5431  if (debug_threads)
5432    {
5433      /* Dump up to four bytes.  */
5434      char str[4 * 2 + 1];
5435      char *p = str;
5436      int dump = len < 4 ? len : 4;
5437
5438      for (int i = 0; i < dump; i++)
5439	{
5440	  sprintf (p, "%02x", myaddr[i]);
5441	  p += 2;
5442	}
5443      *p = '\0';
5444
5445      threads_debug_printf ("Writing %s to 0x%08lx in process %d",
5446			    str, (long) memaddr, current_process ()->pid);
5447    }
5448
5449  return proc_xfer_memory (memaddr, nullptr, myaddr, len);
5450}
5451
5452void
5453linux_process_target::look_up_symbols ()
5454{
5455#ifdef USE_THREAD_DB
5456  struct process_info *proc = current_process ();
5457
5458  if (proc->priv->thread_db != NULL)
5459    return;
5460
5461  thread_db_init ();
5462#endif
5463}
5464
5465void
5466linux_process_target::request_interrupt ()
5467{
5468  /* Send a SIGINT to the process group.  This acts just like the user
5469     typed a ^C on the controlling terminal.  */
5470  int res = ::kill (-signal_pid, SIGINT);
5471  if (res == -1)
5472    warning (_("Sending SIGINT to process group of pid %ld failed: %s"),
5473	     signal_pid, safe_strerror (errno));
5474}
5475
5476bool
5477linux_process_target::supports_read_auxv ()
5478{
5479  return true;
5480}
5481
5482/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5483   to debugger memory starting at MYADDR.  */
5484
5485int
5486linux_process_target::read_auxv (CORE_ADDR offset, unsigned char *myaddr,
5487				 unsigned int len)
5488{
5489  char filename[PATH_MAX];
5490  int fd, n;
5491  int pid = lwpid_of (current_thread);
5492
5493  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5494
5495  fd = open (filename, O_RDONLY);
5496  if (fd < 0)
5497    return -1;
5498
5499  if (offset != (CORE_ADDR) 0
5500      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5501    n = -1;
5502  else
5503    n = read (fd, myaddr, len);
5504
5505  close (fd);
5506
5507  return n;
5508}
5509
5510int
5511linux_process_target::insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5512				    int size, raw_breakpoint *bp)
5513{
5514  if (type == raw_bkpt_type_sw)
5515    return insert_memory_breakpoint (bp);
5516  else
5517    return low_insert_point (type, addr, size, bp);
5518}
5519
5520int
5521linux_process_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
5522					int size, raw_breakpoint *bp)
5523{
5524  /* Unsupported (see target.h).  */
5525  return 1;
5526}
5527
5528int
5529linux_process_target::remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5530				    int size, raw_breakpoint *bp)
5531{
5532  if (type == raw_bkpt_type_sw)
5533    return remove_memory_breakpoint (bp);
5534  else
5535    return low_remove_point (type, addr, size, bp);
5536}
5537
5538int
5539linux_process_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
5540					int size, raw_breakpoint *bp)
5541{
5542  /* Unsupported (see target.h).  */
5543  return 1;
5544}
5545
5546/* Implement the stopped_by_sw_breakpoint target_ops
5547   method.  */
5548
5549bool
5550linux_process_target::stopped_by_sw_breakpoint ()
5551{
5552  struct lwp_info *lwp = get_thread_lwp (current_thread);
5553
5554  return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5555}
5556
5557/* Implement the supports_stopped_by_sw_breakpoint target_ops
5558   method.  */
5559
5560bool
5561linux_process_target::supports_stopped_by_sw_breakpoint ()
5562{
5563  return USE_SIGTRAP_SIGINFO;
5564}
5565
5566/* Implement the stopped_by_hw_breakpoint target_ops
5567   method.  */
5568
5569bool
5570linux_process_target::stopped_by_hw_breakpoint ()
5571{
5572  struct lwp_info *lwp = get_thread_lwp (current_thread);
5573
5574  return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5575}
5576
5577/* Implement the supports_stopped_by_hw_breakpoint target_ops
5578   method.  */
5579
5580bool
5581linux_process_target::supports_stopped_by_hw_breakpoint ()
5582{
5583  return USE_SIGTRAP_SIGINFO;
5584}
5585
5586/* Implement the supports_hardware_single_step target_ops method.  */
5587
5588bool
5589linux_process_target::supports_hardware_single_step ()
5590{
5591  return true;
5592}
5593
5594bool
5595linux_process_target::stopped_by_watchpoint ()
5596{
5597  struct lwp_info *lwp = get_thread_lwp (current_thread);
5598
5599  return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5600}
5601
5602CORE_ADDR
5603linux_process_target::stopped_data_address ()
5604{
5605  struct lwp_info *lwp = get_thread_lwp (current_thread);
5606
5607  return lwp->stopped_data_address;
5608}
5609
5610/* This is only used for targets that define PT_TEXT_ADDR,
5611   PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, supposedly
5612   the target has different ways of acquiring this information, like
5613   loadmaps.  */
5614
5615bool
5616linux_process_target::supports_read_offsets ()
5617{
5618#ifdef SUPPORTS_READ_OFFSETS
5619  return true;
5620#else
5621  return false;
5622#endif
5623}
5624
5625/* Under uClinux, programs are loaded at non-zero offsets, which we need
5626   to tell gdb about.  */
5627
5628int
5629linux_process_target::read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5630{
5631#ifdef SUPPORTS_READ_OFFSETS
5632  unsigned long text, text_end, data;
5633  int pid = lwpid_of (current_thread);
5634
5635  errno = 0;
5636
5637  text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5638		 (PTRACE_TYPE_ARG4) 0);
5639  text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5640		     (PTRACE_TYPE_ARG4) 0);
5641  data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5642		 (PTRACE_TYPE_ARG4) 0);
5643
5644  if (errno == 0)
5645    {
5646      /* Both text and data offsets produced at compile-time (and so
5647	 used by gdb) are relative to the beginning of the program,
5648	 with the data segment immediately following the text segment.
5649	 However, the actual runtime layout in memory may put the data
5650	 somewhere else, so when we send gdb a data base-address, we
5651	 use the real data base address and subtract the compile-time
5652	 data base-address from it (which is just the length of the
5653	 text segment).  BSS immediately follows data in both
5654	 cases.  */
5655      *text_p = text;
5656      *data_p = data - (text_end - text);
5657
5658      return 1;
5659    }
5660  return 0;
5661#else
5662  gdb_assert_not_reached ("target op read_offsets not supported");
5663#endif
5664}
5665
5666bool
5667linux_process_target::supports_get_tls_address ()
5668{
5669#ifdef USE_THREAD_DB
5670  return true;
5671#else
5672  return false;
5673#endif
5674}
5675
5676int
5677linux_process_target::get_tls_address (thread_info *thread,
5678				       CORE_ADDR offset,
5679				       CORE_ADDR load_module,
5680				       CORE_ADDR *address)
5681{
5682#ifdef USE_THREAD_DB
5683  return thread_db_get_tls_address (thread, offset, load_module, address);
5684#else
5685  return -1;
5686#endif
5687}
5688
5689bool
5690linux_process_target::supports_qxfer_osdata ()
5691{
5692  return true;
5693}
5694
5695int
5696linux_process_target::qxfer_osdata (const char *annex,
5697				    unsigned char *readbuf,
5698				    unsigned const char *writebuf,
5699				    CORE_ADDR offset, int len)
5700{
5701  return linux_common_xfer_osdata (annex, readbuf, offset, len);
5702}
5703
5704void
5705linux_process_target::siginfo_fixup (siginfo_t *siginfo,
5706				     gdb_byte *inf_siginfo, int direction)
5707{
5708  bool done = low_siginfo_fixup (siginfo, inf_siginfo, direction);
5709
5710  /* If there was no callback, or the callback didn't do anything,
5711     then just do a straight memcpy.  */
5712  if (!done)
5713    {
5714      if (direction == 1)
5715	memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5716      else
5717	memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5718    }
5719}
5720
5721bool
5722linux_process_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
5723					 int direction)
5724{
5725  return false;
5726}
5727
5728bool
5729linux_process_target::supports_qxfer_siginfo ()
5730{
5731  return true;
5732}
5733
5734int
5735linux_process_target::qxfer_siginfo (const char *annex,
5736				     unsigned char *readbuf,
5737				     unsigned const char *writebuf,
5738				     CORE_ADDR offset, int len)
5739{
5740  int pid;
5741  siginfo_t siginfo;
5742  gdb_byte inf_siginfo[sizeof (siginfo_t)];
5743
5744  if (current_thread == NULL)
5745    return -1;
5746
5747  pid = lwpid_of (current_thread);
5748
5749  threads_debug_printf ("%s siginfo for lwp %d.",
5750			readbuf != NULL ? "Reading" : "Writing",
5751			pid);
5752
5753  if (offset >= sizeof (siginfo))
5754    return -1;
5755
5756  if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5757    return -1;
5758
5759  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5760     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
5761     inferior with a 64-bit GDBSERVER should look the same as debugging it
5762     with a 32-bit GDBSERVER, we need to convert it.  */
5763  siginfo_fixup (&siginfo, inf_siginfo, 0);
5764
5765  if (offset + len > sizeof (siginfo))
5766    len = sizeof (siginfo) - offset;
5767
5768  if (readbuf != NULL)
5769    memcpy (readbuf, inf_siginfo + offset, len);
5770  else
5771    {
5772      memcpy (inf_siginfo + offset, writebuf, len);
5773
5774      /* Convert back to ptrace layout before flushing it out.  */
5775      siginfo_fixup (&siginfo, inf_siginfo, 1);
5776
5777      if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5778	return -1;
5779    }
5780
5781  return len;
5782}
5783
5784/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5785   so we notice when children change state; as the handler for the
5786   sigsuspend in my_waitpid.  */
5787
5788static void
5789sigchld_handler (int signo)
5790{
5791  int old_errno = errno;
5792
5793  if (debug_threads)
5794    {
5795      do
5796	{
5797	  /* Use the async signal safe debug function.  */
5798	  if (debug_write ("sigchld_handler\n",
5799			   sizeof ("sigchld_handler\n") - 1) < 0)
5800	    break; /* just ignore */
5801	} while (0);
5802    }
5803
5804  if (target_is_async_p ())
5805    async_file_mark (); /* trigger a linux_wait */
5806
5807  errno = old_errno;
5808}
5809
5810bool
5811linux_process_target::supports_non_stop ()
5812{
5813  return true;
5814}
5815
5816bool
5817linux_process_target::async (bool enable)
5818{
5819  bool previous = target_is_async_p ();
5820
5821  threads_debug_printf ("async (%d), previous=%d",
5822			enable, previous);
5823
5824  if (previous != enable)
5825    {
5826      sigset_t mask;
5827      sigemptyset (&mask);
5828      sigaddset (&mask, SIGCHLD);
5829
5830      gdb_sigmask (SIG_BLOCK, &mask, NULL);
5831
5832      if (enable)
5833	{
5834	  if (!linux_event_pipe.open_pipe ())
5835	    {
5836	      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5837
5838	      warning ("creating event pipe failed.");
5839	      return previous;
5840	    }
5841
5842	  /* Register the event loop handler.  */
5843	  add_file_handler (linux_event_pipe.event_fd (),
5844			    handle_target_event, NULL,
5845			    "linux-low");
5846
5847	  /* Always trigger a linux_wait.  */
5848	  async_file_mark ();
5849	}
5850      else
5851	{
5852	  delete_file_handler (linux_event_pipe.event_fd ());
5853
5854	  linux_event_pipe.close_pipe ();
5855	}
5856
5857      gdb_sigmask (SIG_UNBLOCK, &mask, NULL);
5858    }
5859
5860  return previous;
5861}
5862
5863int
5864linux_process_target::start_non_stop (bool nonstop)
5865{
5866  /* Register or unregister from event-loop accordingly.  */
5867  target_async (nonstop);
5868
5869  if (target_is_async_p () != (nonstop != false))
5870    return -1;
5871
5872  return 0;
5873}
5874
5875bool
5876linux_process_target::supports_multi_process ()
5877{
5878  return true;
5879}
5880
5881/* Check if fork events are supported.  */
5882
5883bool
5884linux_process_target::supports_fork_events ()
5885{
5886  return true;
5887}
5888
5889/* Check if vfork events are supported.  */
5890
5891bool
5892linux_process_target::supports_vfork_events ()
5893{
5894  return true;
5895}
5896
5897/* Check if exec events are supported.  */
5898
5899bool
5900linux_process_target::supports_exec_events ()
5901{
5902  return true;
5903}
5904
5905/* Target hook for 'handle_new_gdb_connection'.  Causes a reset of the
5906   ptrace flags for all inferiors.  This is in case the new GDB connection
5907   doesn't support the same set of events that the previous one did.  */
5908
5909void
5910linux_process_target::handle_new_gdb_connection ()
5911{
5912  /* Request that all the lwps reset their ptrace options.  */
5913  for_each_thread ([] (thread_info *thread)
5914    {
5915      struct lwp_info *lwp = get_thread_lwp (thread);
5916
5917      if (!lwp->stopped)
5918	{
5919	  /* Stop the lwp so we can modify its ptrace options.  */
5920	  lwp->must_set_ptrace_flags = 1;
5921	  linux_stop_lwp (lwp);
5922	}
5923      else
5924	{
5925	  /* Already stopped; go ahead and set the ptrace options.  */
5926	  struct process_info *proc = find_process_pid (pid_of (thread));
5927	  int options = linux_low_ptrace_options (proc->attached);
5928
5929	  linux_enable_event_reporting (lwpid_of (thread), options);
5930	  lwp->must_set_ptrace_flags = 0;
5931	}
5932    });
5933}
5934
5935int
5936linux_process_target::handle_monitor_command (char *mon)
5937{
5938#ifdef USE_THREAD_DB
5939  return thread_db_handle_monitor_command (mon);
5940#else
5941  return 0;
5942#endif
5943}
5944
5945int
5946linux_process_target::core_of_thread (ptid_t ptid)
5947{
5948  return linux_common_core_of_thread (ptid);
5949}
5950
5951bool
5952linux_process_target::supports_disable_randomization ()
5953{
5954  return true;
5955}
5956
5957bool
5958linux_process_target::supports_agent ()
5959{
5960  return true;
5961}
5962
5963bool
5964linux_process_target::supports_range_stepping ()
5965{
5966  if (supports_software_single_step ())
5967    return true;
5968
5969  return low_supports_range_stepping ();
5970}
5971
5972bool
5973linux_process_target::low_supports_range_stepping ()
5974{
5975  return false;
5976}
5977
5978bool
5979linux_process_target::supports_pid_to_exec_file ()
5980{
5981  return true;
5982}
5983
5984const char *
5985linux_process_target::pid_to_exec_file (int pid)
5986{
5987  return linux_proc_pid_to_exec_file (pid);
5988}
5989
5990bool
5991linux_process_target::supports_multifs ()
5992{
5993  return true;
5994}
5995
5996int
5997linux_process_target::multifs_open (int pid, const char *filename,
5998				    int flags, mode_t mode)
5999{
6000  return linux_mntns_open_cloexec (pid, filename, flags, mode);
6001}
6002
6003int
6004linux_process_target::multifs_unlink (int pid, const char *filename)
6005{
6006  return linux_mntns_unlink (pid, filename);
6007}
6008
6009ssize_t
6010linux_process_target::multifs_readlink (int pid, const char *filename,
6011					char *buf, size_t bufsiz)
6012{
6013  return linux_mntns_readlink (pid, filename, buf, bufsiz);
6014}
6015
6016#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6017struct target_loadseg
6018{
6019  /* Core address to which the segment is mapped.  */
6020  Elf32_Addr addr;
6021  /* VMA recorded in the program header.  */
6022  Elf32_Addr p_vaddr;
6023  /* Size of this segment in memory.  */
6024  Elf32_Word p_memsz;
6025};
6026
6027# if defined PT_GETDSBT
6028struct target_loadmap
6029{
6030  /* Protocol version number, must be zero.  */
6031  Elf32_Word version;
6032  /* Pointer to the DSBT table, its size, and the DSBT index.  */
6033  unsigned *dsbt_table;
6034  unsigned dsbt_size, dsbt_index;
6035  /* Number of segments in this map.  */
6036  Elf32_Word nsegs;
6037  /* The actual memory map.  */
6038  struct target_loadseg segs[/*nsegs*/];
6039};
6040#  define LINUX_LOADMAP		PT_GETDSBT
6041#  define LINUX_LOADMAP_EXEC	PTRACE_GETDSBT_EXEC
6042#  define LINUX_LOADMAP_INTERP	PTRACE_GETDSBT_INTERP
6043# else
6044struct target_loadmap
6045{
6046  /* Protocol version number, must be zero.  */
6047  Elf32_Half version;
6048  /* Number of segments in this map.  */
6049  Elf32_Half nsegs;
6050  /* The actual memory map.  */
6051  struct target_loadseg segs[/*nsegs*/];
6052};
6053#  define LINUX_LOADMAP		PTRACE_GETFDPIC
6054#  define LINUX_LOADMAP_EXEC	PTRACE_GETFDPIC_EXEC
6055#  define LINUX_LOADMAP_INTERP	PTRACE_GETFDPIC_INTERP
6056# endif
6057
6058bool
6059linux_process_target::supports_read_loadmap ()
6060{
6061  return true;
6062}
6063
6064int
6065linux_process_target::read_loadmap (const char *annex, CORE_ADDR offset,
6066				    unsigned char *myaddr, unsigned int len)
6067{
6068  int pid = lwpid_of (current_thread);
6069  int addr = -1;
6070  struct target_loadmap *data = NULL;
6071  unsigned int actual_length, copy_length;
6072
6073  if (strcmp (annex, "exec") == 0)
6074    addr = (int) LINUX_LOADMAP_EXEC;
6075  else if (strcmp (annex, "interp") == 0)
6076    addr = (int) LINUX_LOADMAP_INTERP;
6077  else
6078    return -1;
6079
6080  if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6081    return -1;
6082
6083  if (data == NULL)
6084    return -1;
6085
6086  actual_length = sizeof (struct target_loadmap)
6087    + sizeof (struct target_loadseg) * data->nsegs;
6088
6089  if (offset < 0 || offset > actual_length)
6090    return -1;
6091
6092  copy_length = actual_length - offset < len ? actual_length - offset : len;
6093  memcpy (myaddr, (char *) data + offset, copy_length);
6094  return copy_length;
6095}
6096#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6097
6098bool
6099linux_process_target::supports_catch_syscall ()
6100{
6101  return low_supports_catch_syscall ();
6102}
6103
6104bool
6105linux_process_target::low_supports_catch_syscall ()
6106{
6107  return false;
6108}
6109
6110CORE_ADDR
6111linux_process_target::read_pc (regcache *regcache)
6112{
6113  if (!low_supports_breakpoints ())
6114    return 0;
6115
6116  return low_get_pc (regcache);
6117}
6118
6119void
6120linux_process_target::write_pc (regcache *regcache, CORE_ADDR pc)
6121{
6122  gdb_assert (low_supports_breakpoints ());
6123
6124  low_set_pc (regcache, pc);
6125}
6126
6127bool
6128linux_process_target::supports_thread_stopped ()
6129{
6130  return true;
6131}
6132
6133bool
6134linux_process_target::thread_stopped (thread_info *thread)
6135{
6136  return get_thread_lwp (thread)->stopped;
6137}
6138
6139/* This exposes stop-all-threads functionality to other modules.  */
6140
6141void
6142linux_process_target::pause_all (bool freeze)
6143{
6144  stop_all_lwps (freeze, NULL);
6145}
6146
6147/* This exposes unstop-all-threads functionality to other gdbserver
6148   modules.  */
6149
6150void
6151linux_process_target::unpause_all (bool unfreeze)
6152{
6153  unstop_all_lwps (unfreeze, NULL);
6154}
6155
6156/* Extract &phdr and num_phdr in the inferior.  Return 0 on success.  */
6157
6158static int
6159get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6160			       CORE_ADDR *phdr_memaddr, int *num_phdr)
6161{
6162  char filename[PATH_MAX];
6163  int fd;
6164  const int auxv_size = is_elf64
6165    ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6166  char buf[sizeof (Elf64_auxv_t)];  /* The larger of the two.  */
6167
6168  xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6169
6170  fd = open (filename, O_RDONLY);
6171  if (fd < 0)
6172    return 1;
6173
6174  *phdr_memaddr = 0;
6175  *num_phdr = 0;
6176  while (read (fd, buf, auxv_size) == auxv_size
6177	 && (*phdr_memaddr == 0 || *num_phdr == 0))
6178    {
6179      if (is_elf64)
6180	{
6181	  Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6182
6183	  switch (aux->a_type)
6184	    {
6185	    case AT_PHDR:
6186	      *phdr_memaddr = aux->a_un.a_val;
6187	      break;
6188	    case AT_PHNUM:
6189	      *num_phdr = aux->a_un.a_val;
6190	      break;
6191	    }
6192	}
6193      else
6194	{
6195	  Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6196
6197	  switch (aux->a_type)
6198	    {
6199	    case AT_PHDR:
6200	      *phdr_memaddr = aux->a_un.a_val;
6201	      break;
6202	    case AT_PHNUM:
6203	      *num_phdr = aux->a_un.a_val;
6204	      break;
6205	    }
6206	}
6207    }
6208
6209  close (fd);
6210
6211  if (*phdr_memaddr == 0 || *num_phdr == 0)
6212    {
6213      warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6214	       "phdr_memaddr = %ld, phdr_num = %d",
6215	       (long) *phdr_memaddr, *num_phdr);
6216      return 2;
6217    }
6218
6219  return 0;
6220}
6221
6222/* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present.  */
6223
6224static CORE_ADDR
6225get_dynamic (const int pid, const int is_elf64)
6226{
6227  CORE_ADDR phdr_memaddr, relocation;
6228  int num_phdr, i;
6229  unsigned char *phdr_buf;
6230  const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6231
6232  if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6233    return 0;
6234
6235  gdb_assert (num_phdr < 100);  /* Basic sanity check.  */
6236  phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6237
6238  if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6239    return 0;
6240
6241  /* Compute relocation: it is expected to be 0 for "regular" executables,
6242     non-zero for PIE ones.  */
6243  relocation = -1;
6244  for (i = 0; relocation == -1 && i < num_phdr; i++)
6245    if (is_elf64)
6246      {
6247	Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6248
6249	if (p->p_type == PT_PHDR)
6250	  relocation = phdr_memaddr - p->p_vaddr;
6251      }
6252    else
6253      {
6254	Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6255
6256	if (p->p_type == PT_PHDR)
6257	  relocation = phdr_memaddr - p->p_vaddr;
6258      }
6259
6260  if (relocation == -1)
6261    {
6262      /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately
6263	 any real world executables, including PIE executables, have always
6264	 PT_PHDR present.  PT_PHDR is not present in some shared libraries or
6265	 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6266	 or present DT_DEBUG anyway (fpc binaries are statically linked).
6267
6268	 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6269
6270	 GDB could find RELOCATION also from AT_ENTRY - e_entry.  */
6271
6272      return 0;
6273    }
6274
6275  for (i = 0; i < num_phdr; i++)
6276    {
6277      if (is_elf64)
6278	{
6279	  Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6280
6281	  if (p->p_type == PT_DYNAMIC)
6282	    return p->p_vaddr + relocation;
6283	}
6284      else
6285	{
6286	  Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6287
6288	  if (p->p_type == PT_DYNAMIC)
6289	    return p->p_vaddr + relocation;
6290	}
6291    }
6292
6293  return 0;
6294}
6295
6296/* Return &_r_debug in the inferior, or -1 if not present.  Return value
6297   can be 0 if the inferior does not yet have the library list initialized.
6298   We look for DT_MIPS_RLD_MAP first.  MIPS executables use this instead of
6299   DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too.  */
6300
6301static CORE_ADDR
6302get_r_debug (const int pid, const int is_elf64)
6303{
6304  CORE_ADDR dynamic_memaddr;
6305  const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6306  unsigned char buf[sizeof (Elf64_Dyn)];  /* The larger of the two.  */
6307  CORE_ADDR map = -1;
6308
6309  dynamic_memaddr = get_dynamic (pid, is_elf64);
6310  if (dynamic_memaddr == 0)
6311    return map;
6312
6313  while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6314    {
6315      if (is_elf64)
6316	{
6317	  Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6318#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6319	  union
6320	    {
6321	      Elf64_Xword map;
6322	      unsigned char buf[sizeof (Elf64_Xword)];
6323	    }
6324	  rld_map;
6325#endif
6326#ifdef DT_MIPS_RLD_MAP
6327	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
6328	    {
6329	      if (linux_read_memory (dyn->d_un.d_val,
6330				     rld_map.buf, sizeof (rld_map.buf)) == 0)
6331		return rld_map.map;
6332	      else
6333		break;
6334	    }
6335#endif	/* DT_MIPS_RLD_MAP */
6336#ifdef DT_MIPS_RLD_MAP_REL
6337	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6338	    {
6339	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6340				     rld_map.buf, sizeof (rld_map.buf)) == 0)
6341		return rld_map.map;
6342	      else
6343		break;
6344	    }
6345#endif	/* DT_MIPS_RLD_MAP_REL */
6346
6347	  if (dyn->d_tag == DT_DEBUG && map == -1)
6348	    map = dyn->d_un.d_val;
6349
6350	  if (dyn->d_tag == DT_NULL)
6351	    break;
6352	}
6353      else
6354	{
6355	  Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6356#if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6357	  union
6358	    {
6359	      Elf32_Word map;
6360	      unsigned char buf[sizeof (Elf32_Word)];
6361	    }
6362	  rld_map;
6363#endif
6364#ifdef DT_MIPS_RLD_MAP
6365	  if (dyn->d_tag == DT_MIPS_RLD_MAP)
6366	    {
6367	      if (linux_read_memory (dyn->d_un.d_val,
6368				     rld_map.buf, sizeof (rld_map.buf)) == 0)
6369		return rld_map.map;
6370	      else
6371		break;
6372	    }
6373#endif	/* DT_MIPS_RLD_MAP */
6374#ifdef DT_MIPS_RLD_MAP_REL
6375	  if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6376	    {
6377	      if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6378				     rld_map.buf, sizeof (rld_map.buf)) == 0)
6379		return rld_map.map;
6380	      else
6381		break;
6382	    }
6383#endif	/* DT_MIPS_RLD_MAP_REL */
6384
6385	  if (dyn->d_tag == DT_DEBUG && map == -1)
6386	    map = dyn->d_un.d_val;
6387
6388	  if (dyn->d_tag == DT_NULL)
6389	    break;
6390	}
6391
6392      dynamic_memaddr += dyn_size;
6393    }
6394
6395  return map;
6396}
6397
6398/* Read one pointer from MEMADDR in the inferior.  */
6399
6400static int
6401read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6402{
6403  int ret;
6404
6405  /* Go through a union so this works on either big or little endian
6406     hosts, when the inferior's pointer size is smaller than the size
6407     of CORE_ADDR.  It is assumed the inferior's endianness is the
6408     same of the superior's.  */
6409  union
6410  {
6411    CORE_ADDR core_addr;
6412    unsigned int ui;
6413    unsigned char uc;
6414  } addr;
6415
6416  ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6417  if (ret == 0)
6418    {
6419      if (ptr_size == sizeof (CORE_ADDR))
6420	*ptr = addr.core_addr;
6421      else if (ptr_size == sizeof (unsigned int))
6422	*ptr = addr.ui;
6423      else
6424	gdb_assert_not_reached ("unhandled pointer size");
6425    }
6426  return ret;
6427}
6428
6429bool
6430linux_process_target::supports_qxfer_libraries_svr4 ()
6431{
6432  return true;
6433}
6434
6435struct link_map_offsets
6436  {
6437    /* Offset and size of r_debug.r_version.  */
6438    int r_version_offset;
6439
6440    /* Offset and size of r_debug.r_map.  */
6441    int r_map_offset;
6442
6443    /* Offset of r_debug_extended.r_next.  */
6444    int r_next_offset;
6445
6446    /* Offset to l_addr field in struct link_map.  */
6447    int l_addr_offset;
6448
6449    /* Offset to l_name field in struct link_map.  */
6450    int l_name_offset;
6451
6452    /* Offset to l_ld field in struct link_map.  */
6453    int l_ld_offset;
6454
6455    /* Offset to l_next field in struct link_map.  */
6456    int l_next_offset;
6457
6458    /* Offset to l_prev field in struct link_map.  */
6459    int l_prev_offset;
6460  };
6461
6462static const link_map_offsets lmo_32bit_offsets =
6463  {
6464    0,     /* r_version offset.  */
6465    4,     /* r_debug.r_map offset.  */
6466    20,    /* r_debug_extended.r_next.  */
6467    0,     /* l_addr offset in link_map.  */
6468    4,     /* l_name offset in link_map.  */
6469    8,     /* l_ld offset in link_map.  */
6470    12,    /* l_next offset in link_map.  */
6471    16     /* l_prev offset in link_map.  */
6472  };
6473
6474static const link_map_offsets lmo_64bit_offsets =
6475  {
6476    0,     /* r_version offset.  */
6477    8,     /* r_debug.r_map offset.  */
6478    40,    /* r_debug_extended.r_next.  */
6479    0,     /* l_addr offset in link_map.  */
6480    8,     /* l_name offset in link_map.  */
6481    16,    /* l_ld offset in link_map.  */
6482    24,    /* l_next offset in link_map.  */
6483    32     /* l_prev offset in link_map.  */
6484  };
6485
6486/* Get the loaded shared libraries from one namespace.  */
6487
6488static void
6489read_link_map (std::string &document, CORE_ADDR lmid, CORE_ADDR lm_addr,
6490	       CORE_ADDR lm_prev, int ptr_size, const link_map_offsets *lmo)
6491{
6492  CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6493
6494  while (lm_addr
6495	 && read_one_ptr (lm_addr + lmo->l_name_offset,
6496			  &l_name, ptr_size) == 0
6497	 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6498			  &l_addr, ptr_size) == 0
6499	 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6500			  &l_ld, ptr_size) == 0
6501	 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6502			  &l_prev, ptr_size) == 0
6503	 && read_one_ptr (lm_addr + lmo->l_next_offset,
6504			  &l_next, ptr_size) == 0)
6505    {
6506      unsigned char libname[PATH_MAX];
6507
6508      if (lm_prev != l_prev)
6509	{
6510	  warning ("Corrupted shared library list: 0x%s != 0x%s",
6511		   paddress (lm_prev), paddress (l_prev));
6512	  break;
6513	}
6514
6515      /* Not checking for error because reading may stop before we've got
6516	 PATH_MAX worth of characters.  */
6517      libname[0] = '\0';
6518      linux_read_memory (l_name, libname, sizeof (libname) - 1);
6519      libname[sizeof (libname) - 1] = '\0';
6520      if (libname[0] != '\0')
6521	{
6522	  string_appendf (document, "<library name=\"");
6523	  xml_escape_text_append (document, (char *) libname);
6524	  string_appendf (document, "\" lm=\"0x%s\" l_addr=\"0x%s\" "
6525			  "l_ld=\"0x%s\" lmid=\"0x%s\"/>",
6526			  paddress (lm_addr), paddress (l_addr),
6527			  paddress (l_ld), paddress (lmid));
6528	}
6529
6530      lm_prev = lm_addr;
6531      lm_addr = l_next;
6532    }
6533}
6534
6535/* Construct qXfer:libraries-svr4:read reply.  */
6536
6537int
6538linux_process_target::qxfer_libraries_svr4 (const char *annex,
6539					    unsigned char *readbuf,
6540					    unsigned const char *writebuf,
6541					    CORE_ADDR offset, int len)
6542{
6543  struct process_info_private *const priv = current_process ()->priv;
6544  char filename[PATH_MAX];
6545  int pid, is_elf64;
6546  unsigned int machine;
6547  CORE_ADDR lmid = 0, lm_addr = 0, lm_prev = 0;
6548
6549  if (writebuf != NULL)
6550    return -2;
6551  if (readbuf == NULL)
6552    return -1;
6553
6554  pid = lwpid_of (current_thread);
6555  xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6556  is_elf64 = elf_64_file_p (filename, &machine);
6557  const link_map_offsets *lmo;
6558  int ptr_size;
6559  if (is_elf64)
6560    {
6561      lmo = &lmo_64bit_offsets;
6562      ptr_size = 8;
6563    }
6564  else
6565    {
6566      lmo = &lmo_32bit_offsets;
6567      ptr_size = 4;
6568    }
6569
6570  while (annex[0] != '\0')
6571    {
6572      const char *sep;
6573      CORE_ADDR *addrp;
6574      int name_len;
6575
6576      sep = strchr (annex, '=');
6577      if (sep == NULL)
6578	break;
6579
6580      name_len = sep - annex;
6581      if (name_len == 4 && startswith (annex, "lmid"))
6582	addrp = &lmid;
6583      else if (name_len == 5 && startswith (annex, "start"))
6584	addrp = &lm_addr;
6585      else if (name_len == 4 && startswith (annex, "prev"))
6586	addrp = &lm_prev;
6587      else
6588	{
6589	  annex = strchr (sep, ';');
6590	  if (annex == NULL)
6591	    break;
6592	  annex++;
6593	  continue;
6594	}
6595
6596      annex = decode_address_to_semicolon (addrp, sep + 1);
6597    }
6598
6599  std::string document = "<library-list-svr4 version=\"1.0\"";
6600
6601  /* When the starting LM_ADDR is passed in the annex, only traverse that
6602     namespace, which is assumed to be identified by LMID.
6603
6604     Otherwise, start with R_DEBUG and traverse all namespaces we find.  */
6605  if (lm_addr != 0)
6606    {
6607      document += ">";
6608      read_link_map (document, lmid, lm_addr, lm_prev, ptr_size, lmo);
6609    }
6610  else
6611    {
6612      if (lm_prev != 0)
6613	warning ("ignoring prev=0x%s without start", paddress (lm_prev));
6614
6615      /* We could interpret LMID as 'provide only the libraries for this
6616	 namespace' but GDB is currently only providing lmid, start, and
6617	 prev, or nothing.  */
6618      if (lmid != 0)
6619	warning ("ignoring lmid=0x%s without start", paddress (lmid));
6620
6621      CORE_ADDR r_debug = priv->r_debug;
6622      if (r_debug == 0)
6623	r_debug = priv->r_debug = get_r_debug (pid, is_elf64);
6624
6625      /* We failed to find DT_DEBUG.  Such situation will not change
6626	 for this inferior - do not retry it.  Report it to GDB as
6627	 E01, see for the reasons at the GDB solib-svr4.c side.  */
6628      if (r_debug == (CORE_ADDR) -1)
6629	return -1;
6630
6631      /* Terminate the header if we end up with an empty list.  */
6632      if (r_debug == 0)
6633	document += ">";
6634
6635      while (r_debug != 0)
6636	{
6637	  int r_version = 0;
6638	  if (linux_read_memory (r_debug + lmo->r_version_offset,
6639				 (unsigned char *) &r_version,
6640				 sizeof (r_version)) != 0)
6641	    {
6642	      warning ("unable to read r_version from 0x%s",
6643		       paddress (r_debug + lmo->r_version_offset));
6644	      break;
6645	    }
6646
6647	  if (r_version < 1)
6648	    {
6649	      warning ("unexpected r_debug version %d", r_version);
6650	      break;
6651	    }
6652
6653	  if (read_one_ptr (r_debug + lmo->r_map_offset, &lm_addr,
6654			    ptr_size) != 0)
6655	    {
6656	      warning ("unable to read r_map from 0x%s",
6657		       paddress (r_debug + lmo->r_map_offset));
6658	      break;
6659	    }
6660
6661	  /* We read the entire namespace.  */
6662	  lm_prev = 0;
6663
6664	  /* The first entry corresponds to the main executable unless the
6665	     dynamic loader was loaded late by a static executable.  But
6666	     in such case the main executable does not have PT_DYNAMIC
6667	     present and we would not have gotten here.  */
6668	  if (r_debug == priv->r_debug)
6669	    {
6670	      if (lm_addr != 0)
6671		string_appendf (document, " main-lm=\"0x%s\">",
6672				paddress (lm_addr));
6673	      else
6674		document += ">";
6675
6676	      lm_prev = lm_addr;
6677	      if (read_one_ptr (lm_addr + lmo->l_next_offset,
6678				&lm_addr, ptr_size) != 0)
6679		{
6680		  warning ("unable to read l_next from 0x%s",
6681			   paddress (lm_addr + lmo->l_next_offset));
6682		  break;
6683		}
6684	    }
6685
6686	  read_link_map (document, r_debug, lm_addr, lm_prev, ptr_size, lmo);
6687
6688	  if (r_version < 2)
6689	    break;
6690
6691	  if (read_one_ptr (r_debug + lmo->r_next_offset, &r_debug,
6692			    ptr_size) != 0)
6693	    {
6694	      warning ("unable to read r_next from 0x%s",
6695		       paddress (r_debug + lmo->r_next_offset));
6696	      break;
6697	    }
6698	}
6699    }
6700
6701  document += "</library-list-svr4>";
6702
6703  int document_len = document.length ();
6704  if (offset < document_len)
6705    document_len -= offset;
6706  else
6707    document_len = 0;
6708  if (len > document_len)
6709    len = document_len;
6710
6711  memcpy (readbuf, document.data () + offset, len);
6712
6713  return len;
6714}
6715
6716#ifdef HAVE_LINUX_BTRACE
6717
6718bool
6719linux_process_target::supports_btrace ()
6720{
6721  return true;
6722}
6723
6724btrace_target_info *
6725linux_process_target::enable_btrace (thread_info *tp,
6726				     const btrace_config *conf)
6727{
6728  return linux_enable_btrace (tp->id, conf);
6729}
6730
6731/* See to_disable_btrace target method.  */
6732
6733int
6734linux_process_target::disable_btrace (btrace_target_info *tinfo)
6735{
6736  enum btrace_error err;
6737
6738  err = linux_disable_btrace (tinfo);
6739  return (err == BTRACE_ERR_NONE ? 0 : -1);
6740}
6741
6742/* Encode an Intel Processor Trace configuration.  */
6743
6744static void
6745linux_low_encode_pt_config (struct buffer *buffer,
6746			    const struct btrace_data_pt_config *config)
6747{
6748  buffer_grow_str (buffer, "<pt-config>\n");
6749
6750  switch (config->cpu.vendor)
6751    {
6752    case CV_INTEL:
6753      buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6754			 "model=\"%u\" stepping=\"%u\"/>\n",
6755			 config->cpu.family, config->cpu.model,
6756			 config->cpu.stepping);
6757      break;
6758
6759    default:
6760      break;
6761    }
6762
6763  buffer_grow_str (buffer, "</pt-config>\n");
6764}
6765
6766/* Encode a raw buffer.  */
6767
6768static void
6769linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6770		      unsigned int size)
6771{
6772  if (size == 0)
6773    return;
6774
6775  /* We use hex encoding - see gdbsupport/rsp-low.h.  */
6776  buffer_grow_str (buffer, "<raw>\n");
6777
6778  while (size-- > 0)
6779    {
6780      char elem[2];
6781
6782      elem[0] = tohex ((*data >> 4) & 0xf);
6783      elem[1] = tohex (*data++ & 0xf);
6784
6785      buffer_grow (buffer, elem, 2);
6786    }
6787
6788  buffer_grow_str (buffer, "</raw>\n");
6789}
6790
6791/* See to_read_btrace target method.  */
6792
6793int
6794linux_process_target::read_btrace (btrace_target_info *tinfo,
6795				   buffer *buffer,
6796				   enum btrace_read_type type)
6797{
6798  struct btrace_data btrace;
6799  enum btrace_error err;
6800
6801  err = linux_read_btrace (&btrace, tinfo, type);
6802  if (err != BTRACE_ERR_NONE)
6803    {
6804      if (err == BTRACE_ERR_OVERFLOW)
6805	buffer_grow_str0 (buffer, "E.Overflow.");
6806      else
6807	buffer_grow_str0 (buffer, "E.Generic Error.");
6808
6809      return -1;
6810    }
6811
6812  switch (btrace.format)
6813    {
6814    case BTRACE_FORMAT_NONE:
6815      buffer_grow_str0 (buffer, "E.No Trace.");
6816      return -1;
6817
6818    case BTRACE_FORMAT_BTS:
6819      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6820      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6821
6822      for (const btrace_block &block : *btrace.variant.bts.blocks)
6823	buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6824			   paddress (block.begin), paddress (block.end));
6825
6826      buffer_grow_str0 (buffer, "</btrace>\n");
6827      break;
6828
6829    case BTRACE_FORMAT_PT:
6830      buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6831      buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6832      buffer_grow_str (buffer, "<pt>\n");
6833
6834      linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6835
6836      linux_low_encode_raw (buffer, btrace.variant.pt.data,
6837			    btrace.variant.pt.size);
6838
6839      buffer_grow_str (buffer, "</pt>\n");
6840      buffer_grow_str0 (buffer, "</btrace>\n");
6841      break;
6842
6843    default:
6844      buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6845      return -1;
6846    }
6847
6848  return 0;
6849}
6850
6851/* See to_btrace_conf target method.  */
6852
6853int
6854linux_process_target::read_btrace_conf (const btrace_target_info *tinfo,
6855					buffer *buffer)
6856{
6857  const struct btrace_config *conf;
6858
6859  buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6860  buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6861
6862  conf = linux_btrace_conf (tinfo);
6863  if (conf != NULL)
6864    {
6865      switch (conf->format)
6866	{
6867	case BTRACE_FORMAT_NONE:
6868	  break;
6869
6870	case BTRACE_FORMAT_BTS:
6871	  buffer_xml_printf (buffer, "<bts");
6872	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6873	  buffer_xml_printf (buffer, " />\n");
6874	  break;
6875
6876	case BTRACE_FORMAT_PT:
6877	  buffer_xml_printf (buffer, "<pt");
6878	  buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6879	  buffer_xml_printf (buffer, "/>\n");
6880	  break;
6881	}
6882    }
6883
6884  buffer_grow_str0 (buffer, "</btrace-conf>\n");
6885  return 0;
6886}
6887#endif /* HAVE_LINUX_BTRACE */
6888
6889/* See nat/linux-nat.h.  */
6890
6891ptid_t
6892current_lwp_ptid (void)
6893{
6894  return ptid_of (current_thread);
6895}
6896
6897const char *
6898linux_process_target::thread_name (ptid_t thread)
6899{
6900  return linux_proc_tid_get_name (thread);
6901}
6902
6903#if USE_THREAD_DB
6904bool
6905linux_process_target::thread_handle (ptid_t ptid, gdb_byte **handle,
6906				     int *handle_len)
6907{
6908  return thread_db_thread_handle (ptid, handle, handle_len);
6909}
6910#endif
6911
6912thread_info *
6913linux_process_target::thread_pending_parent (thread_info *thread)
6914{
6915  lwp_info *parent = get_thread_lwp (thread)->pending_parent ();
6916
6917  if (parent == nullptr)
6918    return nullptr;
6919
6920  return get_lwp_thread (parent);
6921}
6922
6923thread_info *
6924linux_process_target::thread_pending_child (thread_info *thread)
6925{
6926  lwp_info *child = get_thread_lwp (thread)->pending_child ();
6927
6928  if (child == nullptr)
6929    return nullptr;
6930
6931  return get_lwp_thread (child);
6932}
6933
6934/* Default implementation of linux_target_ops method "set_pc" for
6935   32-bit pc register which is literally named "pc".  */
6936
6937void
6938linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
6939{
6940  uint32_t newpc = pc;
6941
6942  supply_register_by_name (regcache, "pc", &newpc);
6943}
6944
6945/* Default implementation of linux_target_ops method "get_pc" for
6946   32-bit pc register which is literally named "pc".  */
6947
6948CORE_ADDR
6949linux_get_pc_32bit (struct regcache *regcache)
6950{
6951  uint32_t pc;
6952
6953  collect_register_by_name (regcache, "pc", &pc);
6954  threads_debug_printf ("stop pc is 0x%" PRIx32, pc);
6955  return pc;
6956}
6957
6958/* Default implementation of linux_target_ops method "set_pc" for
6959   64-bit pc register which is literally named "pc".  */
6960
6961void
6962linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
6963{
6964  uint64_t newpc = pc;
6965
6966  supply_register_by_name (regcache, "pc", &newpc);
6967}
6968
6969/* Default implementation of linux_target_ops method "get_pc" for
6970   64-bit pc register which is literally named "pc".  */
6971
6972CORE_ADDR
6973linux_get_pc_64bit (struct regcache *regcache)
6974{
6975  uint64_t pc;
6976
6977  collect_register_by_name (regcache, "pc", &pc);
6978  threads_debug_printf ("stop pc is 0x%" PRIx64, pc);
6979  return pc;
6980}
6981
6982/* See linux-low.h.  */
6983
6984int
6985linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
6986{
6987  gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
6988  int offset = 0;
6989
6990  gdb_assert (wordsize == 4 || wordsize == 8);
6991
6992  while (the_target->read_auxv (offset, data, 2 * wordsize) == 2 * wordsize)
6993    {
6994      if (wordsize == 4)
6995	{
6996	  uint32_t *data_p = (uint32_t *) data;
6997	  if (data_p[0] == match)
6998	    {
6999	      *valp = data_p[1];
7000	      return 1;
7001	    }
7002	}
7003      else
7004	{
7005	  uint64_t *data_p = (uint64_t *) data;
7006	  if (data_p[0] == match)
7007	    {
7008	      *valp = data_p[1];
7009	      return 1;
7010	    }
7011	}
7012
7013      offset += 2 * wordsize;
7014    }
7015
7016  return 0;
7017}
7018
7019/* See linux-low.h.  */
7020
7021CORE_ADDR
7022linux_get_hwcap (int wordsize)
7023{
7024  CORE_ADDR hwcap = 0;
7025  linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7026  return hwcap;
7027}
7028
7029/* See linux-low.h.  */
7030
7031CORE_ADDR
7032linux_get_hwcap2 (int wordsize)
7033{
7034  CORE_ADDR hwcap2 = 0;
7035  linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7036  return hwcap2;
7037}
7038
7039#ifdef HAVE_LINUX_REGSETS
7040void
7041initialize_regsets_info (struct regsets_info *info)
7042{
7043  for (info->num_regsets = 0;
7044       info->regsets[info->num_regsets].size >= 0;
7045       info->num_regsets++)
7046    ;
7047}
7048#endif
7049
7050void
7051initialize_low (void)
7052{
7053  struct sigaction sigchld_action;
7054
7055  memset (&sigchld_action, 0, sizeof (sigchld_action));
7056  set_target_ops (the_linux_target);
7057
7058  linux_ptrace_init_warnings ();
7059  linux_proc_init_warnings ();
7060
7061  sigchld_action.sa_handler = sigchld_handler;
7062  sigemptyset (&sigchld_action.sa_mask);
7063  sigchld_action.sa_flags = SA_RESTART;
7064  sigaction (SIGCHLD, &sigchld_action, NULL);
7065
7066  initialize_low_arch ();
7067
7068  linux_check_ptrace_features ();
7069}
7070