/*	$NetBSD: lwproc.c,v 1.29 2014/04/09 23:53:36 pooka Exp $	*/

/*
 * Copyright (c) 2010, 2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define RUMP__CURLWP_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.29 2014/04/09 23:53:36 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/ktrace.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/uidinfo.h>

#include <rump/rumpuser.h>
#include "rump_private.h"
#include "rump_curlwp.h"

struct emul *emul_default = &emul_netbsd;

void
rump_lwproc_init(void)
{

	lwproc_curlwpop(RUMPUSER_LWP_CREATE, &lwp0);
}

struct lwp *
rump_lwproc_curlwp_hypercall(void)
{

	return rumpuser_curlwp();
}

void
rump_lwproc_curlwp_set(struct lwp *l)
{

	KASSERT(curlwp == NULL);
	lwproc_curlwpop(RUMPUSER_LWP_SET, l);
}

void
rump_lwproc_curlwp_clear(struct lwp *l)
{

	KASSERT(l == curlwp);
	lwproc_curlwpop(RUMPUSER_LWP_CLEAR, l);
}

static void
lwproc_proc_free(struct proc *p)
{
	kauth_cred_t cred;

	KASSERT(p->p_stat == SDYING || p->p_stat == SDEAD);

#ifdef KTRACE
	if (p->p_tracep) {
		mutex_enter(&ktrace_lock);
		ktrderef(p);
		mutex_exit(&ktrace_lock);
	}
#endif

	mutex_enter(proc_lock);

	KASSERT(p->p_nlwps == 0);
	KASSERT(LIST_EMPTY(&p->p_lwps));

	LIST_REMOVE(p, p_list);
	LIST_REMOVE(p, p_sibling);
	proc_free_pid(p->p_pid); /* decrements nprocs */
	proc_leavepgrp(p); /* releases proc_lock */

	cred = p->p_cred;
	chgproccnt(kauth_cred_getuid(cred), -1);
	if (rump_proc_vfs_release)
		rump_proc_vfs_release(p);

	doexithooks(p);
	lim_free(p->p_limit);
	pstatsfree(p->p_stats);
	kauth_cred_free(p->p_cred);
	proc_finispecific(p);

	mutex_obj_free(p->p_lock);
	mutex_destroy(&p->p_stmutex);
	mutex_destroy(&p->p_auxlock);
	rw_destroy(&p->p_reflock);
	cv_destroy(&p->p_waitcv);
	cv_destroy(&p->p_lwpcv);

	/* non-kernel vmspaces are not shared */
	if (!RUMP_LOCALPROC_P(p)) {
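		/*
		 * The vmspace is private to this process, so by the
		 * time we get here nothing else may hold a reference
		 * to it and it can be freed directly.
		 */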
		KASSERT(p->p_vmspace->vm_refcnt == 1);
		kmem_free(p->p_vmspace, sizeof(*p->p_vmspace));
	}

	proc_free_mem(p);
}

/*
 * Allocate a new process.  Mostly mimic fork by
 * copying the properties of the parent.  However, there are some
 * differences.
 *
 * Switch to the new lwp and return a pointer to it.
 */
static struct proc *
lwproc_newproc(struct proc *parent, int flags)
{
	uid_t uid = kauth_cred_getuid(parent->p_cred);
	struct proc *p;

	/* maxproc not enforced */
	atomic_inc_uint(&nprocs);

	/* allocate process */
	p = proc_alloc();
	memset(&p->p_startzero, 0,
	    offsetof(struct proc, p_endzero)
	      - offsetof(struct proc, p_startzero));
	memcpy(&p->p_startcopy, &parent->p_startcopy,
	    offsetof(struct proc, p_endcopy)
	      - offsetof(struct proc, p_startcopy));

	/* some other garbage we need to zero */
	p->p_sigacts = NULL;
	p->p_aio = NULL;
	p->p_dtrace = NULL;
	p->p_mqueue_cnt = p->p_exitsig = 0;
	p->p_flag = p->p_sflag = p->p_slflag = p->p_lflag = p->p_stflag = 0;
	p->p_trace_enabled = 0;
	p->p_xstat = p->p_acflag = 0;
	p->p_stackbase = 0;

	p->p_stats = pstatscopy(parent->p_stats);

	p->p_vmspace = vmspace_kernel();
	p->p_emul = emul_default;
#ifdef __HAVE_SYSCALL_INTERN
	p->p_emul->e_syscall_intern(p);
#endif
	if (*parent->p_comm)
		strcpy(p->p_comm, parent->p_comm);
	else
		strcpy(p->p_comm, "rumproc");

	if ((flags & RUMP_RFCFDG) == 0)
		KASSERT(parent == curproc);
	if (flags & RUMP_RFFDG)
		p->p_fd = fd_copy();
	else if (flags & RUMP_RFCFDG)
		p->p_fd = fd_init(NULL);
	else
		fd_share(p);

	lim_addref(parent->p_limit);
	p->p_limit = parent->p_limit;

	LIST_INIT(&p->p_lwps);
	LIST_INIT(&p->p_children);

	p->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&p->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
	mutex_init(&p->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
	rw_init(&p->p_reflock);
	cv_init(&p->p_waitcv, "pwait");
	cv_init(&p->p_lwpcv, "plwp");

	p->p_pptr = parent;
	p->p_ppid = parent->p_pid;
	p->p_stat = SACTIVE;

	kauth_proc_fork(parent, p);

	/* initialize cwd in rump kernels with vfs */
	if (rump_proc_vfs_init)
		rump_proc_vfs_init(p);

	chgproccnt(uid, 1); /* not enforced */

	/* publish the proc on the various proc lists */
	mutex_enter(proc_lock);
	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(&parent->p_children, p, p_sibling);
	LIST_INSERT_AFTER(parent, p, p_pglist);
	mutex_exit(proc_lock);

	return p;
}

static void
lwproc_freelwp(struct lwp *l)
{
	struct proc *p;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	KASSERT(l->l_flag & LW_WEXIT);
	KASSERT(l->l_refcnt == 0);

	/* ok, zero references, continue with nuke */
	LIST_REMOVE(l, l_sibling);
	KASSERT(p->p_nlwps >= 1);
	if (--p->p_nlwps == 0) {
		KASSERT(p != &proc0);
		p->p_stat = SDEAD;
	}
	cv_broadcast(&p->p_lwpcv); /* nobody sleeps on this in a rump kernel? */
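	/* drop the lwp's credential reference and unlink it from the lists */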
	kauth_cred_free(l->l_cred);
	mutex_exit(p->p_lock);

	mutex_enter(proc_lock);
	LIST_REMOVE(l, l_list);
	mutex_exit(proc_lock);

	if (l->l_name)
		kmem_free(l->l_name, MAXCOMLEN);
	lwp_finispecific(l);

	lwproc_curlwpop(RUMPUSER_LWP_DESTROY, l);
	membar_exit();
	kmem_free(l, sizeof(*l));

	if (p->p_stat == SDEAD)
		lwproc_proc_free(p);
}

extern kmutex_t unruntime_lock;

/*
 * called with p_lock held, releases lock before return
 */
static void
lwproc_makelwp(struct proc *p, struct lwp *l, bool doswitch, bool procmake)
{

	p->p_nlwps++;
	l->l_refcnt = 1;
	l->l_proc = p;

	l->l_lid = p->p_nlwpid++;
	LIST_INSERT_HEAD(&p->p_lwps, l, l_sibling);

	l->l_fd = p->p_fd;
	l->l_cpu = rump_cpu;
	l->l_target_cpu = rump_cpu; /* Initial target CPU always the same */
	l->l_stat = LSRUN;
	l->l_mutex = &unruntime_lock;
	TAILQ_INIT(&l->l_ld_locks);
	mutex_exit(p->p_lock);

	lwp_update_creds(l);
	lwp_initspecific(l);

	membar_enter();
	lwproc_curlwpop(RUMPUSER_LWP_CREATE, l);
	if (doswitch) {
		rump_lwproc_switch(l);
	}

	/* filedesc already has refcount 1 when process is created */
	if (!procmake) {
		fd_hold(l);
	}

	mutex_enter(proc_lock);
	LIST_INSERT_HEAD(&alllwp, l, l_list);
	mutex_exit(proc_lock);
}

struct lwp *
rump__lwproc_alloclwp(struct proc *p)
{
	struct lwp *l;
	bool newproc = false;

	if (p == NULL) {
		p = lwproc_newproc(&proc0, 0);
		newproc = true;
	}

	l = kmem_zalloc(sizeof(*l), KM_SLEEP);

	mutex_enter(p->p_lock);
	KASSERT((p->p_sflag & PS_RUMP_LWPEXIT) == 0);
	lwproc_makelwp(p, l, false, newproc);

	return l;
}

int
rump_lwproc_newlwp(pid_t pid)
{
	struct proc *p;
	struct lwp *l;

	l = kmem_zalloc(sizeof(*l), KM_SLEEP);
	mutex_enter(proc_lock);
	p = proc_find_raw(pid);
	if (p == NULL) {
		mutex_exit(proc_lock);
		kmem_free(l, sizeof(*l));
		return ESRCH;
	}
	mutex_enter(p->p_lock);
	if (p->p_sflag & PS_RUMP_LWPEXIT) {
		mutex_exit(proc_lock);
		mutex_exit(p->p_lock);
		kmem_free(l, sizeof(*l));
		return EBUSY;
	}
	mutex_exit(proc_lock);
	lwproc_makelwp(p, l, true, false);

	return 0;
}

int
rump_lwproc_rfork(int flags)
{
	struct proc *p;
	struct lwp *l;

	if (flags & ~(RUMP_RFFDG|RUMP_RFCFDG) ||
	    (~flags & (RUMP_RFFDG|RUMP_RFCFDG)) == 0)
		return EINVAL;

	p = lwproc_newproc(curproc, flags);
	l = kmem_zalloc(sizeof(*l), KM_SLEEP);
	mutex_enter(p->p_lock);
	KASSERT((p->p_sflag & PS_RUMP_LWPEXIT) == 0);
	lwproc_makelwp(p, l, true, true);

	return 0;
}

/*
 * Switch to a new process/thread.  Release previous one if
 * deemed to be exiting.  This is considered a slow path for
 * rump kernel entry.
 */
void
rump_lwproc_switch(struct lwp *newlwp)
{
	struct lwp *l = curlwp;

	KASSERT(!(l->l_flag & LW_WEXIT) || newlwp);

	if (__predict_false(newlwp && (newlwp->l_pflag & LP_RUNNING)))
		panic("lwp %p (%d:%d) already running",
		    newlwp, newlwp->l_proc->p_pid, newlwp->l_lid);

	if (newlwp == NULL) {
		l->l_pflag &= ~LP_RUNNING;
		l->l_flag |= LW_RUMP_CLEAR;
		return;
	}

	/* fd_free() must be called from curlwp context.  talk about ugh */
	if (l->l_flag & LW_WEXIT) {
		fd_free();
	}

	KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks);
	lwproc_curlwpop(RUMPUSER_LWP_CLEAR, l);

	newlwp->l_cpu = newlwp->l_target_cpu = l->l_cpu;
	newlwp->l_mutex = l->l_mutex;
	newlwp->l_pflag |= LP_RUNNING;

	lwproc_curlwpop(RUMPUSER_LWP_SET, newlwp);
	curcpu()->ci_curlwp = newlwp;
	KERNEL_LOCK(newlwp->l_biglocks, NULL);

	/*
	 * Check if the thread should get a signal.  This is
	 * mostly to satisfy the "record" rump sigmodel.
	 */
	mutex_enter(newlwp->l_proc->p_lock);
	if (sigispending(newlwp, 0)) {
		newlwp->l_flag |= LW_PENDSIG;
	}
	mutex_exit(newlwp->l_proc->p_lock);

	l->l_mutex = &unruntime_lock;
	l->l_pflag &= ~LP_RUNNING;
	l->l_flag &= ~LW_PENDSIG;
	l->l_stat = LSRUN;

	if (l->l_flag & LW_WEXIT) {
		lwproc_freelwp(l);
	}
}

/*
 * Mark the current thread to be released upon return from
 * kernel.
 */
void
rump_lwproc_releaselwp(void)
{
	struct lwp *l = curlwp;

	if (l->l_refcnt == 0 || l->l_flag & LW_WEXIT)
		panic("releasing non-pertinent lwp");

	rump__lwproc_lwprele();
	KASSERT(l->l_refcnt == 0 && (l->l_flag & LW_WEXIT));
}

/*
 * In-kernel routines used to add and remove references for the
 * current thread.  The main purpose is to make it possible for
 * implicit threads to persist over scheduling operations in
 * rump kernel drivers.  Note that we don't need p_lock in a
 * rump kernel, since we do refcounting only for curlwp.
 */
void
rump__lwproc_lwphold(void)
{
	struct lwp *l = curlwp;

	l->l_refcnt++;
	l->l_flag &= ~LW_WEXIT;
}

void
rump__lwproc_lwprele(void)
{
	struct lwp *l = curlwp;

	l->l_refcnt--;
	if (l->l_refcnt == 0)
		l->l_flag |= LW_WEXIT;
}

struct lwp *
rump_lwproc_curlwp(void)
{
	struct lwp *l = curlwp;

	if (l->l_flag & LW_WEXIT)
		return NULL;
	return l;
}

/* this interface is under construction (like the proverbial 90's web page) */
int rump_i_know_what_i_am_doing_with_sysents = 0;
void
rump_lwproc_sysent_usenative(void)
{

	if (!rump_i_know_what_i_am_doing_with_sysents)
		panic("don't use rump_lwproc_sysent_usenative()");
	curproc->p_emul = &emul_netbsd;
}