/*	$NetBSD: lwproc.c,v 1.24 2013/10/27 20:25:45 pooka Exp $	*/

/*
 * Copyright (c) 2010, 2011 Antti Kantee.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.24 2013/10/27 20:25:45 pooka Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <sys/uidinfo.h>

#include <rump/rumpuser.h>

#include "rump_private.h"

struct emul *emul_default = &emul_netbsd;

static void
lwproc_proc_free(struct proc *p)
{
	kauth_cred_t cred;

	mutex_enter(proc_lock);

	KASSERT(p->p_nlwps == 0);
	KASSERT(LIST_EMPTY(&p->p_lwps));
	KASSERT(p->p_stat == SACTIVE || p->p_stat == SDYING ||
	    p->p_stat == SDEAD);

	LIST_REMOVE(p, p_list);
	LIST_REMOVE(p, p_sibling);
	proc_free_pid(p->p_pid); /* decrements nprocs */
	proc_leavepgrp(p); /* releases proc_lock */

	cred = p->p_cred;
	chgproccnt(kauth_cred_getuid(cred), -1);
	if (rump_proc_vfs_release)
		rump_proc_vfs_release(p);

	lim_free(p->p_limit);
	pstatsfree(p->p_stats);
	kauth_cred_free(p->p_cred);
	proc_finispecific(p);

	mutex_obj_free(p->p_lock);
	mutex_destroy(&p->p_stmutex);
	mutex_destroy(&p->p_auxlock);
	rw_destroy(&p->p_reflock);
	cv_destroy(&p->p_waitcv);
	cv_destroy(&p->p_lwpcv);

	/* non-kernel vmspaces are not shared */
	if (!RUMP_LOCALPROC_P(p)) {
		KASSERT(p->p_vmspace->vm_refcnt == 1);
		kmem_free(p->p_vmspace, sizeof(*p->p_vmspace));
	}

	proc_free_mem(p);
}

/*
 * Allocate a new process.  Mostly mimic fork by
 * copying the properties of the parent.  However, there are some
 * differences.
 *
 * The new process is returned with no lwps attached; callers are
 * expected to create one with lwproc_makelwp() and switch to it.
 */
static struct proc *
lwproc_newproc(struct proc *parent, int flags)
{
	uid_t uid = kauth_cred_getuid(parent->p_cred);
	struct proc *p;

	/* maxproc not enforced */
	atomic_inc_uint(&nprocs);

	/* allocate process */
	p = proc_alloc();
	memset(&p->p_startzero, 0,
	    offsetof(struct proc, p_endzero)
	      - offsetof(struct proc, p_startzero));
	memcpy(&p->p_startcopy, &parent->p_startcopy,
	    offsetof(struct proc, p_endcopy)
	      - offsetof(struct proc, p_startcopy));

	/* some other garbage we need to zero */
	p->p_sigacts = NULL;
	p->p_aio = NULL;
	p->p_dtrace = NULL;
	p->p_mqueue_cnt = p->p_exitsig = 0;
	p->p_flag = p->p_sflag = p->p_slflag = p->p_lflag = p->p_stflag = 0;
	p->p_trace_enabled = 0;
	p->p_xstat = p->p_acflag = 0;
	p->p_stackbase = 0;

	p->p_stats = pstatscopy(parent->p_stats);

	p->p_vmspace = vmspace_kernel();
	p->p_emul = emul_default;
	if (*parent->p_comm)
		strcpy(p->p_comm, parent->p_comm);
	else
		strcpy(p->p_comm, "rumproc");

	if ((flags & RUMP_RFCFDG) == 0)
		KASSERT(parent == curproc);
	if (flags & RUMP_RFFDG)
		p->p_fd = fd_copy();
	else if (flags & RUMP_RFCFDG)
		p->p_fd = fd_init(NULL);
	else
		fd_share(p);

	lim_addref(parent->p_limit);
	p->p_limit = parent->p_limit;

	LIST_INIT(&p->p_lwps);
	LIST_INIT(&p->p_children);

	p->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&p->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
	mutex_init(&p->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
	rw_init(&p->p_reflock);
	cv_init(&p->p_waitcv, "pwait");
	cv_init(&p->p_lwpcv, "plwp");

	p->p_pptr = parent;
	p->p_ppid = parent->p_pid;
	p->p_stat = SACTIVE;

	kauth_proc_fork(parent, p);

	/* initialize cwd in rump kernels with vfs */
	if (rump_proc_vfs_init)
		rump_proc_vfs_init(p);

	chgproccnt(uid, 1); /* not enforced */

	/* publish proc on various proc lists */
	mutex_enter(proc_lock);
	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(&parent->p_children, p, p_sibling);
	LIST_INSERT_AFTER(parent, p, p_pglist);
	mutex_exit(proc_lock);

	return p;
}
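
/*
 * For illustration, the descriptor table inheritance chosen above
 * maps to the rump_lwproc_rfork() flags roughly as follows
 * (hypothetical caller, error handling omitted):
 *
 *	rump_lwproc_rfork(RUMP_RFFDG);	copy of parent's fd table
 *	rump_lwproc_rfork(RUMP_RFCFDG);	fresh, empty fd table
 *	rump_lwproc_rfork(0);		fd table shared with parent
 *
 * Passing both flags together is rejected with EINVAL; see
 * rump_lwproc_rfork() below.
 */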

static void
lwproc_freelwp(struct lwp *l)
{
	struct proc *p;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	KASSERT(l->l_flag & LW_WEXIT);
	KASSERT(l->l_refcnt == 0);

	/* ok, zero references, continue with nuke */
	LIST_REMOVE(l, l_sibling);
	KASSERT(p->p_nlwps >= 1);
	if (--p->p_nlwps == 0) {
		KASSERT(p != &proc0);
		p->p_stat = SDEAD;
	}
	cv_broadcast(&p->p_lwpcv); /* nobody sleeps on this in a rump kernel? */
	kauth_cred_free(l->l_cred);
	mutex_exit(p->p_lock);

	mutex_enter(proc_lock);
	LIST_REMOVE(l, l_list);
	mutex_exit(proc_lock);

	if (l->l_name)
		kmem_free(l->l_name, MAXCOMLEN);
	lwp_finispecific(l);

	rumpuser_curlwpop(RUMPUSER_LWP_DESTROY, l);
	membar_exit();
	kmem_free(l, sizeof(*l));

	if (p->p_stat == SDEAD)
		lwproc_proc_free(p);
}

extern kmutex_t unruntime_lock;

/*
 * Called with p_lock held; releases the lock before returning.
 */
static void
lwproc_makelwp(struct proc *p, struct lwp *l, bool doswitch, bool procmake)
{

	p->p_nlwps++;
	l->l_refcnt = 1;
	l->l_proc = p;

	l->l_lid = p->p_nlwpid++;
	LIST_INSERT_HEAD(&p->p_lwps, l, l_sibling);

	l->l_fd = p->p_fd;
	l->l_cpu = rump_cpu;
	l->l_target_cpu = rump_cpu; /* Initial target CPU always the same */
	l->l_stat = LSRUN;
	l->l_mutex = &unruntime_lock;
	TAILQ_INIT(&l->l_ld_locks);
	mutex_exit(p->p_lock);

	lwp_update_creds(l);
	lwp_initspecific(l);

	membar_enter();
	rumpuser_curlwpop(RUMPUSER_LWP_CREATE, l);
	if (doswitch) {
		rump_lwproc_switch(l);
	}

	/* filedesc already has refcount 1 when process is created */
	if (!procmake) {
		fd_hold(l);
	}

	mutex_enter(proc_lock);
	LIST_INSERT_HEAD(&alllwp, l, l_list);
	mutex_exit(proc_lock);
}

struct lwp *
rump__lwproc_alloclwp(struct proc *p)
{
	struct lwp *l;
	bool newproc = false;

	if (p == NULL) {
		p = lwproc_newproc(&proc0, 0);
		newproc = true;
	}

	l = kmem_zalloc(sizeof(*l), KM_SLEEP);

	mutex_enter(p->p_lock);
	KASSERT((p->p_sflag & PS_RUMP_LWPEXIT) == 0);
	lwproc_makelwp(p, l, false, newproc);

	return l;
}

int
rump_lwproc_newlwp(pid_t pid)
{
	struct proc *p;
	struct lwp *l;

	l = kmem_zalloc(sizeof(*l), KM_SLEEP);
	mutex_enter(proc_lock);
	p = proc_find_raw(pid);
	if (p == NULL) {
		mutex_exit(proc_lock);
		kmem_free(l, sizeof(*l));
		return ESRCH;
	}
	mutex_enter(p->p_lock);
	if (p->p_sflag & PS_RUMP_LWPEXIT) {
		mutex_exit(proc_lock);
		mutex_exit(p->p_lock);
		kmem_free(l, sizeof(*l));
		return EBUSY;
	}
	mutex_exit(proc_lock);
	lwproc_makelwp(p, l, true, false);

	return 0;
}
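
/*
 * Sketch of a typical rump_lwproc_newlwp() call from a hypothetical
 * client: attach a fresh lwp to an existing process and implicitly
 * switch to it (lwproc_makelwp() runs with doswitch == true).
 * ESRCH means there is no process with that pid; EBUSY means the
 * process is on its way out.
 *
 *	int error = rump_lwproc_newlwp(pid);
 *	if (error != 0)
 *		return error;
 *	... curlwp now belongs to the target process ...
 */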

int
rump_lwproc_rfork(int flags)
{
	struct proc *p;
	struct lwp *l;

	if (flags & ~(RUMP_RFFDG|RUMP_RFCFDG) ||
	    (~flags & (RUMP_RFFDG|RUMP_RFCFDG)) == 0)
		return EINVAL;

	p = lwproc_newproc(curproc, flags);
	l = kmem_zalloc(sizeof(*l), KM_SLEEP);
	mutex_enter(p->p_lock);
	KASSERT((p->p_sflag & PS_RUMP_LWPEXIT) == 0);
	lwproc_makelwp(p, l, true, true);

	return 0;
}

/*
 * Switch to a new process/thread.  Release previous one if
 * deemed to be exiting.  This is considered a slow path for
 * rump kernel entry.
 */
void
rump_lwproc_switch(struct lwp *newlwp)
{
	struct lwp *l = curlwp;

	KASSERT(!(l->l_flag & LW_WEXIT) || newlwp);

	if (__predict_false(newlwp && (newlwp->l_pflag & LP_RUNNING)))
		panic("lwp %p (%d:%d) already running",
		    newlwp, newlwp->l_proc->p_pid, newlwp->l_lid);

	if (newlwp == NULL) {
		l->l_pflag &= ~LP_RUNNING;
		l->l_flag |= LW_RUMP_CLEAR;
		return;
	}

	/* fd_free() must be called from curlwp context.  talk about ugh */
	if (l->l_flag & LW_WEXIT) {
		fd_free();
	}

	KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks);
	rumpuser_curlwpop(RUMPUSER_LWP_CLEAR, l);

	newlwp->l_cpu = newlwp->l_target_cpu = l->l_cpu;
	newlwp->l_mutex = l->l_mutex;
	newlwp->l_pflag |= LP_RUNNING;

	rumpuser_curlwpop(RUMPUSER_LWP_SET, newlwp);
	curcpu()->ci_curlwp = newlwp;
	KERNEL_LOCK(newlwp->l_biglocks, NULL);

	/*
	 * Check if the thread should get a signal.  This is
	 * mostly to satisfy the "record" rump sigmodel.
	 */
	mutex_enter(newlwp->l_proc->p_lock);
	if (sigispending(newlwp, 0)) {
		newlwp->l_flag |= LW_PENDSIG;
	}
	mutex_exit(newlwp->l_proc->p_lock);

	l->l_mutex = &unruntime_lock;
	l->l_pflag &= ~LP_RUNNING;
	l->l_flag &= ~LW_PENDSIG;
	l->l_stat = LSRUN;

	if (l->l_flag & LW_WEXIT) {
		lwproc_freelwp(l);
	}
}
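
/*
 * Sketch of how the interfaces above combine in a hypothetical
 * caller (error handling omitted): remember the current lwp, fork
 * a process with a private descriptor table, work in it, then
 * release it and switch back.  The actual free of the released lwp
 * happens at the tail of rump_lwproc_switch().
 *
 *	struct lwp *prev;
 *
 *	prev = rump_lwproc_curlwp();
 *	rump_lwproc_rfork(RUMP_RFCFDG);	(curlwp = new process's lwp)
 *	... work as the new process ...
 *	rump_lwproc_releaselwp();	(mark curlwp for release)
 *	rump_lwproc_switch(prev);	(freed during the switch)
 */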

/*
 * Mark the current thread to be released upon return from
 * kernel.
 */
void
rump_lwproc_releaselwp(void)
{
	struct lwp *l = curlwp;

	if (l->l_refcnt == 0 || l->l_flag & LW_WEXIT)
		panic("releasing non-pertinent lwp");

	rump__lwproc_lwprele();
	KASSERT(l->l_refcnt == 0 && (l->l_flag & LW_WEXIT));
}

/*
 * In-kernel routines used to add and remove references for the
 * current thread.  The main purpose is to make it possible for
 * implicit threads to persist over scheduling operations in
 * rump kernel drivers.  Note that we don't need p_lock in a
 * rump kernel, since we do refcounting only for curlwp.
 */
void
rump__lwproc_lwphold(void)
{
	struct lwp *l = curlwp;

	l->l_refcnt++;
	l->l_flag &= ~LW_WEXIT;
}

void
rump__lwproc_lwprele(void)
{
	struct lwp *l = curlwp;

	l->l_refcnt--;
	if (l->l_refcnt == 0)
		l->l_flag |= LW_WEXIT;
}

struct lwp *
rump_lwproc_curlwp(void)
{
	struct lwp *l = curlwp;

	if (l->l_flag & LW_WEXIT)
		return NULL;
	return l;
}

/* this interface is under construction (like the proverbial 90's web page) */
int rump_i_know_what_i_am_doing_with_sysents = 0;
void
rump_lwproc_sysent_usenative()
{

	if (!rump_i_know_what_i_am_doing_with_sysents)
		panic("don't use rump_lwproc_sysent_usenative()");
	curproc->p_emul = &emul_netbsd;
}
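
/*
 * Sketch of the hold/rele pair in a hypothetical rump kernel driver:
 * pin the current implicit thread so it persists across a scheduling
 * operation, then drop the reference again.  Dropping the last
 * reference sets LW_WEXIT, after which the lwp is freed on the next
 * rump_lwproc_switch() away from it.
 *
 *	rump__lwproc_lwphold();
 *	... operation that may unschedule and reschedule ...
 *	rump__lwproc_lwprele();
 */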