/* linux_schedule.c — FreeBSD linuxkpi scheduling/wait-queue shims, revision 330851 */
/*-
 * Copyright (c) 2017 Mark Johnston <markj@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: stable/11/sys/compat/linuxkpi/common/src/linux_schedule.c 330851 2018-03-13 16:17:36Z hselasky $"); 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/proc.h> 33#include <sys/signalvar.h> 34#include <sys/sleepqueue.h> 35 36#include <linux/delay.h> 37#include <linux/errno.h> 38#include <linux/kernel.h> 39#include <linux/list.h> 40#include <linux/sched.h> 41#include <linux/spinlock.h> 42#include <linux/wait.h> 43 44static int 45linux_add_to_sleepqueue(void *wchan, struct task_struct *task, 46 const char *wmesg, int timeout, int state) 47{ 48 int flags, ret; 49 50 MPASS((state & ~TASK_NORMAL) == 0); 51 52 flags = SLEEPQ_SLEEP | ((state & TASK_INTERRUPTIBLE) != 0 ? 53 SLEEPQ_INTERRUPTIBLE : 0); 54 55 sleepq_add(wchan, NULL, wmesg, flags, 0); 56 if (timeout != 0) 57 sleepq_set_timeout(wchan, timeout); 58 if ((state & TASK_INTERRUPTIBLE) != 0) { 59 if (timeout == 0) 60 ret = -sleepq_wait_sig(wchan, 0); 61 else 62 ret = -sleepq_timedwait_sig(wchan, 0); 63 } else { 64 if (timeout == 0) { 65 sleepq_wait(wchan, 0); 66 ret = 0; 67 } else 68 ret = -sleepq_timedwait(wchan, 0); 69 } 70 /* filter return value */ 71 if (ret != 0 && ret != -EWOULDBLOCK) { 72 linux_schedule_save_interrupt_value(task, ret); 73 ret = -ERESTARTSYS; 74 } 75 return (ret); 76} 77 78unsigned int 79linux_msleep_interruptible(unsigned int ms) 80{ 81 int ret; 82 83 /* guard against invalid values */ 84 if (ms == 0) 85 ms = 1; 86 ret = -pause_sbt("lnxsleep", mstosbt(ms), 0, C_HARDCLOCK | C_CATCH); 87 88 switch (ret) { 89 case -EWOULDBLOCK: 90 return (0); 91 default: 92 linux_schedule_save_interrupt_value(current, ret); 93 return (ms); 94 } 95} 96 97static int 98wake_up_task(struct task_struct *task, unsigned int state) 99{ 100 int ret, wakeup_swapper; 101 102 ret = wakeup_swapper = 0; 103 sleepq_lock(task); 104 if ((atomic_read(&task->state) & state) != 0) { 105 set_task_state(task, TASK_WAKING); 106 wakeup_swapper = sleepq_signal(task, 
SLEEPQ_SLEEP, 0, 0); 107 ret = 1; 108 } 109 sleepq_release(task); 110 if (wakeup_swapper) 111 kick_proc0(); 112 return (ret); 113} 114 115bool 116linux_signal_pending(struct task_struct *task) 117{ 118 struct thread *td; 119 sigset_t pending; 120 121 td = task->task_thread; 122 PROC_LOCK(td->td_proc); 123 pending = td->td_siglist; 124 SIGSETOR(pending, td->td_proc->p_siglist); 125 SIGSETNAND(pending, td->td_sigmask); 126 PROC_UNLOCK(td->td_proc); 127 return (!SIGISEMPTY(pending)); 128} 129 130bool 131linux_fatal_signal_pending(struct task_struct *task) 132{ 133 struct thread *td; 134 bool ret; 135 136 td = task->task_thread; 137 PROC_LOCK(td->td_proc); 138 ret = SIGISMEMBER(td->td_siglist, SIGKILL) || 139 SIGISMEMBER(td->td_proc->p_siglist, SIGKILL); 140 PROC_UNLOCK(td->td_proc); 141 return (ret); 142} 143 144bool 145linux_signal_pending_state(long state, struct task_struct *task) 146{ 147 148 MPASS((state & ~TASK_NORMAL) == 0); 149 150 if ((state & TASK_INTERRUPTIBLE) == 0) 151 return (false); 152 return (linux_signal_pending(task)); 153} 154 155void 156linux_send_sig(int signo, struct task_struct *task) 157{ 158 struct thread *td; 159 160 td = task->task_thread; 161 PROC_LOCK(td->td_proc); 162 tdsignal(td, signo); 163 PROC_UNLOCK(td->td_proc); 164} 165 166int 167autoremove_wake_function(wait_queue_t *wq, unsigned int state, int flags, 168 void *key __unused) 169{ 170 struct task_struct *task; 171 int ret; 172 173 task = wq->private; 174 if ((ret = wake_up_task(task, state)) != 0) 175 list_del_init(&wq->task_list); 176 return (ret); 177} 178 179void 180linux_wake_up(wait_queue_head_t *wqh, unsigned int state, int nr, bool locked) 181{ 182 wait_queue_t *pos, *next; 183 184 if (!locked) 185 spin_lock(&wqh->lock); 186 list_for_each_entry_safe(pos, next, &wqh->task_list, task_list) { 187 if (pos->func == NULL) { 188 if (wake_up_task(pos->private, state) != 0 && --nr == 0) 189 break; 190 } else { 191 if (pos->func(pos, state, 0, NULL) != 0 && --nr == 0) 192 break; 193 
} 194 } 195 if (!locked) 196 spin_unlock(&wqh->lock); 197} 198 199void 200linux_prepare_to_wait(wait_queue_head_t *wqh, wait_queue_t *wq, int state) 201{ 202 203 spin_lock(&wqh->lock); 204 if (list_empty(&wq->task_list)) 205 __add_wait_queue(wqh, wq); 206 set_task_state(current, state); 207 spin_unlock(&wqh->lock); 208} 209 210void 211linux_finish_wait(wait_queue_head_t *wqh, wait_queue_t *wq) 212{ 213 214 spin_lock(&wqh->lock); 215 set_task_state(current, TASK_RUNNING); 216 if (!list_empty(&wq->task_list)) { 217 __remove_wait_queue(wqh, wq); 218 INIT_LIST_HEAD(&wq->task_list); 219 } 220 spin_unlock(&wqh->lock); 221} 222 223bool 224linux_waitqueue_active(wait_queue_head_t *wqh) 225{ 226 bool ret; 227 228 spin_lock(&wqh->lock); 229 ret = !list_empty(&wqh->task_list); 230 spin_unlock(&wqh->lock); 231 return (ret); 232} 233 234int 235linux_wait_event_common(wait_queue_head_t *wqh, wait_queue_t *wq, int timeout, 236 unsigned int state, spinlock_t *lock) 237{ 238 struct task_struct *task; 239 int ret; 240 241 if (lock != NULL) 242 spin_unlock_irq(lock); 243 244 DROP_GIANT(); 245 246 /* range check timeout */ 247 if (timeout < 1) 248 timeout = 1; 249 else if (timeout == MAX_SCHEDULE_TIMEOUT) 250 timeout = 0; 251 252 task = current; 253 254 /* 255 * Our wait queue entry is on the stack - make sure it doesn't 256 * get swapped out while we sleep. 
257 */ 258 PHOLD(task->task_thread->td_proc); 259 sleepq_lock(task); 260 if (atomic_read(&task->state) != TASK_WAKING) { 261 ret = linux_add_to_sleepqueue(task, task, "wevent", timeout, state); 262 } else { 263 sleepq_release(task); 264 ret = 0; 265 } 266 PRELE(task->task_thread->td_proc); 267 268 PICKUP_GIANT(); 269 270 if (lock != NULL) 271 spin_lock_irq(lock); 272 return (ret); 273} 274 275int 276linux_schedule_timeout(int timeout) 277{ 278 struct task_struct *task; 279 int ret; 280 int state; 281 int remainder; 282 283 task = current; 284 285 /* range check timeout */ 286 if (timeout < 1) 287 timeout = 1; 288 else if (timeout == MAX_SCHEDULE_TIMEOUT) 289 timeout = 0; 290 291 remainder = ticks + timeout; 292 293 DROP_GIANT(); 294 295 sleepq_lock(task); 296 state = atomic_read(&task->state); 297 if (state != TASK_WAKING) { 298 ret = linux_add_to_sleepqueue(task, task, "sched", timeout, state); 299 } else { 300 sleepq_release(task); 301 ret = 0; 302 } 303 set_task_state(task, TASK_RUNNING); 304 305 PICKUP_GIANT(); 306 307 if (timeout == 0) 308 return (MAX_SCHEDULE_TIMEOUT); 309 310 /* range check return value */ 311 remainder -= ticks; 312 313 /* range check return value */ 314 if (ret == -ERESTARTSYS && remainder < 1) 315 remainder = 1; 316 else if (remainder < 0) 317 remainder = 0; 318 else if (remainder > timeout) 319 remainder = timeout; 320 return (remainder); 321} 322 323static void 324wake_up_sleepers(void *wchan) 325{ 326 int wakeup_swapper; 327 328 sleepq_lock(wchan); 329 wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0); 330 sleepq_release(wchan); 331 if (wakeup_swapper) 332 kick_proc0(); 333} 334 335#define bit_to_wchan(word, bit) ((void *)(((uintptr_t)(word) << 6) | (bit))) 336 337void 338linux_wake_up_bit(void *word, int bit) 339{ 340 341 wake_up_sleepers(bit_to_wchan(word, bit)); 342} 343 344int 345linux_wait_on_bit_timeout(unsigned long *word, int bit, unsigned int state, 346 int timeout) 347{ 348 struct task_struct *task; 349 void *wchan; 
350 int ret; 351 352 DROP_GIANT(); 353 354 /* range check timeout */ 355 if (timeout < 1) 356 timeout = 1; 357 else if (timeout == MAX_SCHEDULE_TIMEOUT) 358 timeout = 0; 359 360 task = current; 361 wchan = bit_to_wchan(word, bit); 362 for (;;) { 363 sleepq_lock(wchan); 364 if ((*word & (1 << bit)) == 0) { 365 sleepq_release(wchan); 366 ret = 0; 367 break; 368 } 369 set_task_state(task, state); 370 ret = linux_add_to_sleepqueue(wchan, task, "wbit", timeout, state); 371 if (ret != 0) 372 break; 373 } 374 set_task_state(task, TASK_RUNNING); 375 376 PICKUP_GIANT(); 377 378 return (ret); 379} 380 381void 382linux_wake_up_atomic_t(atomic_t *a) 383{ 384 385 wake_up_sleepers(a); 386} 387 388int 389linux_wait_on_atomic_t(atomic_t *a, unsigned int state) 390{ 391 struct task_struct *task; 392 void *wchan; 393 int ret; 394 395 DROP_GIANT(); 396 397 task = current; 398 wchan = a; 399 for (;;) { 400 sleepq_lock(wchan); 401 if (atomic_read(a) == 0) { 402 sleepq_release(wchan); 403 ret = 0; 404 break; 405 } 406 set_task_state(task, state); 407 ret = linux_add_to_sleepqueue(wchan, task, "watomic", 0, state); 408 if (ret != 0) 409 break; 410 } 411 set_task_state(task, TASK_RUNNING); 412 413 PICKUP_GIANT(); 414 415 return (ret); 416} 417 418bool 419linux_wake_up_state(struct task_struct *task, unsigned int state) 420{ 421 422 return (wake_up_task(task, state) != 0); 423} 424