Searched refs:need_resched (Results 1 - 25 of 113) sorted by relevance


/linux-master/arch/powerpc/lib/
vmx-helper.c
44 * kernels need to be preempted as soon as possible if need_resched is
48 if (IS_ENABLED(CONFIG_PREEMPT) && need_resched())
/linux-master/arch/arm64/include/asm/
preempt.h
32 current_thread_info()->preempt.need_resched = 0;
37 current_thread_info()->preempt.need_resched = 1;
42 return !current_thread_info()->preempt.need_resched;
64 /* Update only the count field, leaving need_resched unchanged */
70 * preempt_count in case the need_resched flag was cleared by an
thread_info.h
33 u32 need_resched; member in struct:thread_info::__anon5::__anon6
37 u32 need_resched;
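
These arm64 hits fold need_resched into the preempt_count word. A minimal kernel-context sketch of that layout (not the verbatim definition; little-endian field order assumed) shows why the flag is inverted: with 0 meaning "resched needed", a single test of the whole 64-bit word covers both "nesting count is zero" and "a reschedule is pending".

struct thread_info_sketch {
	union {
		u64 preempt_count;		/* tested as a single word on exit paths */
		struct {
			u32 count;		/* preemption nesting level */
			u32 need_resched;	/* 0 => reschedule needed (inverted) */
		} preempt;			/* little-endian field order assumed */
	};
};

static inline void set_preempt_need_resched_sketch(struct thread_info_sketch *ti)
{
	ti->preempt.need_resched = 0;	/* mirrors "preempt.need_resched = 0" in the hit above */
}
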
/linux-master/kernel/entry/
kvm.c
27 } while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched());
/linux-master/arch/mips/kernel/
entry.S
48 # interrupt setting need_resched
84 local_irq_disable # make sure need_resched and
128 local_irq_disable # make sure need_resched and
150 local_irq_disable # make sure need_resched doesn't
idle.c
46 * This variant is preferable as it allows testing need_resched and going to
54 if (!need_resched())
68 if (!need_resched())
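
The idle.c comment above is about the classic idle race: need_resched must be checked with interrupts disabled, and the sleep instruction must re-enable them atomically with going to sleep. A rough sketch of the idea, with arch_sleep_and_enable_irqs() as a hypothetical stand-in for the arch-specific wait primitive (the other helpers are the kernel's own):

/* If the test and the sleep were not atomic with respect to interrupts, an
 * IPI setting need_resched could slip in between them and the CPU would doze
 * off with work pending. */
static void idle_enter_sketch(void)
{
	local_irq_disable();
	if (!need_resched())
		arch_sleep_and_enable_irqs();	/* hypothetical: sleep + re-enable IRQs atomically */
	else
		local_irq_enable();
}
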
/linux-master/drivers/cpuidle/
poll_state.c
29 while (!need_resched()) {
coupled.c
422 * need_resched() must be tested after this function returns to make sure
482 if (need_resched()) {
524 if (need_resched()) {
540 if (need_resched()) {
cpuidle-powernv.c
82 while (!need_resched()) {
87 * cleared to order subsequent test of need_resched().
cpuidle-pseries.c
49 while (!need_resched()) {
56 * cleared to order subsequent test of need_resched().
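
poll_state.c and the powernv/pseries drivers above all spin on need_resched() as the cheapest idle state. A simplified sketch of that polling loop, where now_ns() is a hypothetical clock helper and need_resched()/cpu_relax() are the kernel's own:

/* Spin until someone sets need_resched for this CPU, but give up after a
 * time limit so cpuidle can fall back to a deeper state. */
static void poll_idle_sketch(u64 limit_ns)
{
	u64 start = now_ns();			/* hypothetical clock read */

	while (!need_resched()) {
		cpu_relax();			/* pause hint between flag checks */
		if (now_ns() - start > limit_ns)
			break;			/* stop polling, pick a real idle state */
	}
}
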
/linux-master/fs/
drop_caches.c
32 (mapping_empty(inode->i_mapping) && !need_resched())) {
/linux-master/arch/x86/include/asm/
mwait.h
109 * which can obviate IPI to trigger checking of need_resched.
110 * We execute MONITOR against need_resched and enter optimized wait state
111 * through MWAIT. Whenever someone changes need_resched, we would be woken
128 if (!need_resched()) {
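
The mwait.h hits describe the MONITOR/MWAIT variant of the wakeup: monitor the word carrying TIF_NEED_RESCHED so that setting the flag wakes the CPU without an IPI. A sketch of the sequence, assuming the kernel's __monitor()/__mwait() wrappers from <asm/mwait.h>:

static inline void mwait_if_idle_sketch(void *flags_word,
					unsigned long eax, unsigned long ecx)
{
	__monitor(flags_word, 0, 0);	/* watch the thread-flags cacheline */
	if (!need_resched())		/* flag may already have been set */
		__mwait(eax, ecx);	/* sleep until the monitored word is written */
}
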
/linux-master/kernel/locking/
osq_lock.c
141 * Wait to acquire the lock or cancellation. Note that need_resched()
146 if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
mutex.c
373 if (!owner_on_cpu(owner) || need_resched()) {
399 if (need_resched())
500 * If we fell out of the spin path because of need_resched(),
504 if (need_resched()) {
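
In osq_lock.c and mutex.c, need_resched() doubles as the cancellation signal for optimistic spinning. A sketch of that bailout pattern; owner_on_cpu() and need_resched() are the kernel's own, lock_is_held_by() is a hypothetical predicate for the sketch:

/* Keep spinning on the current lock owner only while the owner is actually
 * running and we have not been asked to reschedule ourselves. */
static bool spin_on_owner_sketch(struct task_struct *owner)
{
	while (lock_is_held_by(owner)) {	/* hypothetical predicate */
		if (!owner_on_cpu(owner) || need_resched())
			return false;		/* stop spinning, block on the lock instead */
		cpu_relax();
	}
	return true;				/* owner dropped it, worth trying to acquire */
}
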
/linux-master/fs/jbd2/
checkpoint.c
271 need_resched() || spin_needbreak(&journal->j_list_lock) ||
387 if (need_resched())
447 if (need_resched() || spin_needbreak(&journal->j_list_lock))
502 if (need_resched())
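
The jbd2 checkpoint hits show the lock-break pattern: while scanning a long list under a spinlock, drop the lock when need_resched() or spin_needbreak() fires, reschedule, then restart the scan. A sketch, with process_one_entry() as a hypothetical placeholder for the per-item work (a restart simply rescans from the head in this simplified version):

static void scan_list_sketch(spinlock_t *lock, struct list_head *head)
{
	struct list_head *pos;

restart:
	spin_lock(lock);
	list_for_each(pos, head) {
		process_one_entry(pos);			/* hypothetical work item */
		if (need_resched() || spin_needbreak(lock)) {
			spin_unlock(lock);
			cond_resched();			/* let the scheduler run someone else */
			goto restart;
		}
	}
	spin_unlock(lock);
}
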
/linux-master/kernel/sched/
idle.c
177 if (need_resched()) {
272 * then setting need_resched is guaranteed to cause the CPU to
279 while (!need_resched()) {
286 * wakes from the sleeping instruction. And testing need_resched()
303 * need_resched() check before re-executing the sleeping
350 * need_resched() is set while polling is set. That means that clearing
/linux-master/drivers/net/ethernet/sfc/
ef100_rep.c
399 bool need_resched; local
424 need_resched = efv->write_index != read_index;
426 if (need_resched)
/linux-master/drivers/char/hw_random/
s390-trng.c
77 if (need_resched()) {
/linux-master/mm/
dmapool_test.c
85 if (need_resched())
/linux-master/drivers/acpi/
acpi_pad.c
163 while (!need_resched()) {
204 /* If an external event has set the need_resched flag, then
208 if (unlikely(need_resched()))
/linux-master/kernel/trace/
trace_output.c
447 char need_resched; local
469 need_resched = 'N';
472 need_resched = 'n';
475 need_resched = 'p';
478 need_resched = '.';
491 irqs_off, need_resched, hardsoft_irq);
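
The trace_output.c hits compute the single resched character in the ftrace latency format. A sketch of that mapping, using the kernel's TRACE_FLAG_NEED_RESCHED and TRACE_FLAG_PREEMPT_RESCHED bits for the four result characters visible above:

static char resched_char_sketch(unsigned int flags)
{
	bool need    = flags & TRACE_FLAG_NEED_RESCHED;
	bool preempt = flags & TRACE_FLAG_PREEMPT_RESCHED;

	if (need && preempt)
		return 'N';	/* both forms of resched pending */
	if (need)
		return 'n';	/* TIF_NEED_RESCHED only */
	if (preempt)
		return 'p';	/* preempt-resched only */
	return '.';		/* nothing pending */
}
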
/linux-master/drivers/net/wireguard/
send.c
282 if (need_resched())
306 if (need_resched())
/linux-master/arch/arm/mach-omap2/
cpuidle34xx.c
114 if (omap_irq_pending() || need_resched())
pm33xx-core.c
221 if (omap_irq_pending() || need_resched())
/linux-master/crypto/
jitterentropy-testing.c
206 if (large_request && need_resched()) {
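
The wireguard, s390-trng, dmapool_test and jitterentropy hits are all the same voluntary-yield idiom: a long CPU-bound loop in process context checks need_resched() and calls cond_resched() so it does not hog the CPU on a non-preemptible kernel. A sketch, with do_unit_of_work() as a hypothetical placeholder:

static void long_worker_sketch(unsigned int nr_items)
{
	unsigned int i;

	for (i = 0; i < nr_items; i++) {
		do_unit_of_work(i);		/* hypothetical work item */
		if (need_resched())
			cond_resched();		/* give waiting tasks the CPU */
	}
}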

Completed in 433 milliseconds
