Searched refs: next (results 1 - 25 of 3343), sorted by relevance


/linux-master/arch/xtensa/include/asm/switch_to.h
  15: extern void *_switch_to(void *last, void *next);
  17: #define switch_to(prev,next,last) \
  19: (last) = _switch_to(prev, next); \
/linux-master/lib/list_debug.c
  21: struct list_head *next)
  23: if (CHECK_DATA_CORRUPTION(next->prev != prev,
  24: "list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
  25: prev, next->prev, next) ||
  26: CHECK_DATA_CORRUPTION(prev->next != next,
  27: "list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n",
  20: __list_add_valid(struct list_head *new, struct list_head *prev, struct list_head *next)  [argument]
  40: struct list_head *prev, *next;  [local]
[all...]
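The lib/list_debug.c hits above are the debug-build sanity checks that run before a node is spliced into a list: prev and next must still point at each other, and the new node must not already be one of them. A minimal user-space sketch of the same idea (illustrative names; CHECK_DATA_CORRUPTION is replaced here with a plain fprintf-and-fail check) might look like:

    #include <stdbool.h>
    #include <stdio.h>

    struct list_head {
        struct list_head *next, *prev;
    };

    /* Verify that prev and next are still adjacent before inserting new
     * between them; report and refuse the insert on corruption. */
    static bool list_add_valid(struct list_head *new,
                               struct list_head *prev, struct list_head *next)
    {
        if (next->prev != prev) {
            fprintf(stderr, "list_add corruption: next->prev (%p) != prev (%p)\n",
                    (void *)next->prev, (void *)prev);
            return false;
        }
        if (prev->next != next) {
            fprintf(stderr, "list_add corruption: prev->next (%p) != next (%p)\n",
                    (void *)prev->next, (void *)next);
            return false;
        }
        if (new == prev || new == next) {
            fprintf(stderr, "list_add corruption: double add of %p\n", (void *)new);
            return false;
        }
        return true;
    }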
/linux-master/arch/microblaze/include/asm/switch_to.h
  13: struct thread_info *next);
  15: #define switch_to(prev, next, last) \
  18: task_thread_info(next)); \
/linux-master/arch/nios2/include/asm/switch_to.h
  17: #define switch_to(prev, next, last) \
  26: : "r" (prev), "r" (next) \
/linux-master/tools/testing/selftests/bpf/progs/test_core_read_macros.c
  13: struct callback_head___shuffled *next;  [member in struct:callback_head___shuffled]
  38: /* next pointers for kernel address space have to be initialized from
  41: k_probe_in.next = &k_probe_in;
  42: __builtin_preserve_access_index(({k_core_in.next = &k_core_in;}));
  44: k_probe_out = (long)BPF_PROBE_READ(&k_probe_in, next, next, func);
  45: k_core_out = (long)BPF_CORE_READ(&k_core_in, next, next, func);
  46: u_probe_out = (long)BPF_PROBE_READ_USER(u_probe_in, next, next, func);
[all...]
/linux-master/arch/arm64/kvm/hyp/nvhe/list_debug.c
  30: struct list_head *next)
  32: if (NVHE_CHECK_DATA_CORRUPTION(next->prev != prev) ||
  33: NVHE_CHECK_DATA_CORRUPTION(prev->next != next) ||
  34: NVHE_CHECK_DATA_CORRUPTION(new == prev || new == next))
  42: struct list_head *prev, *next;  [local]
  45: next = entry->next;
  47: if (NVHE_CHECK_DATA_CORRUPTION(next == LIST_POISON1) ||
  49: NVHE_CHECK_DATA_CORRUPTION(prev->next != entry) ||
  29: __list_add_valid(struct list_head *new, struct list_head *prev, struct list_head *next)  [argument]
[all...]
/linux-master/arch/arc/include/asm/switch_to.h
  17: #define switch_to(prev, next, last) \
  19: dsp_save_restore(prev, next); \
  20: fpu_save_restore(prev, next); \
  21: last = __switch_to(prev, next);\
/linux-master/tools/usb/usbip/libsrc/list.h
  14: * sometimes we already know the next/prev entries and we can
  20: struct list_head *next, *prev;  [member in struct:list_head]
  30: list->next = list;
  38: * the prev/next entries already!
  42: struct list_head *next)
  44: next->prev = new;
  45: new->next = next;
  47: prev->next = new;
  60: __list_add(new, head, head->next);
  40: __list_add(struct list_head *new, struct list_head *prev, struct list_head *next)  [argument]
  70: __list_del(struct list_head * prev, struct list_head * next)  [argument]
[all...]
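The usbip copy above, like the other list.h copies in these results, implements the same circular doubly-linked list. A self-contained user-space sketch of the core insert/delete primitives (names chosen to mirror the kernel API, written out as plain functions rather than the original inlines/macros) could be:

    struct list_head {
        struct list_head *next, *prev;
    };

    /* An empty list is a node that points at itself in both directions. */
    static void list_init(struct list_head *list)
    {
        list->next = list;
        list->prev = list;
    }

    /* Insert new between two nodes already known to be adjacent. */
    static void __list_add(struct list_head *new,
                           struct list_head *prev, struct list_head *next)
    {
        next->prev = new;
        new->next = next;
        new->prev = prev;
        prev->next = new;
    }

    /* Add right after the head, i.e. at the front of the list. */
    static void list_add(struct list_head *new, struct list_head *head)
    {
        __list_add(new, head, head->next);
    }

    /* Unlink whatever currently sits between prev and next. */
    static void __list_del(struct list_head *prev, struct list_head *next)
    {
        next->prev = prev;
        prev->next = next;
    }

    static void list_del(struct list_head *entry)
    {
        __list_del(entry->prev, entry->next);
    }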
/linux-master/kernel/locking/mcs_spinlock.h
  19: struct mcs_spinlock *next;  [member in struct:mcs_spinlock]
  71: node->next = NULL;
  91: WRITE_ONCE(prev->next, node);
  104: struct mcs_spinlock *next = READ_ONCE(node->next);  [local]
  106: if (likely(!next)) {
  112: /* Wait until the next pointer is set */
  113: while (!(next = READ_ONCE(node->next)))
  117: /* Pass lock to next waiter. */
[all...]
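The mcs_spinlock.h hits show the two halves of the MCS handoff: the acquiring CPU publishes itself through prev->next, and the releasing CPU waits for node->next before passing the lock on. A hedged user-space sketch of the same protocol with C11 atomics (sequentially consistent throughout for simplicity; the kernel uses finer-grained ordering, and the names mcs_acquire/mcs_release are illustrative, not the kernel's) could be:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct mcs_node {
        _Atomic(struct mcs_node *) next;
        atomic_bool locked;                 /* true while this waiter must keep spinning */
    };

    struct mcs_lock {
        _Atomic(struct mcs_node *) tail;    /* last node in the queue, or NULL */
    };

    static void mcs_acquire(struct mcs_lock *lock, struct mcs_node *node)
    {
        atomic_store(&node->next, (struct mcs_node *)NULL);
        atomic_store(&node->locked, true);

        /* Append ourselves; the previous tail is our predecessor. */
        struct mcs_node *prev = atomic_exchange(&lock->tail, node);
        if (!prev)
            return;                         /* queue was empty: lock acquired */

        atomic_store(&prev->next, node);    /* publish so the predecessor can find us */
        while (atomic_load(&node->locked))
            ;                               /* spin until the predecessor hands over */
    }

    static void mcs_release(struct mcs_lock *lock, struct mcs_node *node)
    {
        struct mcs_node *next = atomic_load(&node->next);

        if (!next) {
            struct mcs_node *expected = node;
            /* No visible successor: try to mark the queue empty. */
            if (atomic_compare_exchange_strong(&lock->tail, &expected,
                                               (struct mcs_node *)NULL))
                return;
            /* A successor raced in; wait until it sets node->next. */
            while (!(next = atomic_load(&node->next)))
                ;
        }
        atomic_store(&next->locked, false); /* pass the lock to the next waiter */
    }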
/linux-master/kernel/locking/osq_lock.c
  38: * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
  46: struct optimistic_spin_node *next = NULL;  [local]
  69: * We must xchg() the @node->next value, because if we were to
  71: * @node->next might complete Step-A and think its @prev is
  76: * wait for a new @node->next from its Step-C.
  78: if (node->next) {
  79: next = xchg(&node->next, NULL);
  80: if (next)
  87: return next;
  93: struct optimistic_spin_node *prev, *next;  [local]
  209: struct optimistic_spin_node *node, *next;  [local]
[all...]
/linux-master/scripts/kconfig/list.h
  25: struct list_head *next, *prev;  [member in struct:list_head]
  50: for (pos = list_entry((head)->next, typeof(*pos), member); \
  52: pos = list_entry(pos->member.next, typeof(*pos), member))
  62: for (pos = list_entry((head)->next, typeof(*pos), member), \
  63: n = list_entry(pos->member.next, typeof(*pos), member); \
  65: pos = n, n = list_entry(n->member.next, typeof(*n), member))
  73: return head->next == head;
  80: * the prev/next entries already!
  84: struct list_head *next)
  86: next
  82: __list_add(struct list_head *_new, struct list_head *prev, struct list_head *next)  [argument]
  112: __list_del(struct list_head *prev, struct list_head *next)  [argument]
[all...]
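Most of the kconfig hits are the iteration macros, which chase ->next through the embedded list_head and convert back to the containing object with list_entry. A compact, self-contained illustration of how that pattern is used (simplified macros that take an explicit type instead of typeof; struct item and the other names are hypothetical) might be:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    /* Walk the list by following ->next until we wrap back to the head. */
    #define list_for_each_entry(pos, head, type, member)             \
        for (pos = list_entry((head)->next, type, member);           \
             &pos->member != (head);                                 \
             pos = list_entry(pos->member.next, type, member))

    struct item {
        int value;
        struct list_head node;   /* embedded link */
    };

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
    }

    int main(void)
    {
        struct list_head head = { &head, &head };
        struct item a = { .value = 1 }, b = { .value = 2 };
        struct item *pos;

        list_add_tail(&a.node, &head);
        list_add_tail(&b.node, &head);

        list_for_each_entry(pos, &head, struct item, node)
            printf("%d\n", pos->value);   /* prints 1 then 2 */
        return 0;
    }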
/linux-master/arch/mips/include/asm/switch_to.h
  26: * @next: The task to begin executing.
  27: * @next_ti: task_thread_info(next).
  30: * the context of next. Returns prev.
  33: struct task_struct *next, struct thread_info *next_ti);
  62: next->thread.emulated_fp = 0; \
  88: # define __sanitize_fcr31(next) \
  90: unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
  94: pc = (void __user *)task_pt_regs(next)->cp0_epc; \
  95: next->thread.fpu.fcr31 &= ~fcr31; \
  96: force_fcr31_sig(fcr31, pc, next); \
[all...]
/linux-master/arch/csky/include/asm/switch_to.h
  10: struct task_struct *next)
  13: restore_from_user_fp(&next->thread.user_fp);
  17: struct task_struct *next)
  27: #define switch_to(prev, next, last) \
  30: struct task_struct *__next = (next); \
  32: ((last) = __switch_to((prev), (next))); \
  9: __switch_to_fpu(struct task_struct *prev, struct task_struct *next)  [argument]
  16: __switch_to_fpu(struct task_struct *prev, struct task_struct *next)  [argument]
/linux-master/arch/parisc/include/asm/switch_to.h
  9: #define switch_to(prev, next, last) do { \
  10: (last) = _switch_to(prev, next); \

/linux-master/arch/parisc/include/asm/mmu_context.h
  51: struct mm_struct *next, struct task_struct *tsk)
  53: if (prev != next) {
  57: spinlock_t *pgd_lock = &next->page_table_lock;
  60: mtctl(__pa(next->pgd), 25);
  61: load_context(next->context);
  66: struct mm_struct *next, struct task_struct *tsk)
  70: if (prev == next)
  74: switch_mm_irqs_off(prev, next, tsk);
  80: static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)  [argument]
  90: BUG_ON(next
  50: switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)  [argument]
  65: switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)  [argument]
[all...]
/linux-master/include/asm-generic/switch_to.h
  21: #define switch_to(prev, next, last) \
  23: ((last) = __switch_to((prev), (next))); \
/linux-master/arch/powerpc/include/asm/membarrier.h
  5: struct mm_struct *next,
  16: likely(!(atomic_read(&next->membarrier_state) &
  4: membarrier_arch_switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)  [argument]
/linux-master/arch/sparc/include/asm/switch_to_64.h
  7: #define prepare_arch_switch(next) \
  15: * for l0/l1. It will use one for 'next' and the other to hold
  16: * the output value of 'last'. 'next' is not referenced again
  21: #define switch_to(prev, next, last) \
  26: task_thread_info(next); \
  57: : "0" (task_thread_info(next)), \
/linux-master/tools/firewire/list.h
  3: struct list *next, *prev;  [member in struct:list]
  9: list->next = list;
  16: return list->next == list;
  23: new_link->next = link;
  24: new_link->prev->next = new_link;
  25: new_link->next->prev = new_link;
  37: list_insert(list->next, new_link);
  43: link->prev->next = link->next;
  44: link->next->prev = link->prev;
[all...]
/linux-master/include/drm/spsc_queue.h
  35: struct spsc_node *next;  [member in struct:spsc_node]
  69: node->next = NULL;
  73: tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
  91: struct spsc_node *next, *node;  [local]
  101: next = READ_ONCE(node->next);
  102: WRITE_ONCE(queue->head, next);
  104: if (unlikely(!next)) {
  108: (long)&node->next, (long) &queue->head) != (long)&node->next) {
[all...]
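spsc_queue.h keeps the tail as a pointer to the last node's next field (or to the head pointer when empty), so a push is one exchange on tail followed by a store through the old tail. A single-threaded sketch of that pointer trick, with the atomics and the consumer-side cmpxchg stripped out for clarity (so it is not safe for concurrent use as written; function and struct names are simplified stand-ins, not the header's API):

    #include <stddef.h>

    struct spsc_node  { struct spsc_node *next; };
    struct spsc_queue { struct spsc_node *head; struct spsc_node **tail; };

    static void spsc_init(struct spsc_queue *q)
    {
        q->head = NULL;
        q->tail = &q->head;          /* "last next field" is the head pointer itself */
    }

    static void spsc_push(struct spsc_queue *q, struct spsc_node *n)
    {
        n->next = NULL;
        struct spsc_node **old_tail = q->tail;   /* the kernel does this with an atomic xchg */
        q->tail = &n->next;
        *old_tail = n;               /* publish the node to the consumer side */
    }

    static struct spsc_node *spsc_pop(struct spsc_queue *q)
    {
        struct spsc_node *n = q->head;

        if (!n)
            return NULL;
        q->head = n->next;
        if (!n->next)
            q->tail = &q->head;      /* queue drained: point tail back at head */
        return n;
    }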
/linux-master/arch/hexagon/include/asm/mmu_context.h
  29: static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,  [argument]
  38: if (next->context.generation < prev->context.generation) {
  40: next->pgd[l1] = init_mm.pgd[l1];
  42: next->context.generation = prev->context.generation;
  45: __vmnewmap((void *)next->context.ptbase);
  52: static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)  [argument]
  57: switch_mm(prev, next, current_thread_info()->task);
/linux-master/tools/lib/list_sort.c
  24: tail = &a->next;
  25: a = a->next;
  32: tail = &b->next;
  33: b = b->next;
  60: tail->next = a;
  63: a = a->next;
  67: tail->next = b;
  70: b = b->next;
  79: tail->next = b;
  92: b = b->next;
  242: struct list_head *next = pending->prev;  [local]
[all...]
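The merge step that list_sort.c builds on is the classic "splice the smaller head, advance that chain" loop over ->next pointers. On a plain singly-linked node type (a hypothetical struct node with an inline key, not the kernel's list_head plus comparison callback) the same loop looks like:

    struct node {
        int value;
        struct node *next;
    };

    /* Stable merge of two already-sorted chains; ties take from a first. */
    static struct node *merge(struct node *a, struct node *b)
    {
        struct node head, *tail = &head;

        while (a && b) {
            if (a->value <= b->value) {
                tail->next = a;
                a = a->next;
            } else {
                tail->next = b;
                b = b->next;
            }
            tail = tail->next;
        }
        tail->next = a ? a : b;     /* append whichever chain is left over */
        return head.next;
    }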
/linux-master/arch/ia64/include/asm/switch_to.h
  37: #define __switch_to(prev,next,last) do { \
  40: if (IA64_HAS_EXTRA_STATE(next)) \
  41: ia64_load_extra(next); \
  42: ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \
  43: (last) = ia64_switch_to((next)); \
  53: # define switch_to(prev,next,last) do { \
  59: __switch_to(prev, next, last); \
  60: /* "next" in old context is "current" in new context */ \
  68: # define switch_to(prev,next,last) do { \
[all...]
/linux-master/include/linux/list_bl.h
  39: struct hlist_bl_node *next, **pprev;  [member in struct:hlist_bl_node]
  46: h->next = NULL;
  82: n->next = first;
  84: first->pprev = &n->next;
  90: struct hlist_bl_node *next)
  92: struct hlist_bl_node **pprev = next->pprev;
  95: n->next = next;
  96: next->pprev = &n->next;
  89: hlist_bl_add_before(struct hlist_bl_node *n, struct hlist_bl_node *next)  [argument]
  117: struct hlist_bl_node *next = n->next;  [local]
[all...]
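hlist_bl_node, like the plain hlist_node in list.h below, stores pprev: the address of the previous node's next field rather than a back pointer to the node itself, which is what lets deletion work the same way whether the node is first in the bucket or not. A short sketch of that pattern, without the lock bit that hlist_bl packs into the head pointer:

    #include <stddef.h>

    struct hlist_node { struct hlist_node *next, **pprev; };
    struct hlist_head { struct hlist_node *first; };

    static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
    {
        struct hlist_node *first = h->first;

        n->next = first;
        if (first)
            first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
    }

    /* Works at any position: *pprev is either the head's first pointer
     * or the previous node's next pointer. */
    static void hlist_del(struct hlist_node *n)
    {
        struct hlist_node *next = n->next;
        struct hlist_node **pprev = n->pprev;

        *pprev = next;
        if (next)
            next->pprev = pprev;
    }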
/linux-master/include/linux/list.h
  18: * sometimes we already know the next/prev entries and we can
  37: WRITE_ONCE(list->next, list);
  44: struct list_head *next);
  49: struct list_head *next)
  63: * the prev/next entries already!
  67: struct list_head *next)
  69: if (!__list_add_valid(new, prev, next))
  72: next->prev = new;
  73: new->next = next;
  47: __list_add_valid(struct list_head *new, struct list_head *prev, struct list_head *next)  [argument]
  65: __list_add(struct list_head *new, struct list_head *prev, struct list_head *next)  [argument]
  112: __list_del(struct list_head * prev, struct list_head * next)  [argument]
  328: struct list_head *next = smp_load_acquire(&head->next);  [local]
  442: __list_splice(const struct list_head *list, struct list_head *prev, struct list_head *next)  [argument]
  881: struct hlist_node *next = n->next;  [local]
  940: hlist_add_before(struct hlist_node *n, struct hlist_node *next)  [argument]
[all...]

Completed in 309 milliseconds
