Lines Matching defs:node

10  * Using a single mcs node per CPU is safe because sleeping locks should not be
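These matches appear to come from kernel/locking/osq_lock.c. For reference, a minimal sketch of the data structures the matched lines operate on, assuming the layout declared in include/linux/osq_lock.h (field order and padding may differ between kernel versions):

    struct optimistic_spin_node {
            struct optimistic_spin_node *next, *prev;
            int locked;     /* 1 if lock acquired */
            int cpu;        /* encoded CPU number, i.e. cpu + 1 */
    };

    struct optimistic_spin_queue {
            /*
             * Encoded CPU number of the tail node in the queue;
             * OSQ_UNLOCKED_VAL (0) when the queue is empty.
             */
            atomic_t tail;
    };

    /* One queue node per CPU: safe because we only spin with preemption disabled. */
    static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);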
32 static inline int node_cpu(struct optimistic_spin_node *node)
34 return node->cpu - 1;
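The "- 1" above undoes the +1 encoding of CPU numbers: the queue tail stores cpu + 1 so that 0 can mean "no CPU" (OSQ_UNLOCKED_VAL). A sketch of the companion helpers assumed by the rest of this listing:

    /* 0 means "no CPU", so encoded values are the CPU number plus one. */
    static inline int encode_cpu(int cpu_nr)
    {
            return cpu_nr + 1;
    }

    /* Map an encoded CPU value back to that CPU's per-CPU queue node. */
    static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
    {
            return per_cpu_ptr(&osq_node, encoded_cpu_val - 1);
    }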
45 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
48 * If osq_lock() is being cancelled there must be a previous node
50 * For osq_unlock() there is never a previous node and old_cpu is
55 struct optimistic_spin_node *node,
72 * We must xchg() the @node->next value, because if we were to
74 * @node->next might complete Step-A and think its @prev is
79 * wait for a new @node->next from its Step-C.
81 if (node->next) {
84 next = xchg(&node->next, NULL);
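Lines 45-84 belong to osq_wait_next(), which spins until it can either hand @lock back to @old_cpu (we were the tail, so nothing is queued behind us) or return a stable @node->next. A sketch of how those fragments fit together, not the verbatim kernel function:

    static inline struct optimistic_spin_node *
    osq_wait_next(struct optimistic_spin_queue *lock,
                  struct optimistic_spin_node *node,
                  int old_cpu)
    {
            int curr = node->cpu;

            for (;;) {
                    /*
                     * Still the tail: move @lock back to @old_cpu. Nobody is
                     * queued behind us, so there is no @next to return.
                     */
                    if (atomic_read(&lock->tail) == curr &&
                        atomic_cmpxchg_acquire(&lock->tail, curr, old_cpu) == curr)
                            return NULL;

                    /*
                     * A successor exists (or is appearing); xchg() rather than
                     * plain-clear the pointer so a concurrent unqueue of that
                     * successor cannot complete its Step-A against a stale @prev.
                     */
                    if (node->next) {
                            struct optimistic_spin_node *next;

                            next = xchg(&node->next, NULL);
                            if (next)
                                    return next;
                    }

                    cpu_relax();
            }
    }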
95 struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
100 node->locked = 0;
101 node->next = NULL;
102 node->cpu = curr;
107 * the node fields we just initialised) semantics when updating
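Lines 95-107 are the head of osq_lock(): initialise this CPU's node, then publish ourselves as the new tail with a fully ordered xchg. A sketch of that entry path, using the encode_cpu()/decode_cpu() helpers assumed above:

    bool osq_lock(struct optimistic_spin_queue *lock)
    {
            struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
            struct optimistic_spin_node *prev, *next;
            int curr = encode_cpu(smp_processor_id());
            int old;

            node->locked = 0;
            node->next = NULL;
            node->cpu = curr;

            /*
             * atomic_xchg() is fully ordered: it publishes (RELEASEs) the
             * node fields initialised above and ACQUIREs against the
             * previous unlock() of this queue.
             */
            old = atomic_xchg(&lock->tail, curr);
            if (old == OSQ_UNLOCKED_VAL)
                    return true;    /* queue was empty: lock acquired */

            /* Otherwise queue up behind the previous tail (continued below). */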
115 node->prev = prev;
120 * node->prev = prev osq_wait_next()
122 * prev->next = node next->prev = prev // unqueue-C
124 * Here 'node->prev' and 'next->prev' are the same variable and we need
129 WRITE_ONCE(prev->next, node);
133 * moment unlock can proceed and wipe the node element from stack.
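Lines 115-133 are the queueing step. The store to node->prev (line 115) and the store that makes @node visible through prev->next (line 129) must stay ordered, because Step-C of a concurrent unqueue writes the same node->prev field; the kernel separates them with a write barrier. Continuing the sketch:

            prev = decode_cpu(old);
            node->prev = prev;

            /*
             * Order the node->prev store before publishing @node in
             * prev->next; Step-C of a concurrent unqueue ("next->prev =
             * prev") writes the same variable, so these stores must not be
             * reordered.
             */
            smp_wmb();

            WRITE_ONCE(prev->next, node);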
146 if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
147 vcpu_is_preempted(node_cpu(node->prev))))
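Lines 146-147 are the wait loop: spin on our own node->locked until the predecessor hands over the lock, but give up if need_resched() fires or the predecessor's vCPU looks preempted. Continuing the sketch:

            /*
             * smp_cond_load_relaxed() re-reads node->locked (as VAL) until
             * the expression is true and returns the last value read:
             * non-zero means the lock was handed to us, zero means we are
             * bailing out because of need_resched()/vcpu_is_preempted().
             */
            if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
                                      vcpu_is_preempted(node_cpu(node->prev))))
                    return true;

            /* Cancelled: fall through to the unqueue path, Steps A, B and C. */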
164 if (data_race(prev->next) == node &&
165 cmpxchg(&prev->next, node, NULL) == node)
170 * in which case we should observe @node->locked becoming
173 if (smp_load_acquire(&node->locked))
180 * case its step-C will write us a new @node->prev pointer.
182 prev = READ_ONCE(node->prev);
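Lines 164-182 are Step-A of the unqueue path: try to detach @node from prev->next, while also watching node->locked (an unlock() may be handing us the lock right now) and re-reading node->prev (a concurrent unqueue's Step-C may have installed a new predecessor). Continuing the sketch:

            for (;;) {
                    /*
                     * Undo our prev->next assignment. data_race() documents
                     * that the racy pre-check is intentional; cpu_relax()
                     * below also acts as a compiler barrier for the re-read.
                     */
                    if (data_race(prev->next) == node &&
                        cmpxchg(&prev->next, node, NULL) == node)
                            break;

                    /*
                     * The cmpxchg() can only fail against a racing unlock(),
                     * in which case node->locked will become true.
                     */
                    if (smp_load_acquire(&node->locked))
                            return true;

                    cpu_relax();

                    /*
                     * Or we raced with a concurrent unqueue's Step-B, whose
                     * Step-C will give us a new node->prev to retry against.
                     */
                    prev = READ_ONCE(node->prev);
            }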
188 * Similar to unlock(), wait for @node->next or move @lock from @node
192 next = osq_wait_next(lock, node, prev->cpu);
200 * pointer, @next is stable because our @node->next pointer is NULL and
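Lines 188-200 are Steps B and C: obtain a stable successor via osq_wait_next(), handing @lock back to @prev if there is none, then splice @prev and @next together. Finishing the osq_lock() sketch:

            /* Step B: wait for a stable @next, or hand @lock back to @prev. */
            next = osq_wait_next(lock, node, prev->cpu);
            if (!next)
                    return false;

            /*
             * Step C: unlink @node. @prev still waits for a new prev->next,
             * and @next spins in its own Step-A because our node->next is
             * NULL, so both are stable while we rewrite the links.
             */
            WRITE_ONCE(next->prev, prev);
            WRITE_ONCE(prev->next, next);

            return false;
    }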
212 struct optimistic_spin_node *node, *next;
225 node = this_cpu_ptr(&osq_node);
226 next = xchg(&node->next, NULL);
232 next = osq_wait_next(lock, node, OSQ_UNLOCKED_VAL);
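Lines 212-232 belong to osq_unlock(): the uncontended fast path clears the tail with a release cmpxchg; otherwise the lock is passed to the successor, falling back to osq_wait_next() with OSQ_UNLOCKED_VAL when node->next is not yet stable. A sketch along the same lines:

    void osq_unlock(struct optimistic_spin_queue *lock)
    {
            struct optimistic_spin_node *node, *next;
            int curr = encode_cpu(smp_processor_id());

            /* Fast path: we are still the tail, nobody queued behind us. */
            if (likely(atomic_cmpxchg_release(&lock->tail, curr,
                                              OSQ_UNLOCKED_VAL) == curr))
                    return;

            /* Second most likely case: the successor has already linked in. */
            node = this_cpu_ptr(&osq_node);
            next = xchg(&node->next, NULL);
            if (next) {
                    WRITE_ONCE(next->locked, 1);
                    return;
            }

            /* Successor is mid-queue/unqueue: wait for a stable next pointer. */
            next = osq_wait_next(lock, node, OSQ_UNLOCKED_VAL);
            if (next)
                    WRITE_ONCE(next->locked, 1);
    }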