Lines matching refs: ctx (fs/eventfd.c)

48  * @ctx: [in] Pointer to the eventfd context.
56 void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
71 spin_lock_irqsave(&ctx->wqh.lock, flags);
73 if (ctx->count < ULLONG_MAX)
74 ctx->count++;
75 if (waitqueue_active(&ctx->wqh))
76 wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
78 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
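
A minimal driver-side sketch of how this signalling path is normally driven (the mydev structure and helper names below are hypothetical, not from this file): look up the context from a userspace-supplied fd with eventfd_ctx_fdget(), raise events with eventfd_signal_mask() (a mask of 0 is the plain eventfd_signal() case), and drop the reference with eventfd_ctx_put(), both of which appear further down in this listing.

    #include <linux/err.h>
    #include <linux/eventfd.h>

    struct mydev {
            struct eventfd_ctx *trigger;    /* hypothetical: stashed at configuration time */
    };

    /* Userspace hands us an eventfd file descriptor to be signalled later. */
    static int mydev_set_trigger(struct mydev *dev, int fd)
    {
            struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

            if (IS_ERR(ctx))
                    return PTR_ERR(ctx);
            dev->trigger = ctx;
            return 0;
    }

    /* On completion: add 1 to ctx->count and wake readers/pollers; EPOLLIN is
     * always included in the wakeup mask (see line 76 above). */
    static void mydev_complete(struct mydev *dev)
    {
            if (dev->trigger)
                    eventfd_signal_mask(dev->trigger, 0);
    }

    static void mydev_clear_trigger(struct mydev *dev)
    {
            if (dev->trigger) {
                    eventfd_ctx_put(dev->trigger);
                    dev->trigger = NULL;
            }
    }
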
82 static void eventfd_free_ctx(struct eventfd_ctx *ctx)
84 if (ctx->id >= 0)
85 ida_free(&eventfd_ida, ctx->id);
86 kfree(ctx);
91 struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);
93 eventfd_free_ctx(ctx);
98 * @ctx: [in] Pointer to eventfd context.
103 void eventfd_ctx_put(struct eventfd_ctx *ctx)
105 kref_put(&ctx->kref, eventfd_free);
111 struct eventfd_ctx *ctx = file->private_data;
113 wake_up_poll(&ctx->wqh, EPOLLHUP);
114 eventfd_ctx_put(ctx);
120 struct eventfd_ctx *ctx = file->private_data;
124 poll_wait(file, &ctx->wqh, wait);
127 * All writes to ctx->count occur within ctx->wqh.lock. This read
128 * can be done outside ctx->wqh.lock because we know that poll_wait
138 * lock ctx->wqh.lock (in poll_wait)
139 * count = ctx->count
141 * unlock ctx->wqh.lock
142 lock ctx->wqh.lock
143 * ctx->count += n
146 unlock ctx->wqh.lock
153 * count = ctx->count (INVALID!)
154 lock ctx->wqh.lock
155 * ctx->count += n
158 unlock ctx->wqh.lock
159 * lock ctx->wqh.lock (in poll_wait)
161 * unlock ctx->wqh.lock
164 count = READ_ONCE(ctx->count);
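
The invariant eventfd_poll() reports, counter nonzero means readable, is easy to observe from userspace; a small self-contained program (not part of this listing, relying only on documented eventfd(2) behaviour) might look like:

    #include <poll.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
            int efd = eventfd(0, 0);
            uint64_t one = 1;
            struct pollfd pfd = { .fd = efd, .events = POLLIN };
            int ready;

            /* Counter is 0: not readable yet. */
            ready = poll(&pfd, 1, 0);
            printf("before write: poll() = %d\n", ready);            /* 0 */

            /* Adding to the counter makes the descriptor readable. */
            write(efd, &one, sizeof(one));
            ready = poll(&pfd, 1, 0);
            printf("after write:  poll() = %d, POLLIN = %d\n",
                   ready, !!(pfd.revents & POLLIN));                 /* 1, 1 */

            close(efd);
            return 0;
    }
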
176 void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
178 lockdep_assert_held(&ctx->wqh.lock);
180 *cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
181 ctx->count -= *cnt;
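
A worked example of the EFD_SEMAPHORE branch above, again from the userspace side: in semaphore mode every read consumes exactly 1 from the counter, otherwise a single read drains the whole counter to zero.

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
            uint64_t val;
            int sem = eventfd(3, EFD_SEMAPHORE);
            int evt = eventfd(3, 0);

            for (int i = 0; i < 3; i++) {
                    read(sem, &val, sizeof(val));
                    printf("semaphore read -> %llu\n", (unsigned long long)val); /* 1 each time */
            }

            read(evt, &val, sizeof(val));
            printf("plain read     -> %llu\n", (unsigned long long)val);         /* 3, counter now 0 */

            close(sem);
            close(evt);
            return 0;
    }
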
187 * @ctx: [in] Pointer to eventfd context.
198 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
203 spin_lock_irqsave(&ctx->wqh.lock, flags);
204 eventfd_ctx_do_read(ctx, cnt);
205 __remove_wait_queue(&ctx->wqh, wait);
206 if (*cnt != 0 && waitqueue_active(&ctx->wqh))
207 wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
208 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
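
eventfd_ctx_remove_wait_queue() serves in-kernel consumers that attached their own wait_queue_entry to ctx->wqh (typically through a poll-table callback) and now need to detach it: the pending count is consumed into *cnt under the waitqueue lock, and writers are woken with EPOLLOUT because room was just freed in the counter.
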
217 struct eventfd_ctx *ctx = file->private_data;
222 spin_lock_irq(&ctx->wqh.lock);
223 if (!ctx->count) {
226 spin_unlock_irq(&ctx->wqh.lock);
230 if (wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) {
231 spin_unlock_irq(&ctx->wqh.lock);
235 eventfd_ctx_do_read(ctx, &ucnt);
237 if (waitqueue_active(&ctx->wqh))
238 wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
240 spin_unlock_irq(&ctx->wqh.lock);
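
To hit the count == 0 branch above without sleeping on ctx->wqh, the descriptor can be created non-blocking; a short sketch, relying only on documented eventfd(2) behaviour:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
            int efd = eventfd(0, EFD_NONBLOCK);
            uint64_t val;

            /* Counter is 0 and O_NONBLOCK is set, so the read fails with EAGAIN
             * instead of waiting for a writer. */
            if (read(efd, &val, sizeof(val)) < 0)
                    printf("read on empty counter: %s\n", strerror(errno));

            close(efd);
            return 0;
    }
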
250 struct eventfd_ctx *ctx = file->private_data;
260 spin_lock_irq(&ctx->wqh.lock);
262 if (ULLONG_MAX - ctx->count > ucnt)
265 res = wait_event_interruptible_locked_irq(ctx->wqh,
266 ULLONG_MAX - ctx->count > ucnt);
271 ctx->count += ucnt;
273 if (waitqueue_active(&ctx->wqh))
274 wake_up_locked_poll(&ctx->wqh, EPOLLIN);
277 spin_unlock_irq(&ctx->wqh.lock);
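
The check above caps the counter at ULLONG_MAX - 1; once a write would push past that, eventfd_write() sleeps until a reader makes room, or fails with EAGAIN on a non-blocking descriptor (and, per eventfd(2), writing ULLONG_MAX itself is rejected outright). A small sketch of the non-blocking case:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
            int efd = eventfd(0, EFD_NONBLOCK);
            uint64_t big = UINT64_MAX - 1;   /* largest value the counter may hold */
            uint64_t one = 1;

            write(efd, &big, sizeof(big));   /* counter is now ULLONG_MAX - 1 */

            /* One more would overflow, so the non-blocking write fails. */
            if (write(efd, &one, sizeof(one)) < 0)
                    printf("second write: %s\n", strerror(errno));   /* EAGAIN */

            close(efd);
            return 0;
    }
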
285 struct eventfd_ctx *ctx = f->private_data;
288 spin_lock_irq(&ctx->wqh.lock);
289 cnt = ctx->count;
290 spin_unlock_irq(&ctx->wqh.lock);
297 ctx->id,
298 !!(ctx->flags & EFD_SEMAPHORE));
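
The fdinfo hook above is what makes /proc/<pid>/fdinfo/<fd> useful for eventfds: it reports the current counter value (sampled under ctx->wqh.lock), the ida-allocated id, and whether the descriptor is in EFD_SEMAPHORE mode.
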
350 struct eventfd_ctx *ctx;
354 ctx = eventfd_ctx_fileget(f.file);
356 return ctx;
371 struct eventfd_ctx *ctx;
376 ctx = file->private_data;
377 kref_get(&ctx->kref);
378 return ctx;
384 struct eventfd_ctx *ctx;
396 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
397 if (!ctx)
400 kref_init(&ctx->kref);
401 init_waitqueue_head(&ctx->wqh);
402 ctx->count = count;
403 ctx->flags = flags;
404 ctx->id = ida_alloc(&eventfd_ida, GFP_KERNEL);
412 file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
423 eventfd_free_ctx(ctx);
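
Tying the creation path together: the two arguments of eventfd(2) land directly in the context initialised above, initval in ctx->count and the flag bits in ctx->flags, with the returned descriptor backed by the "[eventfd]" anon inode. A minimal userspace round trip:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
            /* initval = 42 becomes ctx->count; flags become ctx->flags. */
            int efd = eventfd(42, EFD_CLOEXEC | EFD_NONBLOCK);
            uint64_t val = 0;

            read(efd, &val, sizeof(val));    /* drains the initial count */
            printf("initial count = %llu\n", (unsigned long long)val);   /* 42 */

            close(efd);
            return 0;
    }
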