/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef LWQ_H
#define LWQ_H
/*
 * Light-weight single-linked queue built from llist
 *
 * Entries can be enqueued from any context with no locking.
 * Entries can be dequeued from process context with integrated locking.
 *
 * This is particularly suitable when work items are queued in
 * BH or IRQ context, and where work items are handled one at a time
 * by dedicated threads.
 */
#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/llist.h>

/* Embed one of these in each item that will be queued on an lwq. */
struct lwq_node {
	struct llist_node node;	/* links this entry into the queue's llist */
};

struct lwq {
	spinlock_t		lock;		/* serialises dequeue side only */
	struct llist_node	*ready;		/* entries to be dequeued */
	struct llist_head	new;		/* entries being enqueued */
};

2919370Spst/**
30130803Smarcel * lwq_init - initialise a lwq
3119370Spst * @q:	the lwq object
3298944Sobrien */
3319370Spststatic inline void lwq_init(struct lwq *q)
3498944Sobrien{
3519370Spst	spin_lock_init(&q->lock);
3698944Sobrien	q->ready = NULL;
3719370Spst	init_llist_head(&q->new);
3898944Sobrien}
3919370Spst
4098944Sobrien/**
4119370Spst * lwq_empty - test if lwq contains any entry
4298944Sobrien * @q:	the lwq object
4398944Sobrien *
4419370Spst * This empty test contains an acquire barrier so that if a wakeup
4519370Spst * is sent when lwq_dequeue returns true, it is safe to go to sleep after
4619370Spst * a test on lwq_empty().
4719370Spst */
4819370Spststatic inline bool lwq_empty(struct lwq *q)
4919370Spst{
5019370Spst	/* acquire ensures ordering wrt lwq_enqueue() */
5119370Spst	return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new);
5298944Sobrien}
5319370Spst
struct llist_node *__lwq_dequeue(struct lwq *q);
/**
 * lwq_dequeue - dequeue first (oldest) entry from lwq
 * @q:		the queue to dequeue from
 * @type:	the type of object to return
 * @member:	the member in returned object which is an lwq_node.
 *
 * Remove a single object from the lwq and return it.  This will take
 * a spinlock and so must always be called in the same context, typically
 * process context.
 */
#define lwq_dequeue(q, type, member)					\
	({ struct llist_node *_n = __lwq_dequeue(q);			\
	  _n ? container_of(_n, type, member.node) : NULL; })

/* Detach every queued entry at once; see lwq_for_each_safe() for iteration. */
struct llist_node *lwq_dequeue_all(struct lwq *q);

/**
 * lwq_for_each_safe - iterate over detached queue allowing deletion
 * @_n:		iterator variable
 * @_t1:	temporary struct llist_node **
 * @_t2:	temporary struct llist_node *
 * @_l:		address of llist_node pointer from lwq_dequeue_all()
 * @_member:	member in _n where lwq_node is found.
 *
 * Iterate over members in a dequeued list.  If the iterator variable
 * is set to NULL, the iterator removes that entry from the queue:
 * the saved successor (@_t2) is spliced back in at *@_t1, unlinking
 * the current entry.
 */
#define lwq_for_each_safe(_n, _t1, _t2, _l, _member)			\
	for (_t1 = (_l);						\
	     *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\
		       _t2 = ((*_t1)->next),				\
		       true)						\
	     : false;							\
	     (_n) ? (_t1 = &(_n)->_member.node.next, 0)			\
	     : ((*(_t1) = (_t2)),  0))

9119370Spst/**
9219370Spst * lwq_enqueue - add a new item to the end of the queue
9398944Sobrien * @n	- the lwq_node embedded in the item to be added
9419370Spst * @q	- the lwq to append to.
9519370Spst *
9619370Spst * No locking is needed to append to the queue so this can
9719370Spst * be called from any context.
9819370Spst * Return %true is the list may have previously been empty.
9919370Spst */
10019370Spststatic inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q)
10119370Spst{
10219370Spst	/* acquire enqures ordering wrt lwq_dequeue */
10398944Sobrien	return llist_add(&n->node, &q->new) &&
10419370Spst		smp_load_acquire(&q->ready) == NULL;
10519370Spst}
10619370Spst
10719370Spst/**
10819370Spst * lwq_enqueue_batch - add a list of new items to the end of the queue
10919370Spst * @n	- the lwq_node embedded in the first item to be added
11019370Spst * @q	- the lwq to append to.
11198944Sobrien *
11298944Sobrien * No locking is needed to append to the queue so this can
11319370Spst * be called from any context.
11419370Spst * Return %true is the list may have previously been empty.
11519370Spst */
11698944Sobrienstatic inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q)
11719370Spst{
11819370Spst	struct llist_node *e = n;
11919370Spst
12098944Sobrien	/* acquire enqures ordering wrt lwq_dequeue */
12119370Spst	return llist_add_batch(llist_reverse_order(n), e, &q->new) &&
12219370Spst		smp_load_acquire(&q->ready) == NULL;
12398944Sobrien}
#endif /* LWQ_H */