1/* SPDX-License-Identifier: MIT */
2
3/*
4 * Copyright �� 2019 Intel Corporation
5 */
6
7#ifndef I915_SW_FENCE_WORK_H
8#define I915_SW_FENCE_WORK_H
9
10#include <linux/dma-fence.h>
11#include <linux/spinlock.h>
12#include <linux/workqueue.h>
13
14#include "i915_sw_fence.h"
15
16struct dma_fence_work;
17
18struct dma_fence_work_ops {
19	const char *name;
20	void (*work)(struct dma_fence_work *f);
21	void (*release)(struct dma_fence_work *f);
22};
23
24struct dma_fence_work {
25	struct dma_fence dma;
26	spinlock_t lock;
27
28	struct i915_sw_fence chain;
29	struct i915_sw_dma_fence_cb cb;
30
31	struct work_struct work;
32	const struct dma_fence_work_ops *ops;
33};
34
35enum {
36	DMA_FENCE_WORK_IMM = DMA_FENCE_FLAG_USER_BITS,
37};
38
39void dma_fence_work_init(struct dma_fence_work *f,
40			 const struct dma_fence_work_ops *ops);
41int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal);
42
43static inline void dma_fence_work_commit(struct dma_fence_work *f)
44{
45	i915_sw_fence_commit(&f->chain);
46}
47
48/**
49 * dma_fence_work_commit_imm: Commit the fence, and if possible execute locally.
50 * @f: the fenced worker
51 *
52 * Instead of always scheduling a worker to execute the callback (see
53 * dma_fence_work_commit()), we try to execute the callback immediately in
54 * the local context. It is required that the fence be committed before it
55 * is published, and that no other threads try to tamper with the number
56 * of asynchronous waits on the fence (or else the callback will be
57 * executed in the wrong context, i.e. not the callers).
58 */
59static inline void dma_fence_work_commit_imm(struct dma_fence_work *f)
60{
61	if (atomic_read(&f->chain.pending) <= 1)
62		__set_bit(DMA_FENCE_WORK_IMM, &f->dma.flags);
63
64	dma_fence_work_commit(f);
65}
66
67#endif /* I915_SW_FENCE_WORK_H */
68