1/*	$NetBSD: nouveau_nvkm_core_notify.c,v 1.4 2021/12/18 23:45:34 riastradh Exp $	*/
2
3/*
4 * Copyright 2014 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Ben Skeggs <bskeggs@redhat.com>
25 */
26#include <sys/cdefs.h>
27__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_core_notify.c,v 1.4 2021/12/18 23:45:34 riastradh Exp $");
28
29#include <core/notify.h>
30#include <core/event.h>
31
32static inline void
33nvkm_notify_put_locked(struct nvkm_notify *notify)
34{
35	if (notify->block++ == 0)
36		nvkm_event_put(notify->event, notify->types, notify->index);
37}
38
39void
40nvkm_notify_put(struct nvkm_notify *notify)
41{
42	struct nvkm_event *event = notify->event;
43	unsigned long flags;
44	if (likely(event) &&
45	    test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
46		spin_lock_irqsave(&event->refs_lock, flags);
47		nvkm_notify_put_locked(notify);
48		spin_unlock_irqrestore(&event->refs_lock, flags);
49		if (test_bit(NVKM_NOTIFY_WORK, &notify->flags))
50			flush_work(&notify->work);
51	}
52}
53
54static inline void
55nvkm_notify_get_locked(struct nvkm_notify *notify)
56{
57	if (--notify->block == 0)
58		nvkm_event_get(notify->event, notify->types, notify->index);
59}
60
61void
62nvkm_notify_get(struct nvkm_notify *notify)
63{
64	struct nvkm_event *event = notify->event;
65	unsigned long flags;
66	if (likely(event) &&
67	    !test_and_set_bit(NVKM_NOTIFY_USER, &notify->flags)) {
68		spin_lock_irqsave(&event->refs_lock, flags);
69		nvkm_notify_get_locked(notify);
70		spin_unlock_irqrestore(&event->refs_lock, flags);
71	}
72}
73
/*
 * Invoke the notify's handler.  Runs either synchronously from
 * nvkm_notify_send() or from the workqueue (nvkm_notify_work); in both
 * cases the send path already dropped this notify's enable reference
 * via nvkm_notify_put_locked().
 *
 * If the handler returns NVKM_NOTIFY_KEEP, re-take the reference so
 * further events keep being delivered; note the short-circuit || leaves
 * NVKM_NOTIFY_USER set in that case.  Otherwise NVKM_NOTIFY_USER is
 * cleared here, so a later nvkm_notify_get() can re-enable delivery.
 * NOTE(review): if the bit was already clear — presumably the user
 * raced with nvkm_notify_put() — the get below balances the extra put
 * done by the send path; confirm against callers before changing.
 */
static inline void
nvkm_notify_func(struct nvkm_notify *notify)
{
	struct nvkm_event *event = notify->event;
	int ret = notify->func(notify);
	unsigned long flags;
	if ((ret == NVKM_NOTIFY_KEEP) ||
	    !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
		spin_lock_irqsave(&event->refs_lock, flags);
		nvkm_notify_get_locked(notify);
		spin_unlock_irqrestore(&event->refs_lock, flags);
	}
}
87
88static void
89nvkm_notify_work(struct work_struct *work)
90{
91	struct nvkm_notify *notify = container_of(work, typeof(*notify), work);
92	nvkm_notify_func(notify);
93}
94
/*
 * Deliver an event payload to a single notify.  The caller must hold
 * event->list_lock (asserted below), and @size must match the reply
 * size the notify was initialized with.
 *
 * The notify is disabled (enable reference dropped) before the handler
 * runs; nvkm_notify_func() re-enables it if the handler returns
 * NVKM_NOTIFY_KEEP.
 */
void
nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
{
	struct nvkm_event *event = notify->event;
	unsigned long flags;

	assert_spin_locked(&event->list_lock);
	BUG_ON(size != notify->size);

	spin_lock_irqsave(&event->refs_lock, flags);
	if (notify->block) {
		/* Notify currently disabled: discard this event. */
		spin_unlock_irqrestore(&event->refs_lock, flags);
		return;
	}
	nvkm_notify_put_locked(notify);
	spin_unlock_irqrestore(&event->refs_lock, flags);

	if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) {
		/*
		 * Deferred delivery: copy the payload into the notify's
		 * private buffer (kmalloc'd in nvkm_notify_init()) and run
		 * the handler from a workqueue.  __UNCONST strips the
		 * const qualifier NetBSD places on notify->data.
		 */
		memcpy(__UNCONST(notify->data), data, size);
		schedule_work(&notify->work);
	} else {
		/*
		 * Synchronous delivery: expose the caller's buffer only
		 * for the duration of the callback.
		 */
		notify->data = data;
		nvkm_notify_func(notify);
		notify->data = NULL;
	}
}
121
122void
123nvkm_notify_fini(struct nvkm_notify *notify)
124{
125	unsigned long flags;
126	if (notify->event) {
127		nvkm_notify_put(notify);
128		spin_lock_irqsave(&notify->event->list_lock, flags);
129		list_del(&notify->head);
130		spin_unlock_irqrestore(&notify->event->list_lock, flags);
131		kfree(__UNCONST(notify->data));
132		notify->event = NULL;
133	}
134}
135
/*
 * Initialize @notify against @event.
 *
 * @object: owner object, passed through to the event's ctor
 * @event:  event source to attach to
 * @func:   handler invoked on delivery
 * @work:   if true, deliver via workqueue; a private @reply-byte
 *          payload buffer is kmalloc'd (freed in nvkm_notify_fini())
 * @data:   event-specific ctor arguments
 * @size:   length of @data
 * @reply:  expected payload size; must equal the size the ctor selects
 * @notify: the notify to initialize
 *
 * Returns 0 on success, -ENODEV when the event has no refs array set
 * up, -EINVAL on a reply-size mismatch, -ENOMEM on buffer allocation
 * failure, or whatever the event ctor returned.
 *
 * The notify starts blocked (block == 1); the user must call
 * nvkm_notify_get() to begin receiving events.
 *
 * NOTE(review): the comma operators below are deliberate sequencing —
 * "(notify->event = event), event->refs" assigns before testing,
 * "(ret = -EINVAL, ...)" stages the prospective error before the size
 * check, and "(ret = 0, work)" clears ret before branching.
 */
int
nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event,
		 int (*func)(struct nvkm_notify *), bool work,
		 void *data, u32 size, u32 reply,
		 struct nvkm_notify *notify)
{
	unsigned long flags;
	int ret = -ENODEV;
	if ((notify->event = event), event->refs) {
		ret = event->func->ctor(object, data, size, notify);
		if (ret == 0 && (ret = -EINVAL, notify->size == reply)) {
			notify->flags = 0;
			notify->block = 1;
			notify->func = func;
			notify->data = NULL;
			if (ret = 0, work) {
				INIT_WORK(&notify->work, nvkm_notify_work);
				set_bit(NVKM_NOTIFY_WORK, &notify->flags);
				notify->data = kmalloc(reply, GFP_KERNEL);
				if (!notify->data)
					ret = -ENOMEM;
			}
		}
		if (ret == 0) {
			spin_lock_irqsave(&event->list_lock, flags);
			list_add_tail(&notify->head, &event->list);
			spin_unlock_irqrestore(&event->list_lock, flags);
		}
	}
	/* On any failure, detach so nvkm_notify_fini() is a no-op. */
	if (ret)
		notify->event = NULL;
	return ret;
}
169