// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2021 ROHM Semiconductors
// regulator IRQ based event notification helpers
//
// Logic has been partially adapted from qcom-labibb driver.
//
// Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/regulator/driver.h>

#include "internal.h"

#define REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS 10000

struct regulator_irq {
	struct regulator_irq_data rdata;
	struct regulator_irq_desc desc;
	int irq;
	int retry_cnt;
	struct delayed_work isr_work;
};

/*
 * Should only be called from threaded handler to prevent potential deadlock
 */
static void rdev_flag_err(struct regulator_dev *rdev, int err)
{
	spin_lock(&rdev->err_lock);
	rdev->cached_err |= err;
	spin_unlock(&rdev->err_lock);
}

static void rdev_clear_err(struct regulator_dev *rdev, int err)
{
	spin_lock(&rdev->err_lock);
	rdev->cached_err &= ~err;
	spin_unlock(&rdev->err_lock);
}

static void regulator_notifier_isr_work(struct work_struct *work)
{
	struct regulator_irq *h;
	struct regulator_irq_desc *d;
	struct regulator_irq_data *rid;
	int ret = 0;
	int tmo, i;
	int num_rdevs;

	h = container_of(work, struct regulator_irq,
			    isr_work.work);
	d = &h->desc;
	rid = &h->rdata;
	num_rdevs = rid->num_states;

reread:
	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
		if (!d->die)
			return hw_protection_shutdown("Regulator HW failure? - no IC recovery",
						      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		ret = d->die(rid);
		/*
		 * If the 'last resort' IC recovery failed we will have
		 * nothing else left to do...
		 */
		if (ret)
			return hw_protection_shutdown("Regulator HW failure. IC recovery failed",
						      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);

		/*
		 * If h->die() was implemented we assume recovery has been
		 * attempted (probably regulator was shut down) and we
		 * just enable IRQ and bail-out.
		 */
		goto enable_out;
	}
	if (d->renable) {
		ret = d->renable(rid);

		if (ret == REGULATOR_FAILED_RETRY) {
			/* Driver could not get current status */
			h->retry_cnt++;
			if (!d->reread_ms)
				goto reread;

			tmo = d->reread_ms;
			goto reschedule;
		}
		if (ret) {
			/*
			 * IC status reading succeeded. Update the error info
			 * in case renable() changed it.
			 */
			for (i = 0; i < num_rdevs; i++) {
				struct regulator_err_state *stat;
				struct regulator_dev *rdev;

				stat = &rid->states[i];
				rdev = stat->rdev;
				rdev_clear_err(rdev, (~stat->errors) &
						      stat->possible_errs);
			}
			h->retry_cnt++;
			/*
			 * The problem indicated by the IC is still ON - no
			 * point in re-enabling the IRQ. Retry later.
			 */
			tmo = d->irq_off_ms;
			goto reschedule;
		}
	}

	/*
	 * Either IC reported problem cleared or no status checker was provided.
	 * If problems are gone - good. If not - then the IRQ will fire again
	 * and we'll have a new nice loop. In any case we should clear error
	 * flags here and re-enable IRQs.
	 */
	for (i = 0; i < num_rdevs; i++) {
		struct regulator_err_state *stat;
		struct regulator_dev *rdev;

		stat = &rid->states[i];
		rdev = stat->rdev;
		rdev_clear_err(rdev, stat->possible_errs);
	}

	/*
	 * Things have been seemingly successful => zero retry-counter.
	 */
	h->retry_cnt = 0;

enable_out:
	enable_irq(h->irq);

	return;

reschedule:
	if (!d->high_prio)
		mod_delayed_work(system_wq, &h->isr_work,
				 msecs_to_jiffies(tmo));
	else
		mod_delayed_work(system_highpri_wq, &h->isr_work,
				 msecs_to_jiffies(tmo));
}
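
/*
 * Example (not part of the upstream file): a minimal sketch of the ->renable()
 * status checker used by the re-enable logic above. The my_pmic_* names and
 * the MY_PMIC_STATUS register/MY_PMIC_OVC_STAT bit are hypothetical. The
 * return convention follows what this work function expects: 0 when the
 * problem has cleared, a positive value while it persists (with
 * states[].errors updated accordingly), and REGULATOR_FAILED_RETRY when the
 * status could not be read.
 *
 *	static int my_pmic_renable(struct regulator_irq_data *rid)
 *	{
 *		struct my_pmic *pmic = rid->data;
 *		unsigned int val;
 *		int ret;
 *
 *		ret = regmap_read(pmic->regmap, MY_PMIC_STATUS, &val);
 *		if (ret)
 *			return REGULATOR_FAILED_RETRY;
 *
 *		if (val & MY_PMIC_OVC_STAT) {
 *			rid->states[0].errors = REGULATOR_ERROR_OVER_CURRENT;
 *			return 1;
 *		}
 *
 *		return 0;
 *	}
 */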

static irqreturn_t regulator_notifier_isr(int irq, void *data)
{
	struct regulator_irq *h = data;
	struct regulator_irq_desc *d;
	struct regulator_irq_data *rid;
	unsigned long rdev_map = 0;
	int num_rdevs;
	int ret, i;

	d = &h->desc;
	rid = &h->rdata;
	num_rdevs = rid->num_states;

	if (d->fatal_cnt)
		h->retry_cnt++;

	/*
	 * We spare a few cycles by not clearing the statuses prior to this
	 * call. The IC driver must initialize the status buffers for those
	 * rdevs which it indicates as having active events via rdev_map.
	 *
	 * Maybe we should clear them anyway, just to be on the safe side(?)
	 */
	ret = d->map_event(irq, rid, &rdev_map);

	/*
	 * If status reading fails (which is unlikely) we don't ack/disable
	 * IRQ but just increase fail count and retry when IRQ fires again.
	 * If retry_count exceeds the given safety limit we call IC specific die
	 * handler which can try disabling regulator(s).
	 *
	 * If no die handler is given we will just power-off as a last resort.
	 *
	 * We could try disabling all associated rdevs - but we might shoot
	 * ourselves in the head and leave the problematic regulator enabled. So
	 * if IC has no die-handler populated we just assume the regulator
	 * can't be disabled.
	 */
	if (unlikely(ret == REGULATOR_FAILED_RETRY))
		goto fail_out;

	h->retry_cnt = 0;
	/*
	 * Let's not disable IRQ if there were no status bits for us. We'd
	 * better leave spurious IRQ handling to genirq
	 */
	if (ret || !rdev_map)
		return IRQ_NONE;

	/*
	 * Some events are bogus if the regulator is disabled. Skip such events
	 * if all relevant regulators are disabled
	 */
	if (d->skip_off) {
		for_each_set_bit(i, &rdev_map, num_rdevs) {
			struct regulator_dev *rdev;
			const struct regulator_ops *ops;

			rdev = rid->states[i].rdev;
			ops = rdev->desc->ops;

			/*
			 * If any of the flagged regulators is enabled we do
			 * handle this
			 */
			if (ops->is_enabled(rdev))
				break;
		}
		if (i == num_rdevs)
			return IRQ_NONE;
	}

	/* Disable IRQ if HW keeps line asserted */
	if (d->irq_off_ms)
		disable_irq_nosync(irq);

	/*
	 * IRQ seems to be for us. Let's fire correct notifiers / store error
	 * flags
	 */
	for_each_set_bit(i, &rdev_map, num_rdevs) {
		struct regulator_err_state *stat;
		struct regulator_dev *rdev;

		stat = &rid->states[i];
		rdev = stat->rdev;

		rdev_dbg(rdev, "Sending regulator notification EVT 0x%lx\n",
			 stat->notifs);

		regulator_notifier_call_chain(rdev, stat->notifs, NULL);
		rdev_flag_err(rdev, stat->errors);
	}

	if (d->irq_off_ms) {
		if (!d->high_prio)
			schedule_delayed_work(&h->isr_work,
					      msecs_to_jiffies(d->irq_off_ms));
		else
			mod_delayed_work(system_highpri_wq,
					 &h->isr_work,
					 msecs_to_jiffies(d->irq_off_ms));
	}

	return IRQ_HANDLED;

fail_out:
	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
		/* If we have no recovery, just try to shut down straight away */
		if (!d->die) {
			hw_protection_shutdown("Regulator failure. Retry count exceeded",
					       REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		} else {
			ret = d->die(rid);
			/* If die() failed, shut down as a last attempt to save the HW */
			if (ret)
				hw_protection_shutdown("Regulator failure. Recovery failed",
						       REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		}
	}

	return IRQ_NONE;
}
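
/*
 * Example (not part of the upstream file): a minimal sketch of the ->die()
 * 'last resort' callback referenced by the failure handling above. The aim
 * is to force the affected regulators into a safe state once their status
 * can no longer be read; returning non-zero makes the helper fall back to
 * hw_protection_shutdown(). The my_pmic_* names and the MY_PMIC_SAFE_STATE
 * register are hypothetical.
 *
 *	static int my_pmic_die(struct regulator_irq_data *rid)
 *	{
 *		struct my_pmic *pmic = rid->data;
 *
 *		return regmap_write(pmic->regmap, MY_PMIC_SAFE_STATE, 1);
 *	}
 */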

static int init_rdev_state(struct device *dev, struct regulator_irq *h,
			   struct regulator_dev **rdev, int common_err,
			   int *rdev_err, int rdev_amount)
{
	int i;

	h->rdata.states = devm_kcalloc(dev, rdev_amount,
				       sizeof(*h->rdata.states), GFP_KERNEL);
	if (!h->rdata.states)
		return -ENOMEM;

	h->rdata.num_states = rdev_amount;
	h->rdata.data = h->desc.data;

	for (i = 0; i < rdev_amount; i++) {
		h->rdata.states[i].possible_errs = common_err;
		if (rdev_err)
			h->rdata.states[i].possible_errs |= *rdev_err++;
		h->rdata.states[i].rdev = *rdev++;
	}

	return 0;
}

static void init_rdev_errors(struct regulator_irq *h)
{
	int i;

	for (i = 0; i < h->rdata.num_states; i++)
		if (h->rdata.states[i].possible_errs)
			h->rdata.states[i].rdev->use_cached_err = true;
}

/**
 * regulator_irq_helper - register IRQ based regulator event/error notifier
 *
 * @dev:		device providing the IRQs
 * @d:			IRQ helper descriptor.
 * @irq:		IRQ used to inform events/errors to be notified.
 * @irq_flags:		Extra IRQ flags to be OR'ed with the default
 *			IRQF_ONESHOT when requesting the (threaded) irq.
 * @common_errs:	Errors which can be flagged by this IRQ for all rdevs.
 *			When IRQ is re-enabled these errors will be cleared
 *			from all associated regulators. Use this instead of the
 *			per_rdev_errs if you use
 *			regulator_irq_map_event_simple() for event mapping.
 * @per_rdev_errs:	Optional error flag array describing errors specific
 *			to only some of the regulators. These errors will be
 *			OR'ed with the common errors. If given, the array
 *			should contain rdev_amount flags. Can be set to NULL
 *			if there are no regulator-specific error flags for
 *			this IRQ.
 * @rdev:		Array of pointers to regulators associated with this
 *			IRQ.
 * @rdev_amount:	Number of regulators associated with this IRQ.
 *
 * Return: handle to irq_helper or an ERR_PTR() encoded error code.
 */
void *regulator_irq_helper(struct device *dev,
			   const struct regulator_irq_desc *d, int irq,
			   int irq_flags, int common_errs, int *per_rdev_errs,
			   struct regulator_dev **rdev, int rdev_amount)
{
	struct regulator_irq *h;
	int ret;

	if (!rdev_amount || !d || !d->map_event || !d->name)
		return ERR_PTR(-EINVAL);

	h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	h->irq = irq;
	h->desc = *d;
	h->desc.name = devm_kstrdup(dev, d->name, GFP_KERNEL);
	if (!h->desc.name)
		return ERR_PTR(-ENOMEM);

	ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
			      rdev_amount);
	if (ret)
		return ERR_PTR(ret);

	init_rdev_errors(h);

	if (h->desc.irq_off_ms)
		INIT_DELAYED_WORK(&h->isr_work, regulator_notifier_isr_work);

	ret = request_threaded_irq(h->irq, NULL, regulator_notifier_isr,
				   IRQF_ONESHOT | irq_flags, h->desc.name, h);
	if (ret) {
		dev_err(dev, "Failed to request IRQ %d\n", irq);

		return ERR_PTR(ret);
	}

	return h;
}
EXPORT_SYMBOL_GPL(regulator_irq_helper);
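
/*
 * Example (not part of the upstream file): a minimal sketch of registering
 * this helper from a PMIC driver probe path. The my_pmic_* names, the
 * over-current IRQ and the renable() callback are hypothetical; the mapping
 * uses regulator_irq_map_event_simple() because this IRQ flags exactly one
 * error for exactly one rdev. Note that the descriptor is copied and its
 * name is duplicated, so a stack-local descriptor is fine.
 *
 *	struct regulator_irq_desc ovd = {
 *		.name = "my-pmic-ovc",
 *		.irq_off_ms = 1000,
 *		.map_event = regulator_irq_map_event_simple,
 *		.renable = my_pmic_renable,
 *		.data = pmic,
 *	};
 *
 *	pmic->ovc_irq_helper = regulator_irq_helper(pmic->dev, &ovd,
 *						    pmic->ovc_irq, 0,
 *						    REGULATOR_ERROR_OVER_CURRENT,
 *						    NULL, &rdev, 1);
 *	if (IS_ERR(pmic->ovc_irq_helper))
 *		return PTR_ERR(pmic->ovc_irq_helper);
 */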

/**
 * regulator_irq_helper_cancel - drop IRQ based regulator event/error notifier
 *
 * @handle:		Pointer to handle returned by a successful call to
 *			regulator_irq_helper(). Will be NULLed upon return.
 *
 * The associated IRQ is released and work is cancelled when the function
 * returns.
 */
void regulator_irq_helper_cancel(void **handle)
{
	if (handle && *handle) {
		struct regulator_irq *h = *handle;

		free_irq(h->irq, h);
		if (h->desc.irq_off_ms)
			cancel_delayed_work_sync(&h->isr_work);

		*handle = NULL;
	}
}
EXPORT_SYMBOL_GPL(regulator_irq_helper_cancel);
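
/*
 * Example (not part of the upstream file): the remove path of the
 * hypothetical driver above would drop the notifier by passing the address
 * of the stored (void *) handle, which is NULLed on return:
 *
 *	regulator_irq_helper_cancel(&pmic->ovc_irq_helper);
 */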

/**
 * regulator_irq_map_event_simple - regulator IRQ notification for trivial IRQs
 *
 * @irq:	Number of IRQ that occurred
 * @rid:	Information about the event IRQ indicates
 * @dev_mask:	mask indicating the regulator originating the IRQ
 *
 * Regulators whose IRQ has a single, well-defined purpose (always indicates
 * exactly one event, and is relevant to exactly one regulator device) can
 * use this function as the map_event callback for their regulator IRQ
 * notification helper. Exactly one rdev and exactly one error (in the
 * "common_errs" field) can be given at IRQ helper registration for
 * regulator_irq_map_event_simple() to be viable.
 *
 * Return: 0.
 */
int regulator_irq_map_event_simple(int irq, struct regulator_irq_data *rid,
			    unsigned long *dev_mask)
{
	int err = rid->states[0].possible_errs;

	*dev_mask = 1;
	/*
	 * This helper should only be used in a situation where the IRQ
	 * can indicate only one type of problem for one specific rdev.
	 * Something fishy is going on if we have multiple rdevs or ERROR
	 * flags here.
	 */
	if (WARN_ON(rid->num_states != 1 || hweight32(err) != 1))
		return 0;

	rid->states[0].errors = err;
	rid->states[0].notifs = regulator_err2notif(err);

	return 0;
}
EXPORT_SYMBOL_GPL(regulator_irq_map_event_simple);
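
/*
 * Example (not part of the upstream file): when an IRQ covers more than one
 * regulator or more than one error type, the driver provides its own
 * map_event callback instead of regulator_irq_map_event_simple(). A rough
 * sketch, with a hypothetical MY_PMIC_OVC_STATUS register whose bit i flags
 * an over-current on rdev i:
 *
 *	static int my_pmic_map_event(int irq, struct regulator_irq_data *rid,
 *				     unsigned long *dev_mask)
 *	{
 *		struct my_pmic *pmic = rid->data;
 *		unsigned int val;
 *		int i, ret;
 *
 *		ret = regmap_read(pmic->regmap, MY_PMIC_OVC_STATUS, &val);
 *		if (ret)
 *			return REGULATOR_FAILED_RETRY;
 *
 *		*dev_mask = 0;
 *		for (i = 0; i < rid->num_states; i++) {
 *			if (!(val & BIT(i)))
 *				continue;
 *
 *			*dev_mask |= BIT(i);
 *			rid->states[i].errors = REGULATOR_ERROR_OVER_CURRENT;
 *			rid->states[i].notifs = REGULATOR_EVENT_OVER_CURRENT;
 *		}
 *
 *		return 0;
 *	}
 */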