// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Diolan DLN-2 USB-ADC adapter
 *
 * Copyright (c) 2017 Jack Andersen
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mfd/dln2.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

#define DLN2_ADC_MOD_NAME "dln2-adc"

#define DLN2_ADC_ID             0x06

#define DLN2_ADC_GET_CHANNEL_COUNT	DLN2_CMD(0x01, DLN2_ADC_ID)
#define DLN2_ADC_ENABLE			DLN2_CMD(0x02, DLN2_ADC_ID)
#define DLN2_ADC_DISABLE		DLN2_CMD(0x03, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_ENABLE		DLN2_CMD(0x05, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_DISABLE	DLN2_CMD(0x06, DLN2_ADC_ID)
#define DLN2_ADC_SET_RESOLUTION		DLN2_CMD(0x08, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_VAL	DLN2_CMD(0x0A, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_ALL_VAL	DLN2_CMD(0x0B, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_SET_CFG	DLN2_CMD(0x0C, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_CFG	DLN2_CMD(0x0D, DLN2_ADC_ID)
#define DLN2_ADC_CONDITION_MET_EV	DLN2_CMD(0x10, DLN2_ADC_ID)

#define DLN2_ADC_EVENT_NONE		0
#define DLN2_ADC_EVENT_BELOW		1
#define DLN2_ADC_EVENT_LEVEL_ABOVE	2
#define DLN2_ADC_EVENT_OUTSIDE		3
#define DLN2_ADC_EVENT_INSIDE		4
#define DLN2_ADC_EVENT_ALWAYS		5

#define DLN2_ADC_MAX_CHANNELS 8
#define DLN2_ADC_DATA_BITS 10

/*
 * Plays similar role to iio_demux_table in subsystem core; except allocated
 * in a fixed 8-element array.
 */
struct dln2_adc_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
};

struct dln2_adc {
	struct platform_device *pdev;
	struct iio_chan_spec iio_channels[DLN2_ADC_MAX_CHANNELS + 1];
	int port, trigger_chan;
	struct iio_trigger *trig;
	struct mutex mutex;
	/* Cached sample period in milliseconds */
	unsigned int sample_period;
	/* Demux table */
	unsigned int demux_count;
	struct dln2_adc_demux_table demux[DLN2_ADC_MAX_CHANNELS];
	/* Precomputed timestamp padding offset and length */
	unsigned int ts_pad_offset, ts_pad_length;
};

struct dln2_adc_port_chan {
	u8 port;
	u8 chan;
};

struct dln2_adc_get_all_vals {
	__le16 channel_mask;
	__le16 values[DLN2_ADC_MAX_CHANNELS];
};

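/*
 * Append a copy range to the demux table, merging it into the previous
 * entry when both the source and destination ranges are contiguous with it.
 */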
static void dln2_adc_add_demux(struct dln2_adc *dln2,
	unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	struct dln2_adc_demux_table *p = dln2->demux_count ?
		&dln2->demux[dln2->demux_count - 1] : NULL;

	if (p && p->from + p->length == in_loc &&
		p->to + p->length == out_loc) {
		p->length += length;
	} else if (dln2->demux_count < DLN2_ADC_MAX_CHANNELS) {
		p = &dln2->demux[dln2->demux_count++];
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
	}
}

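/*
 * Rebuild the demux table that maps the fixed 8-channel device report onto
 * the current active_scan_mask layout, and precompute the zero padding
 * between the last sample and the timestamp. For example, with only
 * channels 1 and 3 enabled this yields {from = 2, to = 0, length = 2} and
 * {from = 6, to = 2, length = 2}.
 */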
static void dln2_adc_update_demux(struct dln2_adc *dln2)
{
	int in_ind = -1, out_ind;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);

	/* Clear out any old demux */
	dln2->demux_count = 0;

	/* Optimize all 8-channels case */
	if (indio_dev->masklength &&
	    (*indio_dev->active_scan_mask & 0xff) == 0xff) {
		dln2_adc_add_demux(dln2, 0, 0, 16);
		dln2->ts_pad_offset = 0;
		dln2->ts_pad_length = 0;
		return;
	}

	/* Build demux table from fixed 8-channels to active_scan_mask */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		/* Handle timestamp separately */
		if (out_ind == DLN2_ADC_MAX_CHANNELS)
			break;
		for (++in_ind; in_ind != out_ind; ++in_ind)
			in_loc += 2;
		dln2_adc_add_demux(dln2, in_loc, out_loc, 2);
		out_loc += 2;
		in_loc += 2;
	}

	if (indio_dev->scan_timestamp) {
		size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;

		dln2->ts_pad_offset = out_loc;
		dln2->ts_pad_length = ts_offset * sizeof(int64_t) - out_loc;
	} else {
		dln2->ts_pad_offset = 0;
		dln2->ts_pad_length = 0;
	}
}

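/* Query the device for the number of ADC channels on this port. */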
static int dln2_adc_get_chan_count(struct dln2_adc *dln2)
{
	int ret;
	u8 port = dln2->port;
	u8 count;
	int olen = sizeof(count);

	ret = dln2_transfer(dln2->pdev, DLN2_ADC_GET_CHANNEL_COUNT,
			    &port, sizeof(port), &count, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		return ret;
	}
	if (olen < sizeof(count))
		return -EPROTO;

	return count;
}

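/*
 * Set the port resolution to 10 bits; the resolution value travels in the
 * chan field of the port/channel pair.
 */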
static int dln2_adc_set_port_resolution(struct dln2_adc *dln2)
{
	int ret;
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = DLN2_ADC_DATA_BITS,
	};

	ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_SET_RESOLUTION,
			       &port_chan, sizeof(port_chan));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

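/* Enable or disable a single ADC channel. */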
static int dln2_adc_set_chan_enabled(struct dln2_adc *dln2,
				     int channel, bool enable)
{
	int ret;
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = channel,
	};
	u16 cmd = enable ? DLN2_ADC_CHANNEL_ENABLE : DLN2_ADC_CHANNEL_DISABLE;

	ret = dln2_transfer_tx(dln2->pdev, cmd, &port_chan, sizeof(port_chan));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

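/*
 * Enable or disable the whole ADC port. If enabling fails, the device
 * reports a mask of conflicting pins, which is passed back through
 * conflict_out when the caller provides it.
 */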
static int dln2_adc_set_port_enabled(struct dln2_adc *dln2, bool enable,
				     u16 *conflict_out)
{
	int ret;
	u8 port = dln2->port;
	__le16 conflict;
	int olen = sizeof(conflict);
	u16 cmd = enable ? DLN2_ADC_ENABLE : DLN2_ADC_DISABLE;

	if (conflict_out)
		*conflict_out = 0;

	ret = dln2_transfer(dln2->pdev, cmd, &port, sizeof(port),
			    &conflict, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s(%d)\n",
			__func__, (int)enable);
		if (conflict_out && enable && olen >= sizeof(conflict))
			*conflict_out = le16_to_cpu(conflict);
		return ret;
	}
	if (enable && olen < sizeof(conflict))
		return -EPROTO;

	return ret;
}

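/*
 * Configure a channel to emit DLN2_ADC_CONDITION_MET_EV events every
 * 'period' milliseconds; a period of zero disables the events.
 */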
static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
	unsigned int channel, unsigned int period)
{
	int ret;
	struct {
		struct dln2_adc_port_chan port_chan;
		__u8 type;
		__le16 period;
		__le16 low;
		__le16 high;
	} __packed set_cfg = {
		.port_chan.port = dln2->port,
		.port_chan.chan = channel,
		.type = period ? DLN2_ADC_EVENT_ALWAYS : DLN2_ADC_EVENT_NONE,
		.period = cpu_to_le16(period)
	};

	ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_CHANNEL_SET_CFG,
			       &set_cfg, sizeof(set_cfg));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

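/*
 * Perform a single blocking read of one channel: enable the channel and
 * the port, read the value twice (the first read after enabling returns
 * zero), then disable both again.
 */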
static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
{
	int ret, i;
	u16 conflict;
	__le16 value;
	int olen = sizeof(value);
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = channel,
	};

	ret = dln2_adc_set_chan_enabled(dln2, channel, true);
	if (ret < 0)
		return ret;

	ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
	if (ret < 0) {
		if (conflict) {
			dev_err(&dln2->pdev->dev,
				"ADC pins conflict with mask %04X\n",
				(int)conflict);
			ret = -EBUSY;
		}
		goto disable_chan;
	}

	/*
	 * Call GET_VAL twice due to initial zero-return immediately after
	 * enabling channel.
	 */
	for (i = 0; i < 2; ++i) {
		ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_VAL,
				    &port_chan, sizeof(port_chan),
				    &value, &olen);
		if (ret < 0) {
			dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
			goto disable_port;
		}
		if (olen < sizeof(value)) {
			ret = -EPROTO;
			goto disable_port;
		}
	}

	ret = le16_to_cpu(value);

disable_port:
	dln2_adc_set_port_enabled(dln2, false, NULL);
disable_chan:
	dln2_adc_set_chan_enabled(dln2, channel, false);

	return ret;
}

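/* Read the latest values of all channels on the port in one transfer. */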
static int dln2_adc_read_all(struct dln2_adc *dln2,
			     struct dln2_adc_get_all_vals *get_all_vals)
{
	int ret;
	__u8 port = dln2->port;
	int olen = sizeof(*get_all_vals);

	ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_ALL_VAL,
			    &port, sizeof(port), get_all_vals, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		return ret;
	}
	if (olen < sizeof(*get_all_vals))
		return -EPROTO;

	return ret;
}

static int dln2_adc_read_raw(struct iio_dev *indio_dev,
			     struct iio_chan_spec const *chan,
			     int *val,
			     int *val2,
			     long mask)
{
	int ret;
	unsigned int microhertz;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		ret = iio_device_claim_direct_mode(indio_dev);
		if (ret < 0)
			return ret;

		mutex_lock(&dln2->mutex);
		ret = dln2_adc_read(dln2, chan->channel);
		mutex_unlock(&dln2->mutex);

		iio_device_release_direct_mode(indio_dev);

		if (ret < 0)
			return ret;

		*val = ret;
		return IIO_VAL_INT;

	case IIO_CHAN_INFO_SCALE:
		/*
		 * Voltage reference is fixed at 3.3 V:
		 *  3.3 / (1 << 10) * 1000000000
		 */
		*val = 0;
		*val2 = 3222656;
		return IIO_VAL_INT_PLUS_NANO;

	case IIO_CHAN_INFO_SAMP_FREQ:
		if (dln2->sample_period) {
			microhertz = 1000000000 / dln2->sample_period;
			*val = microhertz / 1000000;
			*val2 = microhertz % 1000000;
		} else {
			*val = 0;
			*val2 = 0;
		}

		return IIO_VAL_INT_PLUS_MICRO;

	default:
		return -EINVAL;
	}
}

static int dln2_adc_write_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int val,
			      int val2,
			      long mask)
{
	int ret;
	unsigned int microhertz;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		microhertz = 1000000 * val + val2;

		mutex_lock(&dln2->mutex);

		dln2->sample_period =
			microhertz ? 1000000000 / microhertz : UINT_MAX;
		if (dln2->sample_period > 65535) {
			dln2->sample_period = 65535;
			dev_warn(&dln2->pdev->dev,
				 "clamping period to 65535ms\n");
		}

		/*
		 * The first requested channel is arbitrated as a shared
		 * trigger source, so only one event is registered with the
		 * DLN. The event handler will then read all enabled channel
		 * values using DLN2_ADC_CHANNEL_GET_ALL_VAL to maintain
		 * synchronization between ADC readings.
		 */
		if (dln2->trigger_chan != -1)
			ret = dln2_adc_set_chan_period(dln2,
				dln2->trigger_chan, dln2->sample_period);
		else
			ret = 0;

		mutex_unlock(&dln2->mutex);

		return ret;

	default:
		return -EINVAL;
	}
}

static int dln2_update_scan_mode(struct iio_dev *indio_dev,
				 const unsigned long *scan_mask)
{
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	int chan_count = indio_dev->num_channels - 1;
	int ret, i, j;

	mutex_lock(&dln2->mutex);

	for (i = 0; i < chan_count; ++i) {
		ret = dln2_adc_set_chan_enabled(dln2, i,
						test_bit(i, scan_mask));
		if (ret < 0) {
			for (j = 0; j < i; ++j)
				dln2_adc_set_chan_enabled(dln2, j, false);
			mutex_unlock(&dln2->mutex);
			dev_err(&dln2->pdev->dev,
				"Unable to enable ADC channel %d\n", i);
			return -EBUSY;
		}
	}

	dln2_adc_update_demux(dln2);

	mutex_unlock(&dln2->mutex);

	return 0;
}

#define DLN2_ADC_CHAN(lval, idx) {					\
	lval.type = IIO_VOLTAGE;					\
	lval.channel = idx;						\
	lval.indexed = 1;						\
	lval.info_mask_separate = BIT(IIO_CHAN_INFO_RAW);		\
	lval.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) |	\
				       BIT(IIO_CHAN_INFO_SAMP_FREQ);	\
	lval.scan_index = idx;						\
	lval.scan_type.sign = 'u';					\
	lval.scan_type.realbits = DLN2_ADC_DATA_BITS;			\
	lval.scan_type.storagebits = 16;				\
	lval.scan_type.endianness = IIO_LE;				\
}

/* Assignment version of IIO_CHAN_SOFT_TIMESTAMP */
#define IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(lval, _si) {	\
	lval.type = IIO_TIMESTAMP;			\
	lval.channel = -1;				\
	lval.scan_index = _si;				\
	lval.scan_type.sign = 's';			\
	lval.scan_type.realbits = 64;			\
	lval.scan_type.storagebits = 64;		\
}

static const struct iio_info dln2_adc_info = {
	.read_raw = dln2_adc_read_raw,
	.write_raw = dln2_adc_write_raw,
	.update_scan_mode = dln2_update_scan_mode,
};

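/*
 * Triggered-buffer handler: read all channel values, demux them into the
 * active scan layout, zero the padding before the timestamp and push the
 * sample to the buffers.
 */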
static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct {
		__le16 values[DLN2_ADC_MAX_CHANNELS];
		int64_t timestamp_space;
	} data;
	struct dln2_adc_get_all_vals dev_data;
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	const struct dln2_adc_demux_table *t;
	int ret, i;

	mutex_lock(&dln2->mutex);
	ret = dln2_adc_read_all(dln2, &dev_data);
	mutex_unlock(&dln2->mutex);
	if (ret < 0)
		goto done;

	/* Demux operation */
	for (i = 0; i < dln2->demux_count; ++i) {
		t = &dln2->demux[i];
		memcpy((void *)data.values + t->to,
		       (void *)dev_data.values + t->from, t->length);
	}

	/* Zero padding space between values and timestamp */
	if (dln2->ts_pad_length)
		memset((void *)data.values + dln2->ts_pad_offset,
		       0, dln2->ts_pad_length);

	iio_push_to_buffers_with_timestamp(indio_dev, &data,
					   iio_get_time_ns(indio_dev));

done:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

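/*
 * Buffer postenable: enable the ADC port and program the first enabled
 * channel as the periodic trigger source.
 */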
static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
	int ret;
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	u16 conflict;
	unsigned int trigger_chan;

	mutex_lock(&dln2->mutex);

	/* Enable ADC */
	ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
	if (ret < 0) {
		mutex_unlock(&dln2->mutex);
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		if (conflict) {
			dev_err(&dln2->pdev->dev,
				"ADC pins conflict with mask %04X\n",
				(int)conflict);
			ret = -EBUSY;
		}
		return ret;
	}

	/* Assign trigger channel based on first enabled channel */
	trigger_chan = find_first_bit(indio_dev->active_scan_mask,
				      indio_dev->masklength);
	if (trigger_chan < DLN2_ADC_MAX_CHANNELS) {
		dln2->trigger_chan = trigger_chan;
		ret = dln2_adc_set_chan_period(dln2, dln2->trigger_chan,
					       dln2->sample_period);
		mutex_unlock(&dln2->mutex);
		if (ret < 0) {
			dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
			return ret;
		}
	} else {
		dln2->trigger_chan = -1;
		mutex_unlock(&dln2->mutex);
	}

	return 0;
}

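/*
 * Buffer predisable: stop the periodic events on the trigger channel and
 * disable the ADC port.
 */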
static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
	int ret;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	mutex_lock(&dln2->mutex);

	/* Disable trigger channel */
	if (dln2->trigger_chan != -1) {
		dln2_adc_set_chan_period(dln2, dln2->trigger_chan, 0);
		dln2->trigger_chan = -1;
	}

	/* Disable ADC */
	ret = dln2_adc_set_port_enabled(dln2, false, NULL);

	mutex_unlock(&dln2->mutex);
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
	.postenable = dln2_adc_triggered_buffer_postenable,
	.predisable = dln2_adc_triggered_buffer_predisable,
};

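/* DLN2_ADC_CONDITION_MET_EV callback: kick the IIO trigger. */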
static void dln2_adc_event(struct platform_device *pdev, u16 echo,
			   const void *data, int len)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	/* Called via URB completion handler */
	iio_trigger_poll(dln2->trig);
}

static int dln2_adc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dln2_adc *dln2;
	struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct iio_dev *indio_dev;
	int i, ret, chans;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*dln2));
	if (!indio_dev) {
		dev_err(dev, "failed allocating iio device\n");
		return -ENOMEM;
	}

	dln2 = iio_priv(indio_dev);
	dln2->pdev = pdev;
	dln2->port = pdata->port;
	dln2->trigger_chan = -1;
	mutex_init(&dln2->mutex);

	platform_set_drvdata(pdev, indio_dev);

	ret = dln2_adc_set_port_resolution(dln2);
	if (ret < 0) {
		dev_err(dev, "failed to set ADC resolution to 10 bits\n");
		return ret;
	}

	chans = dln2_adc_get_chan_count(dln2);
	if (chans < 0) {
		dev_err(dev, "failed to get channel count: %d\n", chans);
		return chans;
	}
	if (chans > DLN2_ADC_MAX_CHANNELS) {
		chans = DLN2_ADC_MAX_CHANNELS;
		dev_warn(dev, "clamping channels to %d\n",
			 DLN2_ADC_MAX_CHANNELS);
	}

	for (i = 0; i < chans; ++i)
		DLN2_ADC_CHAN(dln2->iio_channels[i], i)
	IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(dln2->iio_channels[i], i);

	indio_dev->name = DLN2_ADC_MOD_NAME;
	indio_dev->info = &dln2_adc_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = dln2->iio_channels;
	indio_dev->num_channels = chans + 1;
	indio_dev->setup_ops = &dln2_adc_buffer_setup_ops;

	dln2->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
					    indio_dev->name,
					    iio_device_id(indio_dev));
	if (!dln2->trig) {
		dev_err(dev, "failed to allocate trigger\n");
		return -ENOMEM;
	}
	iio_trigger_set_drvdata(dln2->trig, dln2);
	ret = devm_iio_trigger_register(dev, dln2->trig);
	if (ret) {
		dev_err(dev, "failed to register trigger: %d\n", ret);
		return ret;
	}
	iio_trigger_set_immutable(indio_dev, dln2->trig);

	ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
					      dln2_adc_trigger_h,
					      &dln2_adc_buffer_setup_ops);
	if (ret) {
		dev_err(dev, "failed to allocate triggered buffer: %d\n", ret);
		return ret;
	}

	ret = dln2_register_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV,
				     dln2_adc_event);
	if (ret) {
		dev_err(dev, "failed to setup DLN2 periodic event: %d\n", ret);
		return ret;
	}

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(dev, "failed to register iio device: %d\n", ret);
		goto unregister_event;
	}

	return ret;

unregister_event:
	dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV);

	return ret;
}

static void dln2_adc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);

	iio_device_unregister(indio_dev);
	dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV);
}

static struct platform_driver dln2_adc_driver = {
	.driver.name	= DLN2_ADC_MOD_NAME,
	.probe		= dln2_adc_probe,
	.remove_new	= dln2_adc_remove,
};

module_platform_driver(dln2_adc_driver);

MODULE_AUTHOR("Jack Andersen <jackoalan@gmail.com>");
MODULE_DESCRIPTION("Driver for the Diolan DLN2 ADC interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dln2-adc");