// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Quadrature Encoder Peripheral driver
 *
 * Copyright (C) 2019-2021 Intel Corporation
 *
 * Author: Felipe Balbi (Intel)
 * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
 * Author: Raymond Tan <raymond.tan@intel.com>
 */
#include <linux/counter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#define INTEL_QEPCON			0x00
#define INTEL_QEPFLT			0x04
#define INTEL_QEPCOUNT			0x08
#define INTEL_QEPMAX			0x0c
#define INTEL_QEPWDT			0x10
#define INTEL_QEPCAPDIV			0x14
#define INTEL_QEPCNTR			0x18
#define INTEL_QEPCAPBUF			0x1c
#define INTEL_QEPINT_STAT		0x20
#define INTEL_QEPINT_MASK		0x24

/* QEPCON */
#define INTEL_QEPCON_EN			BIT(0)
#define INTEL_QEPCON_FLT_EN		BIT(1)
#define INTEL_QEPCON_EDGE_A		BIT(2)
#define INTEL_QEPCON_EDGE_B		BIT(3)
#define INTEL_QEPCON_EDGE_INDX		BIT(4)
#define INTEL_QEPCON_SWPAB		BIT(5)
#define INTEL_QEPCON_OP_MODE		BIT(6)
#define INTEL_QEPCON_PH_ERR		BIT(7)
#define INTEL_QEPCON_COUNT_RST_MODE	BIT(8)
#define INTEL_QEPCON_INDX_GATING_MASK	GENMASK(10, 9)
#define INTEL_QEPCON_INDX_GATING(n)	(((n) & 3) << 9)
#define INTEL_QEPCON_INDX_PAL_PBL	INTEL_QEPCON_INDX_GATING(0)
#define INTEL_QEPCON_INDX_PAL_PBH	INTEL_QEPCON_INDX_GATING(1)
#define INTEL_QEPCON_INDX_PAH_PBL	INTEL_QEPCON_INDX_GATING(2)
#define INTEL_QEPCON_INDX_PAH_PBH	INTEL_QEPCON_INDX_GATING(3)
#define INTEL_QEPCON_CAP_MODE		BIT(11)
#define INTEL_QEPCON_FIFO_THRE_MASK	GENMASK(14, 12)
#define INTEL_QEPCON_FIFO_THRE(n)	((((n) - 1) & 7) << 12)
#define INTEL_QEPCON_FIFO_EMPTY		BIT(15)

/* QEPFLT */
#define INTEL_QEPFLT_MAX_COUNT(n)	((n) & 0x1fffff)

/* QEPINT */
#define INTEL_QEPINT_FIFOCRIT		BIT(5)
#define INTEL_QEPINT_FIFOENTRY		BIT(4)
#define INTEL_QEPINT_QEPDIR		BIT(3)
#define INTEL_QEPINT_QEPRST_UP		BIT(2)
#define INTEL_QEPINT_QEPRST_DOWN	BIT(1)
#define INTEL_QEPINT_WDT		BIT(0)

#define INTEL_QEPINT_MASK_ALL		GENMASK(5, 0)

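/*
 * Counter clock period used to convert the spike filter length between
 * clock cycles and nanoseconds. The 10 ns value below corresponds to the
 * QEP block running from a 100 MHz functional clock (assumption derived
 * from this constant, not stated elsewhere in this file).
 */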
#define INTEL_QEP_CLK_PERIOD_NS		10

struct intel_qep {
	struct mutex lock;
	struct device *dev;
	void __iomem *regs;
	bool enabled;
	/* Context save registers */
	u32 qepcon;
	u32 qepflt;
	u32 qepmax;
};

static inline u32 intel_qep_readl(struct intel_qep *qep, u32 offset)
{
	return readl(qep->regs + offset);
}

static inline void intel_qep_writel(struct intel_qep *qep,
				    u32 offset, u32 value)
{
	writel(value, qep->regs + offset);
}

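/*
 * Put the peripheral into a known state: disabled, quadrature operation
 * mode, spike filter off, counter reset at the maximum count rather than
 * on the index event, and all interrupts masked.
 */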
static void intel_qep_init(struct intel_qep *qep)
{
	u32 reg;

	reg = intel_qep_readl(qep, INTEL_QEPCON);
	reg &= ~INTEL_QEPCON_EN;
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	qep->enabled = false;
	/*
	 * Make sure peripheral is disabled by flushing the write with
	 * a dummy read
	 */
	reg = intel_qep_readl(qep, INTEL_QEPCON);

	reg &= ~(INTEL_QEPCON_OP_MODE | INTEL_QEPCON_FLT_EN);
	reg |= INTEL_QEPCON_EDGE_A | INTEL_QEPCON_EDGE_B |
	       INTEL_QEPCON_EDGE_INDX | INTEL_QEPCON_COUNT_RST_MODE;
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);
}

static int intel_qep_count_read(struct counter_device *counter,
				struct counter_count *count, u64 *val)
{
	struct intel_qep *const qep = counter_priv(counter);

	pm_runtime_get_sync(qep->dev);
	*val = intel_qep_readl(qep, INTEL_QEPCOUNT);
	pm_runtime_put(qep->dev);

	return 0;
}

static const enum counter_function intel_qep_count_functions[] = {
	COUNTER_FUNCTION_QUADRATURE_X4,
};

static int intel_qep_function_read(struct counter_device *counter,
				   struct counter_count *count,
				   enum counter_function *function)
{
	*function = COUNTER_FUNCTION_QUADRATURE_X4;

	return 0;
}

static const enum counter_synapse_action intel_qep_synapse_actions[] = {
	COUNTER_SYNAPSE_ACTION_BOTH_EDGES,
};

static int intel_qep_action_read(struct counter_device *counter,
				 struct counter_count *count,
				 struct counter_synapse *synapse,
				 enum counter_synapse_action *action)
{
	*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
	return 0;
}

static const struct counter_ops intel_qep_counter_ops = {
	.count_read = intel_qep_count_read,
	.function_read = intel_qep_function_read,
	.action_read = intel_qep_action_read,
};

#define INTEL_QEP_SIGNAL(_id, _name) {				\
	.id = (_id),						\
	.name = (_name),					\
}

static struct counter_signal intel_qep_signals[] = {
	INTEL_QEP_SIGNAL(0, "Phase A"),
	INTEL_QEP_SIGNAL(1, "Phase B"),
	INTEL_QEP_SIGNAL(2, "Index"),
};

#define INTEL_QEP_SYNAPSE(_signal_id) {				\
	.actions_list = intel_qep_synapse_actions,		\
	.num_actions = ARRAY_SIZE(intel_qep_synapse_actions),	\
	.signal = &intel_qep_signals[(_signal_id)],		\
}

static struct counter_synapse intel_qep_count_synapses[] = {
	INTEL_QEP_SYNAPSE(0),
	INTEL_QEP_SYNAPSE(1),
	INTEL_QEP_SYNAPSE(2),
};

static int intel_qep_ceiling_read(struct counter_device *counter,
				  struct counter_count *count, u64 *ceiling)
{
	struct intel_qep *qep = counter_priv(counter);

	pm_runtime_get_sync(qep->dev);
	*ceiling = intel_qep_readl(qep, INTEL_QEPMAX);
	pm_runtime_put(qep->dev);

	return 0;
}

static int intel_qep_ceiling_write(struct counter_device *counter,
				   struct counter_count *count, u64 max)
{
	struct intel_qep *qep = counter_priv(counter);
	int ret = 0;

	/* Intel QEP ceiling configuration only supports 32-bit values */
	if (max != (u32)max)
		return -ERANGE;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	intel_qep_writel(qep, INTEL_QEPMAX, max);
	pm_runtime_put(qep->dev);

out:
	mutex_unlock(&qep->lock);
	return ret;
}

static int intel_qep_enable_read(struct counter_device *counter,
				 struct counter_count *count, u8 *enable)
{
	struct intel_qep *qep = counter_priv(counter);

	*enable = qep->enabled;

	return 0;
}

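/*
 * Enabling the counter takes an extra runtime PM reference so the device
 * stays powered and keeps counting for as long as the counter is enabled;
 * the reference is dropped again when the counter is disabled.
 */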
static int intel_qep_enable_write(struct counter_device *counter,
				  struct counter_count *count, u8 val)
{
	struct intel_qep *qep = counter_priv(counter);
	u32 reg;
	bool changed;

	mutex_lock(&qep->lock);
	changed = val ^ qep->enabled;
	if (!changed)
		goto out;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (val) {
		/* Enable peripheral and keep runtime PM always on */
		reg |= INTEL_QEPCON_EN;
		pm_runtime_get_noresume(qep->dev);
	} else {
		/* Let runtime PM be idle and disable peripheral */
		pm_runtime_put_noidle(qep->dev);
		reg &= ~INTEL_QEPCON_EN;
	}
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);
	qep->enabled = val;

out:
	mutex_unlock(&qep->lock);
	return 0;
}

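/*
 * The spike filter length is (QEPFLT.MAX_COUNT + 2) periods of the 10 ns
 * counter clock, i.e. length_ns = (MAX_COUNT + 2) * INTEL_QEP_CLK_PERIOD_NS.
 * For example, MAX_COUNT = 48 corresponds to a 500 ns filter.
 */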
static int intel_qep_spike_filter_ns_read(struct counter_device *counter,
					  struct counter_count *count,
					  u64 *length)
{
	struct intel_qep *qep = counter_priv(counter);
	u32 reg;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (!(reg & INTEL_QEPCON_FLT_EN)) {
		pm_runtime_put(qep->dev);
		return 0;
	}
	reg = INTEL_QEPFLT_MAX_COUNT(intel_qep_readl(qep, INTEL_QEPFLT));
	pm_runtime_put(qep->dev);

	*length = (reg + 2) * INTEL_QEP_CLK_PERIOD_NS;

	return 0;
}

static int intel_qep_spike_filter_ns_write(struct counter_device *counter,
					   struct counter_count *count,
					   u64 length)
{
	struct intel_qep *qep = counter_priv(counter);
	u32 reg;
	bool enable;
	int ret = 0;

	/*
	 * Spike filter length is (MAX_COUNT + 2) clock periods.
	 * Disable the filter when userspace writes 0, enable it for valid
	 * nanosecond values and error out otherwise.
	 */
	do_div(length, INTEL_QEP_CLK_PERIOD_NS);
	if (length == 0) {
		enable = false;
		length = 0;
	} else if (length >= 2) {
		enable = true;
		length -= 2;
	} else {
		return -EINVAL;
	}

	if (length > INTEL_QEPFLT_MAX_COUNT(length))
		return -ERANGE;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (enable)
		reg |= INTEL_QEPCON_FLT_EN;
	else
		reg &= ~INTEL_QEPCON_FLT_EN;
	intel_qep_writel(qep, INTEL_QEPFLT, length);
	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);

out:
	mutex_unlock(&qep->lock);
	return ret;
}

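/*
 * preset_enable maps onto the inverse of the COUNT_RST_MODE bit: a clear
 * bit means the counter is reset by the index event (preset enabled), a
 * set bit means it is reset on reaching the ceiling (QEPMAX) instead.
 */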
static int intel_qep_preset_enable_read(struct counter_device *counter,
					struct counter_count *count,
					u8 *preset_enable)
{
	struct intel_qep *qep = counter_priv(counter);
	u32 reg;

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	pm_runtime_put(qep->dev);

	*preset_enable = !(reg & INTEL_QEPCON_COUNT_RST_MODE);

	return 0;
}

static int intel_qep_preset_enable_write(struct counter_device *counter,
					 struct counter_count *count, u8 val)
{
	struct intel_qep *qep = counter_priv(counter);
	u32 reg;
	int ret = 0;

	mutex_lock(&qep->lock);
	if (qep->enabled) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(qep->dev);
	reg = intel_qep_readl(qep, INTEL_QEPCON);
	if (val)
		reg &= ~INTEL_QEPCON_COUNT_RST_MODE;
	else
		reg |= INTEL_QEPCON_COUNT_RST_MODE;

	intel_qep_writel(qep, INTEL_QEPCON, reg);
	pm_runtime_put(qep->dev);

out:
	mutex_unlock(&qep->lock);

	return ret;
}

static struct counter_comp intel_qep_count_ext[] = {
	COUNTER_COMP_ENABLE(intel_qep_enable_read, intel_qep_enable_write),
	COUNTER_COMP_CEILING(intel_qep_ceiling_read, intel_qep_ceiling_write),
	COUNTER_COMP_PRESET_ENABLE(intel_qep_preset_enable_read,
				   intel_qep_preset_enable_write),
	COUNTER_COMP_COUNT_U64("spike_filter_ns",
			       intel_qep_spike_filter_ns_read,
			       intel_qep_spike_filter_ns_write),
};
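
/*
 * The extensions above are exposed through the counter sysfs interface.
 * A minimal usage sketch, assuming the device registers as counter0 and
 * the usual count0 sysfs layout; note that ceiling, preset_enable and
 * spike_filter_ns return -EBUSY while the counter is enabled:
 *
 *   echo 0    > /sys/bus/counter/devices/counter0/count0/enable
 *   echo 500  > /sys/bus/counter/devices/counter0/count0/spike_filter_ns
 *   echo 1000 > /sys/bus/counter/devices/counter0/count0/ceiling
 *   echo 1    > /sys/bus/counter/devices/counter0/count0/enable
 *   cat         /sys/bus/counter/devices/counter0/count0/count
 */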

static struct counter_count intel_qep_counter_count[] = {
	{
		.id = 0,
		.name = "Channel 1 Count",
		.functions_list = intel_qep_count_functions,
		.num_functions = ARRAY_SIZE(intel_qep_count_functions),
		.synapses = intel_qep_count_synapses,
		.num_synapses = ARRAY_SIZE(intel_qep_count_synapses),
		.ext = intel_qep_count_ext,
		.num_ext = ARRAY_SIZE(intel_qep_count_ext),
	},
};

static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct counter_device *counter;
	struct intel_qep *qep;
	struct device *dev = &pci->dev;
	void __iomem *regs;
	int ret;

	counter = devm_counter_alloc(dev, sizeof(*qep));
	if (!counter)
		return -ENOMEM;
	qep = counter_priv(counter);

	ret = pcim_enable_device(pci);
	if (ret)
		return ret;

	pci_set_master(pci);

	ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
	if (ret)
		return ret;

	regs = pcim_iomap_table(pci)[0];
	if (!regs)
		return -ENOMEM;

	qep->dev = dev;
	qep->regs = regs;
	mutex_init(&qep->lock);

	intel_qep_init(qep);
	pci_set_drvdata(pci, qep);

	counter->name = pci_name(pci);
	counter->parent = dev;
	counter->ops = &intel_qep_counter_ops;
	counter->counts = intel_qep_counter_count;
	counter->num_counts = ARRAY_SIZE(intel_qep_counter_count);
	counter->signals = intel_qep_signals;
	counter->num_signals = ARRAY_SIZE(intel_qep_signals);
	qep->enabled = false;

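	/* Let the device runtime suspend while the counter is not in use */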
	pm_runtime_put(dev);
	pm_runtime_allow(dev);

	ret = devm_counter_add(&pci->dev, counter);
	if (ret < 0)
		return dev_err_probe(&pci->dev, ret, "Failed to add counter\n");

	return 0;
}

static void intel_qep_remove(struct pci_dev *pci)
{
	struct intel_qep *qep = pci_get_drvdata(pci);
	struct device *dev = &pci->dev;

	pm_runtime_forbid(dev);
	if (!qep->enabled)
		pm_runtime_get(dev);

	intel_qep_writel(qep, INTEL_QEPCON, 0);
}

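/*
 * Only the configuration registers (control, filter and ceiling) are
 * saved across suspend; the running count value itself is not preserved.
 */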
static int __maybe_unused intel_qep_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_qep *qep = pci_get_drvdata(pdev);

	qep->qepcon = intel_qep_readl(qep, INTEL_QEPCON);
	qep->qepflt = intel_qep_readl(qep, INTEL_QEPFLT);
	qep->qepmax = intel_qep_readl(qep, INTEL_QEPMAX);

	return 0;
}

static int __maybe_unused intel_qep_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct intel_qep *qep = pci_get_drvdata(pdev);

	/*
	 * Make sure peripheral is disabled when restoring registers and
	 * control register bits that are writable only when the peripheral
	 * is disabled
	 */
	intel_qep_writel(qep, INTEL_QEPCON, 0);
	intel_qep_readl(qep, INTEL_QEPCON);

	intel_qep_writel(qep, INTEL_QEPFLT, qep->qepflt);
	intel_qep_writel(qep, INTEL_QEPMAX, qep->qepmax);
	intel_qep_writel(qep, INTEL_QEPINT_MASK, INTEL_QEPINT_MASK_ALL);

	/* Restore all other control register bits except enable status */
	intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon & ~INTEL_QEPCON_EN);
	intel_qep_readl(qep, INTEL_QEPCON);

	/* Restore enable status */
	intel_qep_writel(qep, INTEL_QEPCON, qep->qepcon);

	return 0;
}

static UNIVERSAL_DEV_PM_OPS(intel_qep_pm_ops,
			    intel_qep_suspend, intel_qep_resume, NULL);

static const struct pci_device_id intel_qep_id_table[] = {
	/* EHL */
	{ PCI_VDEVICE(INTEL, 0x4bc3), },
	{ PCI_VDEVICE(INTEL, 0x4b81), },
	{ PCI_VDEVICE(INTEL, 0x4b82), },
	{ PCI_VDEVICE(INTEL, 0x4b83), },
	{  } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, intel_qep_id_table);

static struct pci_driver intel_qep_driver = {
	.name = "intel-qep",
	.id_table = intel_qep_id_table,
	.probe = intel_qep_probe,
	.remove = intel_qep_remove,
	.driver = {
		.pm = &intel_qep_pm_ops,
	}
};

module_pci_driver(intel_qep_driver);

MODULE_AUTHOR("Felipe Balbi (Intel)");
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel Quadrature Encoder Peripheral driver");
MODULE_IMPORT_NS(COUNTER);
527