1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2023, Intel Corporation.
4 * Intel Visual Sensing Controller Interface Linux driver
5 */
6
7#include <linux/align.h>
8#include <linux/cache.h>
9#include <linux/cleanup.h>
10#include <linux/iopoll.h>
11#include <linux/list.h>
12#include <linux/mei.h>
13#include <linux/module.h>
14#include <linux/mutex.h>
15#include <linux/overflow.h>
16#include <linux/platform_device.h>
17#include <linux/pm_runtime.h>
18#include <linux/timekeeping.h>
19#include <linux/types.h>
20
21#include <asm-generic/bug.h>
22#include <asm-generic/unaligned.h>
23
24#include "mei_dev.h"
25#include "vsc-tp.h"
26
27#define MEI_VSC_DRV_NAME		"intel_vsc"
28
29#define MEI_VSC_MAX_MSG_SIZE		512
30
31#define MEI_VSC_POLL_DELAY_US		(50 * USEC_PER_MSEC)
32#define MEI_VSC_POLL_TIMEOUT_US		(200 * USEC_PER_MSEC)
33
34#define mei_dev_to_vsc_hw(dev)		((struct mei_vsc_hw *)((dev)->hw))
35
/* host timestamp payload sent to the firmware with every read request */
struct mei_vsc_host_timestamp {
	u64 realtime;	/* CLOCK_REALTIME, nanoseconds */
	u64 boottime;	/* CLOCK_BOOTTIME, nanoseconds */
};
40
/* VSC hardware state, reached from struct mei_device via mei_dev_to_vsc_hw() */
struct mei_vsc_hw {
	struct vsc_tp *tp;	/* transport-layer handle */

	bool fw_ready;		/* firmware answered the startup poll */
	bool host_ready;	/* host side of the start handshake done */

	/* non-zero while a write is in flight; gates hbuf_is_ready() */
	atomic_t write_lock_cnt;

	u32 rx_len;	/* byte count of the last received message (hdr + payload) */
	u32 rx_hdr;	/* raw 32-bit MEI header of the last received message */

	/* buffer for tx */
	char tx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned;
	/* buffer for rx */
	char rx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned;
};
57
58static int mei_vsc_read_helper(struct mei_vsc_hw *hw, u8 *buf,
59			       u32 max_len)
60{
61	struct mei_vsc_host_timestamp ts = {
62		.realtime = ktime_to_ns(ktime_get_real()),
63		.boottime = ktime_to_ns(ktime_get_boottime()),
64	};
65
66	return vsc_tp_xfer(hw->tp, VSC_TP_CMD_READ, &ts, sizeof(ts),
67			   buf, max_len);
68}
69
70static int mei_vsc_write_helper(struct mei_vsc_hw *hw, u8 *buf, u32 len)
71{
72	u8 status;
73
74	return vsc_tp_xfer(hw->tp, VSC_TP_CMD_WRITE, buf, len, &status,
75			   sizeof(status));
76}
77
78static int mei_vsc_fw_status(struct mei_device *mei_dev,
79			     struct mei_fw_status *fw_status)
80{
81	if (!fw_status)
82		return -EINVAL;
83
84	fw_status->count = 0;
85
86	return 0;
87}
88
/* Power gating is not implemented on VSC; it is permanently off. */
static inline enum mei_pg_state mei_vsc_pg_state(struct mei_device *mei_dev)
{
	return MEI_PG_OFF;
}
93
94static void mei_vsc_intr_enable(struct mei_device *mei_dev)
95{
96	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
97
98	vsc_tp_intr_enable(hw->tp);
99}
100
101static void mei_vsc_intr_disable(struct mei_device *mei_dev)
102{
103	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
104
105	vsc_tp_intr_disable(hw->tp);
106}
107
/* The mei framework requires this op; VSC has no interrupt status to clear. */
static void mei_vsc_intr_clear(struct mei_device *mei_dev)
{
}
112
113/* wait for pending irq handler */
114static void mei_vsc_synchronize_irq(struct mei_device *mei_dev)
115{
116	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
117
118	vsc_tp_intr_synchronize(hw->tp);
119}
120
/* No additional hardware configuration is needed for VSC. */
static int mei_vsc_hw_config(struct mei_device *mei_dev)
{
	return 0;
}
125
126static bool mei_vsc_host_is_ready(struct mei_device *mei_dev)
127{
128	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
129
130	return hw->host_ready;
131}
132
133static bool mei_vsc_hw_is_ready(struct mei_device *mei_dev)
134{
135	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
136
137	return hw->fw_ready;
138}
139
/*
 * Complete the start handshake: mark the host ready, enable the
 * transport interrupt and poll until the firmware answers a read.
 *
 * Return: 0 on success, -ETIMEDOUT (from read_poll_timeout()) if the
 * firmware never becomes ready within MEI_VSC_POLL_TIMEOUT_US.
 */
static int mei_vsc_hw_start(struct mei_device *mei_dev)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
	int ret, rlen;
	u8 buf;

	hw->host_ready = true;

	vsc_tp_intr_enable(hw->tp);

	/*
	 * rlen receives mei_vsc_read_helper()'s return value; it stays
	 * negative until the firmware responds.  The "true" argument
	 * sleeps MEI_VSC_POLL_DELAY_US before the first attempt.
	 */
	ret = read_poll_timeout(mei_vsc_read_helper, rlen,
				rlen >= 0, MEI_VSC_POLL_DELAY_US,
				MEI_VSC_POLL_TIMEOUT_US, true,
				hw, &buf, sizeof(buf));
	if (ret) {
		dev_err(mei_dev->dev, "wait fw ready failed: %d\n", ret);
		return ret;
	}

	hw->fw_ready = true;

	return 0;
}
163
164static bool mei_vsc_hbuf_is_ready(struct mei_device *mei_dev)
165{
166	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
167
168	return atomic_read(&hw->write_lock_cnt) == 0;
169}
170
/* Writes complete synchronously, so the full slot count is always free. */
static int mei_vsc_hbuf_empty_slots(struct mei_device *mei_dev)
{
	return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
}
175
/* Fixed host-buffer depth: one maximum-size message, in slot units. */
static u32 mei_vsc_hbuf_depth(const struct mei_device *mei_dev)
{
	return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
}
180
181static int mei_vsc_write(struct mei_device *mei_dev,
182			 const void *hdr, size_t hdr_len,
183			 const void *data, size_t data_len)
184{
185	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
186	char *buf = hw->tx_buf;
187	int ret;
188
189	if (WARN_ON(!hdr || !IS_ALIGNED(hdr_len, 4)))
190		return -EINVAL;
191
192	if (!data || data_len > MEI_VSC_MAX_MSG_SIZE)
193		return -EINVAL;
194
195	atomic_inc(&hw->write_lock_cnt);
196
197	memcpy(buf, hdr, hdr_len);
198	memcpy(buf + hdr_len, data, data_len);
199
200	ret = mei_vsc_write_helper(hw, buf, hdr_len + data_len);
201
202	atomic_dec_if_positive(&hw->write_lock_cnt);
203
204	return ret < 0 ? ret : 0;
205}
206
/*
 * Read the next message into rx_buf and return its 32-bit MEI header,
 * or 0 if the transfer failed or returned fewer bytes than a header.
 */
static inline u32 mei_vsc_read(const struct mei_device *mei_dev)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
	int ret;

	ret = mei_vsc_read_helper(hw, hw->rx_buf, sizeof(hw->rx_buf));
	/* explicit ret < 0 first: the second comparison promotes ret to
	 * unsigned against sizeof() and would miss negative errors */
	if (ret < 0 || ret < sizeof(u32))
		return 0;
	hw->rx_len = ret;

	hw->rx_hdr = get_unaligned_le32(hw->rx_buf);

	return hw->rx_hdr;
}
221
/* A whole message arrives in one transfer; report the full slot count. */
static int mei_vsc_count_full_read_slots(struct mei_device *mei_dev)
{
	return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE;
}
226
/*
 * Copy the payload of the last message received by mei_vsc_read() into
 * @buf.  @len must equal the length announced in the cached MEI header,
 * and header + payload must add up to exactly the bytes received;
 * anything else indicates a malformed or stale message.
 */
static int mei_vsc_read_slots(struct mei_device *mei_dev, unsigned char *buf,
			      unsigned long len)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
	struct mei_msg_hdr *hdr;

	/* reinterpret the cached raw header as a struct mei_msg_hdr */
	hdr = (struct mei_msg_hdr *)&hw->rx_hdr;
	if (len != hdr->length || hdr->length + sizeof(*hdr) != hw->rx_len)
		return -EINVAL;

	memcpy(buf, hw->rx_buf + sizeof(*hdr), len);

	return 0;
}
241
242static bool mei_vsc_pg_in_transition(struct mei_device *mei_dev)
243{
244	return mei_dev->pg_event >= MEI_PG_EVENT_WAIT &&
245	       mei_dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
246}
247
/* Power gating is never enabled on VSC. */
static bool mei_vsc_pg_is_enabled(struct mei_device *mei_dev)
{
	return false;
}
252
/*
 * Reset the device through the transport layer, mask its interrupt and
 * re-initialize the transport.
 *
 * NOTE(review): the @intr_enable argument is ignored; interrupts appear
 * to be re-enabled later via mei_vsc_intr_enable()/hw_start — confirm
 * this matches the MEI core's expectations for the hw_reset op.
 */
static int mei_vsc_hw_reset(struct mei_device *mei_dev, bool intr_enable)
{
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);

	vsc_tp_reset(hw->tp);

	vsc_tp_intr_disable(hw->tp);

	return vsc_tp_init(hw->tp, mei_dev->dev);
}
263
/* glue between the MEI core state machine and the VSC transport layer */
static const struct mei_hw_ops mei_vsc_hw_ops = {
	.fw_status = mei_vsc_fw_status,
	.pg_state = mei_vsc_pg_state,

	.host_is_ready = mei_vsc_host_is_ready,
	.hw_is_ready = mei_vsc_hw_is_ready,
	.hw_reset = mei_vsc_hw_reset,
	.hw_config = mei_vsc_hw_config,
	.hw_start = mei_vsc_hw_start,

	.pg_in_transition = mei_vsc_pg_in_transition,
	.pg_is_enabled = mei_vsc_pg_is_enabled,

	.intr_clear = mei_vsc_intr_clear,
	.intr_enable = mei_vsc_intr_enable,
	.intr_disable = mei_vsc_intr_disable,
	.synchronize_irq = mei_vsc_synchronize_irq,

	.hbuf_free_slots = mei_vsc_hbuf_empty_slots,
	.hbuf_is_ready = mei_vsc_hbuf_is_ready,
	.hbuf_depth = mei_vsc_hbuf_depth,
	.write = mei_vsc_write,

	.rdbuf_full_slots = mei_vsc_count_full_read_slots,
	.read_hdr = mei_vsc_read,
	.read = mei_vsc_read_slots,
};
291
/*
 * Transport-layer event callback: drain all pending reads, then
 * dispatch queued writes and run completions for finished requests.
 * The MEI device_lock is held (via guard) for the whole sequence.
 */
static void mei_vsc_event_cb(void *context)
{
	struct mei_device *mei_dev = context;
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
	struct list_head cmpl_list;
	s32 slots;
	int ret;

	/* NOTE(review): dev_state is sampled before device_lock is taken,
	 * so a concurrent transition could race this early-out — confirm
	 * it is only an optimization, not a correctness requirement */
	if (mei_dev->dev_state == MEI_DEV_RESETTING ||
	    mei_dev->dev_state == MEI_DEV_INITIALIZING)
		return;

	INIT_LIST_HEAD(&cmpl_list);

	guard(mutex)(&mei_dev->device_lock);

	while (vsc_tp_need_read(hw->tp)) {
		/* check slots available for reading */
		slots = mei_count_full_read_slots(mei_dev);

		ret = mei_irq_read_handler(mei_dev, &cmpl_list, &slots);
		if (ret) {
			/* -ENODATA means nothing left to read; any other
			 * error schedules a reset unless one is already
			 * running or the device is powering down */
			if (ret != -ENODATA) {
				if (mei_dev->dev_state != MEI_DEV_RESETTING &&
				    mei_dev->dev_state != MEI_DEV_POWER_DOWN)
					schedule_work(&mei_dev->reset_work);
			}

			return;
		}
	}

	/* refresh buffer readiness before and after dispatching writes */
	mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
	ret = mei_irq_write_handler(mei_dev, &cmpl_list);
	if (ret)
		dev_err(mei_dev->dev, "dispatch write request failed: %d\n", ret);

	mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
	mei_irq_compl_handler(mei_dev, &cmpl_list);
}
332
/*
 * Probe: bind to the vsc_tp transport handle passed in as platform
 * data, allocate the MEI device with the VSC hw state appended, start
 * the MEI state machine and register the character device.
 */
static int mei_vsc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mei_device *mei_dev;
	struct mei_vsc_hw *hw;
	struct vsc_tp *tp;
	int ret;

	/* platform data is a pointer to the transport handle */
	tp = *(struct vsc_tp **)dev_get_platdata(dev);
	if (!tp)
		return dev_err_probe(dev, -ENODEV, "no platform data\n");

	/* one allocation for mei_device plus the trailing hw state
	 * retrieved through mei_dev_to_vsc_hw() */
	mei_dev = devm_kzalloc(dev, size_add(sizeof(*mei_dev), sizeof(*hw)),
			       GFP_KERNEL);
	if (!mei_dev)
		return -ENOMEM;

	mei_device_init(mei_dev, dev, false, &mei_vsc_hw_ops);
	mei_dev->fw_f_fw_ver_supported = 0;
	mei_dev->kind = "ivsc";

	hw = mei_dev_to_vsc_hw(mei_dev);
	atomic_set(&hw->write_lock_cnt, 0);
	hw->tp = tp;

	platform_set_drvdata(pdev, mei_dev);

	/* events may fire from here on; mei_vsc_event_cb() bails out
	 * while dev_state is still MEI_DEV_INITIALIZING */
	vsc_tp_register_event_cb(tp, mei_vsc_event_cb, mei_dev);

	ret = mei_start(mei_dev);
	if (ret) {
		dev_err_probe(dev, ret, "init hw failed\n");
		goto err_cancel;
	}

	ret = mei_register(mei_dev, dev);
	if (ret)
		goto err_stop;

	pm_runtime_enable(mei_dev->dev);

	return 0;

err_stop:
	mei_stop(mei_dev);

err_cancel:
	mei_cancel_work(mei_dev);

	mei_disable_interrupts(mei_dev);

	return ret;
}
386
/*
 * Teardown mirrors probe: disable runtime PM first, then stop the MEI
 * state machine, mask interrupts and unregister the character device.
 */
static void mei_vsc_remove(struct platform_device *pdev)
{
	struct mei_device *mei_dev = platform_get_drvdata(pdev);

	pm_runtime_disable(mei_dev->dev);

	mei_stop(mei_dev);

	mei_disable_interrupts(mei_dev);

	mei_deregister(mei_dev);
}
399
/*
 * System suspend: stop the MEI state machine, mask device interrupts
 * and release the transport IRQ so it cannot fire while suspended.
 */
static int mei_vsc_suspend(struct device *dev)
{
	struct mei_device *mei_dev = dev_get_drvdata(dev);
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);

	mei_stop(mei_dev);

	mei_disable_interrupts(mei_dev);

	vsc_tp_free_irq(hw->tp);

	return 0;
}
413
/*
 * System resume: reacquire the transport IRQ, restart the MEI state
 * machine and restart the periodic timer stopped by suspend.
 */
static int mei_vsc_resume(struct device *dev)
{
	struct mei_device *mei_dev = dev_get_drvdata(dev);
	struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
	int ret;

	ret = vsc_tp_request_irq(hw->tp);
	if (ret)
		return ret;

	ret = mei_restart(mei_dev);
	if (ret)
		goto err_free;

	/* start timer if stopped in suspend */
	schedule_delayed_work(&mei_dev->timer_work, HZ);

	return 0;

err_free:
	/* release the IRQ again so suspend/resume stays balanced */
	vsc_tp_free_irq(hw->tp);

	return ret;
}
438
/* system sleep callbacks only; no runtime PM ops are provided */
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);

static const struct platform_device_id mei_vsc_id_table[] = {
	{ MEI_VSC_DRV_NAME },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, mei_vsc_id_table);
446
static struct platform_driver mei_vsc_drv = {
	.probe = mei_vsc_probe,
	.remove_new = mei_vsc_remove,
	.id_table = mei_vsc_id_table,
	.driver = {
		.name = MEI_VSC_DRV_NAME,
		.pm = &mei_vsc_pm_ops,
		/* startup polls firmware for up to 200 ms (hw_start);
		 * async probing keeps boot from serializing on it */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
module_platform_driver(mei_vsc_drv);

MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Intel Visual Sensing Controller Interface");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(VSC_TP);
464