// SPDX-License-Identifier: GPL-2.0
/*
 * camss-vfe-gen1.c
 *
 * Qualcomm MSM Camera Subsystem - VFE Common functionality for Gen 1 versions of hw (4.1, 4.7..)
 *
 * Copyright (C) 2020 Linaro Ltd.
 */

#include "camss.h"
#include "camss-vfe.h"
#include "camss-vfe-gen1.h"

/* Max number of frame drop updates per frame */
#define VFE_FRAME_DROP_UPDATES 2
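/* Time (in ms) to wait for a SOF or reg update event when stopping an output */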
#define VFE_NEXT_SOF_MS 500

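/*
 * vfe_gen1_halt - Halt the VFE bus and wait for the halt ack
 * @vfe: VFE device
 *
 * Return 0 on success or a negative error code otherwise
 */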
int vfe_gen1_halt(struct vfe_device *vfe)
{
	unsigned long time;

	reinit_completion(&vfe->halt_complete);

	vfe->ops_gen1->halt_request(vfe);

	time = wait_for_completion_timeout(&vfe->halt_complete,
					   msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
	if (!time) {
		dev_err(vfe->camss->dev, "VFE halt timeout\n");
		return -EIO;
	}

	return 0;
}

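/*
 * vfe_disable_output - Disable the write masters of a VFE line
 * @line: VFE line
 *
 * Wait for the next SOF, stop the write masters, latch the configuration with
 * a reg update and tear down the RDI or camif path.
 */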
static int vfe_disable_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output = &line->output;
	const struct vfe_hw_ops *ops = vfe->ops;
	unsigned long flags;
	unsigned long time;
	unsigned int i;

	spin_lock_irqsave(&vfe->output_lock, flags);

	output->gen1.wait_sof = 1;
	spin_unlock_irqrestore(&vfe->output_lock, flags);

	time = wait_for_completion_timeout(&output->sof, msecs_to_jiffies(VFE_NEXT_SOF_MS));
	if (!time)
		dev_err(vfe->camss->dev, "VFE sof timeout\n");

	spin_lock_irqsave(&vfe->output_lock, flags);
	for (i = 0; i < output->wm_num; i++)
		vfe->ops_gen1->wm_enable(vfe, output->wm_idx[i], 0);

	ops->reg_update(vfe, line->id);
	output->wait_reg_update = 1;
	spin_unlock_irqrestore(&vfe->output_lock, flags);

	time = wait_for_completion_timeout(&output->reg_update, msecs_to_jiffies(VFE_NEXT_SOF_MS));
	if (!time)
		dev_err(vfe->camss->dev, "VFE reg update timeout\n");

	spin_lock_irqsave(&vfe->output_lock, flags);

	if (line->id != VFE_LINE_PIX) {
		vfe->ops_gen1->wm_frame_based(vfe, output->wm_idx[0], 0);
		vfe->ops_gen1->bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id);
		vfe->ops_gen1->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
		vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[0], 0);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
	} else {
		for (i = 0; i < output->wm_num; i++) {
			vfe->ops_gen1->wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
			vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[i], 0);
		}

		vfe->ops_gen1->enable_irq_pix_line(vfe, 0, line->id, 0);
		vfe->ops_gen1->set_module_cfg(vfe, 0);
		vfe->ops_gen1->set_realign_cfg(vfe, line, 0);
		vfe->ops_gen1->set_xbar_cfg(vfe, output, 0);
		vfe->ops_gen1->set_camif_cmd(vfe, 0);

		spin_unlock_irqrestore(&vfe->output_lock, flags);

		vfe->ops_gen1->camif_wait_for_stop(vfe, vfe->camss->dev);
	}

	return 0;
}

/*
 * vfe_gen1_disable - Disable streaming on VFE line
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */
int vfe_gen1_disable(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);

	vfe_disable_output(line);

	vfe_put_output(line);

	mutex_lock(&vfe->stream_lock);

	if (vfe->stream_count == 1)
		vfe->ops_gen1->bus_enable_wr_if(vfe, 0);

	vfe->stream_count--;

	mutex_unlock(&vfe->stream_lock);

	return 0;
}

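/*
 * vfe_output_init_addrs - Initialize the ping and pong banks of the write
 * masters with the first two buffers of the output; pong falls back to the
 * ping address when only one buffer is available
 */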
static void vfe_output_init_addrs(struct vfe_device *vfe,
				  struct vfe_output *output, u8 sync,
				  struct vfe_line *line)
{
	u32 ping_addr;
	u32 pong_addr;
	unsigned int i;

	output->gen1.active_buf = 0;

	for (i = 0; i < output->wm_num; i++) {
		if (output->buf[0])
			ping_addr = output->buf[0]->addr[i];
		else
			ping_addr = 0;

		if (output->buf[1])
			pong_addr = output->buf[1]->addr[i];
		else
			pong_addr = ping_addr;

		vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
		vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
		if (sync)
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
	}
}

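/*
 * vfe_output_frame_drop - Program the frame drop period and pattern on the
 * write masters of an output
 * @drop_pattern: bitmask of frames to be written out within the drop period
 */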
static void vfe_output_frame_drop(struct vfe_device *vfe,
				  struct vfe_output *output,
				  u32 drop_pattern)
{
	u8 drop_period;
	unsigned int i;

	/* We need to toggle update period to be valid on next frame */
	output->drop_update_idx++;
	output->drop_update_idx %= VFE_FRAME_DROP_UPDATES;
	drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx;

	for (i = 0; i < output->wm_num; i++) {
		vfe->ops_gen1->wm_set_framedrop_period(vfe, output->wm_idx[i], drop_period);
		vfe->ops_gen1->wm_set_framedrop_pattern(vfe, output->wm_idx[i], drop_pattern);
	}

	vfe->ops->reg_update(vfe, container_of(output, struct vfe_line, output)->id);
}

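/*
 * vfe_enable_output - Configure and start the write masters of a VFE line
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */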
static int vfe_enable_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output = &line->output;
	const struct vfe_hw_ops *ops = vfe->ops;
	struct media_entity *sensor;
	unsigned long flags;
	unsigned int frame_skip = 0;
	unsigned int i;
	u16 ub_size;

	ub_size = vfe->ops_gen1->get_ub_size(vfe->id);
	if (!ub_size)
		return -EINVAL;

	sensor = camss_find_sensor(&line->subdev.entity);
	if (sensor) {
		struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor);

		v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
		/* Max frame skip is 29 frames */
		if (frame_skip > VFE_FRAME_DROP_VAL - 1)
			frame_skip = VFE_FRAME_DROP_VAL - 1;
	}

	spin_lock_irqsave(&vfe->output_lock, flags);

	ops->reg_update_clear(vfe, line->id);

	if (output->state > VFE_OUTPUT_RESERVED) {
		dev_err(vfe->camss->dev, "Output is not in reserved state %d\n", output->state);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
		return -EINVAL;
	}
	output->state = VFE_OUTPUT_IDLE;

	output->buf[0] = vfe_buf_get_pending(output);
	output->buf[1] = vfe_buf_get_pending(output);

	if (!output->buf[0] && output->buf[1]) {
		output->buf[0] = output->buf[1];
		output->buf[1] = NULL;
	}

	if (output->buf[0])
		output->state = VFE_OUTPUT_SINGLE;

	if (output->buf[1])
		output->state = VFE_OUTPUT_CONTINUOUS;

	switch (output->state) {
	case VFE_OUTPUT_SINGLE:
		vfe_output_frame_drop(vfe, output, 1 << frame_skip);
		break;
	case VFE_OUTPUT_CONTINUOUS:
		vfe_output_frame_drop(vfe, output, 3 << frame_skip);
		break;
	default:
		vfe_output_frame_drop(vfe, output, 0);
		break;
	}

	output->sequence = 0;
	output->gen1.wait_sof = 0;
	output->wait_reg_update = 0;
	reinit_completion(&output->sof);
	reinit_completion(&output->reg_update);

	vfe_output_init_addrs(vfe, output, 0, line);

	if (line->id != VFE_LINE_PIX) {
		vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[0], 1);
		vfe->ops_gen1->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
		vfe->ops_gen1->bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
		vfe->ops_gen1->wm_set_subsample(vfe, output->wm_idx[0]);
		vfe->ops_gen1->set_rdi_cid(vfe, line->id, 0);
		vfe->ops_gen1->wm_set_ub_cfg(vfe, output->wm_idx[0],
					     (ub_size + 1) * output->wm_idx[0], ub_size);
		vfe->ops_gen1->wm_frame_based(vfe, output->wm_idx[0], 1);
		vfe->ops_gen1->wm_enable(vfe, output->wm_idx[0], 1);
		vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[0]);
	} else {
		ub_size /= output->wm_num;
		for (i = 0; i < output->wm_num; i++) {
			vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[i], 1);
			vfe->ops_gen1->wm_set_subsample(vfe, output->wm_idx[i]);
			vfe->ops_gen1->wm_set_ub_cfg(vfe, output->wm_idx[i],
						     (ub_size + 1) * output->wm_idx[i], ub_size);
			vfe->ops_gen1->wm_line_based(vfe, output->wm_idx[i],
						     &line->video_out.active_fmt.fmt.pix_mp, i, 1);
			vfe->ops_gen1->wm_enable(vfe, output->wm_idx[i], 1);
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
		}
		vfe->ops_gen1->enable_irq_pix_line(vfe, 0, line->id, 1);
		vfe->ops_gen1->set_module_cfg(vfe, 1);
		vfe->ops_gen1->set_camif_cfg(vfe, line);
		vfe->ops_gen1->set_realign_cfg(vfe, line, 1);
		vfe->ops_gen1->set_xbar_cfg(vfe, output, 1);
		vfe->ops_gen1->set_demux_cfg(vfe, line);
		vfe->ops_gen1->set_scale_cfg(vfe, line);
		vfe->ops_gen1->set_crop_cfg(vfe, line);
		vfe->ops_gen1->set_clamp_cfg(vfe);
		vfe->ops_gen1->set_camif_cmd(vfe, 1);
	}

	ops->reg_update(vfe, line->id);

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;
}

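/*
 * vfe_get_output - Reserve an output and its write masters for a VFE line
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */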
static int vfe_get_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output;
	struct v4l2_format *f = &line->video_out.active_fmt;
	unsigned long flags;
	int i;
	int wm_idx;

	spin_lock_irqsave(&vfe->output_lock, flags);

	output = &line->output;
	if (output->state > VFE_OUTPUT_RESERVED) {
		dev_err(vfe->camss->dev, "Output is running\n");
		goto error;
	}
	output->state = VFE_OUTPUT_RESERVED;

	output->gen1.active_buf = 0;

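	/* Two-plane (NV) formats need one write master per plane */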
	switch (f->fmt.pix_mp.pixelformat) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		output->wm_num = 2;
		break;
	default:
		output->wm_num = 1;
		break;
	}

	for (i = 0; i < output->wm_num; i++) {
		wm_idx = vfe_reserve_wm(vfe, line->id);
		if (wm_idx < 0) {
			dev_err(vfe->camss->dev, "Can not reserve wm\n");
			goto error_get_wm;
		}
		output->wm_idx[i] = wm_idx;
	}

	output->drop_update_idx = 0;

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;

error_get_wm:
	for (i--; i >= 0; i--)
		vfe_release_wm(vfe, output->wm_idx[i]);
	output->state = VFE_OUTPUT_OFF;
error:
	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return -EINVAL;
}

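/*
 * vfe_gen1_enable - Enable streaming on VFE line
 * @line: VFE line
 *
 * Return 0 on success or a negative error code otherwise
 */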
int vfe_gen1_enable(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	int ret;

	mutex_lock(&vfe->stream_lock);

	if (!vfe->stream_count) {
		vfe->ops_gen1->enable_irq_common(vfe);
		vfe->ops_gen1->bus_enable_wr_if(vfe, 1);
		vfe->ops_gen1->set_qos(vfe);
		vfe->ops_gen1->set_ds(vfe);
	}

	vfe->stream_count++;

	mutex_unlock(&vfe->stream_lock);

	ret = vfe_get_output(line);
	if (ret < 0)
		goto error_get_output;

	ret = vfe_enable_output(line);
	if (ret < 0)
		goto error_enable_output;

	vfe->was_streaming = 1;

	return 0;

error_enable_output:
	vfe_put_output(line);

error_get_output:
	mutex_lock(&vfe->stream_lock);

	if (vfe->stream_count == 1)
		vfe->ops_gen1->bus_enable_wr_if(vfe, 0);

	vfe->stream_count--;

	mutex_unlock(&vfe->stream_lock);

	return ret;
}

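/* Program the ping bank of each write master with buf[0] (0 if no buffer) */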
static void vfe_output_update_ping_addr(struct vfe_device *vfe,
					struct vfe_output *output, u8 sync,
					struct vfe_line *line)
{
	u32 addr;
	unsigned int i;

	for (i = 0; i < output->wm_num; i++) {
		if (output->buf[0])
			addr = output->buf[0]->addr[i];
		else
			addr = 0;

		vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], addr);
		if (sync)
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
	}
}

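/* Program the pong bank of each write master with buf[1] (0 if no buffer) */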
static void vfe_output_update_pong_addr(struct vfe_device *vfe,
					struct vfe_output *output, u8 sync,
					struct vfe_line *line)
{
	u32 addr;
	unsigned int i;

	for (i = 0; i < output->wm_num; i++) {
		if (output->buf[1])
			addr = output->buf[1]->addr[i];
		else
			addr = 0;

		vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], addr);
		if (sync)
			vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
	}
}

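/* Called from the WM done ISR when a pending buffer fills the completed bank */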
static void vfe_buf_update_wm_on_next(struct vfe_device *vfe,
				      struct vfe_output *output)
{
	switch (output->state) {
	case VFE_OUTPUT_CONTINUOUS:
		vfe_output_frame_drop(vfe, output, 3);
		break;
	case VFE_OUTPUT_SINGLE:
	default:
		dev_err_ratelimited(vfe->camss->dev,
				    "Next buf in wrong state! %d\n",
				    output->state);
		break;
	}
}

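/*
 * Called from the WM done ISR when no pending buffer is available - step the
 * output state down and adjust the frame drop pattern accordingly
 */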
static void vfe_buf_update_wm_on_last(struct vfe_device *vfe,
				      struct vfe_output *output)
{
	switch (output->state) {
	case VFE_OUTPUT_CONTINUOUS:
		output->state = VFE_OUTPUT_SINGLE;
		vfe_output_frame_drop(vfe, output, 1);
		break;
	case VFE_OUTPUT_SINGLE:
		output->state = VFE_OUTPUT_STOPPING;
		vfe_output_frame_drop(vfe, output, 0);
		break;
	default:
		dev_err_ratelimited(vfe->camss->dev,
				    "Last buff in wrong state! %d\n",
				    output->state);
		break;
	}
}

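/* Attach a newly queued buffer to the output or park it on the pending queue */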
static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
				     struct vfe_output *output,
				     struct camss_buffer *new_buf,
				     struct vfe_line *line)
{
	int inactive_idx;

	switch (output->state) {
	case VFE_OUTPUT_SINGLE:
		inactive_idx = !output->gen1.active_buf;

		if (!output->buf[inactive_idx]) {
			output->buf[inactive_idx] = new_buf;

			if (inactive_idx)
				vfe_output_update_pong_addr(vfe, output, 0, line);
			else
				vfe_output_update_ping_addr(vfe, output, 0, line);

			vfe_output_frame_drop(vfe, output, 3);
			output->state = VFE_OUTPUT_CONTINUOUS;
		} else {
			vfe_buf_add_pending(output, new_buf);
			dev_err_ratelimited(vfe->camss->dev,
					    "Inactive buffer is busy\n");
		}
		break;

	case VFE_OUTPUT_IDLE:
		if (!output->buf[0]) {
			output->buf[0] = new_buf;

			vfe_output_init_addrs(vfe, output, 1, line);
			vfe_output_frame_drop(vfe, output, 1);

			output->state = VFE_OUTPUT_SINGLE;
		} else {
			vfe_buf_add_pending(output, new_buf);
			dev_err_ratelimited(vfe->camss->dev,
					    "Output idle with buffer set!\n");
		}
		break;

	case VFE_OUTPUT_CONTINUOUS:
	default:
		vfe_buf_add_pending(output, new_buf);
		break;
	}
}

/*
 * vfe_isr_halt_ack - Process halt ack
 * @vfe: VFE Device
 */
static void vfe_isr_halt_ack(struct vfe_device *vfe)
{
	complete(&vfe->halt_complete);
	vfe->ops_gen1->halt_clear(vfe);
}

/*
 * vfe_isr_sof - Process start of frame interrupt
 * @vfe: VFE Device
 * @line_id: VFE line
 */
static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	struct vfe_output *output;
	unsigned long flags;

	spin_lock_irqsave(&vfe->output_lock, flags);
	output = &vfe->line[line_id].output;
	if (output->gen1.wait_sof) {
		output->gen1.wait_sof = 0;
		complete(&output->sof);
	}
	spin_unlock_irqrestore(&vfe->output_lock, flags);
}

/*
 * vfe_isr_reg_update - Process reg update interrupt
 * @vfe: VFE Device
 * @line_id: VFE line
 */
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	struct vfe_output *output;
	struct vfe_line *line = &vfe->line[line_id];
	unsigned long flags;

	spin_lock_irqsave(&vfe->output_lock, flags);
	vfe->ops->reg_update_clear(vfe, line_id);

	output = &line->output;

	if (output->wait_reg_update) {
		output->wait_reg_update = 0;
		complete(&output->reg_update);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
		return;
	}

	if (output->state == VFE_OUTPUT_STOPPING) {
		/* Release last buffer when hw is idle */
		if (output->last_buffer) {
			vb2_buffer_done(&output->last_buffer->vb.vb2_buf,
					VB2_BUF_STATE_DONE);
			output->last_buffer = NULL;
		}
		output->state = VFE_OUTPUT_IDLE;

		/*
		 * Buffers received in stopping state are queued in dma
		 * pending queue, start next capture here
		 */
		output->buf[0] = vfe_buf_get_pending(output);
		output->buf[1] = vfe_buf_get_pending(output);

		if (!output->buf[0] && output->buf[1]) {
			output->buf[0] = output->buf[1];
			output->buf[1] = NULL;
		}

		if (output->buf[0])
			output->state = VFE_OUTPUT_SINGLE;

		if (output->buf[1])
			output->state = VFE_OUTPUT_CONTINUOUS;

		switch (output->state) {
		case VFE_OUTPUT_SINGLE:
			vfe_output_frame_drop(vfe, output, 2);
			break;
		case VFE_OUTPUT_CONTINUOUS:
			vfe_output_frame_drop(vfe, output, 3);
			break;
		default:
			vfe_output_frame_drop(vfe, output, 0);
			break;
		}

		vfe_output_init_addrs(vfe, output, 1, &vfe->line[line_id]);
	}

	spin_unlock_irqrestore(&vfe->output_lock, flags);
}

/*
 * vfe_isr_wm_done - Process write master done interrupt
 * @vfe: VFE Device
 * @wm: Write master id
 */
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
{
	struct camss_buffer *ready_buf;
	struct vfe_output *output;
	dma_addr_t *new_addr;
	unsigned long flags;
	u32 active_index;
	u64 ts = ktime_get_ns();
	unsigned int i;

	active_index = vfe->ops_gen1->wm_get_ping_pong_status(vfe, wm);

	spin_lock_irqsave(&vfe->output_lock, flags);

	if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Received wm done for unmapped index\n");
		goto out_unlock;
	}
	output = &vfe->line[vfe->wm_output_map[wm]].output;

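	/* Note: the "&& 0" below keeps this consistency check disabled */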
	if (output->gen1.active_buf == active_index && 0) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Active buffer mismatch!\n");
		goto out_unlock;
	}
	output->gen1.active_buf = active_index;

	ready_buf = output->buf[!active_index];
	if (!ready_buf) {
		dev_err_ratelimited(vfe->camss->dev,
				    "Missing ready buf %d %d!\n",
				    !active_index, output->state);
		goto out_unlock;
	}

	ready_buf->vb.vb2_buf.timestamp = ts;
	ready_buf->vb.sequence = output->sequence++;

	/* Get next buffer */
	output->buf[!active_index] = vfe_buf_get_pending(output);
	if (!output->buf[!active_index]) {
		/* No next buffer - set same address */
		new_addr = ready_buf->addr;
		vfe_buf_update_wm_on_last(vfe, output);
	} else {
		new_addr = output->buf[!active_index]->addr;
		vfe_buf_update_wm_on_next(vfe, output);
	}

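	/* Reprogram the bank that has just completed with the next address */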
	if (active_index)
		for (i = 0; i < output->wm_num; i++)
			vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], new_addr[i]);
	else
		for (i = 0; i < output->wm_num; i++)
			vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], new_addr[i]);

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	if (output->state == VFE_OUTPUT_STOPPING)
		output->last_buffer = ready_buf;
	else
		vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);

	return;

out_unlock:
	spin_unlock_irqrestore(&vfe->output_lock, flags);
}

/*
 * vfe_queue_buffer - Add empty buffer
 * @vid: Video device structure
 * @buf: Buffer to be enqueued
 *
 * Add an empty buffer - depending on the current number of buffers it will be
 * put in pending buffer queue or directly given to the hardware to be filled.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_queue_buffer(struct camss_video *vid, struct camss_buffer *buf)
{
	struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output;
	unsigned long flags;

	output = &line->output;

	spin_lock_irqsave(&vfe->output_lock, flags);

	vfe_buf_update_wm_on_new(vfe, output, buf, line);

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;
}

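/*
 * CALC_WORD(width, M, N) is DIV_ROUND_UP(width * M, N): with M bytes per
 * pixel and N = 8 it yields the line length in 8-byte words
 */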
#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))

int vfe_word_per_line(u32 format, u32 width)
{
	int val = 0;

	switch (format) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		val = CALC_WORD(width, 1, 8);
		break;
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
		val = CALC_WORD(width, 2, 8);
		break;
	}

	return val;
}

const struct vfe_isr_ops vfe_isr_ops_gen1 = {
	.reset_ack = vfe_isr_reset_ack,
	.halt_ack = vfe_isr_halt_ack,
	.reg_update = vfe_isr_reg_update,
	.sof = vfe_isr_sof,
	.comp_done = vfe_isr_comp_done,
	.wm_done = vfe_isr_wm_done,
};

const struct camss_video_ops vfe_video_ops_gen1 = {
	.queue_buffer = vfe_queue_buffer,
	.flush_buffers = vfe_flush_buffers,
};