1/*	$NetBSD: amdgpu_dcn10_dpp_cm.c,v 1.2 2021/12/18 23:45:03 riastradh Exp $	*/
2
3/*
4 * Copyright 2016 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: AMD
25 *
26 */
27
28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: amdgpu_dcn10_dpp_cm.c,v 1.2 2021/12/18 23:45:03 riastradh Exp $");
30
31#include "dm_services.h"
32
33#include "core_types.h"
34
35#include "reg_helper.h"
36#include "dcn10_dpp.h"
37#include "basics/conversion.h"
38#include "dcn10_cm_common.h"
39
40#define NUM_PHASES    64
41#define HORZ_MAX_TAPS 8
42#define VERT_MAX_TAPS 8
43
44#define BLACK_OFFSET_RGB_Y 0x0
45#define BLACK_OFFSET_CBCR  0x8000
46
47#define REG(reg)\
48	dpp->tf_regs->reg
49
50#define CTX \
51	dpp->base.ctx
52
53#undef FN
54#define FN(reg_name, field_name) \
55	dpp->tf_shift->field_name, dpp->tf_mask->field_name
56
57#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
58
59
60enum dcn10_coef_filter_type_sel {
61	SCL_COEF_LUMA_VERT_FILTER = 0,
62	SCL_COEF_LUMA_HORZ_FILTER = 1,
63	SCL_COEF_CHROMA_VERT_FILTER = 2,
64	SCL_COEF_CHROMA_HORZ_FILTER = 3,
65	SCL_COEF_ALPHA_VERT_FILTER = 4,
66	SCL_COEF_ALPHA_HORZ_FILTER = 5
67};
68
69enum dscl_autocal_mode {
70	AUTOCAL_MODE_OFF = 0,
71
72	/* Autocal calculate the scaling ratio and initial phase and the
73	 * DSCL_MODE_SEL must be set to 1
74	 */
75	AUTOCAL_MODE_AUTOSCALE = 1,
76	/* Autocal perform auto centering without replication and the
77	 * DSCL_MODE_SEL must be set to 0
78	 */
79	AUTOCAL_MODE_AUTOCENTER = 2,
80	/* Autocal perform auto centering and auto replication and the
81	 * DSCL_MODE_SEL must be set to 0
82	 */
83	AUTOCAL_MODE_AUTOREPLICATE = 3
84};
85
86enum dscl_mode_sel {
87	DSCL_MODE_SCALING_444_BYPASS = 0,
88	DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
89	DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
90	DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
91	DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
92	DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
93	DSCL_MODE_DSCL_BYPASS = 6
94};
95
96static void program_gamut_remap(
97		struct dcn10_dpp *dpp,
98		const uint16_t *regval,
99		enum gamut_remap_select select)
100{
101	uint16_t selection = 0;
102	struct color_matrices_reg gam_regs;
103
104	if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
105		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
106				CM_GAMUT_REMAP_MODE, 0);
107		return;
108	}
109	switch (select) {
110	case GAMUT_REMAP_COEFF:
111		selection = 1;
112		break;
113	case GAMUT_REMAP_COMA_COEFF:
114		selection = 2;
115		break;
116	case GAMUT_REMAP_COMB_COEFF:
117		selection = 3;
118		break;
119	default:
120		break;
121	}
122
123	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
124	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
125	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
126	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
127
128
129	if (select == GAMUT_REMAP_COEFF) {
130		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
131		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
132
133		cm_helper_program_color_matrices(
134				dpp->base.ctx,
135				regval,
136				&gam_regs);
137
138	} else  if (select == GAMUT_REMAP_COMA_COEFF) {
139
140		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
141		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
142
143		cm_helper_program_color_matrices(
144				dpp->base.ctx,
145				regval,
146				&gam_regs);
147
148	} else {
149
150		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
151		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
152
153		cm_helper_program_color_matrices(
154				dpp->base.ctx,
155				regval,
156				&gam_regs);
157	}
158
159	REG_SET(
160			CM_GAMUT_REMAP_CONTROL, 0,
161			CM_GAMUT_REMAP_MODE, selection);
162
163}
164
165void dpp1_cm_set_gamut_remap(
166	struct dpp *dpp_base,
167	const struct dpp_grph_csc_adjustment *adjust)
168{
169	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
170	int i = 0;
171
172	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
173		/* Bypass if type is bypass or hw */
174		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
175	else {
176		struct fixed31_32 arr_matrix[12];
177		uint16_t arr_reg_val[12];
178
179		for (i = 0; i < 12; i++)
180			arr_matrix[i] = adjust->temperature_matrix[i];
181
182		convert_float_matrix(
183			arr_reg_val, arr_matrix, 12);
184
185		program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
186	}
187}
188
189static void dpp1_cm_program_color_matrix(
190		struct dcn10_dpp *dpp,
191		const uint16_t *regval)
192{
193	uint32_t ocsc_mode;
194	uint32_t cur_mode;
195	struct color_matrices_reg gam_regs;
196
197	if (regval == NULL) {
198		BREAK_TO_DEBUGGER();
199		return;
200	}
201
202	/* determine which CSC matrix (ocsc or comb) we are using
203	 * currently.  select the alternate set to double buffer
204	 * the CSC update so CSC is updated on frame boundary
205	 */
206	REG_SET(CM_TEST_DEBUG_INDEX, 0,
207			CM_TEST_DEBUG_INDEX, 9);
208
209	REG_GET(CM_TEST_DEBUG_DATA,
210			CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode);
211
212	if (cur_mode != 4)
213		ocsc_mode = 4;
214	else
215		ocsc_mode = 5;
216
217
218	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
219	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_OCSC_C11;
220	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
221	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;
222
223	if (ocsc_mode == 4) {
224
225		gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
226		gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);
227
228	} else {
229
230		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
231		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
232
233	}
234
235	cm_helper_program_color_matrices(
236			dpp->base.ctx,
237			regval,
238			&gam_regs);
239
240	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
241
242}
243
244void dpp1_cm_set_output_csc_default(
245		struct dpp *dpp_base,
246		enum dc_color_space colorspace)
247{
248	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
249	const uint16_t *regval = NULL;
250	int arr_size;
251
252	regval = find_color_matrix(colorspace, &arr_size);
253	if (regval == NULL) {
254		BREAK_TO_DEBUGGER();
255		return;
256	}
257
258	dpp1_cm_program_color_matrix(dpp, regval);
259}
260
/*
 * Fill @reg with the shift/mask pairs for the regamma (RGAM)
 * transfer-function segment registers.  Region fields are taken
 * from the RAMA register definitions and the start/end fields from
 * the RAMB blue-channel definitions; both the RAM A and RAM B
 * callers use this single set (see
 * dpp1_cm_program_regamma_luta/lutb_settings), so the field layout
 * is presumably identical across RAMs and channels -- confirm
 * against the register headers.
 */
static void dpp1_cm_get_reg_field(
		struct dcn10_dpp *dpp,
		struct xfer_func_reg *reg)
{
	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B;
	/* "resion" is a long-standing typo in the shared struct field name. */
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}
287
/*
 * Fill @reg with the shift/mask pairs for the degamma (DGAM)
 * transfer-function segment registers.  Mirrors
 * dpp1_cm_get_reg_field() but for the DGAM register set; used by
 * both the RAM A and RAM B degamma programming paths.
 */
static void dpp1_cm_get_degamma_reg_field(
		struct dcn10_dpp *dpp,
		struct xfer_func_reg *reg)
{
	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B;
	/* "resion" is a long-standing typo in the shared struct field name. */
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}
/*
 * Program caller-supplied output CSC coefficients (register-format
 * uint16_t array) through the double-buffered matrix path.
 */
void dpp1_cm_set_output_csc_adjustment(
		struct dpp *dpp_base,
		const uint16_t *regval)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	dpp1_cm_program_color_matrix(dpp, regval);
}
322
323void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
324				  bool power_on)
325{
326	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
327
328	REG_SET(CM_MEM_PWR_CTRL, 0,
329		RGAM_MEM_PWR_FORCE, power_on == true ? 0:1);
330
331}
332
333void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,
334				 const struct pwl_result_data *rgb,
335				 uint32_t num)
336{
337	uint32_t i;
338	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
339
340	REG_SEQ_START();
341
342	for (i = 0 ; i < num; i++) {
343		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
344		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
345		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg);
346
347		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg);
348		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg);
349		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg);
350
351	}
352
353}
354
355void dpp1_cm_configure_regamma_lut(
356		struct dpp *dpp_base,
357		bool is_ram_a)
358{
359	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
360
361	REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
362			CM_RGAM_LUT_WRITE_EN_MASK, 7);
363	REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
364			CM_RGAM_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
365	REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0);
366}
367
/*
 * Program the regamma transfer-function control registers for
 * RAM A: segment layout, start/end points, and slopes per channel.
 * The shift/mask pairs are filled by dpp1_cm_get_reg_field(); the
 * actual PWL programming is done by cm_helper_program_xfer_func().
 */
void dpp1_cm_program_regamma_luta_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R);
	/* Regamma uses 34 regions (register pairs 0_1 .. 32_33). */
	gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1);
	gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);

}
396
/*
 * Program the regamma transfer-function control registers for
 * RAM B.  Identical structure to the RAM A variant above, using
 * the RAMB register set.
 */
void dpp1_cm_program_regamma_lutb_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R);
	gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1);
	gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}
424
/*
 * Program the input CSC, double buffered between the ICSC and COMA
 * coefficient sets.
 *
 * When @tbl_entry is NULL the coefficients come from the built-in
 * dpp_input_csc_matrix table keyed by @color_space; otherwise the
 * caller-supplied matrix is used.  INPUT_CSC_SELECT_BYPASS disables
 * the CSC entirely.  Note that apart from the bypass check the
 * caller's @input_select is ignored: the icsc/coma choice is
 * re-derived from hardware read-back so the inactive set is always
 * the one written.
 */
void dpp1_program_input_csc(
		struct dpp *dpp_base,
		enum dc_color_space color_space,
		enum dcn10_input_csc_select input_select,
		const struct out_csc_color_matrix *tbl_entry)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	int i;
	int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
	const uint16_t *regval = NULL;
	uint32_t cur_select = 0;
	enum dcn10_input_csc_select select;
	struct color_matrices_reg gam_regs;

	if (input_select == INPUT_CSC_SELECT_BYPASS) {
		REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
		return;
	}

	if (tbl_entry == NULL) {
		/* No caller matrix: fall back to the per-colorspace table. */
		for (i = 0; i < arr_size; i++)
			if (dpp_input_csc_matrix[i].color_space == color_space) {
				regval = dpp_input_csc_matrix[i].regval;
				break;
			}

		if (regval == NULL) {
			/* Colorspace has no table entry. */
			BREAK_TO_DEBUGGER();
			return;
		}
	} else {
		regval = tbl_entry->regval;
	}

	/* determine which CSC matrix (icsc or coma) we are using
	 * currently.  select the alternate set to double buffer
	 * the CSC update so CSC is updated on frame boundary
	 */
	REG_SET(CM_TEST_DEBUG_INDEX, 0,
			CM_TEST_DEBUG_INDEX, 9);

	REG_GET(CM_TEST_DEBUG_DATA,
			CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select);

	if (cur_select != INPUT_CSC_SELECT_ICSC)
		select = INPUT_CSC_SELECT_ICSC;
	else
		select = INPUT_CSC_SELECT_COMA;

	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_ICSC_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;

	if (select == INPUT_CSC_SELECT_ICSC) {

		gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);

	} else {

		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);

	}

	cm_helper_program_color_matrices(
			dpp->base.ctx,
			regval,
			&gam_regs);

	/* Switch hardware to the freshly written set. */
	REG_SET(CM_ICSC_CONTROL, 0,
				CM_ICSC_MODE, select);
}
499
//keep here for now, decide multi dce support later
/*
 * Program per-channel bias and scale values into the CM BNS
 * (bias-and-scale) registers.  Values are written in register
 * format as provided by the caller.
 */
void dpp1_program_bias_and_scale(
	struct dpp *dpp_base,
	struct dc_bias_and_scale *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SET_2(CM_BNS_VALUES_R, 0,
		CM_BNS_SCALE_R, params->scale_red,
		CM_BNS_BIAS_R, params->bias_red);

	REG_SET_2(CM_BNS_VALUES_G, 0,
		CM_BNS_SCALE_G, params->scale_green,
		CM_BNS_BIAS_G, params->bias_green);

	REG_SET_2(CM_BNS_VALUES_B, 0,
		CM_BNS_SCALE_B, params->scale_blue,
		CM_BNS_BIAS_B, params->bias_blue);

}
520
/*
 * Program the degamma transfer-function control registers for
 * RAM B: segment layout, start/end points, and slopes per channel.
 * Shift/mask pairs come from dpp1_cm_get_degamma_reg_field(); the
 * PWL itself is programmed by cm_helper_program_xfer_func().
 */
void dpp1_program_degamma_lutb_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R);
	/* Degamma uses 16 regions (register pairs 0_1 .. 14_15). */
	gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1);
	gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15);


	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}
549
/*
 * Program the degamma transfer-function control registers for
 * RAM A.  Identical structure to the RAM B variant above, using
 * the RAMA register set.
 */
void dpp1_program_degamma_luta_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R);
	gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1);
	gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}
577
578void dpp1_power_on_degamma_lut(
579		struct dpp *dpp_base,
580	bool power_on)
581{
582	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
583
584	REG_SET(CM_MEM_PWR_CTRL, 0,
585			SHARED_MEM_PWR_DIS, power_on == true ? 0:1);
586
587}
588
/*
 * Take the color module out of bypass.  The CMOUT round/truncate
 * mode is set to 8 -- the meaning of that value is not visible in
 * this file; see the CM_CMOUT_CONTROL register description.
 */
static void dpp1_enable_cm_block(
		struct dpp *dpp_base)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8);
	/* Un-bypass the color module. */
	REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0);
}
597
598void dpp1_set_degamma(
599		struct dpp *dpp_base,
600		enum ipp_degamma_mode mode)
601{
602	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
603	dpp1_enable_cm_block(dpp_base);
604
605	switch (mode) {
606	case IPP_DEGAMMA_MODE_BYPASS:
607		/* Setting de gamma bypass for now */
608		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0);
609		break;
610	case IPP_DEGAMMA_MODE_HW_sRGB:
611		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1);
612		break;
613	case IPP_DEGAMMA_MODE_HW_xvYCC:
614		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
615			break;
616	case IPP_DEGAMMA_MODE_USER_PWL:
617		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
618		break;
619	default:
620		BREAK_TO_DEBUGGER();
621		break;
622	}
623
624	REG_SEQ_SUBMIT();
625	REG_SEQ_WAIT_DONE();
626}
627
628void dpp1_degamma_ram_select(
629		struct dpp *dpp_base,
630							bool use_ram_a)
631{
632	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
633
634	if (use_ram_a)
635		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
636	else
637		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4);
638
639}
640
641static bool dpp1_degamma_ram_inuse(
642		struct dpp *dpp_base,
643							bool *ram_a_inuse)
644{
645	bool ret = false;
646	uint32_t status_reg = 0;
647	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
648
649	REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
650			&status_reg);
651
652	if (status_reg == 9) {
653		*ram_a_inuse = true;
654		ret = true;
655	} else if (status_reg == 10) {
656		*ram_a_inuse = false;
657		ret = true;
658	}
659	return ret;
660}
661
662void dpp1_program_degamma_lut(
663		struct dpp *dpp_base,
664		const struct pwl_result_data *rgb,
665		uint32_t num,
666		bool is_ram_a)
667{
668	uint32_t i;
669
670	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
671	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0);
672	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK,
673				   CM_DGAM_LUT_WRITE_EN_MASK, 7);
674	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL,
675					is_ram_a == true ? 0:1);
676
677	REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
678	for (i = 0 ; i < num; i++) {
679		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
680		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
681		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);
682
683		REG_SET(CM_DGAM_LUT_DATA, 0,
684				CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
685		REG_SET(CM_DGAM_LUT_DATA, 0,
686				CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
687		REG_SET(CM_DGAM_LUT_DATA, 0,
688				CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
689	}
690}
691
692void dpp1_set_degamma_pwl(struct dpp *dpp_base,
693								 const struct pwl_params *params)
694{
695	bool is_ram_a = true;
696
697	dpp1_power_on_degamma_lut(dpp_base, true);
698	dpp1_enable_cm_block(dpp_base);
699	dpp1_degamma_ram_inuse(dpp_base, &is_ram_a);
700	if (is_ram_a == true)
701		dpp1_program_degamma_lutb_settings(dpp_base, params);
702	else
703		dpp1_program_degamma_luta_settings(dpp_base, params);
704
705	dpp1_program_degamma_lut(dpp_base, params->rgb_resulted,
706							params->hw_points_num, !is_ram_a);
707	dpp1_degamma_ram_select(dpp_base, !is_ram_a);
708}
709
/*
 * Put the whole DPP into a minimal pass-through configuration:
 * ARGB8888 input, no alpha/expansion, color module bypassed and
 * degamma disabled.
 */
void dpp1_full_bypass(struct dpp *dpp_base)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	/* Input pixel format: ARGB8888 */
	REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
			CNVC_SURFACE_PIXEL_FORMAT, 0x8);

	/* Zero expansion */
	REG_SET_3(FORMAT_CONTROL, 0,
			CNVC_BYPASS, 0,
			FORMAT_CONTROL__ALPHA_EN, 0,
			FORMAT_EXPANSION_MODE, 0);

	/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
	/* Some register maps name the bypass field CM_BYPASS rather than
	 * CM_BYPASS_EN; use whichever mask is populated. */
	if (dpp->tf_mask->CM_BYPASS_EN)
		REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
	else
		REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);

	/* Setting degamma bypass for now */
	REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
}
733
734static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base,
735							bool *ram_a_inuse)
736{
737	bool in_use = false;
738	uint32_t status_reg = 0;
739	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
740
741	REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
742				&status_reg);
743
744	// 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB
745	if (status_reg == 1 || status_reg == 3 || status_reg == 4) {
746		*ram_a_inuse = true;
747		in_use = true;
748	// 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB
749	} else if (status_reg == 2 || status_reg == 5 || status_reg == 6) {
750		*ram_a_inuse = false;
751		in_use = true;
752	}
753	return in_use;
754}
755
756/*
757 * Input gamma LUT currently supports 256 values only. This means input color
758 * can have a maximum of 8 bits per channel (= 256 possible values) in order to
759 * have a one-to-one mapping with the LUT. Truncation will occur with color
760 * values greater than 8 bits.
761 *
762 * In the future, this function should support additional input gamma methods,
763 * such as piecewise linear mapping, and input gamma bypass.
764 */
/*
 * Load an input gamma LUT, double buffered between IGAM RAM A and
 * RAM B: the inactive RAM is written and then enabled.  Color
 * values are rounded to the LUT's u0.12 register format.  Assumes
 * gamma->num_entries fits the 256-entry RW mode -- larger gammas
 * would truncate (see the comment block above this function).
 */
void dpp1_program_input_lut(
		struct dpp *dpp_base,
		const struct dc_gamma *gamma)
{
	int i;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	bool rama_occupied = false;
	uint32_t ram_num;
	// Power on LUT memory.
	// NOTE(review): dpp1_power_on_degamma_lut() maps power-on to
	// SHARED_MEM_PWR_DIS=0, the opposite polarity -- confirm.
	REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1);
	dpp1_enable_cm_block(dpp_base);
	// Determine whether to use RAM A or RAM B
	dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied);
	if (!rama_occupied)
		REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0);
	else
		REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1);
	// RW mode is 256-entry LUT
	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0);
	// IGAM Input format should be 8 bits per channel.
	REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0);
	// Do not mask any R,G,B values
	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7);
	// LUT-256, unsigned, integer, new u0.12 format
	REG_UPDATE_3(
		CM_IGAM_CONTROL,
		CM_IGAM_LUT_FORMAT_R, 3,
		CM_IGAM_LUT_FORMAT_G, 3,
		CM_IGAM_LUT_FORMAT_B, 3);
	// Start at index 0 of IGAM LUT
	REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
	// Each entry is written as three words: R, then G, then B.
	for (i = 0; i < gamma->num_entries; i++) {
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.red[i]));
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.green[i]));
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.blue[i]));
	}
	// Power off LUT memory
	REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0);
	// Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB
	REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
	// NOTE(review): read-back result is unused; presumably this
	// forces the preceding write to post -- confirm.
	REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
}
813
/*
 * Program the color module's HDR multiplier coefficient.
 * @multiplier is written as-is in register format.
 */
void dpp1_set_hdr_multiplier(
		struct dpp *dpp_base,
		uint32_t multiplier)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}
822