/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

#include "core_types.h"

#include "reg_helper.h"
#include "dcn10_dpp.h"
#include "basics/conversion.h"
#include "dcn10_cm_common.h"

#define NUM_PHASES    64
#define HORZ_MAX_TAPS 8
#define VERT_MAX_TAPS 8

#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR  0x8000

#define REG(reg)\
	dpp->tf_regs->reg

#define CTX \
	dpp->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	dpp->tf_shift->field_name, dpp->tf_mask->field_name

#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))


enum dcn10_coef_filter_type_sel {
	SCL_COEF_LUMA_VERT_FILTER = 0,
	SCL_COEF_LUMA_HORZ_FILTER = 1,
	SCL_COEF_CHROMA_VERT_FILTER = 2,
	SCL_COEF_CHROMA_HORZ_FILTER = 3,
	SCL_COEF_ALPHA_VERT_FILTER = 4,
	SCL_COEF_ALPHA_HORZ_FILTER = 5
};
enum dscl_autocal_mode {
	AUTOCAL_MODE_OFF = 0,

	/* Autocal calculates the scaling ratio and initial phase;
	 * DSCL_MODE_SEL must be set to 1
	 */
	AUTOCAL_MODE_AUTOSCALE = 1,
	/* Autocal performs auto centering without replication;
	 * DSCL_MODE_SEL must be set to 0
	 */
	AUTOCAL_MODE_AUTOCENTER = 2,
	/* Autocal performs auto centering and auto replication;
	 * DSCL_MODE_SEL must be set to 0
	 */
	AUTOCAL_MODE_AUTOREPLICATE = 3
};

enum dscl_mode_sel {
	DSCL_MODE_SCALING_444_BYPASS = 0,
	DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
	DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
	DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
	DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
	DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
	DSCL_MODE_DSCL_BYPASS = 6
};

static void program_gamut_remap(
		struct dcn10_dpp *dpp,
		const uint16_t *regval,
		enum gamut_remap_select select)
{
	uint16_t selection = 0;
	struct color_matrices_reg gam_regs;

	if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
			CM_GAMUT_REMAP_MODE, 0);
		return;
	}
	switch (select) {
	case GAMUT_REMAP_COEFF:
		selection = 1;
		break;
	case GAMUT_REMAP_COMA_COEFF:
		selection = 2;
		break;
	case GAMUT_REMAP_COMB_COEFF:
		selection = 3;
		break;
	default:
		break;
	}

	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;


	if (select == GAMUT_REMAP_COEFF) {
		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	} else if (select == GAMUT_REMAP_COMA_COEFF) {

		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	} else {

		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);
	}

	REG_SET(
			CM_GAMUT_REMAP_CONTROL, 0,
			CM_GAMUT_REMAP_MODE, selection);

}

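/* Program the gamut remap (3x4 color temperature) matrix from a SW
 * adjustment: the fixed-point matrix is converted to the HW register
 * format and written to the GAMUT_REMAP coefficient set; any other
 * adjustment type puts the remap block into bypass.
 */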
void dpp1_cm_set_gamut_remap(
	struct dpp *dpp_base,
	const struct dpp_grph_csc_adjustment *adjust)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	int i = 0;

	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
		/* Bypass if type is bypass or hw */
		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
	else {
		struct fixed31_32 arr_matrix[12];
		uint16_t arr_reg_val[12];

		for (i = 0; i < 12; i++)
			arr_matrix[i] = adjust->temperature_matrix[i];

		convert_float_matrix(
			arr_reg_val, arr_matrix, 12);

		program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
	}
}

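/* Read back the currently selected gamut remap mode and, when the block is
 * not in bypass, the coefficient set (GAMUT_REMAP, COMA or COMB) that the
 * mode points at.
 */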
static void read_gamut_remap(struct dcn10_dpp *dpp,
			     uint16_t *regval,
			     enum gamut_remap_select *select)
{
	struct color_matrices_reg gam_regs;
	uint32_t selection;

	REG_GET(CM_GAMUT_REMAP_CONTROL,
					CM_GAMUT_REMAP_MODE, &selection);

	*select = selection;

	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_GAMUT_REMAP_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;

	if (*select == GAMUT_REMAP_COEFF) {

		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);

		cm_helper_read_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	} else if (*select == GAMUT_REMAP_COMA_COEFF) {

		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);

		cm_helper_read_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	} else if (*select == GAMUT_REMAP_COMB_COEFF) {

		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);

		cm_helper_read_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);
	}
}

void dpp1_cm_get_gamut_remap(struct dpp *dpp_base,
			     struct dpp_grph_csc_adjustment *adjust)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	uint16_t arr_reg_val[12];
	enum gamut_remap_select select;

	read_gamut_remap(dpp, arr_reg_val, &select);

	if (select == GAMUT_REMAP_BYPASS) {
		adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
		return;
	}

	adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
	convert_hw_matrix(adjust->temperature_matrix,
			  arr_reg_val, ARRAY_SIZE(arr_reg_val));
}

static void dpp1_cm_program_color_matrix(
		struct dcn10_dpp *dpp,
		const uint16_t *regval)
{
	uint32_t ocsc_mode;
	uint32_t cur_mode;
	struct color_matrices_reg gam_regs;

	if (regval == NULL) {
		BREAK_TO_DEBUGGER();
		return;
	}

	/* Determine which CSC matrix (ocsc or comb) we are currently using,
	 * then select the alternate set to double buffer the CSC update so
	 * the CSC takes effect on a frame boundary.
	 */
	REG_SET(CM_TEST_DEBUG_INDEX, 0,
			CM_TEST_DEBUG_INDEX, 9);

	REG_GET(CM_TEST_DEBUG_DATA,
			CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode);

	if (cur_mode != 4)
		ocsc_mode = 4;
	else
		ocsc_mode = 5;

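	/* As implemented here, ocsc_mode 4 goes with the CM_OCSC_* coefficient
	 * registers and ocsc_mode 5 with the CM_COMB_* registers, so the write
	 * below always lands in the set that is not currently active.
	 */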
	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_OCSC_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;

	if (ocsc_mode == 4) {

		gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);

	} else {

		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);

	}

	cm_helper_program_color_matrices(
			dpp->base.ctx,
			regval,
			&gam_regs);

	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);

}

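/* Program the default output CSC matrix for the given color space; the
 * matrix is looked up via find_color_matrix() and written through the
 * double-buffered OCSC/COMB path above.
 */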
void dpp1_cm_set_output_csc_default(
		struct dpp *dpp_base,
		enum dc_color_space colorspace)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	const uint16_t *regval = NULL;
	int arr_size;

	regval = find_color_matrix(colorspace, &arr_size);
	if (regval == NULL) {
		BREAK_TO_DEBUGGER();
		return;
	}

	dpp1_cm_program_color_matrix(dpp, regval);
}

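/* Fill in the shift/mask pairs for the regamma transfer function registers.
 * A single set of shifts/masks is used for both RAM A and RAM B programming,
 * which assumes the two register sets share the same field layout.
 */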
static void dpp1_cm_get_reg_field(
		struct dcn10_dpp *dpp,
		struct xfer_func_reg *reg)
{
	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}

static void dpp1_cm_get_degamma_reg_field(
		struct dcn10_dpp *dpp,
		struct xfer_func_reg *reg)
{
	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}

void dpp1_cm_set_output_csc_adjustment(
		struct dpp *dpp_base,
		const uint16_t *regval)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	dpp1_cm_program_color_matrix(dpp, regval);
}

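/* Control the regamma LUT memory power state via RGAM_MEM_PWR_FORCE
 * (cleared when power_on is true, set otherwise).
 */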
void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
				  bool power_on)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SET(CM_MEM_PWR_CTRL, 0,
		RGAM_MEM_PWR_FORCE, power_on == true ? 0:1);

}

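/* Write the regamma PWL into the LUT data port: for each point the red, green
 * and blue base values are written first, followed by the three delta values.
 * dpp1_cm_configure_regamma_lut() is expected to have selected the target RAM
 * and reset CM_RGAM_LUT_INDEX before this is called.
 */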
void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,
				 const struct pwl_result_data *rgb,
				 uint32_t num)
{
	uint32_t i;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SEQ_START();

	for (i = 0 ; i < num; i++) {
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg);

	}

}

void dpp1_cm_configure_regamma_lut(
		struct dpp *dpp_base,
		bool is_ram_a)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
			CM_RGAM_LUT_WRITE_EN_MASK, 7);
	REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
			CM_RGAM_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
	REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0);
}

/* Program regamma RAM A */
void dpp1_cm_program_regamma_luta_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R);
	gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1);
	gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);

}

/* Program regamma RAM B */
void dpp1_cm_program_regamma_lutb_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R);
	gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1);
	gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}

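/* Program the input CSC: use the caller-provided matrix when tbl_entry is
 * given, otherwise look up the default matrix for the color space in
 * dpp_input_csc_matrix[]. The coefficients are double buffered between the
 * ICSC and COMA register sets, mirroring the OCSC/COMB scheme above.
 */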
void dpp1_program_input_csc(
		struct dpp *dpp_base,
		enum dc_color_space color_space,
		enum dcn10_input_csc_select input_select,
		const struct out_csc_color_matrix *tbl_entry)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	int i;
	int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
	const uint16_t *regval = NULL;
	uint32_t cur_select = 0;
	enum dcn10_input_csc_select select;
	struct color_matrices_reg gam_regs;

	if (input_select == INPUT_CSC_SELECT_BYPASS) {
		REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
		return;
	}

	if (tbl_entry == NULL) {
		for (i = 0; i < arr_size; i++)
			if (dpp_input_csc_matrix[i].color_space == color_space) {
				regval = dpp_input_csc_matrix[i].regval;
				break;
			}

		if (regval == NULL) {
			BREAK_TO_DEBUGGER();
			return;
		}
	} else {
		regval = tbl_entry->regval;
	}

	/* Determine which CSC matrix (icsc or coma) we are currently using,
	 * then select the alternate set to double buffer the CSC update so
	 * the CSC takes effect on a frame boundary.
	 */
	REG_SET(CM_TEST_DEBUG_INDEX, 0,
			CM_TEST_DEBUG_INDEX, 9);

	REG_GET(CM_TEST_DEBUG_DATA,
			CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select);

	if (cur_select != INPUT_CSC_SELECT_ICSC)
		select = INPUT_CSC_SELECT_ICSC;
	else
		select = INPUT_CSC_SELECT_COMA;

	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
	gam_regs.masks.csc_c11  = dpp->tf_mask->CM_ICSC_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;

	if (select == INPUT_CSC_SELECT_ICSC) {

		gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);

	} else {

		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);

	}

	cm_helper_program_color_matrices(
			dpp->base.ctx,
			regval,
			&gam_regs);

	REG_SET(CM_ICSC_CONTROL, 0,
				CM_ICSC_MODE, select);
}

// Keep here for now; decide multi-DCE support later.
void dpp1_program_bias_and_scale(
	struct dpp *dpp_base,
	struct dc_bias_and_scale *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SET_2(CM_BNS_VALUES_R, 0,
		CM_BNS_SCALE_R, params->scale_red,
		CM_BNS_BIAS_R, params->bias_red);

	REG_SET_2(CM_BNS_VALUES_G, 0,
		CM_BNS_SCALE_G, params->scale_green,
		CM_BNS_BIAS_G, params->bias_green);

	REG_SET_2(CM_BNS_VALUES_B, 0,
		CM_BNS_SCALE_B, params->scale_blue,
		CM_BNS_BIAS_B, params->bias_blue);

}

/* Program degamma RAM B */
void dpp1_program_degamma_lutb_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R);
	gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1);
	gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}

/* Program degamma RAM A */
void dpp1_program_degamma_luta_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R);
	gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1);
	gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}

void dpp1_power_on_degamma_lut(
		struct dpp *dpp_base,
		bool power_on)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SET(CM_MEM_PWR_CTRL, 0,
			SHARED_MEM_PWR_DIS, power_on ? 0:1);

}

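/* Take the color management block out of bypass and program the CM output
 * rounding/truncation mode before any CM sub-block is configured.
 */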
static void dpp1_enable_cm_block(
		struct dpp *dpp_base)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8);
	REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0);
}

void dpp1_set_degamma(
		struct dpp *dpp_base,
		enum ipp_degamma_mode mode)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	dpp1_enable_cm_block(dpp_base);

	switch (mode) {
	case IPP_DEGAMMA_MODE_BYPASS:
		/* Set degamma to bypass for now */
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0);
		break;
	case IPP_DEGAMMA_MODE_HW_sRGB:
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1);
		break;
	case IPP_DEGAMMA_MODE_HW_xvYCC:
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
		break;
	case IPP_DEGAMMA_MODE_USER_PWL:
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}

	REG_SEQ_SUBMIT();
	REG_SEQ_WAIT_DONE();
}

void dpp1_degamma_ram_select(
		struct dpp *dpp_base,
		bool use_ram_a)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	if (use_ram_a)
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
	else
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4);

}

static bool dpp1_degamma_ram_inuse(
		struct dpp *dpp_base,
		bool *ram_a_inuse)
{
	bool ret = false;
	uint32_t status_reg = 0;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
			&status_reg);

	if (status_reg == 9) {
		*ram_a_inuse = true;
		ret = true;
	} else if (status_reg == 10) {
		*ram_a_inuse = false;
		ret = true;
	}
	return ret;
}

void dpp1_program_degamma_lut(
		struct dpp *dpp_base,
		const struct pwl_result_data *rgb,
		uint32_t num,
		bool is_ram_a)
{
	uint32_t i;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0);
	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK,
				   CM_DGAM_LUT_WRITE_EN_MASK, 7);
	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL,
					is_ram_a == true ? 0:1);

	REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
	for (i = 0 ; i < num; i++) {
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_DGAM_LUT_DATA, 0,
				CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0,
				CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0,
				CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
	}
}

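/* Program a user degamma PWL using the double-buffered degamma RAMs: power on
 * the LUT memory, enable the CM block, find which RAM is currently in use,
 * write the settings and LUT values into the other RAM, and then switch the
 * degamma mode over to it.
 */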
void dpp1_set_degamma_pwl(struct dpp *dpp_base,
			  const struct pwl_params *params)
{
	bool is_ram_a = true;

	dpp1_power_on_degamma_lut(dpp_base, true);
	dpp1_enable_cm_block(dpp_base);
	dpp1_degamma_ram_inuse(dpp_base, &is_ram_a);
	if (is_ram_a == true)
		dpp1_program_degamma_lutb_settings(dpp_base, params);
	else
		dpp1_program_degamma_luta_settings(dpp_base, params);

	dpp1_program_degamma_lut(dpp_base, params->rgb_resulted,
				 params->hw_points_num, !is_ram_a);
	dpp1_degamma_ram_select(dpp_base, !is_ram_a);
}

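/* Put the DPP into full bypass: program the CNVC for ARGB8888 input with zero
 * expansion and alpha disabled, bypass the CM block, and leave degamma off.
 */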
void dpp1_full_bypass(struct dpp *dpp_base)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	/* Input pixel format: ARGB8888 */
	REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
			CNVC_SURFACE_PIXEL_FORMAT, 0x8);

	/* Zero expansion */
	REG_SET_3(FORMAT_CONTROL, 0,
			CNVC_BYPASS, 0,
			FORMAT_CONTROL__ALPHA_EN, 0,
			FORMAT_EXPANSION_MODE, 0);

	/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
	if (dpp->tf_mask->CM_BYPASS_EN)
		REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
	else
		REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);

	/* Setting degamma bypass for now */
	REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
}

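/* Report which input gamma RAM is currently in use by decoding
 * CM_IGAM_DGAM_CONFIG_STATUS; returns false when the status does not
 * indicate either RAM.
 */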
static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base,
				   bool *ram_a_inuse)
{
	bool in_use = false;
	uint32_t status_reg = 0;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
			&status_reg);

	// 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB
	if (status_reg == 1 || status_reg == 3 || status_reg == 4) {
		*ram_a_inuse = true;
		in_use = true;
	// 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB
	} else if (status_reg == 2 || status_reg == 5 || status_reg == 6) {
		*ram_a_inuse = false;
		in_use = true;
	}
	return in_use;
}

/*
 * Input gamma LUT currently supports 256 values only. This means input color
 * can have a maximum of 8 bits per channel (= 256 possible values) in order to
 * have a one-to-one mapping with the LUT. Truncation will occur with color
 * values greater than 8 bits.
 *
 * In the future, this function should support additional input gamma methods,
 * such as piecewise linear mapping, and input gamma bypass.
 */
void dpp1_program_input_lut(
		struct dpp *dpp_base,
		const struct dc_gamma *gamma)
{
	int i;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	bool rama_occupied = false;
	uint32_t ram_num;

	// Power on LUT memory.
	REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1);
	dpp1_enable_cm_block(dpp_base);
	// Determine whether to use RAM A or RAM B
	dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied);
	if (!rama_occupied)
		REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0);
	else
		REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1);
	// RW mode is 256-entry LUT
	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0);
	// IGAM input format should be 8 bits per channel.
	REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0);
	// Do not mask any R,G,B values
	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7);
	// LUT-256, unsigned, integer, new u0.12 format
	REG_UPDATE_3(
		CM_IGAM_CONTROL,
		CM_IGAM_LUT_FORMAT_R, 3,
		CM_IGAM_LUT_FORMAT_G, 3,
		CM_IGAM_LUT_FORMAT_B, 3);
	// Start at index 0 of IGAM LUT
	REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
	for (i = 0; i < gamma->num_entries; i++) {
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.red[i]));
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.green[i]));
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.blue[i]));
	}
	// Power off LUT memory
	REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0);
	// Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB
	REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
	REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
}

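/* Program the HDR multiplier; the caller provides the value already encoded
 * in the CM_HDR_MULT_COEF register format.
 */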
void dpp1_set_hdr_multiplier(
		struct dpp *dpp_base,
		uint32_t multiplier)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}