// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitops.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "skl_watermark.h"

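/*
 * The PM demand (pmdemand) mechanism, present on display version 14+
 * platforms, lets the display driver report its resource demands (active
 * pipes, dbuf slices, PHYs, PLLs, cdclk/ddiclk frequencies, bandwidth) to
 * the PUnit through the PMDEMAND request registers. The parameters are
 * tracked as an atomic global state object below.
 */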
static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return NULL;

	return &pmdemand_state->base;
}

static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
					 struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_pmdemand_funcs = {
	.atomic_duplicate_state = intel_pmdemand_duplicate_state,
	.atomic_destroy_state = intel_pmdemand_destroy_state,
};

static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_global_obj_state(state,
						  &i915->display.pmdemand.obj);

	if (IS_ERR(pmdemand_state))
		return ERR_CAST(pmdemand_state);

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_old_global_obj_state(state,
						      &i915->display.pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_new_global_obj_state(state,
						      &i915->display.pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

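/*
 * Allocate the pmdemand software state and register it as an atomic
 * global state object. Also applies Wa_14016740474 on affected display
 * version 14 steppings.
 */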
int intel_pmdemand_init(struct drm_i915_private *i915)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(i915, &i915->display.pmdemand.obj,
				     &pmdemand_state->base,
				     &intel_pmdemand_funcs);

	if (IS_DISPLAY_IP_STEP(i915, IP_VER(14, 0), STEP_A0, STEP_C0))
		/* Wa_14016740474 */
		intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);

	return 0;
}

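/* Set up the lock and waitqueue used by the pmdemand code. */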
void intel_pmdemand_init_early(struct drm_i915_private *i915)
{
	mutex_init(&i915->display.pmdemand.lock);
	init_waitqueue_head(&i915->display.pmdemand.waitqueue);
}

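/*
 * Track which combo PHYs are active for the given encoder. TC PHYs are
 * skipped here; only non-TC (combo) PHYs count towards the pmdemand
 * PHY parameter.
 */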
void
intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
				struct intel_encoder *encoder,
				struct intel_pmdemand_state *pmdemand_state,
				bool set_bit)
{
	enum phy phy;

	if (DISPLAY_VER(i915) < 14)
		return;

	if (!encoder)
		return;

	phy = intel_port_to_phy(i915, encoder->port);
	if (intel_phy_is_tc(i915, phy))
		return;

	if (set_bit)
		pmdemand_state->active_combo_phys_mask |= BIT(phy);
	else
		pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
}

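/* Record the port clock driving the given pipe's DDI. */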
void
intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
				 struct intel_pmdemand_state *pmdemand_state,
				 enum pipe pipe, int port_clock)
{
	if (DISPLAY_VER(i915) < 14)
		return;

	pmdemand_state->ddi_clocks[pipe] = port_clock;
}

static void
intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
				 struct intel_atomic_state *state,
				 struct intel_pmdemand_state *pmdemand_state)
{
	int max_ddiclk = 0;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_pmdemand_update_port_clock(i915, pmdemand_state,
						 crtc->pipe,
						 new_crtc_state->port_clock);

	for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
		max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);

	pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
}

static void
intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
				     struct intel_atomic_state *state,
				     struct drm_connector_state *conn_state,
				     bool set_bit,
				     struct intel_pmdemand_state *pmdemand_state)
{
	struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
	struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
	struct intel_crtc_state *crtc_state;

	if (!crtc)
		return;

	if (set_bit)
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	else
		crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!crtc_state->hw.active)
		return;

	intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state,
					set_bit);
}

static void
intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
					 struct intel_atomic_state *state,
					 struct intel_pmdemand_state *pmdemand_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		if (!intel_connector_needs_modeset(state, connector))
			continue;

		/* First clear the active phys in the old connector state */
		intel_pmdemand_update_connector_phys(i915, state,
						     old_conn_state, false,
						     pmdemand_state);

		/* Then set the active phys in the new connector state */
		intel_pmdemand_update_connector_phys(i915, state,
						     new_conn_state, true,
						     pmdemand_state);
	}

	pmdemand_state->params.active_phys =
		min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
		      7);
}

static bool
intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
				  struct intel_encoder *encoder)
{
	enum phy phy;

	if (!encoder)
		return false;

	phy = intel_port_to_phy(i915, encoder->port);

	return intel_phy_is_tc(i915, phy);
}

static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		struct intel_encoder *old_encoder =
			to_intel_encoder(old_conn_state->best_encoder);
		struct intel_encoder *new_encoder =
			to_intel_encoder(new_conn_state->best_encoder);

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		if (old_encoder == new_encoder ||
		    (intel_pmdemand_encoder_has_tc_phy(i915, old_encoder) &&
		     intel_pmdemand_encoder_has_tc_phy(i915, new_encoder)))
			continue;

		return true;
	}

	return false;
}

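/*
 * Check whether any input to the pmdemand parameters (QGV peak bandwidth,
 * active pipes, dbuf slices, cdclk, port clocks or connector/encoder
 * routing) changes in this atomic state.
 */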
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
	const struct intel_bw_state *new_bw_state, *old_bw_state;
	const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
	struct intel_crtc *crtc;
	int i;

	new_bw_state = intel_atomic_get_new_bw_state(state);
	old_bw_state = intel_atomic_get_old_bw_state(state);
	if (new_bw_state && new_bw_state->qgv_point_peakbw !=
	    old_bw_state->qgv_point_peakbw)
		return true;

	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
	if (new_dbuf_state &&
	    (new_dbuf_state->active_pipes !=
	     old_dbuf_state->active_pipes ||
	     new_dbuf_state->enabled_slices !=
	     old_dbuf_state->enabled_slices))
		return true;

	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	if (new_cdclk_state &&
	    (new_cdclk_state->actual.cdclk !=
	     old_cdclk_state->actual.cdclk ||
	     new_cdclk_state->actual.voltage_level !=
	     old_cdclk_state->actual.voltage_level))
		return true;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		if (new_crtc_state->port_clock != old_crtc_state->port_clock)
			return true;

	return intel_pmdemand_connector_needs_update(state);
}

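/*
 * Derive the new pmdemand parameters from the bw, dbuf and cdclk global
 * states, and either serialize or lock the pmdemand global state,
 * depending on whether a modeset is allowed.
 */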
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_bw_state *new_bw_state;
	const struct intel_cdclk_state *new_cdclk_state;
	const struct intel_dbuf_state *new_dbuf_state;
	struct intel_pmdemand_state *new_pmdemand_state;

	if (DISPLAY_VER(i915) < 14)
		return 0;

	if (!intel_pmdemand_needs_update(state))
		return 0;

	new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
	if (IS_ERR(new_pmdemand_state))
		return PTR_ERR(new_pmdemand_state);

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	/* Firmware calculates the qclk_gv_index, so set the requirement to 0 */
	new_pmdemand_state->params.qclk_gv_index = 0;
	new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;

	new_dbuf_state = intel_atomic_get_dbuf_state(state);
	if (IS_ERR(new_dbuf_state))
		return PTR_ERR(new_dbuf_state);

	new_pmdemand_state->params.active_pipes =
		min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
	new_pmdemand_state->params.active_dbufs =
		min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);

	new_cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(new_cdclk_state))
		return PTR_ERR(new_cdclk_state);

	new_pmdemand_state->params.voltage_index =
		new_cdclk_state->actual.voltage_level;
	new_pmdemand_state->params.cdclk_freq_mhz =
		DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);

	intel_pmdemand_update_max_ddiclk(i915, state, new_pmdemand_state);

	intel_pmdemand_update_active_non_tc_phys(i915, state, new_pmdemand_state);

	/*
	 * Active_PLLs starts at 1 to account for the CDCLK PLL.
	 * TODO: Also account for the genlock filter once it gets used.
	 */
	new_pmdemand_state->params.plls =
		min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

	/*
	 * Set scalers to max, as the value cannot be calculated during flips
	 * and fastsets without taking the global state locks.
	 */
	new_pmdemand_state->params.scalers = 7;

	if (state->base.allow_modeset)
		return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
	else
		return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}

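/*
 * Any previous transaction is complete once both the request-enable bit
 * and the in-flight status bit have cleared.
 */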
static bool intel_pmdemand_check_prev_transaction(struct drm_i915_private *i915)
{
	return !(intel_de_wait_for_clear(i915,
					 XELPDP_INITIATE_PMDEMAND_REQUEST(1),
					 XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
		 intel_de_wait_for_clear(i915,
					 GEN12_DCPR_STATUS_1,
					 XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}

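/*
 * Read the current PMDEMAND register values back into the software state,
 * so that the state starts out in sync with the hardware.
 */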
void
intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
				    struct intel_pmdemand_state *pmdemand_state)
{
	u32 reg1, reg2;

	if (DISPLAY_VER(i915) < 14)
		return;

	mutex_lock(&i915->display.pmdemand.lock);
	if (drm_WARN_ON(&i915->drm,
			!intel_pmdemand_check_prev_transaction(i915))) {
		memset(&pmdemand_state->params, 0,
		       sizeof(pmdemand_state->params));
		goto unlock;
	}

	reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

	reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

	/* Set 1 */
	pmdemand_state->params.qclk_gv_bw =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
	pmdemand_state->params.voltage_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
	pmdemand_state->params.qclk_gv_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
	pmdemand_state->params.active_pipes =
		REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
	pmdemand_state->params.active_dbufs =
		REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
	pmdemand_state->params.active_phys =
		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

	/* Set 2 */
	pmdemand_state->params.cdclk_freq_mhz =
		REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
	pmdemand_state->params.ddiclk_max =
		REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);
	pmdemand_state->params.scalers =
		REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);

unlock:
	mutex_unlock(&i915->display.pmdemand.lock);
}

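/* The request is complete once the hardware clears the request-enable bit. */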
static bool intel_pmdemand_req_complete(struct drm_i915_private *i915)
{
	return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
		 XELPDP_PMDEMAND_REQ_ENABLE);
}

static void intel_pmdemand_wait(struct drm_i915_private *i915)
{
	if (!wait_event_timeout(i915->display.pmdemand.waitqueue,
				intel_pmdemand_req_complete(i915),
				msecs_to_jiffies_timeout(10)))
		drm_err(&i915->drm,
			"timed out waiting for Punit PM Demand Response\n");
}

/* Required to be programmed during Display Init Sequences. */
void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
				 u8 dbuf_slices)
{
	u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

	mutex_lock(&i915->display.pmdemand.lock);
	if (drm_WARN_ON(&i915->drm,
			!intel_pmdemand_check_prev_transaction(i915)))
		goto unlock;

	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
		     XELPDP_PMDEMAND_DBUFS_MASK,
		     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(i915);

unlock:
	mutex_unlock(&i915->display.pmdemand.lock);
}

static void
intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old,
			     u32 *reg1, u32 *reg2, bool serialized)
{
	/*
	 * The pmdemand parameter updates happen in two steps: pre-plane and
	 * post-plane updates. During the pre-plane update the display engine
	 * might still be handling some old operations, so to avoid unexpected
	 * performance issues, program the pmdemand parameters with the higher
	 * of the old and new values. Then, once things have settled, apply
	 * the new parameter values as part of the post-plane update.
	 *
	 * If the pmdemand parameter update happens without a modeset allowed,
	 * the updates cannot be serialized, which implies that parallel
	 * atomic commits may be affecting the pmdemand parameters. In that
	 * case the current register values need to be considered as well:
	 * in the pre-plane case use the max of the old, new and current
	 * register values, and in the post-plane case use the max of the new
	 * and current register values.
	 */

#define update_reg(reg, field, mask) do { \
	u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
	u32 old_val = old ? old->params.field : 0; \
	u32 new_val = new->params.field; \
\
	*(reg) &= ~(mask); \
	*(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)

	/* Set 1 */
	update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
	update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
	update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
	update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
	update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
	update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

	/* Set 2 */
	update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
	update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
	update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
	update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

#undef update_reg
}

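/*
 * Program the merged parameters into the PMDEMAND registers and, if any
 * of them actually changed, trigger a new request and wait for the PUnit
 * to respond.
 */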
static void
intel_pmdemand_program_params(struct drm_i915_private *i915,
			      const struct intel_pmdemand_state *new,
			      const struct intel_pmdemand_state *old,
			      bool serialized)
{
	bool changed = false;
	u32 reg1, mod_reg1;
	u32 reg2, mod_reg2;

	mutex_lock(&i915->display.pmdemand.lock);
	if (drm_WARN_ON(&i915->drm,
			!intel_pmdemand_check_prev_transaction(i915)))
		goto unlock;

	reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
	mod_reg1 = reg1;

	reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
	mod_reg2 = reg2;

	intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2,
				     serialized);

	if (reg1 != mod_reg1) {
		intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
			       mod_reg1);
		changed = true;
	}

	if (reg2 != mod_reg2) {
		intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
			       mod_reg2);
		changed = true;
	}

	/* Initiate the PM demand request only if the register values changed */
	if (!changed)
		goto unlock;

	drm_dbg_kms(&i915->drm,
		    "initiate pmdemand request values: (0x%x 0x%x)\n",
		    mod_reg1, mod_reg2);

	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(i915);

unlock:
	mutex_unlock(&i915->display.pmdemand.lock);
}

static bool
intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old)
{
	return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
}

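/*
 * Pre-plane update: program the max of the old and new parameter values
 * (also considering the current register values when not serialized), so
 * that the demand is never under-reported while the update is in flight.
 */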
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(i915) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(i915, new_pmdemand_state,
				      old_pmdemand_state,
				      intel_atomic_global_state_is_serialized(state));
}

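/*
 * Post-plane update: program the final (new) parameter values now that
 * the plane updates have completed.
 */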
void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(i915) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(i915, new_pmdemand_state, NULL,
				      intel_atomic_global_state_is_serialized(state));
}