1/*	$NetBSD: amdgpu_display.c,v 1.8 2021/12/18 23:44:58 riastradh Exp $	*/
2
3/*
4 * Copyright 2007-8 Advanced Micro Devices, Inc.
5 * Copyright 2008 Red Hat Inc.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 *
25 * Authors: Dave Airlie
26 *          Alex Deucher
27 */
28
29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: amdgpu_display.c,v 1.8 2021/12/18 23:44:58 riastradh Exp $");
31
32#include <drm/amdgpu_drm.h>
33#include "amdgpu.h"
34#include "amdgpu_i2c.h"
35#include "atom.h"
36#include "amdgpu_connectors.h"
37#include "amdgpu_display.h"
38#include <asm/div64.h>
39
40#include <linux/pci.h>
41#include <linux/pm_runtime.h>
42#include <drm/drm_crtc_helper.h>
43#include <drm/drm_edid.h>
44#include <drm/drm_gem_framebuffer_helper.h>
45#include <drm/drm_fb_helper.h>
46#include <drm/drm_vblank.h>
47
48static void amdgpu_display_flip_callback(struct dma_fence *f,
49					 struct dma_fence_cb *cb)
50{
51	struct amdgpu_flip_work *work =
52		container_of(cb, struct amdgpu_flip_work, cb);
53
54	dma_fence_put(f);
55	schedule_work(&work->flip_work.work);
56}
57
58static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
59					     struct dma_fence **f)
60{
61	struct dma_fence *fence= *f;
62
63	if (fence == NULL)
64		return false;
65
66	*f = NULL;
67
68	if (!dma_fence_add_callback(fence, &work->cb,
69				    amdgpu_display_flip_callback))
70		return true;
71
72	dma_fence_put(fence);
73	return false;
74}
75
/*
 * Deferred work that actually performs a queued page flip.  It first
 * (re)arms a callback on any fence that has not signalled yet, then
 * waits until the CRTC has left the vblank period before the targeted
 * one, and finally programs the flip registers via mmio.
 */
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned i;
	int vpos, hpos;

	/* If a fence is still pending, a callback has been installed that
	 * reschedules this work when it signals; bail out for now. */
	if (amdgpu_display_flip_handle_fence(work, &work->excl))
		return;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
		/* Too early: poll again in ~1ms. */
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);


	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
					 amdgpu_crtc->crtc_id, amdgpu_crtc, work);

}
127
128/*
129 * Handle unpin events outside the interrupt handler proper.
130 */
131static void amdgpu_display_unpin_work_func(struct work_struct *__work)
132{
133	struct amdgpu_flip_work *work =
134		container_of(__work, struct amdgpu_flip_work, unpin_work);
135	int r;
136
137	/* unpin of the old buffer */
138	r = amdgpu_bo_reserve(work->old_abo, true);
139	if (likely(r == 0)) {
140		r = amdgpu_bo_unpin(work->old_abo);
141		if (unlikely(r != 0)) {
142			DRM_ERROR("failed to unpin buffer after flip\n");
143		}
144		amdgpu_bo_unreserve(work->old_abo);
145	} else
146		DRM_ERROR("failed to reserve buffer after flip\n");
147
148	amdgpu_bo_unref(&work->old_abo);
149	kfree(work->shared);
150	kfree(work);
151}
152
/*
 * Queue a page flip of @crtc to @fb, to complete no earlier than vblank
 * count @target.  Pins the new buffer, snapshots its fences for the
 * deferred flip worker, and records the old buffer so it can be unpinned
 * by amdgpu_display_unpin_work_func() once the flip completes.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY, or an
 * error from the pin/reserve path).
 */
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags, uint32_t target,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	/* Virtual displays have no real scanout engine, so nothing to pin. */
	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	/* Snapshot the buffer's fences; the flip worker waits on these
	 * before programming the flip. */
	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
					      &work->shared_count,
					      &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;


	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
					 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	/* Run the worker once synchronously; if fences are still pending
	 * it re-arms itself via the fence callbacks. */
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	/* Error unwinding: re-reserve so the buffer can be unpinned. */
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
			DRM_ERROR("failed to unpin new abo in error path\n");

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	dma_fence_put(work->excl);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}
273
/*
 * drm_mode_config_funcs-style set_config wrapper: runs the CRTC helper
 * while holding a pm_runtime reference, and keeps one extra reference
 * alive for as long as at least one CRTC is enabled (dropped again when
 * the last CRTC goes inactive).
 */
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set, ctx);

	/* Check whether any CRTC is left enabled after the modeset. */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	   take the current one */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		/* Early return: the reference taken above is kept as the
		 * display power reference instead of being dropped. */
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	   we got before */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}
318
/*
 * Human-readable encoder names, indexed by amdgpu_encoder->encoder_id
 * in amdgpu_display_print_display_setup().  Order presumably matches
 * the ENCODER_OBJECT_ID_* values — TODO confirm against the atombios
 * object-id headers before reordering.
 */
static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};
362
/* Hotplug-detect pin names, indexed by amdgpu_connector->hpd.hpd. */
static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};
371
/*
 * Dump the display wiring to the kernel log: for every connector, its
 * HPD pin, DDC register set (or a BIOS-bug warning if a digital
 * connector has none), any DDC/clock-data routers, and the encoders
 * attached to it per ATOM device class.
 */
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector_list_iter iter;
	uint32_t devices;
	int i = 0;

	drm_connector_list_iter_begin(dev, &iter);
	DRM_INFO("AMDGPU Display Connectors\n");
	drm_for_each_connector_iter(connector, &iter) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			/* Digital connectors are expected to have a DDC bus. */
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			/* Only report encoders wired to this connector. */
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
	drm_connector_list_iter_end(&iter);
}
450
451/**
452 * amdgpu_display_ddc_probe
453 *
454 */
455bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
456			      bool use_aux)
457{
458	u8 out = 0x0;
459	u8 buf[8];
460	int ret;
461	struct i2c_msg msgs[] = {
462		{
463			.addr = DDC_ADDR,
464			.flags = 0,
465			.len = 1,
466			.buf = &out,
467		},
468		{
469			.addr = DDC_ADDR,
470			.flags = I2C_M_RD,
471			.len = 8,
472			.buf = buf,
473		}
474	};
475
476	/* on hw with routers, select right port */
477	if (amdgpu_connector->router.ddc_valid)
478		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
479
480	if (use_aux) {
481		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
482	} else {
483		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
484	}
485
486	if (ret != 2)
487		/* Couldn't find an accessible DDC on this connector */
488		return false;
489	/* Probe also for valid EDID header
490	 * EDID header starts with:
491	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
492	 * Only the first 6 bytes must be valid as
493	 * drm_edid_block_valid() can fix the last 2 bytes */
494	if (drm_edid_header_is_valid(buf) < 6) {
495		/* Couldn't find an accessible EDID on this
496		 * connector */
497		return false;
498	}
499	return true;
500}
501
/* Plain GEM-backed framebuffers: use the generic GEM fb helpers. */
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};
506
/*
 * Return the set of memory domains a scanout buffer with the given
 * creation flags may be placed in.  Always includes VRAM; GTT is added
 * only on specific APUs when DC is enabled and USWC mapping is usable.
 */
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	/*
	 * if amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * are not supported for this board. But this mapping is required
	 * to avoid hang caused by placement of scanout BO in GTT on certain
	 * APUs. So force the BO placement to VRAM in case this architecture
	 * will not allow USWC mappings.
	 * Also, don't allow GTT domain if the BO doesn't have the USWC flag set.
	 */
	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    amdgpu_device_asic_has_dc_support(adev->asic_type)) {
		switch (adev->asic_type) {
		case CHIP_CARRIZO:
		case CHIP_STONEY:
			domain |= AMDGPU_GEM_DOMAIN_GTT;
			break;
		case CHIP_RAVEN:
			/* enable S/G on PCO and RV2 */
			if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
				domain |= AMDGPU_GEM_DOMAIN_GTT;
			break;
		default:
			break;
		}
	}
#endif

	return domain;
}
542
543int amdgpu_display_framebuffer_init(struct drm_device *dev,
544				    struct amdgpu_framebuffer *rfb,
545				    const struct drm_mode_fb_cmd2 *mode_cmd,
546				    struct drm_gem_object *obj)
547{
548	int ret;
549	rfb->base.obj[0] = obj;
550	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
551	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
552	if (ret) {
553		rfb->base.obj[0] = NULL;
554		return ret;
555	}
556	return 0;
557}
558
559struct drm_framebuffer *
560amdgpu_display_user_framebuffer_create(struct drm_device *dev,
561				       struct drm_file *file_priv,
562				       const struct drm_mode_fb_cmd2 *mode_cmd)
563{
564	struct drm_gem_object *obj;
565	struct amdgpu_framebuffer *amdgpu_fb;
566	int ret;
567
568	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
569	if (obj ==  NULL) {
570		dev_err(pci_dev_dev(dev->pdev), "No GEM object associated to handle 0x%08X, "
571			"can't create framebuffer\n", mode_cmd->handles[0]);
572		return ERR_PTR(-ENOENT);
573	}
574
575	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
576	if (obj->import_attach) {
577		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
578		return ERR_PTR(-EINVAL);
579	}
580
581	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
582	if (amdgpu_fb == NULL) {
583		drm_gem_object_put_unlocked(obj);
584		return ERR_PTR(-ENOMEM);
585	}
586
587	ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
588	if (ret) {
589		kfree(amdgpu_fb);
590		drm_gem_object_put_unlocked(obj);
591		return ERR_PTR(ret);
592	}
593
594	return &amdgpu_fb->base;
595}
596
/* Mode-config hooks for the non-DC (legacy) display path. */
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};
601
/* Choices for the "underscan" property created in
 * amdgpu_display_modeset_create_props(). */
static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

/* Choices for the "audio" property. */
static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
619
/*
 * Create the driver-wide KMS properties (coherent mode, load detection,
 * scaling, underscan and borders, audio, dither, and — with DC — the
 * abm level).  Returns 0 or -ENOMEM.
 *
 * NOTE(review): the enum properties (underscan/audio/dither) are not
 * NULL-checked, unlike the range properties — confirm that is
 * intentional before relying on them being non-NULL.
 */
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev->ddev);

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev->ddev, 0,
				    "underscan",
				    amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev->ddev, 0,
					"underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev->ddev, 0,
					"underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev->ddev, 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev->ddev, 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	if (amdgpu_device_has_dc_support(adev)) {
		adev->mode_info.abm_level_property =
			drm_property_create_range(adev->ddev, 0,
						"abm level", 0, 4);
		if (!adev->mode_info.abm_level_property)
			return -ENOMEM;
	}

	return 0;
}
676
677void amdgpu_display_update_priority(struct amdgpu_device *adev)
678{
679	/* adjustment options for the display watermarks */
680	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
681		adev->mode_info.disp_priority = 0;
682	else
683		adev->mode_info.disp_priority = amdgpu_disp_priority;
684
685}
686
687static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
688{
689	/* try and guess if this is a tv or a monitor */
690	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
691	    (mode->vdisplay == 576) || /* 576p */
692	    (mode->vdisplay == 720) || /* 720p */
693	    (mode->vdisplay == 1080)) /* 1080p */
694		return true;
695	else
696		return false;
697}
698
/*
 * CRTC mode fixup: choose the scaler (rmx) type and overscan borders
 * based on the encoders driving this CRTC, then compute the resulting
 * vertical/horizontal scaling ratios in fixed20_12 fixed point.
 * Always returns true (the mode is never rejected here).
 */
bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			/* Default borders: hdisplay/32 + 16 when the
			 * encoder does not specify any. */
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;
		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		/* No scaling: 1.0 ratios. */
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}
772
773/*
774 * Retrieve current video scanout position of crtc on a given gpu, and
775 * an optional accurate timestamp of when query happened.
776 *
777 * \param dev Device to query.
778 * \param pipe Crtc to query.
779 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
780 *              For driver internal use only also supports these flags:
781 *
782 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
783 *              of a fudged earlier start of vblank.
784 *
785 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
786 *              fudged earlier start of vblank in *vpos and the distance
787 *              to true start of vblank in *hpos.
788 *
789 * \param *vpos Location where vertical scanout position should be stored.
790 * \param *hpos Location where horizontal scanout position should go.
791 * \param *stime Target location for timestamp taken immediately before
792 *               scanout position query. Can be NULL to skip timestamp.
793 * \param *etime Target location for timestamp taken immediately after
794 *               scanout position query. Can be NULL to skip timestamp.
795 *
796 * Returns vpos as a positive number while in active scanout area.
797 * Returns vpos as a negative number inside vblank, counting the number
798 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
799 * until start of active scanout / end of vblank."
800 *
801 * \return Flags, or'ed together as follows:
802 *
803 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
805 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
806 * this flag means that returned position may be offset by a constant but
807 * unknown small number of scanlines wrt. real scanout position.
808 *
809 */
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
			unsigned int pipe, unsigned int flags, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	/* Hardware packs hpos in the upper and vpos in the lower 13 bits. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	}
	else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
	    /* Caller wants distance from real vbl_start in *hpos */
	    *hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
	    ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;

		/* With variable refresh rate displays the vpos can exceed
		 * the vtotal value. Clamp to 0 to return -vbl_end instead
		 * of guessing the remaining number of lines until scanout.
		 */
		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}
909
910int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
911{
912	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
913		return AMDGPU_CRTC_IRQ_NONE;
914
915	switch (crtc) {
916	case 0:
917		return AMDGPU_CRTC_IRQ_VBLANK1;
918	case 1:
919		return AMDGPU_CRTC_IRQ_VBLANK2;
920	case 2:
921		return AMDGPU_CRTC_IRQ_VBLANK3;
922	case 3:
923		return AMDGPU_CRTC_IRQ_VBLANK4;
924	case 4:
925		return AMDGPU_CRTC_IRQ_VBLANK5;
926	case 5:
927		return AMDGPU_CRTC_IRQ_VBLANK6;
928	default:
929		return AMDGPU_CRTC_IRQ_NONE;
930	}
931}
932