/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/module.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif

#include <drm/drm_drv.h>
#include <xen/xen.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"

#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
	do { \
		vf2pf_info->ucode_info[ucode].id = ucode; \
		vf2pf_info->ucode_info[ucode].version = ver; \
	} while (0)

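/**
 * amdgpu_virt_mmio_blocked() - check whether MMIO access is blocked
 * @adev:	amdgpu device.
 * Read a scratch register to detect whether the hypervisor has blocked
 * MMIO access for this VF.
 * Return: True if MMIO (except the mailbox range) is blocked.
 */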
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/* By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor. Choose SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

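/**
 * amdgpu_virt_init_setting() - adjust settings for an SR-IOV VF
 * @adev:	amdgpu device.
 * Enable virtual display where needed, disable atomic modesetting,
 * clear clock/power gating flags and reduce the default KCQ count.
 */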
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);

	/* enable virtual display */
	if (adev->asic_type != CHIP_ALDEBARAN &&
	    adev->asic_type != CHIP_ARCTURUS &&
	    ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
		if (adev->mode_info.num_crtc == 0)
			adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
	}
	ddev->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;

	/* Reduce kcq number to 2 to reduce latency */
	if (amdgpu_num_kcq == -1)
		amdgpu_num_kcq = 2;
}

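/**
 * amdgpu_virt_kiq_reg_write_reg_wait() - write one register and poll another
 * @adev:	amdgpu device.
 * @reg0:	register to write.
 * @reg1:	register to poll.
 * @ref:	reference value.
 * @mask:	mask applied while polling.
 * Issue a write-then-wait packet through MES if available, otherwise
 * through the KIQ ring, polling the fence until it completes.
 */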
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
					      ref, mask);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		drm_msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
	dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When starting to init/fini the driver, we first need to request full GPU access.
 * Return: Zero if the request succeeds, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When finishing driver init/fini, we need to release full GPU access.
 * Return: Zero if the release succeeds, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
 * Return: Zero if the reset succeeds, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

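/**
 * amdgpu_virt_request_init_data() - request init data from the host
 * @adev:	amdgpu device.
 * Ask the hypervisor for the REQ_INIT_DATA handshake and report whether
 * the host supports it.
 */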
void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for GPU reset to complete
 * @adev:	amdgpu device.
 * Wait until the GPU reset has completed.
 * Return: Zero if the reset succeeds, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero if the allocation succeeds, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

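/**
 * amd_sriov_msg_checksum() - compute the PF2VF/VF2PF message checksum
 * @obj:	buffer to checksum.
 * @obj_size:	size of the buffer in bytes.
 * @key:	checksum key provided by the host.
 * @checksum:	checksum value already stored in the buffer, subtracted out
 *		so the checksum field itself does not contribute to the sum.
 * Return: The byte-wise sum used to validate pf2vf/vf2pf messages.
 */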
unsigned int amd_sriov_msg_checksum(void *obj,
				unsigned long obj_size,
				unsigned int key,
				unsigned int checksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the checksum itself */
	pos = (unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= *(pos + i);
	return ret;
}

static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
	/* GPU will be marked bad on the host if the bad page count is more
	 * than 10, so allocating 512 entries is enough.
	 */
	unsigned int align_space = 512;
	void *bps = NULL;
	struct amdgpu_bo **bps_bo = NULL;

	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
	if (!*data)
		goto data_failure;

	bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
	if (!bps)
		goto bps_failure;

	bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
	if (!bps_bo)
		goto bps_bo_failure;

	(*data)->bps = bps;
	(*data)->bps_bo = bps_bo;
	(*data)->count = 0;
	(*data)->last_reserved = 0;

	virt->ras_init_done = true;

	return 0;

bps_bo_failure:
	kfree(bps);
bps_failure:
	kfree(*data);
data_failure:
	return -ENOMEM;
}

static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];
		amdgpu_bo_free_kernel(&bo, NULL, NULL);
		data->bps_bo[i] = bo;
		data->last_reserved = i;
	}
}

void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	virt->ras_init_done = false;

	if (!data)
		return;

	amdgpu_virt_ras_release_bp(adev);

	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	virt->virt_eh_data = NULL;
}

static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	if (!data)
		return;

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
}

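/**
 * amdgpu_virt_ras_reserve_bps() - reserve VRAM for recorded bad pages
 * @adev:	amdgpu device.
 * Pin a GPU page at each retired page address recorded so far, so that
 * the bad pages cannot be handed out to other users.
 */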
static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo = NULL;
	uint64_t bp;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* There are two cases of reserve error that should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		 *    for one page);
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       &bo, NULL))
			DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);

		data->bps_bo[i] = bo;
		data->last_reserved = i + 1;
		bo = NULL;
	}
}

static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
		uint64_t retired_page)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	int i;

	if (!data)
		return true;

	for (i = 0; i < data->count; i++)
		if (retired_page == data->bps[i].retired_page)
			return true;

	return false;
}

static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
		uint64_t bp_block_offset, uint32_t bp_block_size)
{
	struct eeprom_table_record bp;
	uint64_t retired_page;
	uint32_t bp_idx, bp_cnt;
	void *vram_usage_va = NULL;

	if (adev->mman.fw_vram_usage_va)
		vram_usage_va = adev->mman.fw_vram_usage_va;
	else
		vram_usage_va = adev->mman.drv_vram_usage_va;

	if (bp_block_size) {
		bp_cnt = bp_block_size / sizeof(uint64_t);
		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
			retired_page = *(uint64_t *)(vram_usage_va +
					bp_block_offset + bp_idx * sizeof(uint64_t));
			bp.retired_page = retired_page;

			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
				continue;

			amdgpu_virt_ras_add_bps(adev, &bp, 1);

			amdgpu_virt_ras_reserve_bps(adev);
		}
	}
}

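/**
 * amdgpu_virt_read_pf2vf_data() - parse the PF2VF message from the host
 * @adev:	amdgpu device.
 * Validate the checksum of the PF2VF block and cache the host-provided
 * settings (feature flags, register access flags, multimedia bandwidth
 * limits, update interval, unique id) in adev->virt.
 * Return: Zero on success, -EINVAL on a malformed message.
 */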
static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
	uint32_t checksum;
	uint32_t checkval;
	uint32_t i;
	uint32_t tmp;

	if (adev->virt.fw_reserve.p_pf2vf == NULL)
		return -EINVAL;

	if (pf2vf_info->size > 1024) {
		DRM_ERROR("invalid pf2vf message size\n");
		return -EINVAL;
	}

	switch (pf2vf_info->version) {
	case 1:
		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			adev->virt.fw_reserve.checksum_key, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.gim_feature =
			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
		break;
	case 2:
		/* TODO: missing key, need to add it later */
		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			0, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.vf2pf_update_interval_ms =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
		adev->virt.gim_feature =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
		adev->virt.reg_access =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;

		adev->virt.decode_max_dimension_pixels = 0;
		adev->virt.decode_max_frame_pixels = 0;
		adev->virt.encode_max_dimension_pixels = 0;
		adev->virt.encode_max_frame_pixels = 0;
		adev->virt.is_mm_bw_enabled = false;
		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
		}
		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
			adev->virt.is_mm_bw_enabled = true;

		adev->unique_id =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
		break;
	default:
		DRM_ERROR("invalid pf2vf version\n");
		return -EINVAL;
	}

	/* clamp a too large or too small interval value */
	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
		adev->virt.vf2pf_update_interval_ms = 2000;

	return 0;
}

static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return;

	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
			    adev->psp.asd_context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
			    adev->psp.ras_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
			    adev->psp.xgmi_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
}

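/**
 * amdgpu_virt_write_vf2pf_data() - publish the VF2PF message to the host
 * @adev:	amdgpu device.
 * Fill the VF2PF block in reserved VRAM with the driver version, framebuffer
 * usage and firmware versions, then stamp it with a checksum.
 * Return: Zero on success, -EINVAL if the VF2PF region is not mapped.
 */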
static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return -EINVAL;

	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
	if (THIS_MODULE->version != NULL)
		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
	else
#endif
		strlcpy(vf2pf_info->driver_version, "N/A",
		    sizeof(vf2pf_info->driver_version));

	vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
	vf2pf_info->driver_cert = 0;
	vf2pf_info->os_info.all = 0;

	vf2pf_info->fb_usage =
		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
	vf2pf_info->fb_vis_usage =
		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

	amdgpu_virt_populate_vf2pf_ucode_info(adev);

	/* TODO: read dynamic info */
	vf2pf_info->gfx_usage = 0;
	vf2pf_info->compute_usage = 0;
	vf2pf_info->encode_usage = 0;
	vf2pf_info->decode_usage = 0;

	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
	vf2pf_info->checksum =
		amd_sriov_msg_checksum(
		vf2pf_info, vf2pf_info->header.size, 0, 0);

	return 0;
}

static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
	int ret;

	ret = amdgpu_virt_read_pf2vf_data(adev);
	if (ret)
		goto out;
	amdgpu_virt_write_vf2pf_data(adev);

out:
	schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}

void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
	if (adev->virt.vf2pf_update_interval_ms != 0) {
		DRM_INFO("clean up the vf2pf work item\n");
		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
		adev->virt.vf2pf_update_interval_ms = 0;
	}
}

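/**
 * amdgpu_virt_init_data_exchange() - set up periodic PF2VF/VF2PF exchange
 * @adev:	amdgpu device.
 * Locate the PF2VF/VF2PF region (in reserved VRAM or in the VBIOS copy),
 * do an initial exchange and, when reserved VRAM is available, start the
 * periodic vf2pf work item.
 */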
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;
	adev->virt.vf2pf_update_interval_ms = 0;

	if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
		DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
	} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
		/* go through this logic in ip_init and reset to init the workqueue */
		amdgpu_virt_exchange_data(adev);

		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
	} else if (adev->bios != NULL) {
		/* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
	}
}

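/**
 * amdgpu_virt_exchange_data() - exchange PF2VF/VF2PF messages once
 * @adev:	amdgpu device.
 * Map the message headers in reserved VRAM, read the PF2VF data, write the
 * VF2PF data, and pick up any bad-page block advertised by a v2 host.
 */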
void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
	uint64_t bp_block_offset = 0;
	uint32_t bp_block_size = 0;
	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

	if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
		if (adev->mman.fw_vram_usage_va) {
			adev->virt.fw_reserve.p_pf2vf =
				(struct amd_sriov_msg_pf2vf_info_header *)
				(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
			adev->virt.fw_reserve.p_vf2pf =
				(struct amd_sriov_msg_vf2pf_info_header *)
				(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
		} else if (adev->mman.drv_vram_usage_va) {
			adev->virt.fw_reserve.p_pf2vf =
				(struct amd_sriov_msg_pf2vf_info_header *)
				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
			adev->virt.fw_reserve.p_vf2pf =
				(struct amd_sriov_msg_vf2pf_info_header *)
				(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
		}

		amdgpu_virt_read_pf2vf_data(adev);
		amdgpu_virt_write_vf2pf_data(adev);

		/* bad page handling for version 2 */
		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
				((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
			bp_block_size = pf2vf_v2->bp_block_size;

			if (bp_block_size && !adev->virt.ras_init_done)
				amdgpu_virt_init_ras_err_handler_data(adev);

			if (adev->virt.ras_init_done)
				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
		}
	}
}

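/**
 * amdgpu_detect_virtualization() - detect VF / passthrough operation
 * @adev:	amdgpu device.
 * Read the IOV identification register to set the SR-IOV capability bits,
 * mark passthrough mode, and install the per-ASIC virtualization ops.
 */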
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
	case CHIP_IP_DISCOVERY:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chips don't support SRIOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		/* passthrough mode excludes sriov mode */
		if (is_virtual_machine() && !xen_initial_domain())
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}

	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		/* VF MMIO access (except mailbox range) from CPU
		 * will be blocked during sriov runtime
		 */
		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;

	/* we have the ability to check now */
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_TONGA:
		case CHIP_FIJI:
			vi_set_virt_ops(adev);
			break;
		case CHIP_VEGA10:
			soc15_set_virt_ops(adev);
#ifdef CONFIG_X86
			/* don't send GPU_INIT_DATA with MS_HYPERV */
			if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
#endif
				/* send a dummy GPU_INIT_DATA request to host on vega10 */
				amdgpu_virt_request_init_data(adev);
			break;
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
			soc15_set_virt_ops(adev);
			break;
		case CHIP_NAVI10:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
		case CHIP_IP_DISCOVERY:
			nv_set_virt_ops(adev);
			/* try to send a GPU_INIT_DATA request to the host */
			amdgpu_virt_request_init_data(adev);
			break;
		default: /* other chips don't support SRIOV */
			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
			break;
		}
	}
}

static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev);
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev);
}

int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_sriov_is_pp_one_vf(adev))
			mode = SRIOV_VF_MODE_ONE_VF;
		else
			mode = SRIOV_VF_MODE_MULTI_VF;
	} else {
		mode = SRIOV_VF_MODE_BARE_METAL;
	}

	return mode;
}

void amdgpu_virt_post_reset(struct amdgpu_device *adev)
{
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) {
		/* force set to GFXOFF state after reset,
		 * to avoid some invalid operations before GC enable
		 */
		adev->gfx.is_poweron = false;
	}
}

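/**
 * amdgpu_virt_fw_load_skip_check() - check whether a firmware load is skipped
 * @adev:	amdgpu device.
 * @ucode_id:	firmware id to check.
 * On SR-IOV VFs some firmwares are owned by the host; use a per-ASIC white
 * or black list to decide whether the guest should skip loading @ucode_id.
 * Return: True if the firmware load should be skipped on this VF.
 */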
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
{
	switch (adev->ip_versions[MP0_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
		/* no vf autoload, white list */
		if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
		    ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
		/* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
		if (ucode_id == AMDGPU_UCODE_ID_RLC_G
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_SMC)
			return true;
		else
			return false;
	case IP_VERSION(13, 0, 10):
		/* white list */
		if (ucode_id == AMDGPU_UCODE_ID_CAP
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
		|| ucode_id == AMDGPU_UCODE_ID_VCN1
		|| ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	default:
		/* legacy black list */
		if (ucode_id == AMDGPU_UCODE_ID_SDMA0
		    || ucode_id == AMDGPU_UCODE_ID_SDMA1
		    || ucode_id == AMDGPU_UCODE_ID_SDMA2
		    || ucode_id == AMDGPU_UCODE_ID_SDMA3
		    || ucode_id == AMDGPU_UCODE_ID_SDMA4
		    || ucode_id == AMDGPU_UCODE_ID_SDMA5
		    || ucode_id == AMDGPU_UCODE_ID_SDMA6
		    || ucode_id == AMDGPU_UCODE_ID_SDMA7
		    || ucode_id == AMDGPU_UCODE_ID_RLC_G
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_SMC)
			return true;
		else
			return false;
	}
}

void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
	uint32_t i;

	if (!adev->virt.is_mm_bw_enabled)
		return;

	if (encode) {
		for (i = 0; i < encode_array_size; i++) {
			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
			if (encode[i].max_width > 0)
				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
			else
				encode[i].max_height = 0;
		}
	}

	if (decode) {
		for (i = 0; i < decode_array_size; i++) {
			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
			if (decode[i].max_width > 0)
				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
			else
				decode[i].max_height = 0;
		}
	}
}

static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
						 u32 acc_flags, u32 hwip,
						 bool write, u32 *rlcg_flag)
{
	bool ret = false;

	switch (hwip) {
	case GC_HWIP:
		if (amdgpu_sriov_reg_indirect_gc(adev)) {
			*rlcg_flag =
				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
			ret = true;
		/* only in new version, AMDGPU_REGS_NO_KIQ and
		 * AMDGPU_REGS_RLC are enabled simultaneously
		 */
		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
				!(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
			ret = true;
		}
		break;
	case MMHUB_HWIP:
		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
		    (acc_flags & AMDGPU_REGS_RLC) && write) {
			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
			ret = true;
		}
		break;
	default:
		break;
	}
	return ret;
}

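/**
 * amdgpu_virt_rlcg_reg_rw() - access a GC/MMHUB register indirectly via RLCG
 * @adev:	amdgpu device.
 * @offset:	register offset in dwords.
 * @v:		value to write (ignored for reads).
 * @flag:	AMDGPU_RLCG_* command flag selecting the operation.
 * @xcc_id:	target XCC instance.
 * Program the RLCG scratch-register interface and poll scratch_reg1 until
 * the RLC firmware acknowledges the request or the poll times out.
 * Return: The value read back from scratch_reg0.
 */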
static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
	uint32_t timeout = 50000;
	uint32_t i, tmp;
	uint32_t ret = 0;
	void *scratch_reg0;
	void *scratch_reg1;
	void *scratch_reg2;
	void *scratch_reg3;
	void *spare_int = NULL;

	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
		dev_err(adev->dev,
			"indirect registers access through rlcg is not available\n");
		return 0;
	}

	if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
		dev_err(adev->dev, "invalid xcc\n");
		return 0;
	}

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
	if (reg_access_ctrl->spare_int)
		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

	if (offset == reg_access_ctrl->grbm_cntl) {
		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
		writel(v, scratch_reg2);
		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else if (offset == reg_access_ctrl->grbm_idx) {
		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
		writel(v, scratch_reg3);
		if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
			writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		/*
		 * SCRATCH_REG0		= read/write value
		 * SCRATCH_REG1[30:28]	= command
		 * SCRATCH_REG1[19:0]	= address in dword
		 * SCRATCH_REG1[26:24]	= Error reporting
		 */
		writel(v, scratch_reg0);
		writel((offset | flag), scratch_reg1);
		if (reg_access_ctrl->spare_int)
			writel(1, spare_int);

		for (i = 0; i < timeout; i++) {
			tmp = readl(scratch_reg1);
			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
				break;
			udelay(10);
		}

		if (i >= timeout) {
			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
					dev_err(adev->dev,
						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
					dev_err(adev->dev,
						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
					dev_err(adev->dev,
						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
				} else {
					dev_err(adev->dev,
						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
				}
			} else {
				dev_err(adev->dev,
					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
			}
		}
	}

	ret = readl(scratch_reg0);
	return ret;
}

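/**
 * amdgpu_sriov_wreg() - register write helper for SR-IOV VFs
 * @adev:	amdgpu device.
 * @offset:	register offset in dwords.
 * @value:	value to write.
 * @acc_flags:	access flags (AMDGPU_REGS_*).
 * @hwip:	hardware IP block the register belongs to.
 * @xcc_id:	target XCC instance.
 * Route the write through the RLCG interface when the register requires it
 * and the VF is not in runtime, otherwise fall back to a direct MMIO write.
 */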
void amdgpu_sriov_wreg(struct amdgpu_device *adev,
		       u32 offset, u32 value,
		       u32 acc_flags, u32 hwip, u32 xcc_id)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
		return;
	}

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		WREG32_NO_KIQ(offset, value);
	else
		WREG32(offset, value);
}

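/**
 * amdgpu_sriov_rreg() - register read helper for SR-IOV VFs
 * @adev:	amdgpu device.
 * @offset:	register offset in dwords.
 * @acc_flags:	access flags (AMDGPU_REGS_*).
 * @hwip:	hardware IP block the register belongs to.
 * @xcc_id:	target XCC instance.
 * Counterpart of amdgpu_sriov_wreg() for reads.
 * Return: The register value.
 */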
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
		      u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		return RREG32_NO_KIQ(offset);
	else
		return RREG32(offset);
}