/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

#include "amdgpu_reset.h"

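/* Acknowledge the current mailbox message by writing the RCV_MSG_ACK bit
 * (bit 1) of the mailbox RCV control byte back to the host.
 */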
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

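/* Set or clear the TRN_MSG_VALID bit of the mailbox TRN control byte to tell
 * the host whether the transmit message buffer holds a valid message.
 */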
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return the
 * correct value, since RCV_DW0 is only meaningful while RCV_MSG_VALID is set
 * by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

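/* Check whether the pending mailbox message matches the expected event and,
 * if so, acknowledge it to the host; returns -ENOENT on a mismatch.
 */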
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

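/* Read the TRN_MSG_ACK bit (bit 1) of the mailbox TRN control byte without
 * waiting; nonzero means the host has acknowledged our transmitted message.
 */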
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

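/* Busy-wait for the host's TRN_MSG_ACK in 5 ms steps, giving up after
 * NV_MAILBOX_POLL_ACK_TIMEDOUT ms.
 */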
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Did not get TRN_MSG_ACK from pf within %d ms\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

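/* Poll for the expected reply event in 10 ms steps until
 * NV_MAILBOX_POLL_MSG_TIMEDOUT ms have elapsed.
 */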
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r;
	uint64_t timeout, now;

	now = (uint64_t)ktime_to_ms(ktime_get());
	timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		now = (uint64_t)ktime_to_ms(ktime_get());
	} while (timeout > now);

	return -ETIME;
}

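/* Transmit a four-dword message to the host: wait for any stale ack to
 * clear, fill the TRN message buffer, raise TRN_MSG_VALID, then poll for the
 * host's ack before dropping TRN_MSG_VALID again.
 */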
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID so that the host clears its RCV_MSG_ACK; once
	 * the host's RCV_MSG_ACK is cleared, the hardware also clears the
	 * VF's TRN_MSG_ACK. Otherwise a stale ack would make the
	 * xgpu_nv_poll_ack() below return immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x, ACK should not be asserted! waiting again\n", trn);
			msleep(1);
		}
	} while (trn);

	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Did not get ack from pf, continuing anyway\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

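/* Send a request to the host and, for requests that expect a reply, poll for
 * the matching event, retrying the request once on timeout. Where
 * applicable, the reply's extra dwords carry the init-data version and the
 * fw_reserve checksum key.
 */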
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
			enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r, retry = 1;
	enum idh_event event = -1;

send_request:
	xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	case IDH_RAS_POISON:
		if (data1 != 0)
			event = IDH_RAS_POISON_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (retry++ < 2)
				goto send_request;

			if (req != IDH_REQ_GPU_INIT_DATA) {
				pr_err("Did not get msg:%d from pf, error=%d\n", event, r);
				return r;
			} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume v1 in case the host doesn't set a version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve the checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	return xgpu_nv_send_access_requests_with_param(adev,
						req, 0, 0, 0);
}

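/* Ask the host for a GPU reset, trying up to NV_MAILBOX_POLL_MSG_REP_MAX
 * times if the request keeps failing.
 */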
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("got ack interrupt, nothing to do\n");
	return 0;
}

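/* Enable or disable the mailbox ack interrupt (bit 1 of MAILBOX_INT_CNTL). */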
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

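/* Worker for a host-initiated function level reset (FLR): hold off GPU
 * recovery until the host reports FLR completion, then trigger recovery
 * ourselves if the world-switch failure warrants it.
 */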
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

	/* Block amdgpu_gpu_recover() until the FLR_NOTIFICATION_CMPL message
	 * is received, otherwise the mailbox message would be ruined/reset
	 * by the VF FLR.
	 */
	if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
		return;

	down_write(&adev->reset_domain->sem);

	amdgpu_virt_fini_data_exchange(adev);

	xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->reset_domain->in_gpu_reset, 0);
	up_write(&adev->reset_domain->sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

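/* Enable or disable the mailbox receive interrupt (bit 0 of
 * MAILBOX_INT_CNTL).
 */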
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

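/* Top half for the mailbox receive interrupt: peek at the pending message
 * and dispatch it. An FLR notification is handed off to flr_work.
 */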
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
				   &adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
		/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the
		 * IRQ can safely ignore it here since the polling thread
		 * will handle it. Other messages, such as FLR complete, are
		 * not handled here either.
		 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

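/* Register the two BIF mailbox interrupt sources: source ID 135 for received
 * messages and source ID 138 for acks.
 */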
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

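/* Enable both mailbox interrupts and set up the FLR worker; on failure the
 * receive interrupt reference is dropped again.
 */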
int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

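/* Forward a RAS poison notification to the host. Hosts with a UMC IP version
 * older than 12.0.0 only understand the parameterless IDH_RAS_POISON; newer
 * hosts take the affected block and require the VF data exchange to be
 * paused around the request.
 */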
static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
		xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
	} else {
		amdgpu_virt_fini_data_exchange(adev);
		xgpu_nv_send_access_requests_with_param(adev,
					IDH_RAS_POISON, block, 0, 0);
		amdgpu_virt_init_data_exchange(adev);
	}
}

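/* Virtualization ops plugged into the amdgpu virt layer for NV-family
 * SR-IOV virtual functions.
 */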
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu	= xgpu_nv_request_full_gpu_access,
	.rel_full_gpu	= xgpu_nv_release_full_gpu_access,
	.req_init_data  = xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
	.ras_poison_handler = xgpu_nv_ras_poison_handler,
};