// SPDX-License-Identifier: GPL-2.0-only
/* Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "hinic_hw_if.h"
#include "hinic_hw_mgmt.h"
#include "hinic_hw_csr.h"
#include "hinic_hw_dev.h"
#include "hinic_hw_mbox.h"

#define HINIC_MBOX_INT_DST_FUNC_SHIFT				0
#define HINIC_MBOX_INT_DST_AEQN_SHIFT				10
#define HINIC_MBOX_INT_SRC_RESP_AEQN_SHIFT			12
#define HINIC_MBOX_INT_STAT_DMA_SHIFT				14
/* The size of the data to be sent (unit: 4 bytes) */
#define HINIC_MBOX_INT_TX_SIZE_SHIFT				20
/* SO_RO (strong order, relaxed order) */
#define HINIC_MBOX_INT_STAT_DMA_SO_RO_SHIFT			25
#define HINIC_MBOX_INT_WB_EN_SHIFT				28

#define HINIC_MBOX_INT_DST_FUNC_MASK				0x3FF
#define HINIC_MBOX_INT_DST_AEQN_MASK				0x3
#define HINIC_MBOX_INT_SRC_RESP_AEQN_MASK			0x3
#define HINIC_MBOX_INT_STAT_DMA_MASK				0x3F
#define HINIC_MBOX_INT_TX_SIZE_MASK				0x1F
#define HINIC_MBOX_INT_STAT_DMA_SO_RO_MASK			0x3
#define HINIC_MBOX_INT_WB_EN_MASK				0x1

#define HINIC_MBOX_INT_SET(val, field)	\
			(((val) & HINIC_MBOX_INT_##field##_MASK) << \
			HINIC_MBOX_INT_##field##_SHIFT)

enum hinic_mbox_tx_status {
	TX_NOT_DONE = 1,
};

#define HINIC_MBOX_CTRL_TRIGGER_AEQE_SHIFT			0

/* Specifies the Tx status of the message data:
 * 0 - Tx request is done;
 * 1 - Tx request is in progress.
 */
#define HINIC_MBOX_CTRL_TX_STATUS_SHIFT				1

#define HINIC_MBOX_CTRL_TRIGGER_AEQE_MASK			0x1
#define HINIC_MBOX_CTRL_TX_STATUS_MASK				0x1

#define HINIC_MBOX_CTRL_SET(val, field)	\
			(((val) & HINIC_MBOX_CTRL_##field##_MASK) << \
			HINIC_MBOX_CTRL_##field##_SHIFT)

#define HINIC_MBOX_HEADER_MSG_LEN_SHIFT				0
#define HINIC_MBOX_HEADER_MODULE_SHIFT				11
#define HINIC_MBOX_HEADER_SEG_LEN_SHIFT				16
#define HINIC_MBOX_HEADER_NO_ACK_SHIFT				22
#define HINIC_MBOX_HEADER_SEQID_SHIFT				24
#define HINIC_MBOX_HEADER_LAST_SHIFT				30

/* specifies the mailbox message direction
 * 0 - send
 * 1 - receive
 */
#define HINIC_MBOX_HEADER_DIRECTION_SHIFT			31
#define HINIC_MBOX_HEADER_CMD_SHIFT				32
#define HINIC_MBOX_HEADER_MSG_ID_SHIFT				40
#define HINIC_MBOX_HEADER_STATUS_SHIFT				48
#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_SHIFT		54

#define HINIC_MBOX_HEADER_MSG_LEN_MASK				0x7FF
#define HINIC_MBOX_HEADER_MODULE_MASK				0x1F
#define HINIC_MBOX_HEADER_SEG_LEN_MASK				0x3F
#define HINIC_MBOX_HEADER_NO_ACK_MASK				0x1
#define HINIC_MBOX_HEADER_SEQID_MASK				0x3F
#define HINIC_MBOX_HEADER_LAST_MASK				0x1
#define HINIC_MBOX_HEADER_DIRECTION_MASK			0x1
#define HINIC_MBOX_HEADER_CMD_MASK				0xFF
#define HINIC_MBOX_HEADER_MSG_ID_MASK				0xFF
#define HINIC_MBOX_HEADER_STATUS_MASK				0x3F
#define HINIC_MBOX_HEADER_SRC_GLB_FUNC_IDX_MASK			0x3FF

#define HINIC_MBOX_HEADER_GET(val, field)	\
			(((val) >> HINIC_MBOX_HEADER_##field##_SHIFT) & \
			HINIC_MBOX_HEADER_##field##_MASK)
#define HINIC_MBOX_HEADER_SET(val, field)	\
			((u64)((val) & HINIC_MBOX_HEADER_##field##_MASK) << \
			HINIC_MBOX_HEADER_##field##_SHIFT)
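
/* For reference, the 64-bit mailbox header layout implied by the shift/mask
 * definitions above (bit 23 is unused):
 *
 *   bits  0-10: MSG_LEN              bit  31:    DIRECTION
 *   bits 11-15: MODULE               bits 32-39: CMD
 *   bits 16-21: SEG_LEN              bits 40-47: MSG_ID
 *   bit  22:    NO_ACK               bits 48-53: STATUS
 *   bits 24-29: SEQID                bits 54-63: SRC_GLB_FUNC_IDX
 *   bit  30:    LAST
 */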

#define MBOX_SEGLEN_MASK			\
		HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEG_LEN_MASK, SEG_LEN)

#define HINIC_MBOX_SEG_LEN			48
#define HINIC_MBOX_COMP_TIME			8000U
#define MBOX_MSG_POLLING_TIMEOUT		8000

#define HINIC_MBOX_DATA_SIZE			2040

#define MBOX_MAX_BUF_SZ				2048UL
#define MBOX_HEADER_SZ				8

#define MBOX_INFO_SZ				4

/* MBOX size is 64B, 8B for mbox_header, 4B reserved */
#define MBOX_SEG_LEN				48
#define MBOX_SEG_LEN_ALIGN			4
#define MBOX_WB_STATUS_LEN			16UL

/* mbox write back status is 16B; only the first 4B are used */
#define MBOX_WB_STATUS_ERRCODE_MASK		0xFFFF
#define MBOX_WB_STATUS_MASK			0xFF
#define MBOX_WB_ERROR_CODE_MASK			0xFF00
#define MBOX_WB_STATUS_FINISHED_SUCCESS		0xFF
#define MBOX_WB_STATUS_NOT_FINISHED		0x00

#define MBOX_STATUS_FINISHED(wb)	\
	(((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED)
#define MBOX_STATUS_SUCCESS(wb)		\
	(((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS)
#define MBOX_STATUS_ERRCODE(wb)		\
	((wb) & MBOX_WB_ERROR_CODE_MASK)

#define SEQ_ID_START_VAL			0
#define SEQ_ID_MAX_VAL				42

#define NO_DMA_ATTRIBUTE_VAL			0

#define HINIC_MBOX_RSP_AEQN			2
#define HINIC_MBOX_RECV_AEQN			0

#define MBOX_MSG_NO_DATA_LEN			1

#define MBOX_BODY_FROM_HDR(header)	((u8 *)(header) + MBOX_HEADER_SZ)
#define MBOX_AREA(hwif)			\
	((hwif)->cfg_regs_bar + HINIC_FUNC_CSR_MAILBOX_DATA_OFF)

#define IS_PF_OR_PPF_SRC(src_func_idx)	((src_func_idx) < HINIC_MAX_PF_FUNCS)

#define MBOX_MSG_ID_MASK		0xFF
#define MBOX_MSG_ID(func_to_func)	((func_to_func)->send_msg_id)
#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
			(MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK)

#define FUNC_ID_OFF_SET_8B		8

/* max number of messages waiting to be processed for one function */
#define HINIC_MAX_MSG_CNT_TO_PROCESS	10

#define HINIC_QUEUE_MIN_DEPTH		6
#define HINIC_QUEUE_MAX_DEPTH		12
#define HINIC_MAX_RX_BUFFER_SIZE	15

enum hinic_hwif_direction_type {
	HINIC_HWIF_DIRECT_SEND	= 0,
	HINIC_HWIF_RESPONSE	= 1,
};

enum mbox_send_mod {
	MBOX_SEND_MSG_INT,
};

enum mbox_seg_type {
	NOT_LAST_SEG,
	LAST_SEG,
};

enum mbox_ordering_type {
	STRONG_ORDER,
};

enum mbox_write_back_type {
	WRITE_BACK = 1,
};

enum mbox_aeq_trig_type {
	NOT_TRIGGER,
	TRIGGER,
};

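/**
 * check_func_id - check that the function id field carried in a mailbox
 * message matches the message's actual source function
 * @hwdev: the pointer to hw device
 * @src_func_idx: source global function id of the message
 * @buf_in: the message body
 * @in_size: size of the message body
 * @offset: offset of the function id field in the message body
 * Return: true - the ids match, false - mismatch or message too short
 */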
static bool check_func_id(struct hinic_hwdev *hwdev, u16 src_func_idx,
			  const void *buf_in, u16 in_size, u16 offset)
{
	u16 func_idx;

	if (in_size < offset + sizeof(func_idx)) {
		dev_warn(&hwdev->hwif->pdev->dev,
			 "Received mailbox msg len: %d less than %d bytes, invalid\n",
			 in_size, offset);
		return false;
	}

	func_idx = *((u16 *)((u8 *)buf_in + offset));

	if (src_func_idx != func_idx) {
		dev_warn(&hwdev->hwif->pdev->dev,
			 "Received mailbox function id: 0x%x does not match msg function id: 0x%x\n",
			 src_func_idx, func_idx);
		return false;
	}

	return true;
}

bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx,
				 void *buf_in, u16 in_size)
{
	return check_func_id(hwdev, func_idx, buf_in, in_size,
			     FUNC_ID_OFF_SET_8B);
}

/**
 * hinic_register_pf_mbox_cb - register mbox callback for pf
 * @hwdev: the pointer to hw device
 * @mod:	specific mod that the callback will handle
 * @callback:	callback function
 * Return: 0 - success, negative - failure
 */
int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev,
			      enum hinic_mod_type mod,
			      hinic_pf_mbox_cb callback)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	if (mod >= HINIC_MOD_MAX)
		return -EFAULT;

	func_to_func->pf_mbox_cb[mod] = callback;

	set_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);

	return 0;
}

/**
 * hinic_register_vf_mbox_cb - register mbox callback for vf
 * @hwdev: the pointer to hw device
 * @mod:	specific mod that the callback will handle
 * @callback:	callback function
 * Return: 0 - success, negative - failure
 */
int hinic_register_vf_mbox_cb(struct hinic_hwdev *hwdev,
			      enum hinic_mod_type mod,
			      hinic_vf_mbox_cb callback)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	if (mod >= HINIC_MOD_MAX)
		return -EFAULT;

	func_to_func->vf_mbox_cb[mod] = callback;

	set_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);

	return 0;
}

/**
 * hinic_unregister_pf_mbox_cb - unregister the mbox callback for pf
 * @hwdev:	the pointer to hw device
 * @mod:	specific mod that the callback will handle
 */
void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
				 enum hinic_mod_type mod)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	clear_bit(HINIC_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]);

	while (test_bit(HINIC_PF_MBOX_CB_RUNNING,
			&func_to_func->pf_mbox_cb_state[mod]))
		usleep_range(900, 1000);

	func_to_func->pf_mbox_cb[mod] = NULL;
}

/**
 * hinic_unregister_vf_mbox_cb - unregister the mbox callback for vf
 * @hwdev:	the pointer to hw device
 * @mod:	specific mod that the callback will handle
 */
void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
				 enum hinic_mod_type mod)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	clear_bit(HINIC_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]);

	while (test_bit(HINIC_VF_MBOX_CB_RUNNING,
			&func_to_func->vf_mbox_cb_state[mod]))
		usleep_range(900, 1000);

	func_to_func->vf_mbox_cb[mod] = NULL;
}

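/**
 * recv_vf_mbox_handler - dispatch a mailbox message received by a VF to
 * the callback registered for its module
 * @func_to_func: the mailbox function-to-function context
 * @recv_mbox: the received mailbox message
 * @buf_out: buffer for the response data
 * @out_size: response data size
 * Return: 0 - success, negative - failure
 */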
static int recv_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
				struct hinic_recv_mbox *recv_mbox,
				void *buf_out, u16 *out_size)
{
	hinic_vf_mbox_cb cb;
	int ret = 0;

	if (recv_mbox->mod >= HINIC_MOD_MAX) {
		dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
			recv_mbox->mod);
		return -EINVAL;
	}

	set_bit(HINIC_VF_MBOX_CB_RUNNING,
		&func_to_func->vf_mbox_cb_state[recv_mbox->mod]);

	cb = func_to_func->vf_mbox_cb[recv_mbox->mod];
	if (cb && test_bit(HINIC_VF_MBOX_CB_REG,
			   &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) {
		cb(func_to_func->hwdev, recv_mbox->cmd, recv_mbox->mbox,
		   recv_mbox->mbox_len, buf_out, out_size);
	} else {
		dev_err(&func_to_func->hwif->pdev->dev, "VF mbox cb is not registered\n");
		ret = -EINVAL;
	}

	clear_bit(HINIC_VF_MBOX_CB_RUNNING,
		  &func_to_func->vf_mbox_cb_state[recv_mbox->mod]);

	return ret;
}

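/**
 * recv_pf_from_vf_mbox_handler - dispatch a mailbox message that the PF
 * received from a VF to the callback registered for its module
 * @func_to_func: the mailbox function-to-function context
 * @recv_mbox: the received mailbox message
 * @src_func_idx: global function id of the sending VF
 * @buf_out: buffer for the response data
 * @out_size: response data size
 * Return: 0 - success, negative - failure
 */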
static int
recv_pf_from_vf_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			     struct hinic_recv_mbox *recv_mbox,
			     u16 src_func_idx, void *buf_out,
			     u16 *out_size)
{
	hinic_pf_mbox_cb cb;
	u16 vf_id = 0;
	int ret;

	if (recv_mbox->mod >= HINIC_MOD_MAX) {
		dev_err(&func_to_func->hwif->pdev->dev, "Receive illegal mbox message, mod = %d\n",
			recv_mbox->mod);
		return -EINVAL;
	}

	set_bit(HINIC_PF_MBOX_CB_RUNNING,
		&func_to_func->pf_mbox_cb_state[recv_mbox->mod]);

	cb = func_to_func->pf_mbox_cb[recv_mbox->mod];
	if (cb && test_bit(HINIC_PF_MBOX_CB_REG,
			   &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) {
		vf_id = src_func_idx -
			hinic_glb_pf_vf_offset(func_to_func->hwif);
		ret = cb(func_to_func->hwdev, vf_id, recv_mbox->cmd,
			 recv_mbox->mbox, recv_mbox->mbox_len,
			 buf_out, out_size);
	} else {
		dev_err(&func_to_func->hwif->pdev->dev, "PF mbox mod(0x%x) cb is not registered\n",
			recv_mbox->mod);
		ret = -EINVAL;
	}

	clear_bit(HINIC_PF_MBOX_CB_RUNNING,
		  &func_to_func->pf_mbox_cb_state[recv_mbox->mod]);

	return ret;
}

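/**
 * check_mbox_seq_id_and_seg_len - validate the sequence id and segment
 * length of an incoming mailbox segment; segments must arrive in order,
 * and a sequence id of 0 restarts the reassembly
 * @recv_mbox: the mailbox message being reassembled
 * @seq_id: sequence id carried in the segment header
 * @seg_len: segment length carried in the segment header
 * Return: true - the segment is valid, false - out of range or out of order
 */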
static bool check_mbox_seq_id_and_seg_len(struct hinic_recv_mbox *recv_mbox,
					  u8 seq_id, u8 seg_len)
{
	if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN)
		return false;

	if (seq_id == 0) {
		recv_mbox->seq_id = seq_id;
	} else {
		if (seq_id != recv_mbox->seq_id + 1)
			return false;

		recv_mbox->seq_id = seq_id;
	}

	return true;
}

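/**
 * resp_mbox_handler - complete the pending send when a response mailbox
 * message arrives, provided its message id matches the message currently
 * in flight
 * @func_to_func: the mailbox function-to-function context
 * @recv_mbox: the received response message
 */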
static void resp_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			      struct hinic_recv_mbox *recv_mbox)
{
	spin_lock(&func_to_func->mbox_lock);
	if (recv_mbox->msg_info.msg_id == func_to_func->send_msg_id &&
	    func_to_func->event_flag == EVENT_START)
		complete(&recv_mbox->recv_done);
	else
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n",
			func_to_func->send_msg_id, recv_mbox->msg_info.msg_id,
			recv_mbox->msg_info.status);
	spin_unlock(&func_to_func->mbox_lock);
}

static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
				   struct hinic_recv_mbox *recv_mbox,
				   u16 src_func_idx);

static void recv_func_mbox_work_handler(struct work_struct *work)
{
	struct hinic_mbox_work *mbox_work =
			container_of(work, struct hinic_mbox_work, work);
	struct hinic_recv_mbox *recv_mbox;

	recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox,
			       mbox_work->src_func_idx);

	recv_mbox =
		&mbox_work->func_to_func->mbox_send[mbox_work->src_func_idx];

	atomic_dec(&recv_mbox->msg_cnt);

	kfree(mbox_work);
}

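/**
 * recv_mbox_handler - reassemble incoming mailbox segments and, once the
 * last segment arrives, either complete the pending response or queue the
 * full message to the mailbox workqueue for processing
 * @func_to_func: the mailbox function-to-function context
 * @header: the received segment, starting with its 64-bit header
 * @recv_mbox: the per-function reassembly buffer for this message
 */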
static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
			      void *header, struct hinic_recv_mbox *recv_mbox)
{
	void *mbox_body = MBOX_BODY_FROM_HDR(header);
	struct hinic_recv_mbox *rcv_mbox_temp = NULL;
	u64 mbox_header = *((u64 *)header);
	struct hinic_mbox_work *mbox_work;
	u8 seq_id, seg_len;
	u16 src_func_idx;
	int pos;

	seq_id = HINIC_MBOX_HEADER_GET(mbox_header, SEQID);
	seg_len = HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN);
	src_func_idx = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);

	if (!check_mbox_seq_id_and_seg_len(recv_mbox, seq_id, seg_len)) {
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mailbox sequence and segment check failed, src func id: 0x%x, front id: 0x%x, current id: 0x%x, seg len: 0x%x\n",
			src_func_idx, recv_mbox->seq_id, seq_id, seg_len);
		recv_mbox->seq_id = SEQ_ID_MAX_VAL;
		return;
	}

	pos = seq_id * MBOX_SEG_LEN;
	memcpy((u8 *)recv_mbox->mbox + pos, mbox_body,
	       HINIC_MBOX_HEADER_GET(mbox_header, SEG_LEN));

	if (!HINIC_MBOX_HEADER_GET(mbox_header, LAST))
		return;

	recv_mbox->cmd = HINIC_MBOX_HEADER_GET(mbox_header, CMD);
	recv_mbox->mod = HINIC_MBOX_HEADER_GET(mbox_header, MODULE);
	recv_mbox->mbox_len = HINIC_MBOX_HEADER_GET(mbox_header, MSG_LEN);
	recv_mbox->ack_type = HINIC_MBOX_HEADER_GET(mbox_header, NO_ACK);
	recv_mbox->msg_info.msg_id = HINIC_MBOX_HEADER_GET(mbox_header, MSG_ID);
	recv_mbox->msg_info.status = HINIC_MBOX_HEADER_GET(mbox_header, STATUS);
	recv_mbox->seq_id = SEQ_ID_MAX_VAL;

	if (HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION) ==
	    HINIC_HWIF_RESPONSE) {
		resp_mbox_handler(func_to_func, recv_mbox);
		return;
	}

	if (atomic_read(&recv_mbox->msg_cnt) > HINIC_MAX_MSG_CNT_TO_PROCESS) {
		dev_warn(&func_to_func->hwif->pdev->dev,
			 "Function (%u) has %d messages waiting to be processed, can't add to work queue\n",
			 src_func_idx, atomic_read(&recv_mbox->msg_cnt));
		return;
	}

	rcv_mbox_temp = kmemdup(recv_mbox, sizeof(*rcv_mbox_temp), GFP_KERNEL);
	if (!rcv_mbox_temp)
		return;

	rcv_mbox_temp->mbox = kmemdup(recv_mbox->mbox, MBOX_MAX_BUF_SZ,
				      GFP_KERNEL);
	if (!rcv_mbox_temp->mbox)
		goto err_alloc_rcv_mbox_msg;

	rcv_mbox_temp->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
	if (!rcv_mbox_temp->buf_out)
		goto err_alloc_rcv_mbox_buf;

	mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
	if (!mbox_work)
		goto err_alloc_mbox_work;

	mbox_work->func_to_func = func_to_func;
	mbox_work->recv_mbox = rcv_mbox_temp;
	mbox_work->src_func_idx = src_func_idx;

	atomic_inc(&recv_mbox->msg_cnt);
	INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler);
	queue_work(func_to_func->workq, &mbox_work->work);

	return;

err_alloc_mbox_work:
	kfree(rcv_mbox_temp->buf_out);

err_alloc_rcv_mbox_buf:
	kfree(rcv_mbox_temp->mbox);

err_alloc_rcv_mbox_msg:
	kfree(rcv_mbox_temp);
}

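/**
 * set_vf_mbox_random_id - generate a fresh anti-spoofing random id for a
 * VF mailbox and report it to the management firmware
 * @hwdev: the pointer to hw device
 * @func_id: global function id of the VF
 * Return: 0 - success, HINIC_MGMT_CMD_UNSUPPORTED - firmware does not
 * support the command, negative - failure
 */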
static int set_vf_mbox_random_id(struct hinic_hwdev *hwdev, u16 func_id)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
	struct hinic_set_random_id rand_info = {0};
	u16 out_size = sizeof(rand_info);
	struct hinic_pfhwdev *pfhwdev;
	int ret;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	rand_info.version = HINIC_CMD_VER_FUNC_ID;
	rand_info.func_idx = func_id;
	rand_info.vf_in_pf = func_id - hinic_glb_pf_vf_offset(hwdev->hwif);
	rand_info.random_id = get_random_u32();

	func_to_func->vf_mbx_rand_id[func_id] = rand_info.random_id;

	ret = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
				HINIC_MGMT_CMD_SET_VF_RANDOM_ID,
				&rand_info, sizeof(rand_info),
				&rand_info, &out_size, HINIC_MGMT_MSG_SYNC);
	if ((rand_info.status != HINIC_MGMT_CMD_UNSUPPORTED &&
	     rand_info.status) || !out_size || ret) {
		dev_err(&hwdev->hwif->pdev->dev, "Set VF random id failed, err: %d, status: 0x%x, out size: 0x%x\n",
			ret, rand_info.status, out_size);
		return -EIO;
	}

	if (rand_info.status == HINIC_MGMT_CMD_UNSUPPORTED)
		return rand_info.status;

	func_to_func->vf_mbx_old_rand_id[func_id] =
				func_to_func->vf_mbx_rand_id[func_id];

	return 0;
}

static void update_random_id_work_handler(struct work_struct *work)
{
	struct hinic_mbox_work *mbox_work =
			container_of(work, struct hinic_mbox_work, work);
	struct hinic_mbox_func_to_func *func_to_func;
	u16 src = mbox_work->src_func_idx;

	func_to_func = mbox_work->func_to_func;

	if (set_vf_mbox_random_id(func_to_func->hwdev, src))
		dev_warn(&func_to_func->hwdev->hwif->pdev->dev, "Update VF id: 0x%x random id failed\n",
			 mbox_work->src_func_idx);

	kfree(mbox_work);
}

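/**
 * check_vf_mbox_random_id - verify the random id carried in a VF mailbox
 * message against the id reserved by the PF; on a mismatch a work item is
 * queued to renew the VF's random id
 * @func_to_func: the mailbox function-to-function context
 * @header: the received mailbox segment
 * Return: true - the message may be processed, false - drop the message
 */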
static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func,
				    u8 *header)
{
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	struct hinic_mbox_work *mbox_work = NULL;
	u64 mbox_header = *((u64 *)header);
	u16 offset, src;
	u32 random_id;
	int vf_in_pf;

	src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);

	if (IS_PF_OR_PPF_SRC(src) || !func_to_func->support_vf_random)
		return true;

	if (!HINIC_IS_PPF(hwdev->hwif)) {
		offset = hinic_glb_pf_vf_offset(hwdev->hwif);
		vf_in_pf = src - offset;

		if (vf_in_pf < 1 || vf_in_pf > hwdev->nic_cap.max_vf) {
			dev_warn(&hwdev->hwif->pdev->dev,
				 "Received vf id (0x%x) is invalid, vf id should be from 0x%x to 0x%x\n",
				 src, offset + 1,
				 hwdev->nic_cap.max_vf + offset);
			return false;
		}
	}

	random_id = be32_to_cpu(*(u32 *)(header + MBOX_SEG_LEN +
					 MBOX_HEADER_SZ));

	if (random_id == func_to_func->vf_mbx_rand_id[src] ||
	    random_id == func_to_func->vf_mbx_old_rand_id[src])
		return true;

	dev_warn(&hwdev->hwif->pdev->dev,
		 "The mailbox random id(0x%x) of func_id(0x%x) doesn't match the pf reservation(0x%x)\n",
		 random_id, src, func_to_func->vf_mbx_rand_id[src]);

	mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL);
	if (!mbox_work)
		return false;

	mbox_work->func_to_func = func_to_func;
	mbox_work->src_func_idx = src;

	INIT_WORK(&mbox_work->work, update_random_id_work_handler);
	queue_work(func_to_func->workq, &mbox_work->work);

	return false;
}

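/**
 * hinic_mbox_func_aeqe_handler - AEQ handler for mailbox segments arriving
 * from another function; validates the source and feeds the segment into
 * the matching send or response reassembly buffer
 * @handle: the hw device (opaque pointer registered with the AEQ)
 * @header: the received mailbox segment
 * @size: size of the AEQ element
 */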
static void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
{
	struct hinic_mbox_func_to_func *func_to_func;
	u64 mbox_header = *((u64 *)header);
	struct hinic_recv_mbox *recv_mbox;
	u64 src, dir;

	func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;

	dir = HINIC_MBOX_HEADER_GET(mbox_header, DIRECTION);
	src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX);

	if (src >= HINIC_MAX_FUNCTIONS) {
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mailbox source function id:%u is invalid\n", (u32)src);
		return;
	}

	if (!check_vf_mbox_random_id(func_to_func, header))
		return;

	recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ?
		    &func_to_func->mbox_send[src] :
		    &func_to_func->mbox_resp[src];

	recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
}

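/**
 * hinic_mbox_self_aeqe_handler - AEQ handler for the "mailbox send result"
 * event; completes the wait for the current outgoing segment
 * @handle: the hw device (opaque pointer registered with the AEQ)
 * @header: the AEQ element data
 * @size: size of the AEQ element
 */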
static void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
{
	struct hinic_mbox_func_to_func *func_to_func;
	struct hinic_send_mbox *send_mbox;

	func_to_func = ((struct hinic_hwdev *)handle)->func_to_func;
	send_mbox = &func_to_func->send_mbox;

	complete(&send_mbox->send_done);
}

static void clear_mbox_status(struct hinic_send_mbox *mbox)
{
	*mbox->wb_status = 0;

	/* clear mailbox write back status */
	wmb();
}

static void mbox_copy_header(struct hinic_hwdev *hwdev,
			     struct hinic_send_mbox *mbox, u64 *header)
{
	u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32);
	u32 *data = (u32 *)header;

	for (i = 0; i < idx_max; i++)
		__raw_writel(*(data + i), mbox->data + i * sizeof(u32));
}

static void mbox_copy_send_data(struct hinic_hwdev *hwdev,
				struct hinic_send_mbox *mbox, void *seg,
				u16 seg_len)
{
	u8 mbox_max_buf[MBOX_SEG_LEN] = {0};
	u32 data_len, chk_sz = sizeof(u32);
	u32 *data = seg;
	u32 i, idx_max;

	/* The mbox message should be aligned to 4 bytes. */
	if (seg_len % chk_sz) {
		memcpy(mbox_max_buf, seg, seg_len);
		data = (u32 *)mbox_max_buf;
	}

	data_len = seg_len;
	idx_max = ALIGN(data_len, chk_sz) / chk_sz;

	for (i = 0; i < idx_max; i++)
		__raw_writel(*(data + i),
			     mbox->data + MBOX_HEADER_SZ + i * sizeof(u32));
}

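/**
 * write_mbox_msg_attr - program the mailbox interrupt and control registers
 * for the current segment: destination function, destination and response
 * AEQ numbers, TX size, ordering, write-back and AEQE trigger mode
 * @func_to_func: the mailbox function-to-function context
 * @dst_func: global function id of the destination
 * @dst_aeqn: destination AEQ number
 * @seg_len: length of the segment being sent
 * @poll: non-zero to poll for completion instead of triggering an AEQE
 */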
static void write_mbox_msg_attr(struct hinic_mbox_func_to_func *func_to_func,
				u16 dst_func, u16 dst_aeqn, u16 seg_len,
				int poll)
{
	u16 rsp_aeq = (dst_aeqn == 0) ? 0 : HINIC_MBOX_RSP_AEQN;
	u32 mbox_int, mbox_ctrl;

	mbox_int = HINIC_MBOX_INT_SET(dst_func, DST_FUNC) |
		   HINIC_MBOX_INT_SET(dst_aeqn, DST_AEQN) |
		   HINIC_MBOX_INT_SET(rsp_aeq, SRC_RESP_AEQN) |
		   HINIC_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) |
		   HINIC_MBOX_INT_SET(ALIGN(MBOX_SEG_LEN + MBOX_HEADER_SZ +
				      MBOX_INFO_SZ, MBOX_SEG_LEN_ALIGN) >> 2,
				      TX_SIZE) |
		   HINIC_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) |
		   HINIC_MBOX_INT_SET(WRITE_BACK, WB_EN);

	hinic_hwif_write_reg(func_to_func->hwif,
			     HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int);

	wmb(); /* writing the mbox int attributes */
	mbox_ctrl = HINIC_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS);

	if (poll)
		mbox_ctrl |= HINIC_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE);
	else
		mbox_ctrl |= HINIC_MBOX_CTRL_SET(TRIGGER, TRIGGER_AEQE);

	hinic_hwif_write_reg(func_to_func->hwif,
			     HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl);
}

static void dump_mbox_reg(struct hinic_hwdev *hwdev)
{
	u32 val;

	val = hinic_hwif_read_reg(hwdev->hwif,
				  HINIC_FUNC_CSR_MAILBOX_CONTROL_OFF);
	dev_err(&hwdev->hwif->pdev->dev, "Mailbox control reg: 0x%x\n", val);

	val = hinic_hwif_read_reg(hwdev->hwif,
				  HINIC_FUNC_CSR_MAILBOX_INT_OFFSET_OFF);
	dev_err(&hwdev->hwif->pdev->dev, "Mailbox interrupt offset: 0x%x\n",
		val);
}

static u16 get_mbox_status(struct hinic_send_mbox *mbox)
{
	/* write back is 16B, but only the first 4B are used */
	u64 wb_val = be64_to_cpu(*mbox->wb_status);

	rmb(); /* ensure the status is read before it is checked */

	return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK);
}

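/**
 * wait_for_mbox_seg_completion - wait for the hardware to finish sending
 * the current mailbox segment, either by polling the write-back status or
 * by waiting for the send-result AEQ completion
 * @func_to_func: the mailbox function-to-function context
 * @poll: non-zero to poll instead of sleeping on the completion
 * @wb_status: returned write-back status of the segment
 * Return: 0 - success, -ETIMEDOUT - the segment was not sent in time
 */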
static int
wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func,
			     int poll, u16 *wb_status)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	struct completion *done = &send_mbox->send_done;
	u32 cnt = 0;
	unsigned long jif;

	if (poll) {
		while (cnt < MBOX_MSG_POLLING_TIMEOUT) {
			*wb_status = get_mbox_status(send_mbox);
			if (MBOX_STATUS_FINISHED(*wb_status))
				break;

			usleep_range(900, 1000);
			cnt++;
		}

		if (cnt == MBOX_MSG_POLLING_TIMEOUT) {
			dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout, wb status: 0x%x\n",
				*wb_status);
			dump_mbox_reg(hwdev);
			return -ETIMEDOUT;
		}
	} else {
		jif = msecs_to_jiffies(HINIC_MBOX_COMP_TIME);
		if (!wait_for_completion_timeout(done, jif)) {
			dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n");
			dump_mbox_reg(hwdev);
			hinic_dump_aeq_info(hwdev);
			return -ETIMEDOUT;
		}

		*wb_status = get_mbox_status(send_mbox);
	}

	return 0;
}

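/**
 * send_mbox_seg - write one mailbox segment to the hardware mailbox area,
 * kick off the transfer and check its write-back status
 * @func_to_func: the mailbox function-to-function context
 * @header: the 64-bit mailbox header for this segment
 * @dst_func: global function id of the destination
 * @seg: segment data
 * @seg_len: segment length
 * @poll: non-zero to poll for completion instead of using the AEQ
 * @msg_info: message id and status of the message being sent
 * Return: 0 - success, negative - failure
 */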
static int send_mbox_seg(struct hinic_mbox_func_to_func *func_to_func,
			 u64 header, u16 dst_func, void *seg, u16 seg_len,
			 int poll, void *msg_info)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	u16 seq_dir = HINIC_MBOX_HEADER_GET(header, DIRECTION);
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	struct completion *done = &send_mbox->send_done;
	u8 num_aeqs = hwdev->hwif->attr.num_aeqs;
	u16 dst_aeqn, wb_status = 0, errcode;

	if (num_aeqs >= 4)
		dst_aeqn = (seq_dir == HINIC_HWIF_DIRECT_SEND) ?
			   HINIC_MBOX_RECV_AEQN : HINIC_MBOX_RSP_AEQN;
	else
		dst_aeqn = 0;

	if (!poll)
		init_completion(done);

	clear_mbox_status(send_mbox);

	mbox_copy_header(hwdev, send_mbox, &header);

	mbox_copy_send_data(hwdev, send_mbox, seg, seg_len);

	write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len, poll);

	wmb(); /* writing the mbox msg attributes */

	if (wait_for_mbox_seg_completion(func_to_func, poll, &wb_status))
		return -ETIMEDOUT;

	if (!MBOX_STATUS_SUCCESS(wb_status)) {
		dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment to function %d error, wb status: 0x%x\n",
			dst_func, wb_status);
		errcode = MBOX_STATUS_ERRCODE(wb_status);
		return errcode ? errcode : -EFAULT;
	}

	return 0;
}

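/**
 * send_mbox_to_func - split a mailbox message into 48-byte segments and
 * send them in sequence, marking the final segment with the LAST flag
 * @func_to_func: the mailbox function-to-function context
 * @mod: module of the message
 * @cmd: command of the message
 * @msg: message data
 * @msg_len: message length
 * @dst_func: global function id of the destination
 * @direction: direct send or response
 * @ack_type: whether the message requires an ack
 * @msg_info: message id and status of the message being sent
 * Return: 0 - success, negative - failure
 */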
static int send_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
			     enum hinic_mod_type mod, u16 cmd, void *msg,
			     u16 msg_len, u16 dst_func,
			     enum hinic_hwif_direction_type direction,
			     enum hinic_mbox_ack_type ack_type,
			     struct mbox_msg_info *msg_info)
{
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	u16 seg_len = MBOX_SEG_LEN;
	u8 *msg_seg = (u8 *)msg;
	u16 left = msg_len;
	u32 seq_id = 0;
	u64 header = 0;
	int err = 0;

	down(&func_to_func->msg_send_sem);

	header = HINIC_MBOX_HEADER_SET(msg_len, MSG_LEN) |
		 HINIC_MBOX_HEADER_SET(mod, MODULE) |
		 HINIC_MBOX_HEADER_SET(seg_len, SEG_LEN) |
		 HINIC_MBOX_HEADER_SET(ack_type, NO_ACK) |
		 HINIC_MBOX_HEADER_SET(SEQ_ID_START_VAL, SEQID) |
		 HINIC_MBOX_HEADER_SET(NOT_LAST_SEG, LAST) |
		 HINIC_MBOX_HEADER_SET(direction, DIRECTION) |
		 HINIC_MBOX_HEADER_SET(cmd, CMD) |
		 /* the vf's offset to its associated pf */
		 HINIC_MBOX_HEADER_SET(msg_info->msg_id, MSG_ID) |
		 HINIC_MBOX_HEADER_SET(msg_info->status, STATUS) |
		 HINIC_MBOX_HEADER_SET(hinic_global_func_id_hw(hwdev->hwif),
				       SRC_GLB_FUNC_IDX);

	while (!(HINIC_MBOX_HEADER_GET(header, LAST))) {
		if (left <= HINIC_MBOX_SEG_LEN) {
			header &= ~MBOX_SEGLEN_MASK;
			header |= HINIC_MBOX_HEADER_SET(left, SEG_LEN);
			header |= HINIC_MBOX_HEADER_SET(LAST_SEG, LAST);

			seg_len = left;
		}

		err = send_mbox_seg(func_to_func, header, dst_func, msg_seg,
				    seg_len, MBOX_SEND_MSG_INT, msg_info);
		if (err) {
			dev_err(&hwdev->hwif->pdev->dev, "Failed to send mbox seg, seq_id=0x%llx\n",
				HINIC_MBOX_HEADER_GET(header, SEQID));
			goto err_send_mbox_seg;
		}

		left -= HINIC_MBOX_SEG_LEN;
		msg_seg += HINIC_MBOX_SEG_LEN;

		seq_id++;
		header &= ~(HINIC_MBOX_HEADER_SET(HINIC_MBOX_HEADER_SEQID_MASK,
						  SEQID));
		header |= HINIC_MBOX_HEADER_SET(seq_id, SEQID);
	}

err_send_mbox_seg:
	up(&func_to_func->msg_send_sem);

	return err;
}

static void
response_for_recv_func_mbox(struct hinic_mbox_func_to_func *func_to_func,
			    struct hinic_recv_mbox *recv_mbox, int err,
			    u16 out_size, u16 src_func_idx)
{
	struct mbox_msg_info msg_info = {0};

	if (recv_mbox->ack_type == MBOX_ACK) {
		msg_info.msg_id = recv_mbox->msg_info.msg_id;
		if (err == HINIC_MBOX_PF_BUSY_ACTIVE_FW)
			msg_info.status = HINIC_MBOX_PF_BUSY_ACTIVE_FW;
		else if (err == HINIC_MBOX_VF_CMD_ERROR)
			msg_info.status = HINIC_MBOX_VF_CMD_ERROR;
		else if (err)
			msg_info.status = HINIC_MBOX_PF_SEND_ERR;

		/* if no data needs to be sent in the response, set out_size to 1 */
		if (!out_size || err)
			out_size = MBOX_MSG_NO_DATA_LEN;

		send_mbox_to_func(func_to_func, recv_mbox->mod, recv_mbox->cmd,
				  recv_mbox->buf_out, out_size, src_func_idx,
				  HINIC_HWIF_RESPONSE, MBOX_ACK,
				  &msg_info);
	}
}

static void recv_func_mbox_handler(struct hinic_mbox_func_to_func *func_to_func,
				   struct hinic_recv_mbox *recv_mbox,
				   u16 src_func_idx)
{
	void *buf_out = recv_mbox->buf_out;
	u16 out_size = MBOX_MAX_BUF_SZ;
	int err = 0;

	if (HINIC_IS_VF(func_to_func->hwif)) {
		err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out,
					   &out_size);
	} else {
		if (IS_PF_OR_PPF_SRC(src_func_idx))
			dev_warn(&func_to_func->hwif->pdev->dev,
				 "Unsupported pf2pf mbox msg\n");
		else
			err = recv_pf_from_vf_mbox_handler(func_to_func,
							   recv_mbox,
							   src_func_idx,
							   buf_out, &out_size);
	}

	response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size,
				    src_func_idx);
	kfree(recv_mbox->buf_out);
	kfree(recv_mbox->mbox);
	kfree(recv_mbox);
}

static void set_mbox_to_func_event(struct hinic_mbox_func_to_func *func_to_func,
				   enum mbox_event_state event_flag)
{
	spin_lock(&func_to_func->mbox_lock);
	func_to_func->event_flag = event_flag;
	spin_unlock(&func_to_func->mbox_lock);
}

static int mbox_resp_info_handler(struct hinic_mbox_func_to_func *func_to_func,
				  struct hinic_recv_mbox *mbox_for_resp,
				  enum hinic_mod_type mod, u16 cmd,
				  void *buf_out, u16 *out_size)
{
	int err;

	if (mbox_for_resp->msg_info.status) {
		err = mbox_for_resp->msg_info.status;
		if (err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
			dev_err(&func_to_func->hwif->pdev->dev, "Mbox response error(0x%x)\n",
				mbox_for_resp->msg_info.status);
		return err;
	}

	if (buf_out && out_size) {
		if (*out_size < mbox_for_resp->mbox_len) {
			dev_err(&func_to_func->hwif->pdev->dev,
				"Invalid response mbox message length: %d for mod %d cmd %d, should be less than: %d\n",
				mbox_for_resp->mbox_len, mod, cmd, *out_size);
			return -EFAULT;
		}

		if (mbox_for_resp->mbox_len)
			memcpy(buf_out, mbox_for_resp->mbox,
			       mbox_for_resp->mbox_len);

		*out_size = mbox_for_resp->mbox_len;
	}

	return 0;
}

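/**
 * hinic_mbox_to_func - send a mailbox message to another function and wait
 * for its response
 * @func_to_func: the mailbox function-to-function context
 * @mod: module of the message
 * @cmd: command of the message
 * @dst_func: global function id of the destination
 * @buf_in: message data
 * @in_size: message length
 * @buf_out: buffer for the response data
 * @out_size: response data size
 * @timeout: timeout in ms for the response, 0 for the default
 * Return: 0 - success, negative - failure
 */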
int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func,
		       enum hinic_mod_type mod, u16 cmd, u16 dst_func,
		       void *buf_in, u16 in_size, void *buf_out,
		       u16 *out_size, u32 timeout)
{
	struct hinic_recv_mbox *mbox_for_resp;
	struct mbox_msg_info msg_info = {0};
	unsigned long timeo;
	int err;

	mbox_for_resp = &func_to_func->mbox_resp[dst_func];

	down(&func_to_func->mbox_send_sem);

	init_completion(&mbox_for_resp->recv_done);

	msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func);

	set_mbox_to_func_event(func_to_func, EVENT_START);

	err = send_mbox_to_func(func_to_func, mod, cmd, buf_in, in_size,
				dst_func, HINIC_HWIF_DIRECT_SEND, MBOX_ACK,
				&msg_info);
	if (err) {
		dev_err(&func_to_func->hwif->pdev->dev, "Send mailbox failed, msg_id: %d\n",
			msg_info.msg_id);
		set_mbox_to_func_event(func_to_func, EVENT_FAIL);
		goto err_send_mbox;
	}

	timeo = msecs_to_jiffies(timeout ? timeout : HINIC_MBOX_COMP_TIME);
	if (!wait_for_completion_timeout(&mbox_for_resp->recv_done, timeo)) {
		set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT);
		dev_err(&func_to_func->hwif->pdev->dev,
			"Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id);
		hinic_dump_aeq_info(func_to_func->hwdev);
		err = -ETIMEDOUT;
		goto err_send_mbox;
	}

	set_mbox_to_func_event(func_to_func, EVENT_END);

	err = mbox_resp_info_handler(func_to_func, mbox_for_resp, mod, cmd,
				     buf_out, out_size);

err_send_mbox:
	up(&func_to_func->mbox_send_sem);

	return err;
}

static int mbox_func_params_valid(struct hinic_mbox_func_to_func *func_to_func,
				  void *buf_in, u16 in_size)
{
	if (in_size > HINIC_MBOX_DATA_SIZE) {
		dev_err(&func_to_func->hwif->pdev->dev,
			"Mbox msg len(%d) exceeds limit(%d)\n",
			in_size, HINIC_MBOX_DATA_SIZE);
		return -EINVAL;
	}

	return 0;
}

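/**
 * hinic_mbox_to_pf - send a mailbox message from a VF to its PF and wait
 * for the response
 * @hwdev: the pointer to hw device
 * @mod: module of the message
 * @cmd: command of the message
 * @buf_in: message data
 * @in_size: message length
 * @buf_out: buffer for the response data
 * @out_size: response data size
 * @timeout: timeout in ms for the response, 0 for the default
 * Return: 0 - success, negative - failure
 */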
int hinic_mbox_to_pf(struct hinic_hwdev *hwdev,
		     enum hinic_mod_type mod, u8 cmd, void *buf_in,
		     u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;
	int err = mbox_func_params_valid(func_to_func, buf_in, in_size);

	if (err)
		return err;

	if (!HINIC_IS_VF(hwdev->hwif)) {
		dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
			HINIC_FUNC_TYPE(hwdev->hwif));
		return -EINVAL;
	}

	return hinic_mbox_to_func(func_to_func, mod, cmd,
				  hinic_pf_id_of_vf_hw(hwdev->hwif), buf_in,
				  in_size, buf_out, out_size, timeout);
}

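/**
 * hinic_mbox_to_vf - send a mailbox message from the PF to one of its VFs
 * and wait for the response
 * @hwdev: the pointer to hw device
 * @mod: module of the message
 * @vf_id: id of the destination VF within this PF (starting from 1)
 * @cmd: command of the message
 * @buf_in: message data
 * @in_size: message length
 * @buf_out: buffer for the response data
 * @out_size: response data size
 * @timeout: timeout in ms for the response, 0 for the default
 * Return: 0 - success, negative - failure
 */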
int hinic_mbox_to_vf(struct hinic_hwdev *hwdev,
		     enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in,
		     u16 in_size, void *buf_out, u16 *out_size, u32 timeout)
{
	struct hinic_mbox_func_to_func *func_to_func;
	u16 dst_func_idx;
	int err;

	if (!hwdev)
		return -EINVAL;

	func_to_func = hwdev->func_to_func;
	err = mbox_func_params_valid(func_to_func, buf_in, in_size);
	if (err)
		return err;

	if (HINIC_IS_VF(hwdev->hwif)) {
		dev_err(&hwdev->hwif->pdev->dev, "Params error, func_type: %d\n",
			HINIC_FUNC_TYPE(hwdev->hwif));
		return -EINVAL;
	}

	if (!vf_id) {
		dev_err(&hwdev->hwif->pdev->dev,
			"VF id(%d) error!\n", vf_id);
		return -EINVAL;
	}

	/* vf_offset_to_pf + vf_id is the vf's global function id within
	 * this pf
	 */
	dst_func_idx = hinic_glb_pf_vf_offset(hwdev->hwif) + vf_id;

	return hinic_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in,
				  in_size, buf_out, out_size, timeout);
}

static int init_mbox_info(struct hinic_recv_mbox *mbox_info)
{
	int err;

	mbox_info->seq_id = SEQ_ID_MAX_VAL;

	mbox_info->mbox = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
	if (!mbox_info->mbox)
		return -ENOMEM;

	mbox_info->buf_out = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL);
	if (!mbox_info->buf_out) {
		err = -ENOMEM;
		goto err_alloc_buf_out;
	}

	atomic_set(&mbox_info->msg_cnt, 0);

	return 0;

err_alloc_buf_out:
	kfree(mbox_info->mbox);

	return err;
}

static void clean_mbox_info(struct hinic_recv_mbox *mbox_info)
{
	kfree(mbox_info->buf_out);
	kfree(mbox_info->mbox);
}

static int alloc_mbox_info(struct hinic_hwdev *hwdev,
			   struct hinic_recv_mbox *mbox_info)
{
	u16 func_idx, i;
	int err;

	for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++) {
		err = init_mbox_info(&mbox_info[func_idx]);
		if (err) {
			dev_err(&hwdev->hwif->pdev->dev, "Failed to init function %d mbox info\n",
				func_idx);
			goto err_init_mbox_info;
		}
	}

	return 0;

err_init_mbox_info:
	for (i = 0; i < func_idx; i++)
		clean_mbox_info(&mbox_info[i]);

	return err;
}

static void free_mbox_info(struct hinic_recv_mbox *mbox_info)
{
	u16 func_idx;

	for (func_idx = 0; func_idx < HINIC_MAX_FUNCTIONS; func_idx++)
		clean_mbox_info(&mbox_info[func_idx]);
}

static void prepare_send_mbox(struct hinic_mbox_func_to_func *func_to_func)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;

	send_mbox->data = MBOX_AREA(func_to_func->hwif);
}

static int alloc_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	struct hinic_hwdev *hwdev = func_to_func->hwdev;
	u32 addr_h, addr_l;

	send_mbox->wb_vaddr = dma_alloc_coherent(&hwdev->hwif->pdev->dev,
						 MBOX_WB_STATUS_LEN,
						 &send_mbox->wb_paddr,
						 GFP_KERNEL);
	if (!send_mbox->wb_vaddr)
		return -ENOMEM;

	send_mbox->wb_status = send_mbox->wb_vaddr;

	addr_h = upper_32_bits(send_mbox->wb_paddr);
	addr_l = lower_32_bits(send_mbox->wb_paddr);

	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
			     addr_h);
	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
			     addr_l);

	return 0;
}

static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func)
{
	struct hinic_send_mbox *send_mbox = &func_to_func->send_mbox;
	struct hinic_hwdev *hwdev = func_to_func->hwdev;

	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF,
			     0);
	hinic_hwif_write_reg(hwdev->hwif, HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF,
			     0);

	dma_free_coherent(&hwdev->hwif->pdev->dev, MBOX_WB_STATUS_LEN,
			  send_mbox->wb_vaddr,
			  send_mbox->wb_paddr);
}

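/**
 * hinic_mbox_check_cmd_valid - look up a VF command in a check-handle
 * table and run its check callback, if any, against the message
 * @hwdev: the pointer to hw device
 * @cmd_handle: table of commands and their check callbacks
 * @vf_id: id of the sending VF within this PF
 * @cmd: command of the message
 * @buf_in: message data
 * @in_size: message length
 * @size: number of entries in the table
 * Return: true - the command is supported and valid, false - otherwise
 */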
bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev,
				struct vf_cmd_check_handle *cmd_handle,
				u16 vf_id, u8 cmd, void *buf_in,
				u16 in_size, u8 size)
{
	u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev->hwif);
	int i;

	for (i = 0; i < size; i++) {
		if (cmd == cmd_handle[i].cmd) {
			if (cmd_handle[i].check_cmd)
				return cmd_handle[i].check_cmd(hwdev, src_idx,
							       buf_in, in_size);
			else
				return true;
		}
	}

	dev_err(&hwdev->hwif->pdev->dev,
		"PF received VF (%d) unsupported cmd (0x%x)\n",
		vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd);

	return false;
}

static bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev,
				     struct hinic_cmdq_ctxt *cmdq_ctxt)
{
	struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
	u64 curr_pg_pfn, wq_block_pfn;

	if (cmdq_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif) ||
	    cmdq_ctxt->cmdq_type > HINIC_MAX_CMDQ_TYPES)
		return false;

	curr_pg_pfn = HINIC_CMDQ_CTXT_PAGE_INFO_GET
		(ctxt_info->curr_wqe_page_pfn, CURR_WQE_PAGE_PFN);
	wq_block_pfn = HINIC_CMDQ_CTXT_BLOCK_INFO_GET
		(ctxt_info->wq_block_pfn, WQ_BLOCK_PFN);
	/* VF must use 0-level CLA */
	if (curr_pg_pfn != wq_block_pfn)
		return false;

	return true;
}

static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx,
			    void *buf_in, u16 in_size)
{
	if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
		return false;

	return hinic_cmdq_check_vf_ctxt(hwdev, buf_in);
}

#define HW_CTX_QPS_VALID(hw_ctxt)   \
		((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH &&	\
		(hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH &&	\
		(hw_ctxt)->sq_depth >= HINIC_QUEUE_MIN_DEPTH &&	\
		(hw_ctxt)->sq_depth <= HINIC_QUEUE_MAX_DEPTH &&	\
		(hw_ctxt)->rx_buf_sz_idx <= HINIC_MAX_RX_BUFFER_SIZE)

static bool hw_ctxt_qps_param_valid(struct hinic_cmd_hw_ioctxt *hw_ctxt)
{
	if (HW_CTX_QPS_VALID(hw_ctxt))
		return true;

	if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&
	    !hw_ctxt->rx_buf_sz_idx)
		return true;

	return false;
}

static bool check_hwctxt(struct hinic_hwdev *hwdev, u16 func_idx,
			 void *buf_in, u16 in_size)
{
	struct hinic_cmd_hw_ioctxt *hw_ctxt = buf_in;

	if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
		return false;

	if (hw_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
		return false;

	if (hw_ctxt->set_cmdq_depth) {
		if (hw_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH &&
		    hw_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH)
			return true;

		return false;
	}

	return hw_ctxt_qps_param_valid(hw_ctxt);
}

static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx,
				   void *buf_in, u16 in_size)
{
	struct hinic_wq_page_size *page_size_info = buf_in;

	if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size))
		return false;

	if (page_size_info->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif))
		return false;

	if (((1U << page_size_info->page_size) * SZ_4K) !=
	    HINIC_DEFAULT_WQ_PAGE_SIZE)
		return false;

	return true;
}

static struct vf_cmd_check_handle hw_cmd_support_vf[] = {
	{HINIC_COMM_CMD_START_FLR, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt},
	{HINIC_COMM_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt},
	{HINIC_COMM_CMD_HWCTXT_SET, check_hwctxt},
	{HINIC_COMM_CMD_HWCTXT_GET, check_hwctxt},
	{HINIC_COMM_CMD_SQ_HI_CI_SET, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_IO_RES_CLEAR, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B},
	{HINIC_COMM_CMD_PAGESIZE_SET, check_set_wq_page_size},
};

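/**
 * comm_pf_mbox_handler - PF mailbox callback for HINIC_MOD_COMM messages
 * from VFs; validates the command against hw_cmd_support_vf and forwards
 * it to the management firmware
 * @handle: the hw device
 * @vf_id: id of the sending VF within this PF
 * @cmd: command of the message
 * @buf_in: message data
 * @in_size: message length
 * @buf_out: buffer for the response data
 * @out_size: response data size
 * Return: 0 - success, negative - failure
 */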
static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in,
				u16 in_size, void *buf_out, u16 *out_size)
{
	u8 size = ARRAY_SIZE(hw_cmd_support_vf);
	struct hinic_hwdev *hwdev = handle;
	struct hinic_pfhwdev *pfhwdev;
	int err = 0;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

	if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd,
					buf_in, in_size, size)) {
		dev_err(&hwdev->hwif->pdev->dev,
			"PF received VF: %d invalid common cmd: 0x%x or mbox len: 0x%x\n",
			vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd,
			in_size);
		return HINIC_MBOX_VF_CMD_ERROR;
	}

	if (cmd == HINIC_COMM_CMD_START_FLR) {
		*out_size = 0;
	} else {
		err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
					cmd, buf_in, in_size, buf_out, out_size,
					HINIC_MGMT_MSG_SYNC);
		if (err && err != HINIC_MBOX_PF_BUSY_ACTIVE_FW)
			dev_err(&hwdev->hwif->pdev->dev,
				"PF mbox common callback handler err: %d\n",
				err);
	}

	return err;
}

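/**
 * hinic_func_to_func_init - initialize the function-to-function mailbox:
 * allocate the send/response buffers and write-back status area, create
 * the mailbox workqueue and register the mailbox AEQ handlers
 * @hwdev: the pointer to hw device
 * Return: 0 - success, negative - failure
 */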
int hinic_func_to_func_init(struct hinic_hwdev *hwdev)
{
	struct hinic_mbox_func_to_func *func_to_func;
	struct hinic_pfhwdev *pfhwdev;
	int err;

	pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
	func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL);
	if (!func_to_func)
		return -ENOMEM;

	hwdev->func_to_func = func_to_func;
	func_to_func->hwdev = hwdev;
	func_to_func->hwif = hwdev->hwif;
	sema_init(&func_to_func->mbox_send_sem, 1);
	sema_init(&func_to_func->msg_send_sem, 1);
	spin_lock_init(&func_to_func->mbox_lock);
	func_to_func->workq = create_singlethread_workqueue(HINIC_MBOX_WQ_NAME);
	if (!func_to_func->workq) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to initialize MBOX workqueue\n");
		err = -ENOMEM;
		goto err_create_mbox_workq;
	}

	err = alloc_mbox_info(hwdev, func_to_func->mbox_send);
	if (err) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_active\n");
		goto err_alloc_mbox_for_send;
	}

	err = alloc_mbox_info(hwdev, func_to_func->mbox_resp);
	if (err) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mem for mbox_passive\n");
		goto err_alloc_mbox_for_resp;
	}

	err = alloc_mbox_wb_status(func_to_func);
	if (err) {
		dev_err(&hwdev->hwif->pdev->dev, "Failed to alloc mbox write back status\n");
		goto err_alloc_wb_status;
	}

	prepare_send_mbox(func_to_func);

	hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC,
				 &pfhwdev->hwdev, hinic_mbox_func_aeqe_handler);
	hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT,
				 &pfhwdev->hwdev, hinic_mbox_self_aeqe_handler);

	if (!HINIC_IS_VF(hwdev->hwif))
		hinic_register_pf_mbox_cb(hwdev, HINIC_MOD_COMM,
					  comm_pf_mbox_handler);

	return 0;

err_alloc_wb_status:
	free_mbox_info(func_to_func->mbox_resp);

err_alloc_mbox_for_resp:
	free_mbox_info(func_to_func->mbox_send);

err_alloc_mbox_for_send:
	destroy_workqueue(func_to_func->workq);

err_create_mbox_workq:
	kfree(func_to_func);

	return err;
}

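/**
 * hinic_func_to_func_free - tear down the function-to-function mailbox,
 * reversing hinic_func_to_func_init
 * @hwdev: the pointer to hw device
 */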
void hinic_func_to_func_free(struct hinic_hwdev *hwdev)
{
	struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func;

	hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_FROM_FUNC);
	hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MBX_SEND_RSLT);

	hinic_unregister_pf_mbox_cb(hwdev, HINIC_MOD_COMM);
	/* destroy the workqueue before freeing the related mbox resources
	 * to avoid illegal resource access
	 */
	destroy_workqueue(func_to_func->workq);

	free_mbox_wb_status(func_to_func);
	free_mbox_info(func_to_func->mbox_resp);
	free_mbox_info(func_to_func->mbox_send);

	kfree(func_to_func);
}

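/**
 * hinic_vf_mbox_random_id_init - on the PF, assign an initial mailbox
 * random id to every VF; if the management firmware does not support the
 * command, VF random ids are disabled
 * @hwdev: the pointer to hw device
 * Return: 0 - success, negative - failure
 */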
int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev)
{
	u16 vf_offset;
	u8 vf_in_pf;
	int err = 0;

	if (HINIC_IS_VF(hwdev->hwif))
		return 0;

	vf_offset = hinic_glb_pf_vf_offset(hwdev->hwif);

	for (vf_in_pf = 1; vf_in_pf <= hwdev->nic_cap.max_vf; vf_in_pf++) {
		err = set_vf_mbox_random_id(hwdev, vf_offset + vf_in_pf);
		if (err)
			break;
	}

	if (err == HINIC_MGMT_CMD_UNSUPPORTED) {
		hwdev->func_to_func->support_vf_random = false;
		err = 0;
		dev_warn(&hwdev->hwif->pdev->dev, "Mgmt doesn't support setting VF%d random id\n",
			 vf_in_pf - 1);
	} else if (!err) {
		hwdev->func_to_func->support_vf_random = true;
	}

	return err;
}