/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"

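/* Ring the MCC doorbell: write the queue id and a posted-entry count of 1 */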
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if the valid bit is set, check the entire word as we don't know
 * the endianness of the data (an old entry is host endian while a new
 * entry is little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
	    (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats.cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
			adapter->stats_ioctl_sent = false;
		}
	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
			"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

static inline bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE);
}

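/* Peek at the tail of the MCC CQ; return the entry only if its valid bit
 * is set */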
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

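/* Arm the MCC CQ so that the hardware raises completions/events for it */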
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

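/* Drain the MCC CQ: dispatch async events and command completions;
 * returns the number of CQ entries processed */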
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			*status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}

/* Wait until no pending MCC requests remain */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}

/* Notify the MCC of posted requests and wait for their completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

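/* Poll the mbox doorbell's ready bit (roughly a 4s timeout); an all-ones
 * read means the PCI slot has been disconnected */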
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			be_detect_dump_ue(adapter);
			return -1;
		}

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1));
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps (hi word, then
 * lo word), polling the mbox doorbell until a command completion (or a
 * timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

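/* Read the POST stage from the MPU semaphore register; returns non-zero
 * if the error bit is set */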
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 40);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}

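/* Fill the phys_addr array with the 4K pages spanned by mem, capped at
 * max_pages */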
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

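/* Return the mailbox WRB, zeroed and ready for use; callers serialize via
 * mbox_lock */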
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

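/* Get a free WRB slot from the MCC queue; returns NULL if the queue is full */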
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}
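
/* Uses mbox */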
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4-byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

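/* Uses mbox */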
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}

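/* Uses mbox */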
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Uses mbox */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Generic destroyer function for all types of queues
 * Uses mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);

	return status;
}

/* Get stats is a non-embedded command: the request is not embedded inside the
 * WRB but sits in a separate DMA memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);
	adapter->stats_ioctl_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FW_VERSION);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to the specified value
 * Uses asynchronous mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	/* In FW versions X.102.149/X.101.487 and later, only the port
	 * setting associated with the issuing pci function takes effect
	 */
	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/*
 * Uses MCC for this command as it may be called in BH context
 * (netdev == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct net_device *netdev, struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (netdev) {
		int i;
		struct netdev_hw_addr *ha;

		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_FUNCTION_RESET);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
				u8 *connector)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
			OPCODE_COMMON_READ_TRANSRECV_DATA);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));

	req->port = cpu_to_le32(port);
	req->page_num = cpu_to_le32(TR_PAGE_A0);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
		*connector = resp->data.connector;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

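/* Uses asynchronous MCC; the caller sleeps on flash_compl, which the MCC
 * completion handler signals when the WRITE_FLASHROM command completes */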
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

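/* Uses synchronous mcc */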
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

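/* Uses synchronous mcc */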
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
	memcpy(req->magic_mac, mac, ETH_ALEN);

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

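/* Uses synchronous mcc */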
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*req));

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

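/* Uses synchronous mcc */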
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

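/* Uses synchronous mcc */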
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
				u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	struct be_sge *sge;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);
	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_LOWLEVEL_HOST_DDR_DMA);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

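/* Uses synchronous mcc */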
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

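/* Uses synchronous mcc; cmd is a non-embedded DMA buffer */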
int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_GET_PHY_DETAILS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_PHY_DETAILS,
			sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

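/* Uses synchronous mcc */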
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_QOS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_QOS, sizeof(*req));

	req->hdr.domain = domain;
	req->valid_bits = BE_QOS_BITS_NIC;
	req->max_bps_nic = bps;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}