ecore_sp.c revision 284470
1255736Sdavidch/*-
2265411Sdavidcs * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3255736Sdavidch *
4255736Sdavidch * Redistribution and use in source and binary forms, with or without
5255736Sdavidch * modification, are permitted provided that the following conditions
6255736Sdavidch * are met:
7255736Sdavidch *
8255736Sdavidch * 1. Redistributions of source code must retain the above copyright
9255736Sdavidch *    notice, this list of conditions and the following disclaimer.
10255736Sdavidch * 2. Redistributions in binary form must reproduce the above copyright
11255736Sdavidch *    notice, this list of conditions and the following disclaimer in the
12255736Sdavidch *    documentation and/or other materials provided with the distribution.
13255736Sdavidch *
14255736Sdavidch * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15255736Sdavidch * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16255736Sdavidch * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17255736Sdavidch * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18255736Sdavidch * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19255736Sdavidch * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20255736Sdavidch * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21255736Sdavidch * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22255736Sdavidch * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23255736Sdavidch * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24255736Sdavidch * THE POSSIBILITY OF SUCH DAMAGE.
25255736Sdavidch */
26255736Sdavidch
27255736Sdavidch#include <sys/cdefs.h>
28255736Sdavidch__FBSDID("$FreeBSD: head/sys/dev/bxe/ecore_sp.c 284470 2015-06-16 21:11:32Z davidcs $");
29255736Sdavidch
30255736Sdavidch#include "bxe.h"
31255736Sdavidch#include "ecore_init.h"
32255736Sdavidch
33255736Sdavidch/**** Exe Queue interfaces ****/
34255736Sdavidch
/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @sc:		driver handle
 * @o:		pointer to the object
 * @exe_len:	maximum length (in command units) of one execution chunk
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void ecore_exe_queue_init(struct bxe_softc *sc,
					struct ecore_exe_queue_obj *o,
					int exe_len,
					union ecore_qable_obj *owner,
					exe_q_validate validate,
					exe_q_remove remove,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	/* Start from a fully zeroed object before wiring it up */
	ECORE_MEMSET(o, 0, sizeof(*o));

	ECORE_LIST_INIT(&o->exe_queue);
	ECORE_LIST_INIT(&o->pending_comp);

	ECORE_SPIN_LOCK_INIT(&o->lock, sc);

	o->exe_chunk_len = exe_len;
	o->owner         = owner;

	/* Owner specific callbacks */
	o->validate      = validate;
	o->remove        = remove;
	o->optimize      = optimize;
	o->execute       = exec;
	o->get           = get;

	ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d\n",
		  exe_len);
}
76255736Sdavidch
/* Release a single execution queue element back to the allocator. */
static inline void ecore_exe_queue_free_elem(struct bxe_softc *sc,
					     struct ecore_exeq_elem *elem)
{
	ECORE_MSG(sc, "Deleting an exe_queue element\n");
	ECORE_FREE(sc, elem, sizeof(*elem));
}
83255736Sdavidch
84255736Sdavidchstatic inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
85255736Sdavidch{
86255736Sdavidch	struct ecore_exeq_elem *elem;
87255736Sdavidch	int cnt = 0;
88255736Sdavidch
89255736Sdavidch	ECORE_SPIN_LOCK_BH(&o->lock);
90255736Sdavidch
91255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
92255736Sdavidch				  struct ecore_exeq_elem)
93255736Sdavidch		cnt++;
94255736Sdavidch
95255736Sdavidch	ECORE_SPIN_UNLOCK_BH(&o->lock);
96255736Sdavidch
97255736Sdavidch	return cnt;
98255736Sdavidch}
99255736Sdavidch
/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @sc:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 *
 * Returns ECORE_SUCCESS when the element was queued, otherwise the
 * non-zero result of the owner's optimize()/validate() callback.
 */
static inline int ecore_exe_queue_add(struct bxe_softc *sc,
				      struct ecore_exe_queue_obj *o,
				      struct ecore_exeq_elem *elem,
				      bool restore)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->lock);

	if (!restore) {
		/* Try to cancel this element queue */
		rc = o->optimize(sc, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(sc, o->owner, elem);
		if (rc) {
			ECORE_MSG(sc, "Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return ECORE_SUCCESS;

free_and_exit:
	/* Element was optimized out or rejected - free it before
	 * dropping the queue lock; ownership never reached the queue.
	 */
	ecore_exe_queue_free_elem(sc, elem);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return rc;
}
147255736Sdavidch
148255736Sdavidchstatic inline void __ecore_exe_queue_reset_pending(
149255736Sdavidch	struct bxe_softc *sc,
150255736Sdavidch	struct ecore_exe_queue_obj *o)
151255736Sdavidch{
152255736Sdavidch	struct ecore_exeq_elem *elem;
153255736Sdavidch
154255736Sdavidch	while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
155255736Sdavidch		elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
156255736Sdavidch					      struct ecore_exeq_elem,
157255736Sdavidch					      link);
158255736Sdavidch
159255736Sdavidch		ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
160255736Sdavidch		ecore_exe_queue_free_elem(sc, elem);
161255736Sdavidch	}
162255736Sdavidch}
163255736Sdavidch
/**
 * ecore_exe_queue_step - execute one execution chunk atomically
 *
 * @sc:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Should be called while holding the exe_queue->lock).
 *
 * Returns ECORE_PENDING while a previous chunk is still outstanding;
 * otherwise returns the owner's execute() result: negative on failure
 * (commands are spliced back onto the queue), zero when no completion
 * is expected (pending list is reset), or a positive pending status.
 */
static inline int ecore_exe_queue_step(struct bxe_softc *sc,
				       struct ecore_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	/* The spacer is a dummy element used only to keep a list
	 * non-empty while elements migrate between lists (see below).
	 */
	ECORE_MEMSET(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__ecore_exe_queue_reset_pending(sc, o);
		} else {
			return ECORE_PENDING;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
					      struct ecore_exeq_elem,
					      link);
		ECORE_DBG_BREAK_IF(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * ecore_exe_queue_empty() without locking.
			 */
			ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
			mb();
			ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
			ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
			ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len)
		return ECORE_SUCCESS;

	rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 *  and reset the pending_comp.
		 */
		ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__ecore_exe_queue_reset_pending(sc, o);

	return rc;
}
239255736Sdavidch
240255736Sdavidchstatic inline bool ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
241255736Sdavidch{
242255736Sdavidch	bool empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);
243255736Sdavidch
244255736Sdavidch	/* Don't reorder!!! */
245255736Sdavidch	mb();
246255736Sdavidch
247255736Sdavidch	return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
248255736Sdavidch}
249255736Sdavidch
/* Allocate a new zero-initialized exe_queue element (GFP_ATOMIC);
 * may return NULL on allocation failure.
 */
static inline struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(
	struct bxe_softc *sc)
{
	ECORE_MSG(sc, "Allocating a new exe_queue element\n");
	return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC,
			    sc);
}
257255736Sdavidch
258255736Sdavidch/************************ raw_obj functions ***********************************/
259255736Sdavidchstatic bool ecore_raw_check_pending(struct ecore_raw_obj *o)
260255736Sdavidch{
261255736Sdavidch	/*
262255736Sdavidch     * !! converts the value returned by ECORE_TEST_BIT such that it
263255736Sdavidch     * is guaranteed not to be truncated regardless of bool definition.
264255736Sdavidch	 *
265255736Sdavidch	 * Note we cannot simply define the function's return value type
266255736Sdavidch     * to match the type returned by ECORE_TEST_BIT, as it varies by
267255736Sdavidch     * platform/implementation.
268255736Sdavidch	 */
269255736Sdavidch
270255736Sdavidch	return !!ECORE_TEST_BIT(o->state, o->pstate);
271255736Sdavidch}
272255736Sdavidch
/* Clear the object's pending state bit, bracketed by the SMP
 * memory-barrier macros so the update ordering is preserved.
 */
static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_CLEAR_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}
279255736Sdavidch
/* Set the object's pending state bit, bracketed by the same SMP
 * memory-barrier macros used for the clear path.
 */
static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_SET_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}
286255736Sdavidch
/**
 * ecore_state_wait - wait until the given bit(state) is cleared
 *
 * @sc:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer holding the state bit
 *
 * Polls until the bit clears. Returns ECORE_SUCCESS once cleared,
 * ECORE_IO if sc->panic becomes set while waiting, or ECORE_TIMEOUT
 * when the retry budget is exhausted.
 */
static inline int ecore_state_wait(struct bxe_softc *sc, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;


	/* Emulation is much slower - give it a far larger budget */
	if (CHIP_REV_IS_EMUL(sc))
		cnt *= 20;

	ECORE_MSG(sc, "waiting for state to become %d\n", state);

	ECORE_MIGHT_SLEEP();
	while (cnt--) {
		if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
			ECORE_MSG(sc, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return ECORE_SUCCESS;
		}

		/* NOTE(review): delay_us is not declared in this function;
		 * presumably the ECORE_WAIT macro supplies or ignores this
		 * argument - confirm against its definition in ecore_sp.h.
		 */
		ECORE_WAIT(sc, delay_us);

		if (sc->panic)
			return ECORE_IO;
	}

	/* timeout! */
	ECORE_ERR("timeout waiting for state %d\n", state);
#ifdef ECORE_STOP_ON_ERROR
	ecore_panic();
#endif

	return ECORE_TIMEOUT;
}
330255736Sdavidch
/* Block until the raw object's pending state bit is cleared. */
static int ecore_raw_wait(struct bxe_softc *sc, struct ecore_raw_obj *raw)
{
	return ecore_state_wait(sc, raw->state, raw->pstate);
}
335255736Sdavidch
336255736Sdavidch/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
337255736Sdavidch/* credit handling callbacks */
338255736Sdavidchstatic bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
339255736Sdavidch{
340255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
341255736Sdavidch
342255736Sdavidch	ECORE_DBG_BREAK_IF(!mp);
343255736Sdavidch
344255736Sdavidch	return mp->get_entry(mp, offset);
345255736Sdavidch}
346255736Sdavidch
347255736Sdavidchstatic bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
348255736Sdavidch{
349255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
350255736Sdavidch
351255736Sdavidch	ECORE_DBG_BREAK_IF(!mp);
352255736Sdavidch
353255736Sdavidch	return mp->get(mp, 1);
354255736Sdavidch}
355255736Sdavidch
356255736Sdavidchstatic bool ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset)
357255736Sdavidch{
358255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
359255736Sdavidch
360255736Sdavidch	ECORE_DBG_BREAK_IF(!vp);
361255736Sdavidch
362255736Sdavidch	return vp->get_entry(vp, offset);
363255736Sdavidch}
364255736Sdavidch
365255736Sdavidchstatic bool ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o)
366255736Sdavidch{
367255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
368255736Sdavidch
369255736Sdavidch	ECORE_DBG_BREAK_IF(!vp);
370255736Sdavidch
371255736Sdavidch	return vp->get(vp, 1);
372255736Sdavidch}
373255736Sdavidch
374255736Sdavidchstatic bool ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
375255736Sdavidch{
376255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
377255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
378255736Sdavidch
379255736Sdavidch	if (!mp->get(mp, 1))
380255736Sdavidch		return FALSE;
381255736Sdavidch
382255736Sdavidch	if (!vp->get(vp, 1)) {
383255736Sdavidch		mp->put(mp, 1);
384255736Sdavidch		return FALSE;
385255736Sdavidch	}
386255736Sdavidch
387255736Sdavidch	return TRUE;
388255736Sdavidch}
389255736Sdavidch
390255736Sdavidchstatic bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
391255736Sdavidch{
392255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
393255736Sdavidch
394255736Sdavidch	return mp->put_entry(mp, offset);
395255736Sdavidch}
396255736Sdavidch
397255736Sdavidchstatic bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
398255736Sdavidch{
399255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
400255736Sdavidch
401255736Sdavidch	return mp->put(mp, 1);
402255736Sdavidch}
403255736Sdavidch
404255736Sdavidchstatic bool ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset)
405255736Sdavidch{
406255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
407255736Sdavidch
408255736Sdavidch	return vp->put_entry(vp, offset);
409255736Sdavidch}
410255736Sdavidch
411255736Sdavidchstatic bool ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o)
412255736Sdavidch{
413255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
414255736Sdavidch
415255736Sdavidch	return vp->put(vp, 1);
416255736Sdavidch}
417255736Sdavidch
418255736Sdavidchstatic bool ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
419255736Sdavidch{
420255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
421255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
422255736Sdavidch
423255736Sdavidch	if (!mp->put(mp, 1))
424255736Sdavidch		return FALSE;
425255736Sdavidch
426255736Sdavidch	if (!vp->put(vp, 1)) {
427255736Sdavidch		mp->get(mp, 1);
428255736Sdavidch		return FALSE;
429255736Sdavidch	}
430255736Sdavidch
431255736Sdavidch	return TRUE;
432255736Sdavidch}
433255736Sdavidch
434255736Sdavidch/**
435255736Sdavidch * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
436255736Sdavidch * head list.
437255736Sdavidch *
438255736Sdavidch * @sc:		device handle
439255736Sdavidch * @o:		vlan_mac object
440255736Sdavidch *
441255736Sdavidch * @details: Non-blocking implementation; should be called under execution
442255736Sdavidch *           queue lock.
443255736Sdavidch */
444255736Sdavidchstatic int __ecore_vlan_mac_h_write_trylock(struct bxe_softc *sc,
445255736Sdavidch					    struct ecore_vlan_mac_obj *o)
446255736Sdavidch{
447255736Sdavidch	if (o->head_reader) {
448255736Sdavidch		ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy\n");
449255736Sdavidch		return ECORE_BUSY;
450255736Sdavidch	}
451255736Sdavidch
452255736Sdavidch	ECORE_MSG(sc, "vlan_mac_lock writer - Taken\n");
453255736Sdavidch	return ECORE_SUCCESS;
454255736Sdavidch}
455255736Sdavidch
/**
 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
 * which wasn't able to run due to a taken lock on vlan mac head list.
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __ecore_vlan_mac_h_exec_pending(struct bxe_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	int rc;
	unsigned long ramrod_flags = o->saved_ramrod_flags;

	ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
		  ramrod_flags);
	/* Clear the saved request before executing, so a new request can
	 * be recorded while the step runs.
	 */
	o->head_exe_request = FALSE;
	o->saved_ramrod_flags = 0;
	rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
	if ((rc != ECORE_SUCCESS) && (rc != ECORE_PENDING)) {
		ECORE_ERR("execution of pending commands failed with rc %d\n",
			  rc);
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	}
}
485255736Sdavidch
/**
 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
 * called due to vlan mac head list lock being taken.
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 * @ramrod_flags:	ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock. Overwrites any
 *          previously saved flags - only the last pended request is
 *          remembered; it will run when the head list lock is released.
 */
static void __ecore_vlan_mac_h_pend(struct bxe_softc *sc,
				    struct ecore_vlan_mac_obj *o,
				    unsigned long ramrod_flags)
{
	o->head_exe_request = TRUE;
	o->saved_ramrod_flags = ramrod_flags;
	ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu\n",
		  ramrod_flags);
}
505255736Sdavidch
/**
 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	/* It's possible a new pending execution was added since this writer
	 * executed. If so, execute again. [Ad infinitum]
	 * (each __ecore_vlan_mac_h_exec_pending() run may re-set
	 * head_exe_request, hence the loop rather than a single check.)
	 */
	while(o->head_exe_request) {
		ECORE_MSG(sc, "vlan_mac_lock - writer release encountered a pending request\n");
		__ecore_vlan_mac_h_exec_pending(sc, o);
	}
}
527255736Sdavidch
/**
 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 *          Locked wrapper around __ecore_vlan_mac_h_write_unlock().
 */
void ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
				   struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_write_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}
544255736Sdavidch
/**
 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 *          Always returns ECORE_SUCCESS in this implementation.
 */
static int __ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
					struct ecore_vlan_mac_obj *o)
{
	/* If we got here, we're holding lock --> no WRITER exists */
	o->head_reader++;
	ECORE_MSG(sc, "vlan_mac_lock - locked reader - number %d\n",
		  o->head_reader);

	return ECORE_SUCCESS;
}
564255736Sdavidch
/**
 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 *          Returns the result of __ecore_vlan_mac_h_read_lock().
 */
int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
			       struct ecore_vlan_mac_obj *o)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	rc = __ecore_vlan_mac_h_read_lock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}
584255736Sdavidch
/**
 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader. possibly releasing and reclaiming the execution queue lock.
 *          An unbalanced release is logged as an error (and panics under
 *          ECORE_STOP_ON_ERROR).
 */
static void __ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
					  struct ecore_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	} else {
		o->head_reader--;
		ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d\n",
			  o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this reader
	 * was last - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request\n");

		/* Writer release will do the trick */
		__ecore_vlan_mac_h_write_unlock(sc, o);
	}
}
619255736Sdavidch
/**
 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run. Locked wrapper around
 *          __ecore_vlan_mac_h_read_unlock().
 */
void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
				  struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_read_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}
637255736Sdavidch
/**
 * ecore_get_n_elements - copy up to @n registry elements into a caller buffer
 *
 * @sc:			device handle
 * @o:			vlan_mac object
 * @n:			number of elements to get
 * @base:		base address for element placement
 * @stride:		stride between elements (in bytes)
 * @size:		number of bytes to copy per element
 *
 * NOTE(review): the comment block originally carried the name of
 * ecore_vlan_mac_h_read_unlock - corrected here. Also note the return
 * value is counter * ETH_ALEN regardless of @size, which looks
 * MAC-specific; confirm what the callers expect.
 */
static int ecore_get_n_elements(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
				 int n, uint8_t *base, uint8_t stride, uint8_t size)
{
	struct ecore_vlan_mac_registry_elem *pos;
	uint8_t *next = base;
	int counter = 0;
	int read_lock;

	ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)\n");
	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
	if (read_lock != ECORE_SUCCESS)
		ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");

	/* traverse list */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem) {
		if (counter < n) {
			/* copy payload, then skip the inter-element gap */
			ECORE_MEMCPY(next, &pos->u, size);
			counter++;
			ECORE_MSG(sc, "copied element number %d to address %p element was:\n",
				  counter, next);
			next += stride + size;
		}
	}

	/* Only release the reader lock if we actually obtained it */
	if (read_lock == ECORE_SUCCESS) {
		ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)\n");
		ecore_vlan_mac_h_read_unlock(sc, o);
	}

	return counter * ETH_ALEN;
}
679255736Sdavidch
680255736Sdavidch/* check_add() callbacks */
681255736Sdavidchstatic int ecore_check_mac_add(struct bxe_softc *sc,
682255736Sdavidch			       struct ecore_vlan_mac_obj *o,
683255736Sdavidch			       union ecore_classification_ramrod_data *data)
684255736Sdavidch{
685255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
686255736Sdavidch
687255736Sdavidch	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
688255736Sdavidch
689255736Sdavidch	if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
690255736Sdavidch		return ECORE_INVAL;
691255736Sdavidch
692255736Sdavidch	/* Check if a requested MAC already exists */
693255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
694255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
695255736Sdavidch		if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
696255736Sdavidch		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
697255736Sdavidch			return ECORE_EXISTS;
698255736Sdavidch
699255736Sdavidch	return ECORE_SUCCESS;
700255736Sdavidch}
701255736Sdavidch
702255736Sdavidchstatic int ecore_check_vlan_add(struct bxe_softc *sc,
703255736Sdavidch				struct ecore_vlan_mac_obj *o,
704255736Sdavidch				union ecore_classification_ramrod_data *data)
705255736Sdavidch{
706255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
707255736Sdavidch
708255736Sdavidch	ECORE_MSG(sc, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
709255736Sdavidch
710255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
711255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
712255736Sdavidch		if (data->vlan.vlan == pos->u.vlan.vlan)
713255736Sdavidch			return ECORE_EXISTS;
714255736Sdavidch
715255736Sdavidch	return ECORE_SUCCESS;
716255736Sdavidch}
717255736Sdavidch
718255736Sdavidchstatic int ecore_check_vlan_mac_add(struct bxe_softc *sc,
719255736Sdavidch				    struct ecore_vlan_mac_obj *o,
720255736Sdavidch				   union ecore_classification_ramrod_data *data)
721255736Sdavidch{
722255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
723255736Sdavidch
724255736Sdavidch	ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
725255736Sdavidch		  data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
726255736Sdavidch
727255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
728255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
729255736Sdavidch		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
730255736Sdavidch		    (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
731255736Sdavidch				  ETH_ALEN)) &&
732255736Sdavidch		    (data->vlan_mac.is_inner_mac ==
733255736Sdavidch		     pos->u.vlan_mac.is_inner_mac))
734255736Sdavidch			return ECORE_EXISTS;
735255736Sdavidch
736255736Sdavidch	return ECORE_SUCCESS;
737255736Sdavidch}
738255736Sdavidch
739255736Sdavidch/* check_del() callbacks */
740255736Sdavidchstatic struct ecore_vlan_mac_registry_elem *
741255736Sdavidch	ecore_check_mac_del(struct bxe_softc *sc,
742255736Sdavidch			    struct ecore_vlan_mac_obj *o,
743255736Sdavidch			    union ecore_classification_ramrod_data *data)
744255736Sdavidch{
745255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
746255736Sdavidch
747255736Sdavidch	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
748255736Sdavidch
749255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
750255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
751255736Sdavidch		if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
752255736Sdavidch		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
753255736Sdavidch			return pos;
754255736Sdavidch
755255736Sdavidch	return NULL;
756255736Sdavidch}
757255736Sdavidch
758255736Sdavidchstatic struct ecore_vlan_mac_registry_elem *
759255736Sdavidch	ecore_check_vlan_del(struct bxe_softc *sc,
760255736Sdavidch			     struct ecore_vlan_mac_obj *o,
761255736Sdavidch			     union ecore_classification_ramrod_data *data)
762255736Sdavidch{
763255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
764255736Sdavidch
765255736Sdavidch	ECORE_MSG(sc, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
766255736Sdavidch
767255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
768255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
769255736Sdavidch		if (data->vlan.vlan == pos->u.vlan.vlan)
770255736Sdavidch			return pos;
771255736Sdavidch
772255736Sdavidch	return NULL;
773255736Sdavidch}
774255736Sdavidch
775255736Sdavidchstatic struct ecore_vlan_mac_registry_elem *
776255736Sdavidch	ecore_check_vlan_mac_del(struct bxe_softc *sc,
777255736Sdavidch				 struct ecore_vlan_mac_obj *o,
778255736Sdavidch				 union ecore_classification_ramrod_data *data)
779255736Sdavidch{
780255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
781255736Sdavidch
782255736Sdavidch	ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
783255736Sdavidch		  data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
784255736Sdavidch
785255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
786255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
787255736Sdavidch		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
788255736Sdavidch		    (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
789255736Sdavidch			     ETH_ALEN)) &&
790255736Sdavidch		    (data->vlan_mac.is_inner_mac ==
791255736Sdavidch		     pos->u.vlan_mac.is_inner_mac))
792255736Sdavidch			return pos;
793255736Sdavidch
794255736Sdavidch	return NULL;
795255736Sdavidch}
796255736Sdavidch
797255736Sdavidch/* check_move() callback */
798255736Sdavidchstatic bool ecore_check_move(struct bxe_softc *sc,
799255736Sdavidch			     struct ecore_vlan_mac_obj *src_o,
800255736Sdavidch			     struct ecore_vlan_mac_obj *dst_o,
801255736Sdavidch			     union ecore_classification_ramrod_data *data)
802255736Sdavidch{
803255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
804255736Sdavidch	int rc;
805255736Sdavidch
806255736Sdavidch	/* Check if we can delete the requested configuration from the first
807255736Sdavidch	 * object.
808255736Sdavidch	 */
809255736Sdavidch	pos = src_o->check_del(sc, src_o, data);
810255736Sdavidch
811255736Sdavidch	/*  check if configuration can be added */
812255736Sdavidch	rc = dst_o->check_add(sc, dst_o, data);
813255736Sdavidch
814255736Sdavidch	/* If this classification can not be added (is already set)
815255736Sdavidch	 * or can't be deleted - return an error.
816255736Sdavidch	 */
817255736Sdavidch	if (rc || !pos)
818255736Sdavidch		return FALSE;
819255736Sdavidch
820255736Sdavidch	return TRUE;
821255736Sdavidch}
822255736Sdavidch
823255736Sdavidchstatic bool ecore_check_move_always_err(
824255736Sdavidch	struct bxe_softc *sc,
825255736Sdavidch	struct ecore_vlan_mac_obj *src_o,
826255736Sdavidch	struct ecore_vlan_mac_obj *dst_o,
827255736Sdavidch	union ecore_classification_ramrod_data *data)
828255736Sdavidch{
829255736Sdavidch	return FALSE;
830255736Sdavidch}
831255736Sdavidch
832255736Sdavidchstatic inline uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
833255736Sdavidch{
834255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
835255736Sdavidch	uint8_t rx_tx_flag = 0;
836255736Sdavidch
837255736Sdavidch	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
838255736Sdavidch	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
839255736Sdavidch		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
840255736Sdavidch
841255736Sdavidch	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
842255736Sdavidch	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
843255736Sdavidch		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
844255736Sdavidch
845255736Sdavidch	return rx_tx_flag;
846255736Sdavidch}
847255736Sdavidch
/*
 * ecore_set_mac_in_nig - program or clear a MAC entry in the NIG LLH CAM
 *
 * @sc:		device handle
 * @add:	TRUE to program the entry, FALSE to disable it
 * @dev_addr:	6-byte MAC address to program (only read when @add is TRUE)
 * @index:	LLH CAM line to use
 *
 * Writes the MAC into the per-port LLH_FUNC_MEM wide-bus register pair and
 * toggles the matching LLH_FUNC_MEM_ENABLE bit.  Only meaningful in
 * switch-independent (MF-SI) or AFEX multi-function modes.
 */
void ecore_set_mac_in_nig(struct bxe_softc *sc,
			  bool add, unsigned char *dev_addr, int index)
{
	uint32_t wb_data[2];
	/* Port 1 uses the LLH1 register block, port 0 the LLH0 block. */
	uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	/* LLH MAC filtering is only used in MF-SI / AFEX modes. */
	if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
		return;

	/* Silently ignore out-of-range CAM lines. */
	if (index > ECORE_LLH_CAM_MAX_PF_LINE)
		return;

	ECORE_MSG(sc, "Going to %s LLH configuration at entry %d\n",
		  (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a uint64_t WB register */
		reg_offset += 8*index;

		/* Pack the 6 MAC bytes into two 32-bit words: low word holds
		 * bytes 2..5, high word holds bytes 0..1.
		 */
		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] <<  8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);

		ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
	}

	/* Enable (add=1) or disable (add=0) the CAM line; the enable
	 * registers are an array of 32-bit entries, hence the 4*index.
	 */
	REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
878255736Sdavidch
879255736Sdavidch/**
880255736Sdavidch * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
881255736Sdavidch *
882255736Sdavidch * @sc:		device handle
883255736Sdavidch * @o:		queue for which we want to configure this rule
884255736Sdavidch * @add:	if TRUE the command is an ADD command, DEL otherwise
885255736Sdavidch * @opcode:	CLASSIFY_RULE_OPCODE_XXX
886255736Sdavidch * @hdr:	pointer to a header to setup
887255736Sdavidch *
888255736Sdavidch */
889255736Sdavidchstatic inline void ecore_vlan_mac_set_cmd_hdr_e2(struct bxe_softc *sc,
890255736Sdavidch	struct ecore_vlan_mac_obj *o, bool add, int opcode,
891255736Sdavidch	struct eth_classify_cmd_header *hdr)
892255736Sdavidch{
893255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
894255736Sdavidch
895255736Sdavidch	hdr->client_id = raw->cl_id;
896255736Sdavidch	hdr->func_id = raw->func_id;
897255736Sdavidch
898255736Sdavidch	/* Rx or/and Tx (internal switching) configuration ? */
899255736Sdavidch	hdr->cmd_general_data |=
900255736Sdavidch		ecore_vlan_mac_get_rx_tx_flag(o);
901255736Sdavidch
902255736Sdavidch	if (add)
903255736Sdavidch		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
904255736Sdavidch
905255736Sdavidch	hdr->cmd_general_data |=
906255736Sdavidch		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
907255736Sdavidch}
908255736Sdavidch
909255736Sdavidch/**
910255736Sdavidch * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
911255736Sdavidch *
912255736Sdavidch * @cid:	connection id
913255736Sdavidch * @type:	ECORE_FILTER_XXX_PENDING
914255736Sdavidch * @hdr:	pointer to header to setup
915255736Sdavidch * @rule_cnt:
916255736Sdavidch *
917255736Sdavidch * currently we always configure one rule and echo field to contain a CID and an
918255736Sdavidch * opcode type.
919255736Sdavidch */
920255736Sdavidchstatic inline void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
921255736Sdavidch				struct eth_classify_header *hdr, int rule_cnt)
922255736Sdavidch{
923255736Sdavidch	hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
924255736Sdavidch				(type << ECORE_SWCID_SHIFT));
925255736Sdavidch	hdr->rule_cnt = (uint8_t)rule_cnt;
926255736Sdavidch}
927255736Sdavidch
/* hw_config() callbacks */

/*
 * ecore_set_one_mac_e2 - fill a single MAC classification rule (E2 and newer)
 *
 * @sc:		device handle
 * @o:		vlan_mac object the rule belongs to
 * @elem:	execution-queue element carrying the command and MAC data
 * @rule_idx:	index of this rule inside the ramrod data buffer
 * @cam_offset:	unused on E2 (kept for hw_config() callback signature parity)
 *
 * Builds one (or, for MOVE, two) classification rules in the raw object's
 * ramrod data buffer and, for ADD/DEL of the primary ETH or iSCSI MAC,
 * mirrors the change into the NIG LLH CAM.
 */
static void ecore_set_one_mac_e2(struct bxe_softc *sc,
				 struct ecore_vlan_mac_obj *o,
				 struct ecore_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != ECORE_VLAN_MAC_MOVE) {
		if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ISCSI_ETH_LINE);
		else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		ECORE_MEMSET(data, 0, sizeof(*data));

	/* Setup a command header */
	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n",
		  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id);

	/* Set a MAC itself */
	ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac =
		elem->cmd_data.vlan_mac.u.mac.is_inner_mac;

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		/* The second rule is written immediately after the first. */
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data: an ADD on the destination object. */
		ecore_vlan_mac_set_cmd_hdr_e2(sc,
					elem->cmd_data.vlan_mac.target_obj,
					      TRUE, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
			elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
1012255736Sdavidch
1013255736Sdavidch/**
1014255736Sdavidch * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
1015255736Sdavidch *
1016255736Sdavidch * @sc:		device handle
1017255736Sdavidch * @o:		queue
1018255736Sdavidch * @type:
1019255736Sdavidch * @cam_offset:	offset in cam memory
1020255736Sdavidch * @hdr:	pointer to a header to setup
1021255736Sdavidch *
1022255736Sdavidch * E1/E1H
1023255736Sdavidch */
1024255736Sdavidchstatic inline void ecore_vlan_mac_set_rdata_hdr_e1x(struct bxe_softc *sc,
1025255736Sdavidch	struct ecore_vlan_mac_obj *o, int type, int cam_offset,
1026255736Sdavidch	struct mac_configuration_hdr *hdr)
1027255736Sdavidch{
1028255736Sdavidch	struct ecore_raw_obj *r = &o->raw;
1029255736Sdavidch
1030255736Sdavidch	hdr->length = 1;
1031255736Sdavidch	hdr->offset = (uint8_t)cam_offset;
1032255736Sdavidch	hdr->client_id = ECORE_CPU_TO_LE16(0xff);
1033255736Sdavidch	hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
1034255736Sdavidch				(type << ECORE_SWCID_SHIFT));
1035255736Sdavidch}
1036255736Sdavidch
1037255736Sdavidchstatic inline void ecore_vlan_mac_set_cfg_entry_e1x(struct bxe_softc *sc,
1038255736Sdavidch	struct ecore_vlan_mac_obj *o, bool add, int opcode, uint8_t *mac,
1039255736Sdavidch	uint16_t vlan_id, struct mac_configuration_entry *cfg_entry)
1040255736Sdavidch{
1041255736Sdavidch	struct ecore_raw_obj *r = &o->raw;
1042255736Sdavidch	uint32_t cl_bit_vec = (1 << r->cl_id);
1043255736Sdavidch
1044255736Sdavidch	cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
1045255736Sdavidch	cfg_entry->pf_id = r->func_id;
1046255736Sdavidch	cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);
1047255736Sdavidch
1048255736Sdavidch	if (add) {
1049255736Sdavidch		ECORE_SET_FLAG(cfg_entry->flags,
1050255736Sdavidch			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1051255736Sdavidch			       T_ETH_MAC_COMMAND_SET);
1052255736Sdavidch		ECORE_SET_FLAG(cfg_entry->flags,
1053255736Sdavidch			       MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
1054255736Sdavidch			       opcode);
1055255736Sdavidch
1056255736Sdavidch		/* Set a MAC in a ramrod data */
1057255736Sdavidch		ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
1058255736Sdavidch				      &cfg_entry->middle_mac_addr,
1059255736Sdavidch				      &cfg_entry->lsb_mac_addr, mac);
1060255736Sdavidch	} else
1061255736Sdavidch		ECORE_SET_FLAG(cfg_entry->flags,
1062255736Sdavidch			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1063255736Sdavidch			       T_ETH_MAC_COMMAND_INVALIDATE);
1064255736Sdavidch}
1065255736Sdavidch
1066255736Sdavidchstatic inline void ecore_vlan_mac_set_rdata_e1x(struct bxe_softc *sc,
1067255736Sdavidch	struct ecore_vlan_mac_obj *o, int type, int cam_offset, bool add,
1068255736Sdavidch	uint8_t *mac, uint16_t vlan_id, int opcode, struct mac_configuration_cmd *config)
1069255736Sdavidch{
1070255736Sdavidch	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
1071255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
1072255736Sdavidch
1073255736Sdavidch	ecore_vlan_mac_set_rdata_hdr_e1x(sc, o, type, cam_offset,
1074255736Sdavidch					 &config->hdr);
1075255736Sdavidch	ecore_vlan_mac_set_cfg_entry_e1x(sc, o, add, opcode, mac, vlan_id,
1076255736Sdavidch					 cfg_entry);
1077255736Sdavidch
1078255736Sdavidch	ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n",
1079255736Sdavidch		  (add ? "setting" : "clearing"),
1080255736Sdavidch		  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id, cam_offset);
1081255736Sdavidch}
1082255736Sdavidch
1083255736Sdavidch/**
1084255736Sdavidch * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
1085255736Sdavidch *
1086255736Sdavidch * @sc:		device handle
1087255736Sdavidch * @o:		ecore_vlan_mac_obj
1088255736Sdavidch * @elem:	ecore_exeq_elem
1089255736Sdavidch * @rule_idx:	rule_idx
1090255736Sdavidch * @cam_offset: cam_offset
1091255736Sdavidch */
1092255736Sdavidchstatic void ecore_set_one_mac_e1x(struct bxe_softc *sc,
1093255736Sdavidch				  struct ecore_vlan_mac_obj *o,
1094255736Sdavidch				  struct ecore_exeq_elem *elem, int rule_idx,
1095255736Sdavidch				  int cam_offset)
1096255736Sdavidch{
1097255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
1098255736Sdavidch	struct mac_configuration_cmd *config =
1099255736Sdavidch		(struct mac_configuration_cmd *)(raw->rdata);
1100255736Sdavidch	/* 57710 and 57711 do not support MOVE command,
1101255736Sdavidch	 * so it's either ADD or DEL
1102255736Sdavidch	 */
1103255736Sdavidch	bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1104255736Sdavidch		TRUE : FALSE;
1105255736Sdavidch
1106255736Sdavidch	/* Reset the ramrod data buffer */
1107255736Sdavidch	ECORE_MEMSET(config, 0, sizeof(*config));
1108255736Sdavidch
1109255736Sdavidch	ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
1110255736Sdavidch				     cam_offset, add,
1111255736Sdavidch				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
1112255736Sdavidch				     ETH_VLAN_FILTER_ANY_VLAN, config);
1113255736Sdavidch}
1114255736Sdavidch
/*
 * ecore_set_one_vlan_e2 - fill a single VLAN classification rule (E2+)
 *
 * @sc:		device handle
 * @o:		vlan_mac object the rule belongs to
 * @elem:	execution-queue element carrying the command and VLAN
 * @rule_idx:	index of this rule inside the ramrod data buffer
 * @cam_offset:	unused on E2 (kept for hw_config() callback signature parity)
 *
 * Builds one rule for ADD/DEL, or two back-to-back rules for MOVE (the
 * second one adds the VLAN on the destination queue).
 */
static void ecore_set_one_vlan_e2(struct bxe_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  struct ecore_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		ECORE_MEMSET(data, 0, sizeof(*data));

	/* Set a rule header */
	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	ECORE_MSG(sc, "About to %s VLAN %d\n", (add ? "add" : "delete"),
		  vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		/* The second rule follows the first in the same buffer. */
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data: an ADD on the destination object. */
		ecore_vlan_mac_set_cmd_hdr_e2(sc,
					elem->cmd_data.vlan_mac.target_obj,
					      TRUE, CLASSIFY_RULE_OPCODE_VLAN,
					      &rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
1164255736Sdavidch
/*
 * ecore_set_one_vlan_mac_e2 - fill a single VLAN-MAC pair rule (E2+)
 *
 * @sc:		device handle
 * @o:		vlan_mac object the rule belongs to
 * @elem:	execution-queue element carrying the command, VLAN and MAC
 * @rule_idx:	index of this rule inside the ramrod data buffer
 * @cam_offset:	unused on E2 (kept for hw_config() callback signature parity)
 *
 * Same structure as ecore_set_one_mac_e2()/ecore_set_one_vlan_e2() but for
 * PAIR rules: one rule for ADD/DEL, two for MOVE.
 */
static void ecore_set_one_vlan_mac_e2(struct bxe_softc *sc,
				      struct ecore_vlan_mac_obj *o,
				      struct ecore_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	uint8_t *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		ECORE_MEMSET(data, 0, sizeof(*data));

	/* Set a rule header */
	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
	ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);
	rule_entry->pair.inner_mac =
			elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		/* The second rule follows the first in the same buffer. */
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data: an ADD on the destination object. */
		ecore_vlan_mac_set_cmd_hdr_e2(sc,
					elem->cmd_data.vlan_mac.target_obj,
					      TRUE, CLASSIFY_RULE_OPCODE_PAIR,
					      &rule_entry->pair.header);

		/* Set a VLAN itself */
		rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
		ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
		rule_entry->pair.inner_mac =
			elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
1221255736Sdavidch
1222255736Sdavidch/**
1223255736Sdavidch * ecore_set_one_vlan_mac_e1h -
1224255736Sdavidch *
1225255736Sdavidch * @sc:		device handle
1226255736Sdavidch * @o:		ecore_vlan_mac_obj
1227255736Sdavidch * @elem:	ecore_exeq_elem
1228255736Sdavidch * @rule_idx:	rule_idx
1229255736Sdavidch * @cam_offset:	cam_offset
1230255736Sdavidch */
1231255736Sdavidchstatic void ecore_set_one_vlan_mac_e1h(struct bxe_softc *sc,
1232255736Sdavidch				       struct ecore_vlan_mac_obj *o,
1233255736Sdavidch				       struct ecore_exeq_elem *elem,
1234255736Sdavidch				       int rule_idx, int cam_offset)
1235255736Sdavidch{
1236255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
1237255736Sdavidch	struct mac_configuration_cmd *config =
1238255736Sdavidch		(struct mac_configuration_cmd *)(raw->rdata);
1239255736Sdavidch	/* 57710 and 57711 do not support MOVE command,
1240255736Sdavidch	 * so it's either ADD or DEL
1241255736Sdavidch	 */
1242255736Sdavidch	bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1243255736Sdavidch		TRUE : FALSE;
1244255736Sdavidch
1245255736Sdavidch	/* Reset the ramrod data buffer */
1246255736Sdavidch	ECORE_MEMSET(config, 0, sizeof(*config));
1247255736Sdavidch
1248255736Sdavidch	ecore_vlan_mac_set_rdata_e1x(sc, o, ECORE_FILTER_VLAN_MAC_PENDING,
1249255736Sdavidch				     cam_offset, add,
1250255736Sdavidch				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1251255736Sdavidch				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1252255736Sdavidch				     ETH_VLAN_FILTER_CLASSIFY, config);
1253255736Sdavidch}
1254255736Sdavidch
/* Linux-compat helper: step to the entry following @pos in a linked list.
 * NOTE(review): not referenced within this part of the file - presumably
 * kept for parity with the upstream Linux driver; confirm before removing.
 */
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)
1257255736Sdavidch
/**
 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @sc:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie: the registry element to resume from
 *
 * Re-issues an ADD for the next element of the previously configured
 * elements list, one element per call.
 *
 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * Cookie protocol: pass *ppos == NULL to (re)start iteration; each call
 * advances *ppos, and *ppos is set back to NULL once the last element has
 * been handled.
 *
 * Returns the result of ecore_config_vlan_mac() for the restored element,
 * or 0 immediately when the registry is empty.
 */
static int ecore_vlan_mac_restore(struct bxe_softc *sc,
			   struct ecore_vlan_mac_ramrod_params *p,
			   struct ecore_vlan_mac_registry_elem **ppos)
{
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (ECORE_LIST_IS_EMPTY(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... NULL cookie means start from the head, otherwise
	 * advance to the element after the cookie.
	 */
	if (*ppos == NULL)
		*ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
					    struct ecore_vlan_mac_registry_elem,
					       link);
	else
		*ppos = ECORE_LIST_NEXT(*ppos, link,
					struct ecore_vlan_mac_registry_elem);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' from the registered element's data. */
	ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = ECORE_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit so the ADD is not re-checked against the
	 * registry (the element is already registered).
	 */
	ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);

	return ecore_config_vlan_mac(sc, p);
}
1319255736Sdavidch
1320255736Sdavidch/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1321255736Sdavidch * pointer to an element with a specific criteria and NULL if such an element
1322255736Sdavidch * hasn't been found.
1323255736Sdavidch */
1324255736Sdavidchstatic struct ecore_exeq_elem *ecore_exeq_get_mac(
1325255736Sdavidch	struct ecore_exe_queue_obj *o,
1326255736Sdavidch	struct ecore_exeq_elem *elem)
1327255736Sdavidch{
1328255736Sdavidch	struct ecore_exeq_elem *pos;
1329255736Sdavidch	struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1330255736Sdavidch
1331255736Sdavidch	/* Check pending for execution commands */
1332255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1333255736Sdavidch				  struct ecore_exeq_elem)
1334255736Sdavidch		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
1335255736Sdavidch			      sizeof(*data)) &&
1336255736Sdavidch		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1337255736Sdavidch			return pos;
1338255736Sdavidch
1339255736Sdavidch	return NULL;
1340255736Sdavidch}
1341255736Sdavidch
1342255736Sdavidchstatic struct ecore_exeq_elem *ecore_exeq_get_vlan(
1343255736Sdavidch	struct ecore_exe_queue_obj *o,
1344255736Sdavidch	struct ecore_exeq_elem *elem)
1345255736Sdavidch{
1346255736Sdavidch	struct ecore_exeq_elem *pos;
1347255736Sdavidch	struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1348255736Sdavidch
1349255736Sdavidch	/* Check pending for execution commands */
1350255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1351255736Sdavidch				  struct ecore_exeq_elem)
1352255736Sdavidch		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan, data,
1353255736Sdavidch			      sizeof(*data)) &&
1354255736Sdavidch		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1355255736Sdavidch			return pos;
1356255736Sdavidch
1357255736Sdavidch	return NULL;
1358255736Sdavidch}
1359255736Sdavidch
1360255736Sdavidchstatic struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
1361255736Sdavidch	struct ecore_exe_queue_obj *o,
1362255736Sdavidch	struct ecore_exeq_elem *elem)
1363255736Sdavidch{
1364255736Sdavidch	struct ecore_exeq_elem *pos;
1365255736Sdavidch	struct ecore_vlan_mac_ramrod_data *data =
1366255736Sdavidch		&elem->cmd_data.vlan_mac.u.vlan_mac;
1367255736Sdavidch
1368255736Sdavidch	/* Check pending for execution commands */
1369255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1370255736Sdavidch				  struct ecore_exeq_elem)
1371255736Sdavidch		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1372255736Sdavidch			      sizeof(*data)) &&
1373255736Sdavidch		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1374255736Sdavidch			return pos;
1375255736Sdavidch
1376255736Sdavidch	return NULL;
1377255736Sdavidch}
1378255736Sdavidch
1379255736Sdavidch/**
1380255736Sdavidch * ecore_validate_vlan_mac_add - check if an ADD command can be executed
1381255736Sdavidch *
1382255736Sdavidch * @sc:		device handle
1383255736Sdavidch * @qo:		ecore_qable_obj
1384255736Sdavidch * @elem:	ecore_exeq_elem
1385255736Sdavidch *
1386255736Sdavidch * Checks that the requested configuration can be added. If yes and if
1387255736Sdavidch * requested, consume CAM credit.
1388255736Sdavidch *
1389255736Sdavidch * The 'validate' is run after the 'optimize'.
1390255736Sdavidch *
1391255736Sdavidch */
1392255736Sdavidchstatic inline int ecore_validate_vlan_mac_add(struct bxe_softc *sc,
1393255736Sdavidch					      union ecore_qable_obj *qo,
1394255736Sdavidch					      struct ecore_exeq_elem *elem)
1395255736Sdavidch{
1396255736Sdavidch	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1397255736Sdavidch	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1398255736Sdavidch	int rc;
1399255736Sdavidch
1400255736Sdavidch	/* Check the registry */
1401255736Sdavidch	rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
1402255736Sdavidch	if (rc) {
1403255736Sdavidch		ECORE_MSG(sc, "ADD command is not allowed considering current registry state.\n");
1404255736Sdavidch		return rc;
1405255736Sdavidch	}
1406255736Sdavidch
1407255736Sdavidch	/* Check if there is a pending ADD command for this
1408255736Sdavidch	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1409255736Sdavidch	 */
1410255736Sdavidch	if (exeq->get(exeq, elem)) {
1411255736Sdavidch		ECORE_MSG(sc, "There is a pending ADD command already\n");
1412255736Sdavidch		return ECORE_EXISTS;
1413255736Sdavidch	}
1414255736Sdavidch
1415255736Sdavidch	/* TODO: Check the pending MOVE from other objects where this
1416255736Sdavidch	 * object is a destination object.
1417255736Sdavidch	 */
1418255736Sdavidch
1419255736Sdavidch	/* Consume the credit if not requested not to */
1420255736Sdavidch	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1421255736Sdavidch			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1422255736Sdavidch	    o->get_credit(o)))
1423255736Sdavidch		return ECORE_INVAL;
1424255736Sdavidch
1425255736Sdavidch	return ECORE_SUCCESS;
1426255736Sdavidch}
1427255736Sdavidch
1428255736Sdavidch/**
1429255736Sdavidch * ecore_validate_vlan_mac_del - check if the DEL command can be executed
1430255736Sdavidch *
1431255736Sdavidch * @sc:		device handle
1432255736Sdavidch * @qo:		quable object to check
1433255736Sdavidch * @elem:	element that needs to be deleted
1434255736Sdavidch *
1435255736Sdavidch * Checks that the requested configuration can be deleted. If yes and if
1436255736Sdavidch * requested, returns a CAM credit.
1437255736Sdavidch *
1438255736Sdavidch * The 'validate' is run after the 'optimize'.
1439255736Sdavidch */
1440255736Sdavidchstatic inline int ecore_validate_vlan_mac_del(struct bxe_softc *sc,
1441255736Sdavidch					      union ecore_qable_obj *qo,
1442255736Sdavidch					      struct ecore_exeq_elem *elem)
1443255736Sdavidch{
1444255736Sdavidch	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1445255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
1446255736Sdavidch	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1447255736Sdavidch	struct ecore_exeq_elem query_elem;
1448255736Sdavidch
1449255736Sdavidch	/* If this classification can not be deleted (doesn't exist)
1450255736Sdavidch	 * - return a ECORE_EXIST.
1451255736Sdavidch	 */
1452255736Sdavidch	pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1453255736Sdavidch	if (!pos) {
1454255736Sdavidch		ECORE_MSG(sc, "DEL command is not allowed considering current registry state\n");
1455255736Sdavidch		return ECORE_EXISTS;
1456255736Sdavidch	}
1457255736Sdavidch
1458255736Sdavidch	/* Check if there are pending DEL or MOVE commands for this
1459255736Sdavidch	 * MAC/VLAN/VLAN-MAC. Return an error if so.
1460255736Sdavidch	 */
1461255736Sdavidch	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1462255736Sdavidch
1463255736Sdavidch	/* Check for MOVE commands */
1464255736Sdavidch	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
1465255736Sdavidch	if (exeq->get(exeq, &query_elem)) {
1466255736Sdavidch		ECORE_ERR("There is a pending MOVE command already\n");
1467255736Sdavidch		return ECORE_INVAL;
1468255736Sdavidch	}
1469255736Sdavidch
1470255736Sdavidch	/* Check for DEL commands */
1471255736Sdavidch	if (exeq->get(exeq, elem)) {
1472255736Sdavidch		ECORE_MSG(sc, "There is a pending DEL command already\n");
1473255736Sdavidch		return ECORE_EXISTS;
1474255736Sdavidch	}
1475255736Sdavidch
1476255736Sdavidch	/* Return the credit to the credit pool if not requested not to */
1477255736Sdavidch	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1478255736Sdavidch			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1479255736Sdavidch	    o->put_credit(o))) {
1480255736Sdavidch		ECORE_ERR("Failed to return a credit\n");
1481255736Sdavidch		return ECORE_INVAL;
1482255736Sdavidch	}
1483255736Sdavidch
1484255736Sdavidch	return ECORE_SUCCESS;
1485255736Sdavidch}
1486255736Sdavidch
/**
 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @sc:		device handle
 * @qo:		quable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, consumes a CAM credit from the destination object's pool and
 * returns one to the source object's pool.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int ecore_validate_vlan_mac_move(struct bxe_softc *sc,
					       union ecore_qable_obj *qo,
					       struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct ecore_exeq_elem query_elem;
	struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(sc, src_o, dest_o,
			       &elem->cmd_data.vlan_mac.u)) {
		ECORE_MSG(sc, "MOVE command is not allowed considering current registry state\n");
		return ECORE_INVAL;
	}

	/* Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		ECORE_ERR("There is a pending DEL command on the source queue already\n");
		return ECORE_INVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		ECORE_MSG(sc, "There is a pending MOVE command already\n");
		return ECORE_EXISTS;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		ECORE_ERR("There is a pending ADD command on the destination queue already\n");
		return ECORE_INVAL;
	}

	/* The destination gains an entry: consume a credit from its pool,
	 * unless the caller set DONT_CONSUME_CAM_CREDIT_DEST.
	 */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    dest_o->get_credit(dest_o)))
		return ECORE_INVAL;

	/* The source loses an entry: return a credit to its pool, unless
	 * the caller set DONT_CONSUME_CAM_CREDIT. On failure, roll back
	 * the credit already taken from the destination above.
	 */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}
1560255736Sdavidch
1561255736Sdavidchstatic int ecore_validate_vlan_mac(struct bxe_softc *sc,
1562255736Sdavidch				   union ecore_qable_obj *qo,
1563255736Sdavidch				   struct ecore_exeq_elem *elem)
1564255736Sdavidch{
1565255736Sdavidch	switch (elem->cmd_data.vlan_mac.cmd) {
1566255736Sdavidch	case ECORE_VLAN_MAC_ADD:
1567255736Sdavidch		return ecore_validate_vlan_mac_add(sc, qo, elem);
1568255736Sdavidch	case ECORE_VLAN_MAC_DEL:
1569255736Sdavidch		return ecore_validate_vlan_mac_del(sc, qo, elem);
1570255736Sdavidch	case ECORE_VLAN_MAC_MOVE:
1571255736Sdavidch		return ecore_validate_vlan_mac_move(sc, qo, elem);
1572255736Sdavidch	default:
1573255736Sdavidch		return ECORE_INVAL;
1574255736Sdavidch	}
1575255736Sdavidch}
1576255736Sdavidch
1577255736Sdavidchstatic int ecore_remove_vlan_mac(struct bxe_softc *sc,
1578255736Sdavidch				  union ecore_qable_obj *qo,
1579255736Sdavidch				  struct ecore_exeq_elem *elem)
1580255736Sdavidch{
1581255736Sdavidch	int rc = 0;
1582255736Sdavidch
1583255736Sdavidch	/* If consumption wasn't required, nothing to do */
1584255736Sdavidch	if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1585255736Sdavidch			   &elem->cmd_data.vlan_mac.vlan_mac_flags))
1586255736Sdavidch		return ECORE_SUCCESS;
1587255736Sdavidch
1588255736Sdavidch	switch (elem->cmd_data.vlan_mac.cmd) {
1589255736Sdavidch	case ECORE_VLAN_MAC_ADD:
1590255736Sdavidch	case ECORE_VLAN_MAC_MOVE:
1591255736Sdavidch		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1592255736Sdavidch		break;
1593255736Sdavidch	case ECORE_VLAN_MAC_DEL:
1594255736Sdavidch		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1595255736Sdavidch		break;
1596255736Sdavidch	default:
1597255736Sdavidch		return ECORE_INVAL;
1598255736Sdavidch	}
1599255736Sdavidch
1600255736Sdavidch	if (rc != TRUE)
1601255736Sdavidch		return ECORE_INVAL;
1602255736Sdavidch
1603255736Sdavidch	return ECORE_SUCCESS;
1604255736Sdavidch}
1605255736Sdavidch
1606255736Sdavidch/**
1607255736Sdavidch * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1608255736Sdavidch *
1609255736Sdavidch * @sc:		device handle
1610255736Sdavidch * @o:		ecore_vlan_mac_obj
1611255736Sdavidch *
1612255736Sdavidch */
1613255736Sdavidchstatic int ecore_wait_vlan_mac(struct bxe_softc *sc,
1614255736Sdavidch			       struct ecore_vlan_mac_obj *o)
1615255736Sdavidch{
1616255736Sdavidch	int cnt = 5000, rc;
1617255736Sdavidch	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1618255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
1619255736Sdavidch
1620255736Sdavidch	while (cnt--) {
1621255736Sdavidch		/* Wait for the current command to complete */
1622255736Sdavidch		rc = raw->wait_comp(sc, raw);
1623255736Sdavidch		if (rc)
1624255736Sdavidch			return rc;
1625255736Sdavidch
1626255736Sdavidch		/* Wait until there are no pending commands */
1627255736Sdavidch		if (!ecore_exe_queue_empty(exeq))
1628255736Sdavidch			ECORE_WAIT(sc, 1000);
1629255736Sdavidch		else
1630255736Sdavidch			return ECORE_SUCCESS;
1631255736Sdavidch	}
1632255736Sdavidch
1633255736Sdavidch	return ECORE_TIMEOUT;
1634255736Sdavidch}
1635255736Sdavidch
/* __ecore_vlan_mac_execute_step - run the next chunk of the execution queue
 * under the vlan_mac writer lock.
 *
 * @sc:			device handle
 * @o:			ecore_vlan_mac_obj
 * @ramrod_flags:	execution flags, forwarded to the queue step
 *
 * Returns ECORE_PENDING when the writer lock is unavailable (the step is
 * recorded via __ecore_vlan_mac_h_pend() for later execution), otherwise
 * the result of ecore_exe_queue_step().
 */
static int __ecore_vlan_mac_execute_step(struct bxe_softc *sc,
					 struct ecore_vlan_mac_obj *o,
					 unsigned long *ramrod_flags)
{
	int rc = ECORE_SUCCESS;

	/* The whole step runs under the execution queue spinlock */
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

	ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock\n");
	rc = __ecore_vlan_mac_h_write_trylock(sc, o);

	if (rc != ECORE_SUCCESS) {
		/* Couldn't get the writer lock - queue the step so it runs
		 * when the lock is released.
		 */
		__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);

		/* Calling function should not differentiate between this case
		 * and the case in which there is already a pending ramrod
		 */
		rc = ECORE_PENDING;
	} else {
		rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
	}
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}
1661255736Sdavidch
/**
 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @sc:			device handle
 * @o:			ecore_vlan_mac_obj
 * @cqe:		completion element; cqe->message.error reports a
 *			firmware failure
 * @ramrod_flags:	if RAMROD_CONT is set, schedule the next execution
 *			chunk
 */
static int ecore_complete_vlan_mac(struct bxe_softc *sc,
				   struct ecore_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct ecore_raw_obj *r = &o->raw;
	int rc;

	/* Clearing the pending list & raw state should be made
	 * atomically (as execution flow assumes they represent the same)
	 */
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

	/* Reset pending list */
	__ecore_exe_queue_reset_pending(sc, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return ECORE_INVAL;

	/* Run the next bulk of pending commands if requested */
	if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		return ECORE_PENDING;

	return ECORE_SUCCESS;
}
1709255736Sdavidch
1710255736Sdavidch/**
1711255736Sdavidch * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1712255736Sdavidch *
1713255736Sdavidch * @sc:		device handle
1714255736Sdavidch * @o:		ecore_qable_obj
1715255736Sdavidch * @elem:	ecore_exeq_elem
1716255736Sdavidch */
1717255736Sdavidchstatic int ecore_optimize_vlan_mac(struct bxe_softc *sc,
1718255736Sdavidch				   union ecore_qable_obj *qo,
1719255736Sdavidch				   struct ecore_exeq_elem *elem)
1720255736Sdavidch{
1721255736Sdavidch	struct ecore_exeq_elem query, *pos;
1722255736Sdavidch	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1723255736Sdavidch	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1724255736Sdavidch
1725255736Sdavidch	ECORE_MEMCPY(&query, elem, sizeof(query));
1726255736Sdavidch
1727255736Sdavidch	switch (elem->cmd_data.vlan_mac.cmd) {
1728255736Sdavidch	case ECORE_VLAN_MAC_ADD:
1729255736Sdavidch		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1730255736Sdavidch		break;
1731255736Sdavidch	case ECORE_VLAN_MAC_DEL:
1732255736Sdavidch		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1733255736Sdavidch		break;
1734255736Sdavidch	default:
1735255736Sdavidch		/* Don't handle anything other than ADD or DEL */
1736255736Sdavidch		return 0;
1737255736Sdavidch	}
1738255736Sdavidch
1739255736Sdavidch	/* If we found the appropriate element - delete it */
1740255736Sdavidch	pos = exeq->get(exeq, &query);
1741255736Sdavidch	if (pos) {
1742255736Sdavidch
1743255736Sdavidch		/* Return the credit of the optimized command */
1744255736Sdavidch		if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1745255736Sdavidch				     &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1746255736Sdavidch			if ((query.cmd_data.vlan_mac.cmd ==
1747255736Sdavidch			     ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1748255736Sdavidch				ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
1749255736Sdavidch				return ECORE_INVAL;
1750255736Sdavidch			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1751255736Sdavidch				ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
1752255736Sdavidch				return ECORE_INVAL;
1753255736Sdavidch			}
1754255736Sdavidch		}
1755255736Sdavidch
1756255736Sdavidch		ECORE_MSG(sc, "Optimizing %s command\n",
1757255736Sdavidch			  (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1758255736Sdavidch			  "ADD" : "DEL");
1759255736Sdavidch
1760255736Sdavidch		ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1761255736Sdavidch		ecore_exe_queue_free_elem(sc, pos);
1762255736Sdavidch		return 1;
1763255736Sdavidch	}
1764255736Sdavidch
1765255736Sdavidch	return 0;
1766255736Sdavidch}
1767255736Sdavidch
1768255736Sdavidch/**
1769255736Sdavidch * ecore_vlan_mac_get_registry_elem - prepare a registry element
1770255736Sdavidch *
1771255736Sdavidch * @sc:	  device handle
1772255736Sdavidch * @o:
1773255736Sdavidch * @elem:
1774255736Sdavidch * @restore:
1775255736Sdavidch * @re:
1776255736Sdavidch *
1777255736Sdavidch * prepare a registry element according to the current command request.
1778255736Sdavidch */
1779255736Sdavidchstatic inline int ecore_vlan_mac_get_registry_elem(
1780255736Sdavidch	struct bxe_softc *sc,
1781255736Sdavidch	struct ecore_vlan_mac_obj *o,
1782255736Sdavidch	struct ecore_exeq_elem *elem,
1783255736Sdavidch	bool restore,
1784255736Sdavidch	struct ecore_vlan_mac_registry_elem **re)
1785255736Sdavidch{
1786255736Sdavidch	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1787255736Sdavidch	struct ecore_vlan_mac_registry_elem *reg_elem;
1788255736Sdavidch
1789255736Sdavidch	/* Allocate a new registry element if needed. */
1790255736Sdavidch	if (!restore &&
1791255736Sdavidch	    ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1792255736Sdavidch		reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1793255736Sdavidch		if (!reg_elem)
1794255736Sdavidch			return ECORE_NOMEM;
1795255736Sdavidch
1796255736Sdavidch		/* Get a new CAM offset */
1797255736Sdavidch		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1798255736Sdavidch			/* This shall never happen, because we have checked the
1799255736Sdavidch			 * CAM availability in the 'validate'.
1800255736Sdavidch			 */
1801255736Sdavidch			ECORE_DBG_BREAK_IF(1);
1802255736Sdavidch			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1803255736Sdavidch			return ECORE_INVAL;
1804255736Sdavidch		}
1805255736Sdavidch
1806255736Sdavidch		ECORE_MSG(sc, "Got cam offset %d\n", reg_elem->cam_offset);
1807255736Sdavidch
1808255736Sdavidch		/* Set a VLAN-MAC data */
1809255736Sdavidch		ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1810255736Sdavidch			  sizeof(reg_elem->u));
1811255736Sdavidch
1812255736Sdavidch		/* Copy the flags (needed for DEL and RESTORE flows) */
1813255736Sdavidch		reg_elem->vlan_mac_flags =
1814255736Sdavidch			elem->cmd_data.vlan_mac.vlan_mac_flags;
1815255736Sdavidch	} else /* DEL, RESTORE */
1816255736Sdavidch		reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1817255736Sdavidch
1818255736Sdavidch	*re = reg_elem;
1819255736Sdavidch	return ECORE_SUCCESS;
1820255736Sdavidch}
1821255736Sdavidch
/**
 * ecore_execute_vlan_mac - execute vlan mac command
 *
 * @sc:			device handle
 * @qo:			union holding the vlan_mac object whose commands run
 * @exe_chunk:		list of ecore_exeq_elem commands to execute
 * @ramrod_flags:	RAMROD_RESTORE and RAMROD_DRV_CLR_ONLY are honored
 *			here
 *
 * go and send a ramrod! With RAMROD_DRV_CLR_ONLY only the driver registry
 * is updated and no ramrod is posted to the firmware.
 */
static int ecore_execute_vlan_mac(struct bxe_softc *sc,
				  union ecore_qable_obj *qo,
				  ecore_list_t *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem;
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct ecore_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct ecore_vlan_mac_registry_elem *reg_elem;
	enum ecore_vlan_mac_cmd cmd;

	/* If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		ECORE_DBG_BREAK_IF(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
					  struct ecore_exeq_elem) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/* We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == ECORE_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			/* Allocates a new registry entry (ADD/MOVE) or looks
			 * up the existing one (DEL/RESTORE).
			 */
			rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			ECORE_DBG_BREAK_IF(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == ECORE_VLAN_MAC_ADD) ||
			    (cmd == ECORE_VLAN_MAC_MOVE)))
				ECORE_LIST_PUSH_HEAD(&reg_elem->link,
						     &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(sc, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == ECORE_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 *  No need for an explicit memory barrier here as long we would
		 *  need to ensure the ordering of writing to the SPQ element
		 *  and updating of the SPQ producer which involves a memory
		 *  read and we will have to put a full memory barrier there
		 *  (inside ecore_sp_post()).
		 */

		rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
				   r->rdata_mapping,
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry:
	 * entries deleted (or moved away) by DEL/MOVE are dropped from this
	 * object's registry and their CAM offsets recycled.
	 */
	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
				  struct ecore_exeq_elem) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == ECORE_VLAN_MAC_DEL) ||
		    (cmd == ECORE_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(sc, o,
						&elem->cmd_data.vlan_mac.u);

			ECORE_DBG_BREAK_IF(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
		}
	}

	/* When a ramrod was posted, the FW completion finishes the job */
	if (!drv_only)
		return ECORE_PENDING;
	else
		return ECORE_SUCCESS;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
				  struct ecore_exeq_elem) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == ECORE_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == ECORE_VLAN_MAC_ADD) ||
		    (cmd == ECORE_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(sc, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
							&cam_obj->head);
				ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
			}
		}
	}

	return rc;
}
1959255736Sdavidch
1960255736Sdavidchstatic inline int ecore_vlan_mac_push_new_cmd(
1961255736Sdavidch	struct bxe_softc *sc,
1962255736Sdavidch	struct ecore_vlan_mac_ramrod_params *p)
1963255736Sdavidch{
1964255736Sdavidch	struct ecore_exeq_elem *elem;
1965255736Sdavidch	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1966255736Sdavidch	bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1967255736Sdavidch
1968255736Sdavidch	/* Allocate the execution queue element */
1969255736Sdavidch	elem = ecore_exe_queue_alloc_elem(sc);
1970255736Sdavidch	if (!elem)
1971255736Sdavidch		return ECORE_NOMEM;
1972255736Sdavidch
1973255736Sdavidch	/* Set the command 'length' */
1974255736Sdavidch	switch (p->user_req.cmd) {
1975255736Sdavidch	case ECORE_VLAN_MAC_MOVE:
1976255736Sdavidch		elem->cmd_len = 2;
1977255736Sdavidch		break;
1978255736Sdavidch	default:
1979255736Sdavidch		elem->cmd_len = 1;
1980255736Sdavidch	}
1981255736Sdavidch
1982255736Sdavidch	/* Fill the object specific info */
1983255736Sdavidch	ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1984255736Sdavidch
1985255736Sdavidch	/* Try to add a new command to the pending list */
1986255736Sdavidch	return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1987255736Sdavidch}
1988255736Sdavidch
/**
 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @sc:	  device handle
 * @p:	  ramrod parameters: object, user request and ramrod_flags
 *
 * Unless RAMROD_CONT is set, the user request is first queued; execution
 * steps then run as dictated by RAMROD_CONT / RAMROD_EXEC /
 * RAMROD_COMP_WAIT. With RAMROD_COMP_WAIT the call waits until the
 * execution queue drains before returning.
 */
int ecore_config_vlan_mac(struct bxe_softc *sc,
			   struct ecore_vlan_mac_ramrod_params *p)
{
	int rc = ECORE_SUCCESS;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
	struct ecore_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = ecore_vlan_mac_push_new_cmd(sc, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		rc = ECORE_PENDING;

	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
		ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
	    ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
						   &p->ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then user want to wait until the last command is done.
	 */
	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;

		while (!ecore_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(sc, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = __ecore_vlan_mac_execute_step(sc,
							   p->vlan_mac_obj,
							   &p->ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return ECORE_SUCCESS;
	}

	return rc;
}
2064255736Sdavidch
2065255736Sdavidch/**
2066255736Sdavidch * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2067255736Sdavidch *
2068255736Sdavidch * @sc:			device handle
2069255736Sdavidch * @o:
2070255736Sdavidch * @vlan_mac_flags:
2071255736Sdavidch * @ramrod_flags:	execution flags to be used for this deletion
2072255736Sdavidch *
2073255736Sdavidch * if the last operation has completed successfully and there are no
2074255736Sdavidch * more elements left, positive value if the last operation has completed
2075255736Sdavidch * successfully and there are more previously configured elements, negative
2076255736Sdavidch * value is current operation has failed.
2077255736Sdavidch */
/*
 * Delete every registry element matching *vlan_mac_flags: first purge any
 * matching commands still sitting in the execution queue, then enqueue a
 * DEL command per matching registry element, and finally kick the queue
 * (RAMROD_CONT) to execute them with the caller's ramrod flags.
 */
static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct ecore_vlan_mac_registry_elem *pos = NULL;
	struct ecore_vlan_mac_ramrod_params p;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
	int read_lock;
	int rc = 0;

	/* Clear pending commands first */

	ECORE_SPIN_LOCK_BH(&exeq->lock);

	/* Drop queued-but-not-yet-executed commands with matching flags;
	 * SAFE iteration because entries are removed while walking.
	 */
	ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
				       &exeq->exe_queue, link,
				       struct ecore_exeq_elem) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags) {
			rc = exeq->remove(sc, exeq->owner, exeq_pos);
			if (rc) {
				ECORE_ERR("Failed to remove command\n");
				ECORE_SPIN_UNLOCK_BH(&exeq->lock);
				return rc;
			}
			ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
						&exeq->exe_queue);
			ecore_exe_queue_free_elem(sc, exeq_pos);
		}
	}

	ECORE_SPIN_UNLOCK_BH(&exeq->lock);

	/* Prepare a command request */
	ECORE_MEMSET(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = ECORE_VLAN_MAC_DEL;

	/* Add all but the last VLAN-MAC to the execution queue without actually
	 * execution anything.
	 */
	ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
	ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
	ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);

	/* Hold the registry reader lock so the list cannot change under us */
	ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
	if (read_lock != ECORE_SUCCESS)
		return read_lock;

	/* Queue one DEL command per matching registry element */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = ecore_config_vlan_mac(sc, &p);
			if (rc < 0) {
				ECORE_ERR("Failed to add a new DEL command\n");
				ecore_vlan_mac_h_read_unlock(sc, o);
				return rc;
			}
		}
	}

	ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
	ecore_vlan_mac_h_read_unlock(sc, o);

	/* Now execute everything queued above in one go */
	p.ramrod_flags = *ramrod_flags;
	ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);

	return ecore_config_vlan_mac(sc, &p);
}
2153255736Sdavidch
2154255736Sdavidchstatic inline void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
2155255736Sdavidch	uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping, int state,
2156255736Sdavidch	unsigned long *pstate, ecore_obj_type type)
2157255736Sdavidch{
2158255736Sdavidch	raw->func_id = func_id;
2159255736Sdavidch	raw->cid = cid;
2160255736Sdavidch	raw->cl_id = cl_id;
2161255736Sdavidch	raw->rdata = rdata;
2162255736Sdavidch	raw->rdata_mapping = rdata_mapping;
2163255736Sdavidch	raw->state = state;
2164255736Sdavidch	raw->pstate = pstate;
2165255736Sdavidch	raw->obj_type = type;
2166255736Sdavidch	raw->check_pending = ecore_raw_check_pending;
2167255736Sdavidch	raw->clear_pending = ecore_raw_clear_pending;
2168255736Sdavidch	raw->set_pending = ecore_raw_set_pending;
2169255736Sdavidch	raw->wait_comp = ecore_raw_wait;
2170255736Sdavidch}
2171255736Sdavidch
2172255736Sdavidchstatic inline void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
2173255736Sdavidch	uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping,
2174255736Sdavidch	int state, unsigned long *pstate, ecore_obj_type type,
2175255736Sdavidch	struct ecore_credit_pool_obj *macs_pool,
2176255736Sdavidch	struct ecore_credit_pool_obj *vlans_pool)
2177255736Sdavidch{
2178255736Sdavidch	ECORE_LIST_INIT(&o->head);
2179255736Sdavidch	o->head_reader = 0;
2180255736Sdavidch	o->head_exe_request = FALSE;
2181255736Sdavidch	o->saved_ramrod_flags = 0;
2182255736Sdavidch
2183255736Sdavidch	o->macs_pool = macs_pool;
2184255736Sdavidch	o->vlans_pool = vlans_pool;
2185255736Sdavidch
2186255736Sdavidch	o->delete_all = ecore_vlan_mac_del_all;
2187255736Sdavidch	o->restore = ecore_vlan_mac_restore;
2188255736Sdavidch	o->complete = ecore_complete_vlan_mac;
2189255736Sdavidch	o->wait = ecore_wait_vlan_mac;
2190255736Sdavidch
2191255736Sdavidch	ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2192255736Sdavidch			   state, pstate, type);
2193255736Sdavidch}
2194255736Sdavidch
2195255736Sdavidchvoid ecore_init_mac_obj(struct bxe_softc *sc,
2196255736Sdavidch			struct ecore_vlan_mac_obj *mac_obj,
2197255736Sdavidch			uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2198255736Sdavidch			ecore_dma_addr_t rdata_mapping, int state,
2199255736Sdavidch			unsigned long *pstate, ecore_obj_type type,
2200255736Sdavidch			struct ecore_credit_pool_obj *macs_pool)
2201255736Sdavidch{
2202255736Sdavidch	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
2203255736Sdavidch
2204255736Sdavidch	ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2205255736Sdavidch				   rdata_mapping, state, pstate, type,
2206255736Sdavidch				   macs_pool, NULL);
2207255736Sdavidch
2208255736Sdavidch	/* CAM credit pool handling */
2209255736Sdavidch	mac_obj->get_credit = ecore_get_credit_mac;
2210255736Sdavidch	mac_obj->put_credit = ecore_put_credit_mac;
2211255736Sdavidch	mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2212255736Sdavidch	mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2213255736Sdavidch
2214255736Sdavidch	if (CHIP_IS_E1x(sc)) {
2215255736Sdavidch		mac_obj->set_one_rule      = ecore_set_one_mac_e1x;
2216255736Sdavidch		mac_obj->check_del         = ecore_check_mac_del;
2217255736Sdavidch		mac_obj->check_add         = ecore_check_mac_add;
2218255736Sdavidch		mac_obj->check_move        = ecore_check_move_always_err;
2219255736Sdavidch		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2220255736Sdavidch
2221255736Sdavidch		/* Exe Queue */
2222255736Sdavidch		ecore_exe_queue_init(sc,
2223255736Sdavidch				     &mac_obj->exe_queue, 1, qable_obj,
2224255736Sdavidch				     ecore_validate_vlan_mac,
2225255736Sdavidch				     ecore_remove_vlan_mac,
2226255736Sdavidch				     ecore_optimize_vlan_mac,
2227255736Sdavidch				     ecore_execute_vlan_mac,
2228255736Sdavidch				     ecore_exeq_get_mac);
2229255736Sdavidch	} else {
2230255736Sdavidch		mac_obj->set_one_rule      = ecore_set_one_mac_e2;
2231255736Sdavidch		mac_obj->check_del         = ecore_check_mac_del;
2232255736Sdavidch		mac_obj->check_add         = ecore_check_mac_add;
2233255736Sdavidch		mac_obj->check_move        = ecore_check_move;
2234255736Sdavidch		mac_obj->ramrod_cmd        =
2235255736Sdavidch			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2236255736Sdavidch		mac_obj->get_n_elements    = ecore_get_n_elements;
2237255736Sdavidch
2238255736Sdavidch		/* Exe Queue */
2239255736Sdavidch		ecore_exe_queue_init(sc,
2240255736Sdavidch				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2241255736Sdavidch				     qable_obj, ecore_validate_vlan_mac,
2242255736Sdavidch				     ecore_remove_vlan_mac,
2243255736Sdavidch				     ecore_optimize_vlan_mac,
2244255736Sdavidch				     ecore_execute_vlan_mac,
2245255736Sdavidch				     ecore_exeq_get_mac);
2246255736Sdavidch	}
2247255736Sdavidch}
2248255736Sdavidch
2249255736Sdavidchvoid ecore_init_vlan_obj(struct bxe_softc *sc,
2250255736Sdavidch			 struct ecore_vlan_mac_obj *vlan_obj,
2251255736Sdavidch			 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2252255736Sdavidch			 ecore_dma_addr_t rdata_mapping, int state,
2253255736Sdavidch			 unsigned long *pstate, ecore_obj_type type,
2254255736Sdavidch			 struct ecore_credit_pool_obj *vlans_pool)
2255255736Sdavidch{
2256255736Sdavidch	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj;
2257255736Sdavidch
2258255736Sdavidch	ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2259255736Sdavidch				   rdata_mapping, state, pstate, type, NULL,
2260255736Sdavidch				   vlans_pool);
2261255736Sdavidch
2262255736Sdavidch	vlan_obj->get_credit = ecore_get_credit_vlan;
2263255736Sdavidch	vlan_obj->put_credit = ecore_put_credit_vlan;
2264255736Sdavidch	vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan;
2265255736Sdavidch	vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan;
2266255736Sdavidch
2267255736Sdavidch	if (CHIP_IS_E1x(sc)) {
2268255736Sdavidch		ECORE_ERR("Do not support chips others than E2 and newer\n");
2269255736Sdavidch		ECORE_BUG();
2270255736Sdavidch	} else {
2271255736Sdavidch		vlan_obj->set_one_rule      = ecore_set_one_vlan_e2;
2272255736Sdavidch		vlan_obj->check_del         = ecore_check_vlan_del;
2273255736Sdavidch		vlan_obj->check_add         = ecore_check_vlan_add;
2274255736Sdavidch		vlan_obj->check_move        = ecore_check_move;
2275255736Sdavidch		vlan_obj->ramrod_cmd        =
2276255736Sdavidch			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2277255736Sdavidch		vlan_obj->get_n_elements    = ecore_get_n_elements;
2278255736Sdavidch
2279255736Sdavidch		/* Exe Queue */
2280255736Sdavidch		ecore_exe_queue_init(sc,
2281255736Sdavidch				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2282255736Sdavidch				     qable_obj, ecore_validate_vlan_mac,
2283255736Sdavidch				     ecore_remove_vlan_mac,
2284255736Sdavidch				     ecore_optimize_vlan_mac,
2285255736Sdavidch				     ecore_execute_vlan_mac,
2286255736Sdavidch				     ecore_exeq_get_vlan);
2287255736Sdavidch	}
2288255736Sdavidch}
2289255736Sdavidch
2290255736Sdavidchvoid ecore_init_vlan_mac_obj(struct bxe_softc *sc,
2291255736Sdavidch			     struct ecore_vlan_mac_obj *vlan_mac_obj,
2292255736Sdavidch			     uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2293255736Sdavidch			     ecore_dma_addr_t rdata_mapping, int state,
2294255736Sdavidch			     unsigned long *pstate, ecore_obj_type type,
2295255736Sdavidch			     struct ecore_credit_pool_obj *macs_pool,
2296255736Sdavidch			     struct ecore_credit_pool_obj *vlans_pool)
2297255736Sdavidch{
2298255736Sdavidch	union ecore_qable_obj *qable_obj =
2299255736Sdavidch		(union ecore_qable_obj *)vlan_mac_obj;
2300255736Sdavidch
2301255736Sdavidch	ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2302255736Sdavidch				   rdata_mapping, state, pstate, type,
2303255736Sdavidch				   macs_pool, vlans_pool);
2304255736Sdavidch
2305255736Sdavidch	/* CAM pool handling */
2306255736Sdavidch	vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
2307255736Sdavidch	vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2308255736Sdavidch	/* CAM offset is relevant for 57710 and 57711 chips only which have a
2309255736Sdavidch	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2310255736Sdavidch	 * will be taken from MACs' pool object only.
2311255736Sdavidch	 */
2312255736Sdavidch	vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2313255736Sdavidch	vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2314255736Sdavidch
2315255736Sdavidch	if (CHIP_IS_E1(sc)) {
2316255736Sdavidch		ECORE_ERR("Do not support chips others than E2\n");
2317255736Sdavidch		ECORE_BUG();
2318255736Sdavidch	} else if (CHIP_IS_E1H(sc)) {
2319255736Sdavidch		vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e1h;
2320255736Sdavidch		vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2321255736Sdavidch		vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2322255736Sdavidch		vlan_mac_obj->check_move        = ecore_check_move_always_err;
2323255736Sdavidch		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2324255736Sdavidch
2325255736Sdavidch		/* Exe Queue */
2326255736Sdavidch		ecore_exe_queue_init(sc,
2327255736Sdavidch				     &vlan_mac_obj->exe_queue, 1, qable_obj,
2328255736Sdavidch				     ecore_validate_vlan_mac,
2329255736Sdavidch				     ecore_remove_vlan_mac,
2330255736Sdavidch				     ecore_optimize_vlan_mac,
2331255736Sdavidch				     ecore_execute_vlan_mac,
2332255736Sdavidch				     ecore_exeq_get_vlan_mac);
2333255736Sdavidch	} else {
2334255736Sdavidch		vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e2;
2335255736Sdavidch		vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2336255736Sdavidch		vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2337255736Sdavidch		vlan_mac_obj->check_move        = ecore_check_move;
2338255736Sdavidch		vlan_mac_obj->ramrod_cmd        =
2339255736Sdavidch			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2340255736Sdavidch
2341255736Sdavidch		/* Exe Queue */
2342255736Sdavidch		ecore_exe_queue_init(sc,
2343255736Sdavidch				     &vlan_mac_obj->exe_queue,
2344255736Sdavidch				     CLASSIFY_RULES_COUNT,
2345255736Sdavidch				     qable_obj, ecore_validate_vlan_mac,
2346255736Sdavidch				     ecore_remove_vlan_mac,
2347255736Sdavidch				     ecore_optimize_vlan_mac,
2348255736Sdavidch				     ecore_execute_vlan_mac,
2349255736Sdavidch				     ecore_exeq_get_vlan_mac);
2350255736Sdavidch	}
2351255736Sdavidch}
2352255736Sdavidch
2353255736Sdavidch/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2354255736Sdavidchstatic inline void __storm_memset_mac_filters(struct bxe_softc *sc,
2355255736Sdavidch			struct tstorm_eth_mac_filter_config *mac_filters,
2356255736Sdavidch			uint16_t pf_id)
2357255736Sdavidch{
2358255736Sdavidch	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2359255736Sdavidch
2360255736Sdavidch	uint32_t addr = BAR_TSTRORM_INTMEM +
2361255736Sdavidch			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2362255736Sdavidch
2363255736Sdavidch	ecore_storm_memset_struct(sc, addr, size, (uint32_t *)mac_filters);
2364255736Sdavidch}
2365255736Sdavidch
/*
 * Program the E1x rx-mode by writing a tstorm MAC filter structure
 * directly into internal memory. No ramrod is posted, so the operation
 * completes synchronously and the pending bit is cleared before return.
 */
static int ecore_set_rx_mode_e1x(struct bxe_softc *sc,
				 struct ecore_rx_mode_ramrod_params *p)
{
	/* update the sc MAC filter structure */
	uint32_t mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
	uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	uint8_t unmatched_unicast = 0;

    /* In e1x there we only take into account rx accept flag since tx switching
     * isn't enabled. */
	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	/* Each mask below holds one bit per client; set or clear only this
	 * client's bit according to the flags computed above.
	 */
	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	ECORE_MSG(sc, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure*/
	__storm_memset_mac_filters(sc, mac_filters, p->func_id);

	/* The operation is completed */
	ECORE_CLEAR_BIT(p->state, p->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();

	return ECORE_SUCCESS;
}
2446255736Sdavidch
2447255736Sdavidch/* Setup ramrod data */
2448255736Sdavidchstatic inline void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid,
2449255736Sdavidch				struct eth_classify_header *hdr,
2450255736Sdavidch				uint8_t rule_cnt)
2451255736Sdavidch{
2452255736Sdavidch	hdr->echo = ECORE_CPU_TO_LE32(cid);
2453255736Sdavidch	hdr->rule_cnt = rule_cnt;
2454255736Sdavidch}
2455255736Sdavidch
2456255736Sdavidchstatic inline void ecore_rx_mode_set_cmd_state_e2(struct bxe_softc *sc,
2457255736Sdavidch				unsigned long *accept_flags,
2458255736Sdavidch				struct eth_filter_rules_cmd *cmd,
2459255736Sdavidch				bool clear_accept_all)
2460255736Sdavidch{
2461255736Sdavidch	uint16_t state;
2462255736Sdavidch
2463255736Sdavidch	/* start with 'drop-all' */
2464255736Sdavidch	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2465255736Sdavidch		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2466255736Sdavidch
2467255736Sdavidch	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2468255736Sdavidch		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2469255736Sdavidch
2470255736Sdavidch	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2471255736Sdavidch		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2472255736Sdavidch
2473255736Sdavidch	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2474255736Sdavidch		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2475255736Sdavidch		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2476255736Sdavidch	}
2477255736Sdavidch
2478255736Sdavidch	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2479255736Sdavidch		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2480255736Sdavidch		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2481255736Sdavidch	}
2482255736Sdavidch	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2483255736Sdavidch		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2484255736Sdavidch
2485255736Sdavidch	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2486255736Sdavidch		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2487255736Sdavidch		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2488255736Sdavidch	}
2489255736Sdavidch	if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2490255736Sdavidch		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2491255736Sdavidch
2492255736Sdavidch	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2493255736Sdavidch	if (clear_accept_all) {
2494255736Sdavidch		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2495255736Sdavidch		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2496255736Sdavidch		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2497255736Sdavidch		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2498255736Sdavidch	}
2499255736Sdavidch
2500255736Sdavidch	cmd->state = ECORE_CPU_TO_LE16(state);
2501255736Sdavidch}
2502255736Sdavidch
/*
 * Build and post an ETH_FILTER_RULES ramrod for E2+ chips: up to four
 * rules (Tx/Rx for the L2 client, plus Tx/Rx for the FCoE queue when
 * requested). Returns ECORE_PENDING on success since completion arrives
 * asynchronously via the ramrod completion path.
 */
static int ecore_set_rx_mode_e2(struct bxe_softc *sc,
				struct ecore_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	uint8_t rule_idx = 0;

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) */
	if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
					       &(data->rules[rule_idx++]),
					       FALSE);
	}

	/* Rx */
	if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
					       &(data->rules[rule_idx++]),
					       FALSE);
	}

	/* If FCoE Queue configuration has been requested configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
	if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/*  Tx (internal switching) */
		if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_TX_CMD;

			/* TRUE strips all ACCEPT_ALL/UNMATCHED bits */
			ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
						       &(data->rules[rule_idx]),
						       TRUE);
			rule_idx++;
		}

		/* Rx */
		if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_RX_CMD;

			/* TRUE strips all ACCEPT_ALL/UNMATCHED bits */
			ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
						       &(data->rules[rule_idx]),
						       TRUE);
			rule_idx++;
		}
	}

	/* Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	ECORE_MSG(sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
		  data->header.rule_cnt, p->rx_accept_flags,
		  p->tx_accept_flags);

	/* No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside ecore_sp_post()).
	 */

	/* Send a ramrod */
	rc = ecore_sp_post(sc,
			   RAMROD_CMD_ID_ETH_FILTER_RULES,
			   p->cid,
			   p->rdata_mapping,
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return ECORE_PENDING;
}
2605255736Sdavidch
/* E2+ rx-mode completion: block until the pending state bit clears. */
static int ecore_wait_rx_mode_comp_e2(struct bxe_softc *sc,
				      struct ecore_rx_mode_ramrod_params *p)
{
	return ecore_state_wait(sc, p->state, p->pstate);
}
2611255736Sdavidch
/* E1x rx-mode completes synchronously (internal-memory write, no ramrod),
 * so there is nothing to wait for.
 */
static int ecore_empty_rx_mode_wait(struct bxe_softc *sc,
				    struct ecore_rx_mode_ramrod_params *p)
{
	/* Do nothing */
	return ECORE_SUCCESS;
}
2618255736Sdavidch
2619255736Sdavidchint ecore_config_rx_mode(struct bxe_softc *sc,
2620255736Sdavidch			 struct ecore_rx_mode_ramrod_params *p)
2621255736Sdavidch{
2622255736Sdavidch	int rc;
2623255736Sdavidch
2624255736Sdavidch	/* Configure the new classification in the chip */
2625255736Sdavidch	rc = p->rx_mode_obj->config_rx_mode(sc, p);
2626255736Sdavidch	if (rc < 0)
2627255736Sdavidch		return rc;
2628255736Sdavidch
2629255736Sdavidch	/* Wait for a ramrod completion if was requested */
2630255736Sdavidch	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2631255736Sdavidch		rc = p->rx_mode_obj->wait_comp(sc, p);
2632255736Sdavidch		if (rc)
2633255736Sdavidch			return rc;
2634255736Sdavidch	}
2635255736Sdavidch
2636255736Sdavidch	return rc;
2637255736Sdavidch}
2638255736Sdavidch
2639255736Sdavidchvoid ecore_init_rx_mode_obj(struct bxe_softc *sc,
2640255736Sdavidch			    struct ecore_rx_mode_obj *o)
2641255736Sdavidch{
2642255736Sdavidch	if (CHIP_IS_E1x(sc)) {
2643255736Sdavidch		o->wait_comp      = ecore_empty_rx_mode_wait;
2644255736Sdavidch		o->config_rx_mode = ecore_set_rx_mode_e1x;
2645255736Sdavidch	} else {
2646255736Sdavidch		o->wait_comp      = ecore_wait_rx_mode_comp_e2;
2647255736Sdavidch		o->config_rx_mode = ecore_set_rx_mode_e2;
2648255736Sdavidch	}
2649255736Sdavidch}
2650255736Sdavidch
2651255736Sdavidch/********************* Multicast verbs: SET, CLEAR ****************************/
2652255736Sdavidchstatic inline uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
2653255736Sdavidch{
2654255736Sdavidch	return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2655255736Sdavidch}
2656255736Sdavidch
/* One MAC address queued on a pending multicast ADD command; elements are
 * carved from the trailing buffer allocated with the command itself.
 */
struct ecore_mcast_mac_elem {
	ecore_list_entry_t link;
	uint8_t mac[ETH_ALEN];
	uint8_t pad[2]; /* For a natural alignment of the following buffer */
};
2662255736Sdavidch
/* One queued multicast configuration command awaiting execution. */
struct ecore_pending_mcast_cmd {
	ecore_list_entry_t link;	/* entry in the object's pending_cmds_head */
	int type; /* ECORE_MCAST_CMD_X */
	union {
		ecore_list_t macs_head;	/* ADD: list of ecore_mcast_mac_elem */
		uint32_t macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with aprox match */
	} data;

	bool done; /* set to TRUE, when the command has been handled,
		    * practically used in 57712 handling only, where one pending
		    * command may be handled in a few operations. As long as for
		    * other chips every operation handling is completed in a
		    * single ramrod, there is no need to utilize this field.
		    */
};
2679255736Sdavidch
/* Wait for both the mcast scheduler bit and the raw pending bit to clear.
 * Returns ECORE_SUCCESS, or ECORE_TIMEOUT if either wait fails.
 */
static int ecore_mcast_wait(struct bxe_softc *sc,
			    struct ecore_mcast_obj *o)
{
	if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
			o->raw.wait_comp(sc, &o->raw))
		return ECORE_TIMEOUT;

	return ECORE_SUCCESS;
}
2689255736Sdavidch
/*
 * Allocate a pending multicast command and append it to the object's
 * pending list (FIFO). For ADD the command and its MAC element array are
 * allocated in one chunk, with the elements living directly after the
 * command struct. Returns ECORE_PENDING on success, ECORE_SUCCESS for an
 * empty request, or a negative error.
 */
static int ecore_mcast_enqueue_cmd(struct bxe_softc *sc,
				   struct ecore_mcast_obj *o,
				   struct ecore_mcast_ramrod_params *p,
				   enum ecore_mcast_cmd cmd)
{
	int total_sz;
	struct ecore_pending_mcast_cmd *new_cmd;
	struct ecore_mcast_mac_elem *cur_mac = NULL;
	struct ecore_mcast_list_elem *pos;
	/* Only ADD carries a trailing MAC array; DEL/RESTORE need none */
	int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
			     p->mcast_list_len : 0);

	/* If the command is empty ("handle pending commands only"), break */
	if (!p->mcast_list_len)
		return ECORE_SUCCESS;

	/* Command struct plus space for the MAC elements right after it */
	total_sz = sizeof(*new_cmd) +
		macs_list_len * sizeof(struct ecore_mcast_mac_elem);

	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
	new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);

	if (!new_cmd)
		return ECORE_NOMEM;

	ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d\n",
		  cmd, macs_list_len);

	ECORE_LIST_INIT(&new_cmd->data.macs_head);

	new_cmd->type = cmd;
	new_cmd->done = FALSE;

	switch (cmd) {
	case ECORE_MCAST_CMD_ADD:
		/* MAC elements start immediately after the command struct */
		cur_mac = (struct ecore_mcast_mac_elem *)
			  ((uint8_t *)new_cmd + sizeof(*new_cmd));

		/* Push the MACs of the current command into the pending command
		 * MACs list: FIFO
		 */
		ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
					  struct ecore_mcast_list_elem) {
			ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
			ECORE_LIST_PUSH_TAIL(&cur_mac->link,
					     &new_cmd->data.macs_head);
			cur_mac++;
		}

		break;

	case ECORE_MCAST_CMD_DEL:
		new_cmd->data.macs_num = p->mcast_list_len;
		break;

	case ECORE_MCAST_CMD_RESTORE:
		new_cmd->data.next_bin = 0;
		break;

	default:
		ECORE_FREE(sc, new_cmd, total_sz);
		ECORE_ERR("Unknown command: %d\n", cmd);
		return ECORE_INVAL;
	}

	/* Push the new pending command to the tail of the pending list: FIFO */
	ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);

	/* Mark the object as having work scheduled */
	o->set_sched(o);

	return ECORE_PENDING;
}
2762255736Sdavidch
2763255736Sdavidch/**
2764255736Sdavidch * ecore_mcast_get_next_bin - get the next set bin (index)
2765255736Sdavidch *
2766255736Sdavidch * @o:
2767255736Sdavidch * @last:	index to start looking from (including)
2768255736Sdavidch *
2769255736Sdavidch * returns the next found (set) bin or a negative value if none is found.
2770255736Sdavidch */
2771255736Sdavidchstatic inline int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2772255736Sdavidch{
2773255736Sdavidch	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2774255736Sdavidch
2775255736Sdavidch	for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2776255736Sdavidch		if (o->registry.aprox_match.vec[i])
2777255736Sdavidch			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2778255736Sdavidch				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2779255736Sdavidch				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2780255736Sdavidch						       vec, cur_bit)) {
2781255736Sdavidch					return cur_bit;
2782255736Sdavidch				}
2783255736Sdavidch			}
2784255736Sdavidch		inner_start = 0;
2785255736Sdavidch	}
2786255736Sdavidch
2787255736Sdavidch	/* None found */
2788255736Sdavidch	return -1;
2789255736Sdavidch}
2790255736Sdavidch
2791255736Sdavidch/**
2792255736Sdavidch * ecore_mcast_clear_first_bin - find the first set bin and clear it
2793255736Sdavidch *
2794255736Sdavidch * @o:
2795255736Sdavidch *
2796255736Sdavidch * returns the index of the found bin or -1 if none is found
2797255736Sdavidch */
2798255736Sdavidchstatic inline int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2799255736Sdavidch{
2800255736Sdavidch	int cur_bit = ecore_mcast_get_next_bin(o, 0);
2801255736Sdavidch
2802255736Sdavidch	if (cur_bit >= 0)
2803255736Sdavidch		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2804255736Sdavidch
2805255736Sdavidch	return cur_bit;
2806255736Sdavidch}
2807255736Sdavidch
2808255736Sdavidchstatic inline uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2809255736Sdavidch{
2810255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
2811255736Sdavidch	uint8_t rx_tx_flag = 0;
2812255736Sdavidch
2813255736Sdavidch	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2814255736Sdavidch	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2815255736Sdavidch		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2816255736Sdavidch
2817255736Sdavidch	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2818255736Sdavidch	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2819255736Sdavidch		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2820255736Sdavidch
2821255736Sdavidch	return rx_tx_flag;
2822255736Sdavidch}
2823255736Sdavidch
/**
 * ecore_mcast_set_one_rule_e2 - fill one rule in the mcast ramrod data (57712+)
 *
 * @sc:		device handle
 * @o:		multicast object; its approximate-match registry is updated here
 * @idx:	index of the rule to fill inside the ramrod data buffer
 * @cfg_data:	carries the MAC for ADD or the bin for RESTORE; unused for DEL
 * @cmd:	ADD/DEL/RESTORE
 *
 * Besides writing the rule itself, this keeps the registry bin vector in
 * sync: ADD sets the MAC's bin, DEL clears the first still-set bin.
 */
static void ecore_mcast_set_one_rule_e2(struct bxe_softc *sc,
					struct ecore_mcast_obj *o, int idx,
					union ecore_mcast_config_data *cfg_data,
					enum ecore_mcast_cmd cmd)
{
	struct ecore_raw_obj *r = &o->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
	uint8_t func_id = r->func_id;
	uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
	int bin;

	/* ADD and RESTORE both set a bin; only DEL clears one */
	if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;

	data->rules[idx].cmd_general_data |= rx_tx_add_flag;

	/* Get a bin and update a bins' vector */
	switch (cmd) {
	case ECORE_MCAST_CMD_ADD:
		bin = ecore_mcast_bin_from_mac(cfg_data->mac);
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
		break;

	case ECORE_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (ecore_mcast_clear_first_bin() returns -1) then we would
		 * clear any (0xff) bin - the (uint8_t) cast below maps -1
		 * to 0xff intentionally.
		 * See ecore_mcast_validate_e2() for explanation when it may
		 * happen.
		 */
		bin = ecore_mcast_clear_first_bin(o);
		break;

	case ECORE_MCAST_CMD_RESTORE:
		bin = cfg_data->bin;
		break;

	default:
		ECORE_ERR("Unknown command: %d\n", cmd);
		return;
	}

	ECORE_MSG(sc, "%s bin %d\n",
		  ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
		   "Setting"  : "Clearing"), bin);

	data->rules[idx].bin_id    = (uint8_t)bin;
	data->rules[idx].func_id   = func_id;
	data->rules[idx].engine_id = o->engine_id;
}
2875255736Sdavidch
2876255736Sdavidch/**
2877255736Sdavidch * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2878255736Sdavidch *
2879255736Sdavidch * @sc:		device handle
2880255736Sdavidch * @o:
2881255736Sdavidch * @start_bin:	index in the registry to start from (including)
2882255736Sdavidch * @rdata_idx:	index in the ramrod data to start from
2883255736Sdavidch *
2884255736Sdavidch * returns last handled bin index or -1 if all bins have been handled
2885255736Sdavidch */
2886255736Sdavidchstatic inline int ecore_mcast_handle_restore_cmd_e2(
2887255736Sdavidch	struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_bin,
2888255736Sdavidch	int *rdata_idx)
2889255736Sdavidch{
2890255736Sdavidch	int cur_bin, cnt = *rdata_idx;
2891255736Sdavidch	union ecore_mcast_config_data cfg_data = {NULL};
2892255736Sdavidch
2893255736Sdavidch	/* go through the registry and configure the bins from it */
2894255736Sdavidch	for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2895255736Sdavidch	    cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2896255736Sdavidch
2897255736Sdavidch		cfg_data.bin = (uint8_t)cur_bin;
2898255736Sdavidch		o->set_one_rule(sc, o, cnt, &cfg_data,
2899255736Sdavidch				ECORE_MCAST_CMD_RESTORE);
2900255736Sdavidch
2901255736Sdavidch		cnt++;
2902255736Sdavidch
2903255736Sdavidch		ECORE_MSG(sc, "About to configure a bin %d\n", cur_bin);
2904255736Sdavidch
2905255736Sdavidch		/* Break if we reached the maximum number
2906255736Sdavidch		 * of rules.
2907255736Sdavidch		 */
2908255736Sdavidch		if (cnt >= o->max_cmd_len)
2909255736Sdavidch			break;
2910255736Sdavidch	}
2911255736Sdavidch
2912255736Sdavidch	*rdata_idx = cnt;
2913255736Sdavidch
2914255736Sdavidch	return cur_bin;
2915255736Sdavidch}
2916255736Sdavidch
2917255736Sdavidchstatic inline void ecore_mcast_hdl_pending_add_e2(struct bxe_softc *sc,
2918255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2919255736Sdavidch	int *line_idx)
2920255736Sdavidch{
2921255736Sdavidch	struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2922255736Sdavidch	int cnt = *line_idx;
2923255736Sdavidch	union ecore_mcast_config_data cfg_data = {NULL};
2924255736Sdavidch
2925255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2926255736Sdavidch		&cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) {
2927255736Sdavidch
2928255736Sdavidch		cfg_data.mac = &pmac_pos->mac[0];
2929255736Sdavidch		o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2930255736Sdavidch
2931255736Sdavidch		cnt++;
2932255736Sdavidch
2933255736Sdavidch		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
2934255736Sdavidch			  pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2935255736Sdavidch
2936255736Sdavidch		ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2937255736Sdavidch					&cmd_pos->data.macs_head);
2938255736Sdavidch
2939255736Sdavidch		/* Break if we reached the maximum number
2940255736Sdavidch		 * of rules.
2941255736Sdavidch		 */
2942255736Sdavidch		if (cnt >= o->max_cmd_len)
2943255736Sdavidch			break;
2944255736Sdavidch	}
2945255736Sdavidch
2946255736Sdavidch	*line_idx = cnt;
2947255736Sdavidch
2948255736Sdavidch	/* if no more MACs to configure - we are done */
2949255736Sdavidch	if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2950255736Sdavidch		cmd_pos->done = TRUE;
2951255736Sdavidch}
2952255736Sdavidch
2953255736Sdavidchstatic inline void ecore_mcast_hdl_pending_del_e2(struct bxe_softc *sc,
2954255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2955255736Sdavidch	int *line_idx)
2956255736Sdavidch{
2957255736Sdavidch	int cnt = *line_idx;
2958255736Sdavidch
2959255736Sdavidch	while (cmd_pos->data.macs_num) {
2960255736Sdavidch		o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2961255736Sdavidch
2962255736Sdavidch		cnt++;
2963255736Sdavidch
2964255736Sdavidch		cmd_pos->data.macs_num--;
2965255736Sdavidch
2966255736Sdavidch		  ECORE_MSG(sc, "Deleting MAC. %d left,cnt is %d\n",
2967255736Sdavidch				  cmd_pos->data.macs_num, cnt);
2968255736Sdavidch
2969255736Sdavidch		/* Break if we reached the maximum
2970255736Sdavidch		 * number of rules.
2971255736Sdavidch		 */
2972255736Sdavidch		if (cnt >= o->max_cmd_len)
2973255736Sdavidch			break;
2974255736Sdavidch	}
2975255736Sdavidch
2976255736Sdavidch	*line_idx = cnt;
2977255736Sdavidch
2978255736Sdavidch	/* If we cleared all bins - we are done */
2979255736Sdavidch	if (!cmd_pos->data.macs_num)
2980255736Sdavidch		cmd_pos->done = TRUE;
2981255736Sdavidch}
2982255736Sdavidch
2983255736Sdavidchstatic inline void ecore_mcast_hdl_pending_restore_e2(struct bxe_softc *sc,
2984255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2985255736Sdavidch	int *line_idx)
2986255736Sdavidch{
2987255736Sdavidch	cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2988255736Sdavidch						line_idx);
2989255736Sdavidch
2990255736Sdavidch	if (cmd_pos->data.next_bin < 0)
2991255736Sdavidch		/* If o->set_restore returned -1 we are done */
2992255736Sdavidch		cmd_pos->done = TRUE;
2993255736Sdavidch	else
2994255736Sdavidch		/* Start from the next bin next time */
2995255736Sdavidch		cmd_pos->data.next_bin++;
2996255736Sdavidch}
2997255736Sdavidch
/**
 * ecore_mcast_handle_pending_cmds_e2 - execute queued mcast commands (57712+)
 *
 * @sc:	device handle
 * @p:	ramrod parameters; p->mcast_obj holds the pending command FIFO
 *
 * Walks the object's pending command list and fills as many rules as fit
 * into the current ramrod data buffer. A command that completes is unlinked
 * and freed; a partially handled command stays at the head of the list for
 * the next pass.
 *
 * Returns the number of ramrod data lines filled, or ECORE_INVAL on an
 * unknown command type.
 */
static inline int ecore_mcast_handle_pending_cmds_e2(struct bxe_softc *sc,
				struct ecore_mcast_ramrod_params *p)
{
	struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct ecore_mcast_obj *o = p->mcast_obj;

	/* SAFE iteration: completed entries are removed inside the loop */
	ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
		&o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) {
		switch (cmd_pos->type) {
		case ECORE_MCAST_CMD_ADD:
			ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
			break;

		case ECORE_MCAST_CMD_DEL:
			ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
			break;

		case ECORE_MCAST_CMD_RESTORE:
			ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
							   &cnt);
			break;

		default:
			ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
			return ECORE_INVAL;
		}

		/* If the command has been completed - remove it from the list
		 * and free the memory.
		 * NOTE(review): cmd_pos->alloc_len is not set in the visible
		 * enqueue path; the size argument of ECORE_FREE may be unused
		 * on this platform - confirm against the ECORE_FREE definition.
		 */
		if (cmd_pos->done) {
			ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
						&o->pending_cmds_head);
			ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}
3042255736Sdavidch
3043255736Sdavidchstatic inline void ecore_mcast_hdl_add(struct bxe_softc *sc,
3044255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3045255736Sdavidch	int *line_idx)
3046255736Sdavidch{
3047255736Sdavidch	struct ecore_mcast_list_elem *mlist_pos;
3048255736Sdavidch	union ecore_mcast_config_data cfg_data = {NULL};
3049255736Sdavidch	int cnt = *line_idx;
3050255736Sdavidch
3051255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3052255736Sdavidch				  struct ecore_mcast_list_elem) {
3053255736Sdavidch		cfg_data.mac = mlist_pos->mac;
3054255736Sdavidch		o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
3055255736Sdavidch
3056255736Sdavidch		cnt++;
3057255736Sdavidch
3058255736Sdavidch		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3059255736Sdavidch			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
3060255736Sdavidch	}
3061255736Sdavidch
3062255736Sdavidch	*line_idx = cnt;
3063255736Sdavidch}
3064255736Sdavidch
3065255736Sdavidchstatic inline void ecore_mcast_hdl_del(struct bxe_softc *sc,
3066255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3067255736Sdavidch	int *line_idx)
3068255736Sdavidch{
3069255736Sdavidch	int cnt = *line_idx, i;
3070255736Sdavidch
3071255736Sdavidch	for (i = 0; i < p->mcast_list_len; i++) {
3072255736Sdavidch		o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
3073255736Sdavidch
3074255736Sdavidch		cnt++;
3075255736Sdavidch
3076255736Sdavidch		ECORE_MSG(sc, "Deleting MAC. %d left\n",
3077255736Sdavidch			  p->mcast_list_len - i - 1);
3078255736Sdavidch	}
3079255736Sdavidch
3080255736Sdavidch	*line_idx = cnt;
3081255736Sdavidch}
3082255736Sdavidch
3083255736Sdavidch/**
3084255736Sdavidch * ecore_mcast_handle_current_cmd -
3085255736Sdavidch *
3086255736Sdavidch * @sc:		device handle
3087255736Sdavidch * @p:
3088255736Sdavidch * @cmd:
3089255736Sdavidch * @start_cnt:	first line in the ramrod data that may be used
3090255736Sdavidch *
3091255736Sdavidch * This function is called iff there is enough place for the current command in
3092255736Sdavidch * the ramrod data.
3093255736Sdavidch * Returns number of lines filled in the ramrod data in total.
3094255736Sdavidch */
3095255736Sdavidchstatic inline int ecore_mcast_handle_current_cmd(struct bxe_softc *sc,
3096255736Sdavidch			struct ecore_mcast_ramrod_params *p,
3097255736Sdavidch			enum ecore_mcast_cmd cmd,
3098255736Sdavidch			int start_cnt)
3099255736Sdavidch{
3100255736Sdavidch	struct ecore_mcast_obj *o = p->mcast_obj;
3101255736Sdavidch	int cnt = start_cnt;
3102255736Sdavidch
3103255736Sdavidch	ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3104255736Sdavidch
3105255736Sdavidch	switch (cmd) {
3106255736Sdavidch	case ECORE_MCAST_CMD_ADD:
3107255736Sdavidch		ecore_mcast_hdl_add(sc, o, p, &cnt);
3108255736Sdavidch		break;
3109255736Sdavidch
3110255736Sdavidch	case ECORE_MCAST_CMD_DEL:
3111255736Sdavidch		ecore_mcast_hdl_del(sc, o, p, &cnt);
3112255736Sdavidch		break;
3113255736Sdavidch
3114255736Sdavidch	case ECORE_MCAST_CMD_RESTORE:
3115255736Sdavidch		o->hdl_restore(sc, o, 0, &cnt);
3116255736Sdavidch		break;
3117255736Sdavidch
3118255736Sdavidch	default:
3119255736Sdavidch		ECORE_ERR("Unknown command: %d\n", cmd);
3120255736Sdavidch		return ECORE_INVAL;
3121255736Sdavidch	}
3122255736Sdavidch
3123255736Sdavidch	/* The current command has been handled */
3124255736Sdavidch	p->mcast_list_len = 0;
3125255736Sdavidch
3126255736Sdavidch	return cnt;
3127255736Sdavidch}
3128255736Sdavidch
/**
 * ecore_mcast_validate_e2 - validate a command and size the work (57712+)
 *
 * @sc:		device handle
 * @p:		ramrod parameters; p->mcast_list_len may be rewritten here
 * @cmd:	ADD/DEL/RESTORE/CONT
 *
 * Adjusts the registry size and p->mcast_list_len so that the caller knows
 * how many ramrod lines this command will need, and accumulates that amount
 * into o->total_pending_num.
 *
 * Returns ECORE_SUCCESS, or ECORE_INVAL for an unknown command.
 */
static int ecore_mcast_validate_e2(struct bxe_softc *sc,
				   struct ecore_mcast_ramrod_params *p,
				   enum ecore_mcast_cmd cmd)
{
	struct ecore_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case ECORE_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break - intentional fallthrough: DEL then sizes its
		 * work exactly like RESTORE, from the pre-reset registry size.
		 */

	/* RESTORE command will restore the entire multicast configuration */
	case ECORE_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be only less as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin and the actual number of bins set in the
		 * registry would be less than we estimated here. See
		 * ecore_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case ECORE_MCAST_CMD_ADD:
	case ECORE_MCAST_CMD_CONT:
		/* Here we assume that all new MACs will fall into new bins.
		 * However we will correct the real registry size after we
		 * handle all pending commands.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		ECORE_ERR("Unknown command: %d\n", cmd);
		return ECORE_INVAL;
	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return ECORE_SUCCESS;
}
3173255736Sdavidch
3174255736Sdavidchstatic void ecore_mcast_revert_e2(struct bxe_softc *sc,
3175255736Sdavidch				      struct ecore_mcast_ramrod_params *p,
3176255736Sdavidch				      int old_num_bins)
3177255736Sdavidch{
3178255736Sdavidch	struct ecore_mcast_obj *o = p->mcast_obj;
3179255736Sdavidch
3180255736Sdavidch	o->set_registry_size(o, old_num_bins);
3181255736Sdavidch	o->total_pending_num -= p->mcast_list_len;
3182255736Sdavidch}
3183255736Sdavidch
3184255736Sdavidch/**
3185255736Sdavidch * ecore_mcast_set_rdata_hdr_e2 - sets a header values
3186255736Sdavidch *
3187255736Sdavidch * @sc:		device handle
3188255736Sdavidch * @p:
3189255736Sdavidch * @len:	number of rules to handle
3190255736Sdavidch */
3191255736Sdavidchstatic inline void ecore_mcast_set_rdata_hdr_e2(struct bxe_softc *sc,
3192255736Sdavidch					struct ecore_mcast_ramrod_params *p,
3193255736Sdavidch					uint8_t len)
3194255736Sdavidch{
3195255736Sdavidch	struct ecore_raw_obj *r = &p->mcast_obj->raw;
3196255736Sdavidch	struct eth_multicast_rules_ramrod_data *data =
3197255736Sdavidch		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
3198255736Sdavidch
3199255736Sdavidch	data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3200255736Sdavidch					(ECORE_FILTER_MCAST_PENDING <<
3201255736Sdavidch					 ECORE_SWCID_SHIFT));
3202255736Sdavidch	data->header.rule_cnt = len;
3203255736Sdavidch}
3204255736Sdavidch
3205255736Sdavidch/**
3206255736Sdavidch * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3207255736Sdavidch *
3208255736Sdavidch * @sc:		device handle
3209255736Sdavidch * @o:
3210255736Sdavidch *
3211255736Sdavidch * Recalculate the actual number of set bins in the registry using Brian
3212255736Sdavidch * Kernighan's algorithm: it's execution complexity is as a number of set bins.
3213255736Sdavidch *
3214255736Sdavidch * returns 0 for the compliance with ecore_mcast_refresh_registry_e1().
3215255736Sdavidch */
3216255736Sdavidchstatic inline int ecore_mcast_refresh_registry_e2(struct bxe_softc *sc,
3217255736Sdavidch						  struct ecore_mcast_obj *o)
3218255736Sdavidch{
3219255736Sdavidch	int i, cnt = 0;
3220255736Sdavidch	uint64_t elem;
3221255736Sdavidch
3222255736Sdavidch	for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
3223255736Sdavidch		elem = o->registry.aprox_match.vec[i];
3224255736Sdavidch		for (; elem; cnt++)
3225255736Sdavidch			elem &= elem - 1;
3226255736Sdavidch	}
3227255736Sdavidch
3228255736Sdavidch	o->set_registry_size(o, cnt);
3229255736Sdavidch
3230255736Sdavidch	return ECORE_SUCCESS;
3231255736Sdavidch}
3232255736Sdavidch
/**
 * ecore_mcast_setup_e2 - build and (unless CLEAR_ONLY) post the mcast ramrod
 *
 * @sc:		device handle
 * @p:		ramrod parameters
 * @cmd:	the current command (pending commands are handled first)
 *
 * Fills the ramrod data with pending commands first, then with the current
 * command if room remains, updates the outstanding-work accounting, and
 * posts the ramrod.
 *
 * Returns ECORE_PENDING when a ramrod was posted, ECORE_SUCCESS for the
 * CLEAR_ONLY path, or a negative value on failure.
 */
static int ecore_mcast_setup_e2(struct bxe_softc *sc,
				struct ecore_mcast_ramrod_params *p,
				enum ecore_mcast_cmd cmd)
{
	struct ecore_raw_obj *raw = &p->mcast_obj->raw;
	struct ecore_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(data, 0, sizeof(*data));

	/* Previously queued commands get first claim on the buffer */
	cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be TRUE iff there was enough room in ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
	ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);

	ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see ecore_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		ecore_mcast_refresh_registry_e2(sc, o);

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return ECORE_SUCCESS;
	} else {
		/* No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside ecore_sp_post()).
		 */

		/* Send a ramrod */
		rc = ecore_sp_post( sc,
				    RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				    raw->cid,
				    raw->rdata_mapping,
				    ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return ECORE_PENDING;
	}
}
3317255736Sdavidch
3318255736Sdavidchstatic int ecore_mcast_validate_e1h(struct bxe_softc *sc,
3319255736Sdavidch				    struct ecore_mcast_ramrod_params *p,
3320255736Sdavidch				    enum ecore_mcast_cmd cmd)
3321255736Sdavidch{
3322255736Sdavidch	/* Mark, that there is a work to do */
3323255736Sdavidch	if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
3324255736Sdavidch		p->mcast_list_len = 1;
3325255736Sdavidch
3326255736Sdavidch	return ECORE_SUCCESS;
3327255736Sdavidch}
3328255736Sdavidch
/* 57711 keeps no pending accounting, so there is nothing to roll back. */
static void ecore_mcast_revert_e1h(struct bxe_softc *sc,
				       struct ecore_mcast_ramrod_params *p,
				       int old_num_bins)
{
	/* Do nothing */
}
3335255736Sdavidch
/* Set bit 'bit' in the 57711 approximate-match filter 'filter', which is
 * laid out as an array of 32-bit words (bit 0 of word 0 first).
 */
#define ECORE_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)
3340255736Sdavidch
3341255736Sdavidchstatic inline void ecore_mcast_hdl_add_e1h(struct bxe_softc *sc,
3342255736Sdavidch					   struct ecore_mcast_obj *o,
3343255736Sdavidch					   struct ecore_mcast_ramrod_params *p,
3344255736Sdavidch					   uint32_t *mc_filter)
3345255736Sdavidch{
3346255736Sdavidch	struct ecore_mcast_list_elem *mlist_pos;
3347255736Sdavidch	int bit;
3348255736Sdavidch
3349255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3350255736Sdavidch				  struct ecore_mcast_list_elem) {
3351255736Sdavidch		bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
3352255736Sdavidch		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3353255736Sdavidch
3354255736Sdavidch		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n",
3355255736Sdavidch			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit);
3356255736Sdavidch
3357255736Sdavidch		/* bookkeeping... */
3358255736Sdavidch		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3359255736Sdavidch				  bit);
3360255736Sdavidch	}
3361255736Sdavidch}
3362255736Sdavidch
3363255736Sdavidchstatic inline void ecore_mcast_hdl_restore_e1h(struct bxe_softc *sc,
3364255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3365255736Sdavidch	uint32_t *mc_filter)
3366255736Sdavidch{
3367255736Sdavidch	int bit;
3368255736Sdavidch
3369255736Sdavidch	for (bit = ecore_mcast_get_next_bin(o, 0);
3370255736Sdavidch	     bit >= 0;
3371255736Sdavidch	     bit = ecore_mcast_get_next_bin(o, bit + 1)) {
3372255736Sdavidch		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3373255736Sdavidch		ECORE_MSG(sc, "About to set bin %d\n", bit);
3374255736Sdavidch	}
3375255736Sdavidch}
3376255736Sdavidch
/* On 57711 we write the multicast MACs' approximate match
 * table by directly into the TSTORM's internal RAM. So we don't
 * really need to handle any tricks to make it work.
 *
 * @sc:		device handle
 * @p:		ramrod parameters
 * @cmd:	ADD/DEL/RESTORE
 *
 * Returns ECORE_SUCCESS (the write completes synchronously - no ramrod
 * completion to wait for), or ECORE_INVAL for an unknown command.
 */
static int ecore_mcast_setup_e1h(struct bxe_softc *sc,
				 struct ecore_mcast_ramrod_params *p,
				 enum ecore_mcast_cmd cmd)
{
	int i;
	struct ecore_mcast_obj *o = p->mcast_obj;
	struct ecore_raw_obj *r = &o->raw;

	/* When CLEAR_ONLY was NOT requested - build the filter image and
	 * write it into internal RAM; otherwise (else branch below) only
	 * clear the registry and the pending bit.
	 */
	if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		uint32_t mc_filter[ECORE_MC_HASH_SIZE] = {0};

		/* Set the multicast filter bits before writing it into
		 * the internal memory.
		 */
		switch (cmd) {
		case ECORE_MCAST_CMD_ADD:
			ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
			break;

		case ECORE_MCAST_CMD_DEL:
			ECORE_MSG(sc,
				  "Invalidating multicast MACs configuration\n");

			/* clear the registry; mc_filter stays all-zero, so
			 * the write below wipes the HW table too
			 */
			ECORE_MEMSET(o->registry.aprox_match.vec, 0,
			       sizeof(o->registry.aprox_match.vec));
			break;

		case ECORE_MCAST_CMD_RESTORE:
			ecore_mcast_hdl_restore_e1h(sc, o, p, mc_filter);
			break;

		default:
			ECORE_ERR("Unknown command: %d\n", cmd);
			return ECORE_INVAL;
		}

		/* Set the mcast filter in the internal memory */
		for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
			REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
	} else
		/* clear the registry */
		ECORE_MEMSET(o->registry.aprox_match.vec, 0,
		       sizeof(o->registry.aprox_match.vec));

	/* We are done */
	r->clear_pending(r);

	return ECORE_SUCCESS;
}
3434255736Sdavidch
3435255736Sdavidchstatic int ecore_mcast_validate_e1(struct bxe_softc *sc,
3436255736Sdavidch				   struct ecore_mcast_ramrod_params *p,
3437255736Sdavidch				   enum ecore_mcast_cmd cmd)
3438255736Sdavidch{
3439255736Sdavidch	struct ecore_mcast_obj *o = p->mcast_obj;
3440255736Sdavidch	int reg_sz = o->get_registry_size(o);
3441255736Sdavidch
3442255736Sdavidch	switch (cmd) {
3443255736Sdavidch	/* DEL command deletes all currently configured MACs */
3444255736Sdavidch	case ECORE_MCAST_CMD_DEL:
3445255736Sdavidch		o->set_registry_size(o, 0);
3446255736Sdavidch		/* Don't break */
3447255736Sdavidch
3448255736Sdavidch	/* RESTORE command will restore the entire multicast configuration */
3449255736Sdavidch	case ECORE_MCAST_CMD_RESTORE:
3450255736Sdavidch		p->mcast_list_len = reg_sz;
3451255736Sdavidch		  ECORE_MSG(sc, "Command %d, p->mcast_list_len=%d\n",
3452255736Sdavidch				  cmd, p->mcast_list_len);
3453255736Sdavidch		break;
3454255736Sdavidch
3455255736Sdavidch	case ECORE_MCAST_CMD_ADD:
3456255736Sdavidch	case ECORE_MCAST_CMD_CONT:
3457255736Sdavidch		/* Multicast MACs on 57710 are configured as unicast MACs and
3458255736Sdavidch		 * there is only a limited number of CAM entries for that
3459255736Sdavidch		 * matter.
3460255736Sdavidch		 */
3461255736Sdavidch		if (p->mcast_list_len > o->max_cmd_len) {
3462255736Sdavidch			ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n",
3463255736Sdavidch				  o->max_cmd_len);
3464255736Sdavidch			return ECORE_INVAL;
3465255736Sdavidch		}
3466255736Sdavidch		/* Every configured MAC should be cleared if DEL command is
3467255736Sdavidch		 * called. Only the last ADD command is relevant as long as
3468255736Sdavidch		 * every ADD commands overrides the previous configuration.
3469255736Sdavidch		 */
3470255736Sdavidch		ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3471255736Sdavidch		if (p->mcast_list_len > 0)
3472255736Sdavidch			o->set_registry_size(o, p->mcast_list_len);
3473255736Sdavidch
3474255736Sdavidch		break;
3475255736Sdavidch
3476255736Sdavidch	default:
3477255736Sdavidch		ECORE_ERR("Unknown command: %d\n", cmd);
3478255736Sdavidch		return ECORE_INVAL;
3479255736Sdavidch	}
3480255736Sdavidch
3481255736Sdavidch	/* We want to ensure that commands are executed one by one for 57710.
3482255736Sdavidch	 * Therefore each none-empty command will consume o->max_cmd_len.
3483255736Sdavidch	 */
3484255736Sdavidch	if (p->mcast_list_len)
3485255736Sdavidch		o->total_pending_num += o->max_cmd_len;
3486255736Sdavidch
3487255736Sdavidch	return ECORE_SUCCESS;
3488255736Sdavidch}
3489255736Sdavidch
3490255736Sdavidchstatic void ecore_mcast_revert_e1(struct bxe_softc *sc,
3491255736Sdavidch				      struct ecore_mcast_ramrod_params *p,
3492255736Sdavidch				      int old_num_macs)
3493255736Sdavidch{
3494255736Sdavidch	struct ecore_mcast_obj *o = p->mcast_obj;
3495255736Sdavidch
3496255736Sdavidch	o->set_registry_size(o, old_num_macs);
3497255736Sdavidch
3498255736Sdavidch	/* If current command hasn't been handled yet and we are
3499255736Sdavidch	 * here means that it's meant to be dropped and we have to
3500255736Sdavidch	 * update the number of outstanding MACs accordingly.
3501255736Sdavidch	 */
3502255736Sdavidch	if (p->mcast_list_len)
3503255736Sdavidch		o->total_pending_num -= o->max_cmd_len;
3504255736Sdavidch}
3505255736Sdavidch
3506255736Sdavidchstatic void ecore_mcast_set_one_rule_e1(struct bxe_softc *sc,
3507255736Sdavidch					struct ecore_mcast_obj *o, int idx,
3508255736Sdavidch					union ecore_mcast_config_data *cfg_data,
3509255736Sdavidch					enum ecore_mcast_cmd cmd)
3510255736Sdavidch{
3511255736Sdavidch	struct ecore_raw_obj *r = &o->raw;
3512255736Sdavidch	struct mac_configuration_cmd *data =
3513255736Sdavidch		(struct mac_configuration_cmd *)(r->rdata);
3514255736Sdavidch
3515255736Sdavidch	/* copy mac */
3516255736Sdavidch	if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) {
3517255736Sdavidch		ecore_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3518255736Sdavidch				      &data->config_table[idx].middle_mac_addr,
3519255736Sdavidch				      &data->config_table[idx].lsb_mac_addr,
3520255736Sdavidch				      cfg_data->mac);
3521255736Sdavidch
3522255736Sdavidch		data->config_table[idx].vlan_id = 0;
3523255736Sdavidch		data->config_table[idx].pf_id = r->func_id;
3524255736Sdavidch		data->config_table[idx].clients_bit_vector =
3525255736Sdavidch			ECORE_CPU_TO_LE32(1 << r->cl_id);
3526255736Sdavidch
3527255736Sdavidch		ECORE_SET_FLAG(data->config_table[idx].flags,
3528255736Sdavidch			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3529255736Sdavidch			       T_ETH_MAC_COMMAND_SET);
3530255736Sdavidch	}
3531255736Sdavidch}
3532255736Sdavidch
3533255736Sdavidch/**
3534255736Sdavidch * ecore_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3535255736Sdavidch *
3536255736Sdavidch * @sc:		device handle
3537255736Sdavidch * @p:
3538255736Sdavidch * @len:	number of rules to handle
3539255736Sdavidch */
3540255736Sdavidchstatic inline void ecore_mcast_set_rdata_hdr_e1(struct bxe_softc *sc,
3541255736Sdavidch					struct ecore_mcast_ramrod_params *p,
3542255736Sdavidch					uint8_t len)
3543255736Sdavidch{
3544255736Sdavidch	struct ecore_raw_obj *r = &p->mcast_obj->raw;
3545255736Sdavidch	struct mac_configuration_cmd *data =
3546255736Sdavidch		(struct mac_configuration_cmd *)(r->rdata);
3547255736Sdavidch
3548255736Sdavidch	uint8_t offset = (CHIP_REV_IS_SLOW(sc) ?
3549255736Sdavidch		     ECORE_MAX_EMUL_MULTI*(1 + r->func_id) :
3550255736Sdavidch		     ECORE_MAX_MULTICAST*(1 + r->func_id));
3551255736Sdavidch
3552255736Sdavidch	data->hdr.offset = offset;
3553255736Sdavidch	data->hdr.client_id = ECORE_CPU_TO_LE16(0xff);
3554255736Sdavidch	data->hdr.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3555255736Sdavidch				     (ECORE_FILTER_MCAST_PENDING <<
3556255736Sdavidch				      ECORE_SWCID_SHIFT));
3557255736Sdavidch	data->hdr.length = len;
3558255736Sdavidch}
3559255736Sdavidch
3560255736Sdavidch/**
3561255736Sdavidch * ecore_mcast_handle_restore_cmd_e1 - restore command for 57710
3562255736Sdavidch *
3563255736Sdavidch * @sc:		device handle
3564255736Sdavidch * @o:
3565255736Sdavidch * @start_idx:	index in the registry to start from
3566255736Sdavidch * @rdata_idx:	index in the ramrod data to start from
3567255736Sdavidch *
3568255736Sdavidch * restore command for 57710 is like all other commands - always a stand alone
3569255736Sdavidch * command - start_idx and rdata_idx will always be 0. This function will always
3570255736Sdavidch * succeed.
3571255736Sdavidch * returns -1 to comply with 57712 variant.
3572255736Sdavidch */
static inline int ecore_mcast_handle_restore_cmd_e1(
	struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_idx,
	int *rdata_idx)
{
	struct ecore_mcast_mac_elem *elem;
	int i = 0;
	union ecore_mcast_config_data cfg_data = {NULL};

	/* go through the registry and configure the MACs from it.
	 * start_idx is unused here: per the header comment above, on 57710
	 * a restore is always a stand-alone command starting at index 0.
	 */
	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->registry.exact_match.macs, link,
				  struct ecore_mcast_mac_elem) {
		cfg_data.mac = &elem->mac[0];
		o->set_one_rule(sc, o, i, &cfg_data, ECORE_MCAST_CMD_RESTORE);

		i++;

		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
			  cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]);
	}

	/* Report how many rules were written into the ramrod data */
	*rdata_idx = i;

	/* -1 means "command fully consumed", matching the 57712 variant */
	return -1;
}
3597255736Sdavidch
/**
 * ecore_mcast_handle_pending_cmds_e1 - execute the oldest queued command
 *
 * @sc:	device handle
 * @p:	ramrod parameters; p->mcast_obj holds the pending command queue
 *
 * Pops the first pending command (57710 processes exactly one command per
 * ramrod) and writes its rules into the ramrod data buffer via
 * o->set_one_rule().  Returns the number of rules written (0 when the
 * queue is empty) or ECORE_INVAL for an unknown command type.
 */
static inline int ecore_mcast_handle_pending_cmds_e1(
	struct bxe_softc *sc, struct ecore_mcast_ramrod_params *p)
{
	struct ecore_pending_mcast_cmd *cmd_pos;
	struct ecore_mcast_mac_elem *pmac_pos;
	struct ecore_mcast_obj *o = p->mcast_obj;
	union ecore_mcast_config_data cfg_data = {NULL};
	int cnt = 0;

	/* If nothing to be done - return */
	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
		return 0;

	/* Handle the first command */
	cmd_pos = ECORE_LIST_FIRST_ENTRY(&o->pending_cmds_head,
					 struct ecore_pending_mcast_cmd, link);

	switch (cmd_pos->type) {
	case ECORE_MCAST_CMD_ADD:
		/* Emit one SET rule per queued MAC */
		ECORE_LIST_FOR_EACH_ENTRY(pmac_pos, &cmd_pos->data.macs_head,
					  link, struct ecore_mcast_mac_elem) {
			cfg_data.mac = &pmac_pos->mac[0];
			o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);

			cnt++;

			ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
				  pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
		}
		break;

	case ECORE_MCAST_CMD_DEL:
		/* No rules to write: the caller pre-invalidated every ramrod
		 * entry, so reporting the count is all that is needed here.
		 */
		cnt = cmd_pos->data.macs_num;
		ECORE_MSG(sc, "About to delete %d multicast MACs\n", cnt);
		break;

	case ECORE_MCAST_CMD_RESTORE:
		/* Re-emit the whole registry; cnt receives the rule count */
		o->hdl_restore(sc, o, 0, &cnt);
		break;

	default:
		/* NOTE(review): an unknown command is left on the queue
		 * (not removed/freed below) - verify this is intentional.
		 */
		ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
		return ECORE_INVAL;
	}

	/* The command has been fully consumed - unlink and free it */
	ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, &o->pending_cmds_head);
	ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);

	return cnt;
}
3648255736Sdavidch
3649255736Sdavidch/**
3650255736Sdavidch * ecore_get_fw_mac_addr - revert the ecore_set_fw_mac_addr().
3651255736Sdavidch *
3652255736Sdavidch * @fw_hi:
3653255736Sdavidch * @fw_mid:
3654255736Sdavidch * @fw_lo:
3655255736Sdavidch * @mac:
3656255736Sdavidch */
static inline void ecore_get_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
					 uint16_t *fw_lo, uint8_t *mac)
{
	/* Unpack the firmware's three 16-bit MAC words back into a 6-byte
	 * MAC array (inverse of ecore_set_fw_mac_addr()): each word
	 * contributes its two bytes in swapped order.
	 */
	const uint16_t *words[3] = { fw_hi, fw_mid, fw_lo };
	int i;

	for (i = 0; i < 3; i++) {
		const uint8_t *b = (const uint8_t *)words[i];

		mac[2 * i]     = b[1];
		mac[2 * i + 1] = b[0];
	}
}
3667255736Sdavidch
3668255736Sdavidch/**
3669255736Sdavidch * ecore_mcast_refresh_registry_e1 -
3670255736Sdavidch *
3671255736Sdavidch * @sc:		device handle
3672255736Sdavidch * @cnt:
3673255736Sdavidch *
3674255736Sdavidch * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3675255736Sdavidch * and update the registry correspondingly: if ADD - allocate a memory and add
3676255736Sdavidch * the entries to the registry (list), if DELETE - clear the registry and free
3677255736Sdavidch * the memory.
3678255736Sdavidch */
static inline int ecore_mcast_refresh_registry_e1(struct bxe_softc *sc,
						  struct ecore_mcast_obj *o)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct ecore_mcast_mac_elem *elem;
	struct mac_configuration_cmd *data =
			(struct mac_configuration_cmd *)(raw->rdata);

	/* If first entry contains a SET bit - the command was ADD,
	 * otherwise - DEL_ALL
	 */
	if (ECORE_GET_FLAG(data->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
		/* hdr.length = number of valid entries the command wrote */
		int i, len = data->hdr.length;

		/* Break if it was a RESTORE command: the registry already
		 * holds the MACs being re-sent, so nothing to record.
		 */
		if (!ECORE_LIST_IS_EMPTY(&o->registry.exact_match.macs))
			return ECORE_SUCCESS;

		/* One contiguous allocation covers all 'len' entries; they
		 * are pushed onto the list in order, so the list head ends
		 * up pointing at the base of this block (the DEL_ALL branch
		 * below relies on that to free the whole registry at once).
		 */
		elem = ECORE_CALLOC(len, sizeof(*elem), GFP_ATOMIC, sc);
		if (!elem) {
			ECORE_ERR("Failed to allocate registry memory\n");
			return ECORE_NOMEM;
		}

		/* Decode each ramrod entry back into a MAC and record it */
		for (i = 0; i < len; i++, elem++) {
			ecore_get_fw_mac_addr(
				&data->config_table[i].msb_mac_addr,
				&data->config_table[i].middle_mac_addr,
				&data->config_table[i].lsb_mac_addr,
				elem->mac);
			ECORE_MSG(sc, "Adding registry entry for [%02x:%02x:%02x:%02x:%02x:%02x]\n",
				  elem->mac[0], elem->mac[1], elem->mac[2], elem->mac[3], elem->mac[4], elem->mac[5]);
			ECORE_LIST_PUSH_TAIL(&elem->link,
					     &o->registry.exact_match.macs);
		}
	} else {
		/* DEL_ALL: the first list entry is the base of the single
		 * ECORE_CALLOC() block allocated above, so one free releases
		 * the entire registry.
		 * NOTE(review): the size passed is sizeof(*elem), not
		 * len * sizeof(*elem) - presumably ECORE_FREE's size is for
		 * accounting only; confirm against the macro's definition.
		 */
		elem = ECORE_LIST_FIRST_ENTRY(&o->registry.exact_match.macs,
					      struct ecore_mcast_mac_elem,
					      link);
		ECORE_MSG(sc, "Deleting a registry\n");
		ECORE_FREE(sc, elem, sizeof(*elem));
		ECORE_LIST_INIT(&o->registry.exact_match.macs);
	}

	return ECORE_SUCCESS;
}
3726255736Sdavidch
/**
 * ecore_mcast_setup_e1 - configure multicast MACs on 57710
 *
 * @sc:	device handle
 * @p:	ramrod parameters
 * @cmd: command to execute (ADD/DEL/RESTORE/CONT)
 *
 * Builds the exact-match MAC configuration ramrod for 57710: drains one
 * pending command (or the current one), updates the software registry and
 * posts the SET_MAC ramrod.  Returns ECORE_PENDING when a ramrod was sent,
 * ECORE_SUCCESS in CLEAR_ONLY mode, or a negative error code.
 */
static int ecore_mcast_setup_e1(struct bxe_softc *sc,
				struct ecore_mcast_ramrod_params *p,
				enum ecore_mcast_cmd cmd)
{
	struct ecore_mcast_obj *o = p->mcast_obj;
	struct ecore_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);
	int cnt = 0, i, rc;

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(data, 0, sizeof(*data));

	/* First set all entries as invalid; entries actually used by the
	 * command are overwritten with SET below, so a DEL needs no
	 * per-entry work.
	 */
	for (i = 0; i < o->max_cmd_len ; i++)
		ECORE_SET_FLAG(data->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	/* Handle pending commands first */
	cnt = ecore_mcast_handle_pending_cmds_e1(sc, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be TRUE iff there were no pending commands */
	if (!cnt)
		cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, 0);

	/* For 57710 every command has o->max_cmd_len length to ensure that
	 * commands are done one at a time.
	 */
	o->total_pending_num -= o->max_cmd_len;

	/* send a ramrod */

	ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);

	/* Set ramrod header (in particular, a number of entries to update) */
	ecore_mcast_set_rdata_hdr_e1(sc, p, (uint8_t)cnt);

	/* update a registry: we need the registry contents to be always up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we sent one command at a time
	 * hence we may take the registry update out of the command handling
	 * and do it in a simpler way here.
	 */
	rc = ecore_mcast_refresh_registry_e1(sc, o);
	if (rc)
		return rc;

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return ECORE_SUCCESS;
	} else {
		/* No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside ecore_sp_post()).
		 */

		/* Send a ramrod */
		rc = ecore_sp_post( sc,
				    RAMROD_CMD_ID_ETH_SET_MAC,
				    raw->cid,
				    raw->rdata_mapping,
				    ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return ECORE_PENDING;
	}
}
3806255736Sdavidch
3807255736Sdavidchstatic int ecore_mcast_get_registry_size_exact(struct ecore_mcast_obj *o)
3808255736Sdavidch{
3809255736Sdavidch	return o->registry.exact_match.num_macs_set;
3810255736Sdavidch}
3811255736Sdavidch
3812255736Sdavidchstatic int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3813255736Sdavidch{
3814255736Sdavidch	return o->registry.aprox_match.num_bins_set;
3815255736Sdavidch}
3816255736Sdavidch
/* Record the number of MACs held in the exact-match registry (57710) */
static void ecore_mcast_set_registry_size_exact(struct ecore_mcast_obj *o,
						int n)
{
	o->registry.exact_match.num_macs_set = n;
}
3822255736Sdavidch
/* Record the number of bins set in the approximate-match registry */
static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
						int n)
{
	o->registry.aprox_match.num_bins_set = n;
}
3828255736Sdavidch
/**
 * ecore_config_mcast - top-level multicast configuration entry point
 *
 * @sc:	device handle
 * @p:	ramrod parameters
 * @cmd: command to execute (ADD/DEL/RESTORE/CONT)
 *
 * Validates the command, enqueues it when it cannot be completed in this
 * iteration, and otherwise drives the chip-specific config_mcast handler.
 * On failure the registry size and pending-credit accounting are reverted.
 * Returns ECORE_SUCCESS/ECORE_PENDING or a negative error code.
 */
int ecore_config_mcast(struct bxe_softc *sc,
		       struct ecore_mcast_ramrod_params *p,
		       enum ecore_mcast_cmd cmd)
{
	struct ecore_mcast_obj *o = p->mcast_obj;
	struct ecore_raw_obj *r = &o->raw;
	int rc = 0, old_reg_size;

	/* This is needed to recover number of currently configured mcast macs
	 * in case of failure.
	 */
	old_reg_size = o->get_registry_size(o);

	/* Do some calculations and checks */
	rc = o->validate(sc, p, cmd);
	if (rc)
		return rc;

	/* Return if there is no work to do */
	if ((!p->mcast_list_len) && (!o->check_sched(o)))
		return ECORE_SUCCESS;

	ECORE_MSG(sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
		  o->total_pending_num, p->mcast_list_len, o->max_cmd_len);

	/* Enqueue the current command to the pending list if we can't complete
	 * it in the current iteration
	 */
	if (r->check_pending(r) ||
	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;

		/* As long as the current command is in a command list we
		 * don't need to handle it separately.
		 */
		p->mcast_list_len = 0;
	}

	if (!r->check_pending(r)) {

		/* Set 'pending' state */
		r->set_pending(r);

		/* Configure the new classification in the chip */
		rc = o->config_mcast(sc, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Wait for a ramrod completion if was requested */
		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(sc, o);
	}

	return rc;

error_exit2:
	/* Chip configuration failed: drop the 'pending' state first... */
	r->clear_pending(r);

error_exit1:
	/* ...then undo the validate() bookkeeping (registry size/credits) */
	o->revert(sc, p, old_reg_size);

	return rc;
}
3894255736Sdavidch
/* Clear the SCHEDULED bit in the object's state, fenced on both sides so
 * the change is ordered against surrounding memory operations.
 */
static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}
3901255736Sdavidch
/* Set the SCHEDULED bit in the object's state.  The *_CLEAR_BIT barrier
 * macros are reused here for the set operation; they serve as plain
 * before/after fences around the bit flip.
 */
static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_SET_BIT(o->sched_state, o->raw.pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}
3908255736Sdavidch
3909255736Sdavidchstatic bool ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3910255736Sdavidch{
3911255736Sdavidch	return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3912255736Sdavidch}
3913255736Sdavidch
/* TRUE when either a ramrod is in flight or a command is scheduled */
static bool ecore_mcast_check_pending(struct ecore_mcast_obj *o)
{
	return o->raw.check_pending(&o->raw) || o->check_sched(o);
}
3918255736Sdavidch
/**
 * ecore_init_mcast_obj - initialize a multicast object for a given chip
 *
 * @sc:			device handle
 * @mcast_obj:		object to initialize
 * @mcast_cl_id:	client id to use for multicast traffic
 * @mcast_cid:		connection id for multicast ramrods
 * @func_id:		PF id
 * @engine_id:		engine the object belongs to
 * @rdata:		ramrod data buffer (virtual address)
 * @rdata_mapping:	ramrod data buffer (DMA address)
 * @state:		'pending' state bit
 * @pstate:		pointer to the state word the bit lives in
 * @type:		object type (ecore_obj_type)
 *
 * Wires the chip-specific multicast vtable: 57710 (E1) uses the exact-match
 * CAM flavor, 57711 (E1H) the approximate-match (hash) flavor without a
 * ramrod, and everything else the E2 rules-based flavor.
 */
void ecore_init_mcast_obj(struct bxe_softc *sc,
			  struct ecore_mcast_obj *mcast_obj,
			  uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
			  uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, ecore_obj_type type)
{
	ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));

	ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);

	mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = ecore_mcast_check_sched;
	mcast_obj->set_sched = ecore_mcast_set_sched;
	mcast_obj->clear_sched = ecore_mcast_clear_sched;

	if (CHIP_IS_E1(sc)) {
		mcast_obj->config_mcast      = ecore_mcast_setup_e1;
		mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			ecore_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending     = ecore_mcast_check_pending;

		if (CHIP_REV_IS_SLOW(sc))
			mcast_obj->max_cmd_len = ECORE_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = ECORE_MAX_MULTICAST;

		mcast_obj->wait_comp         = ecore_mcast_wait;
		mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e1;
		mcast_obj->validate          = ecore_mcast_validate_e1;
		mcast_obj->revert            = ecore_mcast_revert_e1;
		mcast_obj->get_registry_size =
			ecore_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			ecore_mcast_set_registry_size_exact;

		/* 57710 is the only chip that uses the exact match for mcast
		 * at the moment.
		 */
		ECORE_LIST_INIT(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(sc)) {
		mcast_obj->config_mcast  = ecore_mcast_setup_e1h;
		mcast_obj->enqueue_cmd   = NULL;
		mcast_obj->hdl_restore   = NULL;
		mcast_obj->check_pending = ecore_mcast_check_pending;

		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
		mcast_obj->max_cmd_len       = -1;
		mcast_obj->wait_comp         = ecore_mcast_wait;
		mcast_obj->set_one_rule      = NULL;
		mcast_obj->validate          = ecore_mcast_validate_e1h;
		mcast_obj->revert            = ecore_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			ecore_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			ecore_mcast_set_registry_size_aprox;
	} else {
		mcast_obj->config_mcast      = ecore_mcast_setup_e2;
		mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			ecore_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending     = ecore_mcast_check_pending;
		/* TODO: There should be a proper HSI define for this number!!!
		 */
		mcast_obj->max_cmd_len       = 16;
		mcast_obj->wait_comp         = ecore_mcast_wait;
		mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e2;
		mcast_obj->validate          = ecore_mcast_validate_e2;
		mcast_obj->revert            = ecore_mcast_revert_e2;
		mcast_obj->get_registry_size =
			ecore_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			ecore_mcast_set_registry_size_aprox;
	}
}
4002255736Sdavidch
4003255736Sdavidch/*************************** Credit handling **********************************/
4004255736Sdavidch
4005255736Sdavidch/**
4006255736Sdavidch * atomic_add_ifless - add if the result is less than a given value.
4007255736Sdavidch *
4008255736Sdavidch * @v:	pointer of type ecore_atomic_t
4009255736Sdavidch * @a:	the amount to add to v...
4010255736Sdavidch * @u:	...if (v + a) is less than u.
4011255736Sdavidch *
4012255736Sdavidch * returns TRUE if (v + a) was less than u, and FALSE otherwise.
4013255736Sdavidch *
4014255736Sdavidch */
static inline bool __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
{
	int c, old;

	/* Classic lock-free compare-and-swap retry loop: read the current
	 * value, bail out if the bound would be reached, otherwise try to
	 * publish c + a; on a lost race, retry with the observed value.
	 * (NOTE: leading '__' identifiers are reserved by the C standard;
	 * name kept for parity with the existing code base.)
	 */
	c = ECORE_ATOMIC_READ(v);
	for (;;) {
		if (ECORE_UNLIKELY(c + a >= u))
			return FALSE;

		old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
		if (ECORE_LIKELY(old == c))
			break;
		/* CMPXCHG returned the value someone else stored - retry */
		c = old;
	}

	return TRUE;
}
4032255736Sdavidch
4033255736Sdavidch/**
4034255736Sdavidch * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
4035255736Sdavidch *
4036255736Sdavidch * @v:	pointer of type ecore_atomic_t
4037255736Sdavidch * @a:	the amount to dec from v...
4038255736Sdavidch * @u:	...if (v - a) is more or equal than u.
4039255736Sdavidch *
4040255736Sdavidch * returns TRUE if (v - a) was more or equal than u, and FALSE
4041255736Sdavidch * otherwise.
4042255736Sdavidch */
static inline bool __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
{
	int c, old;

	/* Mirror image of __atomic_add_ifless(): CAS retry loop that
	 * subtracts 'a' only while the result stays >= 'u'.
	 */
	c = ECORE_ATOMIC_READ(v);
	for (;;) {
		if (ECORE_UNLIKELY(c - a < u))
			return FALSE;

		old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
		if (ECORE_LIKELY(old == c))
			break;
		/* Lost the race - retry with the value actually stored */
		c = old;
	}

	return TRUE;
}
4060255736Sdavidch
/* Take 'cnt' credits from the pool; fails (FALSE) if the pool would go
 * below zero.  Full barriers on both sides order the credit update against
 * the caller's surrounding operations.
 */
static bool ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
{
	bool rc;

	ECORE_SMP_MB();
	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
	ECORE_SMP_MB();

	return rc;
}
4071255736Sdavidch
/* Return 'cnt' credits to the pool; fails (FALSE) if that would exceed the
 * pool size (i.e. more credits returned than were ever taken).
 */
static bool ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
{
	bool rc;

	ECORE_SMP_MB();

	/* Don't let to refill if credit + cnt > pool_sz */
	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);

	ECORE_SMP_MB();

	return rc;
}
4085255736Sdavidch
4086255736Sdavidchstatic int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
4087255736Sdavidch{
4088255736Sdavidch	int cur_credit;
4089255736Sdavidch
4090255736Sdavidch	ECORE_SMP_MB();
4091255736Sdavidch	cur_credit = ECORE_ATOMIC_READ(&o->credit);
4092255736Sdavidch
4093255736Sdavidch	return cur_credit;
4094255736Sdavidch}
4095255736Sdavidch
/* get/put stub for unlimited pools (negative credit at init time - see
 * ecore_init_credit_pool()): every request trivially succeeds.
 */
static bool ecore_credit_pool_always_TRUE(struct ecore_credit_pool_obj *o,
					  int cnt)
{
	return TRUE;
}
4101255736Sdavidch
/* Allocate a free CAM entry from the pool's mirror bit-vector.  On success
 * stores the absolute CAM offset (base + index) in *offset and returns
 * TRUE; returns FALSE with *offset == -1 when the pool is exhausted.
 */
static bool ecore_credit_pool_get_entry(
	struct ecore_credit_pool_obj *o,
	int *offset)
{
	int idx, vec, i;

	*offset = -1;

	/* Find "internal cam-offset" then add to base for this object... */
	for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {

		/* Skip the current vector if there are no free entries in it
		 * (a set bit in the mirror means "free").
		 */
		if (!o->pool_mirror[vec])
			continue;

		/* If we've got here we are going to find a free entry */
		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
		      i < BIT_VEC64_ELEM_SZ; idx++, i++)

			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
				/* Got one!! Mark it used and hand it out. */
				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
				*offset = o->base_pool_offset + idx;
				return TRUE;
			}
	}

	return FALSE;
}
4131255736Sdavidch
4132255736Sdavidchstatic bool ecore_credit_pool_put_entry(
4133255736Sdavidch	struct ecore_credit_pool_obj *o,
4134255736Sdavidch	int offset)
4135255736Sdavidch{
4136255736Sdavidch	if (offset < o->base_pool_offset)
4137255736Sdavidch		return FALSE;
4138255736Sdavidch
4139255736Sdavidch	offset -= o->base_pool_offset;
4140255736Sdavidch
4141255736Sdavidch	if (offset >= o->pool_sz)
4142255736Sdavidch		return FALSE;
4143255736Sdavidch
4144255736Sdavidch	/* Return the entry to the pool */
4145255736Sdavidch	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4146255736Sdavidch
4147255736Sdavidch	return TRUE;
4148255736Sdavidch}
4149255736Sdavidch
4150255736Sdavidchstatic bool ecore_credit_pool_put_entry_always_TRUE(
4151255736Sdavidch	struct ecore_credit_pool_obj *o,
4152255736Sdavidch	int offset)
4153255736Sdavidch{
4154255736Sdavidch	return TRUE;
4155255736Sdavidch}
4156255736Sdavidch
4157255736Sdavidchstatic bool ecore_credit_pool_get_entry_always_TRUE(
4158255736Sdavidch	struct ecore_credit_pool_obj *o,
4159255736Sdavidch	int *offset)
4160255736Sdavidch{
4161255736Sdavidch	*offset = -1;
4162255736Sdavidch	return TRUE;
4163255736Sdavidch}
/**
 * ecore_init_credit_pool - initialize credit pool internals.
 *
 * @p:		pool object to initialize (zeroed first)
 * @base:	Base entry in the CAM to use.
 * @credit:	pool size.
 *
 * If base is negative no CAM entries handling will be performed.
 * If credit is negative pool operations will always succeed (unlimited pool).
 */
static inline void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
					  int base, int credit)
{
	/* Zero the object first */
	ECORE_MEMSET(p, 0, sizeof(*p));

	/* Set the table to all 1s - a set bit marks a free entry */
	ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));

	/* Init a pool as full */
	ECORE_ATOMIC_SET(&p->credit, credit);

	/* The total pool size */
	p->pool_sz = credit;

	p->base_pool_offset = base;

	/* Commit the change */
	ECORE_SMP_MB();

	p->check = ecore_credit_pool_check;

	/* if pool credit is negative - disable the checks */
	if (credit >= 0) {
		p->put      = ecore_credit_pool_put;
		p->get      = ecore_credit_pool_get;
		p->put_entry = ecore_credit_pool_put_entry;
		p->get_entry = ecore_credit_pool_get_entry;
	} else {
		p->put      = ecore_credit_pool_always_TRUE;
		p->get      = ecore_credit_pool_always_TRUE;
		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
	}

	/* If base is negative - disable entries handling */
	if (base < 0) {
		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
	}
}
4216255736Sdavidch
/**
 * ecore_init_mac_credit_pool - set up the MAC CAM credit pool.
 *
 * @sc:		device handle
 * @p:		pool object to initialize
 * @func_id:	this function's id, used to derive its CAM base offset
 * @func_num:	number of active functions sharing the CAM
 *
 * Splits the MAC CAM credit between the active functions according to the
 * chip family: E1 subtracts the multicast entries, E1H splits the port's
 * half equally, E2+ splits per PATH and needs no per-entry CAM bookkeeping
 * (base passed as -1).
 */
void ecore_init_mac_credit_pool(struct bxe_softc *sc,
				struct ecore_credit_pool_obj *p, uint8_t func_id,
				uint8_t func_num)
{
/* TODO: this will be defined in consts as well... */
#define ECORE_CAM_SIZE_EMUL 5

	int cam_sz;

	if (CHIP_IS_E1(sc)) {
		/* In E1, Multicast is saved in cam... */
		if (!CHIP_REV_IS_SLOW(sc))
			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - ECORE_MAX_MULTICAST;
		else
			cam_sz = ECORE_CAM_SIZE_EMUL - ECORE_MAX_EMUL_MULTI;

		ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);

	} else if (CHIP_IS_E1H(sc)) {
		/* CAM credit is equally divided between all active functions
		 * on the PORT!.
		 */
		if ((func_num > 0)) {
			if (!CHIP_REV_IS_SLOW(sc))
				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
			else
				cam_sz = ECORE_CAM_SIZE_EMUL;
			ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			ecore_init_credit_pool(p, 0, 0);
		}

	} else {

		/*
		 * CAM credit is equaly divided between all active functions
		 * on the PATH.
		 */
		if ((func_num > 1)) {
			/* Reserve the VF slots off the top, then split the
			 * remainder; each PF also gets its per-PF VF share.
			 */
			if (!CHIP_REV_IS_SLOW(sc))
				cam_sz = (MAX_MAC_CREDIT_E2
				- GET_NUM_VFS_PER_PATH(sc))
				/ func_num
				+ GET_NUM_VFS_PER_PF(sc);
			else
				cam_sz = ECORE_CAM_SIZE_EMUL;

			/* No need for CAM entries handling for 57712 and
			 * newer.
			 */
			ecore_init_credit_pool(p, -1, cam_sz);
		} else if (func_num == 1) {
			/* Single function owns the whole CAM credit */
			if (!CHIP_REV_IS_SLOW(sc))
				cam_sz = MAX_MAC_CREDIT_E2;
			else
				cam_sz = ECORE_CAM_SIZE_EMUL;

			/* No need for CAM entries handling for 57712 and
			 * newer.
			 */
			ecore_init_credit_pool(p, -1, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			ecore_init_credit_pool(p, 0, 0);
		}
	}
}
4285255736Sdavidch
4286255736Sdavidchvoid ecore_init_vlan_credit_pool(struct bxe_softc *sc,
4287255736Sdavidch				 struct ecore_credit_pool_obj *p,
4288255736Sdavidch				 uint8_t func_id,
4289255736Sdavidch				 uint8_t func_num)
4290255736Sdavidch{
4291255736Sdavidch	if (CHIP_IS_E1x(sc)) {
4292255736Sdavidch		/* There is no VLAN credit in HW on 57710 and 57711 only
4293255736Sdavidch		 * MAC / MAC-VLAN can be set
4294255736Sdavidch		 */
4295255736Sdavidch		ecore_init_credit_pool(p, 0, -1);
4296255736Sdavidch	} else {
4297255736Sdavidch		/* CAM credit is equally divided between all active functions
4298255736Sdavidch		 * on the PATH.
4299255736Sdavidch		 */
4300255736Sdavidch		if (func_num > 0) {
4301255736Sdavidch			int credit = MAX_VLAN_CREDIT_E2 / func_num;
4302255736Sdavidch			ecore_init_credit_pool(p, func_id * credit, credit);
4303255736Sdavidch		} else
4304255736Sdavidch			/* this should never happen! Block VLAN operations. */
4305255736Sdavidch			ecore_init_credit_pool(p, 0, 0);
4306255736Sdavidch	}
4307255736Sdavidch}
4308255736Sdavidch
4309255736Sdavidch/****************** RSS Configuration ******************/
4310255736Sdavidch
/**
 * ecore_setup_rss - configure RSS
 *
 * @sc:		device handle
 * @p:		rss configuration
 *
 * Fills an eth_rss_update_ramrod_data structure from @p and posts an
 * RSS_UPDATE ramrod. Returns ECORE_PENDING on successful submission
 * (completion arrives asynchronously) or a negative error code.
 */
static int ecore_setup_rss(struct bxe_softc *sc,
			   struct ecore_config_rss_params *p)
{
	struct ecore_rss_config_obj *o = p->rss_obj;
	struct ecore_raw_obj *r = &o->raw;
	struct eth_rss_update_ramrod_data *data =
		(struct eth_rss_update_ramrod_data *)(r->rdata);
	uint8_t rss_mode = 0;
	int rc;

	ECORE_MEMSET(data, 0, sizeof(*data));

	ECORE_MSG(sc, "Configuring RSS\n");

	/* Set an echo field - carries the SW CID and state back in the
	 * completion so it can be matched to this request.
	 */
	data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
				 (r->state << ECORE_SWCID_SHIFT));

	/* RSS mode */
	if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_DISABLED;
	else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_REGULAR;
#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! BNX2X_UPSTREAM */
	else if (ECORE_TEST_BIT(ECORE_RSS_MODE_ESX51, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_ESX51;
#endif

	data->rss_mode = rss_mode;

	ECORE_MSG(sc, "rss_mode=%d\n", rss_mode);

	/* RSS capabilities - translate each requested hash type flag into
	 * the corresponding ramrod capability bit.
	 */
	if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
		data->udp_4tuple_dst_port_mask = ECORE_CPU_TO_LE16(p->tunnel_mask);
		data->udp_4tuple_dst_port_value =
			ECORE_CPU_TO_LE16(p->tunnel_value);
	}

	/* Hashing mask */
	data->rss_result_mask = p->rss_result_mask;

	/* RSS engine ID */
	data->rss_engine_id = o->engine_id;

	ECORE_MSG(sc, "rss_engine_id=%d\n", data->rss_engine_id);

	/* Indirection table */
	ECORE_MEMCPY(data->indirection_table, p->ind_table,
		  T_ETH_INDIRECTION_TABLE_SIZE);

	/* Remember the last configuration so it can be read back later
	 * via ecore_get_rss_ind_table().
	 */
	ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);


	/* RSS keys */
	if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
		ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
		       sizeof(data->rss_key));
		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	}

	/* No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside ecore_sp_post()).
	 */

	/* Send a ramrod */
	rc = ecore_sp_post(sc,
			     RAMROD_CMD_ID_ETH_RSS_UPDATE,
			     r->cid,
			     r->rdata_mapping,
			     ETH_CONNECTION_TYPE);

	if (rc < 0)
		return rc;

	return ECORE_PENDING;
}
4424255736Sdavidch
/**
 * ecore_get_rss_ind_table - read back the last configured indirection table.
 *
 * @rss_obj:	RSS object caching the table set by the last ecore_setup_rss
 * @ind_table:	output buffer, at least sizeof(rss_obj->ind_table) bytes
 */
void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
			     uint8_t *ind_table)
{
	ECORE_MEMCPY(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}
4430255736Sdavidch
4431255736Sdavidchint ecore_config_rss(struct bxe_softc *sc,
4432255736Sdavidch		     struct ecore_config_rss_params *p)
4433255736Sdavidch{
4434255736Sdavidch	int rc;
4435255736Sdavidch	struct ecore_rss_config_obj *o = p->rss_obj;
4436255736Sdavidch	struct ecore_raw_obj *r = &o->raw;
4437255736Sdavidch
4438255736Sdavidch	/* Do nothing if only driver cleanup was requested */
4439260113Sedavis	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4440260113Sedavis		ECORE_MSG(sc, "Not configuring RSS ramrod_flags=%lx\n",
4441260113Sedavis			  p->ramrod_flags);
4442255736Sdavidch		return ECORE_SUCCESS;
4443260113Sedavis	}
4444255736Sdavidch
4445255736Sdavidch	r->set_pending(r);
4446255736Sdavidch
4447255736Sdavidch	rc = o->config_rss(sc, p);
4448255736Sdavidch	if (rc < 0) {
4449255736Sdavidch		r->clear_pending(r);
4450255736Sdavidch		return rc;
4451255736Sdavidch	}
4452255736Sdavidch
4453255736Sdavidch	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
4454255736Sdavidch		rc = r->wait_comp(sc, r);
4455255736Sdavidch
4456255736Sdavidch	return rc;
4457255736Sdavidch}
4458255736Sdavidch
4459255736Sdavidchvoid ecore_init_rss_config_obj(struct bxe_softc *sc,
4460255736Sdavidch			       struct ecore_rss_config_obj *rss_obj,
4461255736Sdavidch			       uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
4462255736Sdavidch			       void *rdata, ecore_dma_addr_t rdata_mapping,
4463255736Sdavidch			       int state, unsigned long *pstate,
4464255736Sdavidch			       ecore_obj_type type)
4465255736Sdavidch{
4466255736Sdavidch	ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4467255736Sdavidch			   rdata_mapping, state, pstate, type);
4468255736Sdavidch
4469255736Sdavidch	rss_obj->engine_id  = engine_id;
4470255736Sdavidch	rss_obj->config_rss = ecore_setup_rss;
4471255736Sdavidch}
4472255736Sdavidch
4473258187Sedavisint validate_vlan_mac(struct bxe_softc *sc,
4474258187Sedavis		      struct ecore_vlan_mac_obj *vlan_mac)
4475258187Sedavis{
4476258187Sedavis	if (!vlan_mac->get_n_elements) {
4477258187Sedavis		ECORE_ERR("vlan mac object was not intialized\n");
4478258187Sedavis		return ECORE_INVAL;
4479258187Sedavis	}
4480258187Sedavis	return 0;
4481258187Sedavis}
4482258187Sedavis
4483255736Sdavidch/********************** Queue state object ***********************************/
4484255736Sdavidch
/**
 * ecore_queue_state_change - perform Queue state change transition
 *
 * @sc:		device handle
 * @params:	parameters to perform the transition
 *
 * returns 0 in case of successfully completed transition, negative error
 * code in case of failure, positive (EBUSY) value if there is a completion
 * to that is still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous commands).
 */
int ecore_queue_state_change(struct bxe_softc *sc,
			     struct ecore_queue_state_params *params)
{
	struct ecore_queue_sp_obj *o = params->q_obj;
	int rc, pending_bit;
	unsigned long *pending = &o->pending;

	/* Check that the requested transition is legal */
	rc = o->check_transition(sc, o, params);
	if (rc) {
		ECORE_ERR("check transition returned an error. rc %d\n", rc);
		return ECORE_INVAL;
	}

	/* Set "pending" bit - marks which command's completion we expect */
	ECORE_MSG(sc, "pending bit was=%lx\n", o->pending);
	pending_bit = o->set_pending(o, params);
	ECORE_MSG(sc, "pending bit now=%lx\n", o->pending);

	/* Don't send a command if only driver cleanup was requested */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
		o->complete_cmd(sc, o, pending_bit);
	else {
		/* Send a ramrod */
		rc = o->send_cmd(sc, params);
		if (rc) {
			/* Submission failed: roll back the pending bit and
			 * the scheduled next state.
			 */
			o->next_state = ECORE_Q_STATE_MAX;
			ECORE_CLEAR_BIT(pending_bit, pending);
			ECORE_SMP_MB_AFTER_CLEAR_BIT();
			return rc;
		}

		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(sc, o, pending_bit);
			if (rc)
				return rc;

			return ECORE_SUCCESS;
		}
	}

	/* Asynchronous path: report whether the command is still pending */
	return ECORE_RET_PENDING(pending_bit, pending);
}
4540255736Sdavidch
4541255736Sdavidchstatic int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
4542255736Sdavidch				   struct ecore_queue_state_params *params)
4543255736Sdavidch{
4544255736Sdavidch	enum ecore_queue_cmd cmd = params->cmd, bit;
4545255736Sdavidch
4546255736Sdavidch	/* ACTIVATE and DEACTIVATE commands are implemented on top of
4547255736Sdavidch	 * UPDATE command.
4548255736Sdavidch	 */
4549255736Sdavidch	if ((cmd == ECORE_Q_CMD_ACTIVATE) ||
4550255736Sdavidch	    (cmd == ECORE_Q_CMD_DEACTIVATE))
4551255736Sdavidch		bit = ECORE_Q_CMD_UPDATE;
4552255736Sdavidch	else
4553255736Sdavidch		bit = cmd;
4554255736Sdavidch
4555255736Sdavidch	ECORE_SET_BIT(bit, &obj->pending);
4556255736Sdavidch	return bit;
4557255736Sdavidch}
4558255736Sdavidch
/* Block until the pending bit for @cmd is cleared on @o, i.e. until the
 * matching ramrod completion arrives (or ecore_state_wait gives up).
 */
static int ecore_queue_wait_comp(struct bxe_softc *sc,
				 struct ecore_queue_sp_obj *o,
				 enum ecore_queue_cmd cmd)
{
	return ecore_state_wait(sc, cmd, &o->pending);
}
4565255736Sdavidch
/**
 * ecore_queue_comp_cmd - complete the state change command.
 *
 * @sc:		device handle
 * @o:		queue object the completion belongs to
 * @cmd:	command whose completion arrived
 *
 * Checks that the arrived completion is expected (its pending bit is set),
 * then commits the scheduled next_state/next_tx_only and clears the
 * pending bit. Returns ECORE_SUCCESS or ECORE_INVAL on an unexpected
 * completion.
 */
static int ecore_queue_comp_cmd(struct bxe_softc *sc,
				struct ecore_queue_sp_obj *o,
				enum ecore_queue_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	/* Completion must match a command we actually issued */
	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
		ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, o->cids[ECORE_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return ECORE_INVAL;
	}

	if (o->next_tx_only >= o->max_cos)
		/* >= because tx only must always be smaller than cos since the
		 * primary connection supports COS 0
		 */
		ECORE_ERR("illegal value for next tx_only: %d. max cos was %d",
			  o->next_tx_only, o->max_cos);

	ECORE_MSG(sc,
		  "Completing command %d for queue %d, setting state to %d\n",
		  cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only)  /* print num tx-only if any exist */
		ECORE_MSG(sc, "primary cid %d: num tx-only cons %d\n",
			  o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);

	/* Commit the scheduled transition */
	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = ECORE_Q_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	ECORE_CLEAR_BIT(cmd, &o->pending);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();

	return ECORE_SUCCESS;
}
4617255736Sdavidch
4618255736Sdavidchstatic void ecore_q_fill_setup_data_e2(struct bxe_softc *sc,
4619255736Sdavidch				struct ecore_queue_state_params *cmd_params,
4620255736Sdavidch				struct client_init_ramrod_data *data)
4621255736Sdavidch{
4622255736Sdavidch	struct ecore_queue_setup_params *params = &cmd_params->params.setup;
4623255736Sdavidch
4624255736Sdavidch	/* Rx data */
4625255736Sdavidch
4626255736Sdavidch	/* IPv6 TPA supported for E2 and above only */
4627255736Sdavidch	data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
4628255736Sdavidch					  &params->flags) *
4629255736Sdavidch				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4630255736Sdavidch}
4631255736Sdavidch
/**
 * ecore_q_fill_init_general_data - fill the "general" section of a
 * client-init ramrod.
 *
 * @sc:		device handle
 * @o:		queue object supplying client/function ids
 * @params:	general setup parameters (stat id, SP client id, MTU, cos)
 * @gen_data:	ramrod section to fill
 * @flags:	ECORE_Q_FLG_* bit vector controlling optional features
 */
static void ecore_q_fill_init_general_data(struct bxe_softc *sc,
				struct ecore_queue_sp_obj *o,
				struct ecore_general_setup_params *params,
				struct client_init_general_data *gen_data,
				unsigned long *flags)
{
	gen_data->client_id = o->cl_id;

	if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
		gen_data->statistics_counter_id =
					params->stat_id;
		gen_data->statistics_en_flg = 1;
		gen_data->statistics_zero_flg =
			ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
	} else
		/* Statistics collection disabled for this client */
		gen_data->statistics_counter_id =
					DISABLE_STATISTIC_COUNTER_ID_VALUE;

	gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE,
						   flags);
	gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
						    flags);
	gen_data->sp_client_id = params->spcl_id;
	gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
	gen_data->func_id = o->func_id;

	gen_data->cos = params->cos;

	/* FCoE queues are classified as storage traffic for LLFC */
	gen_data->traffic_type =
		ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;

	ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d\n",
		  gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
}
4667255736Sdavidch
/**
 * ecore_q_fill_init_tx_data - fill the Tx section of a client-init ramrod.
 *
 * @o:		queue object (unused here, kept for interface symmetry)
 * @params:	Tx queue setup parameters (BD ring address, SB indices, ...)
 * @tx_data:	ramrod section to fill
 * @flags:	ECORE_Q_FLG_* bit vector controlling optional features
 */
static void ecore_q_fill_init_tx_data(struct ecore_queue_sp_obj *o,
				struct ecore_txq_setup_params *params,
				struct client_init_tx_data *tx_data,
				unsigned long *flags)
{
	tx_data->enforce_security_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
	tx_data->default_vlan =
		ECORE_CPU_TO_LE16(params->default_vlan);
	tx_data->default_vlan_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
	tx_data->tx_switching_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
	tx_data->anti_spoofing_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
	tx_data->force_default_pri_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
	tx_data->refuse_outband_vlan_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
	tx_data->tunnel_lso_inc_ip_id =
		ECORE_TEST_BIT(ECORE_Q_FLG_TUN_INC_INNER_IP_ID, flags);
	/* Where to place the pseudo checksum for non-LSO tunneled packets */
	tx_data->tunnel_non_lso_pcsum_location =
		ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
							    CSUM_ON_BD;

	tx_data->tx_status_block_id = params->fw_sb_id;
	tx_data->tx_sb_index_number = params->sb_cq_index;
	tx_data->tss_leading_client_id = params->tss_leading_cl_id;

	/* DMA address of the Tx BD ring, split into 32-bit halves */
	tx_data->tx_bd_page_base.lo =
		ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
	tx_data->tx_bd_page_base.hi =
		ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));

	/* Don't configure any Tx switching mode during queue SETUP */
	tx_data->state = 0;
}
4705255736Sdavidch
/**
 * ecore_q_fill_init_pause_data - fill the Rx flow-control thresholds of a
 * client-init ramrod.
 *
 * @o:		queue object (unused here, kept for interface symmetry)
 * @params:	pause thresholds for the RCQ, BD and SGE rings
 * @rx_data:	ramrod Rx section to fill
 */
static void ecore_q_fill_init_pause_data(struct ecore_queue_sp_obj *o,
				struct rxq_pause_params *params,
				struct client_init_rx_data *rx_data)
{
	/* flow control data */
	rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
	rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
	rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
	rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
	rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
	rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
	rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
}
4719255736Sdavidch
/**
 * ecore_q_fill_init_rx_data - fill the Rx section of a client-init ramrod.
 *
 * @o:		queue object (unused here, kept for interface symmetry)
 * @params:	Rx queue setup parameters (ring addresses, sizes, SB indices)
 * @rx_data:	ramrod section to fill
 * @flags:	ECORE_Q_FLG_* bit vector controlling optional features
 */
static void ecore_q_fill_init_rx_data(struct ecore_queue_sp_obj *o,
				struct ecore_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	/* TPA enable: IPv4 aggregation plus optional GRO mode */
	rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
				CLIENT_INIT_RX_DATA_TPA_MODE;
	rx_data->vmqueue_mode_en_flg = 0;

	rx_data->extra_data_over_sgl_en_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);

	/* Always start in DROP_ALL mode */
	rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* We don't set drop flags */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;
	rx_data->inner_vlan_removal_enable_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
	rx_data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buf_sz);
	/* DMA addresses of the BD, SGE and CQE rings, as 32-bit halves */
	rx_data->bd_page_base.lo =
		ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
						 flags);

	if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		ECORE_CPU_TO_LE16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		ECORE_CPU_TO_LE16(params->silent_removal_mask);
}
4789255736Sdavidch
4790255736Sdavidch/* initialize the general, tx and rx parts of a queue object */
4791255736Sdavidchstatic void ecore_q_fill_setup_data_cmn(struct bxe_softc *sc,
4792255736Sdavidch				struct ecore_queue_state_params *cmd_params,
4793255736Sdavidch				struct client_init_ramrod_data *data)
4794255736Sdavidch{
4795255736Sdavidch	ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4796255736Sdavidch				       &cmd_params->params.setup.gen_params,
4797255736Sdavidch				       &data->general,
4798255736Sdavidch				       &cmd_params->params.setup.flags);
4799255736Sdavidch
4800255736Sdavidch	ecore_q_fill_init_tx_data(cmd_params->q_obj,
4801255736Sdavidch				  &cmd_params->params.setup.txq_params,
4802255736Sdavidch				  &data->tx,
4803255736Sdavidch				  &cmd_params->params.setup.flags);
4804255736Sdavidch
4805255736Sdavidch	ecore_q_fill_init_rx_data(cmd_params->q_obj,
4806255736Sdavidch				  &cmd_params->params.setup.rxq_params,
4807255736Sdavidch				  &data->rx,
4808255736Sdavidch				  &cmd_params->params.setup.flags);
4809255736Sdavidch
4810255736Sdavidch	ecore_q_fill_init_pause_data(cmd_params->q_obj,
4811255736Sdavidch				     &cmd_params->params.setup.pause_params,
4812255736Sdavidch				     &data->rx);
4813255736Sdavidch}
4814255736Sdavidch
4815255736Sdavidch/* initialize the general and tx parts of a tx-only queue object */
4816255736Sdavidchstatic void ecore_q_fill_setup_tx_only(struct bxe_softc *sc,
4817255736Sdavidch				struct ecore_queue_state_params *cmd_params,
4818255736Sdavidch				struct tx_queue_init_ramrod_data *data)
4819255736Sdavidch{
4820255736Sdavidch	ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4821255736Sdavidch				       &cmd_params->params.tx_only.gen_params,
4822255736Sdavidch				       &data->general,
4823255736Sdavidch				       &cmd_params->params.tx_only.flags);
4824255736Sdavidch
4825255736Sdavidch	ecore_q_fill_init_tx_data(cmd_params->q_obj,
4826255736Sdavidch				  &cmd_params->params.tx_only.txq_params,
4827255736Sdavidch				  &data->tx,
4828255736Sdavidch				  &cmd_params->params.tx_only.flags);
4829255736Sdavidch
4830255736Sdavidch	ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
4831255736Sdavidch		  cmd_params->q_obj->cids[0],
4832255736Sdavidch		  data->tx.tx_bd_page_base.lo,
4833255736Sdavidch		  data->tx.tx_bd_page_base.hi);
4834255736Sdavidch}
4835255736Sdavidch
4836255736Sdavidch/**
4837255736Sdavidch * ecore_q_init - init HW/FW queue
4838255736Sdavidch *
4839255736Sdavidch * @sc:		device handle
4840255736Sdavidch * @params:
4841255736Sdavidch *
4842255736Sdavidch * HW/FW initial Queue configuration:
4843255736Sdavidch *      - HC: Rx and Tx
4844255736Sdavidch *      - CDU context validation
4845255736Sdavidch *
4846255736Sdavidch */
4847255736Sdavidchstatic inline int ecore_q_init(struct bxe_softc *sc,
4848255736Sdavidch			       struct ecore_queue_state_params *params)
4849255736Sdavidch{
4850255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
4851255736Sdavidch	struct ecore_queue_init_params *init = &params->params.init;
4852255736Sdavidch	uint16_t hc_usec;
4853255736Sdavidch	uint8_t cos;
4854255736Sdavidch
4855255736Sdavidch	/* Tx HC configuration */
4856255736Sdavidch	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
4857255736Sdavidch	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
4858255736Sdavidch		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4859255736Sdavidch
4860255736Sdavidch		ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
4861255736Sdavidch			init->tx.sb_cq_index,
4862255736Sdavidch			!ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->tx.flags),
4863255736Sdavidch			hc_usec);
4864255736Sdavidch	}
4865255736Sdavidch
4866255736Sdavidch	/* Rx HC configuration */
4867255736Sdavidch	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
4868255736Sdavidch	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
4869255736Sdavidch		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4870255736Sdavidch
4871255736Sdavidch		ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
4872255736Sdavidch			init->rx.sb_cq_index,
4873255736Sdavidch			!ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->rx.flags),
4874255736Sdavidch			hc_usec);
4875255736Sdavidch	}
4876255736Sdavidch
4877255736Sdavidch	/* Set CDU context validation values */
4878255736Sdavidch	for (cos = 0; cos < o->max_cos; cos++) {
4879255736Sdavidch		ECORE_MSG(sc, "setting context validation. cid %d, cos %d\n",
4880255736Sdavidch			  o->cids[cos], cos);
4881255736Sdavidch		ECORE_MSG(sc, "context pointer %p\n", init->cxts[cos]);
4882255736Sdavidch		ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
4883255736Sdavidch	}
4884255736Sdavidch
4885255736Sdavidch	/* As no ramrod is sent, complete the command immediately  */
4886255736Sdavidch	o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
4887255736Sdavidch
4888255736Sdavidch	ECORE_MMIOWB();
4889255736Sdavidch	ECORE_SMP_MB();
4890255736Sdavidch
4891255736Sdavidch	return ECORE_SUCCESS;
4892255736Sdavidch}
4893255736Sdavidch
4894255736Sdavidchstatic inline int ecore_q_send_setup_e1x(struct bxe_softc *sc,
4895255736Sdavidch					struct ecore_queue_state_params *params)
4896255736Sdavidch{
4897255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
4898255736Sdavidch	struct client_init_ramrod_data *rdata =
4899255736Sdavidch		(struct client_init_ramrod_data *)o->rdata;
4900255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4901255736Sdavidch	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4902255736Sdavidch
4903255736Sdavidch	/* Clear the ramrod data */
4904255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4905255736Sdavidch
4906255736Sdavidch	/* Fill the ramrod data */
4907255736Sdavidch	ecore_q_fill_setup_data_cmn(sc, params, rdata);
4908255736Sdavidch
4909255736Sdavidch	/* No need for an explicit memory barrier here as long we would
4910255736Sdavidch	 * need to ensure the ordering of writing to the SPQ element
4911255736Sdavidch	 * and updating of the SPQ producer which involves a memory
4912255736Sdavidch	 * read and we will have to put a full memory barrier there
4913255736Sdavidch	 * (inside ecore_sp_post()).
4914255736Sdavidch	 */
4915255736Sdavidch
4916255736Sdavidch	return ecore_sp_post(sc,
4917255736Sdavidch			     ramrod,
4918255736Sdavidch			     o->cids[ECORE_PRIMARY_CID_INDEX],
4919255736Sdavidch			     data_mapping,
4920255736Sdavidch			     ETH_CONNECTION_TYPE);
4921255736Sdavidch}
4922255736Sdavidch
4923255736Sdavidchstatic inline int ecore_q_send_setup_e2(struct bxe_softc *sc,
4924255736Sdavidch					struct ecore_queue_state_params *params)
4925255736Sdavidch{
4926255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
4927255736Sdavidch	struct client_init_ramrod_data *rdata =
4928255736Sdavidch		(struct client_init_ramrod_data *)o->rdata;
4929255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4930255736Sdavidch	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4931255736Sdavidch
4932255736Sdavidch	/* Clear the ramrod data */
4933255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4934255736Sdavidch
4935255736Sdavidch	/* Fill the ramrod data */
4936255736Sdavidch	ecore_q_fill_setup_data_cmn(sc, params, rdata);
4937255736Sdavidch	ecore_q_fill_setup_data_e2(sc, params, rdata);
4938255736Sdavidch
4939255736Sdavidch	/* No need for an explicit memory barrier here as long we would
4940255736Sdavidch	 * need to ensure the ordering of writing to the SPQ element
4941255736Sdavidch	 * and updating of the SPQ producer which involves a memory
4942255736Sdavidch	 * read and we will have to put a full memory barrier there
4943255736Sdavidch	 * (inside ecore_sp_post()).
4944255736Sdavidch	 */
4945255736Sdavidch
4946255736Sdavidch	return ecore_sp_post(sc,
4947255736Sdavidch			     ramrod,
4948255736Sdavidch			     o->cids[ECORE_PRIMARY_CID_INDEX],
4949255736Sdavidch			     data_mapping,
4950255736Sdavidch			     ETH_CONNECTION_TYPE);
4951255736Sdavidch}
4952255736Sdavidch
4953255736Sdavidchstatic inline int ecore_q_send_setup_tx_only(struct bxe_softc *sc,
4954255736Sdavidch				  struct ecore_queue_state_params *params)
4955255736Sdavidch{
4956255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
4957255736Sdavidch	struct tx_queue_init_ramrod_data *rdata =
4958255736Sdavidch		(struct tx_queue_init_ramrod_data *)o->rdata;
4959255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4960255736Sdavidch	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4961255736Sdavidch	struct ecore_queue_setup_tx_only_params *tx_only_params =
4962255736Sdavidch		&params->params.tx_only;
4963255736Sdavidch	uint8_t cid_index = tx_only_params->cid_index;
4964255736Sdavidch
4965255736Sdavidch	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type))
4966255736Sdavidch		ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4967255736Sdavidch	ECORE_MSG(sc, "sending forward tx-only ramrod");
4968255736Sdavidch
4969255736Sdavidch	if (cid_index >= o->max_cos) {
4970255736Sdavidch		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
4971255736Sdavidch			  o->cl_id, cid_index);
4972255736Sdavidch		return ECORE_INVAL;
4973255736Sdavidch	}
4974255736Sdavidch
4975255736Sdavidch	ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d\n",
4976255736Sdavidch		  tx_only_params->gen_params.cos,
4977255736Sdavidch		  tx_only_params->gen_params.spcl_id);
4978255736Sdavidch
4979255736Sdavidch	/* Clear the ramrod data */
4980255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4981255736Sdavidch
4982255736Sdavidch	/* Fill the ramrod data */
4983255736Sdavidch	ecore_q_fill_setup_tx_only(sc, params, rdata);
4984255736Sdavidch
4985255736Sdavidch	ECORE_MSG(sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4986255736Sdavidch		  o->cids[cid_index], rdata->general.client_id,
4987255736Sdavidch		  rdata->general.sp_client_id, rdata->general.cos);
4988255736Sdavidch
4989255736Sdavidch	/* No need for an explicit memory barrier here as long we would
4990255736Sdavidch	 * need to ensure the ordering of writing to the SPQ element
4991255736Sdavidch	 * and updating of the SPQ producer which involves a memory
4992255736Sdavidch	 * read and we will have to put a full memory barrier there
4993255736Sdavidch	 * (inside ecore_sp_post()).
4994255736Sdavidch	 */
4995255736Sdavidch
4996255736Sdavidch	return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4997255736Sdavidch			     data_mapping, ETH_CONNECTION_TYPE);
4998255736Sdavidch}
4999255736Sdavidch
5000255736Sdavidchstatic void ecore_q_fill_update_data(struct bxe_softc *sc,
5001255736Sdavidch				     struct ecore_queue_sp_obj *obj,
5002255736Sdavidch				     struct ecore_queue_update_params *params,
5003255736Sdavidch				     struct client_update_ramrod_data *data)
5004255736Sdavidch{
5005255736Sdavidch	/* Client ID of the client to update */
5006255736Sdavidch	data->client_id = obj->cl_id;
5007255736Sdavidch
5008255736Sdavidch	/* Function ID of the client to update */
5009255736Sdavidch	data->func_id = obj->func_id;
5010255736Sdavidch
5011255736Sdavidch	/* Default VLAN value */
5012255736Sdavidch	data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
5013255736Sdavidch
5014255736Sdavidch	/* Inner VLAN stripping */
5015255736Sdavidch	data->inner_vlan_removal_enable_flg =
5016255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM,
5017255736Sdavidch			       &params->update_flags);
5018255736Sdavidch	data->inner_vlan_removal_change_flg =
5019255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
5020255736Sdavidch		       &params->update_flags);
5021255736Sdavidch
5022255736Sdavidch	/* Outer VLAN stripping */
5023255736Sdavidch	data->outer_vlan_removal_enable_flg =
5024255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM,
5025255736Sdavidch			       &params->update_flags);
5026255736Sdavidch	data->outer_vlan_removal_change_flg =
5027255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
5028255736Sdavidch		       &params->update_flags);
5029255736Sdavidch
5030255736Sdavidch	/* Drop packets that have source MAC that doesn't belong to this
5031255736Sdavidch	 * Queue.
5032255736Sdavidch	 */
5033255736Sdavidch	data->anti_spoofing_enable_flg =
5034255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF,
5035255736Sdavidch			       &params->update_flags);
5036255736Sdavidch	data->anti_spoofing_change_flg =
5037255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
5038255736Sdavidch		       &params->update_flags);
5039255736Sdavidch
5040255736Sdavidch	/* Activate/Deactivate */
5041255736Sdavidch	data->activate_flg =
5042255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
5043255736Sdavidch	data->activate_change_flg =
5044255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5045255736Sdavidch			       &params->update_flags);
5046255736Sdavidch
5047255736Sdavidch	/* Enable default VLAN */
5048255736Sdavidch	data->default_vlan_enable_flg =
5049255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN,
5050255736Sdavidch			       &params->update_flags);
5051255736Sdavidch	data->default_vlan_change_flg =
5052255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
5053255736Sdavidch		       &params->update_flags);
5054255736Sdavidch
5055255736Sdavidch	/* silent vlan removal */
5056255736Sdavidch	data->silent_vlan_change_flg =
5057255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5058255736Sdavidch			       &params->update_flags);
5059255736Sdavidch	data->silent_vlan_removal_flg =
5060255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
5061255736Sdavidch			       &params->update_flags);
5062255736Sdavidch	data->silent_vlan_value = ECORE_CPU_TO_LE16(params->silent_removal_value);
5063255736Sdavidch	data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
5064255736Sdavidch
5065255736Sdavidch	/* tx switching */
5066255736Sdavidch	data->tx_switching_flg =
5067255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING,
5068255736Sdavidch			       &params->update_flags);
5069255736Sdavidch	data->tx_switching_change_flg =
5070255736Sdavidch		ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
5071255736Sdavidch			       &params->update_flags);
5072255736Sdavidch}
5073255736Sdavidch
5074255736Sdavidchstatic inline int ecore_q_send_update(struct bxe_softc *sc,
5075255736Sdavidch				      struct ecore_queue_state_params *params)
5076255736Sdavidch{
5077255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5078255736Sdavidch	struct client_update_ramrod_data *rdata =
5079255736Sdavidch		(struct client_update_ramrod_data *)o->rdata;
5080255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5081255736Sdavidch	struct ecore_queue_update_params *update_params =
5082255736Sdavidch		&params->params.update;
5083255736Sdavidch	uint8_t cid_index = update_params->cid_index;
5084255736Sdavidch
5085255736Sdavidch	if (cid_index >= o->max_cos) {
5086255736Sdavidch		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5087255736Sdavidch			  o->cl_id, cid_index);
5088255736Sdavidch		return ECORE_INVAL;
5089255736Sdavidch	}
5090255736Sdavidch
5091255736Sdavidch	/* Clear the ramrod data */
5092255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5093255736Sdavidch
5094255736Sdavidch	/* Fill the ramrod data */
5095255736Sdavidch	ecore_q_fill_update_data(sc, o, update_params, rdata);
5096255736Sdavidch
5097255736Sdavidch	/* No need for an explicit memory barrier here as long we would
5098255736Sdavidch	 * need to ensure the ordering of writing to the SPQ element
5099255736Sdavidch	 * and updating of the SPQ producer which involves a memory
5100255736Sdavidch	 * read and we will have to put a full memory barrier there
5101255736Sdavidch	 * (inside ecore_sp_post()).
5102255736Sdavidch	 */
5103255736Sdavidch
5104255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5105255736Sdavidch			     o->cids[cid_index], data_mapping,
5106255736Sdavidch			     ETH_CONNECTION_TYPE);
5107255736Sdavidch}
5108255736Sdavidch
5109255736Sdavidch/**
5110255736Sdavidch * ecore_q_send_deactivate - send DEACTIVATE command
5111255736Sdavidch *
5112255736Sdavidch * @sc:		device handle
5113255736Sdavidch * @params:
5114255736Sdavidch *
5115255736Sdavidch * implemented using the UPDATE command.
5116255736Sdavidch */
5117255736Sdavidchstatic inline int ecore_q_send_deactivate(struct bxe_softc *sc,
5118255736Sdavidch					struct ecore_queue_state_params *params)
5119255736Sdavidch{
5120255736Sdavidch	struct ecore_queue_update_params *update = &params->params.update;
5121255736Sdavidch
5122255736Sdavidch	ECORE_MEMSET(update, 0, sizeof(*update));
5123255736Sdavidch
5124255736Sdavidch	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5125255736Sdavidch
5126255736Sdavidch	return ecore_q_send_update(sc, params);
5127255736Sdavidch}
5128255736Sdavidch
5129255736Sdavidch/**
5130255736Sdavidch * ecore_q_send_activate - send ACTIVATE command
5131255736Sdavidch *
5132255736Sdavidch * @sc:		device handle
5133255736Sdavidch * @params:
5134255736Sdavidch *
5135255736Sdavidch * implemented using the UPDATE command.
5136255736Sdavidch */
5137255736Sdavidchstatic inline int ecore_q_send_activate(struct bxe_softc *sc,
5138255736Sdavidch					struct ecore_queue_state_params *params)
5139255736Sdavidch{
5140255736Sdavidch	struct ecore_queue_update_params *update = &params->params.update;
5141255736Sdavidch
5142255736Sdavidch	ECORE_MEMSET(update, 0, sizeof(*update));
5143255736Sdavidch
5144255736Sdavidch	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
5145255736Sdavidch	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5146255736Sdavidch
5147255736Sdavidch	return ecore_q_send_update(sc, params);
5148255736Sdavidch}
5149255736Sdavidch
/* UPDATE_TPA is not implemented by this driver yet - always fails. */
static inline int ecore_q_send_update_tpa(struct bxe_softc *sc,
					struct ecore_queue_state_params *params)
{
	/* TODO: Not implemented yet. */
	return -1;
}
5156255736Sdavidch
5157255736Sdavidchstatic inline int ecore_q_send_halt(struct bxe_softc *sc,
5158255736Sdavidch				    struct ecore_queue_state_params *params)
5159255736Sdavidch{
5160255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5161255736Sdavidch
5162255736Sdavidch	/* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
5163255736Sdavidch	ecore_dma_addr_t data_mapping = 0;
5164255736Sdavidch	data_mapping = (ecore_dma_addr_t)o->cl_id;
5165255736Sdavidch
5166255736Sdavidch	return ecore_sp_post(sc,
5167255736Sdavidch			     RAMROD_CMD_ID_ETH_HALT,
5168255736Sdavidch			     o->cids[ECORE_PRIMARY_CID_INDEX],
5169255736Sdavidch			     data_mapping,
5170255736Sdavidch			     ETH_CONNECTION_TYPE);
5171255736Sdavidch}
5172255736Sdavidch
5173255736Sdavidchstatic inline int ecore_q_send_cfc_del(struct bxe_softc *sc,
5174255736Sdavidch				       struct ecore_queue_state_params *params)
5175255736Sdavidch{
5176255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5177255736Sdavidch	uint8_t cid_idx = params->params.cfc_del.cid_index;
5178255736Sdavidch
5179255736Sdavidch	if (cid_idx >= o->max_cos) {
5180255736Sdavidch		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5181255736Sdavidch			  o->cl_id, cid_idx);
5182255736Sdavidch		return ECORE_INVAL;
5183255736Sdavidch	}
5184255736Sdavidch
5185255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
5186255736Sdavidch			     o->cids[cid_idx], 0,
5187255736Sdavidch			     NONE_CONNECTION_TYPE);
5188255736Sdavidch}
5189255736Sdavidch
5190255736Sdavidchstatic inline int ecore_q_send_terminate(struct bxe_softc *sc,
5191255736Sdavidch					struct ecore_queue_state_params *params)
5192255736Sdavidch{
5193255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5194255736Sdavidch	uint8_t cid_index = params->params.terminate.cid_index;
5195255736Sdavidch
5196255736Sdavidch	if (cid_index >= o->max_cos) {
5197255736Sdavidch		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5198255736Sdavidch			  o->cl_id, cid_index);
5199255736Sdavidch		return ECORE_INVAL;
5200255736Sdavidch	}
5201255736Sdavidch
5202255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
5203255736Sdavidch			     o->cids[cid_index], 0,
5204255736Sdavidch			     ETH_CONNECTION_TYPE);
5205255736Sdavidch}
5206255736Sdavidch
5207255736Sdavidchstatic inline int ecore_q_send_empty(struct bxe_softc *sc,
5208255736Sdavidch				     struct ecore_queue_state_params *params)
5209255736Sdavidch{
5210255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5211255736Sdavidch
5212255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
5213255736Sdavidch			     o->cids[ECORE_PRIMARY_CID_INDEX], 0,
5214255736Sdavidch			     ETH_CONNECTION_TYPE);
5215255736Sdavidch}
5216255736Sdavidch
5217255736Sdavidchstatic inline int ecore_queue_send_cmd_cmn(struct bxe_softc *sc,
5218255736Sdavidch					struct ecore_queue_state_params *params)
5219255736Sdavidch{
5220255736Sdavidch	switch (params->cmd) {
5221255736Sdavidch	case ECORE_Q_CMD_INIT:
5222255736Sdavidch		return ecore_q_init(sc, params);
5223255736Sdavidch	case ECORE_Q_CMD_SETUP_TX_ONLY:
5224255736Sdavidch		return ecore_q_send_setup_tx_only(sc, params);
5225255736Sdavidch	case ECORE_Q_CMD_DEACTIVATE:
5226255736Sdavidch		return ecore_q_send_deactivate(sc, params);
5227255736Sdavidch	case ECORE_Q_CMD_ACTIVATE:
5228255736Sdavidch		return ecore_q_send_activate(sc, params);
5229255736Sdavidch	case ECORE_Q_CMD_UPDATE:
5230255736Sdavidch		return ecore_q_send_update(sc, params);
5231255736Sdavidch	case ECORE_Q_CMD_UPDATE_TPA:
5232255736Sdavidch		return ecore_q_send_update_tpa(sc, params);
5233255736Sdavidch	case ECORE_Q_CMD_HALT:
5234255736Sdavidch		return ecore_q_send_halt(sc, params);
5235255736Sdavidch	case ECORE_Q_CMD_CFC_DEL:
5236255736Sdavidch		return ecore_q_send_cfc_del(sc, params);
5237255736Sdavidch	case ECORE_Q_CMD_TERMINATE:
5238255736Sdavidch		return ecore_q_send_terminate(sc, params);
5239255736Sdavidch	case ECORE_Q_CMD_EMPTY:
5240255736Sdavidch		return ecore_q_send_empty(sc, params);
5241255736Sdavidch	default:
5242255736Sdavidch		ECORE_ERR("Unknown command: %d\n", params->cmd);
5243255736Sdavidch		return ECORE_INVAL;
5244255736Sdavidch	}
5245255736Sdavidch}
5246255736Sdavidch
5247255736Sdavidchstatic int ecore_queue_send_cmd_e1x(struct bxe_softc *sc,
5248255736Sdavidch				    struct ecore_queue_state_params *params)
5249255736Sdavidch{
5250255736Sdavidch	switch (params->cmd) {
5251255736Sdavidch	case ECORE_Q_CMD_SETUP:
5252255736Sdavidch		return ecore_q_send_setup_e1x(sc, params);
5253255736Sdavidch	case ECORE_Q_CMD_INIT:
5254255736Sdavidch	case ECORE_Q_CMD_SETUP_TX_ONLY:
5255255736Sdavidch	case ECORE_Q_CMD_DEACTIVATE:
5256255736Sdavidch	case ECORE_Q_CMD_ACTIVATE:
5257255736Sdavidch	case ECORE_Q_CMD_UPDATE:
5258255736Sdavidch	case ECORE_Q_CMD_UPDATE_TPA:
5259255736Sdavidch	case ECORE_Q_CMD_HALT:
5260255736Sdavidch	case ECORE_Q_CMD_CFC_DEL:
5261255736Sdavidch	case ECORE_Q_CMD_TERMINATE:
5262255736Sdavidch	case ECORE_Q_CMD_EMPTY:
5263255736Sdavidch		return ecore_queue_send_cmd_cmn(sc, params);
5264255736Sdavidch	default:
5265255736Sdavidch		ECORE_ERR("Unknown command: %d\n", params->cmd);
5266255736Sdavidch		return ECORE_INVAL;
5267255736Sdavidch	}
5268255736Sdavidch}
5269255736Sdavidch
5270255736Sdavidchstatic int ecore_queue_send_cmd_e2(struct bxe_softc *sc,
5271255736Sdavidch				   struct ecore_queue_state_params *params)
5272255736Sdavidch{
5273255736Sdavidch	switch (params->cmd) {
5274255736Sdavidch	case ECORE_Q_CMD_SETUP:
5275255736Sdavidch		return ecore_q_send_setup_e2(sc, params);
5276255736Sdavidch	case ECORE_Q_CMD_INIT:
5277255736Sdavidch	case ECORE_Q_CMD_SETUP_TX_ONLY:
5278255736Sdavidch	case ECORE_Q_CMD_DEACTIVATE:
5279255736Sdavidch	case ECORE_Q_CMD_ACTIVATE:
5280255736Sdavidch	case ECORE_Q_CMD_UPDATE:
5281255736Sdavidch	case ECORE_Q_CMD_UPDATE_TPA:
5282255736Sdavidch	case ECORE_Q_CMD_HALT:
5283255736Sdavidch	case ECORE_Q_CMD_CFC_DEL:
5284255736Sdavidch	case ECORE_Q_CMD_TERMINATE:
5285255736Sdavidch	case ECORE_Q_CMD_EMPTY:
5286255736Sdavidch		return ecore_queue_send_cmd_cmn(sc, params);
5287255736Sdavidch	default:
5288255736Sdavidch		ECORE_ERR("Unknown command: %d\n", params->cmd);
5289255736Sdavidch		return ECORE_INVAL;
5290255736Sdavidch	}
5291255736Sdavidch}
5292255736Sdavidch
/**
 * ecore_queue_chk_transition - check state machine of a regular Queue
 *
 * @sc:		device handle
 * @o:		queue state object
 * @params:	queue state parameters (hold the requested command)
 *
 * (not Forwarding)
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         ECORE_INVAL otherwise.
 */
static int ecore_queue_chk_transition(struct bxe_softc *sc,
				      struct ecore_queue_sp_obj *o,
				      struct ecore_queue_state_params *params)
{
	/* next_state stays ECORE_Q_STATE_MAX unless the (state, cmd) pair
	 * below proves to be a legal transition.
	 */
	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
	enum ecore_queue_cmd cmd = params->cmd;
	struct ecore_queue_update_params *update_params =
		 &params->params.update;
	/* Number of tx-only connections after this command completes;
	 * starts from the current count and is adjusted per command.
	 */
	uint8_t next_tx_only = o->num_tx_only;

	/* Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = ECORE_Q_STATE_MAX;
	}

	/* Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending) {
		ECORE_ERR("Blocking transition since pending was %lx\n",
			  o->pending);
		return ECORE_BUSY;
	}

	switch (state) {
	case ECORE_Q_STATE_RESET:
		/* Out of reset: only INIT is legal */
		if (cmd == ECORE_Q_CMD_INIT)
			next_state = ECORE_Q_STATE_INITIALIZED;

		break;
	case ECORE_Q_STATE_INITIALIZED:
		/* SETUP brings the queue up; the target state depends on
		 * whether ECORE_Q_FLG_ACTIVE was requested.
		 */
		if (cmd == ECORE_Q_CMD_SETUP) {
			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
					   &params->params.setup.flags))
				next_state = ECORE_Q_STATE_ACTIVE;
			else
				next_state = ECORE_Q_STATE_INACTIVE;
		}

		break;
	case ECORE_Q_STATE_ACTIVE:
		if (cmd == ECORE_Q_CMD_DEACTIVATE)
			next_state = ECORE_Q_STATE_INACTIVE;

		/* EMPTY and UPDATE_TPA leave the state unchanged */
		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
			next_state = ECORE_Q_STATE_ACTIVE;

		/* Adding the first tx-only connection moves the queue to
		 * the multi-CoS state.
		 */
		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
			next_state = ECORE_Q_STATE_MULTI_COS;
			next_tx_only = 1;
		}

		else if (cmd == ECORE_Q_CMD_HALT)
			next_state = ECORE_Q_STATE_STOPPED;

		else if (cmd == ECORE_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 *  state accordingly.
			 */
			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
					   &update_params->update_flags) &&
			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
					    &update_params->update_flags))
				next_state = ECORE_Q_STATE_INACTIVE;
			else
				next_state = ECORE_Q_STATE_ACTIVE;
		}

		break;
	case ECORE_Q_STATE_MULTI_COS:
		if (cmd == ECORE_Q_CMD_TERMINATE)
			next_state = ECORE_Q_STATE_MCOS_TERMINATED;

		/* Another tx-only connection bumps the count */
		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
			next_state = ECORE_Q_STATE_MULTI_COS;
			next_tx_only = o->num_tx_only + 1;
		}

		/* EMPTY and UPDATE_TPA leave the state unchanged */
		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
			next_state = ECORE_Q_STATE_MULTI_COS;

		else if (cmd == ECORE_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 *  state accordingly.
			 */
			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
					   &update_params->update_flags) &&
			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
					    &update_params->update_flags))
				next_state = ECORE_Q_STATE_INACTIVE;
			else
				next_state = ECORE_Q_STATE_MULTI_COS;
		}

		break;
	case ECORE_Q_STATE_MCOS_TERMINATED:
		/* Deleting a tx-only connection; drop back to ACTIVE once
		 * the last one is gone.
		 */
		if (cmd == ECORE_Q_CMD_CFC_DEL) {
			next_tx_only = o->num_tx_only - 1;
			if (next_tx_only == 0)
				next_state = ECORE_Q_STATE_ACTIVE;
			else
				next_state = ECORE_Q_STATE_MULTI_COS;
		}

		break;
	case ECORE_Q_STATE_INACTIVE:
		if (cmd == ECORE_Q_CMD_ACTIVATE)
			next_state = ECORE_Q_STATE_ACTIVE;

		/* EMPTY and UPDATE_TPA leave the state unchanged */
		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
			next_state = ECORE_Q_STATE_INACTIVE;

		else if (cmd == ECORE_Q_CMD_HALT)
			next_state = ECORE_Q_STATE_STOPPED;

		else if (cmd == ECORE_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
					   &update_params->update_flags) &&
			    ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
					   &update_params->update_flags)){
				if (o->num_tx_only == 0)
					next_state = ECORE_Q_STATE_ACTIVE;
				else /* tx only queues exist for this queue */
					next_state = ECORE_Q_STATE_MULTI_COS;
			} else
				next_state = ECORE_Q_STATE_INACTIVE;
		}

		break;
	case ECORE_Q_STATE_STOPPED:
		/* Tear-down sequence: HALT -> TERMINATE -> CFC_DEL */
		if (cmd == ECORE_Q_CMD_TERMINATE)
			next_state = ECORE_Q_STATE_TERMINATED;

		break;
	case ECORE_Q_STATE_TERMINATED:
		if (cmd == ECORE_Q_CMD_CFC_DEL)
			next_state = ECORE_Q_STATE_RESET;

		break;
	default:
		ECORE_ERR("Illegal state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != ECORE_Q_STATE_MAX) {
		ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
			  state, cmd, next_state);
		o->next_state = next_state;
		o->next_tx_only = next_tx_only;
		return ECORE_SUCCESS;
	}

	ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);

	return ECORE_INVAL;
}
5474255736Sdavidch
5475255736Sdavidch/**
5476255736Sdavidch * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
5477255736Sdavidch *
5478255736Sdavidch * @sc:		device handle
5479255736Sdavidch * @o:
5480255736Sdavidch * @params:
5481255736Sdavidch *
5482255736Sdavidch * It both checks if the requested command is legal in a current
5483255736Sdavidch * state and, if it's legal, sets a `next_state' in the object
5484255736Sdavidch * that will be used in the completion flow to set the `state'
5485255736Sdavidch * of the object.
5486255736Sdavidch *
5487255736Sdavidch * returns 0 if a requested command is a legal transition,
5488255736Sdavidch *         ECORE_INVAL otherwise.
5489255736Sdavidch */
5490255736Sdavidchstatic int ecore_queue_chk_fwd_transition(struct bxe_softc *sc,
5491255736Sdavidch					  struct ecore_queue_sp_obj *o,
5492255736Sdavidch					struct ecore_queue_state_params *params)
5493255736Sdavidch{
5494255736Sdavidch	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5495255736Sdavidch	enum ecore_queue_cmd cmd = params->cmd;
5496255736Sdavidch
5497255736Sdavidch	switch (state) {
5498255736Sdavidch	case ECORE_Q_STATE_RESET:
5499255736Sdavidch		if (cmd == ECORE_Q_CMD_INIT)
5500255736Sdavidch			next_state = ECORE_Q_STATE_INITIALIZED;
5501255736Sdavidch
5502255736Sdavidch		break;
5503255736Sdavidch	case ECORE_Q_STATE_INITIALIZED:
5504255736Sdavidch		if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5505255736Sdavidch			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5506255736Sdavidch					   &params->params.tx_only.flags))
5507255736Sdavidch				next_state = ECORE_Q_STATE_ACTIVE;
5508255736Sdavidch			else
5509255736Sdavidch				next_state = ECORE_Q_STATE_INACTIVE;
5510255736Sdavidch		}
5511255736Sdavidch
5512255736Sdavidch		break;
5513255736Sdavidch	case ECORE_Q_STATE_ACTIVE:
5514255736Sdavidch	case ECORE_Q_STATE_INACTIVE:
5515255736Sdavidch		if (cmd == ECORE_Q_CMD_CFC_DEL)
5516255736Sdavidch			next_state = ECORE_Q_STATE_RESET;
5517255736Sdavidch
5518255736Sdavidch		break;
5519255736Sdavidch	default:
5520255736Sdavidch		ECORE_ERR("Illegal state: %d\n", state);
5521255736Sdavidch	}
5522255736Sdavidch
5523255736Sdavidch	/* Transition is assured */
5524255736Sdavidch	if (next_state != ECORE_Q_STATE_MAX) {
5525255736Sdavidch		ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5526255736Sdavidch			  state, cmd, next_state);
5527255736Sdavidch		o->next_state = next_state;
5528255736Sdavidch		return ECORE_SUCCESS;
5529255736Sdavidch	}
5530255736Sdavidch
5531255736Sdavidch	ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5532255736Sdavidch	return ECORE_INVAL;
5533255736Sdavidch}
5534255736Sdavidch
5535255736Sdavidchvoid ecore_init_queue_obj(struct bxe_softc *sc,
5536255736Sdavidch			  struct ecore_queue_sp_obj *obj,
5537255736Sdavidch			  uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt, uint8_t func_id,
5538255736Sdavidch			  void *rdata,
5539255736Sdavidch			  ecore_dma_addr_t rdata_mapping, unsigned long type)
5540255736Sdavidch{
5541255736Sdavidch	ECORE_MEMSET(obj, 0, sizeof(*obj));
5542255736Sdavidch
5543255736Sdavidch	/* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
5544255736Sdavidch	ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
5545255736Sdavidch
5546255736Sdavidch	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5547255736Sdavidch	obj->max_cos = cid_cnt;
5548255736Sdavidch	obj->cl_id = cl_id;
5549255736Sdavidch	obj->func_id = func_id;
5550255736Sdavidch	obj->rdata = rdata;
5551255736Sdavidch	obj->rdata_mapping = rdata_mapping;
5552255736Sdavidch	obj->type = type;
5553255736Sdavidch	obj->next_state = ECORE_Q_STATE_MAX;
5554255736Sdavidch
5555255736Sdavidch	if (CHIP_IS_E1x(sc))
5556255736Sdavidch		obj->send_cmd = ecore_queue_send_cmd_e1x;
5557255736Sdavidch	else
5558255736Sdavidch		obj->send_cmd = ecore_queue_send_cmd_e2;
5559255736Sdavidch
5560255736Sdavidch	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
5561255736Sdavidch		obj->check_transition = ecore_queue_chk_fwd_transition;
5562255736Sdavidch	else
5563255736Sdavidch	obj->check_transition = ecore_queue_chk_transition;
5564255736Sdavidch
5565255736Sdavidch	obj->complete_cmd = ecore_queue_comp_cmd;
5566255736Sdavidch	obj->wait_comp = ecore_queue_wait_comp;
5567255736Sdavidch	obj->set_pending = ecore_queue_set_pending;
5568255736Sdavidch}
5569255736Sdavidch
5570255736Sdavidch/* return a queue object's logical state*/
5571255736Sdavidchint ecore_get_q_logical_state(struct bxe_softc *sc,
5572255736Sdavidch			       struct ecore_queue_sp_obj *obj)
5573255736Sdavidch{
5574255736Sdavidch	switch (obj->state) {
5575255736Sdavidch	case ECORE_Q_STATE_ACTIVE:
5576255736Sdavidch	case ECORE_Q_STATE_MULTI_COS:
5577255736Sdavidch		return ECORE_Q_LOGICAL_STATE_ACTIVE;
5578255736Sdavidch	case ECORE_Q_STATE_RESET:
5579255736Sdavidch	case ECORE_Q_STATE_INITIALIZED:
5580255736Sdavidch	case ECORE_Q_STATE_MCOS_TERMINATED:
5581255736Sdavidch	case ECORE_Q_STATE_INACTIVE:
5582255736Sdavidch	case ECORE_Q_STATE_STOPPED:
5583255736Sdavidch	case ECORE_Q_STATE_TERMINATED:
5584255736Sdavidch	case ECORE_Q_STATE_FLRED:
5585255736Sdavidch		return ECORE_Q_LOGICAL_STATE_STOPPED;
5586255736Sdavidch	default:
5587255736Sdavidch		return ECORE_INVAL;
5588255736Sdavidch	}
5589255736Sdavidch}
5590255736Sdavidch
5591255736Sdavidch/********************** Function state object *********************************/
/* Return the function object's current state, or ECORE_F_STATE_MAX when a
 * state transition is still pending completion.
 */
enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc,
					   struct ecore_func_sp_obj *o)
{
	/* in the middle of transaction - return INVALID state */
	if (o->pending)
		return ECORE_F_STATE_MAX;

	/* ensure the order of reading of o->pending and o->state
	 * o->pending should be read first
	 */
	rmb();

	return o->state;
}
5606255736Sdavidch
5607255736Sdavidchstatic int ecore_func_wait_comp(struct bxe_softc *sc,
5608255736Sdavidch				struct ecore_func_sp_obj *o,
5609255736Sdavidch				enum ecore_func_cmd cmd)
5610255736Sdavidch{
5611255736Sdavidch	return ecore_state_wait(sc, cmd, &o->pending);
5612255736Sdavidch}
5613255736Sdavidch
5614255736Sdavidch/**
5615255736Sdavidch * ecore_func_state_change_comp - complete the state machine transition
5616255736Sdavidch *
5617255736Sdavidch * @sc:		device handle
5618255736Sdavidch * @o:
5619255736Sdavidch * @cmd:
5620255736Sdavidch *
5621255736Sdavidch * Called on state change transition. Completes the state
5622255736Sdavidch * machine transition only - no HW interaction.
5623255736Sdavidch */
static inline int ecore_func_state_change_comp(struct bxe_softc *sc,
					       struct ecore_func_sp_obj *o,
					       enum ecore_func_cmd cmd)
{
	/* Work on a local copy: the test-and-clear below must not touch
	 * o->pending itself; the real bit is cleared at the end, after
	 * the state fields have been updated.
	 */
	unsigned long cur_pending = o->pending;

	/* The completed command must have been marked pending, otherwise
	 * this MC reply does not match any outstanding request.
	 */
	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
		ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, ECORE_FUNC_ID(sc), o->state,
			  cur_pending, o->next_state);
		return ECORE_INVAL;
	}

	ECORE_MSG(sc,
		  "Completing command %d for func %d, setting state to %d\n",
		  cmd, ECORE_FUNC_ID(sc), o->next_state);

	o->state = o->next_state;
	o->next_state = ECORE_F_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	/* Clearing the pending bit is what releases waiters (see
	 * ecore_func_wait_comp()), so it must be the last store.
	 */
	ECORE_CLEAR_BIT(cmd, &o->pending);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();

	return ECORE_SUCCESS;
}
5654255736Sdavidch
5655255736Sdavidch/**
5656255736Sdavidch * ecore_func_comp_cmd - complete the state change command
5657255736Sdavidch *
5658255736Sdavidch * @sc:		device handle
5659255736Sdavidch * @o:
5660255736Sdavidch * @cmd:
5661255736Sdavidch *
5662255736Sdavidch * Checks that the arrived completion is expected.
5663255736Sdavidch */
5664255736Sdavidchstatic int ecore_func_comp_cmd(struct bxe_softc *sc,
5665255736Sdavidch			       struct ecore_func_sp_obj *o,
5666255736Sdavidch			       enum ecore_func_cmd cmd)
5667255736Sdavidch{
5668255736Sdavidch	/* Complete the state machine part first, check if it's a
5669255736Sdavidch	 * legal completion.
5670255736Sdavidch	 */
5671255736Sdavidch	int rc = ecore_func_state_change_comp(sc, o, cmd);
5672255736Sdavidch	return rc;
5673255736Sdavidch}
5674255736Sdavidch
5675255736Sdavidch/**
5676255736Sdavidch * ecore_func_chk_transition - perform function state machine transition
5677255736Sdavidch *
5678255736Sdavidch * @sc:		device handle
5679255736Sdavidch * @o:
5680255736Sdavidch * @params:
5681255736Sdavidch *
5682255736Sdavidch * It both checks if the requested command is legal in a current
5683255736Sdavidch * state and, if it's legal, sets a `next_state' in the object
5684255736Sdavidch * that will be used in the completion flow to set the `state'
5685255736Sdavidch * of the object.
5686255736Sdavidch *
5687255736Sdavidch * returns 0 if a requested command is a legal transition,
5688255736Sdavidch *         ECORE_INVAL otherwise.
5689255736Sdavidch */
static int ecore_func_chk_transition(struct bxe_softc *sc,
				     struct ecore_func_sp_obj *o,
				     struct ecore_func_state_params *params)
{
	enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
	enum ecore_func_cmd cmd = params->cmd;

	/* Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = ECORE_F_STATE_MAX;
	}

	/* Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return ECORE_BUSY;

	/* NOTE(review): past this point o->pending is known to be zero, so
	 * the ECORE_F_CMD_STOP pending tests below appear redundant; they
	 * are kept to match the upstream state-machine code.
	 */
	switch (state) {
	case ECORE_F_STATE_RESET:
		if (cmd == ECORE_F_CMD_HW_INIT)
			next_state = ECORE_F_STATE_INITIALIZED;

		break;
	case ECORE_F_STATE_INITIALIZED:
		if (cmd == ECORE_F_CMD_START)
			next_state = ECORE_F_STATE_STARTED;

		else if (cmd == ECORE_F_CMD_HW_RESET)
			next_state = ECORE_F_STATE_RESET;

		break;
	case ECORE_F_STATE_STARTED:
		if (cmd == ECORE_F_CMD_STOP)
			next_state = ECORE_F_STATE_INITIALIZED;
		/* afex ramrods can be sent only in started mode, and only
		 * if not pending for function_stop ramrod completion
		 * for these events - next state remained STARTED.
		 */
		else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_STARTED;

		else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_STARTED;

		/* Switch_update ramrod can be sent in either started or
		 * tx_stopped state, and it doesn't change the state.
		 */
		else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_STARTED;

		else if (cmd == ECORE_F_CMD_TX_STOP)
			next_state = ECORE_F_STATE_TX_STOPPED;

		break;
	case ECORE_F_STATE_TX_STOPPED:
		if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
		    (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_TX_STOPPED;

		else if (cmd == ECORE_F_CMD_TX_START)
			next_state = ECORE_F_STATE_STARTED;

		break;
	default:
		ECORE_ERR("Unknown state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != ECORE_F_STATE_MAX) {
		ECORE_MSG(sc, "Good function state transition: %d(%d)->%d\n",
			  state, cmd, next_state);
		o->next_state = next_state;
		return ECORE_SUCCESS;
	}

	ECORE_MSG(sc, "Bad function state transition request: %d %d\n",
		  state, cmd);

	return ECORE_INVAL;
}
5777255736Sdavidch
5778255736Sdavidch/**
5779255736Sdavidch * ecore_func_init_func - performs HW init at function stage
5780255736Sdavidch *
5781255736Sdavidch * @sc:		device handle
5782255736Sdavidch * @drv:
5783255736Sdavidch *
5784255736Sdavidch * Init HW when the current phase is
5785255736Sdavidch * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5786255736Sdavidch * HW blocks.
5787255736Sdavidch */
5788255736Sdavidchstatic inline int ecore_func_init_func(struct bxe_softc *sc,
5789255736Sdavidch				       const struct ecore_func_sp_drv_ops *drv)
5790255736Sdavidch{
5791255736Sdavidch	return drv->init_hw_func(sc);
5792255736Sdavidch}
5793255736Sdavidch
5794255736Sdavidch/**
5795255736Sdavidch * ecore_func_init_port - performs HW init at port stage
5796255736Sdavidch *
5797255736Sdavidch * @sc:		device handle
5798255736Sdavidch * @drv:
5799255736Sdavidch *
5800255736Sdavidch * Init HW when the current phase is
5801255736Sdavidch * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5802255736Sdavidch * FUNCTION-only HW blocks.
5803255736Sdavidch *
5804255736Sdavidch */
5805255736Sdavidchstatic inline int ecore_func_init_port(struct bxe_softc *sc,
5806255736Sdavidch				       const struct ecore_func_sp_drv_ops *drv)
5807255736Sdavidch{
5808255736Sdavidch	int rc = drv->init_hw_port(sc);
5809255736Sdavidch	if (rc)
5810255736Sdavidch		return rc;
5811255736Sdavidch
5812255736Sdavidch	return ecore_func_init_func(sc, drv);
5813255736Sdavidch}
5814255736Sdavidch
5815255736Sdavidch/**
5816255736Sdavidch * ecore_func_init_cmn_chip - performs HW init at chip-common stage
5817255736Sdavidch *
5818255736Sdavidch * @sc:		device handle
5819255736Sdavidch * @drv:
5820255736Sdavidch *
5821255736Sdavidch * Init HW when the current phase is
5822255736Sdavidch * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5823255736Sdavidch * PORT-only and FUNCTION-only HW blocks.
5824255736Sdavidch */
5825255736Sdavidchstatic inline int ecore_func_init_cmn_chip(struct bxe_softc *sc,
5826255736Sdavidch					const struct ecore_func_sp_drv_ops *drv)
5827255736Sdavidch{
5828255736Sdavidch	int rc = drv->init_hw_cmn_chip(sc);
5829255736Sdavidch	if (rc)
5830255736Sdavidch		return rc;
5831255736Sdavidch
5832255736Sdavidch	return ecore_func_init_port(sc, drv);
5833255736Sdavidch}
5834255736Sdavidch
5835255736Sdavidch/**
5836255736Sdavidch * ecore_func_init_cmn - performs HW init at common stage
5837255736Sdavidch *
5838255736Sdavidch * @sc:		device handle
5839255736Sdavidch * @drv:
5840255736Sdavidch *
5841255736Sdavidch * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5843255736Sdavidch * PORT-only and FUNCTION-only HW blocks.
5844255736Sdavidch */
5845255736Sdavidchstatic inline int ecore_func_init_cmn(struct bxe_softc *sc,
5846255736Sdavidch				      const struct ecore_func_sp_drv_ops *drv)
5847255736Sdavidch{
5848255736Sdavidch	int rc = drv->init_hw_cmn(sc);
5849255736Sdavidch	if (rc)
5850255736Sdavidch		return rc;
5851255736Sdavidch
5852255736Sdavidch	return ecore_func_init_port(sc, drv);
5853255736Sdavidch}
5854255736Sdavidch
5855255736Sdavidchstatic int ecore_func_hw_init(struct bxe_softc *sc,
5856255736Sdavidch			      struct ecore_func_state_params *params)
5857255736Sdavidch{
5858255736Sdavidch	uint32_t load_code = params->params.hw_init.load_phase;
5859255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
5860255736Sdavidch	const struct ecore_func_sp_drv_ops *drv = o->drv;
5861255736Sdavidch	int rc = 0;
5862255736Sdavidch
5863255736Sdavidch	ECORE_MSG(sc, "function %d  load_code %x\n",
5864255736Sdavidch		  ECORE_ABS_FUNC_ID(sc), load_code);
5865255736Sdavidch
5866255736Sdavidch	/* Prepare buffers for unzipping the FW */
5867255736Sdavidch	rc = drv->gunzip_init(sc);
5868255736Sdavidch	if (rc)
5869255736Sdavidch		return rc;
5870255736Sdavidch
5871255736Sdavidch	/* Prepare FW */
5872255736Sdavidch	rc = drv->init_fw(sc);
5873255736Sdavidch	if (rc) {
5874255736Sdavidch		ECORE_ERR("Error loading firmware\n");
5875255736Sdavidch		goto init_err;
5876255736Sdavidch	}
5877255736Sdavidch
5878255736Sdavidch	/* Handle the beginning of COMMON_XXX pases separately... */
5879255736Sdavidch	switch (load_code) {
5880255736Sdavidch	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5881255736Sdavidch		rc = ecore_func_init_cmn_chip(sc, drv);
5882255736Sdavidch		if (rc)
5883255736Sdavidch			goto init_err;
5884255736Sdavidch
5885255736Sdavidch		break;
5886255736Sdavidch	case FW_MSG_CODE_DRV_LOAD_COMMON:
5887255736Sdavidch		rc = ecore_func_init_cmn(sc, drv);
5888255736Sdavidch		if (rc)
5889255736Sdavidch			goto init_err;
5890255736Sdavidch
5891255736Sdavidch		break;
5892255736Sdavidch	case FW_MSG_CODE_DRV_LOAD_PORT:
5893255736Sdavidch		rc = ecore_func_init_port(sc, drv);
5894255736Sdavidch		if (rc)
5895255736Sdavidch			goto init_err;
5896255736Sdavidch
5897255736Sdavidch		break;
5898255736Sdavidch	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5899255736Sdavidch		rc = ecore_func_init_func(sc, drv);
5900255736Sdavidch		if (rc)
5901255736Sdavidch			goto init_err;
5902255736Sdavidch
5903255736Sdavidch		break;
5904255736Sdavidch	default:
5905255736Sdavidch		ECORE_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5906255736Sdavidch		rc = ECORE_INVAL;
5907255736Sdavidch	}
5908255736Sdavidch
5909255736Sdavidchinit_err:
5910255736Sdavidch	drv->gunzip_end(sc);
5911255736Sdavidch
5912255736Sdavidch	/* In case of success, complete the command immediately: no ramrods
5913255736Sdavidch	 * have been sent.
5914255736Sdavidch	 */
5915255736Sdavidch	if (!rc)
5916255736Sdavidch		o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
5917255736Sdavidch
5918255736Sdavidch	return rc;
5919255736Sdavidch}
5920255736Sdavidch
5921255736Sdavidch/**
5922255736Sdavidch * ecore_func_reset_func - reset HW at function stage
5923255736Sdavidch *
5924255736Sdavidch * @sc:		device handle
5925255736Sdavidch * @drv:
5926255736Sdavidch *
5927255736Sdavidch * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5928255736Sdavidch * FUNCTION-only HW blocks.
5929255736Sdavidch */
5930255736Sdavidchstatic inline void ecore_func_reset_func(struct bxe_softc *sc,
5931255736Sdavidch					const struct ecore_func_sp_drv_ops *drv)
5932255736Sdavidch{
5933255736Sdavidch	drv->reset_hw_func(sc);
5934255736Sdavidch}
5935255736Sdavidch
5936255736Sdavidch/**
 * ecore_func_reset_port - reset HW at port stage
5938255736Sdavidch *
5939255736Sdavidch * @sc:		device handle
5940255736Sdavidch * @drv:
5941255736Sdavidch *
5942255736Sdavidch * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5943255736Sdavidch * FUNCTION-only and PORT-only HW blocks.
5944255736Sdavidch *
5945255736Sdavidch *                 !!!IMPORTANT!!!
5946255736Sdavidch *
5947255736Sdavidch * It's important to call reset_port before reset_func() as the last thing
5948255736Sdavidch * reset_func does is pf_disable() thus disabling PGLUE_B, which
5949255736Sdavidch * makes impossible any DMAE transactions.
5950255736Sdavidch */
5951255736Sdavidchstatic inline void ecore_func_reset_port(struct bxe_softc *sc,
5952255736Sdavidch					const struct ecore_func_sp_drv_ops *drv)
5953255736Sdavidch{
5954255736Sdavidch	drv->reset_hw_port(sc);
5955255736Sdavidch	ecore_func_reset_func(sc, drv);
5956255736Sdavidch}
5957255736Sdavidch
5958255736Sdavidch/**
 * ecore_func_reset_cmn - reset HW at common stage
5960255736Sdavidch *
5961255736Sdavidch * @sc:		device handle
5962255736Sdavidch * @drv:
5963255736Sdavidch *
5964255736Sdavidch * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5965255736Sdavidch * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5966255736Sdavidch * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5967255736Sdavidch */
5968255736Sdavidchstatic inline void ecore_func_reset_cmn(struct bxe_softc *sc,
5969255736Sdavidch					const struct ecore_func_sp_drv_ops *drv)
5970255736Sdavidch{
5971255736Sdavidch	ecore_func_reset_port(sc, drv);
5972255736Sdavidch	drv->reset_hw_cmn(sc);
5973255736Sdavidch}
5974255736Sdavidch
5975255736Sdavidchstatic inline int ecore_func_hw_reset(struct bxe_softc *sc,
5976255736Sdavidch				      struct ecore_func_state_params *params)
5977255736Sdavidch{
5978255736Sdavidch	uint32_t reset_phase = params->params.hw_reset.reset_phase;
5979255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
5980255736Sdavidch	const struct ecore_func_sp_drv_ops *drv = o->drv;
5981255736Sdavidch
5982255736Sdavidch	ECORE_MSG(sc, "function %d  reset_phase %x\n", ECORE_ABS_FUNC_ID(sc),
5983255736Sdavidch		  reset_phase);
5984255736Sdavidch
5985255736Sdavidch	switch (reset_phase) {
5986255736Sdavidch	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5987255736Sdavidch		ecore_func_reset_cmn(sc, drv);
5988255736Sdavidch		break;
5989255736Sdavidch	case FW_MSG_CODE_DRV_UNLOAD_PORT:
5990255736Sdavidch		ecore_func_reset_port(sc, drv);
5991255736Sdavidch		break;
5992255736Sdavidch	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5993255736Sdavidch		ecore_func_reset_func(sc, drv);
5994255736Sdavidch		break;
5995255736Sdavidch	default:
5996255736Sdavidch		ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n",
5997255736Sdavidch			  reset_phase);
5998255736Sdavidch		break;
5999255736Sdavidch	}
6000255736Sdavidch
6001255736Sdavidch	/* Complete the command immediately: no ramrods have been sent. */
6002255736Sdavidch	o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
6003255736Sdavidch
6004255736Sdavidch	return ECORE_SUCCESS;
6005255736Sdavidch}
6006255736Sdavidch
6007255736Sdavidchstatic inline int ecore_func_send_start(struct bxe_softc *sc,
6008255736Sdavidch					struct ecore_func_state_params *params)
6009255736Sdavidch{
6010255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
6011255736Sdavidch	struct function_start_data *rdata =
6012255736Sdavidch		(struct function_start_data *)o->rdata;
6013255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6014255736Sdavidch	struct ecore_func_start_params *start_params = &params->params.start;
6015255736Sdavidch
6016255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6017255736Sdavidch
6018255736Sdavidch	/* Fill the ramrod data with provided parameters */
6019255736Sdavidch	rdata->function_mode	= (uint8_t)start_params->mf_mode;
6020255736Sdavidch	rdata->sd_vlan_tag	= ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
6021255736Sdavidch	rdata->path_id		= ECORE_PATH_ID(sc);
6022255736Sdavidch	rdata->network_cos_mode	= start_params->network_cos_mode;
6023255736Sdavidch	rdata->gre_tunnel_mode	= start_params->gre_tunnel_mode;
6024255736Sdavidch	rdata->gre_tunnel_rss	= start_params->gre_tunnel_rss;
6025255736Sdavidch
6026255736Sdavidch	/*
6027255736Sdavidch	 *  No need for an explicit memory barrier here as long we would
6028255736Sdavidch	 *  need to ensure the ordering of writing to the SPQ element
6029255736Sdavidch	 *  and updating of the SPQ producer which involves a memory
6030255736Sdavidch	 *  read and we will have to put a full memory barrier there
6031255736Sdavidch	 *  (inside ecore_sp_post()).
6032255736Sdavidch	 */
6033255736Sdavidch
6034255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
6035255736Sdavidch			     data_mapping, NONE_CONNECTION_TYPE);
6036255736Sdavidch}
6037255736Sdavidch
6038255736Sdavidchstatic inline int ecore_func_send_switch_update(struct bxe_softc *sc,
6039255736Sdavidch					struct ecore_func_state_params *params)
6040255736Sdavidch{
6041255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
6042255736Sdavidch	struct function_update_data *rdata =
6043255736Sdavidch		(struct function_update_data *)o->rdata;
6044255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6045255736Sdavidch	struct ecore_func_switch_update_params *switch_update_params =
6046255736Sdavidch		&params->params.switch_update;
6047255736Sdavidch
6048255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6049255736Sdavidch
6050255736Sdavidch	/* Fill the ramrod data with provided parameters */
6051255736Sdavidch	rdata->tx_switch_suspend_change_flg = 1;
6052255736Sdavidch	rdata->tx_switch_suspend = switch_update_params->suspend;
6053255736Sdavidch	rdata->echo = SWITCH_UPDATE;
6054255736Sdavidch
6055255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6056255736Sdavidch			     data_mapping, NONE_CONNECTION_TYPE);
6057255736Sdavidch}
6058255736Sdavidch
6059255736Sdavidchstatic inline int ecore_func_send_afex_update(struct bxe_softc *sc,
6060255736Sdavidch					 struct ecore_func_state_params *params)
6061255736Sdavidch{
6062255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
6063255736Sdavidch	struct function_update_data *rdata =
6064255736Sdavidch		(struct function_update_data *)o->afex_rdata;
6065255736Sdavidch	ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
6066255736Sdavidch	struct ecore_func_afex_update_params *afex_update_params =
6067255736Sdavidch		&params->params.afex_update;
6068255736Sdavidch
6069255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6070255736Sdavidch
6071255736Sdavidch	/* Fill the ramrod data with provided parameters */
6072255736Sdavidch	rdata->vif_id_change_flg = 1;
6073255736Sdavidch	rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
6074255736Sdavidch	rdata->afex_default_vlan_change_flg = 1;
6075255736Sdavidch	rdata->afex_default_vlan =
6076255736Sdavidch		ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
6077255736Sdavidch	rdata->allowed_priorities_change_flg = 1;
6078255736Sdavidch	rdata->allowed_priorities = afex_update_params->allowed_priorities;
6079255736Sdavidch	rdata->echo = AFEX_UPDATE;
6080255736Sdavidch
6081255736Sdavidch	/*  No need for an explicit memory barrier here as long we would
6082255736Sdavidch	 *  need to ensure the ordering of writing to the SPQ element
6083255736Sdavidch	 *  and updating of the SPQ producer which involves a memory
6084255736Sdavidch	 *  read and we will have to put a full memory barrier there
6085255736Sdavidch	 *  (inside ecore_sp_post()).
6086255736Sdavidch	 */
6087255736Sdavidch	ECORE_MSG(sc,
6088255736Sdavidch		  "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
6089255736Sdavidch		  rdata->vif_id,
6090255736Sdavidch		  rdata->afex_default_vlan, rdata->allowed_priorities);
6091255736Sdavidch
6092255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6093255736Sdavidch			     data_mapping, NONE_CONNECTION_TYPE);
6094255736Sdavidch}
6095255736Sdavidch
static
inline int ecore_func_send_afex_viflists(struct bxe_softc *sc,
					 struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	struct afex_vif_list_ramrod_data *rdata =
		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
	struct ecore_func_afex_viflists_params *afex_vif_params =
		&params->params.afex_viflists;
	/* NOTE(review): the whole ramrod structure is reinterpreted as a
	 * single uint64_t and passed by value to ecore_sp_post() (see the
	 * comment at the call below), which assumes sizeof(*rdata) <= 8
	 * and relies on type punning -- confirm against the HSI layout.
	 */
	uint64_t *p_rdata = (uint64_t *)rdata;

	ECORE_MEMSET(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->vif_list_index = ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
	rdata->func_bit_map          = afex_vif_params->func_bit_map;
	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
	rdata->func_to_clear         = afex_vif_params->func_to_clear;

	/* send in echo type of sub command */
	rdata->echo = afex_vif_params->afex_vif_list_command;

	/*  No need for an explicit memory barrier here as long we would
	 *  need to ensure the ordering of writing to the SPQ element
	 *  and updating of the SPQ producer which involves a memory
	 *  read and we will have to put a full memory barrier there
	 *  (inside ecore_sp_post()).
	 */

	ECORE_MSG(sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
		  rdata->afex_vif_list_command, rdata->vif_list_index,
		  rdata->func_bit_map, rdata->func_to_clear);

	/* this ramrod sends data directly and not through DMA mapping */
	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
			     *p_rdata, NONE_CONNECTION_TYPE);
}
6133255736Sdavidch
6134255736Sdavidchstatic inline int ecore_func_send_stop(struct bxe_softc *sc,
6135255736Sdavidch				       struct ecore_func_state_params *params)
6136255736Sdavidch{
6137255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
6138255736Sdavidch			     NONE_CONNECTION_TYPE);
6139255736Sdavidch}
6140255736Sdavidch
6141255736Sdavidchstatic inline int ecore_func_send_tx_stop(struct bxe_softc *sc,
6142255736Sdavidch				       struct ecore_func_state_params *params)
6143255736Sdavidch{
6144255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
6145255736Sdavidch			     NONE_CONNECTION_TYPE);
6146255736Sdavidch}
6147255736Sdavidchstatic inline int ecore_func_send_tx_start(struct bxe_softc *sc,
6148255736Sdavidch				       struct ecore_func_state_params *params)
6149255736Sdavidch{
6150255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
6151255736Sdavidch	struct flow_control_configuration *rdata =
6152255736Sdavidch		(struct flow_control_configuration *)o->rdata;
6153255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6154255736Sdavidch	struct ecore_func_tx_start_params *tx_start_params =
6155255736Sdavidch		&params->params.tx_start;
6156255736Sdavidch	int i;
6157255736Sdavidch
6158255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6159255736Sdavidch
6160255736Sdavidch	rdata->dcb_enabled = tx_start_params->dcb_enabled;
6161255736Sdavidch	rdata->dcb_version = tx_start_params->dcb_version;
6162255736Sdavidch	rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
6163255736Sdavidch
6164255736Sdavidch	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6165255736Sdavidch		rdata->traffic_type_to_priority_cos[i] =
6166255736Sdavidch			tx_start_params->traffic_type_to_priority_cos[i];
6167255736Sdavidch
6168255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6169255736Sdavidch			     data_mapping, NONE_CONNECTION_TYPE);
6170255736Sdavidch}
6171255736Sdavidch
6172255736Sdavidchstatic int ecore_func_send_cmd(struct bxe_softc *sc,
6173255736Sdavidch			       struct ecore_func_state_params *params)
6174255736Sdavidch{
6175255736Sdavidch	switch (params->cmd) {
6176255736Sdavidch	case ECORE_F_CMD_HW_INIT:
6177255736Sdavidch		return ecore_func_hw_init(sc, params);
6178255736Sdavidch	case ECORE_F_CMD_START:
6179255736Sdavidch		return ecore_func_send_start(sc, params);
6180255736Sdavidch	case ECORE_F_CMD_STOP:
6181255736Sdavidch		return ecore_func_send_stop(sc, params);
6182255736Sdavidch	case ECORE_F_CMD_HW_RESET:
6183255736Sdavidch		return ecore_func_hw_reset(sc, params);
6184255736Sdavidch	case ECORE_F_CMD_AFEX_UPDATE:
6185255736Sdavidch		return ecore_func_send_afex_update(sc, params);
6186255736Sdavidch	case ECORE_F_CMD_AFEX_VIFLISTS:
6187255736Sdavidch		return ecore_func_send_afex_viflists(sc, params);
6188255736Sdavidch	case ECORE_F_CMD_TX_STOP:
6189255736Sdavidch		return ecore_func_send_tx_stop(sc, params);
6190255736Sdavidch	case ECORE_F_CMD_TX_START:
6191255736Sdavidch		return ecore_func_send_tx_start(sc, params);
6192255736Sdavidch	case ECORE_F_CMD_SWITCH_UPDATE:
6193255736Sdavidch		return ecore_func_send_switch_update(sc, params);
6194255736Sdavidch	default:
6195255736Sdavidch		ECORE_ERR("Unknown command: %d\n", params->cmd);
6196255736Sdavidch		return ECORE_INVAL;
6197255736Sdavidch	}
6198255736Sdavidch}
6199255736Sdavidch
6200255736Sdavidchvoid ecore_init_func_obj(struct bxe_softc *sc,
6201255736Sdavidch			 struct ecore_func_sp_obj *obj,
6202255736Sdavidch			 void *rdata, ecore_dma_addr_t rdata_mapping,
6203255736Sdavidch			 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
6204255736Sdavidch			 struct ecore_func_sp_drv_ops *drv_iface)
6205255736Sdavidch{
6206255736Sdavidch	ECORE_MEMSET(obj, 0, sizeof(*obj));
6207255736Sdavidch
6208255736Sdavidch	ECORE_MUTEX_INIT(&obj->one_pending_mutex);
6209255736Sdavidch
6210255736Sdavidch	obj->rdata = rdata;
6211255736Sdavidch	obj->rdata_mapping = rdata_mapping;
6212255736Sdavidch	obj->afex_rdata = afex_rdata;
6213255736Sdavidch	obj->afex_rdata_mapping = afex_rdata_mapping;
6214255736Sdavidch	obj->send_cmd = ecore_func_send_cmd;
6215255736Sdavidch	obj->check_transition = ecore_func_chk_transition;
6216255736Sdavidch	obj->complete_cmd = ecore_func_comp_cmd;
6217255736Sdavidch	obj->wait_comp = ecore_func_wait_comp;
6218255736Sdavidch	obj->drv = drv_iface;
6219255736Sdavidch}
6220255736Sdavidch
6221255736Sdavidch/**
6222255736Sdavidch * ecore_func_state_change - perform Function state change transition
6223255736Sdavidch *
6224255736Sdavidch * @sc:		device handle
6225255736Sdavidch * @params:	parameters to perform the transaction
6226255736Sdavidch *
 * returns 0 in case of a successfully completed transition,
 *         a negative error code in case of failure, or a positive
 *         (EBUSY) value if a completion is still pending (possible
 *         only for asynchronous commands, i.e. when RAMROD_COMP_WAIT
 *         is not set in params->ramrod_flags).
6233255736Sdavidch */
int ecore_func_state_change(struct bxe_softc *sc,
			    struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	int rc, cnt = 300;	/* 300 polls x 10ms sleep = up to ~3s */
	enum ecore_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	/* Only one function-level command may be in flight at a time. */
	ECORE_MUTEX_LOCK(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	rc = o->check_transition(sc, o, params);
	if ((rc == ECORE_BUSY) &&
	    (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
		/* A previous command is still pending and the caller asked
		 * for retries: poll for completion, dropping the mutex
		 * around each sleep so the completion path can take it.
		 */
		while ((rc == ECORE_BUSY) && (--cnt > 0)) {
			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
			ECORE_MSLEEP(10);
			ECORE_MUTEX_LOCK(&o->one_pending_mutex);
			rc = o->check_transition(sc, o, params);
		}
		if (rc == ECORE_BUSY) {
			/* Still busy after all retries - give up. */
			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
			ECORE_ERR("timeout waiting for previous ramrod completion\n");
			return rc;
		}
	} else if (rc) {
		/* Transition is illegal in the current state. */
		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
		return rc;
	}

	/* Set "pending" bit */
	ECORE_SET_BIT(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		/* Complete the transition locally without posting a ramrod. */
		ecore_func_state_change_comp(sc, o, cmd);
		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(sc, params);

		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);

		if (rc) {
			/* Posting failed: roll back the pending bit so a
			 * subsequent command is not blocked forever.
			 */
			o->next_state = ECORE_F_STATE_MAX;
			ECORE_CLEAR_BIT(cmd, pending);
			ECORE_SMP_MB_AFTER_CLEAR_BIT();
			return rc;
		}

		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			/* Synchronous mode: block until the completion for
			 * this command arrives.
			 */
			rc = o->wait_comp(sc, o, cmd);
			if (rc)
				return rc;

			return ECORE_SUCCESS;
		}
	}

	/* Asynchronous mode: report whether the command is still pending. */
	return ECORE_RET_PENDING(cmd, pending);
}
6295