1255736Sdavidch/*-
2296071Sdavidcs * Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
3255736Sdavidch *
4255736Sdavidch * Redistribution and use in source and binary forms, with or without
5255736Sdavidch * modification, are permitted provided that the following conditions
6255736Sdavidch * are met:
7255736Sdavidch *
8255736Sdavidch * 1. Redistributions of source code must retain the above copyright
9255736Sdavidch *    notice, this list of conditions and the following disclaimer.
10255736Sdavidch * 2. Redistributions in binary form must reproduce the above copyright
11255736Sdavidch *    notice, this list of conditions and the following disclaimer in the
12255736Sdavidch *    documentation and/or other materials provided with the distribution.
13255736Sdavidch *
14296071Sdavidcs * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15255736Sdavidch * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16255736Sdavidch * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17255736Sdavidch * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18255736Sdavidch * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19255736Sdavidch * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20255736Sdavidch * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21255736Sdavidch * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22255736Sdavidch * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23255736Sdavidch * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24255736Sdavidch * THE POSSIBILITY OF SUCH DAMAGE.
25255736Sdavidch */
26255736Sdavidch
27255736Sdavidch#include <sys/cdefs.h>
28255736Sdavidch__FBSDID("$FreeBSD$");
29255736Sdavidch
30255736Sdavidch#include "bxe.h"
31255736Sdavidch#include "ecore_init.h"
32255736Sdavidch
33296071Sdavidcs
34296071Sdavidcs
35296071Sdavidcs
36255736Sdavidch/**** Exe Queue interfaces ****/
37255736Sdavidch
/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @sc:		driver handle
 * @o:		pointer to the object
 * @exe_len:	length
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @remove:	remove function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
49255736Sdavidchstatic inline void ecore_exe_queue_init(struct bxe_softc *sc,
50255736Sdavidch					struct ecore_exe_queue_obj *o,
51255736Sdavidch					int exe_len,
52255736Sdavidch					union ecore_qable_obj *owner,
53255736Sdavidch					exe_q_validate validate,
54255736Sdavidch					exe_q_remove remove,
55255736Sdavidch					exe_q_optimize optimize,
56255736Sdavidch					exe_q_execute exec,
57255736Sdavidch					exe_q_get get)
58255736Sdavidch{
59255736Sdavidch	ECORE_MEMSET(o, 0, sizeof(*o));
60255736Sdavidch
61255736Sdavidch	ECORE_LIST_INIT(&o->exe_queue);
62255736Sdavidch	ECORE_LIST_INIT(&o->pending_comp);
63255736Sdavidch
64255736Sdavidch	ECORE_SPIN_LOCK_INIT(&o->lock, sc);
65255736Sdavidch
66255736Sdavidch	o->exe_chunk_len = exe_len;
67255736Sdavidch	o->owner         = owner;
68255736Sdavidch
69255736Sdavidch	/* Owner specific callbacks */
70255736Sdavidch	o->validate      = validate;
71255736Sdavidch	o->remove        = remove;
72255736Sdavidch	o->optimize      = optimize;
73255736Sdavidch	o->execute       = exec;
74255736Sdavidch	o->get           = get;
75255736Sdavidch
76255736Sdavidch	ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d\n",
77255736Sdavidch		  exe_len);
78255736Sdavidch}
79255736Sdavidch
80255736Sdavidchstatic inline void ecore_exe_queue_free_elem(struct bxe_softc *sc,
81255736Sdavidch					     struct ecore_exeq_elem *elem)
82255736Sdavidch{
83255736Sdavidch	ECORE_MSG(sc, "Deleting an exe_queue element\n");
84255736Sdavidch	ECORE_FREE(sc, elem, sizeof(*elem));
85255736Sdavidch}
86255736Sdavidch
87255736Sdavidchstatic inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
88255736Sdavidch{
89255736Sdavidch	struct ecore_exeq_elem *elem;
90255736Sdavidch	int cnt = 0;
91255736Sdavidch
92255736Sdavidch	ECORE_SPIN_LOCK_BH(&o->lock);
93255736Sdavidch
94255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
95255736Sdavidch				  struct ecore_exeq_elem)
96255736Sdavidch		cnt++;
97255736Sdavidch
98255736Sdavidch	ECORE_SPIN_UNLOCK_BH(&o->lock);
99255736Sdavidch
100255736Sdavidch	return cnt;
101255736Sdavidch}
102255736Sdavidch
/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @sc:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
113255736Sdavidchstatic inline int ecore_exe_queue_add(struct bxe_softc *sc,
114255736Sdavidch				      struct ecore_exe_queue_obj *o,
115255736Sdavidch				      struct ecore_exeq_elem *elem,
116255736Sdavidch				      bool restore)
117255736Sdavidch{
118255736Sdavidch	int rc;
119255736Sdavidch
120255736Sdavidch	ECORE_SPIN_LOCK_BH(&o->lock);
121255736Sdavidch
122255736Sdavidch	if (!restore) {
123255736Sdavidch		/* Try to cancel this element queue */
124255736Sdavidch		rc = o->optimize(sc, o->owner, elem);
125255736Sdavidch		if (rc)
126255736Sdavidch			goto free_and_exit;
127255736Sdavidch
128255736Sdavidch		/* Check if this request is ok */
129255736Sdavidch		rc = o->validate(sc, o->owner, elem);
130255736Sdavidch		if (rc) {
131255736Sdavidch			ECORE_MSG(sc, "Preamble failed: %d\n", rc);
132255736Sdavidch			goto free_and_exit;
133255736Sdavidch		}
134255736Sdavidch	}
135255736Sdavidch
136255736Sdavidch	/* If so, add it to the execution queue */
137255736Sdavidch	ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);
138255736Sdavidch
139255736Sdavidch	ECORE_SPIN_UNLOCK_BH(&o->lock);
140255736Sdavidch
141255736Sdavidch	return ECORE_SUCCESS;
142255736Sdavidch
143255736Sdavidchfree_and_exit:
144255736Sdavidch	ecore_exe_queue_free_elem(sc, elem);
145255736Sdavidch
146255736Sdavidch	ECORE_SPIN_UNLOCK_BH(&o->lock);
147255736Sdavidch
148255736Sdavidch	return rc;
149255736Sdavidch}
150255736Sdavidch
151255736Sdavidchstatic inline void __ecore_exe_queue_reset_pending(
152255736Sdavidch	struct bxe_softc *sc,
153255736Sdavidch	struct ecore_exe_queue_obj *o)
154255736Sdavidch{
155255736Sdavidch	struct ecore_exeq_elem *elem;
156255736Sdavidch
157255736Sdavidch	while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
158255736Sdavidch		elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
159255736Sdavidch					      struct ecore_exeq_elem,
160255736Sdavidch					      link);
161255736Sdavidch
162255736Sdavidch		ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
163255736Sdavidch		ecore_exe_queue_free_elem(sc, elem);
164255736Sdavidch	}
165255736Sdavidch}
166255736Sdavidch
167255736Sdavidch/**
168255736Sdavidch * ecore_exe_queue_step - execute one execution chunk atomically
169255736Sdavidch *
170255736Sdavidch * @sc:			driver handle
171255736Sdavidch * @o:			queue
172255736Sdavidch * @ramrod_flags:	flags
173255736Sdavidch *
174255736Sdavidch * (Should be called while holding the exe_queue->lock).
175255736Sdavidch */
static inline int ecore_exe_queue_step(struct bxe_softc *sc,
				       struct ecore_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	/* 'spacer' is a dummy element used only to keep pending_comp
	 * non-empty while an element migrates between the two lists.
	 */
	ECORE_MEMSET(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
			__ecore_exe_queue_reset_pending(sc, o);
		} else {
			return ECORE_PENDING;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
					      struct ecore_exeq_elem,
					      link);
		ECORE_DBG_BREAK_IF(!elem->cmd_len);

		/* Stop filling the chunk as soon as the next element would
		 * overflow the configured chunk length.
		 */
		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * ecore_exe_queue_empty() without locking.
			 */
			ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
			mb();
			ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
			ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
			ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
		} else
			break;
	}

	/* Sanity check: nothing was moved, so there is nothing to execute. */
	if (!cur_len)
		return ECORE_SUCCESS;

	rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 *  and reset the pending_comp.
		 */
		ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__ecore_exe_queue_reset_pending(sc, o);

	/* rc > 0 (e.g. ECORE_PENDING) leaves pending_comp in place until the
	 * FW completion arrives.
	 */
	return rc;
}
242255736Sdavidch
243255736Sdavidchstatic inline bool ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
244255736Sdavidch{
245255736Sdavidch	bool empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);
246255736Sdavidch
247255736Sdavidch	/* Don't reorder!!! */
248255736Sdavidch	mb();
249255736Sdavidch
250255736Sdavidch	return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
251255736Sdavidch}
252255736Sdavidch
253255736Sdavidchstatic inline struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(
254255736Sdavidch	struct bxe_softc *sc)
255255736Sdavidch{
256255736Sdavidch	ECORE_MSG(sc, "Allocating a new exe_queue element\n");
257255736Sdavidch	return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC,
258255736Sdavidch			    sc);
259255736Sdavidch}
260255736Sdavidch
261255736Sdavidch/************************ raw_obj functions ***********************************/
262255736Sdavidchstatic bool ecore_raw_check_pending(struct ecore_raw_obj *o)
263255736Sdavidch{
264255736Sdavidch	/*
265255736Sdavidch     * !! converts the value returned by ECORE_TEST_BIT such that it
266255736Sdavidch     * is guaranteed not to be truncated regardless of bool definition.
267255736Sdavidch	 *
268255736Sdavidch	 * Note we cannot simply define the function's return value type
269255736Sdavidch     * to match the type returned by ECORE_TEST_BIT, as it varies by
270255736Sdavidch     * platform/implementation.
271255736Sdavidch	 */
272255736Sdavidch
273255736Sdavidch	return !!ECORE_TEST_BIT(o->state, o->pstate);
274255736Sdavidch}
275255736Sdavidch
/* Clear the raw object's pending-state bit, fenced on both sides. */
static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_CLEAR_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}
282255736Sdavidch
/* Set the raw object's pending-state bit.
 * NOTE(review): the *_CLEAR_BIT barrier macros are reused around a SET -
 * presumably they are plain memory barriers on either side; verify in the
 * ecore platform headers.
 */
static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_SET_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}
289255736Sdavidch
/**
 * ecore_state_wait - wait until the given bit(state) is cleared
 *
 * @sc:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static inline int ecore_state_wait(struct bxe_softc *sc, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;


	/* Emulation platforms are far slower - wait much longer there. */
	if (CHIP_REV_IS_EMUL(sc))
		cnt *= 20;

	ECORE_MSG(sc, "waiting for state to become %d\n", state);

	ECORE_MIGHT_SLEEP();
	while (cnt--) {
		/* Bit cleared => the state we were waiting on completed. */
		if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
			ECORE_MSG(sc, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return ECORE_SUCCESS;
		}

		/* NOTE(review): 'delay_us' is not declared in this function;
		 * presumably it comes from the ecore platform headers -
		 * verify, otherwise this will not compile.
		 */
		ECORE_WAIT(sc, delay_us);

		/* Abort the wait if the device has already paniced. */
		if (sc->panic)
			return ECORE_IO;
	}

	/* timeout! */
	ECORE_ERR("timeout waiting for state %d\n", state);
#ifdef ECORE_STOP_ON_ERROR
	ecore_panic();
#endif

	return ECORE_TIMEOUT;
}
333255736Sdavidch
/* Block until the raw object's pending-state bit is cleared. */
static int ecore_raw_wait(struct bxe_softc *sc, struct ecore_raw_obj *raw)
{
	return ecore_state_wait(sc, raw->state, raw->pstate);
}
338255736Sdavidch
339255736Sdavidch/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
340255736Sdavidch/* credit handling callbacks */
341255736Sdavidchstatic bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
342255736Sdavidch{
343255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
344255736Sdavidch
345255736Sdavidch	ECORE_DBG_BREAK_IF(!mp);
346255736Sdavidch
347255736Sdavidch	return mp->get_entry(mp, offset);
348255736Sdavidch}
349255736Sdavidch
350255736Sdavidchstatic bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
351255736Sdavidch{
352255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
353255736Sdavidch
354255736Sdavidch	ECORE_DBG_BREAK_IF(!mp);
355255736Sdavidch
356255736Sdavidch	return mp->get(mp, 1);
357255736Sdavidch}
358255736Sdavidch
359255736Sdavidchstatic bool ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset)
360255736Sdavidch{
361255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
362255736Sdavidch
363255736Sdavidch	ECORE_DBG_BREAK_IF(!vp);
364255736Sdavidch
365255736Sdavidch	return vp->get_entry(vp, offset);
366255736Sdavidch}
367255736Sdavidch
368255736Sdavidchstatic bool ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o)
369255736Sdavidch{
370255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
371255736Sdavidch
372255736Sdavidch	ECORE_DBG_BREAK_IF(!vp);
373255736Sdavidch
374255736Sdavidch	return vp->get(vp, 1);
375255736Sdavidch}
376255736Sdavidch
377255736Sdavidchstatic bool ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
378255736Sdavidch{
379255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
380255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
381255736Sdavidch
382255736Sdavidch	if (!mp->get(mp, 1))
383255736Sdavidch		return FALSE;
384255736Sdavidch
385255736Sdavidch	if (!vp->get(vp, 1)) {
386255736Sdavidch		mp->put(mp, 1);
387255736Sdavidch		return FALSE;
388255736Sdavidch	}
389255736Sdavidch
390255736Sdavidch	return TRUE;
391255736Sdavidch}
392255736Sdavidch
393255736Sdavidchstatic bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
394255736Sdavidch{
395255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
396255736Sdavidch
397255736Sdavidch	return mp->put_entry(mp, offset);
398255736Sdavidch}
399255736Sdavidch
400255736Sdavidchstatic bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
401255736Sdavidch{
402255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
403255736Sdavidch
404255736Sdavidch	return mp->put(mp, 1);
405255736Sdavidch}
406255736Sdavidch
407255736Sdavidchstatic bool ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset)
408255736Sdavidch{
409255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
410255736Sdavidch
411255736Sdavidch	return vp->put_entry(vp, offset);
412255736Sdavidch}
413255736Sdavidch
414255736Sdavidchstatic bool ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o)
415255736Sdavidch{
416255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
417255736Sdavidch
418255736Sdavidch	return vp->put(vp, 1);
419255736Sdavidch}
420255736Sdavidch
421255736Sdavidchstatic bool ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
422255736Sdavidch{
423255736Sdavidch	struct ecore_credit_pool_obj *mp = o->macs_pool;
424255736Sdavidch	struct ecore_credit_pool_obj *vp = o->vlans_pool;
425255736Sdavidch
426255736Sdavidch	if (!mp->put(mp, 1))
427255736Sdavidch		return FALSE;
428255736Sdavidch
429255736Sdavidch	if (!vp->put(vp, 1)) {
430255736Sdavidch		mp->get(mp, 1);
431255736Sdavidch		return FALSE;
432255736Sdavidch	}
433255736Sdavidch
434255736Sdavidch	return TRUE;
435255736Sdavidch}
436255736Sdavidch
437255736Sdavidch/**
438255736Sdavidch * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
439255736Sdavidch * head list.
440255736Sdavidch *
441255736Sdavidch * @sc:		device handle
442255736Sdavidch * @o:		vlan_mac object
443255736Sdavidch *
444255736Sdavidch * @details: Non-blocking implementation; should be called under execution
445255736Sdavidch *           queue lock.
446255736Sdavidch */
447255736Sdavidchstatic int __ecore_vlan_mac_h_write_trylock(struct bxe_softc *sc,
448255736Sdavidch					    struct ecore_vlan_mac_obj *o)
449255736Sdavidch{
450255736Sdavidch	if (o->head_reader) {
451255736Sdavidch		ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy\n");
452255736Sdavidch		return ECORE_BUSY;
453255736Sdavidch	}
454255736Sdavidch
455255736Sdavidch	ECORE_MSG(sc, "vlan_mac_lock writer - Taken\n");
456255736Sdavidch	return ECORE_SUCCESS;
457255736Sdavidch}
458255736Sdavidch
459255736Sdavidch/**
460255736Sdavidch * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
461255736Sdavidch * which wasn't able to run due to a taken lock on vlan mac head list.
462255736Sdavidch *
463255736Sdavidch * @sc:		device handle
464255736Sdavidch * @o:		vlan_mac object
465255736Sdavidch *
466255736Sdavidch * @details Should be called under execution queue lock; notice it might release
467255736Sdavidch *          and reclaim it during its run.
468255736Sdavidch */
469255736Sdavidchstatic void __ecore_vlan_mac_h_exec_pending(struct bxe_softc *sc,
470255736Sdavidch					    struct ecore_vlan_mac_obj *o)
471255736Sdavidch{
472255736Sdavidch	int rc;
473255736Sdavidch	unsigned long ramrod_flags = o->saved_ramrod_flags;
474255736Sdavidch
475255736Sdavidch	ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
476255736Sdavidch		  ramrod_flags);
477255736Sdavidch	o->head_exe_request = FALSE;
478255736Sdavidch	o->saved_ramrod_flags = 0;
479258187Sedavis	rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
480284470Sdavidcs	if ((rc != ECORE_SUCCESS) && (rc != ECORE_PENDING)) {
481255736Sdavidch		ECORE_ERR("execution of pending commands failed with rc %d\n",
482255736Sdavidch			  rc);
483255736Sdavidch#ifdef ECORE_STOP_ON_ERROR
484255736Sdavidch		ecore_panic();
485255736Sdavidch#endif
486255736Sdavidch	}
487255736Sdavidch}
488255736Sdavidch
489255736Sdavidch/**
490255736Sdavidch * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
491255736Sdavidch * called due to vlan mac head list lock being taken.
492255736Sdavidch *
493255736Sdavidch * @sc:			device handle
494255736Sdavidch * @o:			vlan_mac object
495255736Sdavidch * @ramrod_flags:	ramrod flags of missed execution
496255736Sdavidch *
497255736Sdavidch * @details Should be called under execution queue lock.
498255736Sdavidch */
499255736Sdavidchstatic void __ecore_vlan_mac_h_pend(struct bxe_softc *sc,
500255736Sdavidch				    struct ecore_vlan_mac_obj *o,
501255736Sdavidch				    unsigned long ramrod_flags)
502255736Sdavidch{
503255736Sdavidch	o->head_exe_request = TRUE;
504255736Sdavidch	o->saved_ramrod_flags = ramrod_flags;
505255736Sdavidch	ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu\n",
506255736Sdavidch		  ramrod_flags);
507255736Sdavidch}
508255736Sdavidch
509255736Sdavidch/**
510255736Sdavidch * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
511255736Sdavidch *
512255736Sdavidch * @sc:			device handle
513255736Sdavidch * @o:			vlan_mac object
514255736Sdavidch *
515255736Sdavidch * @details Should be called under execution queue lock. Notice if a pending
516255736Sdavidch *          execution exists, it would perform it - possibly releasing and
517255736Sdavidch *          reclaiming the execution queue lock.
518255736Sdavidch */
519255736Sdavidchstatic void __ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
520255736Sdavidch					    struct ecore_vlan_mac_obj *o)
521255736Sdavidch{
522255736Sdavidch	/* It's possible a new pending execution was added since this writer
523255736Sdavidch	 * executed. If so, execute again. [Ad infinitum]
524255736Sdavidch	 */
525255736Sdavidch	while(o->head_exe_request) {
526255736Sdavidch		ECORE_MSG(sc, "vlan_mac_lock - writer release encountered a pending request\n");
527255736Sdavidch		__ecore_vlan_mac_h_exec_pending(sc, o);
528255736Sdavidch	}
529255736Sdavidch}
530255736Sdavidch
531255736Sdavidch/**
532255736Sdavidch * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
533255736Sdavidch *
534255736Sdavidch * @sc:			device handle
535255736Sdavidch * @o:			vlan_mac object
536255736Sdavidch *
537255736Sdavidch * @details Notice if a pending execution exists, it would perform it -
538255736Sdavidch *          possibly releasing and reclaiming the execution queue lock.
539255736Sdavidch */
void ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
				   struct ecore_vlan_mac_obj *o)
{
	/* Take the execution queue lock and let the locked variant run
	 * any pending request before releasing the writer.
	 */
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_write_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}
547255736Sdavidch
548255736Sdavidch/**
549255736Sdavidch * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
550255736Sdavidch *
551255736Sdavidch * @sc:			device handle
552255736Sdavidch * @o:			vlan_mac object
553255736Sdavidch *
554255736Sdavidch * @details Should be called under the execution queue lock. May sleep. May
555255736Sdavidch *          release and reclaim execution queue lock during its run.
556255736Sdavidch */
557255736Sdavidchstatic int __ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
558255736Sdavidch					struct ecore_vlan_mac_obj *o)
559255736Sdavidch{
560255736Sdavidch	/* If we got here, we're holding lock --> no WRITER exists */
561255736Sdavidch	o->head_reader++;
562255736Sdavidch	ECORE_MSG(sc, "vlan_mac_lock - locked reader - number %d\n",
563255736Sdavidch		  o->head_reader);
564255736Sdavidch
565255736Sdavidch	return ECORE_SUCCESS;
566255736Sdavidch}
567255736Sdavidch
568255736Sdavidch/**
569255736Sdavidch * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
570255736Sdavidch *
571255736Sdavidch * @sc:			device handle
572255736Sdavidch * @o:			vlan_mac object
573255736Sdavidch *
574255736Sdavidch * @details May sleep. Claims and releases execution queue lock during its run.
575255736Sdavidch */
int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
			       struct ecore_vlan_mac_obj *o)
{
	int rc;

	/* Serialize against writers via the execution queue lock. */
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	rc = __ecore_vlan_mac_h_read_lock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}
587255736Sdavidch
588255736Sdavidch/**
589255736Sdavidch * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
590255736Sdavidch *
591255736Sdavidch * @sc:			device handle
592255736Sdavidch * @o:			vlan_mac object
593255736Sdavidch *
594255736Sdavidch * @details Should be called under execution queue lock. Notice if a pending
595255736Sdavidch *          execution exists, it would be performed if this was the last
596255736Sdavidch *          reader. possibly releasing and reclaiming the execution queue lock.
597255736Sdavidch */
598255736Sdavidchstatic void __ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
599255736Sdavidch					  struct ecore_vlan_mac_obj *o)
600255736Sdavidch{
601255736Sdavidch	if (!o->head_reader) {
602255736Sdavidch		ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
603255736Sdavidch#ifdef ECORE_STOP_ON_ERROR
604255736Sdavidch		ecore_panic();
605255736Sdavidch#endif
606255736Sdavidch	} else {
607255736Sdavidch		o->head_reader--;
608255736Sdavidch		ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d\n",
609255736Sdavidch			  o->head_reader);
610255736Sdavidch	}
611255736Sdavidch
612255736Sdavidch	/* It's possible a new pending execution was added, and that this reader
613255736Sdavidch	 * was last - if so we need to execute the command.
614255736Sdavidch	 */
615255736Sdavidch	if (!o->head_reader && o->head_exe_request) {
616255736Sdavidch		ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request\n");
617255736Sdavidch
618255736Sdavidch		/* Writer release will do the trick */
619255736Sdavidch		__ecore_vlan_mac_h_write_unlock(sc, o);
620255736Sdavidch	}
621255736Sdavidch}
622255736Sdavidch
623255736Sdavidch/**
624255736Sdavidch * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
625255736Sdavidch *
626255736Sdavidch * @sc:			device handle
627255736Sdavidch * @o:			vlan_mac object
628255736Sdavidch *
629255736Sdavidch * @details Notice if a pending execution exists, it would be performed if this
630255736Sdavidch *          was the last reader. Claims and releases the execution queue lock
631255736Sdavidch *          during its run.
632255736Sdavidch */
void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
				  struct ecore_vlan_mac_obj *o)
{
	/* Drop one reader under the execution queue lock; the locked
	 * variant may run a parked request if this was the last reader.
	 */
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_read_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}
640255736Sdavidch
/**
 * ecore_get_n_elements - get up to n elements from the vlan_mac head list
 *
 * @sc:		device handle
 * @o:		vlan_mac object
 * @n:		number of elements to get
 * @base:	base address for element placement
 * @stride:	stride between elements (in bytes)
 * @size:	number of payload bytes to copy per element
 */
650255736Sdavidchstatic int ecore_get_n_elements(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
651255736Sdavidch				 int n, uint8_t *base, uint8_t stride, uint8_t size)
652255736Sdavidch{
653255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
654255736Sdavidch	uint8_t *next = base;
655258187Sedavis	int counter = 0;
656258187Sedavis	int read_lock;
657255736Sdavidch
658255736Sdavidch	ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)\n");
659255736Sdavidch	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
660255736Sdavidch	if (read_lock != ECORE_SUCCESS)
661255736Sdavidch		ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
662255736Sdavidch
663255736Sdavidch	/* traverse list */
664255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
665255736Sdavidch				  struct ecore_vlan_mac_registry_elem) {
666255736Sdavidch		if (counter < n) {
667255736Sdavidch			ECORE_MEMCPY(next, &pos->u, size);
668255736Sdavidch			counter++;
669258187Sedavis			ECORE_MSG(sc, "copied element number %d to address %p element was:\n",
670255736Sdavidch				  counter, next);
671255736Sdavidch			next += stride + size;
672255736Sdavidch		}
673255736Sdavidch	}
674255736Sdavidch
675255736Sdavidch	if (read_lock == ECORE_SUCCESS) {
676255736Sdavidch		ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)\n");
677255736Sdavidch		ecore_vlan_mac_h_read_unlock(sc, o);
678255736Sdavidch	}
679255736Sdavidch
680255736Sdavidch	return counter * ETH_ALEN;
681255736Sdavidch}
682255736Sdavidch
683255736Sdavidch/* check_add() callbacks */
684255736Sdavidchstatic int ecore_check_mac_add(struct bxe_softc *sc,
685255736Sdavidch			       struct ecore_vlan_mac_obj *o,
686255736Sdavidch			       union ecore_classification_ramrod_data *data)
687255736Sdavidch{
688255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
689255736Sdavidch
690255736Sdavidch	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
691255736Sdavidch
692255736Sdavidch	if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
693255736Sdavidch		return ECORE_INVAL;
694255736Sdavidch
695255736Sdavidch	/* Check if a requested MAC already exists */
696255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
697255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
698255736Sdavidch		if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
699255736Sdavidch		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
700255736Sdavidch			return ECORE_EXISTS;
701255736Sdavidch
702255736Sdavidch	return ECORE_SUCCESS;
703255736Sdavidch}
704255736Sdavidch
705255736Sdavidchstatic int ecore_check_vlan_add(struct bxe_softc *sc,
706255736Sdavidch				struct ecore_vlan_mac_obj *o,
707255736Sdavidch				union ecore_classification_ramrod_data *data)
708255736Sdavidch{
709255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
710255736Sdavidch
711255736Sdavidch	ECORE_MSG(sc, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
712255736Sdavidch
713255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
714255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
715255736Sdavidch		if (data->vlan.vlan == pos->u.vlan.vlan)
716255736Sdavidch			return ECORE_EXISTS;
717255736Sdavidch
718255736Sdavidch	return ECORE_SUCCESS;
719255736Sdavidch}
720255736Sdavidch
721255736Sdavidchstatic int ecore_check_vlan_mac_add(struct bxe_softc *sc,
722255736Sdavidch				    struct ecore_vlan_mac_obj *o,
723255736Sdavidch				   union ecore_classification_ramrod_data *data)
724255736Sdavidch{
725255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
726255736Sdavidch
727255736Sdavidch	ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
728255736Sdavidch		  data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
729255736Sdavidch
730255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
731255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
732255736Sdavidch		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
733255736Sdavidch		    (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
734255736Sdavidch				  ETH_ALEN)) &&
735255736Sdavidch		    (data->vlan_mac.is_inner_mac ==
736255736Sdavidch		     pos->u.vlan_mac.is_inner_mac))
737255736Sdavidch			return ECORE_EXISTS;
738255736Sdavidch
739255736Sdavidch	return ECORE_SUCCESS;
740255736Sdavidch}
741255736Sdavidch
742296071Sdavidcsstatic int ecore_check_vxlan_fltr_add(struct bxe_softc *sc,
743296071Sdavidcs				struct ecore_vlan_mac_obj *o,
744296071Sdavidcs				union ecore_classification_ramrod_data *data)
745296071Sdavidcs{
746296071Sdavidcs	struct ecore_vlan_mac_registry_elem *pos;
747296071Sdavidcs
748296071Sdavidcs	ECORE_MSG(sc, "Checking VXLAN_FLTR (Inner:%pM, %d) for ADD command\n",
749296071Sdavidcs		  data->vxlan_fltr.innermac, data->vxlan_fltr.vni);
750296071Sdavidcs
751296071Sdavidcs	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
752296071Sdavidcs				  struct ecore_vlan_mac_registry_elem)
753296071Sdavidcs		if ((!ECORE_MEMCMP(data->vxlan_fltr.innermac,
754296071Sdavidcs			       pos->u.vxlan_fltr.innermac,
755296071Sdavidcs			       ETH_ALEN)) &&
756296071Sdavidcs			     (data->vxlan_fltr.vni == pos->u.vxlan_fltr.vni))
757296071Sdavidcs			return ECORE_EXISTS;
758296071Sdavidcs
759296071Sdavidcs	return ECORE_SUCCESS;
760296071Sdavidcs}
761296071Sdavidcs
762255736Sdavidch/* check_del() callbacks */
763255736Sdavidchstatic struct ecore_vlan_mac_registry_elem *
764255736Sdavidch	ecore_check_mac_del(struct bxe_softc *sc,
765255736Sdavidch			    struct ecore_vlan_mac_obj *o,
766255736Sdavidch			    union ecore_classification_ramrod_data *data)
767255736Sdavidch{
768255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
769255736Sdavidch
770255736Sdavidch	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
771255736Sdavidch
772255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
773255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
774255736Sdavidch		if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
775255736Sdavidch		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
776255736Sdavidch			return pos;
777255736Sdavidch
778255736Sdavidch	return NULL;
779255736Sdavidch}
780255736Sdavidch
781255736Sdavidchstatic struct ecore_vlan_mac_registry_elem *
782255736Sdavidch	ecore_check_vlan_del(struct bxe_softc *sc,
783255736Sdavidch			     struct ecore_vlan_mac_obj *o,
784255736Sdavidch			     union ecore_classification_ramrod_data *data)
785255736Sdavidch{
786255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
787255736Sdavidch
788255736Sdavidch	ECORE_MSG(sc, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
789255736Sdavidch
790255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
791255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
792255736Sdavidch		if (data->vlan.vlan == pos->u.vlan.vlan)
793255736Sdavidch			return pos;
794255736Sdavidch
795255736Sdavidch	return NULL;
796255736Sdavidch}
797255736Sdavidch
798255736Sdavidchstatic struct ecore_vlan_mac_registry_elem *
799255736Sdavidch	ecore_check_vlan_mac_del(struct bxe_softc *sc,
800255736Sdavidch				 struct ecore_vlan_mac_obj *o,
801255736Sdavidch				 union ecore_classification_ramrod_data *data)
802255736Sdavidch{
803255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
804255736Sdavidch
805255736Sdavidch	ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
806255736Sdavidch		  data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
807255736Sdavidch
808255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
809255736Sdavidch				  struct ecore_vlan_mac_registry_elem)
810255736Sdavidch		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
811255736Sdavidch		    (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
812255736Sdavidch			     ETH_ALEN)) &&
813255736Sdavidch		    (data->vlan_mac.is_inner_mac ==
814255736Sdavidch		     pos->u.vlan_mac.is_inner_mac))
815255736Sdavidch			return pos;
816255736Sdavidch
817255736Sdavidch	return NULL;
818255736Sdavidch}
819255736Sdavidch
820296071Sdavidcsstatic struct ecore_vlan_mac_registry_elem *
821296071Sdavidcs	ecore_check_vxlan_fltr_del
822296071Sdavidcs			(struct bxe_softc *sc,
823296071Sdavidcs			struct ecore_vlan_mac_obj *o,
824296071Sdavidcs			union ecore_classification_ramrod_data *data)
825296071Sdavidcs{
826296071Sdavidcs	struct ecore_vlan_mac_registry_elem *pos;
827296071Sdavidcs
828296071Sdavidcs	ECORE_MSG(sc, "Checking VXLAN_FLTR (Inner:%pM, %d) for DEL command\n",
829296071Sdavidcs		  data->vxlan_fltr.innermac, data->vxlan_fltr.vni);
830296071Sdavidcs
831296071Sdavidcs	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
832296071Sdavidcs				  struct ecore_vlan_mac_registry_elem)
833296071Sdavidcs		if ((!ECORE_MEMCMP(data->vxlan_fltr.innermac,
834296071Sdavidcs			       pos->u.vxlan_fltr.innermac,
835296071Sdavidcs			       ETH_ALEN)) &&
836296071Sdavidcs			       (data->vxlan_fltr.vni == pos->u.vxlan_fltr.vni))
837296071Sdavidcs			return pos;
838296071Sdavidcs
839296071Sdavidcs	return NULL;
840296071Sdavidcs}
841296071Sdavidcs
842255736Sdavidch/* check_move() callback */
843255736Sdavidchstatic bool ecore_check_move(struct bxe_softc *sc,
844255736Sdavidch			     struct ecore_vlan_mac_obj *src_o,
845255736Sdavidch			     struct ecore_vlan_mac_obj *dst_o,
846255736Sdavidch			     union ecore_classification_ramrod_data *data)
847255736Sdavidch{
848255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
849255736Sdavidch	int rc;
850255736Sdavidch
851255736Sdavidch	/* Check if we can delete the requested configuration from the first
852255736Sdavidch	 * object.
853255736Sdavidch	 */
854255736Sdavidch	pos = src_o->check_del(sc, src_o, data);
855255736Sdavidch
856255736Sdavidch	/*  check if configuration can be added */
857255736Sdavidch	rc = dst_o->check_add(sc, dst_o, data);
858255736Sdavidch
859255736Sdavidch	/* If this classification can not be added (is already set)
860255736Sdavidch	 * or can't be deleted - return an error.
861255736Sdavidch	 */
862255736Sdavidch	if (rc || !pos)
863255736Sdavidch		return FALSE;
864255736Sdavidch
865255736Sdavidch	return TRUE;
866255736Sdavidch}
867255736Sdavidch
868255736Sdavidchstatic bool ecore_check_move_always_err(
869255736Sdavidch	struct bxe_softc *sc,
870255736Sdavidch	struct ecore_vlan_mac_obj *src_o,
871255736Sdavidch	struct ecore_vlan_mac_obj *dst_o,
872255736Sdavidch	union ecore_classification_ramrod_data *data)
873255736Sdavidch{
874255736Sdavidch	return FALSE;
875255736Sdavidch}
876255736Sdavidch
877255736Sdavidchstatic inline uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
878255736Sdavidch{
879255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
880255736Sdavidch	uint8_t rx_tx_flag = 0;
881255736Sdavidch
882255736Sdavidch	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
883255736Sdavidch	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
884255736Sdavidch		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
885255736Sdavidch
886255736Sdavidch	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
887255736Sdavidch	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
888255736Sdavidch		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
889255736Sdavidch
890255736Sdavidch	return rx_tx_flag;
891255736Sdavidch}
892255736Sdavidch
893255736Sdavidchvoid ecore_set_mac_in_nig(struct bxe_softc *sc,
894255736Sdavidch			  bool add, unsigned char *dev_addr, int index)
895255736Sdavidch{
896255736Sdavidch	uint32_t wb_data[2];
897255736Sdavidch	uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
898255736Sdavidch			 NIG_REG_LLH0_FUNC_MEM;
899255736Sdavidch
900255736Sdavidch	if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
901255736Sdavidch		return;
902255736Sdavidch
903255736Sdavidch	if (index > ECORE_LLH_CAM_MAX_PF_LINE)
904255736Sdavidch		return;
905255736Sdavidch
906255736Sdavidch	ECORE_MSG(sc, "Going to %s LLH configuration at entry %d\n",
907255736Sdavidch		  (add ? "ADD" : "DELETE"), index);
908255736Sdavidch
909255736Sdavidch	if (add) {
910255736Sdavidch		/* LLH_FUNC_MEM is a uint64_t WB register */
911255736Sdavidch		reg_offset += 8*index;
912255736Sdavidch
913255736Sdavidch		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
914255736Sdavidch			      (dev_addr[4] <<  8) |  dev_addr[5]);
915255736Sdavidch		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
916255736Sdavidch
917255736Sdavidch		ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
918255736Sdavidch	}
919255736Sdavidch
920255736Sdavidch	REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
921255736Sdavidch				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
922255736Sdavidch}
923255736Sdavidch
924255736Sdavidch/**
925255736Sdavidch * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
926255736Sdavidch *
927255736Sdavidch * @sc:		device handle
928255736Sdavidch * @o:		queue for which we want to configure this rule
929255736Sdavidch * @add:	if TRUE the command is an ADD command, DEL otherwise
930255736Sdavidch * @opcode:	CLASSIFY_RULE_OPCODE_XXX
931255736Sdavidch * @hdr:	pointer to a header to setup
932255736Sdavidch *
933255736Sdavidch */
934255736Sdavidchstatic inline void ecore_vlan_mac_set_cmd_hdr_e2(struct bxe_softc *sc,
935255736Sdavidch	struct ecore_vlan_mac_obj *o, bool add, int opcode,
936255736Sdavidch	struct eth_classify_cmd_header *hdr)
937255736Sdavidch{
938255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
939255736Sdavidch
940255736Sdavidch	hdr->client_id = raw->cl_id;
941255736Sdavidch	hdr->func_id = raw->func_id;
942255736Sdavidch
943255736Sdavidch	/* Rx or/and Tx (internal switching) configuration ? */
944255736Sdavidch	hdr->cmd_general_data |=
945255736Sdavidch		ecore_vlan_mac_get_rx_tx_flag(o);
946255736Sdavidch
947255736Sdavidch	if (add)
948255736Sdavidch		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
949255736Sdavidch
950255736Sdavidch	hdr->cmd_general_data |=
951255736Sdavidch		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
952255736Sdavidch}
953255736Sdavidch
954255736Sdavidch/**
955255736Sdavidch * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
956255736Sdavidch *
957255736Sdavidch * @cid:	connection id
958255736Sdavidch * @type:	ECORE_FILTER_XXX_PENDING
959255736Sdavidch * @hdr:	pointer to header to setup
960255736Sdavidch * @rule_cnt:
961255736Sdavidch *
962255736Sdavidch * currently we always configure one rule and echo field to contain a CID and an
963255736Sdavidch * opcode type.
964255736Sdavidch */
965255736Sdavidchstatic inline void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
966255736Sdavidch				struct eth_classify_header *hdr, int rule_cnt)
967255736Sdavidch{
968255736Sdavidch	hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
969255736Sdavidch				(type << ECORE_SWCID_SHIFT));
970255736Sdavidch	hdr->rule_cnt = (uint8_t)rule_cnt;
971255736Sdavidch}
972255736Sdavidch
973255736Sdavidch/* hw_config() callbacks */
974255736Sdavidchstatic void ecore_set_one_mac_e2(struct bxe_softc *sc,
975255736Sdavidch				 struct ecore_vlan_mac_obj *o,
976255736Sdavidch				 struct ecore_exeq_elem *elem, int rule_idx,
977255736Sdavidch				 int cam_offset)
978255736Sdavidch{
979255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
980255736Sdavidch	struct eth_classify_rules_ramrod_data *data =
981255736Sdavidch		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
982255736Sdavidch	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
983255736Sdavidch	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
984255736Sdavidch	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
985255736Sdavidch	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
986255736Sdavidch	uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;
987255736Sdavidch
988255736Sdavidch	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
989255736Sdavidch	 * relevant. In addition, current implementation is tuned for a
990255736Sdavidch	 * single ETH MAC.
991255736Sdavidch	 *
992255736Sdavidch	 * When multiple unicast ETH MACs PF configuration in switch
993255736Sdavidch	 * independent mode is required (NetQ, multiple netdev MACs,
994255736Sdavidch	 * etc.), consider better utilisation of 8 per function MAC
995255736Sdavidch	 * entries in the LLH register. There is also
996255736Sdavidch	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
997255736Sdavidch	 * total number of CAM entries to 16.
998255736Sdavidch	 *
999255736Sdavidch	 * Currently we won't configure NIG for MACs other than a primary ETH
1000255736Sdavidch	 * MAC and iSCSI L2 MAC.
1001255736Sdavidch	 *
1002255736Sdavidch	 * If this MAC is moving from one Queue to another, no need to change
1003255736Sdavidch	 * NIG configuration.
1004255736Sdavidch	 */
1005255736Sdavidch	if (cmd != ECORE_VLAN_MAC_MOVE) {
1006255736Sdavidch		if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
1007255736Sdavidch			ecore_set_mac_in_nig(sc, add, mac,
1008255736Sdavidch					     ECORE_LLH_CAM_ISCSI_ETH_LINE);
1009255736Sdavidch		else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
1010255736Sdavidch			ecore_set_mac_in_nig(sc, add, mac,
1011255736Sdavidch					     ECORE_LLH_CAM_ETH_LINE);
1012255736Sdavidch	}
1013255736Sdavidch
1014255736Sdavidch	/* Reset the ramrod data buffer for the first rule */
1015255736Sdavidch	if (rule_idx == 0)
1016255736Sdavidch		ECORE_MEMSET(data, 0, sizeof(*data));
1017255736Sdavidch
1018255736Sdavidch	/* Setup a command header */
1019255736Sdavidch	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_MAC,
1020255736Sdavidch				      &rule_entry->mac.header);
1021255736Sdavidch
1022255736Sdavidch	ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n",
1023255736Sdavidch		  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id);
1024255736Sdavidch
1025255736Sdavidch	/* Set a MAC itself */
1026255736Sdavidch	ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
1027255736Sdavidch			      &rule_entry->mac.mac_mid,
1028255736Sdavidch			      &rule_entry->mac.mac_lsb, mac);
1029255736Sdavidch	rule_entry->mac.inner_mac =
1030296071Sdavidcs		ECORE_CPU_TO_LE16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
1031255736Sdavidch
1032255736Sdavidch	/* MOVE: Add a rule that will add this MAC to the target Queue */
1033255736Sdavidch	if (cmd == ECORE_VLAN_MAC_MOVE) {
1034255736Sdavidch		rule_entry++;
1035255736Sdavidch		rule_cnt++;
1036255736Sdavidch
1037255736Sdavidch		/* Setup ramrod data */
1038255736Sdavidch		ecore_vlan_mac_set_cmd_hdr_e2(sc,
1039255736Sdavidch					elem->cmd_data.vlan_mac.target_obj,
1040255736Sdavidch					      TRUE, CLASSIFY_RULE_OPCODE_MAC,
1041255736Sdavidch					      &rule_entry->mac.header);
1042255736Sdavidch
1043255736Sdavidch		/* Set a MAC itself */
1044255736Sdavidch		ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
1045255736Sdavidch				      &rule_entry->mac.mac_mid,
1046255736Sdavidch				      &rule_entry->mac.mac_lsb, mac);
1047255736Sdavidch		rule_entry->mac.inner_mac =
1048296071Sdavidcs			ECORE_CPU_TO_LE16(elem->cmd_data.vlan_mac.
1049296071Sdavidcs				       u.mac.is_inner_mac);
1050255736Sdavidch	}
1051255736Sdavidch
1052255736Sdavidch	/* Set the ramrod data header */
1053255736Sdavidch	/* TODO: take this to the higher level in order to prevent multiple
1054255736Sdavidch		 writing */
1055255736Sdavidch	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1056255736Sdavidch					rule_cnt);
1057255736Sdavidch}
1058255736Sdavidch
1059255736Sdavidch/**
1060255736Sdavidch * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
1061255736Sdavidch *
1062255736Sdavidch * @sc:		device handle
1063255736Sdavidch * @o:		queue
1064255736Sdavidch * @type:
1065255736Sdavidch * @cam_offset:	offset in cam memory
1066255736Sdavidch * @hdr:	pointer to a header to setup
1067255736Sdavidch *
1068255736Sdavidch * E1/E1H
1069255736Sdavidch */
1070255736Sdavidchstatic inline void ecore_vlan_mac_set_rdata_hdr_e1x(struct bxe_softc *sc,
1071255736Sdavidch	struct ecore_vlan_mac_obj *o, int type, int cam_offset,
1072255736Sdavidch	struct mac_configuration_hdr *hdr)
1073255736Sdavidch{
1074255736Sdavidch	struct ecore_raw_obj *r = &o->raw;
1075255736Sdavidch
1076255736Sdavidch	hdr->length = 1;
1077255736Sdavidch	hdr->offset = (uint8_t)cam_offset;
1078255736Sdavidch	hdr->client_id = ECORE_CPU_TO_LE16(0xff);
1079255736Sdavidch	hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
1080255736Sdavidch				(type << ECORE_SWCID_SHIFT));
1081255736Sdavidch}
1082255736Sdavidch
1083255736Sdavidchstatic inline void ecore_vlan_mac_set_cfg_entry_e1x(struct bxe_softc *sc,
1084255736Sdavidch	struct ecore_vlan_mac_obj *o, bool add, int opcode, uint8_t *mac,
1085255736Sdavidch	uint16_t vlan_id, struct mac_configuration_entry *cfg_entry)
1086255736Sdavidch{
1087255736Sdavidch	struct ecore_raw_obj *r = &o->raw;
1088255736Sdavidch	uint32_t cl_bit_vec = (1 << r->cl_id);
1089255736Sdavidch
1090255736Sdavidch	cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
1091255736Sdavidch	cfg_entry->pf_id = r->func_id;
1092255736Sdavidch	cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);
1093255736Sdavidch
1094255736Sdavidch	if (add) {
1095255736Sdavidch		ECORE_SET_FLAG(cfg_entry->flags,
1096255736Sdavidch			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1097255736Sdavidch			       T_ETH_MAC_COMMAND_SET);
1098255736Sdavidch		ECORE_SET_FLAG(cfg_entry->flags,
1099255736Sdavidch			       MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
1100255736Sdavidch			       opcode);
1101255736Sdavidch
1102255736Sdavidch		/* Set a MAC in a ramrod data */
1103255736Sdavidch		ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
1104255736Sdavidch				      &cfg_entry->middle_mac_addr,
1105255736Sdavidch				      &cfg_entry->lsb_mac_addr, mac);
1106255736Sdavidch	} else
1107255736Sdavidch		ECORE_SET_FLAG(cfg_entry->flags,
1108255736Sdavidch			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1109255736Sdavidch			       T_ETH_MAC_COMMAND_INVALIDATE);
1110255736Sdavidch}
1111255736Sdavidch
1112255736Sdavidchstatic inline void ecore_vlan_mac_set_rdata_e1x(struct bxe_softc *sc,
1113255736Sdavidch	struct ecore_vlan_mac_obj *o, int type, int cam_offset, bool add,
1114255736Sdavidch	uint8_t *mac, uint16_t vlan_id, int opcode, struct mac_configuration_cmd *config)
1115255736Sdavidch{
1116255736Sdavidch	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
1117255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
1118255736Sdavidch
1119255736Sdavidch	ecore_vlan_mac_set_rdata_hdr_e1x(sc, o, type, cam_offset,
1120255736Sdavidch					 &config->hdr);
1121255736Sdavidch	ecore_vlan_mac_set_cfg_entry_e1x(sc, o, add, opcode, mac, vlan_id,
1122255736Sdavidch					 cfg_entry);
1123255736Sdavidch
1124255736Sdavidch	ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n",
1125255736Sdavidch		  (add ? "setting" : "clearing"),
1126255736Sdavidch		  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id, cam_offset);
1127255736Sdavidch}
1128255736Sdavidch
1129255736Sdavidch/**
1130255736Sdavidch * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
1131255736Sdavidch *
1132255736Sdavidch * @sc:		device handle
1133255736Sdavidch * @o:		ecore_vlan_mac_obj
1134255736Sdavidch * @elem:	ecore_exeq_elem
1135255736Sdavidch * @rule_idx:	rule_idx
1136255736Sdavidch * @cam_offset: cam_offset
1137255736Sdavidch */
1138255736Sdavidchstatic void ecore_set_one_mac_e1x(struct bxe_softc *sc,
1139255736Sdavidch				  struct ecore_vlan_mac_obj *o,
1140255736Sdavidch				  struct ecore_exeq_elem *elem, int rule_idx,
1141255736Sdavidch				  int cam_offset)
1142255736Sdavidch{
1143255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
1144255736Sdavidch	struct mac_configuration_cmd *config =
1145255736Sdavidch		(struct mac_configuration_cmd *)(raw->rdata);
1146255736Sdavidch	/* 57710 and 57711 do not support MOVE command,
1147255736Sdavidch	 * so it's either ADD or DEL
1148255736Sdavidch	 */
1149255736Sdavidch	bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1150255736Sdavidch		TRUE : FALSE;
1151255736Sdavidch
1152255736Sdavidch	/* Reset the ramrod data buffer */
1153255736Sdavidch	ECORE_MEMSET(config, 0, sizeof(*config));
1154255736Sdavidch
1155255736Sdavidch	ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
1156255736Sdavidch				     cam_offset, add,
1157255736Sdavidch				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
1158255736Sdavidch				     ETH_VLAN_FILTER_ANY_VLAN, config);
1159255736Sdavidch}
1160255736Sdavidch
1161255736Sdavidchstatic void ecore_set_one_vlan_e2(struct bxe_softc *sc,
1162255736Sdavidch				  struct ecore_vlan_mac_obj *o,
1163255736Sdavidch				  struct ecore_exeq_elem *elem, int rule_idx,
1164255736Sdavidch				  int cam_offset)
1165255736Sdavidch{
1166255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
1167255736Sdavidch	struct eth_classify_rules_ramrod_data *data =
1168255736Sdavidch		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1169255736Sdavidch	int rule_cnt = rule_idx + 1;
1170255736Sdavidch	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1171255736Sdavidch	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1172255736Sdavidch	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1173255736Sdavidch	uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
1174255736Sdavidch
1175255736Sdavidch	/* Reset the ramrod data buffer for the first rule */
1176255736Sdavidch	if (rule_idx == 0)
1177255736Sdavidch		ECORE_MEMSET(data, 0, sizeof(*data));
1178255736Sdavidch
1179255736Sdavidch	/* Set a rule header */
1180255736Sdavidch	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1181255736Sdavidch				      &rule_entry->vlan.header);
1182255736Sdavidch
1183255736Sdavidch	ECORE_MSG(sc, "About to %s VLAN %d\n", (add ? "add" : "delete"),
1184255736Sdavidch		  vlan);
1185255736Sdavidch
1186255736Sdavidch	/* Set a VLAN itself */
1187255736Sdavidch	rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
1188255736Sdavidch
1189255736Sdavidch	/* MOVE: Add a rule that will add this MAC to the target Queue */
1190255736Sdavidch	if (cmd == ECORE_VLAN_MAC_MOVE) {
1191255736Sdavidch		rule_entry++;
1192255736Sdavidch		rule_cnt++;
1193255736Sdavidch
1194255736Sdavidch		/* Setup ramrod data */
1195255736Sdavidch		ecore_vlan_mac_set_cmd_hdr_e2(sc,
1196255736Sdavidch					elem->cmd_data.vlan_mac.target_obj,
1197255736Sdavidch					      TRUE, CLASSIFY_RULE_OPCODE_VLAN,
1198255736Sdavidch					      &rule_entry->vlan.header);
1199255736Sdavidch
1200255736Sdavidch		/* Set a VLAN itself */
1201255736Sdavidch		rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
1202255736Sdavidch	}
1203255736Sdavidch
1204255736Sdavidch	/* Set the ramrod data header */
1205255736Sdavidch	/* TODO: take this to the higher level in order to prevent multiple
1206255736Sdavidch		 writing */
1207255736Sdavidch	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1208255736Sdavidch					rule_cnt);
1209255736Sdavidch}
1210255736Sdavidch
1211255736Sdavidchstatic void ecore_set_one_vlan_mac_e2(struct bxe_softc *sc,
1212255736Sdavidch				      struct ecore_vlan_mac_obj *o,
1213255736Sdavidch				      struct ecore_exeq_elem *elem,
1214255736Sdavidch				      int rule_idx, int cam_offset)
1215255736Sdavidch{
1216255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
1217255736Sdavidch	struct eth_classify_rules_ramrod_data *data =
1218255736Sdavidch		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1219255736Sdavidch	int rule_cnt = rule_idx + 1;
1220255736Sdavidch	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1221255736Sdavidch	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1222255736Sdavidch	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1223255736Sdavidch	uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
1224255736Sdavidch	uint8_t *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
1225255736Sdavidch
1226255736Sdavidch	/* Reset the ramrod data buffer for the first rule */
1227255736Sdavidch	if (rule_idx == 0)
1228255736Sdavidch		ECORE_MEMSET(data, 0, sizeof(*data));
1229255736Sdavidch
1230255736Sdavidch	/* Set a rule header */
1231255736Sdavidch	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_PAIR,
1232255736Sdavidch				      &rule_entry->pair.header);
1233255736Sdavidch
1234255736Sdavidch	/* Set VLAN and MAC themselves */
1235255736Sdavidch	rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
1236255736Sdavidch	ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1237255736Sdavidch			      &rule_entry->pair.mac_mid,
1238255736Sdavidch			      &rule_entry->pair.mac_lsb, mac);
1239255736Sdavidch	rule_entry->pair.inner_mac =
1240255736Sdavidch			elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1241255736Sdavidch	/* MOVE: Add a rule that will add this MAC to the target Queue */
1242255736Sdavidch	if (cmd == ECORE_VLAN_MAC_MOVE) {
1243255736Sdavidch		rule_entry++;
1244255736Sdavidch		rule_cnt++;
1245255736Sdavidch
1246255736Sdavidch		/* Setup ramrod data */
1247255736Sdavidch		ecore_vlan_mac_set_cmd_hdr_e2(sc,
1248255736Sdavidch					elem->cmd_data.vlan_mac.target_obj,
1249255736Sdavidch					      TRUE, CLASSIFY_RULE_OPCODE_PAIR,
1250255736Sdavidch					      &rule_entry->pair.header);
1251255736Sdavidch
1252255736Sdavidch		/* Set a VLAN itself */
1253255736Sdavidch		rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
1254255736Sdavidch		ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1255255736Sdavidch				      &rule_entry->pair.mac_mid,
1256255736Sdavidch				      &rule_entry->pair.mac_lsb, mac);
1257255736Sdavidch		rule_entry->pair.inner_mac =
1258255736Sdavidch			elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1259255736Sdavidch	}
1260255736Sdavidch
1261255736Sdavidch	/* Set the ramrod data header */
1262255736Sdavidch	/* TODO: take this to the higher level in order to prevent multiple
1263255736Sdavidch		 writing */
1264255736Sdavidch	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1265255736Sdavidch					rule_cnt);
1266255736Sdavidch}
1267255736Sdavidch
1268296071Sdavidcsstatic void ecore_set_one_vxlan_fltr_e2(struct bxe_softc *sc,
1269296071Sdavidcs						struct ecore_vlan_mac_obj *o,
1270296071Sdavidcs						struct ecore_exeq_elem *elem,
1271296071Sdavidcs						int rule_idx, int cam_offset)
1272296071Sdavidcs{
1273296071Sdavidcs	struct ecore_raw_obj *raw = &o->raw;
1274296071Sdavidcs	struct eth_classify_rules_ramrod_data *data =
1275296071Sdavidcs		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1276296071Sdavidcs	int rule_cnt = rule_idx + 1;
1277296071Sdavidcs	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1278296071Sdavidcs	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1279296071Sdavidcs	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1280296071Sdavidcs	uint32_t vni = elem->cmd_data.vlan_mac.u.vxlan_fltr.vni;
1281296071Sdavidcs	uint8_t *mac = elem->cmd_data.vlan_mac.u.vxlan_fltr.innermac;
1282296071Sdavidcs
1283296071Sdavidcs	/* Reset the ramrod data buffer for the first rule */
1284296071Sdavidcs	if (rule_idx == 0)
1285296071Sdavidcs		ECORE_MEMSET(data, 0, sizeof(*data));
1286296071Sdavidcs
1287296071Sdavidcs	/* Set a rule header */
1288296071Sdavidcs	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add,
1289296071Sdavidcs				      CLASSIFY_RULE_OPCODE_IMAC_VNI,
1290296071Sdavidcs				      &rule_entry->imac_vni.header);
1291296071Sdavidcs
1292296071Sdavidcs	/* Set VLAN and MAC themselves */
1293296071Sdavidcs	rule_entry->imac_vni.vni = vni;
1294296071Sdavidcs	ecore_set_fw_mac_addr(&rule_entry->imac_vni.imac_msb,
1295296071Sdavidcs			      &rule_entry->imac_vni.imac_mid,
1296296071Sdavidcs			      &rule_entry->imac_vni.imac_lsb, mac);
1297296071Sdavidcs
1298296071Sdavidcs	/* MOVE: Add a rule that will add this MAC to the target Queue */
1299296071Sdavidcs	if (cmd == ECORE_VLAN_MAC_MOVE) {
1300296071Sdavidcs		rule_entry++;
1301296071Sdavidcs		rule_cnt++;
1302296071Sdavidcs
1303296071Sdavidcs		/* Setup ramrod data */
1304296071Sdavidcs		ecore_vlan_mac_set_cmd_hdr_e2(sc,
1305296071Sdavidcs					      elem->cmd_data.vlan_mac.target_obj,
1306296071Sdavidcs					      TRUE, CLASSIFY_RULE_OPCODE_IMAC_VNI,
1307296071Sdavidcs					      &rule_entry->imac_vni.header);
1308296071Sdavidcs
1309296071Sdavidcs		/* Set a VLAN itself */
1310296071Sdavidcs		rule_entry->imac_vni.vni = vni;
1311296071Sdavidcs		ecore_set_fw_mac_addr(&rule_entry->imac_vni.imac_msb,
1312296071Sdavidcs				      &rule_entry->imac_vni.imac_mid,
1313296071Sdavidcs				      &rule_entry->imac_vni.imac_lsb, mac);
1314296071Sdavidcs	}
1315296071Sdavidcs
1316296071Sdavidcs	/* Set the ramrod data header */
1317296071Sdavidcs	/* TODO: take this to the higher level in order to prevent multiple
1318296071Sdavidcs	   * writing
1319296071Sdavidcs	*/
1320296071Sdavidcs	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state,
1321296071Sdavidcs					&data->header, rule_cnt);
1322296071Sdavidcs}
1323296071Sdavidcs
1324255736Sdavidch/**
1325255736Sdavidch * ecore_set_one_vlan_mac_e1h -
1326255736Sdavidch *
1327255736Sdavidch * @sc:		device handle
1328255736Sdavidch * @o:		ecore_vlan_mac_obj
1329255736Sdavidch * @elem:	ecore_exeq_elem
1330255736Sdavidch * @rule_idx:	rule_idx
1331255736Sdavidch * @cam_offset:	cam_offset
1332255736Sdavidch */
1333255736Sdavidchstatic void ecore_set_one_vlan_mac_e1h(struct bxe_softc *sc,
1334255736Sdavidch				       struct ecore_vlan_mac_obj *o,
1335255736Sdavidch				       struct ecore_exeq_elem *elem,
1336255736Sdavidch				       int rule_idx, int cam_offset)
1337255736Sdavidch{
1338255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
1339255736Sdavidch	struct mac_configuration_cmd *config =
1340255736Sdavidch		(struct mac_configuration_cmd *)(raw->rdata);
1341255736Sdavidch	/* 57710 and 57711 do not support MOVE command,
1342255736Sdavidch	 * so it's either ADD or DEL
1343255736Sdavidch	 */
1344255736Sdavidch	bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1345255736Sdavidch		TRUE : FALSE;
1346255736Sdavidch
1347255736Sdavidch	/* Reset the ramrod data buffer */
1348255736Sdavidch	ECORE_MEMSET(config, 0, sizeof(*config));
1349255736Sdavidch
1350255736Sdavidch	ecore_vlan_mac_set_rdata_e1x(sc, o, ECORE_FILTER_VLAN_MAC_PENDING,
1351255736Sdavidch				     cam_offset, add,
1352255736Sdavidch				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1353255736Sdavidch				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1354255736Sdavidch				     ETH_VLAN_FILTER_CLASSIFY, config);
1355255736Sdavidch}
1356255736Sdavidch
1357255736Sdavidch#define list_next_entry(pos, member) \
1358255736Sdavidch	list_entry((pos)->member.next, typeof(*(pos)), member)
1359255736Sdavidch
1360255736Sdavidch/**
1361255736Sdavidch * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1362255736Sdavidch *
1363255736Sdavidch * @sc:		device handle
1364255736Sdavidch * @p:		command parameters
1365255736Sdavidch * @ppos:	pointer to the cookie
1366255736Sdavidch *
1367255736Sdavidch * reconfigure next MAC/VLAN/VLAN-MAC element from the
1368255736Sdavidch * previously configured elements list.
1369255736Sdavidch *
1370255736Sdavidch * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is	taken
1371255736Sdavidch * into an account
1372255736Sdavidch *
1373255736Sdavidch * pointer to the cookie  - that should be given back in the next call to make
1374255736Sdavidch * function handle the next element. If *ppos is set to NULL it will restart the
1375255736Sdavidch * iterator. If returned *ppos == NULL this means that the last element has been
1376255736Sdavidch * handled.
1377255736Sdavidch *
1378255736Sdavidch */
1379255736Sdavidchstatic int ecore_vlan_mac_restore(struct bxe_softc *sc,
1380255736Sdavidch			   struct ecore_vlan_mac_ramrod_params *p,
1381255736Sdavidch			   struct ecore_vlan_mac_registry_elem **ppos)
1382255736Sdavidch{
1383255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
1384255736Sdavidch	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1385255736Sdavidch
1386255736Sdavidch	/* If list is empty - there is nothing to do here */
1387255736Sdavidch	if (ECORE_LIST_IS_EMPTY(&o->head)) {
1388255736Sdavidch		*ppos = NULL;
1389255736Sdavidch		return 0;
1390255736Sdavidch	}
1391255736Sdavidch
1392255736Sdavidch	/* make a step... */
1393255736Sdavidch	if (*ppos == NULL)
1394255736Sdavidch		*ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
1395255736Sdavidch					    struct ecore_vlan_mac_registry_elem,
1396255736Sdavidch					       link);
1397255736Sdavidch	else
1398255736Sdavidch		*ppos = ECORE_LIST_NEXT(*ppos, link,
1399255736Sdavidch					struct ecore_vlan_mac_registry_elem);
1400255736Sdavidch
1401255736Sdavidch	pos = *ppos;
1402255736Sdavidch
1403255736Sdavidch	/* If it's the last step - return NULL */
1404255736Sdavidch	if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
1405255736Sdavidch		*ppos = NULL;
1406255736Sdavidch
1407255736Sdavidch	/* Prepare a 'user_req' */
1408255736Sdavidch	ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));
1409255736Sdavidch
1410255736Sdavidch	/* Set the command */
1411255736Sdavidch	p->user_req.cmd = ECORE_VLAN_MAC_ADD;
1412255736Sdavidch
1413255736Sdavidch	/* Set vlan_mac_flags */
1414255736Sdavidch	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1415255736Sdavidch
1416255736Sdavidch	/* Set a restore bit */
1417255736Sdavidch	ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);
1418255736Sdavidch
1419255736Sdavidch	return ecore_config_vlan_mac(sc, p);
1420255736Sdavidch}
1421255736Sdavidch
1422255736Sdavidch/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1423255736Sdavidch * pointer to an element with a specific criteria and NULL if such an element
1424255736Sdavidch * hasn't been found.
1425255736Sdavidch */
1426255736Sdavidchstatic struct ecore_exeq_elem *ecore_exeq_get_mac(
1427255736Sdavidch	struct ecore_exe_queue_obj *o,
1428255736Sdavidch	struct ecore_exeq_elem *elem)
1429255736Sdavidch{
1430255736Sdavidch	struct ecore_exeq_elem *pos;
1431255736Sdavidch	struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1432255736Sdavidch
1433255736Sdavidch	/* Check pending for execution commands */
1434255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1435255736Sdavidch				  struct ecore_exeq_elem)
1436255736Sdavidch		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
1437255736Sdavidch			      sizeof(*data)) &&
1438255736Sdavidch		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1439255736Sdavidch			return pos;
1440255736Sdavidch
1441255736Sdavidch	return NULL;
1442255736Sdavidch}
1443255736Sdavidch
1444255736Sdavidchstatic struct ecore_exeq_elem *ecore_exeq_get_vlan(
1445255736Sdavidch	struct ecore_exe_queue_obj *o,
1446255736Sdavidch	struct ecore_exeq_elem *elem)
1447255736Sdavidch{
1448255736Sdavidch	struct ecore_exeq_elem *pos;
1449255736Sdavidch	struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1450255736Sdavidch
1451255736Sdavidch	/* Check pending for execution commands */
1452255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1453255736Sdavidch				  struct ecore_exeq_elem)
1454255736Sdavidch		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan, data,
1455255736Sdavidch			      sizeof(*data)) &&
1456255736Sdavidch		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1457255736Sdavidch			return pos;
1458255736Sdavidch
1459255736Sdavidch	return NULL;
1460255736Sdavidch}
1461255736Sdavidch
1462255736Sdavidchstatic struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
1463255736Sdavidch	struct ecore_exe_queue_obj *o,
1464255736Sdavidch	struct ecore_exeq_elem *elem)
1465255736Sdavidch{
1466255736Sdavidch	struct ecore_exeq_elem *pos;
1467255736Sdavidch	struct ecore_vlan_mac_ramrod_data *data =
1468255736Sdavidch		&elem->cmd_data.vlan_mac.u.vlan_mac;
1469255736Sdavidch
1470255736Sdavidch	/* Check pending for execution commands */
1471255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1472255736Sdavidch				  struct ecore_exeq_elem)
1473255736Sdavidch		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1474255736Sdavidch			      sizeof(*data)) &&
1475255736Sdavidch		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1476255736Sdavidch			return pos;
1477255736Sdavidch
1478255736Sdavidch	return NULL;
1479255736Sdavidch}
1480255736Sdavidch
1481296071Sdavidcsstatic struct ecore_exeq_elem *ecore_exeq_get_vxlan_fltr
1482296071Sdavidcs			(struct ecore_exe_queue_obj *o,
1483296071Sdavidcs			struct ecore_exeq_elem *elem)
1484296071Sdavidcs{
1485296071Sdavidcs	struct ecore_exeq_elem *pos;
1486296071Sdavidcs	struct ecore_vxlan_fltr_ramrod_data *data =
1487296071Sdavidcs		&elem->cmd_data.vlan_mac.u.vxlan_fltr;
1488296071Sdavidcs
1489296071Sdavidcs	/* Check pending for execution commands */
1490296071Sdavidcs	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1491296071Sdavidcs				  struct ecore_exeq_elem)
1492296071Sdavidcs		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vxlan_fltr, data,
1493296071Sdavidcs			      sizeof(*data)) &&
1494296071Sdavidcs			      (pos->cmd_data.vlan_mac.cmd ==
1495296071Sdavidcs			      elem->cmd_data.vlan_mac.cmd))
1496296071Sdavidcs			return pos;
1497296071Sdavidcs
1498296071Sdavidcs	return NULL;
1499296071Sdavidcs}
1500296071Sdavidcs
1501255736Sdavidch/**
1502255736Sdavidch * ecore_validate_vlan_mac_add - check if an ADD command can be executed
1503255736Sdavidch *
1504255736Sdavidch * @sc:		device handle
1505255736Sdavidch * @qo:		ecore_qable_obj
1506255736Sdavidch * @elem:	ecore_exeq_elem
1507255736Sdavidch *
1508255736Sdavidch * Checks that the requested configuration can be added. If yes and if
1509255736Sdavidch * requested, consume CAM credit.
1510255736Sdavidch *
1511255736Sdavidch * The 'validate' is run after the 'optimize'.
1512255736Sdavidch *
1513255736Sdavidch */
1514255736Sdavidchstatic inline int ecore_validate_vlan_mac_add(struct bxe_softc *sc,
1515255736Sdavidch					      union ecore_qable_obj *qo,
1516255736Sdavidch					      struct ecore_exeq_elem *elem)
1517255736Sdavidch{
1518255736Sdavidch	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1519255736Sdavidch	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1520255736Sdavidch	int rc;
1521255736Sdavidch
1522255736Sdavidch	/* Check the registry */
1523255736Sdavidch	rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
1524255736Sdavidch	if (rc) {
1525255736Sdavidch		ECORE_MSG(sc, "ADD command is not allowed considering current registry state.\n");
1526255736Sdavidch		return rc;
1527255736Sdavidch	}
1528255736Sdavidch
1529255736Sdavidch	/* Check if there is a pending ADD command for this
1530255736Sdavidch	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1531255736Sdavidch	 */
1532255736Sdavidch	if (exeq->get(exeq, elem)) {
1533255736Sdavidch		ECORE_MSG(sc, "There is a pending ADD command already\n");
1534255736Sdavidch		return ECORE_EXISTS;
1535255736Sdavidch	}
1536255736Sdavidch
1537255736Sdavidch	/* TODO: Check the pending MOVE from other objects where this
1538255736Sdavidch	 * object is a destination object.
1539255736Sdavidch	 */
1540255736Sdavidch
1541255736Sdavidch	/* Consume the credit if not requested not to */
1542255736Sdavidch	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1543255736Sdavidch			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1544255736Sdavidch	    o->get_credit(o)))
1545255736Sdavidch		return ECORE_INVAL;
1546255736Sdavidch
1547255736Sdavidch	return ECORE_SUCCESS;
1548255736Sdavidch}
1549255736Sdavidch
1550255736Sdavidch/**
1551255736Sdavidch * ecore_validate_vlan_mac_del - check if the DEL command can be executed
1552255736Sdavidch *
1553255736Sdavidch * @sc:		device handle
1554255736Sdavidch * @qo:		quable object to check
1555255736Sdavidch * @elem:	element that needs to be deleted
1556255736Sdavidch *
1557255736Sdavidch * Checks that the requested configuration can be deleted. If yes and if
1558255736Sdavidch * requested, returns a CAM credit.
1559255736Sdavidch *
1560255736Sdavidch * The 'validate' is run after the 'optimize'.
1561255736Sdavidch */
1562255736Sdavidchstatic inline int ecore_validate_vlan_mac_del(struct bxe_softc *sc,
1563255736Sdavidch					      union ecore_qable_obj *qo,
1564255736Sdavidch					      struct ecore_exeq_elem *elem)
1565255736Sdavidch{
1566255736Sdavidch	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1567255736Sdavidch	struct ecore_vlan_mac_registry_elem *pos;
1568255736Sdavidch	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1569255736Sdavidch	struct ecore_exeq_elem query_elem;
1570255736Sdavidch
1571255736Sdavidch	/* If this classification can not be deleted (doesn't exist)
1572255736Sdavidch	 * - return a ECORE_EXIST.
1573255736Sdavidch	 */
1574255736Sdavidch	pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1575255736Sdavidch	if (!pos) {
1576255736Sdavidch		ECORE_MSG(sc, "DEL command is not allowed considering current registry state\n");
1577255736Sdavidch		return ECORE_EXISTS;
1578255736Sdavidch	}
1579255736Sdavidch
1580255736Sdavidch	/* Check if there are pending DEL or MOVE commands for this
1581255736Sdavidch	 * MAC/VLAN/VLAN-MAC. Return an error if so.
1582255736Sdavidch	 */
1583255736Sdavidch	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1584255736Sdavidch
1585255736Sdavidch	/* Check for MOVE commands */
1586255736Sdavidch	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
1587255736Sdavidch	if (exeq->get(exeq, &query_elem)) {
1588255736Sdavidch		ECORE_ERR("There is a pending MOVE command already\n");
1589255736Sdavidch		return ECORE_INVAL;
1590255736Sdavidch	}
1591255736Sdavidch
1592255736Sdavidch	/* Check for DEL commands */
1593255736Sdavidch	if (exeq->get(exeq, elem)) {
1594255736Sdavidch		ECORE_MSG(sc, "There is a pending DEL command already\n");
1595255736Sdavidch		return ECORE_EXISTS;
1596255736Sdavidch	}
1597255736Sdavidch
1598255736Sdavidch	/* Return the credit to the credit pool if not requested not to */
1599255736Sdavidch	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1600255736Sdavidch			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1601255736Sdavidch	    o->put_credit(o))) {
1602255736Sdavidch		ECORE_ERR("Failed to return a credit\n");
1603255736Sdavidch		return ECORE_INVAL;
1604255736Sdavidch	}
1605255736Sdavidch
1606255736Sdavidch	return ECORE_SUCCESS;
1607255736Sdavidch}
1608255736Sdavidch
1609255736Sdavidch/**
1610255736Sdavidch * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
1611255736Sdavidch *
1612255736Sdavidch * @sc:		device handle
1613255736Sdavidch * @qo:		quable object to check (source)
1614255736Sdavidch * @elem:	element that needs to be moved
1615255736Sdavidch *
1616255736Sdavidch * Checks that the requested configuration can be moved. If yes and if
1617255736Sdavidch * requested, returns a CAM credit.
1618255736Sdavidch *
1619255736Sdavidch * The 'validate' is run after the 'optimize'.
1620255736Sdavidch */
1621255736Sdavidchstatic inline int ecore_validate_vlan_mac_move(struct bxe_softc *sc,
1622255736Sdavidch					       union ecore_qable_obj *qo,
1623255736Sdavidch					       struct ecore_exeq_elem *elem)
1624255736Sdavidch{
1625255736Sdavidch	struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
1626255736Sdavidch	struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1627255736Sdavidch	struct ecore_exeq_elem query_elem;
1628255736Sdavidch	struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
1629255736Sdavidch	struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1630255736Sdavidch
1631255736Sdavidch	/* Check if we can perform this operation based on the current registry
1632255736Sdavidch	 * state.
1633255736Sdavidch	 */
1634255736Sdavidch	if (!src_o->check_move(sc, src_o, dest_o,
1635255736Sdavidch			       &elem->cmd_data.vlan_mac.u)) {
1636255736Sdavidch		ECORE_MSG(sc, "MOVE command is not allowed considering current registry state\n");
1637255736Sdavidch		return ECORE_INVAL;
1638255736Sdavidch	}
1639255736Sdavidch
1640255736Sdavidch	/* Check if there is an already pending DEL or MOVE command for the
1641255736Sdavidch	 * source object or ADD command for a destination object. Return an
1642255736Sdavidch	 * error if so.
1643255736Sdavidch	 */
1644255736Sdavidch	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1645255736Sdavidch
1646255736Sdavidch	/* Check DEL on source */
1647255736Sdavidch	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1648255736Sdavidch	if (src_exeq->get(src_exeq, &query_elem)) {
1649255736Sdavidch		ECORE_ERR("There is a pending DEL command on the source queue already\n");
1650255736Sdavidch		return ECORE_INVAL;
1651255736Sdavidch	}
1652255736Sdavidch
1653255736Sdavidch	/* Check MOVE on source */
1654255736Sdavidch	if (src_exeq->get(src_exeq, elem)) {
1655255736Sdavidch		ECORE_MSG(sc, "There is a pending MOVE command already\n");
1656255736Sdavidch		return ECORE_EXISTS;
1657255736Sdavidch	}
1658255736Sdavidch
1659255736Sdavidch	/* Check ADD on destination */
1660255736Sdavidch	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1661255736Sdavidch	if (dest_exeq->get(dest_exeq, &query_elem)) {
1662255736Sdavidch		ECORE_ERR("There is a pending ADD command on the destination queue already\n");
1663255736Sdavidch		return ECORE_INVAL;
1664255736Sdavidch	}
1665255736Sdavidch
1666255736Sdavidch	/* Consume the credit if not requested not to */
1667255736Sdavidch	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
1668255736Sdavidch			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1669255736Sdavidch	    dest_o->get_credit(dest_o)))
1670255736Sdavidch		return ECORE_INVAL;
1671255736Sdavidch
1672255736Sdavidch	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1673255736Sdavidch			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1674255736Sdavidch	    src_o->put_credit(src_o))) {
1675255736Sdavidch		/* return the credit taken from dest... */
1676255736Sdavidch		dest_o->put_credit(dest_o);
1677255736Sdavidch		return ECORE_INVAL;
1678255736Sdavidch	}
1679255736Sdavidch
1680255736Sdavidch	return ECORE_SUCCESS;
1681255736Sdavidch}
1682255736Sdavidch
1683255736Sdavidchstatic int ecore_validate_vlan_mac(struct bxe_softc *sc,
1684255736Sdavidch				   union ecore_qable_obj *qo,
1685255736Sdavidch				   struct ecore_exeq_elem *elem)
1686255736Sdavidch{
1687255736Sdavidch	switch (elem->cmd_data.vlan_mac.cmd) {
1688255736Sdavidch	case ECORE_VLAN_MAC_ADD:
1689255736Sdavidch		return ecore_validate_vlan_mac_add(sc, qo, elem);
1690255736Sdavidch	case ECORE_VLAN_MAC_DEL:
1691255736Sdavidch		return ecore_validate_vlan_mac_del(sc, qo, elem);
1692255736Sdavidch	case ECORE_VLAN_MAC_MOVE:
1693255736Sdavidch		return ecore_validate_vlan_mac_move(sc, qo, elem);
1694255736Sdavidch	default:
1695255736Sdavidch		return ECORE_INVAL;
1696255736Sdavidch	}
1697255736Sdavidch}
1698255736Sdavidch
1699255736Sdavidchstatic int ecore_remove_vlan_mac(struct bxe_softc *sc,
1700255736Sdavidch				  union ecore_qable_obj *qo,
1701255736Sdavidch				  struct ecore_exeq_elem *elem)
1702255736Sdavidch{
1703255736Sdavidch	int rc = 0;
1704255736Sdavidch
1705255736Sdavidch	/* If consumption wasn't required, nothing to do */
1706255736Sdavidch	if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1707255736Sdavidch			   &elem->cmd_data.vlan_mac.vlan_mac_flags))
1708255736Sdavidch		return ECORE_SUCCESS;
1709255736Sdavidch
1710255736Sdavidch	switch (elem->cmd_data.vlan_mac.cmd) {
1711255736Sdavidch	case ECORE_VLAN_MAC_ADD:
1712255736Sdavidch	case ECORE_VLAN_MAC_MOVE:
1713255736Sdavidch		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1714255736Sdavidch		break;
1715255736Sdavidch	case ECORE_VLAN_MAC_DEL:
1716255736Sdavidch		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1717255736Sdavidch		break;
1718255736Sdavidch	default:
1719255736Sdavidch		return ECORE_INVAL;
1720255736Sdavidch	}
1721255736Sdavidch
1722255736Sdavidch	if (rc != TRUE)
1723255736Sdavidch		return ECORE_INVAL;
1724255736Sdavidch
1725255736Sdavidch	return ECORE_SUCCESS;
1726255736Sdavidch}
1727255736Sdavidch
1728255736Sdavidch/**
1729255736Sdavidch * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1730255736Sdavidch *
1731255736Sdavidch * @sc:		device handle
1732255736Sdavidch * @o:		ecore_vlan_mac_obj
1733255736Sdavidch *
1734255736Sdavidch */
1735255736Sdavidchstatic int ecore_wait_vlan_mac(struct bxe_softc *sc,
1736255736Sdavidch			       struct ecore_vlan_mac_obj *o)
1737255736Sdavidch{
1738255736Sdavidch	int cnt = 5000, rc;
1739255736Sdavidch	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1740255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
1741255736Sdavidch
1742255736Sdavidch	while (cnt--) {
1743255736Sdavidch		/* Wait for the current command to complete */
1744255736Sdavidch		rc = raw->wait_comp(sc, raw);
1745255736Sdavidch		if (rc)
1746255736Sdavidch			return rc;
1747255736Sdavidch
1748255736Sdavidch		/* Wait until there are no pending commands */
1749255736Sdavidch		if (!ecore_exe_queue_empty(exeq))
1750255736Sdavidch			ECORE_WAIT(sc, 1000);
1751255736Sdavidch		else
1752255736Sdavidch			return ECORE_SUCCESS;
1753255736Sdavidch	}
1754255736Sdavidch
1755255736Sdavidch	return ECORE_TIMEOUT;
1756255736Sdavidch}
1757255736Sdavidch
1758255736Sdavidchstatic int __ecore_vlan_mac_execute_step(struct bxe_softc *sc,
1759255736Sdavidch					 struct ecore_vlan_mac_obj *o,
1760255736Sdavidch					 unsigned long *ramrod_flags)
1761255736Sdavidch{
1762255736Sdavidch	int rc = ECORE_SUCCESS;
1763255736Sdavidch
1764255736Sdavidch	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1765255736Sdavidch
1766255736Sdavidch	ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock\n");
1767255736Sdavidch	rc = __ecore_vlan_mac_h_write_trylock(sc, o);
1768255736Sdavidch
1769255736Sdavidch	if (rc != ECORE_SUCCESS) {
1770255736Sdavidch		__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
1771255736Sdavidch
1772255736Sdavidch		/** Calling function should not diffrentiate between this case
1773255736Sdavidch		 *  and the case in which there is already a pending ramrod
1774255736Sdavidch		 */
1775255736Sdavidch		rc = ECORE_PENDING;
1776255736Sdavidch	} else {
1777258187Sedavis		rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
1778255736Sdavidch	}
1779255736Sdavidch	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1780255736Sdavidch
1781255736Sdavidch	return rc;
1782255736Sdavidch}
1783255736Sdavidch
1784255736Sdavidch/**
1785255736Sdavidch * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
1786255736Sdavidch *
1787255736Sdavidch * @sc:		device handle
1788255736Sdavidch * @o:		ecore_vlan_mac_obj
1789255736Sdavidch * @cqe:
1790255736Sdavidch * @cont:	if TRUE schedule next execution chunk
1791255736Sdavidch *
1792255736Sdavidch */
1793255736Sdavidchstatic int ecore_complete_vlan_mac(struct bxe_softc *sc,
1794255736Sdavidch				   struct ecore_vlan_mac_obj *o,
1795255736Sdavidch				   union event_ring_elem *cqe,
1796255736Sdavidch				   unsigned long *ramrod_flags)
1797255736Sdavidch{
1798255736Sdavidch	struct ecore_raw_obj *r = &o->raw;
1799255736Sdavidch	int rc;
1800255736Sdavidch
1801258187Sedavis	/* Clearing the pending list & raw state should be made
1802258187Sedavis	 * atomically (as execution flow assumes they represent the same)
1803258187Sedavis	 */
1804258187Sedavis	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1805258187Sedavis
1806255736Sdavidch	/* Reset pending list */
1807258187Sedavis	__ecore_exe_queue_reset_pending(sc, &o->exe_queue);
1808255736Sdavidch
1809255736Sdavidch	/* Clear pending */
1810255736Sdavidch	r->clear_pending(r);
1811255736Sdavidch
1812258187Sedavis	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1813258187Sedavis
1814255736Sdavidch	/* If ramrod failed this is most likely a SW bug */
1815255736Sdavidch	if (cqe->message.error)
1816255736Sdavidch		return ECORE_INVAL;
1817255736Sdavidch
1818255736Sdavidch	/* Run the next bulk of pending commands if requested */
1819255736Sdavidch	if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
1820255736Sdavidch		rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
1821255736Sdavidch		if (rc < 0)
1822255736Sdavidch			return rc;
1823255736Sdavidch	}
1824255736Sdavidch
1825255736Sdavidch	/* If there is more work to do return PENDING */
1826255736Sdavidch	if (!ecore_exe_queue_empty(&o->exe_queue))
1827255736Sdavidch		return ECORE_PENDING;
1828255736Sdavidch
1829255736Sdavidch	return ECORE_SUCCESS;
1830255736Sdavidch}
1831255736Sdavidch
1832255736Sdavidch/**
1833255736Sdavidch * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1834255736Sdavidch *
1835255736Sdavidch * @sc:		device handle
1836255736Sdavidch * @o:		ecore_qable_obj
1837255736Sdavidch * @elem:	ecore_exeq_elem
1838255736Sdavidch */
1839255736Sdavidchstatic int ecore_optimize_vlan_mac(struct bxe_softc *sc,
1840255736Sdavidch				   union ecore_qable_obj *qo,
1841255736Sdavidch				   struct ecore_exeq_elem *elem)
1842255736Sdavidch{
1843255736Sdavidch	struct ecore_exeq_elem query, *pos;
1844255736Sdavidch	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1845255736Sdavidch	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1846255736Sdavidch
1847255736Sdavidch	ECORE_MEMCPY(&query, elem, sizeof(query));
1848255736Sdavidch
1849255736Sdavidch	switch (elem->cmd_data.vlan_mac.cmd) {
1850255736Sdavidch	case ECORE_VLAN_MAC_ADD:
1851255736Sdavidch		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1852255736Sdavidch		break;
1853255736Sdavidch	case ECORE_VLAN_MAC_DEL:
1854255736Sdavidch		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1855255736Sdavidch		break;
1856255736Sdavidch	default:
1857255736Sdavidch		/* Don't handle anything other than ADD or DEL */
1858255736Sdavidch		return 0;
1859255736Sdavidch	}
1860255736Sdavidch
1861255736Sdavidch	/* If we found the appropriate element - delete it */
1862255736Sdavidch	pos = exeq->get(exeq, &query);
1863255736Sdavidch	if (pos) {
1864255736Sdavidch
1865255736Sdavidch		/* Return the credit of the optimized command */
1866255736Sdavidch		if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1867255736Sdavidch				     &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1868255736Sdavidch			if ((query.cmd_data.vlan_mac.cmd ==
1869255736Sdavidch			     ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1870255736Sdavidch				ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
1871255736Sdavidch				return ECORE_INVAL;
1872255736Sdavidch			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1873255736Sdavidch				ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
1874255736Sdavidch				return ECORE_INVAL;
1875255736Sdavidch			}
1876255736Sdavidch		}
1877255736Sdavidch
1878255736Sdavidch		ECORE_MSG(sc, "Optimizing %s command\n",
1879255736Sdavidch			  (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1880255736Sdavidch			  "ADD" : "DEL");
1881255736Sdavidch
1882255736Sdavidch		ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1883255736Sdavidch		ecore_exe_queue_free_elem(sc, pos);
1884255736Sdavidch		return 1;
1885255736Sdavidch	}
1886255736Sdavidch
1887255736Sdavidch	return 0;
1888255736Sdavidch}
1889255736Sdavidch
1890255736Sdavidch/**
1891255736Sdavidch * ecore_vlan_mac_get_registry_elem - prepare a registry element
1892255736Sdavidch *
1893255736Sdavidch * @sc:	  device handle
1894255736Sdavidch * @o:
1895255736Sdavidch * @elem:
1896255736Sdavidch * @restore:
1897255736Sdavidch * @re:
1898255736Sdavidch *
1899255736Sdavidch * prepare a registry element according to the current command request.
1900255736Sdavidch */
1901255736Sdavidchstatic inline int ecore_vlan_mac_get_registry_elem(
1902255736Sdavidch	struct bxe_softc *sc,
1903255736Sdavidch	struct ecore_vlan_mac_obj *o,
1904255736Sdavidch	struct ecore_exeq_elem *elem,
1905255736Sdavidch	bool restore,
1906255736Sdavidch	struct ecore_vlan_mac_registry_elem **re)
1907255736Sdavidch{
1908255736Sdavidch	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1909255736Sdavidch	struct ecore_vlan_mac_registry_elem *reg_elem;
1910255736Sdavidch
1911255736Sdavidch	/* Allocate a new registry element if needed. */
1912255736Sdavidch	if (!restore &&
1913255736Sdavidch	    ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1914255736Sdavidch		reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1915255736Sdavidch		if (!reg_elem)
1916255736Sdavidch			return ECORE_NOMEM;
1917255736Sdavidch
1918255736Sdavidch		/* Get a new CAM offset */
1919255736Sdavidch		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1920255736Sdavidch			/* This shall never happen, because we have checked the
1921255736Sdavidch			 * CAM availability in the 'validate'.
1922255736Sdavidch			 */
1923255736Sdavidch			ECORE_DBG_BREAK_IF(1);
1924255736Sdavidch			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1925255736Sdavidch			return ECORE_INVAL;
1926255736Sdavidch		}
1927255736Sdavidch
1928255736Sdavidch		ECORE_MSG(sc, "Got cam offset %d\n", reg_elem->cam_offset);
1929255736Sdavidch
1930255736Sdavidch		/* Set a VLAN-MAC data */
1931255736Sdavidch		ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1932255736Sdavidch			  sizeof(reg_elem->u));
1933255736Sdavidch
1934255736Sdavidch		/* Copy the flags (needed for DEL and RESTORE flows) */
1935255736Sdavidch		reg_elem->vlan_mac_flags =
1936255736Sdavidch			elem->cmd_data.vlan_mac.vlan_mac_flags;
1937255736Sdavidch	} else /* DEL, RESTORE */
1938255736Sdavidch		reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1939255736Sdavidch
1940255736Sdavidch	*re = reg_elem;
1941255736Sdavidch	return ECORE_SUCCESS;
1942255736Sdavidch}
1943255736Sdavidch
/**
 * ecore_execute_vlan_mac - execute vlan mac command
 *
 * @sc:			device handle
 * @qo:			qable object; its vlan_mac member is operated on
 * @exe_chunk:		list of ecore_exeq_elem commands to execute together
 * @ramrod_flags:	RAMROD_RESTORE and RAMROD_DRV_CLR_ONLY are honored
 *
 * go and send a ramrod!
 *
 * Fills the ramrod data buffer from @exe_chunk, updates the registry, and
 * posts the ramrod (unless DRV_CLR_ONLY, in which case only the registry
 * is updated).  On any failure the registry entries added here are rolled
 * back.  Returns ECORE_PENDING when a ramrod was posted, ECORE_SUCCESS for
 * a driver-only update, or an error code.
 */
static int ecore_execute_vlan_mac(struct bxe_softc *sc,
				  union ecore_qable_obj *qo,
				  ecore_list_t *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem;
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct ecore_raw_obj *r = &o->raw;
	int rc, idx = 0;
	/* restore: re-adding entries that are already in the registry,
	 * so no new registry entries are created below.
	 */
	bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct ecore_vlan_mac_registry_elem *reg_elem;
	enum ecore_vlan_mac_cmd cmd;

	/* If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		/* Any previous ramrod must have completed by now */
		ECORE_DBG_BREAK_IF(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
					  struct ecore_exeq_elem) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/* We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == ECORE_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			/* Allocate (ADD/MOVE) or look up (DEL/RESTORE) the
			 * registry element for this command.
			 */
			rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			ECORE_DBG_BREAK_IF(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == ECORE_VLAN_MAC_ADD) ||
			    (cmd == ECORE_VLAN_MAC_MOVE)))
				ECORE_LIST_PUSH_HEAD(&reg_elem->link,
						     &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(sc, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == ECORE_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/* No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 *  and updating of the SPQ producer which involves a memory
		 * read. If the memory read is removed we will have to put a
		 * full memory barrier there (inside ecore_sp_post()).
		 */
		rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
				   r->rdata_mapping,
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry:
	 * DEL and MOVE remove the entry from this (source) object's
	 * registry and release its CAM slot.
	 */
	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
				  struct ecore_exeq_elem) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == ECORE_VLAN_MAC_DEL) ||
		    (cmd == ECORE_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(sc, o,
						&elem->cmd_data.vlan_mac.u);

			ECORE_DBG_BREAK_IF(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
		}
	}

	if (!drv_only)
		return ECORE_PENDING;
	else
		return ECORE_SUCCESS;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
				  struct ecore_exeq_elem) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == ECORE_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == ECORE_VLAN_MAC_ADD) ||
		    (cmd == ECORE_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(sc, cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
							&cam_obj->head);
				ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
			}
		}
	}

	return rc;
}
2079255736Sdavidch
2080255736Sdavidchstatic inline int ecore_vlan_mac_push_new_cmd(
2081255736Sdavidch	struct bxe_softc *sc,
2082255736Sdavidch	struct ecore_vlan_mac_ramrod_params *p)
2083255736Sdavidch{
2084255736Sdavidch	struct ecore_exeq_elem *elem;
2085255736Sdavidch	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
2086255736Sdavidch	bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
2087255736Sdavidch
2088255736Sdavidch	/* Allocate the execution queue element */
2089255736Sdavidch	elem = ecore_exe_queue_alloc_elem(sc);
2090255736Sdavidch	if (!elem)
2091255736Sdavidch		return ECORE_NOMEM;
2092255736Sdavidch
2093255736Sdavidch	/* Set the command 'length' */
2094255736Sdavidch	switch (p->user_req.cmd) {
2095255736Sdavidch	case ECORE_VLAN_MAC_MOVE:
2096255736Sdavidch		elem->cmd_len = 2;
2097255736Sdavidch		break;
2098255736Sdavidch	default:
2099255736Sdavidch		elem->cmd_len = 1;
2100255736Sdavidch	}
2101255736Sdavidch
2102255736Sdavidch	/* Fill the object specific info */
2103255736Sdavidch	ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
2104255736Sdavidch
2105255736Sdavidch	/* Try to add a new command to the pending list */
2106255736Sdavidch	return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
2107255736Sdavidch}
2108255736Sdavidch
/**
 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @sc:	  device handle
 * @p:	  ramrod parameters: the vlan_mac object to act on, the user
 *	  request (command plus address data) and the RAMROD_* execution
 *	  flags controlling queueing, execution and completion waiting.
 *
 * Returns a negative value on failure, ECORE_PENDING while commands are
 * still queued for execution, ECORE_SUCCESS otherwise.
 */
int ecore_config_vlan_mac(struct bxe_softc *sc,
			   struct ecore_vlan_mac_ramrod_params *p)
{
	int rc = ECORE_SUCCESS;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	/* RAMROD_CONT means "continue executing what is already queued"
	 * rather than adding a new command to the queue.
	 */
	bool cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
	struct ecore_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = ecore_vlan_mac_push_new_cmd(sc, p);
		if (rc)
			return rc;
	}

	/* If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		rc = ECORE_PENDING;

	/* Driver-only clear: drop the pending bit without touching HW. */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
		ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
	    ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
						   &p->ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the caller wants to wait until the last command is done.
	 */
	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/* Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;

		while (!ecore_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(sc, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = __ecore_vlan_mac_execute_step(sc,
							   p->vlan_mac_obj,
							   &p->ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return ECORE_SUCCESS;
	}

	return rc;
}
2184255736Sdavidch
/**
 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @sc:			device handle
 * @o:			vlan_mac object whose queue and registry are scanned
 * @vlan_mac_flags:	only elements whose masked flags match these are
 *			deleted (see ECORE_VLAN_MAC_CMP_FLAGS)
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns ECORE_SUCCESS if the last operation has completed successfully
 * and there are no more elements left, a positive value if the last
 * operation has completed successfully and there are more previously
 * configured elements, or a negative value if the current operation has
 * failed.
 */
static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct ecore_vlan_mac_registry_elem *pos = NULL;
	struct ecore_vlan_mac_ramrod_params p;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
	unsigned long flags;
	int read_lock;
	int rc = 0;

	/* Clear pending commands first */

	ECORE_SPIN_LOCK_BH(&exeq->lock);

	/* Phase 1: drop every not-yet-executed queued command whose
	 * masked flags match the requested spec.
	 */
	ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
				       &exeq->exe_queue, link,
				       struct ecore_exeq_elem) {
		flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
		if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
		    ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
			/* Let the owner undo the command before unlinking;
			 * abort (still holding no element) if that fails.
			 */
			rc = exeq->remove(sc, exeq->owner, exeq_pos);
			if (rc) {
				ECORE_ERR("Failed to remove command\n");
				ECORE_SPIN_UNLOCK_BH(&exeq->lock);
				return rc;
			}
			ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
						&exeq->exe_queue);
			ecore_exe_queue_free_elem(sc, exeq_pos);
		}
	}

	ECORE_SPIN_UNLOCK_BH(&exeq->lock);

	/* Prepare a command request */
	ECORE_MEMSET(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = ECORE_VLAN_MAC_DEL;

	/* Add all but the last VLAN-MAC to the execution queue without actually
	 * execution anything.
	 */
	ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
	ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
	ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);

	ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
	if (read_lock != ECORE_SUCCESS)
		return read_lock;

	/* Phase 2: queue a DEL command for every matching element in the
	 * registry (configured entries), still without executing.
	 */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem) {
		flags = pos->vlan_mac_flags;
		if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
		    ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = ecore_config_vlan_mac(sc, &p);
			if (rc < 0) {
				ECORE_ERR("Failed to add a new DEL command\n");
				ecore_vlan_mac_h_read_unlock(sc, o);
				return rc;
			}
		}
	}

	ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
	ecore_vlan_mac_h_read_unlock(sc, o);

	/* Phase 3: execute the whole queue of DEL commands using the
	 * caller's original flags plus RAMROD_CONT.
	 */
	p.ramrod_flags = *ramrod_flags;
	ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);

	return ecore_config_vlan_mac(sc, &p);
}
2277255736Sdavidch
2278255736Sdavidchstatic inline void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
2279255736Sdavidch	uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping, int state,
2280255736Sdavidch	unsigned long *pstate, ecore_obj_type type)
2281255736Sdavidch{
2282255736Sdavidch	raw->func_id = func_id;
2283255736Sdavidch	raw->cid = cid;
2284255736Sdavidch	raw->cl_id = cl_id;
2285255736Sdavidch	raw->rdata = rdata;
2286255736Sdavidch	raw->rdata_mapping = rdata_mapping;
2287255736Sdavidch	raw->state = state;
2288255736Sdavidch	raw->pstate = pstate;
2289255736Sdavidch	raw->obj_type = type;
2290255736Sdavidch	raw->check_pending = ecore_raw_check_pending;
2291255736Sdavidch	raw->clear_pending = ecore_raw_clear_pending;
2292255736Sdavidch	raw->set_pending = ecore_raw_set_pending;
2293255736Sdavidch	raw->wait_comp = ecore_raw_wait;
2294255736Sdavidch}
2295255736Sdavidch
2296255736Sdavidchstatic inline void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
2297255736Sdavidch	uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping,
2298255736Sdavidch	int state, unsigned long *pstate, ecore_obj_type type,
2299255736Sdavidch	struct ecore_credit_pool_obj *macs_pool,
2300255736Sdavidch	struct ecore_credit_pool_obj *vlans_pool)
2301255736Sdavidch{
2302255736Sdavidch	ECORE_LIST_INIT(&o->head);
2303255736Sdavidch	o->head_reader = 0;
2304255736Sdavidch	o->head_exe_request = FALSE;
2305255736Sdavidch	o->saved_ramrod_flags = 0;
2306255736Sdavidch
2307255736Sdavidch	o->macs_pool = macs_pool;
2308255736Sdavidch	o->vlans_pool = vlans_pool;
2309255736Sdavidch
2310255736Sdavidch	o->delete_all = ecore_vlan_mac_del_all;
2311255736Sdavidch	o->restore = ecore_vlan_mac_restore;
2312255736Sdavidch	o->complete = ecore_complete_vlan_mac;
2313255736Sdavidch	o->wait = ecore_wait_vlan_mac;
2314255736Sdavidch
2315255736Sdavidch	ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2316255736Sdavidch			   state, pstate, type);
2317255736Sdavidch}
2318255736Sdavidch
2319255736Sdavidchvoid ecore_init_mac_obj(struct bxe_softc *sc,
2320255736Sdavidch			struct ecore_vlan_mac_obj *mac_obj,
2321255736Sdavidch			uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2322255736Sdavidch			ecore_dma_addr_t rdata_mapping, int state,
2323255736Sdavidch			unsigned long *pstate, ecore_obj_type type,
2324255736Sdavidch			struct ecore_credit_pool_obj *macs_pool)
2325255736Sdavidch{
2326255736Sdavidch	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
2327255736Sdavidch
2328255736Sdavidch	ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2329255736Sdavidch				   rdata_mapping, state, pstate, type,
2330255736Sdavidch				   macs_pool, NULL);
2331255736Sdavidch
2332255736Sdavidch	/* CAM credit pool handling */
2333255736Sdavidch	mac_obj->get_credit = ecore_get_credit_mac;
2334255736Sdavidch	mac_obj->put_credit = ecore_put_credit_mac;
2335255736Sdavidch	mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2336255736Sdavidch	mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2337255736Sdavidch
2338255736Sdavidch	if (CHIP_IS_E1x(sc)) {
2339255736Sdavidch		mac_obj->set_one_rule      = ecore_set_one_mac_e1x;
2340255736Sdavidch		mac_obj->check_del         = ecore_check_mac_del;
2341255736Sdavidch		mac_obj->check_add         = ecore_check_mac_add;
2342255736Sdavidch		mac_obj->check_move        = ecore_check_move_always_err;
2343255736Sdavidch		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2344255736Sdavidch
2345255736Sdavidch		/* Exe Queue */
2346255736Sdavidch		ecore_exe_queue_init(sc,
2347255736Sdavidch				     &mac_obj->exe_queue, 1, qable_obj,
2348255736Sdavidch				     ecore_validate_vlan_mac,
2349255736Sdavidch				     ecore_remove_vlan_mac,
2350255736Sdavidch				     ecore_optimize_vlan_mac,
2351255736Sdavidch				     ecore_execute_vlan_mac,
2352255736Sdavidch				     ecore_exeq_get_mac);
2353255736Sdavidch	} else {
2354255736Sdavidch		mac_obj->set_one_rule      = ecore_set_one_mac_e2;
2355255736Sdavidch		mac_obj->check_del         = ecore_check_mac_del;
2356255736Sdavidch		mac_obj->check_add         = ecore_check_mac_add;
2357255736Sdavidch		mac_obj->check_move        = ecore_check_move;
2358255736Sdavidch		mac_obj->ramrod_cmd        =
2359255736Sdavidch			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2360255736Sdavidch		mac_obj->get_n_elements    = ecore_get_n_elements;
2361255736Sdavidch
2362255736Sdavidch		/* Exe Queue */
2363255736Sdavidch		ecore_exe_queue_init(sc,
2364255736Sdavidch				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2365255736Sdavidch				     qable_obj, ecore_validate_vlan_mac,
2366255736Sdavidch				     ecore_remove_vlan_mac,
2367255736Sdavidch				     ecore_optimize_vlan_mac,
2368255736Sdavidch				     ecore_execute_vlan_mac,
2369255736Sdavidch				     ecore_exeq_get_mac);
2370255736Sdavidch	}
2371255736Sdavidch}
2372255736Sdavidch
2373255736Sdavidchvoid ecore_init_vlan_obj(struct bxe_softc *sc,
2374255736Sdavidch			 struct ecore_vlan_mac_obj *vlan_obj,
2375255736Sdavidch			 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2376255736Sdavidch			 ecore_dma_addr_t rdata_mapping, int state,
2377255736Sdavidch			 unsigned long *pstate, ecore_obj_type type,
2378255736Sdavidch			 struct ecore_credit_pool_obj *vlans_pool)
2379255736Sdavidch{
2380255736Sdavidch	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj;
2381255736Sdavidch
2382255736Sdavidch	ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2383255736Sdavidch				   rdata_mapping, state, pstate, type, NULL,
2384255736Sdavidch				   vlans_pool);
2385255736Sdavidch
2386255736Sdavidch	vlan_obj->get_credit = ecore_get_credit_vlan;
2387255736Sdavidch	vlan_obj->put_credit = ecore_put_credit_vlan;
2388255736Sdavidch	vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan;
2389255736Sdavidch	vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan;
2390255736Sdavidch
2391255736Sdavidch	if (CHIP_IS_E1x(sc)) {
2392255736Sdavidch		ECORE_ERR("Do not support chips others than E2 and newer\n");
2393255736Sdavidch		ECORE_BUG();
2394255736Sdavidch	} else {
2395255736Sdavidch		vlan_obj->set_one_rule      = ecore_set_one_vlan_e2;
2396255736Sdavidch		vlan_obj->check_del         = ecore_check_vlan_del;
2397255736Sdavidch		vlan_obj->check_add         = ecore_check_vlan_add;
2398255736Sdavidch		vlan_obj->check_move        = ecore_check_move;
2399255736Sdavidch		vlan_obj->ramrod_cmd        =
2400255736Sdavidch			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2401255736Sdavidch		vlan_obj->get_n_elements    = ecore_get_n_elements;
2402255736Sdavidch
2403255736Sdavidch		/* Exe Queue */
2404255736Sdavidch		ecore_exe_queue_init(sc,
2405255736Sdavidch				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2406255736Sdavidch				     qable_obj, ecore_validate_vlan_mac,
2407255736Sdavidch				     ecore_remove_vlan_mac,
2408255736Sdavidch				     ecore_optimize_vlan_mac,
2409255736Sdavidch				     ecore_execute_vlan_mac,
2410255736Sdavidch				     ecore_exeq_get_vlan);
2411255736Sdavidch	}
2412255736Sdavidch}
2413255736Sdavidch
/* Initialize a paired VLAN+MAC classification object.  Credit is taken
 * through the vlan_mac helpers (both pools are attached); CAM offsets
 * always come from the MACs pool (see comment below).  E1 is rejected;
 * E1H uses the legacy SET_MAC ramrod, E2+ the classification-rules one.
 */
void ecore_init_vlan_mac_obj(struct bxe_softc *sc,
			     struct ecore_vlan_mac_obj *vlan_mac_obj,
			     uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
			     ecore_dma_addr_t rdata_mapping, int state,
			     unsigned long *pstate, ecore_obj_type type,
			     struct ecore_credit_pool_obj *macs_pool,
			     struct ecore_credit_pool_obj *vlans_pool)
{
	union ecore_qable_obj *qable_obj =
		(union ecore_qable_obj *)vlan_mac_obj;

	ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, vlans_pool);

	/* CAM pool handling */
	vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
	vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
	/* CAM offset is relevant for 57710 and 57711 chips only which have a
	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
	 * will be taken from MACs' pool object only.
	 */
	vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
	vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;

	if (CHIP_IS_E1(sc)) {
		ECORE_ERR("Do not support chips others than E2\n");
		ECORE_BUG();
	} else if (CHIP_IS_E1H(sc)) {
		/* E1H: one rule per SET_MAC ramrod; check_move always
		 * fails (MOVE unsupported on this chip).
		 */
		vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e1h;
		vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
		vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
		vlan_mac_obj->check_move        = ecore_check_move_always_err;
		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		ecore_exe_queue_init(sc,
				     &vlan_mac_obj->exe_queue, 1, qable_obj,
				     ecore_validate_vlan_mac,
				     ecore_remove_vlan_mac,
				     ecore_optimize_vlan_mac,
				     ecore_execute_vlan_mac,
				     ecore_exeq_get_vlan_mac);
	} else {
		/* E2 and newer: batched classification-rules ramrod. */
		vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e2;
		vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
		vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
		vlan_mac_obj->check_move        = ecore_check_move;
		vlan_mac_obj->ramrod_cmd        =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		ecore_exe_queue_init(sc,
				     &vlan_mac_obj->exe_queue,
				     CLASSIFY_RULES_COUNT,
				     qable_obj, ecore_validate_vlan_mac,
				     ecore_remove_vlan_mac,
				     ecore_optimize_vlan_mac,
				     ecore_execute_vlan_mac,
				     ecore_exeq_get_vlan_mac);
	}
}
2476255736Sdavidch
2477296071Sdavidcsvoid ecore_init_vxlan_fltr_obj(struct bxe_softc *sc,
2478296071Sdavidcs				struct ecore_vlan_mac_obj *vlan_mac_obj,
2479296071Sdavidcs				uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2480296071Sdavidcs				ecore_dma_addr_t rdata_mapping, int state,
2481296071Sdavidcs				unsigned long *pstate, ecore_obj_type type,
2482296071Sdavidcs				struct ecore_credit_pool_obj *macs_pool,
2483296071Sdavidcs				struct ecore_credit_pool_obj *vlans_pool)
2484296071Sdavidcs{
2485296071Sdavidcs	union ecore_qable_obj *qable_obj =
2486296071Sdavidcs		(union ecore_qable_obj *)vlan_mac_obj;
2487296071Sdavidcs
2488296071Sdavidcs	ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id,
2489296071Sdavidcs				   rdata, rdata_mapping, state, pstate,
2490296071Sdavidcs				   type, macs_pool, vlans_pool);
2491296071Sdavidcs
2492296071Sdavidcs	/* CAM pool handling */
2493296071Sdavidcs	vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
2494296071Sdavidcs	vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2495296071Sdavidcs	/* CAM offset is relevant for 57710 and 57711 chips only which have a
2496296071Sdavidcs	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2497296071Sdavidcs	 * will be taken from MACs' pool object only.
2498296071Sdavidcs	 */
2499296071Sdavidcs	vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2500296071Sdavidcs	vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2501296071Sdavidcs
2502296071Sdavidcs	if (CHIP_IS_E1x(sc)) {
2503296071Sdavidcs		ECORE_ERR("Do not support chips others than E2/E3\n");
2504296071Sdavidcs		ECORE_BUG();
2505296071Sdavidcs	} else {
2506296071Sdavidcs		vlan_mac_obj->set_one_rule      = ecore_set_one_vxlan_fltr_e2;
2507296071Sdavidcs		vlan_mac_obj->check_del         = ecore_check_vxlan_fltr_del;
2508296071Sdavidcs		vlan_mac_obj->check_add         = ecore_check_vxlan_fltr_add;
2509296071Sdavidcs		vlan_mac_obj->check_move        = ecore_check_move;
2510296071Sdavidcs		vlan_mac_obj->ramrod_cmd        =
2511296071Sdavidcs			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2512296071Sdavidcs
2513296071Sdavidcs		/* Exe Queue */
2514296071Sdavidcs		ecore_exe_queue_init(sc,
2515296071Sdavidcs				     &vlan_mac_obj->exe_queue,
2516296071Sdavidcs				     CLASSIFY_RULES_COUNT,
2517296071Sdavidcs				     qable_obj, ecore_validate_vlan_mac,
2518296071Sdavidcs				     ecore_remove_vlan_mac,
2519296071Sdavidcs				     ecore_optimize_vlan_mac,
2520296071Sdavidcs				     ecore_execute_vlan_mac,
2521296071Sdavidcs				     ecore_exeq_get_vxlan_fltr);
2522296071Sdavidcs	}
2523296071Sdavidcs}
2524296071Sdavidcs
2525255736Sdavidch/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2526255736Sdavidchstatic inline void __storm_memset_mac_filters(struct bxe_softc *sc,
2527255736Sdavidch			struct tstorm_eth_mac_filter_config *mac_filters,
2528255736Sdavidch			uint16_t pf_id)
2529255736Sdavidch{
2530255736Sdavidch	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2531255736Sdavidch
2532255736Sdavidch	uint32_t addr = BAR_TSTRORM_INTMEM +
2533255736Sdavidch			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2534255736Sdavidch
2535255736Sdavidch	ecore_storm_memset_struct(sc, addr, size, (uint32_t *)mac_filters);
2536255736Sdavidch}
2537255736Sdavidch
/* Configure the E1x rx-mode: translate the requested accept flags into
 * the per-function tstorm_eth_mac_filter_config bitmasks and write the
 * structure into the chip's internal memory.  The operation completes
 * synchronously — the pending bit is cleared before returning.
 *
 * Always returns ECORE_SUCCESS.
 */
static int ecore_set_rx_mode_e1x(struct bxe_softc *sc,
				 struct ecore_rx_mode_ramrod_params *p)
{
	/* update the sc MAC filter structure */
	uint32_t mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
	uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	uint8_t unmatched_unicast = 0;

    /* In e1x there we only take into account rx accept flag since tx switching
     * isn't enabled. */
	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	/* For each filter class, set or clear this client's bit in the
	 * shared per-function bitmask according to the flags above.
	 */
	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	ECORE_MSG(sc, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure*/
	__storm_memset_mac_filters(sc, mac_filters, p->func_id);

	/* The operation is completed */
	ECORE_CLEAR_BIT(p->state, p->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();

	return ECORE_SUCCESS;
}
2618255736Sdavidch
2619255736Sdavidch/* Setup ramrod data */
2620255736Sdavidchstatic inline void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid,
2621255736Sdavidch				struct eth_classify_header *hdr,
2622255736Sdavidch				uint8_t rule_cnt)
2623255736Sdavidch{
2624255736Sdavidch	hdr->echo = ECORE_CPU_TO_LE32(cid);
2625255736Sdavidch	hdr->rule_cnt = rule_cnt;
2626255736Sdavidch}
2627255736Sdavidch
/* Translate ECORE_ACCEPT_* flags into the eth_filter_rules_cmd 'state'
 * bitmask.  Starts from drop-all and selectively clears drop bits /
 * sets accept bits per flag.
 *
 * @clear_accept_all: when TRUE, strip all ACCEPT_ALL/UNMATCHED bits at
 * the end (used for the FCoE L2 queue, which must never accept-all).
 */
static inline void ecore_rx_mode_set_cmd_state_e2(struct bxe_softc *sc,
				unsigned long *accept_flags,
				struct eth_filter_rules_cmd *cmd,
				bool clear_accept_all)
{
	uint16_t state;

	/* start with 'drop-all' */
	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
	}

	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;

	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}
	if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;

	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
	if (clear_accept_all) {
		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	/* Device expects the state little-endian. */
	cmd->state = ECORE_CPU_TO_LE16(state);
}
2674255736Sdavidch
/**
 * ecore_set_rx_mode_e2 - configure rx_mode filtering rules on 57712+ chips
 *
 * @sc: device handle
 * @p:  rx_mode ramrod parameters (flags, client/function ids, rdata buffer)
 *
 * Builds up to four ETH filter rules in the p->rdata buffer - Tx and/or Rx
 * rules for the regular L2 client and, when ECORE_RX_MODE_FCOE_ETH is set,
 * a separate Tx/Rx pair for the FCoE client - then posts a single
 * ETH_FILTER_RULES ramrod carrying all of them.
 *
 * Returns ECORE_PENDING when the ramrod was posted successfully (the
 * completion arrives asynchronously), or the ecore_sp_post() error code.
 */
static int ecore_set_rx_mode_e2(struct bxe_softc *sc,
				struct ecore_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	uint8_t rule_idx = 0;	/* next free slot in data->rules[] */

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) */
	if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		/* rule_idx is advanced as part of consuming this slot */
		ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
					       &(data->rules[rule_idx++]),
					       FALSE);
	}

	/* Rx */
	if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
					       &(data->rules[rule_idx++]),
					       FALSE);
	}

	/* If FCoE Queue configuration has been requested configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
	if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/*  Tx (internal switching) */
		if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_TX_CMD;

			/* TRUE: strip the ACCEPT_ALL-type flags for FCoE */
			ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
						       &(data->rules[rule_idx]),
						       TRUE);
			rule_idx++;
		}

		/* Rx */
		if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_RX_CMD;

			ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
						       &(data->rules[rule_idx]),
						       TRUE);
			rule_idx++;
		}
	}

	/* Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	ECORE_MSG(sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
		  data->header.rule_cnt, p->rx_accept_flags,
		  p->tx_accept_flags);

	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside ecore_sp_post()).
	 */

	/* Send a ramrod */
	rc = ecore_sp_post(sc,
			   RAMROD_CMD_ID_ETH_FILTER_RULES,
			   p->cid,
			   p->rdata_mapping,
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return ECORE_PENDING;
}
2777255736Sdavidch
2778255736Sdavidchstatic int ecore_wait_rx_mode_comp_e2(struct bxe_softc *sc,
2779255736Sdavidch				      struct ecore_rx_mode_ramrod_params *p)
2780255736Sdavidch{
2781255736Sdavidch	return ecore_state_wait(sc, p->state, p->pstate);
2782255736Sdavidch}
2783255736Sdavidch
2784255736Sdavidchstatic int ecore_empty_rx_mode_wait(struct bxe_softc *sc,
2785255736Sdavidch				    struct ecore_rx_mode_ramrod_params *p)
2786255736Sdavidch{
2787255736Sdavidch	/* Do nothing */
2788255736Sdavidch	return ECORE_SUCCESS;
2789255736Sdavidch}
2790255736Sdavidch
2791255736Sdavidchint ecore_config_rx_mode(struct bxe_softc *sc,
2792255736Sdavidch			 struct ecore_rx_mode_ramrod_params *p)
2793255736Sdavidch{
2794255736Sdavidch	int rc;
2795255736Sdavidch
2796255736Sdavidch	/* Configure the new classification in the chip */
2797255736Sdavidch	rc = p->rx_mode_obj->config_rx_mode(sc, p);
2798255736Sdavidch	if (rc < 0)
2799255736Sdavidch		return rc;
2800255736Sdavidch
2801255736Sdavidch	/* Wait for a ramrod completion if was requested */
2802255736Sdavidch	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2803255736Sdavidch		rc = p->rx_mode_obj->wait_comp(sc, p);
2804255736Sdavidch		if (rc)
2805255736Sdavidch			return rc;
2806255736Sdavidch	}
2807255736Sdavidch
2808255736Sdavidch	return rc;
2809255736Sdavidch}
2810255736Sdavidch
2811255736Sdavidchvoid ecore_init_rx_mode_obj(struct bxe_softc *sc,
2812255736Sdavidch			    struct ecore_rx_mode_obj *o)
2813255736Sdavidch{
2814255736Sdavidch	if (CHIP_IS_E1x(sc)) {
2815255736Sdavidch		o->wait_comp      = ecore_empty_rx_mode_wait;
2816255736Sdavidch		o->config_rx_mode = ecore_set_rx_mode_e1x;
2817255736Sdavidch	} else {
2818255736Sdavidch		o->wait_comp      = ecore_wait_rx_mode_comp_e2;
2819255736Sdavidch		o->config_rx_mode = ecore_set_rx_mode_e2;
2820255736Sdavidch	}
2821255736Sdavidch}
2822255736Sdavidch
2823255736Sdavidch/********************* Multicast verbs: SET, CLEAR ****************************/
2824255736Sdavidchstatic inline uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
2825255736Sdavidch{
2826255736Sdavidch	return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2827255736Sdavidch}
2828255736Sdavidch
/* A single multicast MAC address queued on a pending ADD command's list. */
struct ecore_mcast_mac_elem {
	ecore_list_entry_t link;
	uint8_t mac[ETH_ALEN];
	uint8_t pad[2]; /* For a natural alignment of the following buffer */
};
2834255736Sdavidch
/* A queued multicast command waiting for room in the ramrod data.
 * For ADD, the MAC elements live in a buffer allocated immediately after
 * this struct (see ecore_mcast_enqueue_cmd()).
 */
struct ecore_pending_mcast_cmd {
	ecore_list_entry_t link;
	int type; /* ECORE_MCAST_CMD_X */
	union {
		ecore_list_t macs_head; /* ADD: FIFO of ecore_mcast_mac_elem */
		uint32_t macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with aprox match */
	} data;

	bool done; /* set to TRUE, when the command has been handled,
		    * practically used in 57712 handling only, where one pending
		    * command may be handled in a few operations. As long as for
		    * other chips every operation handling is completed in a
		    * single ramrod, there is no need to utilize this field.
		    */
};
2851255736Sdavidch
2852255736Sdavidchstatic int ecore_mcast_wait(struct bxe_softc *sc,
2853255736Sdavidch			    struct ecore_mcast_obj *o)
2854255736Sdavidch{
2855255736Sdavidch	if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2856255736Sdavidch			o->raw.wait_comp(sc, &o->raw))
2857255736Sdavidch		return ECORE_TIMEOUT;
2858255736Sdavidch
2859255736Sdavidch	return ECORE_SUCCESS;
2860255736Sdavidch}
2861255736Sdavidch
/**
 * ecore_mcast_enqueue_cmd - queue a multicast command for later handling
 *
 * @sc:  device handle
 * @o:   multicast object
 * @p:   ramrod parameters (mcast_list / mcast_list_len)
 * @cmd: ECORE_MCAST_CMD_ADD / _DEL / _RESTORE
 *
 * Allocates a pending-command element (for ADD, with a trailing buffer
 * holding a copy of every requested MAC) and appends it to the object's
 * pending-commands FIFO, then marks the object as SCHEDULED.
 *
 * Returns ECORE_PENDING on success, ECORE_SUCCESS for an empty request,
 * ECORE_NOMEM or ECORE_INVAL on failure.
 */
static int ecore_mcast_enqueue_cmd(struct bxe_softc *sc,
				   struct ecore_mcast_obj *o,
				   struct ecore_mcast_ramrod_params *p,
				   enum ecore_mcast_cmd cmd)
{
	int total_sz;
	struct ecore_pending_mcast_cmd *new_cmd;
	struct ecore_mcast_mac_elem *cur_mac = NULL;
	struct ecore_mcast_list_elem *pos;
	/* Only ADD carries MACs that must be copied; DEL/RESTORE need none */
	int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
			     p->mcast_list_len : 0);

	/* If the command is empty ("handle pending commands only"), break */
	if (!p->mcast_list_len)
		return ECORE_SUCCESS;

	/* One allocation covers the command header and its MAC array */
	total_sz = sizeof(*new_cmd) +
		macs_list_len * sizeof(struct ecore_mcast_mac_elem);

	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
	new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);

	if (!new_cmd)
		return ECORE_NOMEM;

	ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d\n",
		  cmd, macs_list_len);

	ECORE_LIST_INIT(&new_cmd->data.macs_head);

	new_cmd->type = cmd;
	new_cmd->done = FALSE;

	switch (cmd) {
	case ECORE_MCAST_CMD_ADD:
		/* MAC elements were allocated right after the struct above */
		cur_mac = (struct ecore_mcast_mac_elem *)
			  ((uint8_t *)new_cmd + sizeof(*new_cmd));

		/* Push the MACs of the current command into the pending command
		 * MACs list: FIFO
		 */
		ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
					  struct ecore_mcast_list_elem) {
			ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
			ECORE_LIST_PUSH_TAIL(&cur_mac->link,
					     &new_cmd->data.macs_head);
			cur_mac++;
		}

		break;

	case ECORE_MCAST_CMD_DEL:
		new_cmd->data.macs_num = p->mcast_list_len;
		break;

	case ECORE_MCAST_CMD_RESTORE:
		new_cmd->data.next_bin = 0;
		break;

	default:
		ECORE_FREE(sc, new_cmd, total_sz);
		ECORE_ERR("Unknown command: %d\n", cmd);
		return ECORE_INVAL;
	}

	/* Push the new pending command to the tail of the pending list: FIFO */
	ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);

	o->set_sched(o);

	return ECORE_PENDING;
}
2934255736Sdavidch
2935255736Sdavidch/**
2936255736Sdavidch * ecore_mcast_get_next_bin - get the next set bin (index)
2937255736Sdavidch *
2938255736Sdavidch * @o:
2939255736Sdavidch * @last:	index to start looking from (including)
2940255736Sdavidch *
2941255736Sdavidch * returns the next found (set) bin or a negative value if none is found.
2942255736Sdavidch */
2943255736Sdavidchstatic inline int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2944255736Sdavidch{
2945255736Sdavidch	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2946255736Sdavidch
2947255736Sdavidch	for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2948255736Sdavidch		if (o->registry.aprox_match.vec[i])
2949255736Sdavidch			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2950255736Sdavidch				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2951255736Sdavidch				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2952255736Sdavidch						       vec, cur_bit)) {
2953255736Sdavidch					return cur_bit;
2954255736Sdavidch				}
2955255736Sdavidch			}
2956255736Sdavidch		inner_start = 0;
2957255736Sdavidch	}
2958255736Sdavidch
2959255736Sdavidch	/* None found */
2960255736Sdavidch	return -1;
2961255736Sdavidch}
2962255736Sdavidch
2963255736Sdavidch/**
2964255736Sdavidch * ecore_mcast_clear_first_bin - find the first set bin and clear it
2965255736Sdavidch *
2966255736Sdavidch * @o:
2967255736Sdavidch *
2968255736Sdavidch * returns the index of the found bin or -1 if none is found
2969255736Sdavidch */
2970255736Sdavidchstatic inline int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2971255736Sdavidch{
2972255736Sdavidch	int cur_bit = ecore_mcast_get_next_bin(o, 0);
2973255736Sdavidch
2974255736Sdavidch	if (cur_bit >= 0)
2975255736Sdavidch		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2976255736Sdavidch
2977255736Sdavidch	return cur_bit;
2978255736Sdavidch}
2979255736Sdavidch
2980255736Sdavidchstatic inline uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2981255736Sdavidch{
2982255736Sdavidch	struct ecore_raw_obj *raw = &o->raw;
2983255736Sdavidch	uint8_t rx_tx_flag = 0;
2984255736Sdavidch
2985255736Sdavidch	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2986255736Sdavidch	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2987255736Sdavidch		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2988255736Sdavidch
2989255736Sdavidch	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2990255736Sdavidch	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2991255736Sdavidch		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2992255736Sdavidch
2993255736Sdavidch	return rx_tx_flag;
2994255736Sdavidch}
2995255736Sdavidch
/**
 * ecore_mcast_set_one_rule_e2 - fill one multicast rule in the ramrod data
 *
 * @sc:       device handle
 * @o:        multicast object
 * @idx:      rule slot in the ramrod data buffer
 * @cfg_data: MAC (for ADD) or bin (for RESTORE); unused for DEL
 * @cmd:      ECORE_MCAST_CMD_ADD / _DEL / _RESTORE
 *
 * Encodes a single bin set/clear rule at data->rules[idx] and updates the
 * object's approximate-match bin registry accordingly.
 */
static void ecore_mcast_set_one_rule_e2(struct bxe_softc *sc,
					struct ecore_mcast_obj *o, int idx,
					union ecore_mcast_config_data *cfg_data,
					enum ecore_mcast_cmd cmd)
{
	struct ecore_raw_obj *r = &o->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
	uint8_t func_id = r->func_id;
	uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
	int bin;

	/* ADD and RESTORE set a bin; DEL clears one */
	if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;

	data->rules[idx].cmd_general_data |= rx_tx_add_flag;

	/* Get a bin and update a bins' vector */
	switch (cmd) {
	case ECORE_MCAST_CMD_ADD:
		bin = ecore_mcast_bin_from_mac(cfg_data->mac);
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
		break;

	case ECORE_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (ecore_mcast_clear_first_bin() returns -1) then we would
		 * clear any (0xff) bin.
		 * See ecore_mcast_validate_e2() for explanation when it may
		 * happen.
		 */
		bin = ecore_mcast_clear_first_bin(o);
		break;

	case ECORE_MCAST_CMD_RESTORE:
		bin = cfg_data->bin;
		break;

	default:
		ECORE_ERR("Unknown command: %d\n", cmd);
		return;
	}

	ECORE_MSG(sc, "%s bin %d\n",
		  ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
		   "Setting"  : "Clearing"), bin);

	data->rules[idx].bin_id    = (uint8_t)bin;
	data->rules[idx].func_id   = func_id;
	data->rules[idx].engine_id = o->engine_id;
}
3047255736Sdavidch
3048255736Sdavidch/**
3049255736Sdavidch * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
3050255736Sdavidch *
3051255736Sdavidch * @sc:		device handle
3052255736Sdavidch * @o:
3053255736Sdavidch * @start_bin:	index in the registry to start from (including)
3054255736Sdavidch * @rdata_idx:	index in the ramrod data to start from
3055255736Sdavidch *
3056255736Sdavidch * returns last handled bin index or -1 if all bins have been handled
3057255736Sdavidch */
3058255736Sdavidchstatic inline int ecore_mcast_handle_restore_cmd_e2(
3059255736Sdavidch	struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_bin,
3060255736Sdavidch	int *rdata_idx)
3061255736Sdavidch{
3062255736Sdavidch	int cur_bin, cnt = *rdata_idx;
3063255736Sdavidch	union ecore_mcast_config_data cfg_data = {NULL};
3064255736Sdavidch
3065255736Sdavidch	/* go through the registry and configure the bins from it */
3066255736Sdavidch	for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
3067255736Sdavidch	    cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
3068255736Sdavidch
3069255736Sdavidch		cfg_data.bin = (uint8_t)cur_bin;
3070255736Sdavidch		o->set_one_rule(sc, o, cnt, &cfg_data,
3071255736Sdavidch				ECORE_MCAST_CMD_RESTORE);
3072255736Sdavidch
3073255736Sdavidch		cnt++;
3074255736Sdavidch
3075255736Sdavidch		ECORE_MSG(sc, "About to configure a bin %d\n", cur_bin);
3076255736Sdavidch
3077255736Sdavidch		/* Break if we reached the maximum number
3078255736Sdavidch		 * of rules.
3079255736Sdavidch		 */
3080255736Sdavidch		if (cnt >= o->max_cmd_len)
3081255736Sdavidch			break;
3082255736Sdavidch	}
3083255736Sdavidch
3084255736Sdavidch	*rdata_idx = cnt;
3085255736Sdavidch
3086255736Sdavidch	return cur_bin;
3087255736Sdavidch}
3088255736Sdavidch
3089255736Sdavidchstatic inline void ecore_mcast_hdl_pending_add_e2(struct bxe_softc *sc,
3090255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
3091255736Sdavidch	int *line_idx)
3092255736Sdavidch{
3093255736Sdavidch	struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
3094255736Sdavidch	int cnt = *line_idx;
3095255736Sdavidch	union ecore_mcast_config_data cfg_data = {NULL};
3096255736Sdavidch
3097255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
3098255736Sdavidch		&cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) {
3099255736Sdavidch
3100255736Sdavidch		cfg_data.mac = &pmac_pos->mac[0];
3101255736Sdavidch		o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
3102255736Sdavidch
3103255736Sdavidch		cnt++;
3104255736Sdavidch
3105255736Sdavidch		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3106255736Sdavidch			  pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
3107255736Sdavidch
3108255736Sdavidch		ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
3109255736Sdavidch					&cmd_pos->data.macs_head);
3110255736Sdavidch
3111255736Sdavidch		/* Break if we reached the maximum number
3112255736Sdavidch		 * of rules.
3113255736Sdavidch		 */
3114255736Sdavidch		if (cnt >= o->max_cmd_len)
3115255736Sdavidch			break;
3116255736Sdavidch	}
3117255736Sdavidch
3118255736Sdavidch	*line_idx = cnt;
3119255736Sdavidch
3120255736Sdavidch	/* if no more MACs to configure - we are done */
3121255736Sdavidch	if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
3122255736Sdavidch		cmd_pos->done = TRUE;
3123255736Sdavidch}
3124255736Sdavidch
3125255736Sdavidchstatic inline void ecore_mcast_hdl_pending_del_e2(struct bxe_softc *sc,
3126255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
3127255736Sdavidch	int *line_idx)
3128255736Sdavidch{
3129255736Sdavidch	int cnt = *line_idx;
3130255736Sdavidch
3131255736Sdavidch	while (cmd_pos->data.macs_num) {
3132255736Sdavidch		o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
3133255736Sdavidch
3134255736Sdavidch		cnt++;
3135255736Sdavidch
3136255736Sdavidch		cmd_pos->data.macs_num--;
3137255736Sdavidch
3138255736Sdavidch		  ECORE_MSG(sc, "Deleting MAC. %d left,cnt is %d\n",
3139255736Sdavidch				  cmd_pos->data.macs_num, cnt);
3140255736Sdavidch
3141255736Sdavidch		/* Break if we reached the maximum
3142255736Sdavidch		 * number of rules.
3143255736Sdavidch		 */
3144255736Sdavidch		if (cnt >= o->max_cmd_len)
3145255736Sdavidch			break;
3146255736Sdavidch	}
3147255736Sdavidch
3148255736Sdavidch	*line_idx = cnt;
3149255736Sdavidch
3150255736Sdavidch	/* If we cleared all bins - we are done */
3151255736Sdavidch	if (!cmd_pos->data.macs_num)
3152255736Sdavidch		cmd_pos->done = TRUE;
3153255736Sdavidch}
3154255736Sdavidch
3155255736Sdavidchstatic inline void ecore_mcast_hdl_pending_restore_e2(struct bxe_softc *sc,
3156255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
3157255736Sdavidch	int *line_idx)
3158255736Sdavidch{
3159255736Sdavidch	cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
3160255736Sdavidch						line_idx);
3161255736Sdavidch
3162255736Sdavidch	if (cmd_pos->data.next_bin < 0)
3163255736Sdavidch		/* If o->set_restore returned -1 we are done */
3164255736Sdavidch		cmd_pos->done = TRUE;
3165255736Sdavidch	else
3166255736Sdavidch		/* Start from the next bin next time */
3167255736Sdavidch		cmd_pos->data.next_bin++;
3168255736Sdavidch}
3169255736Sdavidch
/**
 * ecore_mcast_handle_pending_cmds_e2 - drain the pending multicast commands
 *
 * @sc: device handle
 * @p:  ramrod parameters carrying the multicast object
 *
 * Walks the object's pending-commands FIFO, filling ramrod rule lines for
 * each command until the ramrod data is full. Fully-handled commands are
 * unlinked and freed; partially-handled ones stay queued with their
 * progress recorded (done/next_bin/macs_num).
 *
 * Returns the number of rule lines filled, or ECORE_INVAL on an unknown
 * command type.
 */
static inline int ecore_mcast_handle_pending_cmds_e2(struct bxe_softc *sc,
				struct ecore_mcast_ramrod_params *p)
{
	struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct ecore_mcast_obj *o = p->mcast_obj;

	ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
		&o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) {
		switch (cmd_pos->type) {
		case ECORE_MCAST_CMD_ADD:
			ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
			break;

		case ECORE_MCAST_CMD_DEL:
			ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
			break;

		case ECORE_MCAST_CMD_RESTORE:
			ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
							   &cnt);
			break;

		default:
			ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
			return ECORE_INVAL;
		}

		/* If the command has been completed - remove it from the list
		 * and free the memory
		 */
		if (cmd_pos->done) {
			ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
						&o->pending_cmds_head);
			/* NOTE(review): cmd_pos->alloc_len is not a member of
			 * struct ecore_pending_mcast_cmd as declared above and
			 * is never set by ecore_mcast_enqueue_cmd(); presumably
			 * ECORE_FREE() discards its size argument on this
			 * platform - verify against the macro definition in
			 * bxe.h.
			 */
			ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}
3214255736Sdavidch
3215255736Sdavidchstatic inline void ecore_mcast_hdl_add(struct bxe_softc *sc,
3216255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3217255736Sdavidch	int *line_idx)
3218255736Sdavidch{
3219255736Sdavidch	struct ecore_mcast_list_elem *mlist_pos;
3220255736Sdavidch	union ecore_mcast_config_data cfg_data = {NULL};
3221255736Sdavidch	int cnt = *line_idx;
3222255736Sdavidch
3223255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3224255736Sdavidch				  struct ecore_mcast_list_elem) {
3225255736Sdavidch		cfg_data.mac = mlist_pos->mac;
3226255736Sdavidch		o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
3227255736Sdavidch
3228255736Sdavidch		cnt++;
3229255736Sdavidch
3230255736Sdavidch		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3231255736Sdavidch			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
3232255736Sdavidch	}
3233255736Sdavidch
3234255736Sdavidch	*line_idx = cnt;
3235255736Sdavidch}
3236255736Sdavidch
3237255736Sdavidchstatic inline void ecore_mcast_hdl_del(struct bxe_softc *sc,
3238255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3239255736Sdavidch	int *line_idx)
3240255736Sdavidch{
3241255736Sdavidch	int cnt = *line_idx, i;
3242255736Sdavidch
3243255736Sdavidch	for (i = 0; i < p->mcast_list_len; i++) {
3244255736Sdavidch		o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
3245255736Sdavidch
3246255736Sdavidch		cnt++;
3247255736Sdavidch
3248255736Sdavidch		ECORE_MSG(sc, "Deleting MAC. %d left\n",
3249255736Sdavidch			  p->mcast_list_len - i - 1);
3250255736Sdavidch	}
3251255736Sdavidch
3252255736Sdavidch	*line_idx = cnt;
3253255736Sdavidch}
3254255736Sdavidch
3255255736Sdavidch/**
3256255736Sdavidch * ecore_mcast_handle_current_cmd -
3257255736Sdavidch *
3258255736Sdavidch * @sc:		device handle
3259255736Sdavidch * @p:
3260255736Sdavidch * @cmd:
3261255736Sdavidch * @start_cnt:	first line in the ramrod data that may be used
3262255736Sdavidch *
3263255736Sdavidch * This function is called iff there is enough place for the current command in
3264255736Sdavidch * the ramrod data.
3265255736Sdavidch * Returns number of lines filled in the ramrod data in total.
3266255736Sdavidch */
3267255736Sdavidchstatic inline int ecore_mcast_handle_current_cmd(struct bxe_softc *sc,
3268255736Sdavidch			struct ecore_mcast_ramrod_params *p,
3269255736Sdavidch			enum ecore_mcast_cmd cmd,
3270255736Sdavidch			int start_cnt)
3271255736Sdavidch{
3272255736Sdavidch	struct ecore_mcast_obj *o = p->mcast_obj;
3273255736Sdavidch	int cnt = start_cnt;
3274255736Sdavidch
3275255736Sdavidch	ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3276255736Sdavidch
3277255736Sdavidch	switch (cmd) {
3278255736Sdavidch	case ECORE_MCAST_CMD_ADD:
3279255736Sdavidch		ecore_mcast_hdl_add(sc, o, p, &cnt);
3280255736Sdavidch		break;
3281255736Sdavidch
3282255736Sdavidch	case ECORE_MCAST_CMD_DEL:
3283255736Sdavidch		ecore_mcast_hdl_del(sc, o, p, &cnt);
3284255736Sdavidch		break;
3285255736Sdavidch
3286255736Sdavidch	case ECORE_MCAST_CMD_RESTORE:
3287255736Sdavidch		o->hdl_restore(sc, o, 0, &cnt);
3288255736Sdavidch		break;
3289255736Sdavidch
3290255736Sdavidch	default:
3291255736Sdavidch		ECORE_ERR("Unknown command: %d\n", cmd);
3292255736Sdavidch		return ECORE_INVAL;
3293255736Sdavidch	}
3294255736Sdavidch
3295255736Sdavidch	/* The current command has been handled */
3296255736Sdavidch	p->mcast_list_len = 0;
3297255736Sdavidch
3298255736Sdavidch	return cnt;
3299255736Sdavidch}
3300255736Sdavidch
/**
 * ecore_mcast_validate_e2 - pre-validate a multicast command (57712+)
 *
 * @sc:  device handle
 * @p:   ramrod parameters; p->mcast_list_len may be rewritten here
 * @cmd: ECORE_MCAST_CMD_ADD / _DEL / _RESTORE / _CONT
 *
 * Estimates the amount of work the command implies and updates the
 * object's registry-size bookkeeping and total_pending_num accordingly.
 *
 * Returns ECORE_SUCCESS, or ECORE_INVAL for an unknown command.
 */
static int ecore_mcast_validate_e2(struct bxe_softc *sc,
				   struct ecore_mcast_ramrod_params *p,
				   enum ecore_mcast_cmd cmd)
{
	struct ecore_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case ECORE_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break */
		/* FALLTHROUGH: DEL reuses the RESTORE sizing logic below */

	/* RESTORE command will restore the entire multicast configuration */
	case ECORE_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be only less as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin and the actual number of bins set in the
		 * registry would be less than we estimated here. See
		 * ecore_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case ECORE_MCAST_CMD_ADD:
	case ECORE_MCAST_CMD_CONT:
		/* Here we assume that all new MACs will fall into new bins.
		 * However we will correct the real registry size after we
		 * handle all pending commands.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		ECORE_ERR("Unknown command: %d\n", cmd);
		return ECORE_INVAL;
	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return ECORE_SUCCESS;
}
3345255736Sdavidch
3346255736Sdavidchstatic void ecore_mcast_revert_e2(struct bxe_softc *sc,
3347255736Sdavidch				      struct ecore_mcast_ramrod_params *p,
3348255736Sdavidch				      int old_num_bins)
3349255736Sdavidch{
3350255736Sdavidch	struct ecore_mcast_obj *o = p->mcast_obj;
3351255736Sdavidch
3352255736Sdavidch	o->set_registry_size(o, old_num_bins);
3353255736Sdavidch	o->total_pending_num -= p->mcast_list_len;
3354255736Sdavidch}
3355255736Sdavidch
3356255736Sdavidch/**
3357255736Sdavidch * ecore_mcast_set_rdata_hdr_e2 - sets a header values
3358255736Sdavidch *
3359255736Sdavidch * @sc:		device handle
3360255736Sdavidch * @p:
3361255736Sdavidch * @len:	number of rules to handle
3362255736Sdavidch */
3363255736Sdavidchstatic inline void ecore_mcast_set_rdata_hdr_e2(struct bxe_softc *sc,
3364255736Sdavidch					struct ecore_mcast_ramrod_params *p,
3365255736Sdavidch					uint8_t len)
3366255736Sdavidch{
3367255736Sdavidch	struct ecore_raw_obj *r = &p->mcast_obj->raw;
3368255736Sdavidch	struct eth_multicast_rules_ramrod_data *data =
3369255736Sdavidch		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
3370255736Sdavidch
3371255736Sdavidch	data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3372255736Sdavidch					(ECORE_FILTER_MCAST_PENDING <<
3373255736Sdavidch					 ECORE_SWCID_SHIFT));
3374255736Sdavidch	data->header.rule_cnt = len;
3375255736Sdavidch}
3376255736Sdavidch
3377255736Sdavidch/**
3378255736Sdavidch * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3379255736Sdavidch *
3380255736Sdavidch * @sc:		device handle
3381255736Sdavidch * @o:
3382255736Sdavidch *
3383255736Sdavidch * Recalculate the actual number of set bins in the registry using Brian
3384255736Sdavidch * Kernighan's algorithm: it's execution complexity is as a number of set bins.
3385255736Sdavidch *
3386255736Sdavidch * returns 0 for the compliance with ecore_mcast_refresh_registry_e1().
3387255736Sdavidch */
3388255736Sdavidchstatic inline int ecore_mcast_refresh_registry_e2(struct bxe_softc *sc,
3389255736Sdavidch						  struct ecore_mcast_obj *o)
3390255736Sdavidch{
3391255736Sdavidch	int i, cnt = 0;
3392255736Sdavidch	uint64_t elem;
3393255736Sdavidch
3394255736Sdavidch	for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
3395255736Sdavidch		elem = o->registry.aprox_match.vec[i];
3396255736Sdavidch		for (; elem; cnt++)
3397255736Sdavidch			elem &= elem - 1;
3398255736Sdavidch	}
3399255736Sdavidch
3400255736Sdavidch	o->set_registry_size(o, cnt);
3401255736Sdavidch
3402255736Sdavidch	return ECORE_SUCCESS;
3403255736Sdavidch}
3404255736Sdavidch
/**
 * ecore_mcast_setup_e2 - build and post a multicast-rules ramrod (57712+)
 *
 * @sc:		device handle
 * @p:		ramrod parameters (mcast object, MAC list, flags)
 * @cmd:	command being processed (ADD/DEL/RESTORE/CONT)
 *
 * Drains previously queued commands into the ramrod data buffer, appends
 * the current command if it still fits, updates the outstanding-MACs
 * bookkeeping, and posts the ramrod unless RAMROD_DRV_CLR_ONLY was
 * requested.
 *
 * Returns ECORE_PENDING when a ramrod was posted, ECORE_SUCCESS on the
 * CLEAR_ONLY path, or the error code from ecore_sp_post().
 */
static int ecore_mcast_setup_e2(struct bxe_softc *sc,
				struct ecore_mcast_ramrod_params *p,
				enum ecore_mcast_cmd cmd)
{
	struct ecore_raw_obj *raw = &p->mcast_obj->raw;
	struct ecore_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(data, 0, sizeof(*data));

	/* Drain previously enqueued commands into 'data' first. */
	cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be TRUE iff there was enough room in ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
	ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);

	ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see ecore_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		ecore_mcast_refresh_registry_e2(sc, o);

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return ECORE_SUCCESS;
	} else {
		/* No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read. If the memory read is removed we will have to put a
		 * full memory barrier there (inside ecore_sp_post()).
		 */

		/* Send a ramrod */
		rc = ecore_sp_post( sc,
				    RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				    raw->cid,
				    raw->rdata_mapping,
				    ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return ECORE_PENDING;
	}
}
3489255736Sdavidch
3490255736Sdavidchstatic int ecore_mcast_validate_e1h(struct bxe_softc *sc,
3491255736Sdavidch				    struct ecore_mcast_ramrod_params *p,
3492255736Sdavidch				    enum ecore_mcast_cmd cmd)
3493255736Sdavidch{
3494255736Sdavidch	/* Mark, that there is a work to do */
3495255736Sdavidch	if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
3496255736Sdavidch		p->mcast_list_len = 1;
3497255736Sdavidch
3498255736Sdavidch	return ECORE_SUCCESS;
3499255736Sdavidch}
3500255736Sdavidch
/* Revert hook for 57711: the E1H flow keeps no rollback state, so this is
 * intentionally a no-op (present only to satisfy the object's interface).
 */
static void ecore_mcast_revert_e1h(struct bxe_softc *sc,
				       struct ecore_mcast_ramrod_params *p,
				       int old_num_bins)
{
	/* Do nothing */
}
3507255736Sdavidch
/* Set bit 'bit' in the 57711 multicast filter image 'filter' (an array of
 * 32-bit words): bit>>5 selects the word, bit&0x1f the bit within it.
 */
#define ECORE_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)
3512255736Sdavidch
3513255736Sdavidchstatic inline void ecore_mcast_hdl_add_e1h(struct bxe_softc *sc,
3514255736Sdavidch					   struct ecore_mcast_obj *o,
3515255736Sdavidch					   struct ecore_mcast_ramrod_params *p,
3516255736Sdavidch					   uint32_t *mc_filter)
3517255736Sdavidch{
3518255736Sdavidch	struct ecore_mcast_list_elem *mlist_pos;
3519255736Sdavidch	int bit;
3520255736Sdavidch
3521255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3522255736Sdavidch				  struct ecore_mcast_list_elem) {
3523255736Sdavidch		bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
3524255736Sdavidch		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3525255736Sdavidch
3526255736Sdavidch		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n",
3527255736Sdavidch			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit);
3528255736Sdavidch
3529255736Sdavidch		/* bookkeeping... */
3530255736Sdavidch		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3531255736Sdavidch				  bit);
3532255736Sdavidch	}
3533255736Sdavidch}
3534255736Sdavidch
3535255736Sdavidchstatic inline void ecore_mcast_hdl_restore_e1h(struct bxe_softc *sc,
3536255736Sdavidch	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3537255736Sdavidch	uint32_t *mc_filter)
3538255736Sdavidch{
3539255736Sdavidch	int bit;
3540255736Sdavidch
3541255736Sdavidch	for (bit = ecore_mcast_get_next_bin(o, 0);
3542255736Sdavidch	     bit >= 0;
3543255736Sdavidch	     bit = ecore_mcast_get_next_bin(o, bit + 1)) {
3544255736Sdavidch		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3545255736Sdavidch		ECORE_MSG(sc, "About to set bin %d\n", bit);
3546255736Sdavidch	}
3547255736Sdavidch}
3548255736Sdavidch
3549255736Sdavidch/* On 57711 we write the multicast MACs' approximate match
3550255736Sdavidch * table by directly into the TSTORM's internal RAM. So we don't
3551255736Sdavidch * really need to handle any tricks to make it work.
3552255736Sdavidch */
3553255736Sdavidchstatic int ecore_mcast_setup_e1h(struct bxe_softc *sc,
3554255736Sdavidch				 struct ecore_mcast_ramrod_params *p,
3555255736Sdavidch				 enum ecore_mcast_cmd cmd)
3556255736Sdavidch{
3557255736Sdavidch	int i;
3558255736Sdavidch	struct ecore_mcast_obj *o = p->mcast_obj;
3559255736Sdavidch	struct ecore_raw_obj *r = &o->raw;
3560255736Sdavidch
3561255736Sdavidch	/* If CLEAR_ONLY has been requested - clear the registry
3562255736Sdavidch	 * and clear a pending bit.
3563255736Sdavidch	 */
3564255736Sdavidch	if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3565255736Sdavidch		uint32_t mc_filter[ECORE_MC_HASH_SIZE] = {0};
3566255736Sdavidch
3567255736Sdavidch		/* Set the multicast filter bits before writing it into
3568255736Sdavidch		 * the internal memory.
3569255736Sdavidch		 */
3570255736Sdavidch		switch (cmd) {
3571255736Sdavidch		case ECORE_MCAST_CMD_ADD:
3572255736Sdavidch			ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
3573255736Sdavidch			break;
3574255736Sdavidch
3575255736Sdavidch		case ECORE_MCAST_CMD_DEL:
3576255736Sdavidch			ECORE_MSG(sc,
3577255736Sdavidch				  "Invalidating multicast MACs configuration\n");
3578255736Sdavidch
3579255736Sdavidch			/* clear the registry */
3580255736Sdavidch			ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3581255736Sdavidch			       sizeof(o->registry.aprox_match.vec));
3582255736Sdavidch			break;
3583255736Sdavidch
3584255736Sdavidch		case ECORE_MCAST_CMD_RESTORE:
3585255736Sdavidch			ecore_mcast_hdl_restore_e1h(sc, o, p, mc_filter);
3586255736Sdavidch			break;
3587255736Sdavidch
3588255736Sdavidch		default:
3589255736Sdavidch			ECORE_ERR("Unknown command: %d\n", cmd);
3590255736Sdavidch			return ECORE_INVAL;
3591255736Sdavidch		}
3592255736Sdavidch
3593255736Sdavidch		/* Set the mcast filter in the internal memory */
3594255736Sdavidch		for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3595255736Sdavidch			REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3596255736Sdavidch	} else
3597255736Sdavidch		/* clear the registry */
3598255736Sdavidch		ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3599255736Sdavidch		       sizeof(o->registry.aprox_match.vec));
3600255736Sdavidch
3601255736Sdavidch	/* We are done */
3602255736Sdavidch	r->clear_pending(r);
3603255736Sdavidch
3604255736Sdavidch	return ECORE_SUCCESS;
3605255736Sdavidch}
3606255736Sdavidch
3607255736Sdavidchstatic int ecore_mcast_validate_e1(struct bxe_softc *sc,
3608255736Sdavidch				   struct ecore_mcast_ramrod_params *p,
3609255736Sdavidch				   enum ecore_mcast_cmd cmd)
3610255736Sdavidch{
3611255736Sdavidch	struct ecore_mcast_obj *o = p->mcast_obj;
3612255736Sdavidch	int reg_sz = o->get_registry_size(o);
3613255736Sdavidch
3614255736Sdavidch	switch (cmd) {
3615255736Sdavidch	/* DEL command deletes all currently configured MACs */
3616255736Sdavidch	case ECORE_MCAST_CMD_DEL:
3617255736Sdavidch		o->set_registry_size(o, 0);
3618255736Sdavidch		/* Don't break */
3619255736Sdavidch
3620255736Sdavidch	/* RESTORE command will restore the entire multicast configuration */
3621255736Sdavidch	case ECORE_MCAST_CMD_RESTORE:
3622255736Sdavidch		p->mcast_list_len = reg_sz;
3623255736Sdavidch		  ECORE_MSG(sc, "Command %d, p->mcast_list_len=%d\n",
3624255736Sdavidch				  cmd, p->mcast_list_len);
3625255736Sdavidch		break;
3626255736Sdavidch
3627255736Sdavidch	case ECORE_MCAST_CMD_ADD:
3628255736Sdavidch	case ECORE_MCAST_CMD_CONT:
3629255736Sdavidch		/* Multicast MACs on 57710 are configured as unicast MACs and
3630255736Sdavidch		 * there is only a limited number of CAM entries for that
3631255736Sdavidch		 * matter.
3632255736Sdavidch		 */
3633255736Sdavidch		if (p->mcast_list_len > o->max_cmd_len) {
3634255736Sdavidch			ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n",
3635255736Sdavidch				  o->max_cmd_len);
3636255736Sdavidch			return ECORE_INVAL;
3637255736Sdavidch		}
3638255736Sdavidch		/* Every configured MAC should be cleared if DEL command is
3639255736Sdavidch		 * called. Only the last ADD command is relevant as long as
3640255736Sdavidch		 * every ADD commands overrides the previous configuration.
3641255736Sdavidch		 */
3642255736Sdavidch		ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3643255736Sdavidch		if (p->mcast_list_len > 0)
3644255736Sdavidch			o->set_registry_size(o, p->mcast_list_len);
3645255736Sdavidch
3646255736Sdavidch		break;
3647255736Sdavidch
3648255736Sdavidch	default:
3649255736Sdavidch		ECORE_ERR("Unknown command: %d\n", cmd);
3650255736Sdavidch		return ECORE_INVAL;
3651255736Sdavidch	}
3652255736Sdavidch
3653255736Sdavidch	/* We want to ensure that commands are executed one by one for 57710.
3654255736Sdavidch	 * Therefore each none-empty command will consume o->max_cmd_len.
3655255736Sdavidch	 */
3656255736Sdavidch	if (p->mcast_list_len)
3657255736Sdavidch		o->total_pending_num += o->max_cmd_len;
3658255736Sdavidch
3659255736Sdavidch	return ECORE_SUCCESS;
3660255736Sdavidch}
3661255736Sdavidch
3662255736Sdavidchstatic void ecore_mcast_revert_e1(struct bxe_softc *sc,
3663255736Sdavidch				      struct ecore_mcast_ramrod_params *p,
3664255736Sdavidch				      int old_num_macs)
3665255736Sdavidch{
3666255736Sdavidch	struct ecore_mcast_obj *o = p->mcast_obj;
3667255736Sdavidch
3668255736Sdavidch	o->set_registry_size(o, old_num_macs);
3669255736Sdavidch
3670255736Sdavidch	/* If current command hasn't been handled yet and we are
3671255736Sdavidch	 * here means that it's meant to be dropped and we have to
3672255736Sdavidch	 * update the number of outstanding MACs accordingly.
3673255736Sdavidch	 */
3674255736Sdavidch	if (p->mcast_list_len)
3675255736Sdavidch		o->total_pending_num -= o->max_cmd_len;
3676255736Sdavidch}
3677255736Sdavidch
3678255736Sdavidchstatic void ecore_mcast_set_one_rule_e1(struct bxe_softc *sc,
3679255736Sdavidch					struct ecore_mcast_obj *o, int idx,
3680255736Sdavidch					union ecore_mcast_config_data *cfg_data,
3681255736Sdavidch					enum ecore_mcast_cmd cmd)
3682255736Sdavidch{
3683255736Sdavidch	struct ecore_raw_obj *r = &o->raw;
3684255736Sdavidch	struct mac_configuration_cmd *data =
3685255736Sdavidch		(struct mac_configuration_cmd *)(r->rdata);
3686255736Sdavidch
3687255736Sdavidch	/* copy mac */
3688255736Sdavidch	if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) {
3689255736Sdavidch		ecore_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3690255736Sdavidch				      &data->config_table[idx].middle_mac_addr,
3691255736Sdavidch				      &data->config_table[idx].lsb_mac_addr,
3692255736Sdavidch				      cfg_data->mac);
3693255736Sdavidch
3694255736Sdavidch		data->config_table[idx].vlan_id = 0;
3695255736Sdavidch		data->config_table[idx].pf_id = r->func_id;
3696255736Sdavidch		data->config_table[idx].clients_bit_vector =
3697255736Sdavidch			ECORE_CPU_TO_LE32(1 << r->cl_id);
3698255736Sdavidch
3699255736Sdavidch		ECORE_SET_FLAG(data->config_table[idx].flags,
3700255736Sdavidch			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3701255736Sdavidch			       T_ETH_MAC_COMMAND_SET);
3702255736Sdavidch	}
3703255736Sdavidch}
3704255736Sdavidch
3705255736Sdavidch/**
3706255736Sdavidch * ecore_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3707255736Sdavidch *
3708255736Sdavidch * @sc:		device handle
3709255736Sdavidch * @p:
3710255736Sdavidch * @len:	number of rules to handle
3711255736Sdavidch */
3712255736Sdavidchstatic inline void ecore_mcast_set_rdata_hdr_e1(struct bxe_softc *sc,
3713255736Sdavidch					struct ecore_mcast_ramrod_params *p,
3714255736Sdavidch					uint8_t len)
3715255736Sdavidch{
3716255736Sdavidch	struct ecore_raw_obj *r = &p->mcast_obj->raw;
3717255736Sdavidch	struct mac_configuration_cmd *data =
3718255736Sdavidch		(struct mac_configuration_cmd *)(r->rdata);
3719255736Sdavidch
3720255736Sdavidch	uint8_t offset = (CHIP_REV_IS_SLOW(sc) ?
3721255736Sdavidch		     ECORE_MAX_EMUL_MULTI*(1 + r->func_id) :
3722255736Sdavidch		     ECORE_MAX_MULTICAST*(1 + r->func_id));
3723255736Sdavidch
3724255736Sdavidch	data->hdr.offset = offset;
3725255736Sdavidch	data->hdr.client_id = ECORE_CPU_TO_LE16(0xff);
3726255736Sdavidch	data->hdr.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3727255736Sdavidch				     (ECORE_FILTER_MCAST_PENDING <<
3728255736Sdavidch				      ECORE_SWCID_SHIFT));
3729255736Sdavidch	data->hdr.length = len;
3730255736Sdavidch}
3731255736Sdavidch
3732255736Sdavidch/**
3733255736Sdavidch * ecore_mcast_handle_restore_cmd_e1 - restore command for 57710
3734255736Sdavidch *
3735255736Sdavidch * @sc:		device handle
3736255736Sdavidch * @o:
3737255736Sdavidch * @start_idx:	index in the registry to start from
3738255736Sdavidch * @rdata_idx:	index in the ramrod data to start from
3739255736Sdavidch *
3740255736Sdavidch * restore command for 57710 is like all other commands - always a stand alone
3741255736Sdavidch * command - start_idx and rdata_idx will always be 0. This function will always
3742255736Sdavidch * succeed.
3743255736Sdavidch * returns -1 to comply with 57712 variant.
3744255736Sdavidch */
3745255736Sdavidchstatic inline int ecore_mcast_handle_restore_cmd_e1(
3746255736Sdavidch	struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_idx,
3747255736Sdavidch	int *rdata_idx)
3748255736Sdavidch{
3749255736Sdavidch	struct ecore_mcast_mac_elem *elem;
3750255736Sdavidch	int i = 0;
3751255736Sdavidch	union ecore_mcast_config_data cfg_data = {NULL};
3752255736Sdavidch
3753255736Sdavidch	/* go through the registry and configure the MACs from it. */
3754255736Sdavidch	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->registry.exact_match.macs, link,
3755255736Sdavidch				  struct ecore_mcast_mac_elem) {
3756255736Sdavidch		cfg_data.mac = &elem->mac[0];
3757255736Sdavidch		o->set_one_rule(sc, o, i, &cfg_data, ECORE_MCAST_CMD_RESTORE);
3758255736Sdavidch
3759255736Sdavidch		i++;
3760255736Sdavidch
3761255736Sdavidch		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3762255736Sdavidch			  cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]);
3763255736Sdavidch	}
3764255736Sdavidch
3765255736Sdavidch	*rdata_idx = i;
3766255736Sdavidch
3767255736Sdavidch	return -1;
3768255736Sdavidch}
3769255736Sdavidch
3770255736Sdavidchstatic inline int ecore_mcast_handle_pending_cmds_e1(
3771255736Sdavidch	struct bxe_softc *sc, struct ecore_mcast_ramrod_params *p)
3772255736Sdavidch{
3773255736Sdavidch	struct ecore_pending_mcast_cmd *cmd_pos;
3774255736Sdavidch	struct ecore_mcast_mac_elem *pmac_pos;
3775255736Sdavidch	struct ecore_mcast_obj *o = p->mcast_obj;
3776255736Sdavidch	union ecore_mcast_config_data cfg_data = {NULL};
3777255736Sdavidch	int cnt = 0;
3778255736Sdavidch
3779255736Sdavidch	/* If nothing to be done - return */
3780255736Sdavidch	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3781255736Sdavidch		return 0;
3782255736Sdavidch
3783255736Sdavidch	/* Handle the first command */
3784255736Sdavidch	cmd_pos = ECORE_LIST_FIRST_ENTRY(&o->pending_cmds_head,
3785255736Sdavidch					 struct ecore_pending_mcast_cmd, link);
3786255736Sdavidch
3787255736Sdavidch	switch (cmd_pos->type) {
3788255736Sdavidch	case ECORE_MCAST_CMD_ADD:
3789255736Sdavidch		ECORE_LIST_FOR_EACH_ENTRY(pmac_pos, &cmd_pos->data.macs_head,
3790255736Sdavidch					  link, struct ecore_mcast_mac_elem) {
3791255736Sdavidch			cfg_data.mac = &pmac_pos->mac[0];
3792255736Sdavidch			o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
3793255736Sdavidch
3794255736Sdavidch			cnt++;
3795255736Sdavidch
3796255736Sdavidch			ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3797255736Sdavidch				  pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
3798255736Sdavidch		}
3799255736Sdavidch		break;
3800255736Sdavidch
3801255736Sdavidch	case ECORE_MCAST_CMD_DEL:
3802255736Sdavidch		cnt = cmd_pos->data.macs_num;
3803255736Sdavidch		ECORE_MSG(sc, "About to delete %d multicast MACs\n", cnt);
3804255736Sdavidch		break;
3805255736Sdavidch
3806255736Sdavidch	case ECORE_MCAST_CMD_RESTORE:
3807255736Sdavidch		o->hdl_restore(sc, o, 0, &cnt);
3808255736Sdavidch		break;
3809255736Sdavidch
3810255736Sdavidch	default:
3811255736Sdavidch		ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3812255736Sdavidch		return ECORE_INVAL;
3813255736Sdavidch	}
3814255736Sdavidch
3815255736Sdavidch	ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, &o->pending_cmds_head);
3816255736Sdavidch	ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3817255736Sdavidch
3818255736Sdavidch	return cnt;
3819255736Sdavidch}
3820255736Sdavidch
/**
 * ecore_get_fw_mac_addr - revert the ecore_set_fw_mac_addr().
 *
 * @fw_hi:	firmware word holding MAC bytes 0-1 (byte-swapped)
 * @fw_mid:	firmware word holding MAC bytes 2-3 (byte-swapped)
 * @fw_lo:	firmware word holding MAC bytes 4-5 (byte-swapped)
 * @mac:	output buffer for the 6-byte MAC address
 */
static inline void ecore_get_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
					 uint16_t *fw_lo, uint8_t *mac)
{
	const uint8_t *hi = (const uint8_t *)fw_hi;
	const uint8_t *mid = (const uint8_t *)fw_mid;
	const uint8_t *lo = (const uint8_t *)fw_lo;

	/* Each firmware word stores two MAC bytes in swapped order. */
	mac[0] = hi[1];
	mac[1] = hi[0];
	mac[2] = mid[1];
	mac[3] = mid[0];
	mac[4] = lo[1];
	mac[5] = lo[0];
}
3839255736Sdavidch
/**
 * ecore_mcast_refresh_registry_e1 - sync the exact-match registry (57710)
 *
 * @sc:		device handle
 * @o:		multicast object whose registry is refreshed
 *
 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
 * and update the registry correspondingly: if ADD - allocate a memory and add
 * the entries to the registry (list), if DELETE - clear the registry and free
 * the memory.
 */
static inline int ecore_mcast_refresh_registry_e1(struct bxe_softc *sc,
						  struct ecore_mcast_obj *o)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct ecore_mcast_mac_elem *elem;
	struct mac_configuration_cmd *data =
			(struct mac_configuration_cmd *)(raw->rdata);

	/* If first entry contains a SET bit - the command was ADD,
	 * otherwise - DEL_ALL
	 */
	if (ECORE_GET_FLAG(data->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
		int i, len = data->hdr.length;

		/* Break if it was a RESTORE command */
		if (!ECORE_LIST_IS_EMPTY(&o->registry.exact_match.macs))
			return ECORE_SUCCESS;

		/* All 'len' registry entries are carved from this single
		 * contiguous allocation; the DEL_ALL branch below releases
		 * it through the list's first element.
		 */
		elem = ECORE_CALLOC(len, sizeof(*elem), GFP_ATOMIC, sc);
		if (!elem) {
			ECORE_ERR("Failed to allocate registry memory\n");
			return ECORE_NOMEM;
		}

		/* Convert each firmware-format entry back to a raw MAC and
		 * append it to the registry list.
		 */
		for (i = 0; i < len; i++, elem++) {
			ecore_get_fw_mac_addr(
				&data->config_table[i].msb_mac_addr,
				&data->config_table[i].middle_mac_addr,
				&data->config_table[i].lsb_mac_addr,
				elem->mac);
			ECORE_MSG(sc, "Adding registry entry for [%02x:%02x:%02x:%02x:%02x:%02x]\n",
				  elem->mac[0], elem->mac[1], elem->mac[2], elem->mac[3], elem->mac[4], elem->mac[5]);
			ECORE_LIST_PUSH_TAIL(&elem->link,
					     &o->registry.exact_match.macs);
		}
	} else {
		/* DEL_ALL: the first list element is also the start of the
		 * single allocation backing every entry (see above), so
		 * freeing it releases the whole registry.
		 */
		elem = ECORE_LIST_FIRST_ENTRY(&o->registry.exact_match.macs,
					      struct ecore_mcast_mac_elem,
					      link);
		ECORE_MSG(sc, "Deleting a registry\n");
		ECORE_FREE(sc, elem, sizeof(*elem));
		ECORE_LIST_INIT(&o->registry.exact_match.macs);
	}

	return ECORE_SUCCESS;
}
3898255736Sdavidch
/**
 * ecore_mcast_setup_e1 - build and post a set-MAC ramrod (57710)
 *
 * @sc:		device handle
 * @p:		ramrod parameters (mcast object, MAC list, flags)
 * @cmd:	command being processed (ADD/DEL/RESTORE/CONT)
 *
 * 57710 executes exactly one multicast command per ramrod: either the
 * first pending command or, when nothing is queued, the current one.
 * The whole config table is invalidated first, then filled, and the
 * registry is refreshed before the ramrod is posted (the post is skipped
 * for RAMROD_DRV_CLR_ONLY).
 *
 * Returns ECORE_PENDING when the ramrod was posted, ECORE_SUCCESS on the
 * CLEAR_ONLY path, or a negative error code.
 */
static int ecore_mcast_setup_e1(struct bxe_softc *sc,
				struct ecore_mcast_ramrod_params *p,
				enum ecore_mcast_cmd cmd)
{
	struct ecore_mcast_obj *o = p->mcast_obj;
	struct ecore_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);
	int cnt = 0, i, rc;

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(data, 0, sizeof(*data));

	/* First set all entries as invalid */
	for (i = 0; i < o->max_cmd_len ; i++)
		ECORE_SET_FLAG(data->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	/* Handle pending commands first */
	cnt = ecore_mcast_handle_pending_cmds_e1(sc, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be TRUE iff there were no pending commands */
	if (!cnt)
		cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, 0);

	/* For 57710 every command has o->max_cmd_len length to ensure that
	 * commands are done one at a time.
	 */
	o->total_pending_num -= o->max_cmd_len;

	/* send a ramrod */

	ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);

	/* Set ramrod header (in particular, a number of entries to update) */
	ecore_mcast_set_rdata_hdr_e1(sc, p, (uint8_t)cnt);

	/* update a registry: we need the registry contents to be always up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we sent one command at a time
	 * hence we may take the registry update out of the command handling
	 * and do it in a simpler way here.
	 */
	rc = ecore_mcast_refresh_registry_e1(sc, o);
	if (rc)
		return rc;

	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return ECORE_SUCCESS;
	} else {
		/* No need for an explicit memory barrier here as long as we
		 * ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read. If the memory read is removed we will have to put a
		 * full memory barrier there (inside ecore_sp_post()).
		 */

		/* Send a ramrod */
		rc = ecore_sp_post( sc,
				    RAMROD_CMD_ID_ETH_SET_MAC,
				    raw->cid,
				    raw->rdata_mapping,
				    ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return ECORE_PENDING;
	}
}
3978255736Sdavidch
/* Number of MACs currently held in the exact-match registry (57710). */
static int ecore_mcast_get_registry_size_exact(struct ecore_mcast_obj *o)
{
	return o->registry.exact_match.num_macs_set;
}
3983255736Sdavidch
/* Number of bins currently set in the approximate-match registry. */
static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
{
	return o->registry.aprox_match.num_bins_set;
}
3988255736Sdavidch
/* Record a new MAC count in the exact-match registry (57710). */
static void ecore_mcast_set_registry_size_exact(struct ecore_mcast_obj *o,
						int n)
{
	o->registry.exact_match.num_macs_set = n;
}
3994255736Sdavidch
/* Record a new set-bin count in the approximate-match registry. */
static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
						int n)
{
	o->registry.aprox_match.num_bins_set = n;
}
4000255736Sdavidch
/**
 * ecore_config_mcast - execute or enqueue a multicast configuration command
 *
 * @sc:		device handle
 * @p:		ramrod parameters (mcast object, MAC list, flags)
 * @cmd:	command to execute (ADD/DEL/RESTORE/CONT)
 *
 * Validates the command, enqueues it when it cannot be completed in the
 * current iteration, and otherwise configures the chip via
 * o->config_mcast().  On failure the registry size and pending counters
 * are rolled back via o->revert().
 *
 * Returns ECORE_SUCCESS or ECORE_PENDING on success, negative otherwise.
 */
int ecore_config_mcast(struct bxe_softc *sc,
		       struct ecore_mcast_ramrod_params *p,
		       enum ecore_mcast_cmd cmd)
{
	struct ecore_mcast_obj *o = p->mcast_obj;
	struct ecore_raw_obj *r = &o->raw;
	int rc = 0, old_reg_size;

	/* This is needed to recover number of currently configured mcast macs
	 * in case of failure.
	 */
	old_reg_size = o->get_registry_size(o);

	/* Do some calculations and checks */
	rc = o->validate(sc, p, cmd);
	if (rc)
		return rc;

	/* Return if there is no work to do */
	if ((!p->mcast_list_len) && (!o->check_sched(o)))
		return ECORE_SUCCESS;

	ECORE_MSG(sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
		  o->total_pending_num, p->mcast_list_len, o->max_cmd_len);

	/* Enqueue the current command to the pending list if we can't complete
	 * it in the current iteration
	 */
	if (r->check_pending(r) ||
	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;

		/* As long as the current command is in a command list we
		 * don't need to handle it separately.
		 */
		p->mcast_list_len = 0;
	}

	if (!r->check_pending(r)) {

		/* Set 'pending' state */
		r->set_pending(r);

		/* Configure the new classification in the chip */
		rc = o->config_mcast(sc, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Wait for a ramrod completion if was requested */
		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(sc, o);
	}

	return rc;

error_exit2:
	/* config_mcast() failed after 'pending' was set - clear it. */
	r->clear_pending(r);

error_exit1:
	/* Roll back registry size / pending credit. */
	o->revert(sc, p, old_reg_size);

	return rc;
}
4066255736Sdavidch
/* Clear the object's SCHEDULED bit in the parent's state, with barriers
 * ordering the bit update against surrounding memory operations.
 */
static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}
4073255736Sdavidch
/* Set the object's SCHEDULED bit in the parent's state.  Note: reuses the
 * *_CLEAR_BIT barrier macros around a SET - presumably they expand to
 * generic memory barriers; verify against their definitions.
 */
static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_SET_BIT(o->sched_state, o->raw.pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}
4080255736Sdavidch
/* TRUE iff the object's SCHEDULED bit is set in the parent's state. */
static bool ecore_mcast_check_sched(struct ecore_mcast_obj *o)
{
	return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
}
4085255736Sdavidch
/* TRUE iff a ramrod is currently in flight or a command is scheduled. */
static bool ecore_mcast_check_pending(struct ecore_mcast_obj *o)
{
	return o->raw.check_pending(&o->raw) || o->check_sched(o);
}
4090255736Sdavidch
/* Initialize a multicast configuration object.
 *
 * Zeroes the object, initializes the embedded raw object, and installs
 * the chip-specific callback set:
 *   - 57710 (E1):  multicast uses exact-match CAM entries; a registry of
 *                  configured MACs is kept in registry.exact_match.macs.
 *   - 57711 (E1H): multicast is configured via an approximate hash with
 *                  no ramrod, hence unlimited per-command credit.
 *   - 57712+ (E2): ramrod-based rule lists with a bounded command length.
 *
 * @sc:            device handle
 * @mcast_obj:     object to initialize
 * @mcast_cl_id:   client id to use for multicast configuration
 * @mcast_cid:     connection id for the mcast ramrods
 * @func_id:       PCI function id
 * @engine_id:     HW engine the function runs on
 * @rdata:         ramrod data buffer (virtual address)
 * @rdata_mapping: ramrod data buffer (DMA address)
 * @state:         pending-state bit used by the raw object
 * @pstate:        pointer to the word holding the pending-state bits
 * @type:          object type (PF/VF) for the raw object
 */
void ecore_init_mcast_obj(struct bxe_softc *sc,
			  struct ecore_mcast_obj *mcast_obj,
			  uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
			  uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, ecore_obj_type type)
{
	ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));

	ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);

	/* Scheduling bookkeeping is common to all chip variants */
	mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = ecore_mcast_check_sched;
	mcast_obj->set_sched = ecore_mcast_set_sched;
	mcast_obj->clear_sched = ecore_mcast_clear_sched;

	if (CHIP_IS_E1(sc)) {
		mcast_obj->config_mcast      = ecore_mcast_setup_e1;
		mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			ecore_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending     = ecore_mcast_check_pending;

		/* Emulation/FPGA platforms have a smaller CAM */
		if (CHIP_REV_IS_SLOW(sc))
			mcast_obj->max_cmd_len = ECORE_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = ECORE_MAX_MULTICAST;

		mcast_obj->wait_comp         = ecore_mcast_wait;
		mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e1;
		mcast_obj->validate          = ecore_mcast_validate_e1;
		mcast_obj->revert            = ecore_mcast_revert_e1;
		mcast_obj->get_registry_size =
			ecore_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			ecore_mcast_set_registry_size_exact;

		/* 57710 is the only chip that uses the exact match for mcast
		 * at the moment.
		 */
		ECORE_LIST_INIT(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(sc)) {
		mcast_obj->config_mcast  = ecore_mcast_setup_e1h;
		mcast_obj->enqueue_cmd   = NULL;
		mcast_obj->hdl_restore   = NULL;
		mcast_obj->check_pending = ecore_mcast_check_pending;

		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
		mcast_obj->max_cmd_len       = -1;
		mcast_obj->wait_comp         = ecore_mcast_wait;
		mcast_obj->set_one_rule      = NULL;
		mcast_obj->validate          = ecore_mcast_validate_e1h;
		mcast_obj->revert            = ecore_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			ecore_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			ecore_mcast_set_registry_size_aprox;
	} else {
		mcast_obj->config_mcast      = ecore_mcast_setup_e2;
		mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			ecore_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending     = ecore_mcast_check_pending;
		/* TODO: There should be a proper HSI define for this number!!!
		 */
		mcast_obj->max_cmd_len       = 16;
		mcast_obj->wait_comp         = ecore_mcast_wait;
		mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e2;
		mcast_obj->validate          = ecore_mcast_validate_e2;
		mcast_obj->revert            = ecore_mcast_revert_e2;
		mcast_obj->get_registry_size =
			ecore_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			ecore_mcast_set_registry_size_aprox;
	}
}
4174255736Sdavidch
4175255736Sdavidch/*************************** Credit handling **********************************/
4176255736Sdavidch
4177255736Sdavidch/**
4178255736Sdavidch * atomic_add_ifless - add if the result is less than a given value.
4179255736Sdavidch *
4180255736Sdavidch * @v:	pointer of type ecore_atomic_t
4181255736Sdavidch * @a:	the amount to add to v...
4182255736Sdavidch * @u:	...if (v + a) is less than u.
4183255736Sdavidch *
4184255736Sdavidch * returns TRUE if (v + a) was less than u, and FALSE otherwise.
4185255736Sdavidch *
4186255736Sdavidch */
4187255736Sdavidchstatic inline bool __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
4188255736Sdavidch{
4189255736Sdavidch	int c, old;
4190255736Sdavidch
4191255736Sdavidch	c = ECORE_ATOMIC_READ(v);
4192255736Sdavidch	for (;;) {
4193255736Sdavidch		if (ECORE_UNLIKELY(c + a >= u))
4194255736Sdavidch			return FALSE;
4195255736Sdavidch
4196255736Sdavidch		old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
4197255736Sdavidch		if (ECORE_LIKELY(old == c))
4198255736Sdavidch			break;
4199255736Sdavidch		c = old;
4200255736Sdavidch	}
4201255736Sdavidch
4202255736Sdavidch	return TRUE;
4203255736Sdavidch}
4204255736Sdavidch
4205255736Sdavidch/**
4206255736Sdavidch * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
4207255736Sdavidch *
4208255736Sdavidch * @v:	pointer of type ecore_atomic_t
4209255736Sdavidch * @a:	the amount to dec from v...
4210255736Sdavidch * @u:	...if (v - a) is more or equal than u.
4211255736Sdavidch *
4212255736Sdavidch * returns TRUE if (v - a) was more or equal than u, and FALSE
4213255736Sdavidch * otherwise.
4214255736Sdavidch */
4215255736Sdavidchstatic inline bool __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
4216255736Sdavidch{
4217255736Sdavidch	int c, old;
4218255736Sdavidch
4219255736Sdavidch	c = ECORE_ATOMIC_READ(v);
4220255736Sdavidch	for (;;) {
4221255736Sdavidch		if (ECORE_UNLIKELY(c - a < u))
4222255736Sdavidch			return FALSE;
4223255736Sdavidch
4224255736Sdavidch		old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
4225255736Sdavidch		if (ECORE_LIKELY(old == c))
4226255736Sdavidch			break;
4227255736Sdavidch		c = old;
4228255736Sdavidch	}
4229255736Sdavidch
4230255736Sdavidch	return TRUE;
4231255736Sdavidch}
4232255736Sdavidch
4233255736Sdavidchstatic bool ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
4234255736Sdavidch{
4235255736Sdavidch	bool rc;
4236255736Sdavidch
4237255736Sdavidch	ECORE_SMP_MB();
4238255736Sdavidch	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4239255736Sdavidch	ECORE_SMP_MB();
4240255736Sdavidch
4241255736Sdavidch	return rc;
4242255736Sdavidch}
4243255736Sdavidch
4244255736Sdavidchstatic bool ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
4245255736Sdavidch{
4246255736Sdavidch	bool rc;
4247255736Sdavidch
4248255736Sdavidch	ECORE_SMP_MB();
4249255736Sdavidch
4250255736Sdavidch	/* Don't let to refill if credit + cnt > pool_sz */
4251255736Sdavidch	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4252255736Sdavidch
4253255736Sdavidch	ECORE_SMP_MB();
4254255736Sdavidch
4255255736Sdavidch	return rc;
4256255736Sdavidch}
4257255736Sdavidch
4258255736Sdavidchstatic int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
4259255736Sdavidch{
4260255736Sdavidch	int cur_credit;
4261255736Sdavidch
4262255736Sdavidch	ECORE_SMP_MB();
4263255736Sdavidch	cur_credit = ECORE_ATOMIC_READ(&o->credit);
4264255736Sdavidch
4265255736Sdavidch	return cur_credit;
4266255736Sdavidch}
4267255736Sdavidch
/* get/put stub for unlimited pools (negative credit): always succeeds. */
static bool ecore_credit_pool_always_TRUE(struct ecore_credit_pool_obj *o,
					  int cnt)
{
	return TRUE;
}
4273255736Sdavidch
4274255736Sdavidchstatic bool ecore_credit_pool_get_entry(
4275255736Sdavidch	struct ecore_credit_pool_obj *o,
4276255736Sdavidch	int *offset)
4277255736Sdavidch{
4278255736Sdavidch	int idx, vec, i;
4279255736Sdavidch
4280255736Sdavidch	*offset = -1;
4281255736Sdavidch
4282255736Sdavidch	/* Find "internal cam-offset" then add to base for this object... */
4283255736Sdavidch	for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
4284255736Sdavidch
4285255736Sdavidch		/* Skip the current vector if there are no free entries in it */
4286255736Sdavidch		if (!o->pool_mirror[vec])
4287255736Sdavidch			continue;
4288255736Sdavidch
4289255736Sdavidch		/* If we've got here we are going to find a free entry */
4290255736Sdavidch		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
4291255736Sdavidch		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
4292255736Sdavidch
4293255736Sdavidch			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4294255736Sdavidch				/* Got one!! */
4295255736Sdavidch				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4296255736Sdavidch				*offset = o->base_pool_offset + idx;
4297255736Sdavidch				return TRUE;
4298255736Sdavidch			}
4299255736Sdavidch	}
4300255736Sdavidch
4301255736Sdavidch	return FALSE;
4302255736Sdavidch}
4303255736Sdavidch
4304255736Sdavidchstatic bool ecore_credit_pool_put_entry(
4305255736Sdavidch	struct ecore_credit_pool_obj *o,
4306255736Sdavidch	int offset)
4307255736Sdavidch{
4308255736Sdavidch	if (offset < o->base_pool_offset)
4309255736Sdavidch		return FALSE;
4310255736Sdavidch
4311255736Sdavidch	offset -= o->base_pool_offset;
4312255736Sdavidch
4313255736Sdavidch	if (offset >= o->pool_sz)
4314255736Sdavidch		return FALSE;
4315255736Sdavidch
4316255736Sdavidch	/* Return the entry to the pool */
4317255736Sdavidch	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4318255736Sdavidch
4319255736Sdavidch	return TRUE;
4320255736Sdavidch}
4321255736Sdavidch
/* put_entry stub for pools without CAM-entry bookkeeping: always succeeds. */
static bool ecore_credit_pool_put_entry_always_TRUE(
	struct ecore_credit_pool_obj *o,
	int offset)
{
	return TRUE;
}
4328255736Sdavidch
/* get_entry stub for pools without CAM-entry bookkeeping: always succeeds
 * and reports -1 (no specific CAM offset).
 */
static bool ecore_credit_pool_get_entry_always_TRUE(
	struct ecore_credit_pool_obj *o,
	int *offset)
{
	*offset = -1;
	return TRUE;
}
4336255736Sdavidch/**
4337255736Sdavidch * ecore_init_credit_pool - initialize credit pool internals.
4338255736Sdavidch *
4339255736Sdavidch * @p:
4340255736Sdavidch * @base:	Base entry in the CAM to use.
4341255736Sdavidch * @credit:	pool size.
4342255736Sdavidch *
4343255736Sdavidch * If base is negative no CAM entries handling will be performed.
4344255736Sdavidch * If credit is negative pool operations will always succeed (unlimited pool).
4345255736Sdavidch *
4346255736Sdavidch */
4347296071Sdavidcsvoid ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
4348255736Sdavidch					  int base, int credit)
4349255736Sdavidch{
4350255736Sdavidch	/* Zero the object first */
4351255736Sdavidch	ECORE_MEMSET(p, 0, sizeof(*p));
4352255736Sdavidch
4353255736Sdavidch	/* Set the table to all 1s */
4354255736Sdavidch	ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4355255736Sdavidch
4356255736Sdavidch	/* Init a pool as full */
4357255736Sdavidch	ECORE_ATOMIC_SET(&p->credit, credit);
4358255736Sdavidch
4359255736Sdavidch	/* The total poll size */
4360255736Sdavidch	p->pool_sz = credit;
4361255736Sdavidch
4362255736Sdavidch	p->base_pool_offset = base;
4363255736Sdavidch
4364255736Sdavidch	/* Commit the change */
4365255736Sdavidch	ECORE_SMP_MB();
4366255736Sdavidch
4367255736Sdavidch	p->check = ecore_credit_pool_check;
4368255736Sdavidch
4369255736Sdavidch	/* if pool credit is negative - disable the checks */
4370255736Sdavidch	if (credit >= 0) {
4371255736Sdavidch		p->put      = ecore_credit_pool_put;
4372255736Sdavidch		p->get      = ecore_credit_pool_get;
4373255736Sdavidch		p->put_entry = ecore_credit_pool_put_entry;
4374255736Sdavidch		p->get_entry = ecore_credit_pool_get_entry;
4375255736Sdavidch	} else {
4376255736Sdavidch		p->put      = ecore_credit_pool_always_TRUE;
4377255736Sdavidch		p->get      = ecore_credit_pool_always_TRUE;
4378255736Sdavidch		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4379255736Sdavidch		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4380255736Sdavidch	}
4381255736Sdavidch
4382255736Sdavidch	/* If base is negative - disable entries handling */
4383255736Sdavidch	if (base < 0) {
4384255736Sdavidch		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4385255736Sdavidch		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4386255736Sdavidch	}
4387255736Sdavidch}
4388255736Sdavidch
4389255736Sdavidchvoid ecore_init_mac_credit_pool(struct bxe_softc *sc,
4390255736Sdavidch				struct ecore_credit_pool_obj *p, uint8_t func_id,
4391255736Sdavidch				uint8_t func_num)
4392255736Sdavidch{
4393255736Sdavidch/* TODO: this will be defined in consts as well... */
4394255736Sdavidch#define ECORE_CAM_SIZE_EMUL 5
4395255736Sdavidch
4396255736Sdavidch	int cam_sz;
4397255736Sdavidch
4398255736Sdavidch	if (CHIP_IS_E1(sc)) {
4399255736Sdavidch		/* In E1, Multicast is saved in cam... */
4400255736Sdavidch		if (!CHIP_REV_IS_SLOW(sc))
4401255736Sdavidch			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - ECORE_MAX_MULTICAST;
4402255736Sdavidch		else
4403255736Sdavidch			cam_sz = ECORE_CAM_SIZE_EMUL - ECORE_MAX_EMUL_MULTI;
4404255736Sdavidch
4405255736Sdavidch		ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4406255736Sdavidch
4407255736Sdavidch	} else if (CHIP_IS_E1H(sc)) {
4408255736Sdavidch		/* CAM credit is equally divided between all active functions
4409255736Sdavidch		 * on the PORT!.
4410255736Sdavidch		 */
4411255736Sdavidch		if ((func_num > 0)) {
4412255736Sdavidch			if (!CHIP_REV_IS_SLOW(sc))
4413255736Sdavidch				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4414255736Sdavidch			else
4415255736Sdavidch				cam_sz = ECORE_CAM_SIZE_EMUL;
4416255736Sdavidch			ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4417255736Sdavidch		} else {
4418255736Sdavidch			/* this should never happen! Block MAC operations. */
4419255736Sdavidch			ecore_init_credit_pool(p, 0, 0);
4420255736Sdavidch		}
4421255736Sdavidch	} else {
4422255736Sdavidch		/*
4423255736Sdavidch		 * CAM credit is equaly divided between all active functions
4424255736Sdavidch		 * on the PATH.
4425255736Sdavidch		 */
4426296071Sdavidcs		if (func_num > 0) {
4427255736Sdavidch			if (!CHIP_REV_IS_SLOW(sc))
4428296071Sdavidcs				cam_sz = PF_MAC_CREDIT_E2(sc, func_num);
4429255736Sdavidch			else
4430255736Sdavidch				cam_sz = ECORE_CAM_SIZE_EMUL;
4431255736Sdavidch
4432255736Sdavidch			/* No need for CAM entries handling for 57712 and
4433255736Sdavidch			 * newer.
4434255736Sdavidch			 */
4435255736Sdavidch			ecore_init_credit_pool(p, -1, cam_sz);
4436255736Sdavidch		} else {
4437255736Sdavidch			/* this should never happen! Block MAC operations. */
4438255736Sdavidch			ecore_init_credit_pool(p, 0, 0);
4439255736Sdavidch		}
4440255736Sdavidch	}
4441255736Sdavidch}
4442255736Sdavidch
4443255736Sdavidchvoid ecore_init_vlan_credit_pool(struct bxe_softc *sc,
4444255736Sdavidch				 struct ecore_credit_pool_obj *p,
4445255736Sdavidch				 uint8_t func_id,
4446255736Sdavidch				 uint8_t func_num)
4447255736Sdavidch{
4448255736Sdavidch	if (CHIP_IS_E1x(sc)) {
4449255736Sdavidch		/* There is no VLAN credit in HW on 57710 and 57711 only
4450255736Sdavidch		 * MAC / MAC-VLAN can be set
4451255736Sdavidch		 */
4452255736Sdavidch		ecore_init_credit_pool(p, 0, -1);
4453255736Sdavidch	} else {
4454255736Sdavidch		/* CAM credit is equally divided between all active functions
4455255736Sdavidch		 * on the PATH.
4456255736Sdavidch		 */
4457255736Sdavidch		if (func_num > 0) {
4458296071Sdavidcs			int credit = PF_VLAN_CREDIT_E2(sc, func_num);
4459296071Sdavidcs
4460296071Sdavidcs			ecore_init_credit_pool(p, -1/*unused for E2*/, credit);
4461255736Sdavidch		} else
4462255736Sdavidch			/* this should never happen! Block VLAN operations. */
4463255736Sdavidch			ecore_init_credit_pool(p, 0, 0);
4464255736Sdavidch	}
4465255736Sdavidch}
4466255736Sdavidch
4467255736Sdavidch/****************** RSS Configuration ******************/
4468255736Sdavidch
/**
 * ecore_setup_rss - configure RSS
 *
 * @sc:		device handle
 * @p:		rss configuration
 *
 * Fills an eth_rss_update_ramrod_data structure from @p (mode,
 * capability flags, optional hash key, result mask, engine id and
 * indirection table), caches the indirection table on the object, and
 * sends an RSS_UPDATE ramrod.
 *
 * Returns ECORE_PENDING on successful submission (completion arrives
 * asynchronously) or a negative error code from ecore_sp_post().
 */
static int ecore_setup_rss(struct bxe_softc *sc,
			   struct ecore_config_rss_params *p)
{
	struct ecore_rss_config_obj *o = p->rss_obj;
	struct ecore_raw_obj *r = &o->raw;
	/* Ramrod data lives in the raw object's DMA-able buffer */
	struct eth_rss_update_ramrod_data *data =
		(struct eth_rss_update_ramrod_data *)(r->rdata);
	uint16_t caps = 0;
	uint8_t rss_mode = 0;
	int rc;

	ECORE_MEMSET(data, 0, sizeof(*data));

	ECORE_MSG(sc, "Configuring RSS\n");

	/* Set an echo field: SW cid + pending-state bit, returned verbatim
	 * in the completion so it can be matched to this request.
	 */
	data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
				 (r->state << ECORE_SWCID_SHIFT));

	/* RSS mode */
	if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_DISABLED;
	else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_REGULAR;

	data->rss_mode = rss_mode;

	ECORE_MSG(sc, "rss_mode=%d\n", rss_mode);

	/* RSS capabilities: translate each requested ECORE_RSS_* flag into
	 * the corresponding FW capability bit.
	 */
	if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_VXLAN, &p->rss_flags))
		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_VXLAN, &p->rss_flags))
		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;

	if (ECORE_TEST_BIT(ECORE_RSS_TUNN_INNER_HDRS, &p->rss_flags))
		caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;

	/* RSS keys: only copied (and flagged for update) when requested */
	if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
		ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
		       sizeof(data->rss_key));
		caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	}

	data->capabilities = ECORE_CPU_TO_LE16(caps);

	/* Hashing mask */
	data->rss_result_mask = p->rss_result_mask;

	/* RSS engine ID */
	data->rss_engine_id = o->engine_id;

	ECORE_MSG(sc, "rss_engine_id=%d\n", data->rss_engine_id);

	/* Indirection table */
	ECORE_MEMCPY(data->indirection_table, p->ind_table,
		  T_ETH_INDIRECTION_TABLE_SIZE);

	/* Remember the last configuration */
	ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);


	/* No need for an explicit memory barrier here as long as we
	 * ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read. If the memory read is removed we will have to put a
	 * full memory barrier there (inside ecore_sp_post()).
	 */

	/* Send a ramrod */
	rc = ecore_sp_post(sc,
			     RAMROD_CMD_ID_ETH_RSS_UPDATE,
			     r->cid,
			     r->rdata_mapping,
			     ETH_CONNECTION_TYPE);

	if (rc < 0)
		return rc;

	return ECORE_PENDING;
}
4578255736Sdavidch
/* Copy the last-configured RSS indirection table (cached by
 * ecore_setup_rss) into the caller-supplied buffer, which must hold
 * at least sizeof(rss_obj->ind_table) bytes.
 */
void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
			     uint8_t *ind_table)
{
	ECORE_MEMCPY(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}
4584255736Sdavidch
4585255736Sdavidchint ecore_config_rss(struct bxe_softc *sc,
4586255736Sdavidch		     struct ecore_config_rss_params *p)
4587255736Sdavidch{
4588255736Sdavidch	int rc;
4589255736Sdavidch	struct ecore_rss_config_obj *o = p->rss_obj;
4590255736Sdavidch	struct ecore_raw_obj *r = &o->raw;
4591255736Sdavidch
4592255736Sdavidch	/* Do nothing if only driver cleanup was requested */
4593260113Sedavis	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4594260113Sedavis		ECORE_MSG(sc, "Not configuring RSS ramrod_flags=%lx\n",
4595260113Sedavis			  p->ramrod_flags);
4596255736Sdavidch		return ECORE_SUCCESS;
4597260113Sedavis	}
4598255736Sdavidch
4599255736Sdavidch	r->set_pending(r);
4600255736Sdavidch
4601255736Sdavidch	rc = o->config_rss(sc, p);
4602255736Sdavidch	if (rc < 0) {
4603255736Sdavidch		r->clear_pending(r);
4604255736Sdavidch		return rc;
4605255736Sdavidch	}
4606255736Sdavidch
4607255736Sdavidch	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
4608255736Sdavidch		rc = r->wait_comp(sc, r);
4609255736Sdavidch
4610255736Sdavidch	return rc;
4611255736Sdavidch}
4612255736Sdavidch
/* Initialize an RSS configuration object: set up the embedded raw
 * object, record the HW engine and install ecore_setup_rss as the
 * config_rss callback.
 */
void ecore_init_rss_config_obj(struct bxe_softc *sc,
			       struct ecore_rss_config_obj *rss_obj,
			       uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
			       void *rdata, ecore_dma_addr_t rdata_mapping,
			       int state, unsigned long *pstate,
			       ecore_obj_type type)
{
	ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
			   rdata_mapping, state, pstate, type);

	rss_obj->engine_id  = engine_id;
	rss_obj->config_rss = ecore_setup_rss;
}
4626255736Sdavidch
4627258187Sedavis
4628255736Sdavidch/********************** Queue state object ***********************************/
4629255736Sdavidch
4630255736Sdavidch/**
4631255736Sdavidch * ecore_queue_state_change - perform Queue state change transition
4632255736Sdavidch *
4633255736Sdavidch * @sc:		device handle
4634255736Sdavidch * @params:	parameters to perform the transition
4635255736Sdavidch *
4636255736Sdavidch * returns 0 in case of successfully completed transition, negative error
4637255736Sdavidch * code in case of failure, positive (EBUSY) value if there is a completion
4638255736Sdavidch * to that is still pending (possible only if RAMROD_COMP_WAIT is
4639255736Sdavidch * not set in params->ramrod_flags for asynchronous commands).
4640255736Sdavidch *
4641255736Sdavidch */
int ecore_queue_state_change(struct bxe_softc *sc,
			     struct ecore_queue_state_params *params)
{
	struct ecore_queue_sp_obj *o = params->q_obj;
	int rc, pending_bit;
	unsigned long *pending = &o->pending;

	/* Check that the requested transition is legal */
	rc = o->check_transition(sc, o, params);
	if (rc) {
		ECORE_ERR("check transition returned an error. rc %d\n", rc);
		return ECORE_INVAL;
	}

	/* Set "pending" bit BEFORE sending the ramrod, so a completion
	 * arriving at any point afterwards finds it set.
	 */
	ECORE_MSG(sc, "pending bit was=%lx\n", o->pending);
	pending_bit = o->set_pending(o, params);
	ECORE_MSG(sc, "pending bit now=%lx\n", o->pending);

	/* Don't send a command if only driver cleanup was requested */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
		/* Complete locally - no HW interaction */
		o->complete_cmd(sc, o, pending_bit);
	else {
		/* Send a ramrod */
		rc = o->send_cmd(sc, params);
		if (rc) {
			/* Submission failed - undo the pending mark and
			 * reset the expected next state.
			 */
			o->next_state = ECORE_Q_STATE_MAX;
			ECORE_CLEAR_BIT(pending_bit, pending);
			ECORE_SMP_MB_AFTER_CLEAR_BIT();
			return rc;
		}

		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			/* Synchronous mode: block until the completion */
			rc = o->wait_comp(sc, o, pending_bit);
			if (rc)
				return rc;

			return ECORE_SUCCESS;
		}
	}

	/* Asynchronous mode: report EBUSY while the bit is still pending */
	return ECORE_RET_PENDING(pending_bit, pending);
}
4685255736Sdavidch
4686255736Sdavidchstatic int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
4687255736Sdavidch				   struct ecore_queue_state_params *params)
4688255736Sdavidch{
4689255736Sdavidch	enum ecore_queue_cmd cmd = params->cmd, bit;
4690255736Sdavidch
4691255736Sdavidch	/* ACTIVATE and DEACTIVATE commands are implemented on top of
4692255736Sdavidch	 * UPDATE command.
4693255736Sdavidch	 */
4694255736Sdavidch	if ((cmd == ECORE_Q_CMD_ACTIVATE) ||
4695255736Sdavidch	    (cmd == ECORE_Q_CMD_DEACTIVATE))
4696255736Sdavidch		bit = ECORE_Q_CMD_UPDATE;
4697255736Sdavidch	else
4698255736Sdavidch		bit = cmd;
4699255736Sdavidch
4700255736Sdavidch	ECORE_SET_BIT(bit, &obj->pending);
4701255736Sdavidch	return bit;
4702255736Sdavidch}
4703255736Sdavidch
/* Block until the pending bit for @cmd is cleared by the completion path. */
static int ecore_queue_wait_comp(struct bxe_softc *sc,
				 struct ecore_queue_sp_obj *o,
				 enum ecore_queue_cmd cmd)
{
	return ecore_state_wait(sc, cmd, &o->pending);
}
4710255736Sdavidch
4711255736Sdavidch/**
4712255736Sdavidch * ecore_queue_comp_cmd - complete the state change command.
4713255736Sdavidch *
4714255736Sdavidch * @sc:		device handle
4715255736Sdavidch * @o:
4716255736Sdavidch * @cmd:
4717255736Sdavidch *
4718255736Sdavidch * Checks that the arrived completion is expected.
4719255736Sdavidch */
static int ecore_queue_comp_cmd(struct bxe_softc *sc,
				struct ecore_queue_sp_obj *o,
				enum ecore_queue_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	/* The completion must match a command we actually have pending */
	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
		ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, o->cids[ECORE_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return ECORE_INVAL;
	}

	if (o->next_tx_only >= o->max_cos)
		/* >= because tx only must always be smaller than cos since the
		 * primary connection supports COS 0
		 */
		ECORE_ERR("illegal value for next tx_only: %d. max cos was %d",
			  o->next_tx_only, o->max_cos);

	ECORE_MSG(sc,
		  "Completing command %d for queue %d, setting state to %d\n",
		  cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only)  /* print num tx-only if any exist */
		ECORE_MSG(sc, "primary cid %d: num tx-only cons %d\n",
			  o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);

	/* Commit the transition that was staged in next_state/next_tx_only */
	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = ECORE_Q_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	ECORE_CLEAR_BIT(cmd, &o->pending);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();

	return ECORE_SUCCESS;
}
4762255736Sdavidch
4763255736Sdavidchstatic void ecore_q_fill_setup_data_e2(struct bxe_softc *sc,
4764255736Sdavidch				struct ecore_queue_state_params *cmd_params,
4765255736Sdavidch				struct client_init_ramrod_data *data)
4766255736Sdavidch{
4767255736Sdavidch	struct ecore_queue_setup_params *params = &cmd_params->params.setup;
4768255736Sdavidch
4769255736Sdavidch	/* Rx data */
4770255736Sdavidch
4771255736Sdavidch	/* IPv6 TPA supported for E2 and above only */
4772255736Sdavidch	data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
4773255736Sdavidch					  &params->flags) *
4774255736Sdavidch				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4775255736Sdavidch}
4776255736Sdavidch
/* Fill the "general" section of the client-init ramrod data from the
 * queue object and the caller's setup parameters: client/function ids,
 * statistics settings, activation flag, MTU, COS, traffic type and the
 * fastpath HSI version.
 */
static void ecore_q_fill_init_general_data(struct bxe_softc *sc,
				struct ecore_queue_sp_obj *o,
				struct ecore_general_setup_params *params,
				struct client_init_general_data *gen_data,
				unsigned long *flags)
{
	gen_data->client_id = o->cl_id;

	if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
		gen_data->statistics_counter_id =
					params->stat_id;
		gen_data->statistics_en_flg = 1;
		gen_data->statistics_zero_flg =
			ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
	} else
		/* Statistics disabled for this client */
		gen_data->statistics_counter_id =
					DISABLE_STATISTIC_COUNTER_ID_VALUE;

	gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE,
						   flags);
	gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
						    flags);
	gen_data->sp_client_id = params->spcl_id;
	gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
	gen_data->func_id = o->func_id;

	gen_data->cos = params->cos;

	/* FCoE queues get the FCoE traffic type, all others network */
	gen_data->traffic_type =
		ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;

	gen_data->fp_hsi_ver = params->fp_hsi;

	ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d\n",
		  gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
}
4814255736Sdavidch
/* Fill the Tx section of the client-init ramrod data: security/VLAN/
 * switching flags, tunnel LSO checksum options, status-block binding
 * and the Tx BD ring base address.
 */
static void ecore_q_fill_init_tx_data(struct ecore_queue_sp_obj *o,
				struct ecore_txq_setup_params *params,
				struct client_init_tx_data *tx_data,
				unsigned long *flags)
{
	tx_data->enforce_security_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
	tx_data->default_vlan =
		ECORE_CPU_TO_LE16(params->default_vlan);
	tx_data->default_vlan_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
	tx_data->tx_switching_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
	tx_data->anti_spoofing_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
	tx_data->force_default_pri_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
	tx_data->refuse_outband_vlan_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
	tx_data->tunnel_lso_inc_ip_id =
		ECORE_TEST_BIT(ECORE_Q_FLG_TUN_INC_INNER_IP_ID, flags);
	/* Pseudo-checksum placement for non-LSO tunnel packets */
	tx_data->tunnel_non_lso_pcsum_location =
		ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
							    CSUM_ON_BD;

	/* Bind this Tx queue to its status block / CQ index */
	tx_data->tx_status_block_id = params->fw_sb_id;
	tx_data->tx_sb_index_number = params->sb_cq_index;
	tx_data->tss_leading_client_id = params->tss_leading_cl_id;

	/* Tx BD ring base (DMA address split into lo/hi 32-bit halves) */
	tx_data->tx_bd_page_base.lo =
		ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
	tx_data->tx_bd_page_base.hi =
		ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));

	/* Don't configure any Tx switching mode during queue SETUP */
	tx_data->state = 0;
}
4852255736Sdavidch
4853255736Sdavidchstatic void ecore_q_fill_init_pause_data(struct ecore_queue_sp_obj *o,
4854255736Sdavidch				struct rxq_pause_params *params,
4855255736Sdavidch				struct client_init_rx_data *rx_data)
4856255736Sdavidch{
4857255736Sdavidch	/* flow control data */
4858255736Sdavidch	rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
4859255736Sdavidch	rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
4860255736Sdavidch	rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
4861255736Sdavidch	rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
4862255736Sdavidch	rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
4863255736Sdavidch	rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
4864255736Sdavidch	rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
4865255736Sdavidch}
4866255736Sdavidch
/* Fill the Rx section of a CLIENT_SETUP ramrod from the driver-side
 * setup parameters and the queue flags. Pure field mapping; the only
 * policy decision here is starting the client in DROP_ALL mode.
 */
static void ecore_q_fill_init_rx_data(struct ecore_queue_sp_obj *o,
				struct ecore_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	/* TPA (LRO) aggregation: ECORE_TEST_BIT yields 0/1, so the
	 * multiplications set the IPv4-enable and GRO-mode bits only
	 * when the corresponding queue flag is set.
	 */
	rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
				CLIENT_INIT_RX_DATA_TPA_MODE;
	rx_data->vmqueue_mode_en_flg = 0;

	/* OOO (out-of-order/iSCSI) queues carry extra data over the SGL */
	rx_data->extra_data_over_sgl_en_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);

	/* Always start in DROP_ALL mode */
	rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* We don't set drop flags */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;

	/* Inner/outer VLAN stripping */
	rx_data->inner_vlan_removal_enable_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);

	/* Status block routing and TPA limits */
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
	rx_data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buf_sz);

	/* BD, SGE and CQE ring base addresses, split into LE 32-bit halves */
	rx_data->bd_page_base.lo =
		ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
						 flags);

	/* Approximate (hash-based) multicast filtering */
	if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		ECORE_CPU_TO_LE16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		ECORE_CPU_TO_LE16(params->silent_removal_mask);
}
4936255736Sdavidch
4937255736Sdavidch/* initialize the general, tx and rx parts of a queue object */
4938255736Sdavidchstatic void ecore_q_fill_setup_data_cmn(struct bxe_softc *sc,
4939255736Sdavidch				struct ecore_queue_state_params *cmd_params,
4940255736Sdavidch				struct client_init_ramrod_data *data)
4941255736Sdavidch{
4942255736Sdavidch	ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4943255736Sdavidch				       &cmd_params->params.setup.gen_params,
4944255736Sdavidch				       &data->general,
4945255736Sdavidch				       &cmd_params->params.setup.flags);
4946255736Sdavidch
4947255736Sdavidch	ecore_q_fill_init_tx_data(cmd_params->q_obj,
4948255736Sdavidch				  &cmd_params->params.setup.txq_params,
4949255736Sdavidch				  &data->tx,
4950255736Sdavidch				  &cmd_params->params.setup.flags);
4951255736Sdavidch
4952255736Sdavidch	ecore_q_fill_init_rx_data(cmd_params->q_obj,
4953255736Sdavidch				  &cmd_params->params.setup.rxq_params,
4954255736Sdavidch				  &data->rx,
4955255736Sdavidch				  &cmd_params->params.setup.flags);
4956255736Sdavidch
4957255736Sdavidch	ecore_q_fill_init_pause_data(cmd_params->q_obj,
4958255736Sdavidch				     &cmd_params->params.setup.pause_params,
4959255736Sdavidch				     &data->rx);
4960255736Sdavidch}
4961255736Sdavidch
4962255736Sdavidch/* initialize the general and tx parts of a tx-only queue object */
4963255736Sdavidchstatic void ecore_q_fill_setup_tx_only(struct bxe_softc *sc,
4964255736Sdavidch				struct ecore_queue_state_params *cmd_params,
4965255736Sdavidch				struct tx_queue_init_ramrod_data *data)
4966255736Sdavidch{
4967255736Sdavidch	ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4968255736Sdavidch				       &cmd_params->params.tx_only.gen_params,
4969255736Sdavidch				       &data->general,
4970255736Sdavidch				       &cmd_params->params.tx_only.flags);
4971255736Sdavidch
4972255736Sdavidch	ecore_q_fill_init_tx_data(cmd_params->q_obj,
4973255736Sdavidch				  &cmd_params->params.tx_only.txq_params,
4974255736Sdavidch				  &data->tx,
4975255736Sdavidch				  &cmd_params->params.tx_only.flags);
4976255736Sdavidch
4977255736Sdavidch	ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
4978255736Sdavidch		  cmd_params->q_obj->cids[0],
4979255736Sdavidch		  data->tx.tx_bd_page_base.lo,
4980255736Sdavidch		  data->tx.tx_bd_page_base.hi);
4981255736Sdavidch}
4982255736Sdavidch
4983255736Sdavidch/**
4984255736Sdavidch * ecore_q_init - init HW/FW queue
4985255736Sdavidch *
4986255736Sdavidch * @sc:		device handle
4987255736Sdavidch * @params:
4988255736Sdavidch *
4989255736Sdavidch * HW/FW initial Queue configuration:
4990255736Sdavidch *      - HC: Rx and Tx
4991255736Sdavidch *      - CDU context validation
4992255736Sdavidch *
4993255736Sdavidch */
4994255736Sdavidchstatic inline int ecore_q_init(struct bxe_softc *sc,
4995255736Sdavidch			       struct ecore_queue_state_params *params)
4996255736Sdavidch{
4997255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
4998255736Sdavidch	struct ecore_queue_init_params *init = &params->params.init;
4999255736Sdavidch	uint16_t hc_usec;
5000255736Sdavidch	uint8_t cos;
5001255736Sdavidch
5002255736Sdavidch	/* Tx HC configuration */
5003255736Sdavidch	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
5004255736Sdavidch	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
5005255736Sdavidch		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
5006255736Sdavidch
5007255736Sdavidch		ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
5008255736Sdavidch			init->tx.sb_cq_index,
5009255736Sdavidch			!ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->tx.flags),
5010255736Sdavidch			hc_usec);
5011255736Sdavidch	}
5012255736Sdavidch
5013255736Sdavidch	/* Rx HC configuration */
5014255736Sdavidch	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
5015255736Sdavidch	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
5016255736Sdavidch		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
5017255736Sdavidch
5018255736Sdavidch		ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
5019255736Sdavidch			init->rx.sb_cq_index,
5020255736Sdavidch			!ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->rx.flags),
5021255736Sdavidch			hc_usec);
5022255736Sdavidch	}
5023255736Sdavidch
5024255736Sdavidch	/* Set CDU context validation values */
5025255736Sdavidch	for (cos = 0; cos < o->max_cos; cos++) {
5026255736Sdavidch		ECORE_MSG(sc, "setting context validation. cid %d, cos %d\n",
5027255736Sdavidch			  o->cids[cos], cos);
5028255736Sdavidch		ECORE_MSG(sc, "context pointer %p\n", init->cxts[cos]);
5029255736Sdavidch		ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
5030255736Sdavidch	}
5031255736Sdavidch
5032255736Sdavidch	/* As no ramrod is sent, complete the command immediately  */
5033255736Sdavidch	o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
5034255736Sdavidch
5035255736Sdavidch	ECORE_MMIOWB();
5036255736Sdavidch	ECORE_SMP_MB();
5037255736Sdavidch
5038255736Sdavidch	return ECORE_SUCCESS;
5039255736Sdavidch}
5040255736Sdavidch
5041255736Sdavidchstatic inline int ecore_q_send_setup_e1x(struct bxe_softc *sc,
5042255736Sdavidch					struct ecore_queue_state_params *params)
5043255736Sdavidch{
5044255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5045255736Sdavidch	struct client_init_ramrod_data *rdata =
5046255736Sdavidch		(struct client_init_ramrod_data *)o->rdata;
5047255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5048255736Sdavidch	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
5049255736Sdavidch
5050255736Sdavidch	/* Clear the ramrod data */
5051255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5052255736Sdavidch
5053255736Sdavidch	/* Fill the ramrod data */
5054255736Sdavidch	ecore_q_fill_setup_data_cmn(sc, params, rdata);
5055255736Sdavidch
5056296071Sdavidcs	/* No need for an explicit memory barrier here as long as we
5057296071Sdavidcs	 * ensure the ordering of writing to the SPQ element
5058255736Sdavidch	 * and updating of the SPQ producer which involves a memory
5059296071Sdavidcs	 * read. If the memory read is removed we will have to put a
5060296071Sdavidcs	 * full memory barrier there (inside ecore_sp_post()).
5061255736Sdavidch	 */
5062255736Sdavidch	return ecore_sp_post(sc,
5063255736Sdavidch			     ramrod,
5064255736Sdavidch			     o->cids[ECORE_PRIMARY_CID_INDEX],
5065255736Sdavidch			     data_mapping,
5066255736Sdavidch			     ETH_CONNECTION_TYPE);
5067255736Sdavidch}
5068255736Sdavidch
5069255736Sdavidchstatic inline int ecore_q_send_setup_e2(struct bxe_softc *sc,
5070255736Sdavidch					struct ecore_queue_state_params *params)
5071255736Sdavidch{
5072255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5073255736Sdavidch	struct client_init_ramrod_data *rdata =
5074255736Sdavidch		(struct client_init_ramrod_data *)o->rdata;
5075255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5076255736Sdavidch	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
5077255736Sdavidch
5078255736Sdavidch	/* Clear the ramrod data */
5079255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5080255736Sdavidch
5081255736Sdavidch	/* Fill the ramrod data */
5082255736Sdavidch	ecore_q_fill_setup_data_cmn(sc, params, rdata);
5083255736Sdavidch	ecore_q_fill_setup_data_e2(sc, params, rdata);
5084255736Sdavidch
5085296071Sdavidcs	/* No need for an explicit memory barrier here as long as we
5086296071Sdavidcs	 * ensure the ordering of writing to the SPQ element
5087255736Sdavidch	 * and updating of the SPQ producer which involves a memory
5088296071Sdavidcs	 * read. If the memory read is removed we will have to put a
5089296071Sdavidcs	 * full memory barrier there (inside ecore_sp_post()).
5090255736Sdavidch	 */
5091255736Sdavidch	return ecore_sp_post(sc,
5092255736Sdavidch			     ramrod,
5093255736Sdavidch			     o->cids[ECORE_PRIMARY_CID_INDEX],
5094255736Sdavidch			     data_mapping,
5095255736Sdavidch			     ETH_CONNECTION_TYPE);
5096255736Sdavidch}
5097255736Sdavidch
5098255736Sdavidchstatic inline int ecore_q_send_setup_tx_only(struct bxe_softc *sc,
5099255736Sdavidch				  struct ecore_queue_state_params *params)
5100255736Sdavidch{
5101255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5102255736Sdavidch	struct tx_queue_init_ramrod_data *rdata =
5103255736Sdavidch		(struct tx_queue_init_ramrod_data *)o->rdata;
5104255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5105255736Sdavidch	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
5106255736Sdavidch	struct ecore_queue_setup_tx_only_params *tx_only_params =
5107255736Sdavidch		&params->params.tx_only;
5108255736Sdavidch	uint8_t cid_index = tx_only_params->cid_index;
5109255736Sdavidch
5110255736Sdavidch	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type))
5111255736Sdavidch		ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
5112255736Sdavidch	ECORE_MSG(sc, "sending forward tx-only ramrod");
5113255736Sdavidch
5114255736Sdavidch	if (cid_index >= o->max_cos) {
5115255736Sdavidch		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5116255736Sdavidch			  o->cl_id, cid_index);
5117255736Sdavidch		return ECORE_INVAL;
5118255736Sdavidch	}
5119255736Sdavidch
5120255736Sdavidch	ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d\n",
5121255736Sdavidch		  tx_only_params->gen_params.cos,
5122255736Sdavidch		  tx_only_params->gen_params.spcl_id);
5123255736Sdavidch
5124255736Sdavidch	/* Clear the ramrod data */
5125255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5126255736Sdavidch
5127255736Sdavidch	/* Fill the ramrod data */
5128255736Sdavidch	ecore_q_fill_setup_tx_only(sc, params, rdata);
5129255736Sdavidch
5130255736Sdavidch	ECORE_MSG(sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
5131255736Sdavidch		  o->cids[cid_index], rdata->general.client_id,
5132255736Sdavidch		  rdata->general.sp_client_id, rdata->general.cos);
5133255736Sdavidch
5134296071Sdavidcs	/* No need for an explicit memory barrier here as long as we
5135296071Sdavidcs	 * ensure the ordering of writing to the SPQ element
5136255736Sdavidch	 * and updating of the SPQ producer which involves a memory
5137296071Sdavidcs	 * read. If the memory read is removed we will have to put a
5138296071Sdavidcs	 * full memory barrier there (inside ecore_sp_post()).
5139255736Sdavidch	 */
5140255736Sdavidch	return ecore_sp_post(sc, ramrod, o->cids[cid_index],
5141255736Sdavidch			     data_mapping, ETH_CONNECTION_TYPE);
5142255736Sdavidch}
5143255736Sdavidch
/* Fill a CLIENT_UPDATE ramrod from the update flags. Each feature has
 * an "enable" bit plus a "change" bit; the FW only applies a feature
 * whose change bit is set, so untouched features keep their state.
 */
static void ecore_q_fill_update_data(struct bxe_softc *sc,
				     struct ecore_queue_sp_obj *obj,
				     struct ecore_queue_update_params *params,
				     struct client_update_ramrod_data *data)
{
	/* Client ID of the client to update */
	data->client_id = obj->cl_id;

	/* Function ID of the client to update */
	data->func_id = obj->func_id;

	/* Default VLAN value */
	data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);

	/* Inner VLAN stripping */
	data->inner_vlan_removal_enable_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM,
			       &params->update_flags);
	data->inner_vlan_removal_change_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
		       &params->update_flags);

	/* Outer VLAN stripping */
	data->outer_vlan_removal_enable_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM,
			       &params->update_flags);
	data->outer_vlan_removal_change_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
		       &params->update_flags);

	/* Drop packets that have source MAC that doesn't belong to this
	 * Queue.
	 */
	data->anti_spoofing_enable_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF,
			       &params->update_flags);
	data->anti_spoofing_change_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
		       &params->update_flags);

	/* Activate/Deactivate */
	data->activate_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
	data->activate_change_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
			       &params->update_flags);

	/* Enable default VLAN */
	data->default_vlan_enable_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN,
			       &params->update_flags);
	data->default_vlan_change_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
		       &params->update_flags);

	/* silent vlan removal */
	data->silent_vlan_change_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			       &params->update_flags);
	data->silent_vlan_removal_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
			       &params->update_flags);
	data->silent_vlan_value = ECORE_CPU_TO_LE16(params->silent_removal_value);
	data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);

	/* tx switching */
	data->tx_switching_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING,
			       &params->update_flags);
	data->tx_switching_change_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
			       &params->update_flags);

	/* PTP */
	data->handle_ptp_pkts_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS,
			       &params->update_flags);
	data->handle_ptp_pkts_change_flg =
		ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS_CHNG,
			       &params->update_flags);
}
5225255736Sdavidch
5226255736Sdavidchstatic inline int ecore_q_send_update(struct bxe_softc *sc,
5227255736Sdavidch				      struct ecore_queue_state_params *params)
5228255736Sdavidch{
5229255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5230255736Sdavidch	struct client_update_ramrod_data *rdata =
5231255736Sdavidch		(struct client_update_ramrod_data *)o->rdata;
5232255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5233255736Sdavidch	struct ecore_queue_update_params *update_params =
5234255736Sdavidch		&params->params.update;
5235255736Sdavidch	uint8_t cid_index = update_params->cid_index;
5236255736Sdavidch
5237255736Sdavidch	if (cid_index >= o->max_cos) {
5238255736Sdavidch		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5239255736Sdavidch			  o->cl_id, cid_index);
5240255736Sdavidch		return ECORE_INVAL;
5241255736Sdavidch	}
5242255736Sdavidch
5243255736Sdavidch	/* Clear the ramrod data */
5244255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5245255736Sdavidch
5246255736Sdavidch	/* Fill the ramrod data */
5247255736Sdavidch	ecore_q_fill_update_data(sc, o, update_params, rdata);
5248255736Sdavidch
5249296071Sdavidcs	/* No need for an explicit memory barrier here as long as we
5250296071Sdavidcs	 * ensure the ordering of writing to the SPQ element
5251255736Sdavidch	 * and updating of the SPQ producer which involves a memory
5252296071Sdavidcs	 * read. If the memory read is removed we will have to put a
5253296071Sdavidcs	 * full memory barrier there (inside ecore_sp_post()).
5254255736Sdavidch	 */
5255255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5256255736Sdavidch			     o->cids[cid_index], data_mapping,
5257255736Sdavidch			     ETH_CONNECTION_TYPE);
5258255736Sdavidch}
5259255736Sdavidch
5260255736Sdavidch/**
5261255736Sdavidch * ecore_q_send_deactivate - send DEACTIVATE command
5262255736Sdavidch *
5263255736Sdavidch * @sc:		device handle
5264255736Sdavidch * @params:
5265255736Sdavidch *
5266255736Sdavidch * implemented using the UPDATE command.
5267255736Sdavidch */
5268255736Sdavidchstatic inline int ecore_q_send_deactivate(struct bxe_softc *sc,
5269255736Sdavidch					struct ecore_queue_state_params *params)
5270255736Sdavidch{
5271255736Sdavidch	struct ecore_queue_update_params *update = &params->params.update;
5272255736Sdavidch
5273255736Sdavidch	ECORE_MEMSET(update, 0, sizeof(*update));
5274255736Sdavidch
5275255736Sdavidch	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5276255736Sdavidch
5277255736Sdavidch	return ecore_q_send_update(sc, params);
5278255736Sdavidch}
5279255736Sdavidch
5280255736Sdavidch/**
5281255736Sdavidch * ecore_q_send_activate - send ACTIVATE command
5282255736Sdavidch *
5283255736Sdavidch * @sc:		device handle
5284255736Sdavidch * @params:
5285255736Sdavidch *
5286255736Sdavidch * implemented using the UPDATE command.
5287255736Sdavidch */
5288255736Sdavidchstatic inline int ecore_q_send_activate(struct bxe_softc *sc,
5289255736Sdavidch					struct ecore_queue_state_params *params)
5290255736Sdavidch{
5291255736Sdavidch	struct ecore_queue_update_params *update = &params->params.update;
5292255736Sdavidch
5293255736Sdavidch	ECORE_MEMSET(update, 0, sizeof(*update));
5294255736Sdavidch
5295255736Sdavidch	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
5296255736Sdavidch	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5297255736Sdavidch
5298255736Sdavidch	return ecore_q_send_update(sc, params);
5299255736Sdavidch}
5300255736Sdavidch
/* Fill a TPA_UPDATE ramrod from the driver-side TPA parameters:
 * aggregation limits, SGE ring address/thresholds, TPA mode and the
 * per-IP-version update requests. 16/32-bit values are converted to
 * little-endian for the FW.
 */
static void ecore_q_fill_update_tpa_data(struct bxe_softc *sc,
				struct ecore_queue_sp_obj *obj,
				struct ecore_queue_update_tpa_params *params,
				struct tpa_update_ramrod_data *data)
{
	data->client_id = obj->cl_id;
	data->complete_on_both_clients = params->complete_on_both_clients;
	data->dont_verify_rings_pause_thr_flg =
		params->dont_verify_thr;
	data->max_agg_size = ECORE_CPU_TO_LE16(params->max_agg_sz);
	data->max_sges_for_packet = params->max_sges_pkt;
	data->max_tpa_queues = params->max_tpa_queues;
	data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buff_sz);
	/* SGE ring base address, split into 32-bit halves */
	data->sge_page_base_hi = ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
	data->sge_page_base_lo = ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
	data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_pause_thr_high);
	data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_pause_thr_low);
	data->tpa_mode = params->tpa_mode;
	data->update_ipv4 = params->update_ipv4;
	data->update_ipv6 = params->update_ipv6;
}
5322296071Sdavidcs
5323255736Sdavidchstatic inline int ecore_q_send_update_tpa(struct bxe_softc *sc,
5324255736Sdavidch					struct ecore_queue_state_params *params)
5325255736Sdavidch{
5326296071Sdavidcs	struct ecore_queue_sp_obj *o = params->q_obj;
5327296071Sdavidcs	struct tpa_update_ramrod_data *rdata =
5328296071Sdavidcs		(struct tpa_update_ramrod_data *)o->rdata;
5329296071Sdavidcs	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5330296071Sdavidcs	struct ecore_queue_update_tpa_params *update_tpa_params =
5331296071Sdavidcs		&params->params.update_tpa;
5332296071Sdavidcs	uint16_t type;
5333296071Sdavidcs
5334296071Sdavidcs	/* Clear the ramrod data */
5335296071Sdavidcs	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5336296071Sdavidcs
5337296071Sdavidcs	/* Fill the ramrod data */
5338296071Sdavidcs	ecore_q_fill_update_tpa_data(sc, o, update_tpa_params, rdata);
5339296071Sdavidcs
5340296071Sdavidcs	/* Add the function id inside the type, so that sp post function
5341296071Sdavidcs	 * doesn't automatically add the PF func-id, this is required
5342296071Sdavidcs	 * for operations done by PFs on behalf of their VFs
5343296071Sdavidcs	 */
5344296071Sdavidcs	type = ETH_CONNECTION_TYPE |
5345296071Sdavidcs		((o->func_id) << SPE_HDR_T_FUNCTION_ID_SHIFT);
5346296071Sdavidcs
5347296071Sdavidcs	/* No need for an explicit memory barrier here as long as we
5348296071Sdavidcs	 * ensure the ordering of writing to the SPQ element
5349296071Sdavidcs	 * and updating of the SPQ producer which involves a memory
5350296071Sdavidcs	 * read. If the memory read is removed we will have to put a
5351296071Sdavidcs	 * full memory barrier there (inside ecore_sp_post()).
5352296071Sdavidcs	 */
5353296071Sdavidcs	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TPA_UPDATE,
5354296071Sdavidcs			     o->cids[ECORE_PRIMARY_CID_INDEX],
5355296071Sdavidcs			     data_mapping, type);
5356255736Sdavidch}
5357255736Sdavidch
5358255736Sdavidchstatic inline int ecore_q_send_halt(struct bxe_softc *sc,
5359255736Sdavidch				    struct ecore_queue_state_params *params)
5360255736Sdavidch{
5361255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5362255736Sdavidch
5363255736Sdavidch	/* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
5364255736Sdavidch	ecore_dma_addr_t data_mapping = 0;
5365255736Sdavidch	data_mapping = (ecore_dma_addr_t)o->cl_id;
5366255736Sdavidch
5367296071Sdavidcs	/* No need for an explicit memory barrier here as long as we
5368296071Sdavidcs	 * ensure the ordering of writing to the SPQ element
5369296071Sdavidcs	 * and updating of the SPQ producer which involves a memory
5370296071Sdavidcs	 * read. If the memory read is removed we will have to put a
5371296071Sdavidcs	 * full memory barrier there (inside ecore_sp_post()).
5372296071Sdavidcs	 */
5373255736Sdavidch	return ecore_sp_post(sc,
5374255736Sdavidch			     RAMROD_CMD_ID_ETH_HALT,
5375255736Sdavidch			     o->cids[ECORE_PRIMARY_CID_INDEX],
5376255736Sdavidch			     data_mapping,
5377255736Sdavidch			     ETH_CONNECTION_TYPE);
5378255736Sdavidch}
5379255736Sdavidch
5380255736Sdavidchstatic inline int ecore_q_send_cfc_del(struct bxe_softc *sc,
5381255736Sdavidch				       struct ecore_queue_state_params *params)
5382255736Sdavidch{
5383255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5384255736Sdavidch	uint8_t cid_idx = params->params.cfc_del.cid_index;
5385255736Sdavidch
5386255736Sdavidch	if (cid_idx >= o->max_cos) {
5387255736Sdavidch		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5388255736Sdavidch			  o->cl_id, cid_idx);
5389255736Sdavidch		return ECORE_INVAL;
5390255736Sdavidch	}
5391255736Sdavidch
5392255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
5393255736Sdavidch			     o->cids[cid_idx], 0,
5394255736Sdavidch			     NONE_CONNECTION_TYPE);
5395255736Sdavidch}
5396255736Sdavidch
5397255736Sdavidchstatic inline int ecore_q_send_terminate(struct bxe_softc *sc,
5398255736Sdavidch					struct ecore_queue_state_params *params)
5399255736Sdavidch{
5400255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5401255736Sdavidch	uint8_t cid_index = params->params.terminate.cid_index;
5402255736Sdavidch
5403255736Sdavidch	if (cid_index >= o->max_cos) {
5404255736Sdavidch		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5405255736Sdavidch			  o->cl_id, cid_index);
5406255736Sdavidch		return ECORE_INVAL;
5407255736Sdavidch	}
5408255736Sdavidch
5409255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
5410255736Sdavidch			     o->cids[cid_index], 0,
5411255736Sdavidch			     ETH_CONNECTION_TYPE);
5412255736Sdavidch}
5413255736Sdavidch
5414255736Sdavidchstatic inline int ecore_q_send_empty(struct bxe_softc *sc,
5415255736Sdavidch				     struct ecore_queue_state_params *params)
5416255736Sdavidch{
5417255736Sdavidch	struct ecore_queue_sp_obj *o = params->q_obj;
5418255736Sdavidch
5419255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
5420255736Sdavidch			     o->cids[ECORE_PRIMARY_CID_INDEX], 0,
5421255736Sdavidch			     ETH_CONNECTION_TYPE);
5422255736Sdavidch}
5423255736Sdavidch
/* Dispatch a queue command that is common to all chip revisions to its
 * sender. SETUP is intentionally absent here: it is chip-specific and
 * handled by the per-revision dispatchers (e1x/e2).
 */
static inline int ecore_queue_send_cmd_cmn(struct bxe_softc *sc,
					struct ecore_queue_state_params *params)
{
	switch (params->cmd) {
	case ECORE_Q_CMD_INIT:
		return ecore_q_init(sc, params);
	case ECORE_Q_CMD_SETUP_TX_ONLY:
		return ecore_q_send_setup_tx_only(sc, params);
	case ECORE_Q_CMD_DEACTIVATE:
		return ecore_q_send_deactivate(sc, params);
	case ECORE_Q_CMD_ACTIVATE:
		return ecore_q_send_activate(sc, params);
	case ECORE_Q_CMD_UPDATE:
		return ecore_q_send_update(sc, params);
	case ECORE_Q_CMD_UPDATE_TPA:
		return ecore_q_send_update_tpa(sc, params);
	case ECORE_Q_CMD_HALT:
		return ecore_q_send_halt(sc, params);
	case ECORE_Q_CMD_CFC_DEL:
		return ecore_q_send_cfc_del(sc, params);
	case ECORE_Q_CMD_TERMINATE:
		return ecore_q_send_terminate(sc, params);
	case ECORE_Q_CMD_EMPTY:
		return ecore_q_send_empty(sc, params);
	default:
		ECORE_ERR("Unknown command: %d\n", params->cmd);
		return ECORE_INVAL;
	}
}
5453255736Sdavidch
5454255736Sdavidchstatic int ecore_queue_send_cmd_e1x(struct bxe_softc *sc,
5455255736Sdavidch				    struct ecore_queue_state_params *params)
5456255736Sdavidch{
5457255736Sdavidch	switch (params->cmd) {
5458255736Sdavidch	case ECORE_Q_CMD_SETUP:
5459255736Sdavidch		return ecore_q_send_setup_e1x(sc, params);
5460255736Sdavidch	case ECORE_Q_CMD_INIT:
5461255736Sdavidch	case ECORE_Q_CMD_SETUP_TX_ONLY:
5462255736Sdavidch	case ECORE_Q_CMD_DEACTIVATE:
5463255736Sdavidch	case ECORE_Q_CMD_ACTIVATE:
5464255736Sdavidch	case ECORE_Q_CMD_UPDATE:
5465255736Sdavidch	case ECORE_Q_CMD_UPDATE_TPA:
5466255736Sdavidch	case ECORE_Q_CMD_HALT:
5467255736Sdavidch	case ECORE_Q_CMD_CFC_DEL:
5468255736Sdavidch	case ECORE_Q_CMD_TERMINATE:
5469255736Sdavidch	case ECORE_Q_CMD_EMPTY:
5470255736Sdavidch		return ecore_queue_send_cmd_cmn(sc, params);
5471255736Sdavidch	default:
5472255736Sdavidch		ECORE_ERR("Unknown command: %d\n", params->cmd);
5473255736Sdavidch		return ECORE_INVAL;
5474255736Sdavidch	}
5475255736Sdavidch}
5476255736Sdavidch
5477255736Sdavidchstatic int ecore_queue_send_cmd_e2(struct bxe_softc *sc,
5478255736Sdavidch				   struct ecore_queue_state_params *params)
5479255736Sdavidch{
5480255736Sdavidch	switch (params->cmd) {
5481255736Sdavidch	case ECORE_Q_CMD_SETUP:
5482255736Sdavidch		return ecore_q_send_setup_e2(sc, params);
5483255736Sdavidch	case ECORE_Q_CMD_INIT:
5484255736Sdavidch	case ECORE_Q_CMD_SETUP_TX_ONLY:
5485255736Sdavidch	case ECORE_Q_CMD_DEACTIVATE:
5486255736Sdavidch	case ECORE_Q_CMD_ACTIVATE:
5487255736Sdavidch	case ECORE_Q_CMD_UPDATE:
5488255736Sdavidch	case ECORE_Q_CMD_UPDATE_TPA:
5489255736Sdavidch	case ECORE_Q_CMD_HALT:
5490255736Sdavidch	case ECORE_Q_CMD_CFC_DEL:
5491255736Sdavidch	case ECORE_Q_CMD_TERMINATE:
5492255736Sdavidch	case ECORE_Q_CMD_EMPTY:
5493255736Sdavidch		return ecore_queue_send_cmd_cmn(sc, params);
5494255736Sdavidch	default:
5495255736Sdavidch		ECORE_ERR("Unknown command: %d\n", params->cmd);
5496255736Sdavidch		return ECORE_INVAL;
5497255736Sdavidch	}
5498255736Sdavidch}
5499255736Sdavidch
/**
 * ecore_queue_chk_transition - check state machine of a regular Queue
 *
 * @sc:		device handle
 * @o:		queue state object whose current state is examined
 * @params:	requested command plus its per-command parameters
 *
 * (not Forwarding)
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         ECORE_INVAL otherwise.
 */
static int ecore_queue_chk_transition(struct bxe_softc *sc,
				      struct ecore_queue_sp_obj *o,
				      struct ecore_queue_state_params *params)
{
	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
	enum ecore_queue_cmd cmd = params->cmd;
	struct ecore_queue_update_params *update_params =
		 &params->params.update;
	/* Tx-only queue count carried forward unless a command changes it */
	uint8_t next_tx_only = o->num_tx_only;

	/* Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = ECORE_Q_STATE_MAX;
	}

	/* Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending) {
		ECORE_ERR("Blocking transition since pending was %lx\n",
			  o->pending);
		return ECORE_BUSY;
	}

	switch (state) {
	case ECORE_Q_STATE_RESET:
		/* INIT is the only legal command out of RESET */
		if (cmd == ECORE_Q_CMD_INIT)
			next_state = ECORE_Q_STATE_INITIALIZED;

		break;
	case ECORE_Q_STATE_INITIALIZED:
		/* SETUP ends up ACTIVE or INACTIVE depending on the
		 * requested activity flag.
		 */
		if (cmd == ECORE_Q_CMD_SETUP) {
			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
					   &params->params.setup.flags))
				next_state = ECORE_Q_STATE_ACTIVE;
			else
				next_state = ECORE_Q_STATE_INACTIVE;
		}

		break;
	case ECORE_Q_STATE_ACTIVE:
		if (cmd == ECORE_Q_CMD_DEACTIVATE)
			next_state = ECORE_Q_STATE_INACTIVE;

		/* EMPTY and UPDATE_TPA don't change the state */
		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
			next_state = ECORE_Q_STATE_ACTIVE;

		/* Adding a first Tx-only companion queue enters MULTI_COS */
		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
			next_state = ECORE_Q_STATE_MULTI_COS;
			next_tx_only = 1;
		}

		else if (cmd == ECORE_Q_CMD_HALT)
			next_state = ECORE_Q_STATE_STOPPED;

		else if (cmd == ECORE_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 *  state accordingly.
			 */
			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
					   &update_params->update_flags) &&
			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
					    &update_params->update_flags))
				next_state = ECORE_Q_STATE_INACTIVE;
			else
				next_state = ECORE_Q_STATE_ACTIVE;
		}

		break;
	case ECORE_Q_STATE_MULTI_COS:
		if (cmd == ECORE_Q_CMD_TERMINATE)
			next_state = ECORE_Q_STATE_MCOS_TERMINATED;

		/* Another Tx-only companion queue joins; count it */
		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
			next_state = ECORE_Q_STATE_MULTI_COS;
			next_tx_only = o->num_tx_only + 1;
		}

		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
			next_state = ECORE_Q_STATE_MULTI_COS;

		else if (cmd == ECORE_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 *  state accordingly.
			 */
			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
					   &update_params->update_flags) &&
			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
					    &update_params->update_flags))
				next_state = ECORE_Q_STATE_INACTIVE;
			else
				next_state = ECORE_Q_STATE_MULTI_COS;
		}

		break;
	case ECORE_Q_STATE_MCOS_TERMINATED:
		/* CFC_DEL retires one Tx-only queue; fall back to ACTIVE
		 * once the last one is gone.
		 */
		if (cmd == ECORE_Q_CMD_CFC_DEL) {
			next_tx_only = o->num_tx_only - 1;
			if (next_tx_only == 0)
				next_state = ECORE_Q_STATE_ACTIVE;
			else
				next_state = ECORE_Q_STATE_MULTI_COS;
		}

		break;
	case ECORE_Q_STATE_INACTIVE:
		if (cmd == ECORE_Q_CMD_ACTIVATE)
			next_state = ECORE_Q_STATE_ACTIVE;

		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
			next_state = ECORE_Q_STATE_INACTIVE;

		else if (cmd == ECORE_Q_CMD_HALT)
			next_state = ECORE_Q_STATE_STOPPED;

		else if (cmd == ECORE_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
					   &update_params->update_flags) &&
			    ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
					   &update_params->update_flags)){
				if (o->num_tx_only == 0)
					next_state = ECORE_Q_STATE_ACTIVE;
				else /* tx only queues exist for this queue */
					next_state = ECORE_Q_STATE_MULTI_COS;
			} else
				next_state = ECORE_Q_STATE_INACTIVE;
		}

		break;
	case ECORE_Q_STATE_STOPPED:
		if (cmd == ECORE_Q_CMD_TERMINATE)
			next_state = ECORE_Q_STATE_TERMINATED;

		break;
	case ECORE_Q_STATE_TERMINATED:
		if (cmd == ECORE_Q_CMD_CFC_DEL)
			next_state = ECORE_Q_STATE_RESET;

		break;
	default:
		ECORE_ERR("Illegal state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != ECORE_Q_STATE_MAX) {
		ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
			  state, cmd, next_state);
		o->next_state = next_state;
		o->next_tx_only = next_tx_only;
		return ECORE_SUCCESS;
	}

	ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);

	return ECORE_INVAL;
}
5681255736Sdavidch
5682255736Sdavidch/**
5683255736Sdavidch * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
5684255736Sdavidch *
5685255736Sdavidch * @sc:		device handle
5686255736Sdavidch * @o:
5687255736Sdavidch * @params:
5688255736Sdavidch *
5689255736Sdavidch * It both checks if the requested command is legal in a current
5690255736Sdavidch * state and, if it's legal, sets a `next_state' in the object
5691255736Sdavidch * that will be used in the completion flow to set the `state'
5692255736Sdavidch * of the object.
5693255736Sdavidch *
5694255736Sdavidch * returns 0 if a requested command is a legal transition,
5695255736Sdavidch *         ECORE_INVAL otherwise.
5696255736Sdavidch */
5697255736Sdavidchstatic int ecore_queue_chk_fwd_transition(struct bxe_softc *sc,
5698255736Sdavidch					  struct ecore_queue_sp_obj *o,
5699255736Sdavidch					struct ecore_queue_state_params *params)
5700255736Sdavidch{
5701255736Sdavidch	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5702255736Sdavidch	enum ecore_queue_cmd cmd = params->cmd;
5703255736Sdavidch
5704255736Sdavidch	switch (state) {
5705255736Sdavidch	case ECORE_Q_STATE_RESET:
5706255736Sdavidch		if (cmd == ECORE_Q_CMD_INIT)
5707255736Sdavidch			next_state = ECORE_Q_STATE_INITIALIZED;
5708255736Sdavidch
5709255736Sdavidch		break;
5710255736Sdavidch	case ECORE_Q_STATE_INITIALIZED:
5711255736Sdavidch		if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5712255736Sdavidch			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5713255736Sdavidch					   &params->params.tx_only.flags))
5714255736Sdavidch				next_state = ECORE_Q_STATE_ACTIVE;
5715255736Sdavidch			else
5716255736Sdavidch				next_state = ECORE_Q_STATE_INACTIVE;
5717255736Sdavidch		}
5718255736Sdavidch
5719255736Sdavidch		break;
5720255736Sdavidch	case ECORE_Q_STATE_ACTIVE:
5721255736Sdavidch	case ECORE_Q_STATE_INACTIVE:
5722255736Sdavidch		if (cmd == ECORE_Q_CMD_CFC_DEL)
5723255736Sdavidch			next_state = ECORE_Q_STATE_RESET;
5724255736Sdavidch
5725255736Sdavidch		break;
5726255736Sdavidch	default:
5727255736Sdavidch		ECORE_ERR("Illegal state: %d\n", state);
5728255736Sdavidch	}
5729255736Sdavidch
5730255736Sdavidch	/* Transition is assured */
5731255736Sdavidch	if (next_state != ECORE_Q_STATE_MAX) {
5732255736Sdavidch		ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5733255736Sdavidch			  state, cmd, next_state);
5734255736Sdavidch		o->next_state = next_state;
5735255736Sdavidch		return ECORE_SUCCESS;
5736255736Sdavidch	}
5737255736Sdavidch
5738255736Sdavidch	ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5739255736Sdavidch	return ECORE_INVAL;
5740255736Sdavidch}
5741255736Sdavidch
5742255736Sdavidchvoid ecore_init_queue_obj(struct bxe_softc *sc,
5743255736Sdavidch			  struct ecore_queue_sp_obj *obj,
5744255736Sdavidch			  uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt, uint8_t func_id,
5745255736Sdavidch			  void *rdata,
5746255736Sdavidch			  ecore_dma_addr_t rdata_mapping, unsigned long type)
5747255736Sdavidch{
5748255736Sdavidch	ECORE_MEMSET(obj, 0, sizeof(*obj));
5749255736Sdavidch
5750255736Sdavidch	/* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
5751255736Sdavidch	ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
5752255736Sdavidch
5753255736Sdavidch	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5754255736Sdavidch	obj->max_cos = cid_cnt;
5755255736Sdavidch	obj->cl_id = cl_id;
5756255736Sdavidch	obj->func_id = func_id;
5757255736Sdavidch	obj->rdata = rdata;
5758255736Sdavidch	obj->rdata_mapping = rdata_mapping;
5759255736Sdavidch	obj->type = type;
5760255736Sdavidch	obj->next_state = ECORE_Q_STATE_MAX;
5761255736Sdavidch
5762255736Sdavidch	if (CHIP_IS_E1x(sc))
5763255736Sdavidch		obj->send_cmd = ecore_queue_send_cmd_e1x;
5764255736Sdavidch	else
5765255736Sdavidch		obj->send_cmd = ecore_queue_send_cmd_e2;
5766255736Sdavidch
5767255736Sdavidch	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
5768255736Sdavidch		obj->check_transition = ecore_queue_chk_fwd_transition;
5769255736Sdavidch	else
5770255736Sdavidch	obj->check_transition = ecore_queue_chk_transition;
5771255736Sdavidch
5772255736Sdavidch	obj->complete_cmd = ecore_queue_comp_cmd;
5773255736Sdavidch	obj->wait_comp = ecore_queue_wait_comp;
5774255736Sdavidch	obj->set_pending = ecore_queue_set_pending;
5775255736Sdavidch}
5776255736Sdavidch
5777255736Sdavidch/* return a queue object's logical state*/
5778255736Sdavidchint ecore_get_q_logical_state(struct bxe_softc *sc,
5779255736Sdavidch			       struct ecore_queue_sp_obj *obj)
5780255736Sdavidch{
5781255736Sdavidch	switch (obj->state) {
5782255736Sdavidch	case ECORE_Q_STATE_ACTIVE:
5783255736Sdavidch	case ECORE_Q_STATE_MULTI_COS:
5784255736Sdavidch		return ECORE_Q_LOGICAL_STATE_ACTIVE;
5785255736Sdavidch	case ECORE_Q_STATE_RESET:
5786255736Sdavidch	case ECORE_Q_STATE_INITIALIZED:
5787255736Sdavidch	case ECORE_Q_STATE_MCOS_TERMINATED:
5788255736Sdavidch	case ECORE_Q_STATE_INACTIVE:
5789255736Sdavidch	case ECORE_Q_STATE_STOPPED:
5790255736Sdavidch	case ECORE_Q_STATE_TERMINATED:
5791255736Sdavidch	case ECORE_Q_STATE_FLRED:
5792255736Sdavidch		return ECORE_Q_LOGICAL_STATE_STOPPED;
5793255736Sdavidch	default:
5794255736Sdavidch		return ECORE_INVAL;
5795255736Sdavidch	}
5796255736Sdavidch}
5797255736Sdavidch
/********************** Function state object *********************************/
/* Return the function object's current state, or ECORE_F_STATE_MAX
 * when a state transition is still in flight (o->pending non-zero).
 */
enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc,
					   struct ecore_func_sp_obj *o)
{
	/* in the middle of transaction - return INVALID state */
	if (o->pending)
		return ECORE_F_STATE_MAX;

	/* ensure the order of reading of o->pending and o->state
	 * o->pending should be read first
	 */
	rmb();

	return o->state;
}
5813255736Sdavidch
5814255736Sdavidchstatic int ecore_func_wait_comp(struct bxe_softc *sc,
5815255736Sdavidch				struct ecore_func_sp_obj *o,
5816255736Sdavidch				enum ecore_func_cmd cmd)
5817255736Sdavidch{
5818255736Sdavidch	return ecore_state_wait(sc, cmd, &o->pending);
5819255736Sdavidch}
5820255736Sdavidch
/**
 * ecore_func_state_change_comp - complete the state machine transition
 *
 * @sc:		device handle
 * @o:		function state object being transitioned
 * @cmd:	command whose completion has arrived
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 *
 * Returns ECORE_SUCCESS, or ECORE_INVAL if 'cmd' was not pending.
 */
static inline int ecore_func_state_change_comp(struct bxe_softc *sc,
					       struct ecore_func_sp_obj *o,
					       enum ecore_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	/* A completion for a command we never issued is a protocol error */
	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
		ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
			  cmd, ECORE_FUNC_ID(sc), o->state,
			  cur_pending, o->next_state);
		return ECORE_INVAL;
	}

	ECORE_MSG(sc,
		  "Completing command %d for func %d, setting state to %d\n",
		  cmd, ECORE_FUNC_ID(sc), o->next_state);

	o->state = o->next_state;
	o->next_state = ECORE_F_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	ECORE_CLEAR_BIT(cmd, &o->pending);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();

	return ECORE_SUCCESS;
}
5861255736Sdavidch
5862255736Sdavidch/**
5863255736Sdavidch * ecore_func_comp_cmd - complete the state change command
5864255736Sdavidch *
5865255736Sdavidch * @sc:		device handle
5866255736Sdavidch * @o:
5867255736Sdavidch * @cmd:
5868255736Sdavidch *
5869255736Sdavidch * Checks that the arrived completion is expected.
5870255736Sdavidch */
5871255736Sdavidchstatic int ecore_func_comp_cmd(struct bxe_softc *sc,
5872255736Sdavidch			       struct ecore_func_sp_obj *o,
5873255736Sdavidch			       enum ecore_func_cmd cmd)
5874255736Sdavidch{
5875255736Sdavidch	/* Complete the state machine part first, check if it's a
5876255736Sdavidch	 * legal completion.
5877255736Sdavidch	 */
5878255736Sdavidch	int rc = ecore_func_state_change_comp(sc, o, cmd);
5879255736Sdavidch	return rc;
5880255736Sdavidch}
5881255736Sdavidch
/**
 * ecore_func_chk_transition - perform function state machine transition
 *
 * @sc:		device handle
 * @o:		function state object whose current state is examined
 * @params:	requested command plus its per-command parameters
 *
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         ECORE_INVAL otherwise.
 */
static int ecore_func_chk_transition(struct bxe_softc *sc,
				     struct ecore_func_sp_obj *o,
				     struct ecore_func_state_params *params)
{
	enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
	enum ecore_func_cmd cmd = params->cmd;

	/* Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = ECORE_F_STATE_MAX;
	}

	/* Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return ECORE_BUSY;

	switch (state) {
	case ECORE_F_STATE_RESET:
		if (cmd == ECORE_F_CMD_HW_INIT)
			next_state = ECORE_F_STATE_INITIALIZED;

		break;
	case ECORE_F_STATE_INITIALIZED:
		if (cmd == ECORE_F_CMD_START)
			next_state = ECORE_F_STATE_STARTED;

		else if (cmd == ECORE_F_CMD_HW_RESET)
			next_state = ECORE_F_STATE_RESET;

		break;
	case ECORE_F_STATE_STARTED:
		if (cmd == ECORE_F_CMD_STOP)
			next_state = ECORE_F_STATE_INITIALIZED;
		/* afex ramrods can be sent only in started mode, and only
		 * if not pending for function_stop ramrod completion
		 * for these events - next state remained STARTED.
		 */
		else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_STARTED;

		else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_STARTED;

		/* Switch_update ramrod can be sent in either started or
		 * tx_stopped state, and it doesn't change the state.
		 */
		else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_STARTED;

		/* Timesync ramrod likewise keeps the STARTED state, and is
		 * legal only while no function_stop is pending.
		 */
		else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_STARTED;

		else if (cmd == ECORE_F_CMD_TX_STOP)
			next_state = ECORE_F_STATE_TX_STOPPED;

		break;
	case ECORE_F_STATE_TX_STOPPED:
		if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
		    (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_TX_STOPPED;

		else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
		    (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
			next_state = ECORE_F_STATE_TX_STOPPED;

		else if (cmd == ECORE_F_CMD_TX_START)
			next_state = ECORE_F_STATE_STARTED;

		break;
	default:
		ECORE_ERR("Unknown state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != ECORE_F_STATE_MAX) {
		ECORE_MSG(sc, "Good function state transition: %d(%d)->%d\n",
			  state, cmd, next_state);
		o->next_state = next_state;
		return ECORE_SUCCESS;
	}

	ECORE_MSG(sc, "Bad function state transition request: %d %d\n",
		  state, cmd);

	return ECORE_INVAL;
}
5992255736Sdavidch
5993255736Sdavidch/**
5994255736Sdavidch * ecore_func_init_func - performs HW init at function stage
5995255736Sdavidch *
5996255736Sdavidch * @sc:		device handle
5997255736Sdavidch * @drv:
5998255736Sdavidch *
5999255736Sdavidch * Init HW when the current phase is
6000255736Sdavidch * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
6001255736Sdavidch * HW blocks.
6002255736Sdavidch */
6003255736Sdavidchstatic inline int ecore_func_init_func(struct bxe_softc *sc,
6004255736Sdavidch				       const struct ecore_func_sp_drv_ops *drv)
6005255736Sdavidch{
6006255736Sdavidch	return drv->init_hw_func(sc);
6007255736Sdavidch}
6008255736Sdavidch
6009255736Sdavidch/**
6010255736Sdavidch * ecore_func_init_port - performs HW init at port stage
6011255736Sdavidch *
6012255736Sdavidch * @sc:		device handle
6013255736Sdavidch * @drv:
6014255736Sdavidch *
6015255736Sdavidch * Init HW when the current phase is
6016255736Sdavidch * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
6017255736Sdavidch * FUNCTION-only HW blocks.
6018255736Sdavidch *
6019255736Sdavidch */
6020255736Sdavidchstatic inline int ecore_func_init_port(struct bxe_softc *sc,
6021255736Sdavidch				       const struct ecore_func_sp_drv_ops *drv)
6022255736Sdavidch{
6023255736Sdavidch	int rc = drv->init_hw_port(sc);
6024255736Sdavidch	if (rc)
6025255736Sdavidch		return rc;
6026255736Sdavidch
6027255736Sdavidch	return ecore_func_init_func(sc, drv);
6028255736Sdavidch}
6029255736Sdavidch
6030255736Sdavidch/**
6031255736Sdavidch * ecore_func_init_cmn_chip - performs HW init at chip-common stage
6032255736Sdavidch *
6033255736Sdavidch * @sc:		device handle
6034255736Sdavidch * @drv:
6035255736Sdavidch *
6036255736Sdavidch * Init HW when the current phase is
6037255736Sdavidch * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
6038255736Sdavidch * PORT-only and FUNCTION-only HW blocks.
6039255736Sdavidch */
6040255736Sdavidchstatic inline int ecore_func_init_cmn_chip(struct bxe_softc *sc,
6041255736Sdavidch					const struct ecore_func_sp_drv_ops *drv)
6042255736Sdavidch{
6043255736Sdavidch	int rc = drv->init_hw_cmn_chip(sc);
6044255736Sdavidch	if (rc)
6045255736Sdavidch		return rc;
6046255736Sdavidch
6047255736Sdavidch	return ecore_func_init_port(sc, drv);
6048255736Sdavidch}
6049255736Sdavidch
6050255736Sdavidch/**
6051255736Sdavidch * ecore_func_init_cmn - performs HW init at common stage
6052255736Sdavidch *
6053255736Sdavidch * @sc:		device handle
6054255736Sdavidch * @drv:
6055255736Sdavidch *
6056255736Sdavidch * Init HW when the current phase is
6057255736Sdavidch * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
6058255736Sdavidch * PORT-only and FUNCTION-only HW blocks.
6059255736Sdavidch */
6060255736Sdavidchstatic inline int ecore_func_init_cmn(struct bxe_softc *sc,
6061255736Sdavidch				      const struct ecore_func_sp_drv_ops *drv)
6062255736Sdavidch{
6063255736Sdavidch	int rc = drv->init_hw_cmn(sc);
6064255736Sdavidch	if (rc)
6065255736Sdavidch		return rc;
6066255736Sdavidch
6067255736Sdavidch	return ecore_func_init_port(sc, drv);
6068255736Sdavidch}
6069255736Sdavidch
/* Perform the full HW init flow for the load phase reported by the MCP:
 * unzip buffers + FW are prepared first, then the matching stage-cascade
 * helper is invoked. On success the HW_INIT command is completed
 * immediately since no ramrods are sent from this path.
 */
static int ecore_func_hw_init(struct bxe_softc *sc,
			      struct ecore_func_state_params *params)
{
	uint32_t load_code = params->params.hw_init.load_phase;
	struct ecore_func_sp_obj *o = params->f_obj;
	const struct ecore_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	ECORE_MSG(sc, "function %d  load_code %x\n",
		  ECORE_ABS_FUNC_ID(sc), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(sc);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(sc);
	if (rc) {
		ECORE_ERR("Error loading firmware\n");
		goto init_err;
	}

	/* Handle the beginning of COMMON_XXX phases separately... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = ecore_func_init_cmn_chip(sc, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = ecore_func_init_cmn(sc, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = ecore_func_init_port(sc, drv);
		if (rc)
			goto init_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = ecore_func_init_func(sc, drv);
		if (rc)
			goto init_err;

		break;
	default:
		ECORE_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = ECORE_INVAL;
	}

init_err:
	/* Unzip buffers are released on every path, success or failure */
	drv->gunzip_end(sc);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);

	return rc;
}
6135255736Sdavidch
6136255736Sdavidch/**
6137255736Sdavidch * ecore_func_reset_func - reset HW at function stage
6138255736Sdavidch *
6139255736Sdavidch * @sc:		device handle
6140255736Sdavidch * @drv:
6141255736Sdavidch *
6142255736Sdavidch * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
6143255736Sdavidch * FUNCTION-only HW blocks.
6144255736Sdavidch */
6145255736Sdavidchstatic inline void ecore_func_reset_func(struct bxe_softc *sc,
6146255736Sdavidch					const struct ecore_func_sp_drv_ops *drv)
6147255736Sdavidch{
6148255736Sdavidch	drv->reset_hw_func(sc);
6149255736Sdavidch}
6150255736Sdavidch
6151255736Sdavidch/**
6152255736Sdavidch * ecore_func_reset_port - reser HW at port stage
6153255736Sdavidch *
6154255736Sdavidch * @sc:		device handle
6155255736Sdavidch * @drv:
6156255736Sdavidch *
6157255736Sdavidch * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
6158255736Sdavidch * FUNCTION-only and PORT-only HW blocks.
6159255736Sdavidch *
6160255736Sdavidch *                 !!!IMPORTANT!!!
6161255736Sdavidch *
6162255736Sdavidch * It's important to call reset_port before reset_func() as the last thing
6163255736Sdavidch * reset_func does is pf_disable() thus disabling PGLUE_B, which
6164255736Sdavidch * makes impossible any DMAE transactions.
6165255736Sdavidch */
6166255736Sdavidchstatic inline void ecore_func_reset_port(struct bxe_softc *sc,
6167255736Sdavidch					const struct ecore_func_sp_drv_ops *drv)
6168255736Sdavidch{
6169255736Sdavidch	drv->reset_hw_port(sc);
6170255736Sdavidch	ecore_func_reset_func(sc, drv);
6171255736Sdavidch}
6172255736Sdavidch
/**
 * ecore_func_reset_cmn - reset HW at common stage
 *
 * @sc:		device handle
 * @drv:	driver-specific HW init/reset callbacks
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 *
 * Port/function blocks are torn down first (see the DMAE note in
 * ecore_func_reset_port()), then the COMMON blocks.
 */
static inline void ecore_func_reset_cmn(struct bxe_softc *sc,
					const struct ecore_func_sp_drv_ops *drv)
{
	ecore_func_reset_port(sc, drv);
	drv->reset_hw_cmn(sc);
}
6189255736Sdavidch
6190255736Sdavidchstatic inline int ecore_func_hw_reset(struct bxe_softc *sc,
6191255736Sdavidch				      struct ecore_func_state_params *params)
6192255736Sdavidch{
6193255736Sdavidch	uint32_t reset_phase = params->params.hw_reset.reset_phase;
6194255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
6195255736Sdavidch	const struct ecore_func_sp_drv_ops *drv = o->drv;
6196255736Sdavidch
6197255736Sdavidch	ECORE_MSG(sc, "function %d  reset_phase %x\n", ECORE_ABS_FUNC_ID(sc),
6198255736Sdavidch		  reset_phase);
6199255736Sdavidch
6200255736Sdavidch	switch (reset_phase) {
6201255736Sdavidch	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6202255736Sdavidch		ecore_func_reset_cmn(sc, drv);
6203255736Sdavidch		break;
6204255736Sdavidch	case FW_MSG_CODE_DRV_UNLOAD_PORT:
6205255736Sdavidch		ecore_func_reset_port(sc, drv);
6206255736Sdavidch		break;
6207255736Sdavidch	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6208255736Sdavidch		ecore_func_reset_func(sc, drv);
6209255736Sdavidch		break;
6210255736Sdavidch	default:
6211255736Sdavidch		ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n",
6212255736Sdavidch			  reset_phase);
6213255736Sdavidch		break;
6214255736Sdavidch	}
6215255736Sdavidch
6216255736Sdavidch	/* Complete the command immediately: no ramrods have been sent. */
6217255736Sdavidch	o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
6218255736Sdavidch
6219255736Sdavidch	return ECORE_SUCCESS;
6220255736Sdavidch}
6221255736Sdavidch
6222255736Sdavidchstatic inline int ecore_func_send_start(struct bxe_softc *sc,
6223255736Sdavidch					struct ecore_func_state_params *params)
6224255736Sdavidch{
6225255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
6226255736Sdavidch	struct function_start_data *rdata =
6227255736Sdavidch		(struct function_start_data *)o->rdata;
6228255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6229255736Sdavidch	struct ecore_func_start_params *start_params = &params->params.start;
6230255736Sdavidch
6231255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6232255736Sdavidch
6233255736Sdavidch	/* Fill the ramrod data with provided parameters */
6234255736Sdavidch	rdata->function_mode	= (uint8_t)start_params->mf_mode;
6235255736Sdavidch	rdata->sd_vlan_tag	= ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
6236255736Sdavidch	rdata->path_id		= ECORE_PATH_ID(sc);
6237255736Sdavidch	rdata->network_cos_mode	= start_params->network_cos_mode;
6238255736Sdavidch
6239296071Sdavidcs	rdata->vxlan_dst_port	= start_params->vxlan_dst_port;
6240296071Sdavidcs	rdata->geneve_dst_port	= start_params->geneve_dst_port;
6241296071Sdavidcs	rdata->inner_clss_l2gre	= start_params->inner_clss_l2gre;
6242296071Sdavidcs	rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
6243296071Sdavidcs	rdata->inner_clss_vxlan	= start_params->inner_clss_vxlan;
6244296071Sdavidcs	rdata->inner_rss	= start_params->inner_rss;
6245296071Sdavidcs
6246296071Sdavidcs	rdata->sd_accept_mf_clss_fail = start_params->class_fail;
6247296071Sdavidcs	if (start_params->class_fail_ethtype) {
6248296071Sdavidcs		rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
6249296071Sdavidcs		rdata->sd_accept_mf_clss_fail_ethtype =
6250296071Sdavidcs			ECORE_CPU_TO_LE16(start_params->class_fail_ethtype);
6251296071Sdavidcs	}
6252296071Sdavidcs	rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
6253296071Sdavidcs	rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
6254296071Sdavidcs
6255296071Sdavidcs	/** @@@TMP - until FW 7.10.7 (which will introduce an HSI change)
6256296071Sdavidcs	 * `sd_vlan_eth_type' will replace ethertype in SD mode even if
6257296071Sdavidcs	 * it's set to 0; This will probably break SD, so we're setting it
6258296071Sdavidcs	 * to ethertype 0x8100 for now.
6259296071Sdavidcs	 */
6260296071Sdavidcs	if (start_params->sd_vlan_eth_type)
6261296071Sdavidcs		rdata->sd_vlan_eth_type =
6262296071Sdavidcs			ECORE_CPU_TO_LE16(start_params->sd_vlan_eth_type);
6263296071Sdavidcs	else
6264296071Sdavidcs		rdata->sd_vlan_eth_type =
6265296071Sdavidcs			ECORE_CPU_TO_LE16((uint16_t) 0x8100);
6266296071Sdavidcs
6267296071Sdavidcs	rdata->no_added_tags = start_params->no_added_tags;
6268296071Sdavidcs
6269296071Sdavidcs	rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
6270296071Sdavidcs	if (rdata->c2s_pri_tt_valid) {
6271296071Sdavidcs		memcpy(rdata->c2s_pri_trans_table.val,
6272296071Sdavidcs		       start_params->c2s_pri,
6273296071Sdavidcs		       MAX_VLAN_PRIORITIES);
6274296071Sdavidcs		rdata->c2s_pri_default = start_params->c2s_pri_default;
6275296071Sdavidcs	}
6276296071Sdavidcs
6277296071Sdavidcs	/* No need for an explicit memory barrier here as long as we
6278296071Sdavidcs	 * ensure the ordering of writing to the SPQ element
6279255736Sdavidch	 *  and updating of the SPQ producer which involves a memory
6280296071Sdavidcs	 * read. If the memory read is removed we will have to put a
6281296071Sdavidcs	 * full memory barrier there (inside ecore_sp_post()).
6282255736Sdavidch	 */
6283255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
6284255736Sdavidch			     data_mapping, NONE_CONNECTION_TYPE);
6285255736Sdavidch}
6286255736Sdavidch
6287255736Sdavidchstatic inline int ecore_func_send_switch_update(struct bxe_softc *sc,
6288255736Sdavidch					struct ecore_func_state_params *params)
6289255736Sdavidch{
6290255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
6291255736Sdavidch	struct function_update_data *rdata =
6292255736Sdavidch		(struct function_update_data *)o->rdata;
6293255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6294255736Sdavidch	struct ecore_func_switch_update_params *switch_update_params =
6295255736Sdavidch		&params->params.switch_update;
6296255736Sdavidch
6297255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6298255736Sdavidch
6299255736Sdavidch	/* Fill the ramrod data with provided parameters */
6300296071Sdavidcs	if (ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
6301296071Sdavidcs			   &switch_update_params->changes)) {
6302255736Sdavidch	rdata->tx_switch_suspend_change_flg = 1;
6303296071Sdavidcs		rdata->tx_switch_suspend =
6304296071Sdavidcs			ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
6305296071Sdavidcs				       &switch_update_params->changes);
6306296071Sdavidcs	}
6307296071Sdavidcs
6308296071Sdavidcs	if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_TAG_CHNG,
6309296071Sdavidcs			   &switch_update_params->changes)) {
6310296071Sdavidcs		rdata->sd_vlan_tag_change_flg = 1;
6311296071Sdavidcs		rdata->sd_vlan_tag =
6312296071Sdavidcs			ECORE_CPU_TO_LE16(switch_update_params->vlan);
6313296071Sdavidcs	}
6314296071Sdavidcs
6315296071Sdavidcs	if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
6316296071Sdavidcs			   &switch_update_params->changes)) {
6317296071Sdavidcs		rdata->sd_vlan_eth_type_change_flg = 1;
6318296071Sdavidcs		rdata->sd_vlan_eth_type =
6319296071Sdavidcs			ECORE_CPU_TO_LE16(switch_update_params->vlan_eth_type);
6320296071Sdavidcs	}
6321296071Sdavidcs
6322296071Sdavidcs	if (ECORE_TEST_BIT(ECORE_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
6323296071Sdavidcs			   &switch_update_params->changes)) {
6324296071Sdavidcs		rdata->sd_vlan_force_pri_change_flg = 1;
6325296071Sdavidcs		if (ECORE_TEST_BIT(ECORE_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
6326296071Sdavidcs				   &switch_update_params->changes))
6327296071Sdavidcs			rdata->sd_vlan_force_pri_flg = 1;
6328296071Sdavidcs		rdata->sd_vlan_force_pri_flg =
6329296071Sdavidcs			switch_update_params->vlan_force_prio;
6330296071Sdavidcs	}
6331296071Sdavidcs
6332296071Sdavidcs	if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_CFG_CHNG,
6333296071Sdavidcs			   &switch_update_params->changes)) {
6334296071Sdavidcs		rdata->update_tunn_cfg_flg = 1;
6335296071Sdavidcs		if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
6336296071Sdavidcs				   &switch_update_params->changes))
6337296071Sdavidcs			rdata->inner_clss_l2gre = 1;
6338296071Sdavidcs		if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
6339296071Sdavidcs				   &switch_update_params->changes))
6340296071Sdavidcs			rdata->inner_clss_vxlan = 1;
6341296071Sdavidcs		if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
6342296071Sdavidcs				   &switch_update_params->changes))
6343296071Sdavidcs			rdata->inner_clss_l2geneve = 1;
6344296071Sdavidcs		if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_RSS,
6345296071Sdavidcs				   &switch_update_params->changes))
6346296071Sdavidcs			rdata->inner_rss = 1;
6347296071Sdavidcs
6348296071Sdavidcs		rdata->vxlan_dst_port =
6349296071Sdavidcs			ECORE_CPU_TO_LE16(switch_update_params->vxlan_dst_port);
6350296071Sdavidcs		rdata->geneve_dst_port =
6351296071Sdavidcs			ECORE_CPU_TO_LE16(switch_update_params->geneve_dst_port);
6352296071Sdavidcs	}
6353296071Sdavidcs
6354255736Sdavidch	rdata->echo = SWITCH_UPDATE;
6355255736Sdavidch
6356296071Sdavidcs	/* No need for an explicit memory barrier here as long as we
6357296071Sdavidcs	 * ensure the ordering of writing to the SPQ element
6358296071Sdavidcs	 * and updating of the SPQ producer which involves a memory
6359296071Sdavidcs	 * read. If the memory read is removed we will have to put a
6360296071Sdavidcs	 * full memory barrier there (inside ecore_sp_post()).
6361296071Sdavidcs	 */
6362255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6363255736Sdavidch			     data_mapping, NONE_CONNECTION_TYPE);
6364255736Sdavidch}
6365255736Sdavidch
6366255736Sdavidchstatic inline int ecore_func_send_afex_update(struct bxe_softc *sc,
6367255736Sdavidch					 struct ecore_func_state_params *params)
6368255736Sdavidch{
6369255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
6370255736Sdavidch	struct function_update_data *rdata =
6371255736Sdavidch		(struct function_update_data *)o->afex_rdata;
6372255736Sdavidch	ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
6373255736Sdavidch	struct ecore_func_afex_update_params *afex_update_params =
6374255736Sdavidch		&params->params.afex_update;
6375255736Sdavidch
6376255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6377255736Sdavidch
6378255736Sdavidch	/* Fill the ramrod data with provided parameters */
6379255736Sdavidch	rdata->vif_id_change_flg = 1;
6380255736Sdavidch	rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
6381255736Sdavidch	rdata->afex_default_vlan_change_flg = 1;
6382255736Sdavidch	rdata->afex_default_vlan =
6383255736Sdavidch		ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
6384255736Sdavidch	rdata->allowed_priorities_change_flg = 1;
6385255736Sdavidch	rdata->allowed_priorities = afex_update_params->allowed_priorities;
6386255736Sdavidch	rdata->echo = AFEX_UPDATE;
6387255736Sdavidch
6388296071Sdavidcs	/* No need for an explicit memory barrier here as long as we
6389296071Sdavidcs	 * ensure the ordering of writing to the SPQ element
6390255736Sdavidch	 *  and updating of the SPQ producer which involves a memory
6391296071Sdavidcs	 * read. If the memory read is removed we will have to put a
6392296071Sdavidcs	 * full memory barrier there (inside ecore_sp_post()).
6393255736Sdavidch	 */
6394255736Sdavidch	ECORE_MSG(sc,
6395255736Sdavidch		  "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
6396255736Sdavidch		  rdata->vif_id,
6397255736Sdavidch		  rdata->afex_default_vlan, rdata->allowed_priorities);
6398255736Sdavidch
6399255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6400255736Sdavidch			     data_mapping, NONE_CONNECTION_TYPE);
6401255736Sdavidch}
6402255736Sdavidch
6403255736Sdavidchstatic
6404255736Sdavidchinline int ecore_func_send_afex_viflists(struct bxe_softc *sc,
6405255736Sdavidch					 struct ecore_func_state_params *params)
6406255736Sdavidch{
6407255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
6408255736Sdavidch	struct afex_vif_list_ramrod_data *rdata =
6409255736Sdavidch		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
6410255736Sdavidch	struct ecore_func_afex_viflists_params *afex_vif_params =
6411255736Sdavidch		&params->params.afex_viflists;
6412255736Sdavidch	uint64_t *p_rdata = (uint64_t *)rdata;
6413255736Sdavidch
6414255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6415255736Sdavidch
6416255736Sdavidch	/* Fill the ramrod data with provided parameters */
6417255736Sdavidch	rdata->vif_list_index = ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
6418255736Sdavidch	rdata->func_bit_map          = afex_vif_params->func_bit_map;
6419255736Sdavidch	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
6420255736Sdavidch	rdata->func_to_clear         = afex_vif_params->func_to_clear;
6421255736Sdavidch
6422255736Sdavidch	/* send in echo type of sub command */
6423255736Sdavidch	rdata->echo = afex_vif_params->afex_vif_list_command;
6424255736Sdavidch
6425255736Sdavidch	ECORE_MSG(sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
6426255736Sdavidch		  rdata->afex_vif_list_command, rdata->vif_list_index,
6427255736Sdavidch		  rdata->func_bit_map, rdata->func_to_clear);
6428255736Sdavidch
6429296071Sdavidcs	/* No need for an explicit memory barrier here as long as we
6430296071Sdavidcs	 * ensure the ordering of writing to the SPQ element
6431296071Sdavidcs	 * and updating of the SPQ producer which involves a memory
6432296071Sdavidcs	 * read. If the memory read is removed we will have to put a
6433296071Sdavidcs	 * full memory barrier there (inside ecore_sp_post()).
6434296071Sdavidcs	 */
6435296071Sdavidcs
6436255736Sdavidch	/* this ramrod sends data directly and not through DMA mapping */
6437255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
6438255736Sdavidch			     *p_rdata, NONE_CONNECTION_TYPE);
6439255736Sdavidch}
6440255736Sdavidch
/* Post the FUNCTION_STOP ramrod; it carries no ramrod data, so no
 * buffer needs to be prepared (params is unused).
 */
static inline int ecore_func_send_stop(struct bxe_softc *sc,
				       struct ecore_func_state_params *params)
{
	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
			     NONE_CONNECTION_TYPE);
}
6447255736Sdavidch
/* Post the STOP_TRAFFIC ramrod (pause Tx); it carries no ramrod data,
 * so no buffer needs to be prepared (params is unused).
 */
static inline int ecore_func_send_tx_stop(struct bxe_softc *sc,
				       struct ecore_func_state_params *params)
{
	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
			     NONE_CONNECTION_TYPE);
}
6454255736Sdavidchstatic inline int ecore_func_send_tx_start(struct bxe_softc *sc,
6455255736Sdavidch				       struct ecore_func_state_params *params)
6456255736Sdavidch{
6457255736Sdavidch	struct ecore_func_sp_obj *o = params->f_obj;
6458255736Sdavidch	struct flow_control_configuration *rdata =
6459255736Sdavidch		(struct flow_control_configuration *)o->rdata;
6460255736Sdavidch	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6461255736Sdavidch	struct ecore_func_tx_start_params *tx_start_params =
6462255736Sdavidch		&params->params.tx_start;
6463255736Sdavidch	int i;
6464255736Sdavidch
6465255736Sdavidch	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6466255736Sdavidch
6467255736Sdavidch	rdata->dcb_enabled = tx_start_params->dcb_enabled;
6468255736Sdavidch	rdata->dcb_version = tx_start_params->dcb_version;
6469255736Sdavidch	rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
6470255736Sdavidch
6471255736Sdavidch	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6472255736Sdavidch		rdata->traffic_type_to_priority_cos[i] =
6473255736Sdavidch			tx_start_params->traffic_type_to_priority_cos[i];
6474255736Sdavidch
6475296071Sdavidcs	for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
6476296071Sdavidcs		rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
6477296071Sdavidcs
6478296071Sdavidcs	/* No need for an explicit memory barrier here as long as we
6479296071Sdavidcs	 * ensure the ordering of writing to the SPQ element
6480296071Sdavidcs	 * and updating of the SPQ producer which involves a memory
6481296071Sdavidcs	 * read. If the memory read is removed we will have to put a
6482296071Sdavidcs	 * full memory barrier there (inside ecore_sp_post()).
6483296071Sdavidcs	 */
6484255736Sdavidch	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6485255736Sdavidch			     data_mapping, NONE_CONNECTION_TYPE);
6486255736Sdavidch}
6487255736Sdavidch
6488296071Sdavidcsstatic inline int ecore_func_send_set_timesync(struct bxe_softc *sc,
6489296071Sdavidcs					struct ecore_func_state_params *params)
6490296071Sdavidcs{
6491296071Sdavidcs	struct ecore_func_sp_obj *o = params->f_obj;
6492296071Sdavidcs	struct set_timesync_ramrod_data *rdata =
6493296071Sdavidcs		(struct set_timesync_ramrod_data *)o->rdata;
6494296071Sdavidcs	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6495296071Sdavidcs	struct ecore_func_set_timesync_params *set_timesync_params =
6496296071Sdavidcs		&params->params.set_timesync;
6497296071Sdavidcs
6498296071Sdavidcs	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6499296071Sdavidcs
6500296071Sdavidcs	/* Fill the ramrod data with provided parameters */
6501296071Sdavidcs	rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
6502296071Sdavidcs	rdata->offset_cmd = set_timesync_params->offset_cmd;
6503296071Sdavidcs	rdata->add_sub_drift_adjust_value =
6504296071Sdavidcs		set_timesync_params->add_sub_drift_adjust_value;
6505296071Sdavidcs	rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
6506296071Sdavidcs	rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
6507296071Sdavidcs	rdata->offset_delta.lo =
6508296071Sdavidcs		ECORE_CPU_TO_LE32(U64_LO(set_timesync_params->offset_delta));
6509296071Sdavidcs	rdata->offset_delta.hi =
6510296071Sdavidcs		ECORE_CPU_TO_LE32(U64_HI(set_timesync_params->offset_delta));
6511296071Sdavidcs
6512296071Sdavidcs	ECORE_MSG(sc, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
6513296071Sdavidcs	   rdata->drift_adjust_cmd, rdata->offset_cmd,
6514296071Sdavidcs	   rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
6515296071Sdavidcs	   rdata->drift_adjust_period, rdata->offset_delta.lo,
6516296071Sdavidcs	   rdata->offset_delta.hi);
6517296071Sdavidcs
6518296071Sdavidcs	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
6519296071Sdavidcs			     data_mapping, NONE_CONNECTION_TYPE);
6520296071Sdavidcs}
6521296071Sdavidcs
/* Dispatch a function state-machine command to its send/execute handler.
 * Installed as obj->send_cmd by ecore_init_func_obj(); returns the
 * handler's result, or ECORE_INVAL for an unrecognized command.
 */
static int ecore_func_send_cmd(struct bxe_softc *sc,
			       struct ecore_func_state_params *params)
{
	switch (params->cmd) {
	case ECORE_F_CMD_HW_INIT:
		return ecore_func_hw_init(sc, params);
	case ECORE_F_CMD_START:
		return ecore_func_send_start(sc, params);
	case ECORE_F_CMD_STOP:
		return ecore_func_send_stop(sc, params);
	case ECORE_F_CMD_HW_RESET:
		return ecore_func_hw_reset(sc, params);
	case ECORE_F_CMD_AFEX_UPDATE:
		return ecore_func_send_afex_update(sc, params);
	case ECORE_F_CMD_AFEX_VIFLISTS:
		return ecore_func_send_afex_viflists(sc, params);
	case ECORE_F_CMD_TX_STOP:
		return ecore_func_send_tx_stop(sc, params);
	case ECORE_F_CMD_TX_START:
		return ecore_func_send_tx_start(sc, params);
	case ECORE_F_CMD_SWITCH_UPDATE:
		return ecore_func_send_switch_update(sc, params);
	case ECORE_F_CMD_SET_TIMESYNC:
		return ecore_func_send_set_timesync(sc, params);
	default:
		ECORE_ERR("Unknown command: %d\n", params->cmd);
		return ECORE_INVAL;
	}
}
6551255736Sdavidch
/* Initialize a function state-change object: zero it, set up its
 * serialization mutex, attach the ramrod data buffers (regular and AFEX)
 * with their DMA mappings, and install the state-machine callbacks and
 * the driver-specific HW init/reset interface.
 */
void ecore_init_func_obj(struct bxe_softc *sc,
			 struct ecore_func_sp_obj *obj,
			 void *rdata, ecore_dma_addr_t rdata_mapping,
			 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
			 struct ecore_func_sp_drv_ops *drv_iface)
{
	ECORE_MEMSET(obj, 0, sizeof(*obj));

	/* Serializes state changes: only one command may be pending */
	ECORE_MUTEX_INIT(&obj->one_pending_mutex);

	/* Ramrod data buffers and their DMA mappings */
	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->afex_rdata = afex_rdata;
	obj->afex_rdata_mapping = afex_rdata_mapping;
	/* State-machine callbacks */
	obj->send_cmd = ecore_func_send_cmd;
	obj->check_transition = ecore_func_chk_transition;
	obj->complete_cmd = ecore_func_comp_cmd;
	obj->wait_comp = ecore_func_wait_comp;
	/* Driver-specific HW init/reset hooks */
	obj->drv = drv_iface;
}
6572255736Sdavidch
/**
 * ecore_func_state_change - perform Function state change transition
 *
 * @sc:		device handle
 * @params:	parameters to perform the transaction
 *
 * returns 0 in case of successfully completed transition,
 *         negative error code in case of failure, positive
 *         (EBUSY) value if there is a completion to that is
 *         still pending (possible only if RAMROD_COMP_WAIT is
 *         not set in params->ramrod_flags for asynchronous
 *         commands).
 */
int ecore_func_state_change(struct bxe_softc *sc,
			    struct ecore_func_state_params *params)
{
	struct ecore_func_sp_obj *o = params->f_obj;
	int rc, cnt = 300;
	enum ecore_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	ECORE_MUTEX_LOCK(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	rc = o->check_transition(sc, o, params);
	if ((rc == ECORE_BUSY) &&
	    (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
		/* Previous command is still in flight: poll for up to
		 * 300 * 10ms = 3s, dropping the mutex while sleeping so
		 * the completion path can make progress.
		 */
		while ((rc == ECORE_BUSY) && (--cnt > 0)) {
			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
			ECORE_MSLEEP(10);
			ECORE_MUTEX_LOCK(&o->one_pending_mutex);
			rc = o->check_transition(sc, o, params);
		}
		if (rc == ECORE_BUSY) {
			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
			ECORE_ERR("timeout waiting for previous ramrod completion\n");
			return rc;
		}
	} else if (rc) {
		/* Transition is illegal (or busy without RAMROD_RETRY) */
		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
		return rc;
	}

	/* Set "pending" bit */
	ECORE_SET_BIT(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		/* Complete the transition locally without posting a ramrod */
		ecore_func_state_change_comp(sc, o, cmd);
		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(sc, params);

		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);

		if (rc) {
			/* Posting failed: roll back the pending bit so a
			 * later command is not blocked; the barrier orders
			 * the clear against subsequent state reads.
			 */
			o->next_state = ECORE_F_STATE_MAX;
			ECORE_CLEAR_BIT(cmd, pending);
			ECORE_SMP_MB_AFTER_CLEAR_BIT();
			return rc;
		}

		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			/* Synchronous mode: block until the ramrod completes */
			rc = o->wait_comp(sc, o, cmd);
			if (rc)
				return rc;

			return ECORE_SUCCESS;
		}
	}

	/* Asynchronous: positive if the command is still pending */
	return ECORE_RET_PENDING(cmd, pending);
}
6647