1/*-
2 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
15 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
18 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
20 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
24 * THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD$");
29
30#include "bxe.h"
31#include "ecore_init.h"
32
33/**** Exe Queue interfaces ****/
34
35/**
36 * ecore_exe_queue_init - init the Exe Queue object
37 *
38 * @o:		pointer to the object
39 * @exe_len:	length
40 * @owner:	pointer to the owner
41 * @validate:	validate function pointer
42 * @optimize:	optimize function pointer
43 * @exec:	execute function pointer
44 * @get:	get function pointer
45 */
46static inline void ecore_exe_queue_init(struct bxe_softc *sc,
47					struct ecore_exe_queue_obj *o,
48					int exe_len,
49					union ecore_qable_obj *owner,
50					exe_q_validate validate,
51					exe_q_remove remove,
52					exe_q_optimize optimize,
53					exe_q_execute exec,
54					exe_q_get get)
55{
56	ECORE_MEMSET(o, 0, sizeof(*o));
57
58	ECORE_LIST_INIT(&o->exe_queue);
59	ECORE_LIST_INIT(&o->pending_comp);
60
61	ECORE_SPIN_LOCK_INIT(&o->lock, sc);
62
63	o->exe_chunk_len = exe_len;
64	o->owner         = owner;
65
66	/* Owner specific callbacks */
67	o->validate      = validate;
68	o->remove        = remove;
69	o->optimize      = optimize;
70	o->execute       = exec;
71	o->get           = get;
72
73	ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d\n",
74		  exe_len);
75}
76
77static inline void ecore_exe_queue_free_elem(struct bxe_softc *sc,
78					     struct ecore_exeq_elem *elem)
79{
80	ECORE_MSG(sc, "Deleting an exe_queue element\n");
81	ECORE_FREE(sc, elem, sizeof(*elem));
82}
83
84static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
85{
86	struct ecore_exeq_elem *elem;
87	int cnt = 0;
88
89	ECORE_SPIN_LOCK_BH(&o->lock);
90
91	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
92				  struct ecore_exeq_elem)
93		cnt++;
94
95	ECORE_SPIN_UNLOCK_BH(&o->lock);
96
97	return cnt;
98}
99
100/**
101 * ecore_exe_queue_add - add a new element to the execution queue
102 *
103 * @sc:		driver handle
104 * @o:		queue
105 * @cmd:	new command to add
106 * @restore:	true - do not optimize the command
107 *
108 * If the element is optimized or is illegal, frees it.
109 */
110static inline int ecore_exe_queue_add(struct bxe_softc *sc,
111				      struct ecore_exe_queue_obj *o,
112				      struct ecore_exeq_elem *elem,
113				      bool restore)
114{
115	int rc;
116
117	ECORE_SPIN_LOCK_BH(&o->lock);
118
119	if (!restore) {
120		/* Try to cancel this element queue */
121		rc = o->optimize(sc, o->owner, elem);
122		if (rc)
123			goto free_and_exit;
124
125		/* Check if this request is ok */
126		rc = o->validate(sc, o->owner, elem);
127		if (rc) {
128			ECORE_MSG(sc, "Preamble failed: %d\n", rc);
129			goto free_and_exit;
130		}
131	}
132
133	/* If so, add it to the execution queue */
134	ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);
135
136	ECORE_SPIN_UNLOCK_BH(&o->lock);
137
138	return ECORE_SUCCESS;
139
140free_and_exit:
141	ecore_exe_queue_free_elem(sc, elem);
142
143	ECORE_SPIN_UNLOCK_BH(&o->lock);
144
145	return rc;
146}
147
148static inline void __ecore_exe_queue_reset_pending(
149	struct bxe_softc *sc,
150	struct ecore_exe_queue_obj *o)
151{
152	struct ecore_exeq_elem *elem;
153
154	while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
155		elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
156					      struct ecore_exeq_elem,
157					      link);
158
159		ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
160		ecore_exe_queue_free_elem(sc, elem);
161	}
162}
163
164/**
165 * ecore_exe_queue_step - execute one execution chunk atomically
166 *
167 * @sc:			driver handle
168 * @o:			queue
169 * @ramrod_flags:	flags
170 *
171 * (Should be called while holding the exe_queue->lock).
172 */
173static inline int ecore_exe_queue_step(struct bxe_softc *sc,
174				       struct ecore_exe_queue_obj *o,
175				       unsigned long *ramrod_flags)
176{
177	struct ecore_exeq_elem *elem, spacer;
178	int cur_len = 0, rc;
179
180	ECORE_MEMSET(&spacer, 0, sizeof(spacer));
181
182	/* Next step should not be performed until the current is finished,
183	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
184	 * properly clear object internals without sending any command to the FW
185	 * which also implies there won't be any completion to clear the
186	 * 'pending' list.
187	 */
188	if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
189		if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
190			ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
191			__ecore_exe_queue_reset_pending(sc, o);
192		} else {
193			return ECORE_PENDING;
194		}
195	}
196
197	/* Run through the pending commands list and create a next
198	 * execution chunk.
199	 */
200	while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
201		elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
202					      struct ecore_exeq_elem,
203					      link);
204		ECORE_DBG_BREAK_IF(!elem->cmd_len);
205
206		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
207			cur_len += elem->cmd_len;
208			/* Prevent from both lists being empty when moving an
209			 * element. This will allow the call of
210			 * ecore_exe_queue_empty() without locking.
211			 */
212			ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
213			mb();
214			ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
215			ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
216			ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
217		} else
218			break;
219	}
220
221	/* Sanity check */
222	if (!cur_len)
223		return ECORE_SUCCESS;
224
225	rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
226	if (rc < 0)
227		/* In case of an error return the commands back to the queue
228		 *  and reset the pending_comp.
229		 */
230		ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
231	else if (!rc)
232		/* If zero is returned, means there are no outstanding pending
233		 * completions and we may dismiss the pending list.
234		 */
235		__ecore_exe_queue_reset_pending(sc, o);
236
237	return rc;
238}
239
240static inline bool ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
241{
242	bool empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);
243
244	/* Don't reorder!!! */
245	mb();
246
247	return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
248}
249
250static inline struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(
251	struct bxe_softc *sc)
252{
253	ECORE_MSG(sc, "Allocating a new exe_queue element\n");
254	return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC,
255			    sc);
256}
257
258/************************ raw_obj functions ***********************************/
259static bool ecore_raw_check_pending(struct ecore_raw_obj *o)
260{
261	/*
262     * !! converts the value returned by ECORE_TEST_BIT such that it
263     * is guaranteed not to be truncated regardless of bool definition.
264	 *
265	 * Note we cannot simply define the function's return value type
266     * to match the type returned by ECORE_TEST_BIT, as it varies by
267     * platform/implementation.
268	 */
269
270	return !!ECORE_TEST_BIT(o->state, o->pstate);
271}
272
273static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
274{
275	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
276	ECORE_CLEAR_BIT(o->state, o->pstate);
277	ECORE_SMP_MB_AFTER_CLEAR_BIT();
278}
279
280static void ecore_raw_set_pending(struct ecore_raw_obj *o)
281{
282	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
283	ECORE_SET_BIT(o->state, o->pstate);
284	ECORE_SMP_MB_AFTER_CLEAR_BIT();
285}
286
287/**
288 * ecore_state_wait - wait until the given bit(state) is cleared
289 *
290 * @sc:		device handle
291 * @state:	state which is to be cleared
292 * @state_p:	state buffer
293 *
294 */
295static inline int ecore_state_wait(struct bxe_softc *sc, int state,
296				   unsigned long *pstate)
297{
298	/* can take a while if any port is running */
299	int cnt = 5000;
300
301
302	if (CHIP_REV_IS_EMUL(sc))
303		cnt *= 20;
304
305	ECORE_MSG(sc, "waiting for state to become %d\n", state);
306
307	ECORE_MIGHT_SLEEP();
308	while (cnt--) {
309		if (!ECORE_TEST_BIT(state, pstate)) {
310#ifdef ECORE_STOP_ON_ERROR
311			ECORE_MSG(sc, "exit  (cnt %d)\n", 5000 - cnt);
312#endif
313			return ECORE_SUCCESS;
314		}
315
316		ECORE_WAIT(sc, delay_us);
317
318		if (sc->panic)
319			return ECORE_IO;
320	}
321
322	/* timeout! */
323	ECORE_ERR("timeout waiting for state %d\n", state);
324#ifdef ECORE_STOP_ON_ERROR
325	ecore_panic();
326#endif
327
328	return ECORE_TIMEOUT;
329}
330
331static int ecore_raw_wait(struct bxe_softc *sc, struct ecore_raw_obj *raw)
332{
333	return ecore_state_wait(sc, raw->state, raw->pstate);
334}
335
336/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
337/* credit handling callbacks */
338static bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
339{
340	struct ecore_credit_pool_obj *mp = o->macs_pool;
341
342	ECORE_DBG_BREAK_IF(!mp);
343
344	return mp->get_entry(mp, offset);
345}
346
347static bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
348{
349	struct ecore_credit_pool_obj *mp = o->macs_pool;
350
351	ECORE_DBG_BREAK_IF(!mp);
352
353	return mp->get(mp, 1);
354}
355
356static bool ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset)
357{
358	struct ecore_credit_pool_obj *vp = o->vlans_pool;
359
360	ECORE_DBG_BREAK_IF(!vp);
361
362	return vp->get_entry(vp, offset);
363}
364
365static bool ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o)
366{
367	struct ecore_credit_pool_obj *vp = o->vlans_pool;
368
369	ECORE_DBG_BREAK_IF(!vp);
370
371	return vp->get(vp, 1);
372}
373
374static bool ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
375{
376	struct ecore_credit_pool_obj *mp = o->macs_pool;
377	struct ecore_credit_pool_obj *vp = o->vlans_pool;
378
379	if (!mp->get(mp, 1))
380		return FALSE;
381
382	if (!vp->get(vp, 1)) {
383		mp->put(mp, 1);
384		return FALSE;
385	}
386
387	return TRUE;
388}
389
390static bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
391{
392	struct ecore_credit_pool_obj *mp = o->macs_pool;
393
394	return mp->put_entry(mp, offset);
395}
396
397static bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
398{
399	struct ecore_credit_pool_obj *mp = o->macs_pool;
400
401	return mp->put(mp, 1);
402}
403
404static bool ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset)
405{
406	struct ecore_credit_pool_obj *vp = o->vlans_pool;
407
408	return vp->put_entry(vp, offset);
409}
410
411static bool ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o)
412{
413	struct ecore_credit_pool_obj *vp = o->vlans_pool;
414
415	return vp->put(vp, 1);
416}
417
418static bool ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
419{
420	struct ecore_credit_pool_obj *mp = o->macs_pool;
421	struct ecore_credit_pool_obj *vp = o->vlans_pool;
422
423	if (!mp->put(mp, 1))
424		return FALSE;
425
426	if (!vp->put(vp, 1)) {
427		mp->get(mp, 1);
428		return FALSE;
429	}
430
431	return TRUE;
432}
433
434/**
435 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
436 * head list.
437 *
438 * @sc:		device handle
439 * @o:		vlan_mac object
440 *
441 * @details: Non-blocking implementation; should be called under execution
442 *           queue lock.
443 */
444static int __ecore_vlan_mac_h_write_trylock(struct bxe_softc *sc,
445					    struct ecore_vlan_mac_obj *o)
446{
447	if (o->head_reader) {
448		ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy\n");
449		return ECORE_BUSY;
450	}
451
452	ECORE_MSG(sc, "vlan_mac_lock writer - Taken\n");
453	return ECORE_SUCCESS;
454}
455
456/**
457 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
458 * which wasn't able to run due to a taken lock on vlan mac head list.
459 *
460 * @sc:		device handle
461 * @o:		vlan_mac object
462 *
463 * @details Should be called under execution queue lock; notice it might release
464 *          and reclaim it during its run.
465 */
466static void __ecore_vlan_mac_h_exec_pending(struct bxe_softc *sc,
467					    struct ecore_vlan_mac_obj *o)
468{
469	int rc;
470	unsigned long ramrod_flags = o->saved_ramrod_flags;
471
472	ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
473		  ramrod_flags);
474	o->head_exe_request = FALSE;
475	o->saved_ramrod_flags = 0;
476	rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
477	if (rc != ECORE_SUCCESS) {
478		ECORE_ERR("execution of pending commands failed with rc %d\n",
479			  rc);
480#ifdef ECORE_STOP_ON_ERROR
481		ecore_panic();
482#endif
483	}
484}
485
486/**
487 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
488 * called due to vlan mac head list lock being taken.
489 *
490 * @sc:			device handle
491 * @o:			vlan_mac object
492 * @ramrod_flags:	ramrod flags of missed execution
493 *
494 * @details Should be called under execution queue lock.
495 */
496static void __ecore_vlan_mac_h_pend(struct bxe_softc *sc,
497				    struct ecore_vlan_mac_obj *o,
498				    unsigned long ramrod_flags)
499{
500	o->head_exe_request = TRUE;
501	o->saved_ramrod_flags = ramrod_flags;
502	ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu\n",
503		  ramrod_flags);
504}
505
506/**
507 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
508 *
509 * @sc:			device handle
510 * @o:			vlan_mac object
511 *
512 * @details Should be called under execution queue lock. Notice if a pending
513 *          execution exists, it would perform it - possibly releasing and
514 *          reclaiming the execution queue lock.
515 */
516static void __ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
517					    struct ecore_vlan_mac_obj *o)
518{
519	/* It's possible a new pending execution was added since this writer
520	 * executed. If so, execute again. [Ad infinitum]
521	 */
522	while(o->head_exe_request) {
523		ECORE_MSG(sc, "vlan_mac_lock - writer release encountered a pending request\n");
524		__ecore_vlan_mac_h_exec_pending(sc, o);
525	}
526}
527
528/**
529 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
530 *
531 * @sc:			device handle
532 * @o:			vlan_mac object
533 *
534 * @details Notice if a pending execution exists, it would perform it -
535 *          possibly releasing and reclaiming the execution queue lock.
536 */
537void ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
538				   struct ecore_vlan_mac_obj *o)
539{
540	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
541	__ecore_vlan_mac_h_write_unlock(sc, o);
542	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
543}
544
545/**
546 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
547 *
548 * @sc:			device handle
549 * @o:			vlan_mac object
550 *
551 * @details Should be called under the execution queue lock. May sleep. May
552 *          release and reclaim execution queue lock during its run.
553 */
554static int __ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
555					struct ecore_vlan_mac_obj *o)
556{
557	/* If we got here, we're holding lock --> no WRITER exists */
558	o->head_reader++;
559	ECORE_MSG(sc, "vlan_mac_lock - locked reader - number %d\n",
560		  o->head_reader);
561
562	return ECORE_SUCCESS;
563}
564
565/**
566 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
567 *
568 * @sc:			device handle
569 * @o:			vlan_mac object
570 *
571 * @details May sleep. Claims and releases execution queue lock during its run.
572 */
573int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
574			       struct ecore_vlan_mac_obj *o)
575{
576	int rc;
577
578	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
579	rc = __ecore_vlan_mac_h_read_lock(sc, o);
580	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
581
582	return rc;
583}
584
585/**
586 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
587 *
588 * @sc:			device handle
589 * @o:			vlan_mac object
590 *
591 * @details Should be called under execution queue lock. Notice if a pending
592 *          execution exists, it would be performed if this was the last
593 *          reader. possibly releasing and reclaiming the execution queue lock.
594 */
595static void __ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
596					  struct ecore_vlan_mac_obj *o)
597{
598	if (!o->head_reader) {
599		ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
600#ifdef ECORE_STOP_ON_ERROR
601		ecore_panic();
602#endif
603	} else {
604		o->head_reader--;
605		ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d\n",
606			  o->head_reader);
607	}
608
609	/* It's possible a new pending execution was added, and that this reader
610	 * was last - if so we need to execute the command.
611	 */
612	if (!o->head_reader && o->head_exe_request) {
613		ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request\n");
614
615		/* Writer release will do the trick */
616		__ecore_vlan_mac_h_write_unlock(sc, o);
617	}
618}
619
620/**
621 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
622 *
623 * @sc:			device handle
624 * @o:			vlan_mac object
625 *
626 * @details Notice if a pending execution exists, it would be performed if this
627 *          was the last reader. Claims and releases the execution queue lock
628 *          during its run.
629 */
630void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
631				  struct ecore_vlan_mac_obj *o)
632{
633	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
634	__ecore_vlan_mac_h_read_unlock(sc, o);
635	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
636}
637
638/**
639 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
640 *
641 * @sc:			device handle
642 * @o:			vlan_mac object
643 * @n:			number of elements to get
644 * @base:		base address for element placement
645 * @stride:		stride between elements (in bytes)
646 */
647static int ecore_get_n_elements(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
648				 int n, uint8_t *base, uint8_t stride, uint8_t size)
649{
650	struct ecore_vlan_mac_registry_elem *pos;
651	uint8_t *next = base;
652	int counter = 0;
653	int read_lock;
654
655	ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)\n");
656	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
657	if (read_lock != ECORE_SUCCESS)
658		ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
659
660	/* traverse list */
661	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
662				  struct ecore_vlan_mac_registry_elem) {
663		if (counter < n) {
664			ECORE_MEMCPY(next, &pos->u, size);
665			counter++;
666			ECORE_MSG(sc, "copied element number %d to address %p element was:\n",
667				  counter, next);
668			next += stride + size;
669		}
670	}
671
672	if (read_lock == ECORE_SUCCESS) {
673		ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)\n");
674		ecore_vlan_mac_h_read_unlock(sc, o);
675	}
676
677	return counter * ETH_ALEN;
678}
679
680/* check_add() callbacks */
681static int ecore_check_mac_add(struct bxe_softc *sc,
682			       struct ecore_vlan_mac_obj *o,
683			       union ecore_classification_ramrod_data *data)
684{
685	struct ecore_vlan_mac_registry_elem *pos;
686
687	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
688
689	if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
690		return ECORE_INVAL;
691
692	/* Check if a requested MAC already exists */
693	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
694				  struct ecore_vlan_mac_registry_elem)
695		if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
696		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
697			return ECORE_EXISTS;
698
699	return ECORE_SUCCESS;
700}
701
702static int ecore_check_vlan_add(struct bxe_softc *sc,
703				struct ecore_vlan_mac_obj *o,
704				union ecore_classification_ramrod_data *data)
705{
706	struct ecore_vlan_mac_registry_elem *pos;
707
708	ECORE_MSG(sc, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
709
710	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
711				  struct ecore_vlan_mac_registry_elem)
712		if (data->vlan.vlan == pos->u.vlan.vlan)
713			return ECORE_EXISTS;
714
715	return ECORE_SUCCESS;
716}
717
718static int ecore_check_vlan_mac_add(struct bxe_softc *sc,
719				    struct ecore_vlan_mac_obj *o,
720				   union ecore_classification_ramrod_data *data)
721{
722	struct ecore_vlan_mac_registry_elem *pos;
723
724	ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
725		  data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
726
727	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
728				  struct ecore_vlan_mac_registry_elem)
729		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
730		    (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
731				  ETH_ALEN)) &&
732		    (data->vlan_mac.is_inner_mac ==
733		     pos->u.vlan_mac.is_inner_mac))
734			return ECORE_EXISTS;
735
736	return ECORE_SUCCESS;
737}
738
739/* check_del() callbacks */
740static struct ecore_vlan_mac_registry_elem *
741	ecore_check_mac_del(struct bxe_softc *sc,
742			    struct ecore_vlan_mac_obj *o,
743			    union ecore_classification_ramrod_data *data)
744{
745	struct ecore_vlan_mac_registry_elem *pos;
746
747	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
748
749	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
750				  struct ecore_vlan_mac_registry_elem)
751		if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
752		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
753			return pos;
754
755	return NULL;
756}
757
758static struct ecore_vlan_mac_registry_elem *
759	ecore_check_vlan_del(struct bxe_softc *sc,
760			     struct ecore_vlan_mac_obj *o,
761			     union ecore_classification_ramrod_data *data)
762{
763	struct ecore_vlan_mac_registry_elem *pos;
764
765	ECORE_MSG(sc, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
766
767	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
768				  struct ecore_vlan_mac_registry_elem)
769		if (data->vlan.vlan == pos->u.vlan.vlan)
770			return pos;
771
772	return NULL;
773}
774
775static struct ecore_vlan_mac_registry_elem *
776	ecore_check_vlan_mac_del(struct bxe_softc *sc,
777				 struct ecore_vlan_mac_obj *o,
778				 union ecore_classification_ramrod_data *data)
779{
780	struct ecore_vlan_mac_registry_elem *pos;
781
782	ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
783		  data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
784
785	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
786				  struct ecore_vlan_mac_registry_elem)
787		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
788		    (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
789			     ETH_ALEN)) &&
790		    (data->vlan_mac.is_inner_mac ==
791		     pos->u.vlan_mac.is_inner_mac))
792			return pos;
793
794	return NULL;
795}
796
797/* check_move() callback */
798static bool ecore_check_move(struct bxe_softc *sc,
799			     struct ecore_vlan_mac_obj *src_o,
800			     struct ecore_vlan_mac_obj *dst_o,
801			     union ecore_classification_ramrod_data *data)
802{
803	struct ecore_vlan_mac_registry_elem *pos;
804	int rc;
805
806	/* Check if we can delete the requested configuration from the first
807	 * object.
808	 */
809	pos = src_o->check_del(sc, src_o, data);
810
811	/*  check if configuration can be added */
812	rc = dst_o->check_add(sc, dst_o, data);
813
814	/* If this classification can not be added (is already set)
815	 * or can't be deleted - return an error.
816	 */
817	if (rc || !pos)
818		return FALSE;
819
820	return TRUE;
821}
822
823static bool ecore_check_move_always_err(
824	struct bxe_softc *sc,
825	struct ecore_vlan_mac_obj *src_o,
826	struct ecore_vlan_mac_obj *dst_o,
827	union ecore_classification_ramrod_data *data)
828{
829	return FALSE;
830}
831
832static inline uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
833{
834	struct ecore_raw_obj *raw = &o->raw;
835	uint8_t rx_tx_flag = 0;
836
837	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
838	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
839		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
840
841	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
842	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
843		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
844
845	return rx_tx_flag;
846}
847
848void ecore_set_mac_in_nig(struct bxe_softc *sc,
849			  bool add, unsigned char *dev_addr, int index)
850{
851	uint32_t wb_data[2];
852	uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
853			 NIG_REG_LLH0_FUNC_MEM;
854
855	if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
856		return;
857
858	if (index > ECORE_LLH_CAM_MAX_PF_LINE)
859		return;
860
861	ECORE_MSG(sc, "Going to %s LLH configuration at entry %d\n",
862		  (add ? "ADD" : "DELETE"), index);
863
864	if (add) {
865		/* LLH_FUNC_MEM is a uint64_t WB register */
866		reg_offset += 8*index;
867
868		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
869			      (dev_addr[4] <<  8) |  dev_addr[5]);
870		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
871
872		ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
873	}
874
875	REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
876				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
877}
878
879/**
880 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
881 *
882 * @sc:		device handle
883 * @o:		queue for which we want to configure this rule
884 * @add:	if TRUE the command is an ADD command, DEL otherwise
885 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
886 * @hdr:	pointer to a header to setup
887 *
888 */
889static inline void ecore_vlan_mac_set_cmd_hdr_e2(struct bxe_softc *sc,
890	struct ecore_vlan_mac_obj *o, bool add, int opcode,
891	struct eth_classify_cmd_header *hdr)
892{
893	struct ecore_raw_obj *raw = &o->raw;
894
895	hdr->client_id = raw->cl_id;
896	hdr->func_id = raw->func_id;
897
898	/* Rx or/and Tx (internal switching) configuration ? */
899	hdr->cmd_general_data |=
900		ecore_vlan_mac_get_rx_tx_flag(o);
901
902	if (add)
903		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
904
905	hdr->cmd_general_data |=
906		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
907}
908
909/**
910 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
911 *
912 * @cid:	connection id
913 * @type:	ECORE_FILTER_XXX_PENDING
914 * @hdr:	pointer to header to setup
915 * @rule_cnt:
916 *
917 * currently we always configure one rule and echo field to contain a CID and an
918 * opcode type.
919 */
920static inline void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
921				struct eth_classify_header *hdr, int rule_cnt)
922{
923	hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
924				(type << ECORE_SWCID_SHIFT));
925	hdr->rule_cnt = (uint8_t)rule_cnt;
926}
927
928/* hw_config() callbacks */
929static void ecore_set_one_mac_e2(struct bxe_softc *sc,
930				 struct ecore_vlan_mac_obj *o,
931				 struct ecore_exeq_elem *elem, int rule_idx,
932				 int cam_offset)
933{
934	struct ecore_raw_obj *raw = &o->raw;
935	struct eth_classify_rules_ramrod_data *data =
936		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
937	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
938	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
939	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
940	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
941	uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;
942
943	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
944	 * relevant. In addition, current implementation is tuned for a
945	 * single ETH MAC.
946	 *
947	 * When multiple unicast ETH MACs PF configuration in switch
948	 * independent mode is required (NetQ, multiple netdev MACs,
949	 * etc.), consider better utilisation of 8 per function MAC
950	 * entries in the LLH register. There is also
951	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
952	 * total number of CAM entries to 16.
953	 *
954	 * Currently we won't configure NIG for MACs other than a primary ETH
955	 * MAC and iSCSI L2 MAC.
956	 *
957	 * If this MAC is moving from one Queue to another, no need to change
958	 * NIG configuration.
959	 */
960	if (cmd != ECORE_VLAN_MAC_MOVE) {
961		if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
962			ecore_set_mac_in_nig(sc, add, mac,
963					     ECORE_LLH_CAM_ISCSI_ETH_LINE);
964		else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
965			ecore_set_mac_in_nig(sc, add, mac,
966					     ECORE_LLH_CAM_ETH_LINE);
967	}
968
969	/* Reset the ramrod data buffer for the first rule */
970	if (rule_idx == 0)
971		ECORE_MEMSET(data, 0, sizeof(*data));
972
973	/* Setup a command header */
974	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_MAC,
975				      &rule_entry->mac.header);
976
977	ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n",
978		  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id);
979
980	/* Set a MAC itself */
981	ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
982			      &rule_entry->mac.mac_mid,
983			      &rule_entry->mac.mac_lsb, mac);
984	rule_entry->mac.inner_mac =
985		elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
986
987	/* MOVE: Add a rule that will add this MAC to the target Queue */
988	if (cmd == ECORE_VLAN_MAC_MOVE) {
989		rule_entry++;
990		rule_cnt++;
991
992		/* Setup ramrod data */
993		ecore_vlan_mac_set_cmd_hdr_e2(sc,
994					elem->cmd_data.vlan_mac.target_obj,
995					      TRUE, CLASSIFY_RULE_OPCODE_MAC,
996					      &rule_entry->mac.header);
997
998		/* Set a MAC itself */
999		ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
1000				      &rule_entry->mac.mac_mid,
1001				      &rule_entry->mac.mac_lsb, mac);
1002		rule_entry->mac.inner_mac =
1003			elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
1004	}
1005
1006	/* Set the ramrod data header */
1007	/* TODO: take this to the higher level in order to prevent multiple
1008		 writing */
1009	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1010					rule_cnt);
1011}
1012
1013/**
1014 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
1015 *
1016 * @sc:		device handle
1017 * @o:		queue
1018 * @type:
1019 * @cam_offset:	offset in cam memory
1020 * @hdr:	pointer to a header to setup
1021 *
1022 * E1/E1H
1023 */
1024static inline void ecore_vlan_mac_set_rdata_hdr_e1x(struct bxe_softc *sc,
1025	struct ecore_vlan_mac_obj *o, int type, int cam_offset,
1026	struct mac_configuration_hdr *hdr)
1027{
1028	struct ecore_raw_obj *r = &o->raw;
1029
1030	hdr->length = 1;
1031	hdr->offset = (uint8_t)cam_offset;
1032	hdr->client_id = ECORE_CPU_TO_LE16(0xff);
1033	hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
1034				(type << ECORE_SWCID_SHIFT));
1035}
1036
1037static inline void ecore_vlan_mac_set_cfg_entry_e1x(struct bxe_softc *sc,
1038	struct ecore_vlan_mac_obj *o, bool add, int opcode, uint8_t *mac,
1039	uint16_t vlan_id, struct mac_configuration_entry *cfg_entry)
1040{
1041	struct ecore_raw_obj *r = &o->raw;
1042	uint32_t cl_bit_vec = (1 << r->cl_id);
1043
1044	cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
1045	cfg_entry->pf_id = r->func_id;
1046	cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);
1047
1048	if (add) {
1049		ECORE_SET_FLAG(cfg_entry->flags,
1050			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1051			       T_ETH_MAC_COMMAND_SET);
1052		ECORE_SET_FLAG(cfg_entry->flags,
1053			       MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
1054			       opcode);
1055
1056		/* Set a MAC in a ramrod data */
1057		ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
1058				      &cfg_entry->middle_mac_addr,
1059				      &cfg_entry->lsb_mac_addr, mac);
1060	} else
1061		ECORE_SET_FLAG(cfg_entry->flags,
1062			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1063			       T_ETH_MAC_COMMAND_INVALIDATE);
1064}
1065
1066static inline void ecore_vlan_mac_set_rdata_e1x(struct bxe_softc *sc,
1067	struct ecore_vlan_mac_obj *o, int type, int cam_offset, bool add,
1068	uint8_t *mac, uint16_t vlan_id, int opcode, struct mac_configuration_cmd *config)
1069{
1070	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
1071	struct ecore_raw_obj *raw = &o->raw;
1072
1073	ecore_vlan_mac_set_rdata_hdr_e1x(sc, o, type, cam_offset,
1074					 &config->hdr);
1075	ecore_vlan_mac_set_cfg_entry_e1x(sc, o, add, opcode, mac, vlan_id,
1076					 cfg_entry);
1077
1078	ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n",
1079		  (add ? "setting" : "clearing"),
1080		  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id, cam_offset);
1081}
1082
1083/**
1084 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
1085 *
1086 * @sc:		device handle
1087 * @o:		ecore_vlan_mac_obj
1088 * @elem:	ecore_exeq_elem
1089 * @rule_idx:	rule_idx
1090 * @cam_offset: cam_offset
1091 */
1092static void ecore_set_one_mac_e1x(struct bxe_softc *sc,
1093				  struct ecore_vlan_mac_obj *o,
1094				  struct ecore_exeq_elem *elem, int rule_idx,
1095				  int cam_offset)
1096{
1097	struct ecore_raw_obj *raw = &o->raw;
1098	struct mac_configuration_cmd *config =
1099		(struct mac_configuration_cmd *)(raw->rdata);
1100	/* 57710 and 57711 do not support MOVE command,
1101	 * so it's either ADD or DEL
1102	 */
1103	bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1104		TRUE : FALSE;
1105
1106	/* Reset the ramrod data buffer */
1107	ECORE_MEMSET(config, 0, sizeof(*config));
1108
1109	ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
1110				     cam_offset, add,
1111				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
1112				     ETH_VLAN_FILTER_ANY_VLAN, config);
1113}
1114
1115static void ecore_set_one_vlan_e2(struct bxe_softc *sc,
1116				  struct ecore_vlan_mac_obj *o,
1117				  struct ecore_exeq_elem *elem, int rule_idx,
1118				  int cam_offset)
1119{
1120	struct ecore_raw_obj *raw = &o->raw;
1121	struct eth_classify_rules_ramrod_data *data =
1122		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1123	int rule_cnt = rule_idx + 1;
1124	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1125	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1126	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1127	uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
1128
1129	/* Reset the ramrod data buffer for the first rule */
1130	if (rule_idx == 0)
1131		ECORE_MEMSET(data, 0, sizeof(*data));
1132
1133	/* Set a rule header */
1134	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1135				      &rule_entry->vlan.header);
1136
1137	ECORE_MSG(sc, "About to %s VLAN %d\n", (add ? "add" : "delete"),
1138		  vlan);
1139
1140	/* Set a VLAN itself */
1141	rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
1142
1143	/* MOVE: Add a rule that will add this MAC to the target Queue */
1144	if (cmd == ECORE_VLAN_MAC_MOVE) {
1145		rule_entry++;
1146		rule_cnt++;
1147
1148		/* Setup ramrod data */
1149		ecore_vlan_mac_set_cmd_hdr_e2(sc,
1150					elem->cmd_data.vlan_mac.target_obj,
1151					      TRUE, CLASSIFY_RULE_OPCODE_VLAN,
1152					      &rule_entry->vlan.header);
1153
1154		/* Set a VLAN itself */
1155		rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
1156	}
1157
1158	/* Set the ramrod data header */
1159	/* TODO: take this to the higher level in order to prevent multiple
1160		 writing */
1161	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1162					rule_cnt);
1163}
1164
1165static void ecore_set_one_vlan_mac_e2(struct bxe_softc *sc,
1166				      struct ecore_vlan_mac_obj *o,
1167				      struct ecore_exeq_elem *elem,
1168				      int rule_idx, int cam_offset)
1169{
1170	struct ecore_raw_obj *raw = &o->raw;
1171	struct eth_classify_rules_ramrod_data *data =
1172		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
1173	int rule_cnt = rule_idx + 1;
1174	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1175	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1176	bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1177	uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
1178	uint8_t *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
1179
1180	/* Reset the ramrod data buffer for the first rule */
1181	if (rule_idx == 0)
1182		ECORE_MEMSET(data, 0, sizeof(*data));
1183
1184	/* Set a rule header */
1185	ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_PAIR,
1186				      &rule_entry->pair.header);
1187
1188	/* Set VLAN and MAC themselves */
1189	rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
1190	ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1191			      &rule_entry->pair.mac_mid,
1192			      &rule_entry->pair.mac_lsb, mac);
1193	rule_entry->pair.inner_mac =
1194			elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1195	/* MOVE: Add a rule that will add this MAC to the target Queue */
1196	if (cmd == ECORE_VLAN_MAC_MOVE) {
1197		rule_entry++;
1198		rule_cnt++;
1199
1200		/* Setup ramrod data */
1201		ecore_vlan_mac_set_cmd_hdr_e2(sc,
1202					elem->cmd_data.vlan_mac.target_obj,
1203					      TRUE, CLASSIFY_RULE_OPCODE_PAIR,
1204					      &rule_entry->pair.header);
1205
1206		/* Set a VLAN itself */
1207		rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
1208		ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1209				      &rule_entry->pair.mac_mid,
1210				      &rule_entry->pair.mac_lsb, mac);
1211		rule_entry->pair.inner_mac =
1212			elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1213	}
1214
1215	/* Set the ramrod data header */
1216	/* TODO: take this to the higher level in order to prevent multiple
1217		 writing */
1218	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1219					rule_cnt);
1220}
1221
1222/**
1223 * ecore_set_one_vlan_mac_e1h -
1224 *
1225 * @sc:		device handle
1226 * @o:		ecore_vlan_mac_obj
1227 * @elem:	ecore_exeq_elem
1228 * @rule_idx:	rule_idx
1229 * @cam_offset:	cam_offset
1230 */
1231static void ecore_set_one_vlan_mac_e1h(struct bxe_softc *sc,
1232				       struct ecore_vlan_mac_obj *o,
1233				       struct ecore_exeq_elem *elem,
1234				       int rule_idx, int cam_offset)
1235{
1236	struct ecore_raw_obj *raw = &o->raw;
1237	struct mac_configuration_cmd *config =
1238		(struct mac_configuration_cmd *)(raw->rdata);
1239	/* 57710 and 57711 do not support MOVE command,
1240	 * so it's either ADD or DEL
1241	 */
1242	bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1243		TRUE : FALSE;
1244
1245	/* Reset the ramrod data buffer */
1246	ECORE_MEMSET(config, 0, sizeof(*config));
1247
1248	ecore_vlan_mac_set_rdata_e1x(sc, o, ECORE_FILTER_VLAN_MAC_PENDING,
1249				     cam_offset, add,
1250				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1251				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1252				     ETH_VLAN_FILTER_CLASSIFY, config);
1253}
1254
1255#define list_next_entry(pos, member) \
1256	list_entry((pos)->member.next, typeof(*(pos)), member)
1257
1258/**
1259 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1260 *
1261 * @sc:		device handle
1262 * @p:		command parameters
1263 * @ppos:	pointer to the cookie
1264 *
1265 * reconfigure next MAC/VLAN/VLAN-MAC element from the
1266 * previously configured elements list.
1267 *
1268 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is	taken
1269 * into an account
1270 *
1271 * pointer to the cookie  - that should be given back in the next call to make
1272 * function handle the next element. If *ppos is set to NULL it will restart the
1273 * iterator. If returned *ppos == NULL this means that the last element has been
1274 * handled.
1275 *
1276 */
1277static int ecore_vlan_mac_restore(struct bxe_softc *sc,
1278			   struct ecore_vlan_mac_ramrod_params *p,
1279			   struct ecore_vlan_mac_registry_elem **ppos)
1280{
1281	struct ecore_vlan_mac_registry_elem *pos;
1282	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1283
1284	/* If list is empty - there is nothing to do here */
1285	if (ECORE_LIST_IS_EMPTY(&o->head)) {
1286		*ppos = NULL;
1287		return 0;
1288	}
1289
1290	/* make a step... */
1291	if (*ppos == NULL)
1292		*ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
1293					    struct ecore_vlan_mac_registry_elem,
1294					       link);
1295	else
1296		*ppos = ECORE_LIST_NEXT(*ppos, link,
1297					struct ecore_vlan_mac_registry_elem);
1298
1299	pos = *ppos;
1300
1301	/* If it's the last step - return NULL */
1302	if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
1303		*ppos = NULL;
1304
1305	/* Prepare a 'user_req' */
1306	ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));
1307
1308	/* Set the command */
1309	p->user_req.cmd = ECORE_VLAN_MAC_ADD;
1310
1311	/* Set vlan_mac_flags */
1312	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1313
1314	/* Set a restore bit */
1315	ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);
1316
1317	return ecore_config_vlan_mac(sc, p);
1318}
1319
1320/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1321 * pointer to an element with a specific criteria and NULL if such an element
1322 * hasn't been found.
1323 */
1324static struct ecore_exeq_elem *ecore_exeq_get_mac(
1325	struct ecore_exe_queue_obj *o,
1326	struct ecore_exeq_elem *elem)
1327{
1328	struct ecore_exeq_elem *pos;
1329	struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1330
1331	/* Check pending for execution commands */
1332	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1333				  struct ecore_exeq_elem)
1334		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
1335			      sizeof(*data)) &&
1336		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1337			return pos;
1338
1339	return NULL;
1340}
1341
1342static struct ecore_exeq_elem *ecore_exeq_get_vlan(
1343	struct ecore_exe_queue_obj *o,
1344	struct ecore_exeq_elem *elem)
1345{
1346	struct ecore_exeq_elem *pos;
1347	struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1348
1349	/* Check pending for execution commands */
1350	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1351				  struct ecore_exeq_elem)
1352		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan, data,
1353			      sizeof(*data)) &&
1354		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1355			return pos;
1356
1357	return NULL;
1358}
1359
1360static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
1361	struct ecore_exe_queue_obj *o,
1362	struct ecore_exeq_elem *elem)
1363{
1364	struct ecore_exeq_elem *pos;
1365	struct ecore_vlan_mac_ramrod_data *data =
1366		&elem->cmd_data.vlan_mac.u.vlan_mac;
1367
1368	/* Check pending for execution commands */
1369	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1370				  struct ecore_exeq_elem)
1371		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1372			      sizeof(*data)) &&
1373		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1374			return pos;
1375
1376	return NULL;
1377}
1378
1379/**
1380 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
1381 *
1382 * @sc:		device handle
1383 * @qo:		ecore_qable_obj
1384 * @elem:	ecore_exeq_elem
1385 *
1386 * Checks that the requested configuration can be added. If yes and if
1387 * requested, consume CAM credit.
1388 *
1389 * The 'validate' is run after the 'optimize'.
1390 *
1391 */
1392static inline int ecore_validate_vlan_mac_add(struct bxe_softc *sc,
1393					      union ecore_qable_obj *qo,
1394					      struct ecore_exeq_elem *elem)
1395{
1396	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1397	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1398	int rc;
1399
1400	/* Check the registry */
1401	rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
1402	if (rc) {
1403		ECORE_MSG(sc, "ADD command is not allowed considering current registry state.\n");
1404		return rc;
1405	}
1406
1407	/* Check if there is a pending ADD command for this
1408	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1409	 */
1410	if (exeq->get(exeq, elem)) {
1411		ECORE_MSG(sc, "There is a pending ADD command already\n");
1412		return ECORE_EXISTS;
1413	}
1414
1415	/* TODO: Check the pending MOVE from other objects where this
1416	 * object is a destination object.
1417	 */
1418
1419	/* Consume the credit if not requested not to */
1420	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1421			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1422	    o->get_credit(o)))
1423		return ECORE_INVAL;
1424
1425	return ECORE_SUCCESS;
1426}
1427
1428/**
1429 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
1430 *
1431 * @sc:		device handle
1432 * @qo:		quable object to check
1433 * @elem:	element that needs to be deleted
1434 *
1435 * Checks that the requested configuration can be deleted. If yes and if
1436 * requested, returns a CAM credit.
1437 *
1438 * The 'validate' is run after the 'optimize'.
1439 */
1440static inline int ecore_validate_vlan_mac_del(struct bxe_softc *sc,
1441					      union ecore_qable_obj *qo,
1442					      struct ecore_exeq_elem *elem)
1443{
1444	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1445	struct ecore_vlan_mac_registry_elem *pos;
1446	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1447	struct ecore_exeq_elem query_elem;
1448
1449	/* If this classification can not be deleted (doesn't exist)
1450	 * - return a ECORE_EXIST.
1451	 */
1452	pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1453	if (!pos) {
1454		ECORE_MSG(sc, "DEL command is not allowed considering current registry state\n");
1455		return ECORE_EXISTS;
1456	}
1457
1458	/* Check if there are pending DEL or MOVE commands for this
1459	 * MAC/VLAN/VLAN-MAC. Return an error if so.
1460	 */
1461	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1462
1463	/* Check for MOVE commands */
1464	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
1465	if (exeq->get(exeq, &query_elem)) {
1466		ECORE_ERR("There is a pending MOVE command already\n");
1467		return ECORE_INVAL;
1468	}
1469
1470	/* Check for DEL commands */
1471	if (exeq->get(exeq, elem)) {
1472		ECORE_MSG(sc, "There is a pending DEL command already\n");
1473		return ECORE_EXISTS;
1474	}
1475
1476	/* Return the credit to the credit pool if not requested not to */
1477	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1478			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1479	    o->put_credit(o))) {
1480		ECORE_ERR("Failed to return a credit\n");
1481		return ECORE_INVAL;
1482	}
1483
1484	return ECORE_SUCCESS;
1485}
1486
1487/**
1488 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
1489 *
1490 * @sc:		device handle
1491 * @qo:		quable object to check (source)
1492 * @elem:	element that needs to be moved
1493 *
1494 * Checks that the requested configuration can be moved. If yes and if
1495 * requested, returns a CAM credit.
1496 *
1497 * The 'validate' is run after the 'optimize'.
1498 */
1499static inline int ecore_validate_vlan_mac_move(struct bxe_softc *sc,
1500					       union ecore_qable_obj *qo,
1501					       struct ecore_exeq_elem *elem)
1502{
1503	struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
1504	struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1505	struct ecore_exeq_elem query_elem;
1506	struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
1507	struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1508
1509	/* Check if we can perform this operation based on the current registry
1510	 * state.
1511	 */
1512	if (!src_o->check_move(sc, src_o, dest_o,
1513			       &elem->cmd_data.vlan_mac.u)) {
1514		ECORE_MSG(sc, "MOVE command is not allowed considering current registry state\n");
1515		return ECORE_INVAL;
1516	}
1517
1518	/* Check if there is an already pending DEL or MOVE command for the
1519	 * source object or ADD command for a destination object. Return an
1520	 * error if so.
1521	 */
1522	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1523
1524	/* Check DEL on source */
1525	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1526	if (src_exeq->get(src_exeq, &query_elem)) {
1527		ECORE_ERR("There is a pending DEL command on the source queue already\n");
1528		return ECORE_INVAL;
1529	}
1530
1531	/* Check MOVE on source */
1532	if (src_exeq->get(src_exeq, elem)) {
1533		ECORE_MSG(sc, "There is a pending MOVE command already\n");
1534		return ECORE_EXISTS;
1535	}
1536
1537	/* Check ADD on destination */
1538	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1539	if (dest_exeq->get(dest_exeq, &query_elem)) {
1540		ECORE_ERR("There is a pending ADD command on the destination queue already\n");
1541		return ECORE_INVAL;
1542	}
1543
1544	/* Consume the credit if not requested not to */
1545	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
1546			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1547	    dest_o->get_credit(dest_o)))
1548		return ECORE_INVAL;
1549
1550	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1551			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1552	    src_o->put_credit(src_o))) {
1553		/* return the credit taken from dest... */
1554		dest_o->put_credit(dest_o);
1555		return ECORE_INVAL;
1556	}
1557
1558	return ECORE_SUCCESS;
1559}
1560
1561static int ecore_validate_vlan_mac(struct bxe_softc *sc,
1562				   union ecore_qable_obj *qo,
1563				   struct ecore_exeq_elem *elem)
1564{
1565	switch (elem->cmd_data.vlan_mac.cmd) {
1566	case ECORE_VLAN_MAC_ADD:
1567		return ecore_validate_vlan_mac_add(sc, qo, elem);
1568	case ECORE_VLAN_MAC_DEL:
1569		return ecore_validate_vlan_mac_del(sc, qo, elem);
1570	case ECORE_VLAN_MAC_MOVE:
1571		return ecore_validate_vlan_mac_move(sc, qo, elem);
1572	default:
1573		return ECORE_INVAL;
1574	}
1575}
1576
1577static int ecore_remove_vlan_mac(struct bxe_softc *sc,
1578				  union ecore_qable_obj *qo,
1579				  struct ecore_exeq_elem *elem)
1580{
1581	int rc = 0;
1582
1583	/* If consumption wasn't required, nothing to do */
1584	if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1585			   &elem->cmd_data.vlan_mac.vlan_mac_flags))
1586		return ECORE_SUCCESS;
1587
1588	switch (elem->cmd_data.vlan_mac.cmd) {
1589	case ECORE_VLAN_MAC_ADD:
1590	case ECORE_VLAN_MAC_MOVE:
1591		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1592		break;
1593	case ECORE_VLAN_MAC_DEL:
1594		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1595		break;
1596	default:
1597		return ECORE_INVAL;
1598	}
1599
1600	if (rc != TRUE)
1601		return ECORE_INVAL;
1602
1603	return ECORE_SUCCESS;
1604}
1605
1606/**
1607 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1608 *
1609 * @sc:		device handle
1610 * @o:		ecore_vlan_mac_obj
1611 *
1612 */
1613static int ecore_wait_vlan_mac(struct bxe_softc *sc,
1614			       struct ecore_vlan_mac_obj *o)
1615{
1616	int cnt = 5000, rc;
1617	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1618	struct ecore_raw_obj *raw = &o->raw;
1619
1620	while (cnt--) {
1621		/* Wait for the current command to complete */
1622		rc = raw->wait_comp(sc, raw);
1623		if (rc)
1624			return rc;
1625
1626		/* Wait until there are no pending commands */
1627		if (!ecore_exe_queue_empty(exeq))
1628			ECORE_WAIT(sc, 1000);
1629		else
1630			return ECORE_SUCCESS;
1631	}
1632
1633	return ECORE_TIMEOUT;
1634}
1635
1636static int __ecore_vlan_mac_execute_step(struct bxe_softc *sc,
1637					 struct ecore_vlan_mac_obj *o,
1638					 unsigned long *ramrod_flags)
1639{
1640	int rc = ECORE_SUCCESS;
1641
1642	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1643
1644	ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock\n");
1645	rc = __ecore_vlan_mac_h_write_trylock(sc, o);
1646
1647	if (rc != ECORE_SUCCESS) {
1648		__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
1649
1650		/** Calling function should not diffrentiate between this case
1651		 *  and the case in which there is already a pending ramrod
1652		 */
1653		rc = ECORE_PENDING;
1654	} else {
1655		rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
1656	}
1657	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1658
1659	return rc;
1660}
1661
1662/**
1663 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
1664 *
1665 * @sc:		device handle
1666 * @o:		ecore_vlan_mac_obj
1667 * @cqe:
1668 * @cont:	if TRUE schedule next execution chunk
1669 *
1670 */
1671static int ecore_complete_vlan_mac(struct bxe_softc *sc,
1672				   struct ecore_vlan_mac_obj *o,
1673				   union event_ring_elem *cqe,
1674				   unsigned long *ramrod_flags)
1675{
1676	struct ecore_raw_obj *r = &o->raw;
1677	int rc;
1678
1679	/* Clearing the pending list & raw state should be made
1680	 * atomically (as execution flow assumes they represent the same)
1681	 */
1682	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1683
1684	/* Reset pending list */
1685	__ecore_exe_queue_reset_pending(sc, &o->exe_queue);
1686
1687	/* Clear pending */
1688	r->clear_pending(r);
1689
1690	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1691
1692	/* If ramrod failed this is most likely a SW bug */
1693	if (cqe->message.error)
1694		return ECORE_INVAL;
1695
1696	/* Run the next bulk of pending commands if requested */
1697	if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
1698		rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
1699		if (rc < 0)
1700			return rc;
1701	}
1702
1703	/* If there is more work to do return PENDING */
1704	if (!ecore_exe_queue_empty(&o->exe_queue))
1705		return ECORE_PENDING;
1706
1707	return ECORE_SUCCESS;
1708}
1709
1710/**
1711 * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1712 *
1713 * @sc:		device handle
1714 * @o:		ecore_qable_obj
1715 * @elem:	ecore_exeq_elem
1716 */
1717static int ecore_optimize_vlan_mac(struct bxe_softc *sc,
1718				   union ecore_qable_obj *qo,
1719				   struct ecore_exeq_elem *elem)
1720{
1721	struct ecore_exeq_elem query, *pos;
1722	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1723	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1724
1725	ECORE_MEMCPY(&query, elem, sizeof(query));
1726
1727	switch (elem->cmd_data.vlan_mac.cmd) {
1728	case ECORE_VLAN_MAC_ADD:
1729		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1730		break;
1731	case ECORE_VLAN_MAC_DEL:
1732		query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1733		break;
1734	default:
1735		/* Don't handle anything other than ADD or DEL */
1736		return 0;
1737	}
1738
1739	/* If we found the appropriate element - delete it */
1740	pos = exeq->get(exeq, &query);
1741	if (pos) {
1742
1743		/* Return the credit of the optimized command */
1744		if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1745				     &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1746			if ((query.cmd_data.vlan_mac.cmd ==
1747			     ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1748				ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
1749				return ECORE_INVAL;
1750			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1751				ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
1752				return ECORE_INVAL;
1753			}
1754		}
1755
1756		ECORE_MSG(sc, "Optimizing %s command\n",
1757			  (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1758			  "ADD" : "DEL");
1759
1760		ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1761		ecore_exe_queue_free_elem(sc, pos);
1762		return 1;
1763	}
1764
1765	return 0;
1766}
1767
1768/**
1769 * ecore_vlan_mac_get_registry_elem - prepare a registry element
1770 *
1771 * @sc:	  device handle
1772 * @o:
1773 * @elem:
1774 * @restore:
1775 * @re:
1776 *
1777 * prepare a registry element according to the current command request.
1778 */
1779static inline int ecore_vlan_mac_get_registry_elem(
1780	struct bxe_softc *sc,
1781	struct ecore_vlan_mac_obj *o,
1782	struct ecore_exeq_elem *elem,
1783	bool restore,
1784	struct ecore_vlan_mac_registry_elem **re)
1785{
1786	enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1787	struct ecore_vlan_mac_registry_elem *reg_elem;
1788
1789	/* Allocate a new registry element if needed. */
1790	if (!restore &&
1791	    ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1792		reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1793		if (!reg_elem)
1794			return ECORE_NOMEM;
1795
1796		/* Get a new CAM offset */
1797		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1798			/* This shall never happen, because we have checked the
1799			 * CAM availability in the 'validate'.
1800			 */
1801			ECORE_DBG_BREAK_IF(1);
1802			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1803			return ECORE_INVAL;
1804		}
1805
1806		ECORE_MSG(sc, "Got cam offset %d\n", reg_elem->cam_offset);
1807
1808		/* Set a VLAN-MAC data */
1809		ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1810			  sizeof(reg_elem->u));
1811
1812		/* Copy the flags (needed for DEL and RESTORE flows) */
1813		reg_elem->vlan_mac_flags =
1814			elem->cmd_data.vlan_mac.vlan_mac_flags;
1815	} else /* DEL, RESTORE */
1816		reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1817
1818	*re = reg_elem;
1819	return ECORE_SUCCESS;
1820}
1821
1822/**
1823 * ecore_execute_vlan_mac - execute vlan mac command
1824 *
1825 * @sc:			device handle
1826 * @qo:
1827 * @exe_chunk:
1828 * @ramrod_flags:
1829 *
1830 * go and send a ramrod!
1831 */
1832static int ecore_execute_vlan_mac(struct bxe_softc *sc,
1833				  union ecore_qable_obj *qo,
1834				  ecore_list_t *exe_chunk,
1835				  unsigned long *ramrod_flags)
1836{
1837	struct ecore_exeq_elem *elem;
1838	struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1839	struct ecore_raw_obj *r = &o->raw;
1840	int rc, idx = 0;
1841	bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1842	bool drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1843	struct ecore_vlan_mac_registry_elem *reg_elem;
1844	enum ecore_vlan_mac_cmd cmd;
1845
1846	/* If DRIVER_ONLY execution is requested, cleanup a registry
1847	 * and exit. Otherwise send a ramrod to FW.
1848	 */
1849	if (!drv_only) {
1850		ECORE_DBG_BREAK_IF(r->check_pending(r));
1851
1852		/* Set pending */
1853		r->set_pending(r);
1854
1855		/* Fill the ramrod data */
1856		ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1857					  struct ecore_exeq_elem) {
1858			cmd = elem->cmd_data.vlan_mac.cmd;
1859			/* We will add to the target object in MOVE command, so
1860			 * change the object for a CAM search.
1861			 */
1862			if (cmd == ECORE_VLAN_MAC_MOVE)
1863				cam_obj = elem->cmd_data.vlan_mac.target_obj;
1864			else
1865				cam_obj = o;
1866
1867			rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1868							      elem, restore,
1869							      &reg_elem);
1870			if (rc)
1871				goto error_exit;
1872
1873			ECORE_DBG_BREAK_IF(!reg_elem);
1874
1875			/* Push a new entry into the registry */
1876			if (!restore &&
1877			    ((cmd == ECORE_VLAN_MAC_ADD) ||
1878			    (cmd == ECORE_VLAN_MAC_MOVE)))
1879				ECORE_LIST_PUSH_HEAD(&reg_elem->link,
1880						     &cam_obj->head);
1881
1882			/* Configure a single command in a ramrod data buffer */
1883			o->set_one_rule(sc, o, elem, idx,
1884					reg_elem->cam_offset);
1885
1886			/* MOVE command consumes 2 entries in the ramrod data */
1887			if (cmd == ECORE_VLAN_MAC_MOVE)
1888				idx += 2;
1889			else
1890				idx++;
1891		}
1892
1893		/*
1894		 *  No need for an explicit memory barrier here as long we would
1895		 *  need to ensure the ordering of writing to the SPQ element
1896		 *  and updating of the SPQ producer which involves a memory
1897		 *  read and we will have to put a full memory barrier there
1898		 *  (inside ecore_sp_post()).
1899		 */
1900
1901		rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1902				   r->rdata_mapping,
1903				   ETH_CONNECTION_TYPE);
1904		if (rc)
1905			goto error_exit;
1906	}
1907
1908	/* Now, when we are done with the ramrod - clean up the registry */
1909	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1910				  struct ecore_exeq_elem) {
1911		cmd = elem->cmd_data.vlan_mac.cmd;
1912		if ((cmd == ECORE_VLAN_MAC_DEL) ||
1913		    (cmd == ECORE_VLAN_MAC_MOVE)) {
1914			reg_elem = o->check_del(sc, o,
1915						&elem->cmd_data.vlan_mac.u);
1916
1917			ECORE_DBG_BREAK_IF(!reg_elem);
1918
1919			o->put_cam_offset(o, reg_elem->cam_offset);
1920			ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
1921			ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1922		}
1923	}
1924
1925	if (!drv_only)
1926		return ECORE_PENDING;
1927	else
1928		return ECORE_SUCCESS;
1929
1930error_exit:
1931	r->clear_pending(r);
1932
1933	/* Cleanup a registry in case of a failure */
1934	ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1935				  struct ecore_exeq_elem) {
1936		cmd = elem->cmd_data.vlan_mac.cmd;
1937
1938		if (cmd == ECORE_VLAN_MAC_MOVE)
1939			cam_obj = elem->cmd_data.vlan_mac.target_obj;
1940		else
1941			cam_obj = o;
1942
1943		/* Delete all newly added above entries */
1944		if (!restore &&
1945		    ((cmd == ECORE_VLAN_MAC_ADD) ||
1946		    (cmd == ECORE_VLAN_MAC_MOVE))) {
1947			reg_elem = o->check_del(sc, cam_obj,
1948						&elem->cmd_data.vlan_mac.u);
1949			if (reg_elem) {
1950				ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
1951							&cam_obj->head);
1952				ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1953			}
1954		}
1955	}
1956
1957	return rc;
1958}
1959
1960static inline int ecore_vlan_mac_push_new_cmd(
1961	struct bxe_softc *sc,
1962	struct ecore_vlan_mac_ramrod_params *p)
1963{
1964	struct ecore_exeq_elem *elem;
1965	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1966	bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1967
1968	/* Allocate the execution queue element */
1969	elem = ecore_exe_queue_alloc_elem(sc);
1970	if (!elem)
1971		return ECORE_NOMEM;
1972
1973	/* Set the command 'length' */
1974	switch (p->user_req.cmd) {
1975	case ECORE_VLAN_MAC_MOVE:
1976		elem->cmd_len = 2;
1977		break;
1978	default:
1979		elem->cmd_len = 1;
1980	}
1981
1982	/* Fill the object specific info */
1983	ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1984
1985	/* Try to add a new command to the pending list */
1986	return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1987}
1988
1989/**
1990 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1991 *
1992 * @sc:	  device handle
1993 * @p:
1994 *
1995 */
1996int ecore_config_vlan_mac(struct bxe_softc *sc,
1997			   struct ecore_vlan_mac_ramrod_params *p)
1998{
1999	int rc = ECORE_SUCCESS;
2000	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
2001	unsigned long *ramrod_flags = &p->ramrod_flags;
2002	bool cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
2003	struct ecore_raw_obj *raw = &o->raw;
2004
2005	/*
2006	 * Add new elements to the execution list for commands that require it.
2007	 */
2008	if (!cont) {
2009		rc = ecore_vlan_mac_push_new_cmd(sc, p);
2010		if (rc)
2011			return rc;
2012	}
2013
2014	/* If nothing will be executed further in this iteration we want to
2015	 * return PENDING if there are pending commands
2016	 */
2017	if (!ecore_exe_queue_empty(&o->exe_queue))
2018		rc = ECORE_PENDING;
2019
2020	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
2021		ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
2022		raw->clear_pending(raw);
2023	}
2024
2025	/* Execute commands if required */
2026	if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
2027	    ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
2028		rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
2029						   &p->ramrod_flags);
2030		if (rc < 0)
2031			return rc;
2032	}
2033
2034	/* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
2035	 * then user want to wait until the last command is done.
2036	 */
2037	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2038		/* Wait maximum for the current exe_queue length iterations plus
2039		 * one (for the current pending command).
2040		 */
2041		int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
2042
2043		while (!ecore_exe_queue_empty(&o->exe_queue) &&
2044		       max_iterations--) {
2045
2046			/* Wait for the current command to complete */
2047			rc = raw->wait_comp(sc, raw);
2048			if (rc)
2049				return rc;
2050
2051			/* Make a next step */
2052			rc = __ecore_vlan_mac_execute_step(sc,
2053							   p->vlan_mac_obj,
2054							   &p->ramrod_flags);
2055			if (rc < 0)
2056				return rc;
2057		}
2058
2059		return ECORE_SUCCESS;
2060	}
2061
2062	return rc;
2063}
2064
2065/**
2066 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2067 *
2068 * @sc:			device handle
2069 * @o:
2070 * @vlan_mac_flags:
2071 * @ramrod_flags:	execution flags to be used for this deletion
2072 *
2073 * if the last operation has completed successfully and there are no
2074 * more elements left, positive value if the last operation has completed
2075 * successfully and there are more previously configured elements, negative
2076 * value is current operation has failed.
2077 */
2078static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
2079				  struct ecore_vlan_mac_obj *o,
2080				  unsigned long *vlan_mac_flags,
2081				  unsigned long *ramrod_flags)
2082{
2083	struct ecore_vlan_mac_registry_elem *pos = NULL;
2084	struct ecore_vlan_mac_ramrod_params p;
2085	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
2086	struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
2087	int read_lock;
2088	int rc = 0;
2089
2090	/* Clear pending commands first */
2091
2092	ECORE_SPIN_LOCK_BH(&exeq->lock);
2093
2094	ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
2095				       &exeq->exe_queue, link,
2096				       struct ecore_exeq_elem) {
2097		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
2098		    *vlan_mac_flags) {
2099			rc = exeq->remove(sc, exeq->owner, exeq_pos);
2100			if (rc) {
2101				ECORE_ERR("Failed to remove command\n");
2102				ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2103				return rc;
2104			}
2105			ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
2106						&exeq->exe_queue);
2107			ecore_exe_queue_free_elem(sc, exeq_pos);
2108		}
2109	}
2110
2111	ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2112
2113	/* Prepare a command request */
2114	ECORE_MEMSET(&p, 0, sizeof(p));
2115	p.vlan_mac_obj = o;
2116	p.ramrod_flags = *ramrod_flags;
2117	p.user_req.cmd = ECORE_VLAN_MAC_DEL;
2118
2119	/* Add all but the last VLAN-MAC to the execution queue without actually
2120	 * execution anything.
2121	 */
2122	ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
2123	ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
2124	ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2125
2126	ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2127	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
2128	if (read_lock != ECORE_SUCCESS)
2129		return read_lock;
2130
2131	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
2132				  struct ecore_vlan_mac_registry_elem) {
2133		if (pos->vlan_mac_flags == *vlan_mac_flags) {
2134			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2135			ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
2136			rc = ecore_config_vlan_mac(sc, &p);
2137			if (rc < 0) {
2138				ECORE_ERR("Failed to add a new DEL command\n");
2139				ecore_vlan_mac_h_read_unlock(sc, o);
2140				return rc;
2141			}
2142		}
2143	}
2144
2145	ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2146	ecore_vlan_mac_h_read_unlock(sc, o);
2147
2148	p.ramrod_flags = *ramrod_flags;
2149	ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2150
2151	return ecore_config_vlan_mac(sc, &p);
2152}
2153
2154static inline void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
2155	uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping, int state,
2156	unsigned long *pstate, ecore_obj_type type)
2157{
2158	raw->func_id = func_id;
2159	raw->cid = cid;
2160	raw->cl_id = cl_id;
2161	raw->rdata = rdata;
2162	raw->rdata_mapping = rdata_mapping;
2163	raw->state = state;
2164	raw->pstate = pstate;
2165	raw->obj_type = type;
2166	raw->check_pending = ecore_raw_check_pending;
2167	raw->clear_pending = ecore_raw_clear_pending;
2168	raw->set_pending = ecore_raw_set_pending;
2169	raw->wait_comp = ecore_raw_wait;
2170}
2171
2172static inline void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
2173	uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping,
2174	int state, unsigned long *pstate, ecore_obj_type type,
2175	struct ecore_credit_pool_obj *macs_pool,
2176	struct ecore_credit_pool_obj *vlans_pool)
2177{
2178	ECORE_LIST_INIT(&o->head);
2179	o->head_reader = 0;
2180	o->head_exe_request = FALSE;
2181	o->saved_ramrod_flags = 0;
2182
2183	o->macs_pool = macs_pool;
2184	o->vlans_pool = vlans_pool;
2185
2186	o->delete_all = ecore_vlan_mac_del_all;
2187	o->restore = ecore_vlan_mac_restore;
2188	o->complete = ecore_complete_vlan_mac;
2189	o->wait = ecore_wait_vlan_mac;
2190
2191	ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2192			   state, pstate, type);
2193}
2194
2195void ecore_init_mac_obj(struct bxe_softc *sc,
2196			struct ecore_vlan_mac_obj *mac_obj,
2197			uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2198			ecore_dma_addr_t rdata_mapping, int state,
2199			unsigned long *pstate, ecore_obj_type type,
2200			struct ecore_credit_pool_obj *macs_pool)
2201{
2202	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
2203
2204	ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2205				   rdata_mapping, state, pstate, type,
2206				   macs_pool, NULL);
2207
2208	/* CAM credit pool handling */
2209	mac_obj->get_credit = ecore_get_credit_mac;
2210	mac_obj->put_credit = ecore_put_credit_mac;
2211	mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2212	mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2213
2214	if (CHIP_IS_E1x(sc)) {
2215		mac_obj->set_one_rule      = ecore_set_one_mac_e1x;
2216		mac_obj->check_del         = ecore_check_mac_del;
2217		mac_obj->check_add         = ecore_check_mac_add;
2218		mac_obj->check_move        = ecore_check_move_always_err;
2219		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2220
2221		/* Exe Queue */
2222		ecore_exe_queue_init(sc,
2223				     &mac_obj->exe_queue, 1, qable_obj,
2224				     ecore_validate_vlan_mac,
2225				     ecore_remove_vlan_mac,
2226				     ecore_optimize_vlan_mac,
2227				     ecore_execute_vlan_mac,
2228				     ecore_exeq_get_mac);
2229	} else {
2230		mac_obj->set_one_rule      = ecore_set_one_mac_e2;
2231		mac_obj->check_del         = ecore_check_mac_del;
2232		mac_obj->check_add         = ecore_check_mac_add;
2233		mac_obj->check_move        = ecore_check_move;
2234		mac_obj->ramrod_cmd        =
2235			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2236		mac_obj->get_n_elements    = ecore_get_n_elements;
2237
2238		/* Exe Queue */
2239		ecore_exe_queue_init(sc,
2240				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2241				     qable_obj, ecore_validate_vlan_mac,
2242				     ecore_remove_vlan_mac,
2243				     ecore_optimize_vlan_mac,
2244				     ecore_execute_vlan_mac,
2245				     ecore_exeq_get_mac);
2246	}
2247}
2248
2249void ecore_init_vlan_obj(struct bxe_softc *sc,
2250			 struct ecore_vlan_mac_obj *vlan_obj,
2251			 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2252			 ecore_dma_addr_t rdata_mapping, int state,
2253			 unsigned long *pstate, ecore_obj_type type,
2254			 struct ecore_credit_pool_obj *vlans_pool)
2255{
2256	union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj;
2257
2258	ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2259				   rdata_mapping, state, pstate, type, NULL,
2260				   vlans_pool);
2261
2262	vlan_obj->get_credit = ecore_get_credit_vlan;
2263	vlan_obj->put_credit = ecore_put_credit_vlan;
2264	vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan;
2265	vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan;
2266
2267	if (CHIP_IS_E1x(sc)) {
2268		ECORE_ERR("Do not support chips others than E2 and newer\n");
2269		ECORE_BUG();
2270	} else {
2271		vlan_obj->set_one_rule      = ecore_set_one_vlan_e2;
2272		vlan_obj->check_del         = ecore_check_vlan_del;
2273		vlan_obj->check_add         = ecore_check_vlan_add;
2274		vlan_obj->check_move        = ecore_check_move;
2275		vlan_obj->ramrod_cmd        =
2276			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2277		vlan_obj->get_n_elements    = ecore_get_n_elements;
2278
2279		/* Exe Queue */
2280		ecore_exe_queue_init(sc,
2281				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2282				     qable_obj, ecore_validate_vlan_mac,
2283				     ecore_remove_vlan_mac,
2284				     ecore_optimize_vlan_mac,
2285				     ecore_execute_vlan_mac,
2286				     ecore_exeq_get_vlan);
2287	}
2288}
2289
2290void ecore_init_vlan_mac_obj(struct bxe_softc *sc,
2291			     struct ecore_vlan_mac_obj *vlan_mac_obj,
2292			     uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2293			     ecore_dma_addr_t rdata_mapping, int state,
2294			     unsigned long *pstate, ecore_obj_type type,
2295			     struct ecore_credit_pool_obj *macs_pool,
2296			     struct ecore_credit_pool_obj *vlans_pool)
2297{
2298	union ecore_qable_obj *qable_obj =
2299		(union ecore_qable_obj *)vlan_mac_obj;
2300
2301	ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2302				   rdata_mapping, state, pstate, type,
2303				   macs_pool, vlans_pool);
2304
2305	/* CAM pool handling */
2306	vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
2307	vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2308	/* CAM offset is relevant for 57710 and 57711 chips only which have a
2309	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2310	 * will be taken from MACs' pool object only.
2311	 */
2312	vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2313	vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2314
2315	if (CHIP_IS_E1(sc)) {
2316		ECORE_ERR("Do not support chips others than E2\n");
2317		ECORE_BUG();
2318	} else if (CHIP_IS_E1H(sc)) {
2319		vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e1h;
2320		vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2321		vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2322		vlan_mac_obj->check_move        = ecore_check_move_always_err;
2323		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
2324
2325		/* Exe Queue */
2326		ecore_exe_queue_init(sc,
2327				     &vlan_mac_obj->exe_queue, 1, qable_obj,
2328				     ecore_validate_vlan_mac,
2329				     ecore_remove_vlan_mac,
2330				     ecore_optimize_vlan_mac,
2331				     ecore_execute_vlan_mac,
2332				     ecore_exeq_get_vlan_mac);
2333	} else {
2334		vlan_mac_obj->set_one_rule      = ecore_set_one_vlan_mac_e2;
2335		vlan_mac_obj->check_del         = ecore_check_vlan_mac_del;
2336		vlan_mac_obj->check_add         = ecore_check_vlan_mac_add;
2337		vlan_mac_obj->check_move        = ecore_check_move;
2338		vlan_mac_obj->ramrod_cmd        =
2339			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2340
2341		/* Exe Queue */
2342		ecore_exe_queue_init(sc,
2343				     &vlan_mac_obj->exe_queue,
2344				     CLASSIFY_RULES_COUNT,
2345				     qable_obj, ecore_validate_vlan_mac,
2346				     ecore_remove_vlan_mac,
2347				     ecore_optimize_vlan_mac,
2348				     ecore_execute_vlan_mac,
2349				     ecore_exeq_get_vlan_mac);
2350	}
2351}
2352
2353/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2354static inline void __storm_memset_mac_filters(struct bxe_softc *sc,
2355			struct tstorm_eth_mac_filter_config *mac_filters,
2356			uint16_t pf_id)
2357{
2358	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2359
2360	uint32_t addr = BAR_TSTRORM_INTMEM +
2361			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2362
2363	ecore_storm_memset_struct(sc, addr, size, (uint32_t *)mac_filters);
2364}
2365
2366static int ecore_set_rx_mode_e1x(struct bxe_softc *sc,
2367				 struct ecore_rx_mode_ramrod_params *p)
2368{
2369	/* update the sc MAC filter structure */
2370	uint32_t mask = (1 << p->cl_id);
2371
2372	struct tstorm_eth_mac_filter_config *mac_filters =
2373		(struct tstorm_eth_mac_filter_config *)p->rdata;
2374
2375	/* initial setting is drop-all */
2376	uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
2377	uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2378	uint8_t unmatched_unicast = 0;
2379
2380    /* In e1x there we only take into account rx accept flag since tx switching
2381     * isn't enabled. */
2382	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
2383		/* accept matched ucast */
2384		drop_all_ucast = 0;
2385
2386	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
2387		/* accept matched mcast */
2388		drop_all_mcast = 0;
2389
2390	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2391		/* accept all mcast */
2392		drop_all_ucast = 0;
2393		accp_all_ucast = 1;
2394	}
2395	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2396		/* accept all mcast */
2397		drop_all_mcast = 0;
2398		accp_all_mcast = 1;
2399	}
2400	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
2401		/* accept (all) bcast */
2402		accp_all_bcast = 1;
2403	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2404		/* accept unmatched unicasts */
2405		unmatched_unicast = 1;
2406
2407	mac_filters->ucast_drop_all = drop_all_ucast ?
2408		mac_filters->ucast_drop_all | mask :
2409		mac_filters->ucast_drop_all & ~mask;
2410
2411	mac_filters->mcast_drop_all = drop_all_mcast ?
2412		mac_filters->mcast_drop_all | mask :
2413		mac_filters->mcast_drop_all & ~mask;
2414
2415	mac_filters->ucast_accept_all = accp_all_ucast ?
2416		mac_filters->ucast_accept_all | mask :
2417		mac_filters->ucast_accept_all & ~mask;
2418
2419	mac_filters->mcast_accept_all = accp_all_mcast ?
2420		mac_filters->mcast_accept_all | mask :
2421		mac_filters->mcast_accept_all & ~mask;
2422
2423	mac_filters->bcast_accept_all = accp_all_bcast ?
2424		mac_filters->bcast_accept_all | mask :
2425		mac_filters->bcast_accept_all & ~mask;
2426
2427	mac_filters->unmatched_unicast = unmatched_unicast ?
2428		mac_filters->unmatched_unicast | mask :
2429		mac_filters->unmatched_unicast & ~mask;
2430
2431	ECORE_MSG(sc, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2432			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2433	   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2434	   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2435	   mac_filters->bcast_accept_all);
2436
2437	/* write the MAC filter structure*/
2438	__storm_memset_mac_filters(sc, mac_filters, p->func_id);
2439
2440	/* The operation is completed */
2441	ECORE_CLEAR_BIT(p->state, p->pstate);
2442	ECORE_SMP_MB_AFTER_CLEAR_BIT();
2443
2444	return ECORE_SUCCESS;
2445}
2446
2447/* Setup ramrod data */
2448static inline void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid,
2449				struct eth_classify_header *hdr,
2450				uint8_t rule_cnt)
2451{
2452	hdr->echo = ECORE_CPU_TO_LE32(cid);
2453	hdr->rule_cnt = rule_cnt;
2454}
2455
2456static inline void ecore_rx_mode_set_cmd_state_e2(struct bxe_softc *sc,
2457				unsigned long *accept_flags,
2458				struct eth_filter_rules_cmd *cmd,
2459				bool clear_accept_all)
2460{
2461	uint16_t state;
2462
2463	/* start with 'drop-all' */
2464	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2465		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2466
2467	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2468		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2469
2470	if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2471		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2472
2473	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2474		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2475		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2476	}
2477
2478	if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2479		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2480		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2481	}
2482	if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2483		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2484
2485	if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2486		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2487		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2488	}
2489	if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2490		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2491
2492	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2493	if (clear_accept_all) {
2494		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2495		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2496		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2497		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2498	}
2499
2500	cmd->state = ECORE_CPU_TO_LE16(state);
2501}
2502
2503static int ecore_set_rx_mode_e2(struct bxe_softc *sc,
2504				struct ecore_rx_mode_ramrod_params *p)
2505{
2506	struct eth_filter_rules_ramrod_data *data = p->rdata;
2507	int rc;
2508	uint8_t rule_idx = 0;
2509
2510	/* Reset the ramrod data buffer */
2511	ECORE_MEMSET(data, 0, sizeof(*data));
2512
2513	/* Setup ramrod data */
2514
2515	/* Tx (internal switching) */
2516	if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2517		data->rules[rule_idx].client_id = p->cl_id;
2518		data->rules[rule_idx].func_id = p->func_id;
2519
2520		data->rules[rule_idx].cmd_general_data =
2521			ETH_FILTER_RULES_CMD_TX_CMD;
2522
2523		ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
2524					       &(data->rules[rule_idx++]),
2525					       FALSE);
2526	}
2527
2528	/* Rx */
2529	if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2530		data->rules[rule_idx].client_id = p->cl_id;
2531		data->rules[rule_idx].func_id = p->func_id;
2532
2533		data->rules[rule_idx].cmd_general_data =
2534			ETH_FILTER_RULES_CMD_RX_CMD;
2535
2536		ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
2537					       &(data->rules[rule_idx++]),
2538					       FALSE);
2539	}
2540
2541	/* If FCoE Queue configuration has been requested configure the Rx and
2542	 * internal switching modes for this queue in separate rules.
2543	 *
2544	 * FCoE queue shell never be set to ACCEPT_ALL packets of any sort:
2545	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2546	 */
2547	if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2548		/*  Tx (internal switching) */
2549		if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2550			data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2551			data->rules[rule_idx].func_id = p->func_id;
2552
2553			data->rules[rule_idx].cmd_general_data =
2554						ETH_FILTER_RULES_CMD_TX_CMD;
2555
2556			ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
2557						       &(data->rules[rule_idx]),
2558						       TRUE);
2559			rule_idx++;
2560		}
2561
2562		/* Rx */
2563		if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2564			data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2565			data->rules[rule_idx].func_id = p->func_id;
2566
2567			data->rules[rule_idx].cmd_general_data =
2568						ETH_FILTER_RULES_CMD_RX_CMD;
2569
2570			ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
2571						       &(data->rules[rule_idx]),
2572						       TRUE);
2573			rule_idx++;
2574		}
2575	}
2576
2577	/* Set the ramrod header (most importantly - number of rules to
2578	 * configure).
2579	 */
2580	ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2581
2582	ECORE_MSG(sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2583		  data->header.rule_cnt, p->rx_accept_flags,
2584		  p->tx_accept_flags);
2585
2586	/* No need for an explicit memory barrier here as long we would
2587	 * need to ensure the ordering of writing to the SPQ element
2588	 * and updating of the SPQ producer which involves a memory
2589	 * read and we will have to put a full memory barrier there
2590	 * (inside ecore_sp_post()).
2591	 */
2592
2593	/* Send a ramrod */
2594	rc = ecore_sp_post(sc,
2595			   RAMROD_CMD_ID_ETH_FILTER_RULES,
2596			   p->cid,
2597			   p->rdata_mapping,
2598			   ETH_CONNECTION_TYPE);
2599	if (rc)
2600		return rc;
2601
2602	/* Ramrod completion is pending */
2603	return ECORE_PENDING;
2604}
2605
2606static int ecore_wait_rx_mode_comp_e2(struct bxe_softc *sc,
2607				      struct ecore_rx_mode_ramrod_params *p)
2608{
2609	return ecore_state_wait(sc, p->state, p->pstate);
2610}
2611
2612static int ecore_empty_rx_mode_wait(struct bxe_softc *sc,
2613				    struct ecore_rx_mode_ramrod_params *p)
2614{
2615	/* Do nothing */
2616	return ECORE_SUCCESS;
2617}
2618
2619int ecore_config_rx_mode(struct bxe_softc *sc,
2620			 struct ecore_rx_mode_ramrod_params *p)
2621{
2622	int rc;
2623
2624	/* Configure the new classification in the chip */
2625	rc = p->rx_mode_obj->config_rx_mode(sc, p);
2626	if (rc < 0)
2627		return rc;
2628
2629	/* Wait for a ramrod completion if was requested */
2630	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2631		rc = p->rx_mode_obj->wait_comp(sc, p);
2632		if (rc)
2633			return rc;
2634	}
2635
2636	return rc;
2637}
2638
2639void ecore_init_rx_mode_obj(struct bxe_softc *sc,
2640			    struct ecore_rx_mode_obj *o)
2641{
2642	if (CHIP_IS_E1x(sc)) {
2643		o->wait_comp      = ecore_empty_rx_mode_wait;
2644		o->config_rx_mode = ecore_set_rx_mode_e1x;
2645	} else {
2646		o->wait_comp      = ecore_wait_rx_mode_comp_e2;
2647		o->config_rx_mode = ecore_set_rx_mode_e2;
2648	}
2649}
2650
2651/********************* Multicast verbs: SET, CLEAR ****************************/
2652static inline uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
2653{
2654	return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2655}
2656
2657struct ecore_mcast_mac_elem {
2658	ecore_list_entry_t link;
2659	uint8_t mac[ETH_ALEN];
2660	uint8_t pad[2]; /* For a natural alignment of the following buffer */
2661};
2662
2663struct ecore_pending_mcast_cmd {
2664	ecore_list_entry_t link;
2665	int type; /* ECORE_MCAST_CMD_X */
2666	union {
2667		ecore_list_t macs_head;
2668		uint32_t macs_num; /* Needed for DEL command */
2669		int next_bin; /* Needed for RESTORE flow with aprox match */
2670	} data;
2671
2672	bool done; /* set to TRUE, when the command has been handled,
2673		    * practically used in 57712 handling only, where one pending
2674		    * command may be handled in a few operations. As long as for
2675		    * other chips every operation handling is completed in a
2676		    * single ramrod, there is no need to utilize this field.
2677		    */
2678};
2679
2680static int ecore_mcast_wait(struct bxe_softc *sc,
2681			    struct ecore_mcast_obj *o)
2682{
2683	if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2684			o->raw.wait_comp(sc, &o->raw))
2685		return ECORE_TIMEOUT;
2686
2687	return ECORE_SUCCESS;
2688}
2689
2690static int ecore_mcast_enqueue_cmd(struct bxe_softc *sc,
2691				   struct ecore_mcast_obj *o,
2692				   struct ecore_mcast_ramrod_params *p,
2693				   enum ecore_mcast_cmd cmd)
2694{
2695	int total_sz;
2696	struct ecore_pending_mcast_cmd *new_cmd;
2697	struct ecore_mcast_mac_elem *cur_mac = NULL;
2698	struct ecore_mcast_list_elem *pos;
2699	int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2700			     p->mcast_list_len : 0);
2701
2702	/* If the command is empty ("handle pending commands only"), break */
2703	if (!p->mcast_list_len)
2704		return ECORE_SUCCESS;
2705
2706	total_sz = sizeof(*new_cmd) +
2707		macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2708
2709	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2710	new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2711
2712	if (!new_cmd)
2713		return ECORE_NOMEM;
2714
2715	ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d\n",
2716		  cmd, macs_list_len);
2717
2718	ECORE_LIST_INIT(&new_cmd->data.macs_head);
2719
2720	new_cmd->type = cmd;
2721	new_cmd->done = FALSE;
2722
2723	switch (cmd) {
2724	case ECORE_MCAST_CMD_ADD:
2725		cur_mac = (struct ecore_mcast_mac_elem *)
2726			  ((uint8_t *)new_cmd + sizeof(*new_cmd));
2727
2728		/* Push the MACs of the current command into the pending command
2729		 * MACs list: FIFO
2730		 */
2731		ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2732					  struct ecore_mcast_list_elem) {
2733			ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2734			ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2735					     &new_cmd->data.macs_head);
2736			cur_mac++;
2737		}
2738
2739		break;
2740
2741	case ECORE_MCAST_CMD_DEL:
2742		new_cmd->data.macs_num = p->mcast_list_len;
2743		break;
2744
2745	case ECORE_MCAST_CMD_RESTORE:
2746		new_cmd->data.next_bin = 0;
2747		break;
2748
2749	default:
2750		ECORE_FREE(sc, new_cmd, total_sz);
2751		ECORE_ERR("Unknown command: %d\n", cmd);
2752		return ECORE_INVAL;
2753	}
2754
2755	/* Push the new pending command to the tail of the pending list: FIFO */
2756	ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2757
2758	o->set_sched(o);
2759
2760	return ECORE_PENDING;
2761}
2762
2763/**
2764 * ecore_mcast_get_next_bin - get the next set bin (index)
2765 *
2766 * @o:
2767 * @last:	index to start looking from (including)
2768 *
2769 * returns the next found (set) bin or a negative value if none is found.
2770 */
2771static inline int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2772{
2773	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2774
2775	for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2776		if (o->registry.aprox_match.vec[i])
2777			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2778				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2779				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2780						       vec, cur_bit)) {
2781					return cur_bit;
2782				}
2783			}
2784		inner_start = 0;
2785	}
2786
2787	/* None found */
2788	return -1;
2789}
2790
2791/**
2792 * ecore_mcast_clear_first_bin - find the first set bin and clear it
2793 *
2794 * @o:
2795 *
2796 * returns the index of the found bin or -1 if none is found
2797 */
2798static inline int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2799{
2800	int cur_bit = ecore_mcast_get_next_bin(o, 0);
2801
2802	if (cur_bit >= 0)
2803		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2804
2805	return cur_bit;
2806}
2807
2808static inline uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2809{
2810	struct ecore_raw_obj *raw = &o->raw;
2811	uint8_t rx_tx_flag = 0;
2812
2813	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2814	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2815		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2816
2817	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2818	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2819		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2820
2821	return rx_tx_flag;
2822}
2823
2824static void ecore_mcast_set_one_rule_e2(struct bxe_softc *sc,
2825					struct ecore_mcast_obj *o, int idx,
2826					union ecore_mcast_config_data *cfg_data,
2827					enum ecore_mcast_cmd cmd)
2828{
2829	struct ecore_raw_obj *r = &o->raw;
2830	struct eth_multicast_rules_ramrod_data *data =
2831		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2832	uint8_t func_id = r->func_id;
2833	uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2834	int bin;
2835
2836	if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2837		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2838
2839	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2840
2841	/* Get a bin and update a bins' vector */
2842	switch (cmd) {
2843	case ECORE_MCAST_CMD_ADD:
2844		bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2845		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2846		break;
2847
2848	case ECORE_MCAST_CMD_DEL:
2849		/* If there were no more bins to clear
2850		 * (ecore_mcast_clear_first_bin() returns -1) then we would
2851		 * clear any (0xff) bin.
2852		 * See ecore_mcast_validate_e2() for explanation when it may
2853		 * happen.
2854		 */
2855		bin = ecore_mcast_clear_first_bin(o);
2856		break;
2857
2858	case ECORE_MCAST_CMD_RESTORE:
2859		bin = cfg_data->bin;
2860		break;
2861
2862	default:
2863		ECORE_ERR("Unknown command: %d\n", cmd);
2864		return;
2865	}
2866
2867	ECORE_MSG(sc, "%s bin %d\n",
2868		  ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2869		   "Setting"  : "Clearing"), bin);
2870
2871	data->rules[idx].bin_id    = (uint8_t)bin;
2872	data->rules[idx].func_id   = func_id;
2873	data->rules[idx].engine_id = o->engine_id;
2874}
2875
2876/**
2877 * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2878 *
2879 * @sc:		device handle
2880 * @o:
2881 * @start_bin:	index in the registry to start from (including)
2882 * @rdata_idx:	index in the ramrod data to start from
2883 *
2884 * returns last handled bin index or -1 if all bins have been handled
2885 */
2886static inline int ecore_mcast_handle_restore_cmd_e2(
2887	struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_bin,
2888	int *rdata_idx)
2889{
2890	int cur_bin, cnt = *rdata_idx;
2891	union ecore_mcast_config_data cfg_data = {NULL};
2892
2893	/* go through the registry and configure the bins from it */
2894	for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2895	    cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2896
2897		cfg_data.bin = (uint8_t)cur_bin;
2898		o->set_one_rule(sc, o, cnt, &cfg_data,
2899				ECORE_MCAST_CMD_RESTORE);
2900
2901		cnt++;
2902
2903		ECORE_MSG(sc, "About to configure a bin %d\n", cur_bin);
2904
2905		/* Break if we reached the maximum number
2906		 * of rules.
2907		 */
2908		if (cnt >= o->max_cmd_len)
2909			break;
2910	}
2911
2912	*rdata_idx = cnt;
2913
2914	return cur_bin;
2915}
2916
2917static inline void ecore_mcast_hdl_pending_add_e2(struct bxe_softc *sc,
2918	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2919	int *line_idx)
2920{
2921	struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2922	int cnt = *line_idx;
2923	union ecore_mcast_config_data cfg_data = {NULL};
2924
2925	ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2926		&cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) {
2927
2928		cfg_data.mac = &pmac_pos->mac[0];
2929		o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2930
2931		cnt++;
2932
2933		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
2934			  pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2935
2936		ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2937					&cmd_pos->data.macs_head);
2938
2939		/* Break if we reached the maximum number
2940		 * of rules.
2941		 */
2942		if (cnt >= o->max_cmd_len)
2943			break;
2944	}
2945
2946	*line_idx = cnt;
2947
2948	/* if no more MACs to configure - we are done */
2949	if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2950		cmd_pos->done = TRUE;
2951}
2952
2953static inline void ecore_mcast_hdl_pending_del_e2(struct bxe_softc *sc,
2954	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2955	int *line_idx)
2956{
2957	int cnt = *line_idx;
2958
2959	while (cmd_pos->data.macs_num) {
2960		o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2961
2962		cnt++;
2963
2964		cmd_pos->data.macs_num--;
2965
2966		  ECORE_MSG(sc, "Deleting MAC. %d left,cnt is %d\n",
2967				  cmd_pos->data.macs_num, cnt);
2968
2969		/* Break if we reached the maximum
2970		 * number of rules.
2971		 */
2972		if (cnt >= o->max_cmd_len)
2973			break;
2974	}
2975
2976	*line_idx = cnt;
2977
2978	/* If we cleared all bins - we are done */
2979	if (!cmd_pos->data.macs_num)
2980		cmd_pos->done = TRUE;
2981}
2982
2983static inline void ecore_mcast_hdl_pending_restore_e2(struct bxe_softc *sc,
2984	struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
2985	int *line_idx)
2986{
2987	cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2988						line_idx);
2989
2990	if (cmd_pos->data.next_bin < 0)
2991		/* If o->set_restore returned -1 we are done */
2992		cmd_pos->done = TRUE;
2993	else
2994		/* Start from the next bin next time */
2995		cmd_pos->data.next_bin++;
2996}
2997
2998static inline int ecore_mcast_handle_pending_cmds_e2(struct bxe_softc *sc,
2999				struct ecore_mcast_ramrod_params *p)
3000{
3001	struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
3002	int cnt = 0;
3003	struct ecore_mcast_obj *o = p->mcast_obj;
3004
3005	ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
3006		&o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) {
3007		switch (cmd_pos->type) {
3008		case ECORE_MCAST_CMD_ADD:
3009			ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
3010			break;
3011
3012		case ECORE_MCAST_CMD_DEL:
3013			ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
3014			break;
3015
3016		case ECORE_MCAST_CMD_RESTORE:
3017			ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
3018							   &cnt);
3019			break;
3020
3021		default:
3022			ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3023			return ECORE_INVAL;
3024		}
3025
3026		/* If the command has been completed - remove it from the list
3027		 * and free the memory
3028		 */
3029		if (cmd_pos->done) {
3030			ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
3031						&o->pending_cmds_head);
3032			ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3033		}
3034
3035		/* Break if we reached the maximum number of rules */
3036		if (cnt >= o->max_cmd_len)
3037			break;
3038	}
3039
3040	return cnt;
3041}
3042
3043static inline void ecore_mcast_hdl_add(struct bxe_softc *sc,
3044	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3045	int *line_idx)
3046{
3047	struct ecore_mcast_list_elem *mlist_pos;
3048	union ecore_mcast_config_data cfg_data = {NULL};
3049	int cnt = *line_idx;
3050
3051	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3052				  struct ecore_mcast_list_elem) {
3053		cfg_data.mac = mlist_pos->mac;
3054		o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
3055
3056		cnt++;
3057
3058		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3059			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
3060	}
3061
3062	*line_idx = cnt;
3063}
3064
3065static inline void ecore_mcast_hdl_del(struct bxe_softc *sc,
3066	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3067	int *line_idx)
3068{
3069	int cnt = *line_idx, i;
3070
3071	for (i = 0; i < p->mcast_list_len; i++) {
3072		o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
3073
3074		cnt++;
3075
3076		ECORE_MSG(sc, "Deleting MAC. %d left\n",
3077			  p->mcast_list_len - i - 1);
3078	}
3079
3080	*line_idx = cnt;
3081}
3082
3083/**
3084 * ecore_mcast_handle_current_cmd -
3085 *
3086 * @sc:		device handle
3087 * @p:
3088 * @cmd:
3089 * @start_cnt:	first line in the ramrod data that may be used
3090 *
3091 * This function is called iff there is enough place for the current command in
3092 * the ramrod data.
3093 * Returns number of lines filled in the ramrod data in total.
3094 */
3095static inline int ecore_mcast_handle_current_cmd(struct bxe_softc *sc,
3096			struct ecore_mcast_ramrod_params *p,
3097			enum ecore_mcast_cmd cmd,
3098			int start_cnt)
3099{
3100	struct ecore_mcast_obj *o = p->mcast_obj;
3101	int cnt = start_cnt;
3102
3103	ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3104
3105	switch (cmd) {
3106	case ECORE_MCAST_CMD_ADD:
3107		ecore_mcast_hdl_add(sc, o, p, &cnt);
3108		break;
3109
3110	case ECORE_MCAST_CMD_DEL:
3111		ecore_mcast_hdl_del(sc, o, p, &cnt);
3112		break;
3113
3114	case ECORE_MCAST_CMD_RESTORE:
3115		o->hdl_restore(sc, o, 0, &cnt);
3116		break;
3117
3118	default:
3119		ECORE_ERR("Unknown command: %d\n", cmd);
3120		return ECORE_INVAL;
3121	}
3122
3123	/* The current command has been handled */
3124	p->mcast_list_len = 0;
3125
3126	return cnt;
3127}
3128
3129static int ecore_mcast_validate_e2(struct bxe_softc *sc,
3130				   struct ecore_mcast_ramrod_params *p,
3131				   enum ecore_mcast_cmd cmd)
3132{
3133	struct ecore_mcast_obj *o = p->mcast_obj;
3134	int reg_sz = o->get_registry_size(o);
3135
3136	switch (cmd) {
3137	/* DEL command deletes all currently configured MACs */
3138	case ECORE_MCAST_CMD_DEL:
3139		o->set_registry_size(o, 0);
3140		/* Don't break */
3141
3142	/* RESTORE command will restore the entire multicast configuration */
3143	case ECORE_MCAST_CMD_RESTORE:
3144		/* Here we set the approximate amount of work to do, which in
3145		 * fact may be only less as some MACs in postponed ADD
3146		 * command(s) scheduled before this command may fall into
3147		 * the same bin and the actual number of bins set in the
3148		 * registry would be less than we estimated here. See
3149		 * ecore_mcast_set_one_rule_e2() for further details.
3150		 */
3151		p->mcast_list_len = reg_sz;
3152		break;
3153
3154	case ECORE_MCAST_CMD_ADD:
3155	case ECORE_MCAST_CMD_CONT:
3156		/* Here we assume that all new MACs will fall into new bins.
3157		 * However we will correct the real registry size after we
3158		 * handle all pending commands.
3159		 */
3160		o->set_registry_size(o, reg_sz + p->mcast_list_len);
3161		break;
3162
3163	default:
3164		ECORE_ERR("Unknown command: %d\n", cmd);
3165		return ECORE_INVAL;
3166	}
3167
3168	/* Increase the total number of MACs pending to be configured */
3169	o->total_pending_num += p->mcast_list_len;
3170
3171	return ECORE_SUCCESS;
3172}
3173
3174static void ecore_mcast_revert_e2(struct bxe_softc *sc,
3175				      struct ecore_mcast_ramrod_params *p,
3176				      int old_num_bins)
3177{
3178	struct ecore_mcast_obj *o = p->mcast_obj;
3179
3180	o->set_registry_size(o, old_num_bins);
3181	o->total_pending_num -= p->mcast_list_len;
3182}
3183
3184/**
3185 * ecore_mcast_set_rdata_hdr_e2 - sets a header values
3186 *
3187 * @sc:		device handle
3188 * @p:
3189 * @len:	number of rules to handle
3190 */
3191static inline void ecore_mcast_set_rdata_hdr_e2(struct bxe_softc *sc,
3192					struct ecore_mcast_ramrod_params *p,
3193					uint8_t len)
3194{
3195	struct ecore_raw_obj *r = &p->mcast_obj->raw;
3196	struct eth_multicast_rules_ramrod_data *data =
3197		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
3198
3199	data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3200					(ECORE_FILTER_MCAST_PENDING <<
3201					 ECORE_SWCID_SHIFT));
3202	data->header.rule_cnt = len;
3203}
3204
3205/**
3206 * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3207 *
3208 * @sc:		device handle
3209 * @o:
3210 *
3211 * Recalculate the actual number of set bins in the registry using Brian
3212 * Kernighan's algorithm: it's execution complexity is as a number of set bins.
3213 *
3214 * returns 0 for the compliance with ecore_mcast_refresh_registry_e1().
3215 */
3216static inline int ecore_mcast_refresh_registry_e2(struct bxe_softc *sc,
3217						  struct ecore_mcast_obj *o)
3218{
3219	int i, cnt = 0;
3220	uint64_t elem;
3221
3222	for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
3223		elem = o->registry.aprox_match.vec[i];
3224		for (; elem; cnt++)
3225			elem &= elem - 1;
3226	}
3227
3228	o->set_registry_size(o, cnt);
3229
3230	return ECORE_SUCCESS;
3231}
3232
3233static int ecore_mcast_setup_e2(struct bxe_softc *sc,
3234				struct ecore_mcast_ramrod_params *p,
3235				enum ecore_mcast_cmd cmd)
3236{
3237	struct ecore_raw_obj *raw = &p->mcast_obj->raw;
3238	struct ecore_mcast_obj *o = p->mcast_obj;
3239	struct eth_multicast_rules_ramrod_data *data =
3240		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
3241	int cnt = 0, rc;
3242
3243	/* Reset the ramrod data buffer */
3244	ECORE_MEMSET(data, 0, sizeof(*data));
3245
3246	cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
3247
3248	/* If there are no more pending commands - clear SCHEDULED state */
3249	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3250		o->clear_sched(o);
3251
3252	/* The below may be TRUE iff there was enough room in ramrod
3253	 * data for all pending commands and for the current
3254	 * command. Otherwise the current command would have been added
3255	 * to the pending commands and p->mcast_list_len would have been
3256	 * zeroed.
3257	 */
3258	if (p->mcast_list_len > 0)
3259		cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
3260
3261	/* We've pulled out some MACs - update the total number of
3262	 * outstanding.
3263	 */
3264	o->total_pending_num -= cnt;
3265
3266	/* send a ramrod */
3267	ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
3268	ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
3269
3270	ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t)cnt);
3271
3272	/* Update a registry size if there are no more pending operations.
3273	 *
3274	 * We don't want to change the value of the registry size if there are
3275	 * pending operations because we want it to always be equal to the
3276	 * exact or the approximate number (see ecore_mcast_validate_e2()) of
3277	 * set bins after the last requested operation in order to properly
3278	 * evaluate the size of the next DEL/RESTORE operation.
3279	 *
3280	 * Note that we update the registry itself during command(s) handling
3281	 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
3282	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3283	 * with a limited amount of update commands (per MAC/bin) and we don't
3284	 * know in this scope what the actual state of bins configuration is
3285	 * going to be after this ramrod.
3286	 */
3287	if (!o->total_pending_num)
3288		ecore_mcast_refresh_registry_e2(sc, o);
3289
3290	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
3291	 * RAMROD_PENDING status immediately.
3292	 */
3293	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3294		raw->clear_pending(raw);
3295		return ECORE_SUCCESS;
3296	} else {
3297		/* No need for an explicit memory barrier here as long we would
3298		 * need to ensure the ordering of writing to the SPQ element
3299		 * and updating of the SPQ producer which involves a memory
3300		 * read and we will have to put a full memory barrier there
3301		 * (inside ecore_sp_post()).
3302		 */
3303
3304		/* Send a ramrod */
3305		rc = ecore_sp_post( sc,
3306				    RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3307				    raw->cid,
3308				    raw->rdata_mapping,
3309				    ETH_CONNECTION_TYPE);
3310		if (rc)
3311			return rc;
3312
3313		/* Ramrod completion is pending */
3314		return ECORE_PENDING;
3315	}
3316}
3317
3318static int ecore_mcast_validate_e1h(struct bxe_softc *sc,
3319				    struct ecore_mcast_ramrod_params *p,
3320				    enum ecore_mcast_cmd cmd)
3321{
3322	/* Mark, that there is a work to do */
3323	if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
3324		p->mcast_list_len = 1;
3325
3326	return ECORE_SUCCESS;
3327}
3328
3329static void ecore_mcast_revert_e1h(struct bxe_softc *sc,
3330				       struct ecore_mcast_ramrod_params *p,
3331				       int old_num_bins)
3332{
3333	/* Do nothing */
3334}
3335
3336#define ECORE_57711_SET_MC_FILTER(filter, bit) \
3337do { \
3338	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3339} while (0)
3340
3341static inline void ecore_mcast_hdl_add_e1h(struct bxe_softc *sc,
3342					   struct ecore_mcast_obj *o,
3343					   struct ecore_mcast_ramrod_params *p,
3344					   uint32_t *mc_filter)
3345{
3346	struct ecore_mcast_list_elem *mlist_pos;
3347	int bit;
3348
3349	ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3350				  struct ecore_mcast_list_elem) {
3351		bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
3352		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3353
3354		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n",
3355			  mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit);
3356
3357		/* bookkeeping... */
3358		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3359				  bit);
3360	}
3361}
3362
3363static inline void ecore_mcast_hdl_restore_e1h(struct bxe_softc *sc,
3364	struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3365	uint32_t *mc_filter)
3366{
3367	int bit;
3368
3369	for (bit = ecore_mcast_get_next_bin(o, 0);
3370	     bit >= 0;
3371	     bit = ecore_mcast_get_next_bin(o, bit + 1)) {
3372		ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3373		ECORE_MSG(sc, "About to set bin %d\n", bit);
3374	}
3375}
3376
3377/* On 57711 we write the multicast MACs' approximate match
3378 * table by directly into the TSTORM's internal RAM. So we don't
3379 * really need to handle any tricks to make it work.
3380 */
3381static int ecore_mcast_setup_e1h(struct bxe_softc *sc,
3382				 struct ecore_mcast_ramrod_params *p,
3383				 enum ecore_mcast_cmd cmd)
3384{
3385	int i;
3386	struct ecore_mcast_obj *o = p->mcast_obj;
3387	struct ecore_raw_obj *r = &o->raw;
3388
3389	/* If CLEAR_ONLY has been requested - clear the registry
3390	 * and clear a pending bit.
3391	 */
3392	if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3393		uint32_t mc_filter[ECORE_MC_HASH_SIZE] = {0};
3394
3395		/* Set the multicast filter bits before writing it into
3396		 * the internal memory.
3397		 */
3398		switch (cmd) {
3399		case ECORE_MCAST_CMD_ADD:
3400			ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
3401			break;
3402
3403		case ECORE_MCAST_CMD_DEL:
3404			ECORE_MSG(sc,
3405				  "Invalidating multicast MACs configuration\n");
3406
3407			/* clear the registry */
3408			ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3409			       sizeof(o->registry.aprox_match.vec));
3410			break;
3411
3412		case ECORE_MCAST_CMD_RESTORE:
3413			ecore_mcast_hdl_restore_e1h(sc, o, p, mc_filter);
3414			break;
3415
3416		default:
3417			ECORE_ERR("Unknown command: %d\n", cmd);
3418			return ECORE_INVAL;
3419		}
3420
3421		/* Set the mcast filter in the internal memory */
3422		for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3423			REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3424	} else
3425		/* clear the registry */
3426		ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3427		       sizeof(o->registry.aprox_match.vec));
3428
3429	/* We are done */
3430	r->clear_pending(r);
3431
3432	return ECORE_SUCCESS;
3433}
3434
3435static int ecore_mcast_validate_e1(struct bxe_softc *sc,
3436				   struct ecore_mcast_ramrod_params *p,
3437				   enum ecore_mcast_cmd cmd)
3438{
3439	struct ecore_mcast_obj *o = p->mcast_obj;
3440	int reg_sz = o->get_registry_size(o);
3441
3442	switch (cmd) {
3443	/* DEL command deletes all currently configured MACs */
3444	case ECORE_MCAST_CMD_DEL:
3445		o->set_registry_size(o, 0);
3446		/* Don't break */
3447
3448	/* RESTORE command will restore the entire multicast configuration */
3449	case ECORE_MCAST_CMD_RESTORE:
3450		p->mcast_list_len = reg_sz;
3451		  ECORE_MSG(sc, "Command %d, p->mcast_list_len=%d\n",
3452				  cmd, p->mcast_list_len);
3453		break;
3454
3455	case ECORE_MCAST_CMD_ADD:
3456	case ECORE_MCAST_CMD_CONT:
3457		/* Multicast MACs on 57710 are configured as unicast MACs and
3458		 * there is only a limited number of CAM entries for that
3459		 * matter.
3460		 */
3461		if (p->mcast_list_len > o->max_cmd_len) {
3462			ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n",
3463				  o->max_cmd_len);
3464			return ECORE_INVAL;
3465		}
3466		/* Every configured MAC should be cleared if DEL command is
3467		 * called. Only the last ADD command is relevant as long as
3468		 * every ADD commands overrides the previous configuration.
3469		 */
3470		ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3471		if (p->mcast_list_len > 0)
3472			o->set_registry_size(o, p->mcast_list_len);
3473
3474		break;
3475
3476	default:
3477		ECORE_ERR("Unknown command: %d\n", cmd);
3478		return ECORE_INVAL;
3479	}
3480
3481	/* We want to ensure that commands are executed one by one for 57710.
3482	 * Therefore each none-empty command will consume o->max_cmd_len.
3483	 */
3484	if (p->mcast_list_len)
3485		o->total_pending_num += o->max_cmd_len;
3486
3487	return ECORE_SUCCESS;
3488}
3489
3490static void ecore_mcast_revert_e1(struct bxe_softc *sc,
3491				      struct ecore_mcast_ramrod_params *p,
3492				      int old_num_macs)
3493{
3494	struct ecore_mcast_obj *o = p->mcast_obj;
3495
3496	o->set_registry_size(o, old_num_macs);
3497
3498	/* If current command hasn't been handled yet and we are
3499	 * here means that it's meant to be dropped and we have to
3500	 * update the number of outstanding MACs accordingly.
3501	 */
3502	if (p->mcast_list_len)
3503		o->total_pending_num -= o->max_cmd_len;
3504}
3505
3506static void ecore_mcast_set_one_rule_e1(struct bxe_softc *sc,
3507					struct ecore_mcast_obj *o, int idx,
3508					union ecore_mcast_config_data *cfg_data,
3509					enum ecore_mcast_cmd cmd)
3510{
3511	struct ecore_raw_obj *r = &o->raw;
3512	struct mac_configuration_cmd *data =
3513		(struct mac_configuration_cmd *)(r->rdata);
3514
3515	/* copy mac */
3516	if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) {
3517		ecore_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3518				      &data->config_table[idx].middle_mac_addr,
3519				      &data->config_table[idx].lsb_mac_addr,
3520				      cfg_data->mac);
3521
3522		data->config_table[idx].vlan_id = 0;
3523		data->config_table[idx].pf_id = r->func_id;
3524		data->config_table[idx].clients_bit_vector =
3525			ECORE_CPU_TO_LE32(1 << r->cl_id);
3526
3527		ECORE_SET_FLAG(data->config_table[idx].flags,
3528			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3529			       T_ETH_MAC_COMMAND_SET);
3530	}
3531}
3532
3533/**
3534 * ecore_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3535 *
3536 * @sc:		device handle
3537 * @p:
3538 * @len:	number of rules to handle
3539 */
3540static inline void ecore_mcast_set_rdata_hdr_e1(struct bxe_softc *sc,
3541					struct ecore_mcast_ramrod_params *p,
3542					uint8_t len)
3543{
3544	struct ecore_raw_obj *r = &p->mcast_obj->raw;
3545	struct mac_configuration_cmd *data =
3546		(struct mac_configuration_cmd *)(r->rdata);
3547
3548	uint8_t offset = (CHIP_REV_IS_SLOW(sc) ?
3549		     ECORE_MAX_EMUL_MULTI*(1 + r->func_id) :
3550		     ECORE_MAX_MULTICAST*(1 + r->func_id));
3551
3552	data->hdr.offset = offset;
3553	data->hdr.client_id = ECORE_CPU_TO_LE16(0xff);
3554	data->hdr.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3555				     (ECORE_FILTER_MCAST_PENDING <<
3556				      ECORE_SWCID_SHIFT));
3557	data->hdr.length = len;
3558}
3559
3560/**
3561 * ecore_mcast_handle_restore_cmd_e1 - restore command for 57710
3562 *
3563 * @sc:		device handle
3564 * @o:
3565 * @start_idx:	index in the registry to start from
3566 * @rdata_idx:	index in the ramrod data to start from
3567 *
3568 * restore command for 57710 is like all other commands - always a stand alone
3569 * command - start_idx and rdata_idx will always be 0. This function will always
3570 * succeed.
3571 * returns -1 to comply with 57712 variant.
3572 */
3573static inline int ecore_mcast_handle_restore_cmd_e1(
3574	struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_idx,
3575	int *rdata_idx)
3576{
3577	struct ecore_mcast_mac_elem *elem;
3578	int i = 0;
3579	union ecore_mcast_config_data cfg_data = {NULL};
3580
3581	/* go through the registry and configure the MACs from it. */
3582	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->registry.exact_match.macs, link,
3583				  struct ecore_mcast_mac_elem) {
3584		cfg_data.mac = &elem->mac[0];
3585		o->set_one_rule(sc, o, i, &cfg_data, ECORE_MCAST_CMD_RESTORE);
3586
3587		i++;
3588
3589		ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3590			  cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]);
3591	}
3592
3593	*rdata_idx = i;
3594
3595	return -1;
3596}
3597
3598static inline int ecore_mcast_handle_pending_cmds_e1(
3599	struct bxe_softc *sc, struct ecore_mcast_ramrod_params *p)
3600{
3601	struct ecore_pending_mcast_cmd *cmd_pos;
3602	struct ecore_mcast_mac_elem *pmac_pos;
3603	struct ecore_mcast_obj *o = p->mcast_obj;
3604	union ecore_mcast_config_data cfg_data = {NULL};
3605	int cnt = 0;
3606
3607	/* If nothing to be done - return */
3608	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3609		return 0;
3610
3611	/* Handle the first command */
3612	cmd_pos = ECORE_LIST_FIRST_ENTRY(&o->pending_cmds_head,
3613					 struct ecore_pending_mcast_cmd, link);
3614
3615	switch (cmd_pos->type) {
3616	case ECORE_MCAST_CMD_ADD:
3617		ECORE_LIST_FOR_EACH_ENTRY(pmac_pos, &cmd_pos->data.macs_head,
3618					  link, struct ecore_mcast_mac_elem) {
3619			cfg_data.mac = &pmac_pos->mac[0];
3620			o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
3621
3622			cnt++;
3623
3624			ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3625				  pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
3626		}
3627		break;
3628
3629	case ECORE_MCAST_CMD_DEL:
3630		cnt = cmd_pos->data.macs_num;
3631		ECORE_MSG(sc, "About to delete %d multicast MACs\n", cnt);
3632		break;
3633
3634	case ECORE_MCAST_CMD_RESTORE:
3635		o->hdl_restore(sc, o, 0, &cnt);
3636		break;
3637
3638	default:
3639		ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3640		return ECORE_INVAL;
3641	}
3642
3643	ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, &o->pending_cmds_head);
3644	ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3645
3646	return cnt;
3647}
3648
3649/**
3650 * ecore_get_fw_mac_addr - revert the ecore_set_fw_mac_addr().
3651 *
3652 * @fw_hi:
3653 * @fw_mid:
3654 * @fw_lo:
3655 * @mac:
3656 */
3657static inline void ecore_get_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
3658					 uint16_t *fw_lo, uint8_t *mac)
3659{
3660	mac[1] = ((uint8_t *)fw_hi)[0];
3661	mac[0] = ((uint8_t *)fw_hi)[1];
3662	mac[3] = ((uint8_t *)fw_mid)[0];
3663	mac[2] = ((uint8_t *)fw_mid)[1];
3664	mac[5] = ((uint8_t *)fw_lo)[0];
3665	mac[4] = ((uint8_t *)fw_lo)[1];
3666}
3667
3668/**
3669 * ecore_mcast_refresh_registry_e1 -
3670 *
3671 * @sc:		device handle
3672 * @cnt:
3673 *
3674 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3675 * and update the registry correspondingly: if ADD - allocate a memory and add
3676 * the entries to the registry (list), if DELETE - clear the registry and free
3677 * the memory.
3678 */
3679static inline int ecore_mcast_refresh_registry_e1(struct bxe_softc *sc,
3680						  struct ecore_mcast_obj *o)
3681{
3682	struct ecore_raw_obj *raw = &o->raw;
3683	struct ecore_mcast_mac_elem *elem;
3684	struct mac_configuration_cmd *data =
3685			(struct mac_configuration_cmd *)(raw->rdata);
3686
3687	/* If first entry contains a SET bit - the command was ADD,
3688	 * otherwise - DEL_ALL
3689	 */
3690	if (ECORE_GET_FLAG(data->config_table[0].flags,
3691			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3692		int i, len = data->hdr.length;
3693
3694		/* Break if it was a RESTORE command */
3695		if (!ECORE_LIST_IS_EMPTY(&o->registry.exact_match.macs))
3696			return ECORE_SUCCESS;
3697
3698		elem = ECORE_CALLOC(len, sizeof(*elem), GFP_ATOMIC, sc);
3699		if (!elem) {
3700			ECORE_ERR("Failed to allocate registry memory\n");
3701			return ECORE_NOMEM;
3702		}
3703
3704		for (i = 0; i < len; i++, elem++) {
3705			ecore_get_fw_mac_addr(
3706				&data->config_table[i].msb_mac_addr,
3707				&data->config_table[i].middle_mac_addr,
3708				&data->config_table[i].lsb_mac_addr,
3709				elem->mac);
3710			ECORE_MSG(sc, "Adding registry entry for [%02x:%02x:%02x:%02x:%02x:%02x]\n",
3711				  elem->mac[0], elem->mac[1], elem->mac[2], elem->mac[3], elem->mac[4], elem->mac[5]);
3712			ECORE_LIST_PUSH_TAIL(&elem->link,
3713					     &o->registry.exact_match.macs);
3714		}
3715	} else {
3716		elem = ECORE_LIST_FIRST_ENTRY(&o->registry.exact_match.macs,
3717					      struct ecore_mcast_mac_elem,
3718					      link);
3719		ECORE_MSG(sc, "Deleting a registry\n");
3720		ECORE_FREE(sc, elem, sizeof(*elem));
3721		ECORE_LIST_INIT(&o->registry.exact_match.macs);
3722	}
3723
3724	return ECORE_SUCCESS;
3725}
3726
3727static int ecore_mcast_setup_e1(struct bxe_softc *sc,
3728				struct ecore_mcast_ramrod_params *p,
3729				enum ecore_mcast_cmd cmd)
3730{
3731	struct ecore_mcast_obj *o = p->mcast_obj;
3732	struct ecore_raw_obj *raw = &o->raw;
3733	struct mac_configuration_cmd *data =
3734		(struct mac_configuration_cmd *)(raw->rdata);
3735	int cnt = 0, i, rc;
3736
3737	/* Reset the ramrod data buffer */
3738	ECORE_MEMSET(data, 0, sizeof(*data));
3739
3740	/* First set all entries as invalid */
3741	for (i = 0; i < o->max_cmd_len ; i++)
3742		ECORE_SET_FLAG(data->config_table[i].flags,
3743			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3744			T_ETH_MAC_COMMAND_INVALIDATE);
3745
3746	/* Handle pending commands first */
3747	cnt = ecore_mcast_handle_pending_cmds_e1(sc, p);
3748
3749	/* If there are no more pending commands - clear SCHEDULED state */
3750	if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3751		o->clear_sched(o);
3752
3753	/* The below may be TRUE iff there were no pending commands */
3754	if (!cnt)
3755		cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, 0);
3756
3757	/* For 57710 every command has o->max_cmd_len length to ensure that
3758	 * commands are done one at a time.
3759	 */
3760	o->total_pending_num -= o->max_cmd_len;
3761
3762	/* send a ramrod */
3763
3764	ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
3765
3766	/* Set ramrod header (in particular, a number of entries to update) */
3767	ecore_mcast_set_rdata_hdr_e1(sc, p, (uint8_t)cnt);
3768
3769	/* update a registry: we need the registry contents to be always up
3770	 * to date in order to be able to execute a RESTORE opcode. Here
3771	 * we use the fact that for 57710 we sent one command at a time
3772	 * hence we may take the registry update out of the command handling
3773	 * and do it in a simpler way here.
3774	 */
3775	rc = ecore_mcast_refresh_registry_e1(sc, o);
3776	if (rc)
3777		return rc;
3778
3779	/* If CLEAR_ONLY was requested - don't send a ramrod and clear
3780	 * RAMROD_PENDING status immediately.
3781	 */
3782	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3783		raw->clear_pending(raw);
3784		return ECORE_SUCCESS;
3785	} else {
3786		/* No need for an explicit memory barrier here as long we would
3787		 * need to ensure the ordering of writing to the SPQ element
3788		 * and updating of the SPQ producer which involves a memory
3789		 * read and we will have to put a full memory barrier there
3790		 * (inside ecore_sp_post()).
3791		 */
3792
3793		/* Send a ramrod */
3794		rc = ecore_sp_post( sc,
3795				    RAMROD_CMD_ID_ETH_SET_MAC,
3796				    raw->cid,
3797				    raw->rdata_mapping,
3798				    ETH_CONNECTION_TYPE);
3799		if (rc)
3800			return rc;
3801
3802		/* Ramrod completion is pending */
3803		return ECORE_PENDING;
3804	}
3805}
3806
3807static int ecore_mcast_get_registry_size_exact(struct ecore_mcast_obj *o)
3808{
3809	return o->registry.exact_match.num_macs_set;
3810}
3811
3812static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3813{
3814	return o->registry.aprox_match.num_bins_set;
3815}
3816
3817static void ecore_mcast_set_registry_size_exact(struct ecore_mcast_obj *o,
3818						int n)
3819{
3820	o->registry.exact_match.num_macs_set = n;
3821}
3822
3823static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3824						int n)
3825{
3826	o->registry.aprox_match.num_bins_set = n;
3827}
3828
3829int ecore_config_mcast(struct bxe_softc *sc,
3830		       struct ecore_mcast_ramrod_params *p,
3831		       enum ecore_mcast_cmd cmd)
3832{
3833	struct ecore_mcast_obj *o = p->mcast_obj;
3834	struct ecore_raw_obj *r = &o->raw;
3835	int rc = 0, old_reg_size;
3836
3837	/* This is needed to recover number of currently configured mcast macs
3838	 * in case of failure.
3839	 */
3840	old_reg_size = o->get_registry_size(o);
3841
3842	/* Do some calculations and checks */
3843	rc = o->validate(sc, p, cmd);
3844	if (rc)
3845		return rc;
3846
3847	/* Return if there is no work to do */
3848	if ((!p->mcast_list_len) && (!o->check_sched(o)))
3849		return ECORE_SUCCESS;
3850
3851	ECORE_MSG(sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
3852		  o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3853
3854	/* Enqueue the current command to the pending list if we can't complete
3855	 * it in the current iteration
3856	 */
3857	if (r->check_pending(r) ||
3858	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3859		rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3860		if (rc < 0)
3861			goto error_exit1;
3862
3863		/* As long as the current command is in a command list we
3864		 * don't need to handle it separately.
3865		 */
3866		p->mcast_list_len = 0;
3867	}
3868
3869	if (!r->check_pending(r)) {
3870
3871		/* Set 'pending' state */
3872		r->set_pending(r);
3873
3874		/* Configure the new classification in the chip */
3875		rc = o->config_mcast(sc, p, cmd);
3876		if (rc < 0)
3877			goto error_exit2;
3878
3879		/* Wait for a ramrod completion if was requested */
3880		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3881			rc = o->wait_comp(sc, o);
3882	}
3883
3884	return rc;
3885
3886error_exit2:
3887	r->clear_pending(r);
3888
3889error_exit1:
3890	o->revert(sc, p, old_reg_size);
3891
3892	return rc;
3893}
3894
3895static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3896{
3897	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3898	ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3899	ECORE_SMP_MB_AFTER_CLEAR_BIT();
3900}
3901
3902static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3903{
3904	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3905	ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3906	ECORE_SMP_MB_AFTER_CLEAR_BIT();
3907}
3908
3909static bool ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3910{
3911	return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3912}
3913
3914static bool ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3915{
3916	return o->raw.check_pending(&o->raw) || o->check_sched(o);
3917}
3918
3919void ecore_init_mcast_obj(struct bxe_softc *sc,
3920			  struct ecore_mcast_obj *mcast_obj,
3921			  uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
3922			  uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
3923			  int state, unsigned long *pstate, ecore_obj_type type)
3924{
3925	ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3926
3927	ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3928			   rdata, rdata_mapping, state, pstate, type);
3929
3930	mcast_obj->engine_id = engine_id;
3931
3932	ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3933
3934	mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3935	mcast_obj->check_sched = ecore_mcast_check_sched;
3936	mcast_obj->set_sched = ecore_mcast_set_sched;
3937	mcast_obj->clear_sched = ecore_mcast_clear_sched;
3938
3939	if (CHIP_IS_E1(sc)) {
3940		mcast_obj->config_mcast      = ecore_mcast_setup_e1;
3941		mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
3942		mcast_obj->hdl_restore       =
3943			ecore_mcast_handle_restore_cmd_e1;
3944		mcast_obj->check_pending     = ecore_mcast_check_pending;
3945
3946		if (CHIP_REV_IS_SLOW(sc))
3947			mcast_obj->max_cmd_len = ECORE_MAX_EMUL_MULTI;
3948		else
3949			mcast_obj->max_cmd_len = ECORE_MAX_MULTICAST;
3950
3951		mcast_obj->wait_comp         = ecore_mcast_wait;
3952		mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e1;
3953		mcast_obj->validate          = ecore_mcast_validate_e1;
3954		mcast_obj->revert            = ecore_mcast_revert_e1;
3955		mcast_obj->get_registry_size =
3956			ecore_mcast_get_registry_size_exact;
3957		mcast_obj->set_registry_size =
3958			ecore_mcast_set_registry_size_exact;
3959
3960		/* 57710 is the only chip that uses the exact match for mcast
3961		 * at the moment.
3962		 */
3963		ECORE_LIST_INIT(&mcast_obj->registry.exact_match.macs);
3964
3965	} else if (CHIP_IS_E1H(sc)) {
3966		mcast_obj->config_mcast  = ecore_mcast_setup_e1h;
3967		mcast_obj->enqueue_cmd   = NULL;
3968		mcast_obj->hdl_restore   = NULL;
3969		mcast_obj->check_pending = ecore_mcast_check_pending;
3970
3971		/* 57711 doesn't send a ramrod, so it has unlimited credit
3972		 * for one command.
3973		 */
3974		mcast_obj->max_cmd_len       = -1;
3975		mcast_obj->wait_comp         = ecore_mcast_wait;
3976		mcast_obj->set_one_rule      = NULL;
3977		mcast_obj->validate          = ecore_mcast_validate_e1h;
3978		mcast_obj->revert            = ecore_mcast_revert_e1h;
3979		mcast_obj->get_registry_size =
3980			ecore_mcast_get_registry_size_aprox;
3981		mcast_obj->set_registry_size =
3982			ecore_mcast_set_registry_size_aprox;
3983	} else {
3984		mcast_obj->config_mcast      = ecore_mcast_setup_e2;
3985		mcast_obj->enqueue_cmd       = ecore_mcast_enqueue_cmd;
3986		mcast_obj->hdl_restore       =
3987			ecore_mcast_handle_restore_cmd_e2;
3988		mcast_obj->check_pending     = ecore_mcast_check_pending;
3989		/* TODO: There should be a proper HSI define for this number!!!
3990		 */
3991		mcast_obj->max_cmd_len       = 16;
3992		mcast_obj->wait_comp         = ecore_mcast_wait;
3993		mcast_obj->set_one_rule      = ecore_mcast_set_one_rule_e2;
3994		mcast_obj->validate          = ecore_mcast_validate_e2;
3995		mcast_obj->revert            = ecore_mcast_revert_e2;
3996		mcast_obj->get_registry_size =
3997			ecore_mcast_get_registry_size_aprox;
3998		mcast_obj->set_registry_size =
3999			ecore_mcast_set_registry_size_aprox;
4000	}
4001}
4002
4003/*************************** Credit handling **********************************/
4004
4005/**
4006 * atomic_add_ifless - add if the result is less than a given value.
4007 *
4008 * @v:	pointer of type ecore_atomic_t
4009 * @a:	the amount to add to v...
4010 * @u:	...if (v + a) is less than u.
4011 *
4012 * returns TRUE if (v + a) was less than u, and FALSE otherwise.
4013 *
4014 */
4015static inline bool __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
4016{
4017	int c, old;
4018
4019	c = ECORE_ATOMIC_READ(v);
4020	for (;;) {
4021		if (ECORE_UNLIKELY(c + a >= u))
4022			return FALSE;
4023
4024		old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
4025		if (ECORE_LIKELY(old == c))
4026			break;
4027		c = old;
4028	}
4029
4030	return TRUE;
4031}
4032
4033/**
4034 * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
4035 *
4036 * @v:	pointer of type ecore_atomic_t
4037 * @a:	the amount to dec from v...
4038 * @u:	...if (v - a) is more or equal than u.
4039 *
4040 * returns TRUE if (v - a) was more or equal than u, and FALSE
4041 * otherwise.
4042 */
4043static inline bool __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
4044{
4045	int c, old;
4046
4047	c = ECORE_ATOMIC_READ(v);
4048	for (;;) {
4049		if (ECORE_UNLIKELY(c - a < u))
4050			return FALSE;
4051
4052		old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
4053		if (ECORE_LIKELY(old == c))
4054			break;
4055		c = old;
4056	}
4057
4058	return TRUE;
4059}
4060
4061static bool ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
4062{
4063	bool rc;
4064
4065	ECORE_SMP_MB();
4066	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4067	ECORE_SMP_MB();
4068
4069	return rc;
4070}
4071
4072static bool ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
4073{
4074	bool rc;
4075
4076	ECORE_SMP_MB();
4077
4078	/* Don't let to refill if credit + cnt > pool_sz */
4079	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4080
4081	ECORE_SMP_MB();
4082
4083	return rc;
4084}
4085
4086static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
4087{
4088	int cur_credit;
4089
4090	ECORE_SMP_MB();
4091	cur_credit = ECORE_ATOMIC_READ(&o->credit);
4092
4093	return cur_credit;
4094}
4095
4096static bool ecore_credit_pool_always_TRUE(struct ecore_credit_pool_obj *o,
4097					  int cnt)
4098{
4099	return TRUE;
4100}
4101
4102static bool ecore_credit_pool_get_entry(
4103	struct ecore_credit_pool_obj *o,
4104	int *offset)
4105{
4106	int idx, vec, i;
4107
4108	*offset = -1;
4109
4110	/* Find "internal cam-offset" then add to base for this object... */
4111	for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
4112
4113		/* Skip the current vector if there are no free entries in it */
4114		if (!o->pool_mirror[vec])
4115			continue;
4116
4117		/* If we've got here we are going to find a free entry */
4118		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
4119		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
4120
4121			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4122				/* Got one!! */
4123				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4124				*offset = o->base_pool_offset + idx;
4125				return TRUE;
4126			}
4127	}
4128
4129	return FALSE;
4130}
4131
4132static bool ecore_credit_pool_put_entry(
4133	struct ecore_credit_pool_obj *o,
4134	int offset)
4135{
4136	if (offset < o->base_pool_offset)
4137		return FALSE;
4138
4139	offset -= o->base_pool_offset;
4140
4141	if (offset >= o->pool_sz)
4142		return FALSE;
4143
4144	/* Return the entry to the pool */
4145	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4146
4147	return TRUE;
4148}
4149
4150static bool ecore_credit_pool_put_entry_always_TRUE(
4151	struct ecore_credit_pool_obj *o,
4152	int offset)
4153{
4154	return TRUE;
4155}
4156
4157static bool ecore_credit_pool_get_entry_always_TRUE(
4158	struct ecore_credit_pool_obj *o,
4159	int *offset)
4160{
4161	*offset = -1;
4162	return TRUE;
4163}
4164/**
4165 * ecore_init_credit_pool - initialize credit pool internals.
4166 *
4167 * @p:
4168 * @base:	Base entry in the CAM to use.
4169 * @credit:	pool size.
4170 *
4171 * If base is negative no CAM entries handling will be performed.
4172 * If credit is negative pool operations will always succeed (unlimited pool).
4173 *
4174 */
4175static inline void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
4176					  int base, int credit)
4177{
4178	/* Zero the object first */
4179	ECORE_MEMSET(p, 0, sizeof(*p));
4180
4181	/* Set the table to all 1s */
4182	ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4183
4184	/* Init a pool as full */
4185	ECORE_ATOMIC_SET(&p->credit, credit);
4186
4187	/* The total poll size */
4188	p->pool_sz = credit;
4189
4190	p->base_pool_offset = base;
4191
4192	/* Commit the change */
4193	ECORE_SMP_MB();
4194
4195	p->check = ecore_credit_pool_check;
4196
4197	/* if pool credit is negative - disable the checks */
4198	if (credit >= 0) {
4199		p->put      = ecore_credit_pool_put;
4200		p->get      = ecore_credit_pool_get;
4201		p->put_entry = ecore_credit_pool_put_entry;
4202		p->get_entry = ecore_credit_pool_get_entry;
4203	} else {
4204		p->put      = ecore_credit_pool_always_TRUE;
4205		p->get      = ecore_credit_pool_always_TRUE;
4206		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4207		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4208	}
4209
4210	/* If base is negative - disable entries handling */
4211	if (base < 0) {
4212		p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4213		p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4214	}
4215}
4216
4217void ecore_init_mac_credit_pool(struct bxe_softc *sc,
4218				struct ecore_credit_pool_obj *p, uint8_t func_id,
4219				uint8_t func_num)
4220{
4221/* TODO: this will be defined in consts as well... */
4222#define ECORE_CAM_SIZE_EMUL 5
4223
4224	int cam_sz;
4225
4226	if (CHIP_IS_E1(sc)) {
4227		/* In E1, Multicast is saved in cam... */
4228		if (!CHIP_REV_IS_SLOW(sc))
4229			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - ECORE_MAX_MULTICAST;
4230		else
4231			cam_sz = ECORE_CAM_SIZE_EMUL - ECORE_MAX_EMUL_MULTI;
4232
4233		ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4234
4235	} else if (CHIP_IS_E1H(sc)) {
4236		/* CAM credit is equally divided between all active functions
4237		 * on the PORT!.
4238		 */
4239		if ((func_num > 0)) {
4240			if (!CHIP_REV_IS_SLOW(sc))
4241				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4242			else
4243				cam_sz = ECORE_CAM_SIZE_EMUL;
4244			ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4245		} else {
4246			/* this should never happen! Block MAC operations. */
4247			ecore_init_credit_pool(p, 0, 0);
4248		}
4249
4250	} else {
4251
4252		/*
4253		 * CAM credit is equaly divided between all active functions
4254		 * on the PATH.
4255		 */
4256		if ((func_num > 1)) {
4257			if (!CHIP_REV_IS_SLOW(sc))
4258				cam_sz = (MAX_MAC_CREDIT_E2
4259				- GET_NUM_VFS_PER_PATH(sc))
4260				/ func_num
4261				+ GET_NUM_VFS_PER_PF(sc);
4262			else
4263				cam_sz = ECORE_CAM_SIZE_EMUL;
4264
4265			/* No need for CAM entries handling for 57712 and
4266			 * newer.
4267			 */
4268			ecore_init_credit_pool(p, -1, cam_sz);
4269		} else if (func_num == 1) {
4270			if (!CHIP_REV_IS_SLOW(sc))
4271				cam_sz = MAX_MAC_CREDIT_E2;
4272			else
4273				cam_sz = ECORE_CAM_SIZE_EMUL;
4274
4275			/* No need for CAM entries handling for 57712 and
4276			 * newer.
4277			 */
4278			ecore_init_credit_pool(p, -1, cam_sz);
4279		} else {
4280			/* this should never happen! Block MAC operations. */
4281			ecore_init_credit_pool(p, 0, 0);
4282		}
4283	}
4284}
4285
4286void ecore_init_vlan_credit_pool(struct bxe_softc *sc,
4287				 struct ecore_credit_pool_obj *p,
4288				 uint8_t func_id,
4289				 uint8_t func_num)
4290{
4291	if (CHIP_IS_E1x(sc)) {
4292		/* There is no VLAN credit in HW on 57710 and 57711 only
4293		 * MAC / MAC-VLAN can be set
4294		 */
4295		ecore_init_credit_pool(p, 0, -1);
4296	} else {
4297		/* CAM credit is equally divided between all active functions
4298		 * on the PATH.
4299		 */
4300		if (func_num > 0) {
4301			int credit = MAX_VLAN_CREDIT_E2 / func_num;
4302			ecore_init_credit_pool(p, func_id * credit, credit);
4303		} else
4304			/* this should never happen! Block VLAN operations. */
4305			ecore_init_credit_pool(p, 0, 0);
4306	}
4307}
4308
4309/****************** RSS Configuration ******************/
4310
4311/**
4312 * ecore_setup_rss - configure RSS
4313 *
4314 * @sc:		device handle
4315 * @p:		rss configuration
4316 *
4317 * sends on UPDATE ramrod for that matter.
4318 */
4319static int ecore_setup_rss(struct bxe_softc *sc,
4320			   struct ecore_config_rss_params *p)
4321{
4322	struct ecore_rss_config_obj *o = p->rss_obj;
4323	struct ecore_raw_obj *r = &o->raw;
4324	struct eth_rss_update_ramrod_data *data =
4325		(struct eth_rss_update_ramrod_data *)(r->rdata);
4326	uint8_t rss_mode = 0;
4327	int rc;
4328
4329	ECORE_MEMSET(data, 0, sizeof(*data));
4330
4331	ECORE_MSG(sc, "Configuring RSS\n");
4332
4333	/* Set an echo field */
4334	data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
4335				 (r->state << ECORE_SWCID_SHIFT));
4336
4337	/* RSS mode */
4338	if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
4339		rss_mode = ETH_RSS_MODE_DISABLED;
4340	else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
4341		rss_mode = ETH_RSS_MODE_REGULAR;
4342#if defined(__VMKLNX__) && (VMWARE_ESX_DDK_VERSION < 55000) /* ! BNX2X_UPSTREAM */
4343	else if (ECORE_TEST_BIT(ECORE_RSS_MODE_ESX51, &p->rss_flags))
4344		rss_mode = ETH_RSS_MODE_ESX51;
4345#endif
4346
4347	data->rss_mode = rss_mode;
4348
4349	ECORE_MSG(sc, "rss_mode=%d\n", rss_mode);
4350
4351	/* RSS capabilities */
4352	if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
4353		data->capabilities |=
4354			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4355
4356	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
4357		data->capabilities |=
4358			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4359
4360	if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
4361		data->capabilities |=
4362			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4363
4364	if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
4365		data->capabilities |=
4366			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4367
4368	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
4369		data->capabilities |=
4370			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4371
4372	if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
4373		data->capabilities |=
4374			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4375
4376	if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
4377		data->udp_4tuple_dst_port_mask = ECORE_CPU_TO_LE16(p->tunnel_mask);
4378		data->udp_4tuple_dst_port_value =
4379			ECORE_CPU_TO_LE16(p->tunnel_value);
4380	}
4381
4382	/* Hashing mask */
4383	data->rss_result_mask = p->rss_result_mask;
4384
4385	/* RSS engine ID */
4386	data->rss_engine_id = o->engine_id;
4387
4388	ECORE_MSG(sc, "rss_engine_id=%d\n", data->rss_engine_id);
4389
4390	/* Indirection table */
4391	ECORE_MEMCPY(data->indirection_table, p->ind_table,
4392		  T_ETH_INDIRECTION_TABLE_SIZE);
4393
4394	/* Remember the last configuration */
4395	ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4396
4397
4398	/* RSS keys */
4399	if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
4400		ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
4401		       sizeof(data->rss_key));
4402		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4403	}
4404
4405	/* No need for an explicit memory barrier here as long we would
4406	 * need to ensure the ordering of writing to the SPQ element
4407	 * and updating of the SPQ producer which involves a memory
4408	 * read and we will have to put a full memory barrier there
4409	 * (inside ecore_sp_post()).
4410	 */
4411
4412	/* Send a ramrod */
4413	rc = ecore_sp_post(sc,
4414			     RAMROD_CMD_ID_ETH_RSS_UPDATE,
4415			     r->cid,
4416			     r->rdata_mapping,
4417			     ETH_CONNECTION_TYPE);
4418
4419	if (rc < 0)
4420		return rc;
4421
4422	return ECORE_PENDING;
4423}
4424
4425void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
4426			     uint8_t *ind_table)
4427{
4428	ECORE_MEMCPY(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4429}
4430
4431int ecore_config_rss(struct bxe_softc *sc,
4432		     struct ecore_config_rss_params *p)
4433{
4434	int rc;
4435	struct ecore_rss_config_obj *o = p->rss_obj;
4436	struct ecore_raw_obj *r = &o->raw;
4437
4438	/* Do nothing if only driver cleanup was requested */
4439	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4440		ECORE_MSG(sc, "Not configuring RSS ramrod_flags=%lx\n",
4441			  p->ramrod_flags);
4442		return ECORE_SUCCESS;
4443	}
4444
4445	r->set_pending(r);
4446
4447	rc = o->config_rss(sc, p);
4448	if (rc < 0) {
4449		r->clear_pending(r);
4450		return rc;
4451	}
4452
4453	if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
4454		rc = r->wait_comp(sc, r);
4455
4456	return rc;
4457}
4458
4459void ecore_init_rss_config_obj(struct bxe_softc *sc,
4460			       struct ecore_rss_config_obj *rss_obj,
4461			       uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
4462			       void *rdata, ecore_dma_addr_t rdata_mapping,
4463			       int state, unsigned long *pstate,
4464			       ecore_obj_type type)
4465{
4466	ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4467			   rdata_mapping, state, pstate, type);
4468
4469	rss_obj->engine_id  = engine_id;
4470	rss_obj->config_rss = ecore_setup_rss;
4471}
4472
4473int validate_vlan_mac(struct bxe_softc *sc,
4474		      struct ecore_vlan_mac_obj *vlan_mac)
4475{
4476	if (!vlan_mac->get_n_elements) {
4477		ECORE_ERR("vlan mac object was not intialized\n");
4478		return ECORE_INVAL;
4479	}
4480	return 0;
4481}
4482
4483/********************** Queue state object ***********************************/
4484
4485/**
4486 * ecore_queue_state_change - perform Queue state change transition
4487 *
4488 * @sc:		device handle
4489 * @params:	parameters to perform the transition
4490 *
4491 * returns 0 in case of successfully completed transition, negative error
4492 * code in case of failure, positive (EBUSY) value if there is a completion
4493 * to that is still pending (possible only if RAMROD_COMP_WAIT is
4494 * not set in params->ramrod_flags for asynchronous commands).
4495 *
4496 */
4497int ecore_queue_state_change(struct bxe_softc *sc,
4498			     struct ecore_queue_state_params *params)
4499{
4500	struct ecore_queue_sp_obj *o = params->q_obj;
4501	int rc, pending_bit;
4502	unsigned long *pending = &o->pending;
4503
4504	/* Check that the requested transition is legal */
4505	rc = o->check_transition(sc, o, params);
4506	if (rc) {
4507		ECORE_ERR("check transition returned an error. rc %d\n", rc);
4508		return ECORE_INVAL;
4509	}
4510
4511	/* Set "pending" bit */
4512	ECORE_MSG(sc, "pending bit was=%lx\n", o->pending);
4513	pending_bit = o->set_pending(o, params);
4514	ECORE_MSG(sc, "pending bit now=%lx\n", o->pending);
4515
4516	/* Don't send a command if only driver cleanup was requested */
4517	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4518		o->complete_cmd(sc, o, pending_bit);
4519	else {
4520		/* Send a ramrod */
4521		rc = o->send_cmd(sc, params);
4522		if (rc) {
4523			o->next_state = ECORE_Q_STATE_MAX;
4524			ECORE_CLEAR_BIT(pending_bit, pending);
4525			ECORE_SMP_MB_AFTER_CLEAR_BIT();
4526			return rc;
4527		}
4528
4529		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4530			rc = o->wait_comp(sc, o, pending_bit);
4531			if (rc)
4532				return rc;
4533
4534			return ECORE_SUCCESS;
4535		}
4536	}
4537
4538	return ECORE_RET_PENDING(pending_bit, pending);
4539}
4540
4541static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
4542				   struct ecore_queue_state_params *params)
4543{
4544	enum ecore_queue_cmd cmd = params->cmd, bit;
4545
4546	/* ACTIVATE and DEACTIVATE commands are implemented on top of
4547	 * UPDATE command.
4548	 */
4549	if ((cmd == ECORE_Q_CMD_ACTIVATE) ||
4550	    (cmd == ECORE_Q_CMD_DEACTIVATE))
4551		bit = ECORE_Q_CMD_UPDATE;
4552	else
4553		bit = cmd;
4554
4555	ECORE_SET_BIT(bit, &obj->pending);
4556	return bit;
4557}
4558
4559static int ecore_queue_wait_comp(struct bxe_softc *sc,
4560				 struct ecore_queue_sp_obj *o,
4561				 enum ecore_queue_cmd cmd)
4562{
4563	return ecore_state_wait(sc, cmd, &o->pending);
4564}
4565
4566/**
4567 * ecore_queue_comp_cmd - complete the state change command.
4568 *
4569 * @sc:		device handle
4570 * @o:
4571 * @cmd:
4572 *
4573 * Checks that the arrived completion is expected.
4574 */
4575static int ecore_queue_comp_cmd(struct bxe_softc *sc,
4576				struct ecore_queue_sp_obj *o,
4577				enum ecore_queue_cmd cmd)
4578{
4579	unsigned long cur_pending = o->pending;
4580
4581	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4582		ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4583			  cmd, o->cids[ECORE_PRIMARY_CID_INDEX],
4584			  o->state, cur_pending, o->next_state);
4585		return ECORE_INVAL;
4586	}
4587
4588	if (o->next_tx_only >= o->max_cos)
4589		/* >= because tx only must always be smaller than cos since the
4590		 * primary connection supports COS 0
4591		 */
4592		ECORE_ERR("illegal value for next tx_only: %d. max cos was %d",
4593			  o->next_tx_only, o->max_cos);
4594
4595	ECORE_MSG(sc,
4596		  "Completing command %d for queue %d, setting state to %d\n",
4597		  cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
4598
4599	if (o->next_tx_only)  /* print num tx-only if any exist */
4600		ECORE_MSG(sc, "primary cid %d: num tx-only cons %d\n",
4601			  o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
4602
4603	o->state = o->next_state;
4604	o->num_tx_only = o->next_tx_only;
4605	o->next_state = ECORE_Q_STATE_MAX;
4606
4607	/* It's important that o->state and o->next_state are
4608	 * updated before o->pending.
4609	 */
4610	wmb();
4611
4612	ECORE_CLEAR_BIT(cmd, &o->pending);
4613	ECORE_SMP_MB_AFTER_CLEAR_BIT();
4614
4615	return ECORE_SUCCESS;
4616}
4617
4618static void ecore_q_fill_setup_data_e2(struct bxe_softc *sc,
4619				struct ecore_queue_state_params *cmd_params,
4620				struct client_init_ramrod_data *data)
4621{
4622	struct ecore_queue_setup_params *params = &cmd_params->params.setup;
4623
4624	/* Rx data */
4625
4626	/* IPv6 TPA supported for E2 and above only */
4627	data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
4628					  &params->flags) *
4629				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4630}
4631
4632static void ecore_q_fill_init_general_data(struct bxe_softc *sc,
4633				struct ecore_queue_sp_obj *o,
4634				struct ecore_general_setup_params *params,
4635				struct client_init_general_data *gen_data,
4636				unsigned long *flags)
4637{
4638	gen_data->client_id = o->cl_id;
4639
4640	if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
4641		gen_data->statistics_counter_id =
4642					params->stat_id;
4643		gen_data->statistics_en_flg = 1;
4644		gen_data->statistics_zero_flg =
4645			ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
4646	} else
4647		gen_data->statistics_counter_id =
4648					DISABLE_STATISTIC_COUNTER_ID_VALUE;
4649
4650	gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE,
4651						   flags);
4652	gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4653						    flags);
4654	gen_data->sp_client_id = params->spcl_id;
4655	gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
4656	gen_data->func_id = o->func_id;
4657
4658	gen_data->cos = params->cos;
4659
4660	gen_data->traffic_type =
4661		ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
4662		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4663
4664	ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d\n",
4665		  gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4666}
4667
4668static void ecore_q_fill_init_tx_data(struct ecore_queue_sp_obj *o,
4669				struct ecore_txq_setup_params *params,
4670				struct client_init_tx_data *tx_data,
4671				unsigned long *flags)
4672{
4673	tx_data->enforce_security_flg =
4674		ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
4675	tx_data->default_vlan =
4676		ECORE_CPU_TO_LE16(params->default_vlan);
4677	tx_data->default_vlan_flg =
4678		ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
4679	tx_data->tx_switching_flg =
4680		ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
4681	tx_data->anti_spoofing_flg =
4682		ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
4683	tx_data->force_default_pri_flg =
4684		ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
4685	tx_data->refuse_outband_vlan_flg =
4686		ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
4687	tx_data->tunnel_lso_inc_ip_id =
4688		ECORE_TEST_BIT(ECORE_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4689	tx_data->tunnel_non_lso_pcsum_location =
4690		ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
4691							    CSUM_ON_BD;
4692
4693	tx_data->tx_status_block_id = params->fw_sb_id;
4694	tx_data->tx_sb_index_number = params->sb_cq_index;
4695	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4696
4697	tx_data->tx_bd_page_base.lo =
4698		ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
4699	tx_data->tx_bd_page_base.hi =
4700		ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
4701
4702	/* Don't configure any Tx switching mode during queue SETUP */
4703	tx_data->state = 0;
4704}
4705
4706static void ecore_q_fill_init_pause_data(struct ecore_queue_sp_obj *o,
4707				struct rxq_pause_params *params,
4708				struct client_init_rx_data *rx_data)
4709{
4710	/* flow control data */
4711	rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
4712	rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
4713	rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
4714	rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
4715	rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
4716	rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
4717	rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
4718}
4719
4720static void ecore_q_fill_init_rx_data(struct ecore_queue_sp_obj *o,
4721				struct ecore_rxq_setup_params *params,
4722				struct client_init_rx_data *rx_data,
4723				unsigned long *flags)
4724{
4725	rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
4726				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4727	rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
4728				CLIENT_INIT_RX_DATA_TPA_MODE;
4729	rx_data->vmqueue_mode_en_flg = 0;
4730
4731	rx_data->extra_data_over_sgl_en_flg =
4732		ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
4733	rx_data->cache_line_alignment_log_size =
4734		params->cache_line_log;
4735	rx_data->enable_dynamic_hc =
4736		ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
4737	rx_data->max_sges_for_packet = params->max_sges_pkt;
4738	rx_data->client_qzone_id = params->cl_qzone_id;
4739	rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
4740
4741	/* Always start in DROP_ALL mode */
4742	rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4743				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4744
4745	/* We don't set drop flags */
4746	rx_data->drop_ip_cs_err_flg = 0;
4747	rx_data->drop_tcp_cs_err_flg = 0;
4748	rx_data->drop_ttl0_flg = 0;
4749	rx_data->drop_udp_cs_err_flg = 0;
4750	rx_data->inner_vlan_removal_enable_flg =
4751		ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
4752	rx_data->outer_vlan_removal_enable_flg =
4753		ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
4754	rx_data->status_block_id = params->fw_sb_id;
4755	rx_data->rx_sb_index_number = params->sb_cq_index;
4756	rx_data->max_tpa_queues = params->max_tpa_queues;
4757	rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
4758	rx_data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buf_sz);
4759	rx_data->bd_page_base.lo =
4760		ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
4761	rx_data->bd_page_base.hi =
4762		ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
4763	rx_data->sge_page_base.lo =
4764		ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
4765	rx_data->sge_page_base.hi =
4766		ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
4767	rx_data->cqe_page_base.lo =
4768		ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
4769	rx_data->cqe_page_base.hi =
4770		ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
4771	rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
4772						 flags);
4773
4774	if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
4775		rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4776		rx_data->is_approx_mcast = 1;
4777	}
4778
4779	rx_data->rss_engine_id = params->rss_engine_id;
4780
4781	/* silent vlan removal */
4782	rx_data->silent_vlan_removal_flg =
4783		ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
4784	rx_data->silent_vlan_value =
4785		ECORE_CPU_TO_LE16(params->silent_removal_value);
4786	rx_data->silent_vlan_mask =
4787		ECORE_CPU_TO_LE16(params->silent_removal_mask);
4788}
4789
4790/* initialize the general, tx and rx parts of a queue object */
4791static void ecore_q_fill_setup_data_cmn(struct bxe_softc *sc,
4792				struct ecore_queue_state_params *cmd_params,
4793				struct client_init_ramrod_data *data)
4794{
4795	ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4796				       &cmd_params->params.setup.gen_params,
4797				       &data->general,
4798				       &cmd_params->params.setup.flags);
4799
4800	ecore_q_fill_init_tx_data(cmd_params->q_obj,
4801				  &cmd_params->params.setup.txq_params,
4802				  &data->tx,
4803				  &cmd_params->params.setup.flags);
4804
4805	ecore_q_fill_init_rx_data(cmd_params->q_obj,
4806				  &cmd_params->params.setup.rxq_params,
4807				  &data->rx,
4808				  &cmd_params->params.setup.flags);
4809
4810	ecore_q_fill_init_pause_data(cmd_params->q_obj,
4811				     &cmd_params->params.setup.pause_params,
4812				     &data->rx);
4813}
4814
4815/* initialize the general and tx parts of a tx-only queue object */
4816static void ecore_q_fill_setup_tx_only(struct bxe_softc *sc,
4817				struct ecore_queue_state_params *cmd_params,
4818				struct tx_queue_init_ramrod_data *data)
4819{
4820	ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4821				       &cmd_params->params.tx_only.gen_params,
4822				       &data->general,
4823				       &cmd_params->params.tx_only.flags);
4824
4825	ecore_q_fill_init_tx_data(cmd_params->q_obj,
4826				  &cmd_params->params.tx_only.txq_params,
4827				  &data->tx,
4828				  &cmd_params->params.tx_only.flags);
4829
4830	ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
4831		  cmd_params->q_obj->cids[0],
4832		  data->tx.tx_bd_page_base.lo,
4833		  data->tx.tx_bd_page_base.hi);
4834}
4835
4836/**
4837 * ecore_q_init - init HW/FW queue
4838 *
4839 * @sc:		device handle
4840 * @params:
4841 *
4842 * HW/FW initial Queue configuration:
4843 *      - HC: Rx and Tx
4844 *      - CDU context validation
4845 *
4846 */
4847static inline int ecore_q_init(struct bxe_softc *sc,
4848			       struct ecore_queue_state_params *params)
4849{
4850	struct ecore_queue_sp_obj *o = params->q_obj;
4851	struct ecore_queue_init_params *init = &params->params.init;
4852	uint16_t hc_usec;
4853	uint8_t cos;
4854
4855	/* Tx HC configuration */
4856	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
4857	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
4858		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4859
4860		ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
4861			init->tx.sb_cq_index,
4862			!ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->tx.flags),
4863			hc_usec);
4864	}
4865
4866	/* Rx HC configuration */
4867	if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
4868	    ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
4869		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4870
4871		ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
4872			init->rx.sb_cq_index,
4873			!ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->rx.flags),
4874			hc_usec);
4875	}
4876
4877	/* Set CDU context validation values */
4878	for (cos = 0; cos < o->max_cos; cos++) {
4879		ECORE_MSG(sc, "setting context validation. cid %d, cos %d\n",
4880			  o->cids[cos], cos);
4881		ECORE_MSG(sc, "context pointer %p\n", init->cxts[cos]);
4882		ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
4883	}
4884
4885	/* As no ramrod is sent, complete the command immediately  */
4886	o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
4887
4888	ECORE_MMIOWB();
4889	ECORE_SMP_MB();
4890
4891	return ECORE_SUCCESS;
4892}
4893
4894static inline int ecore_q_send_setup_e1x(struct bxe_softc *sc,
4895					struct ecore_queue_state_params *params)
4896{
4897	struct ecore_queue_sp_obj *o = params->q_obj;
4898	struct client_init_ramrod_data *rdata =
4899		(struct client_init_ramrod_data *)o->rdata;
4900	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4901	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4902
4903	/* Clear the ramrod data */
4904	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4905
4906	/* Fill the ramrod data */
4907	ecore_q_fill_setup_data_cmn(sc, params, rdata);
4908
4909	/* No need for an explicit memory barrier here as long we would
4910	 * need to ensure the ordering of writing to the SPQ element
4911	 * and updating of the SPQ producer which involves a memory
4912	 * read and we will have to put a full memory barrier there
4913	 * (inside ecore_sp_post()).
4914	 */
4915
4916	return ecore_sp_post(sc,
4917			     ramrod,
4918			     o->cids[ECORE_PRIMARY_CID_INDEX],
4919			     data_mapping,
4920			     ETH_CONNECTION_TYPE);
4921}
4922
4923static inline int ecore_q_send_setup_e2(struct bxe_softc *sc,
4924					struct ecore_queue_state_params *params)
4925{
4926	struct ecore_queue_sp_obj *o = params->q_obj;
4927	struct client_init_ramrod_data *rdata =
4928		(struct client_init_ramrod_data *)o->rdata;
4929	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4930	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4931
4932	/* Clear the ramrod data */
4933	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4934
4935	/* Fill the ramrod data */
4936	ecore_q_fill_setup_data_cmn(sc, params, rdata);
4937	ecore_q_fill_setup_data_e2(sc, params, rdata);
4938
4939	/* No need for an explicit memory barrier here as long we would
4940	 * need to ensure the ordering of writing to the SPQ element
4941	 * and updating of the SPQ producer which involves a memory
4942	 * read and we will have to put a full memory barrier there
4943	 * (inside ecore_sp_post()).
4944	 */
4945
4946	return ecore_sp_post(sc,
4947			     ramrod,
4948			     o->cids[ECORE_PRIMARY_CID_INDEX],
4949			     data_mapping,
4950			     ETH_CONNECTION_TYPE);
4951}
4952
4953static inline int ecore_q_send_setup_tx_only(struct bxe_softc *sc,
4954				  struct ecore_queue_state_params *params)
4955{
4956	struct ecore_queue_sp_obj *o = params->q_obj;
4957	struct tx_queue_init_ramrod_data *rdata =
4958		(struct tx_queue_init_ramrod_data *)o->rdata;
4959	ecore_dma_addr_t data_mapping = o->rdata_mapping;
4960	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4961	struct ecore_queue_setup_tx_only_params *tx_only_params =
4962		&params->params.tx_only;
4963	uint8_t cid_index = tx_only_params->cid_index;
4964
4965	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type))
4966		ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4967	ECORE_MSG(sc, "sending forward tx-only ramrod");
4968
4969	if (cid_index >= o->max_cos) {
4970		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
4971			  o->cl_id, cid_index);
4972		return ECORE_INVAL;
4973	}
4974
4975	ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d\n",
4976		  tx_only_params->gen_params.cos,
4977		  tx_only_params->gen_params.spcl_id);
4978
4979	/* Clear the ramrod data */
4980	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4981
4982	/* Fill the ramrod data */
4983	ecore_q_fill_setup_tx_only(sc, params, rdata);
4984
4985	ECORE_MSG(sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
4986		  o->cids[cid_index], rdata->general.client_id,
4987		  rdata->general.sp_client_id, rdata->general.cos);
4988
4989	/* No need for an explicit memory barrier here as long we would
4990	 * need to ensure the ordering of writing to the SPQ element
4991	 * and updating of the SPQ producer which involves a memory
4992	 * read and we will have to put a full memory barrier there
4993	 * (inside ecore_sp_post()).
4994	 */
4995
4996	return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4997			     data_mapping, ETH_CONNECTION_TYPE);
4998}
4999
5000static void ecore_q_fill_update_data(struct bxe_softc *sc,
5001				     struct ecore_queue_sp_obj *obj,
5002				     struct ecore_queue_update_params *params,
5003				     struct client_update_ramrod_data *data)
5004{
5005	/* Client ID of the client to update */
5006	data->client_id = obj->cl_id;
5007
5008	/* Function ID of the client to update */
5009	data->func_id = obj->func_id;
5010
5011	/* Default VLAN value */
5012	data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
5013
5014	/* Inner VLAN stripping */
5015	data->inner_vlan_removal_enable_flg =
5016		ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM,
5017			       &params->update_flags);
5018	data->inner_vlan_removal_change_flg =
5019		ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
5020		       &params->update_flags);
5021
5022	/* Outer VLAN stripping */
5023	data->outer_vlan_removal_enable_flg =
5024		ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM,
5025			       &params->update_flags);
5026	data->outer_vlan_removal_change_flg =
5027		ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
5028		       &params->update_flags);
5029
5030	/* Drop packets that have source MAC that doesn't belong to this
5031	 * Queue.
5032	 */
5033	data->anti_spoofing_enable_flg =
5034		ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF,
5035			       &params->update_flags);
5036	data->anti_spoofing_change_flg =
5037		ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
5038		       &params->update_flags);
5039
5040	/* Activate/Deactivate */
5041	data->activate_flg =
5042		ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
5043	data->activate_change_flg =
5044		ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5045			       &params->update_flags);
5046
5047	/* Enable default VLAN */
5048	data->default_vlan_enable_flg =
5049		ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN,
5050			       &params->update_flags);
5051	data->default_vlan_change_flg =
5052		ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
5053		       &params->update_flags);
5054
5055	/* silent vlan removal */
5056	data->silent_vlan_change_flg =
5057		ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5058			       &params->update_flags);
5059	data->silent_vlan_removal_flg =
5060		ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
5061			       &params->update_flags);
5062	data->silent_vlan_value = ECORE_CPU_TO_LE16(params->silent_removal_value);
5063	data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
5064
5065	/* tx switching */
5066	data->tx_switching_flg =
5067		ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING,
5068			       &params->update_flags);
5069	data->tx_switching_change_flg =
5070		ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
5071			       &params->update_flags);
5072}
5073
5074static inline int ecore_q_send_update(struct bxe_softc *sc,
5075				      struct ecore_queue_state_params *params)
5076{
5077	struct ecore_queue_sp_obj *o = params->q_obj;
5078	struct client_update_ramrod_data *rdata =
5079		(struct client_update_ramrod_data *)o->rdata;
5080	ecore_dma_addr_t data_mapping = o->rdata_mapping;
5081	struct ecore_queue_update_params *update_params =
5082		&params->params.update;
5083	uint8_t cid_index = update_params->cid_index;
5084
5085	if (cid_index >= o->max_cos) {
5086		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5087			  o->cl_id, cid_index);
5088		return ECORE_INVAL;
5089	}
5090
5091	/* Clear the ramrod data */
5092	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5093
5094	/* Fill the ramrod data */
5095	ecore_q_fill_update_data(sc, o, update_params, rdata);
5096
5097	/* No need for an explicit memory barrier here as long we would
5098	 * need to ensure the ordering of writing to the SPQ element
5099	 * and updating of the SPQ producer which involves a memory
5100	 * read and we will have to put a full memory barrier there
5101	 * (inside ecore_sp_post()).
5102	 */
5103
5104	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5105			     o->cids[cid_index], data_mapping,
5106			     ETH_CONNECTION_TYPE);
5107}
5108
5109/**
5110 * ecore_q_send_deactivate - send DEACTIVATE command
5111 *
5112 * @sc:		device handle
5113 * @params:
5114 *
5115 * implemented using the UPDATE command.
5116 */
5117static inline int ecore_q_send_deactivate(struct bxe_softc *sc,
5118					struct ecore_queue_state_params *params)
5119{
5120	struct ecore_queue_update_params *update = &params->params.update;
5121
5122	ECORE_MEMSET(update, 0, sizeof(*update));
5123
5124	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5125
5126	return ecore_q_send_update(sc, params);
5127}
5128
5129/**
5130 * ecore_q_send_activate - send ACTIVATE command
5131 *
5132 * @sc:		device handle
5133 * @params:
5134 *
5135 * implemented using the UPDATE command.
5136 */
5137static inline int ecore_q_send_activate(struct bxe_softc *sc,
5138					struct ecore_queue_state_params *params)
5139{
5140	struct ecore_queue_update_params *update = &params->params.update;
5141
5142	ECORE_MEMSET(update, 0, sizeof(*update));
5143
5144	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
5145	ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5146
5147	return ecore_q_send_update(sc, params);
5148}
5149
5150static inline int ecore_q_send_update_tpa(struct bxe_softc *sc,
5151					struct ecore_queue_state_params *params)
5152{
5153	/* TODO: Not implemented yet. */
5154	return -1;
5155}
5156
5157static inline int ecore_q_send_halt(struct bxe_softc *sc,
5158				    struct ecore_queue_state_params *params)
5159{
5160	struct ecore_queue_sp_obj *o = params->q_obj;
5161
5162	/* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
5163	ecore_dma_addr_t data_mapping = 0;
5164	data_mapping = (ecore_dma_addr_t)o->cl_id;
5165
5166	return ecore_sp_post(sc,
5167			     RAMROD_CMD_ID_ETH_HALT,
5168			     o->cids[ECORE_PRIMARY_CID_INDEX],
5169			     data_mapping,
5170			     ETH_CONNECTION_TYPE);
5171}
5172
5173static inline int ecore_q_send_cfc_del(struct bxe_softc *sc,
5174				       struct ecore_queue_state_params *params)
5175{
5176	struct ecore_queue_sp_obj *o = params->q_obj;
5177	uint8_t cid_idx = params->params.cfc_del.cid_index;
5178
5179	if (cid_idx >= o->max_cos) {
5180		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5181			  o->cl_id, cid_idx);
5182		return ECORE_INVAL;
5183	}
5184
5185	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
5186			     o->cids[cid_idx], 0,
5187			     NONE_CONNECTION_TYPE);
5188}
5189
5190static inline int ecore_q_send_terminate(struct bxe_softc *sc,
5191					struct ecore_queue_state_params *params)
5192{
5193	struct ecore_queue_sp_obj *o = params->q_obj;
5194	uint8_t cid_index = params->params.terminate.cid_index;
5195
5196	if (cid_index >= o->max_cos) {
5197		ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5198			  o->cl_id, cid_index);
5199		return ECORE_INVAL;
5200	}
5201
5202	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
5203			     o->cids[cid_index], 0,
5204			     ETH_CONNECTION_TYPE);
5205}
5206
5207static inline int ecore_q_send_empty(struct bxe_softc *sc,
5208				     struct ecore_queue_state_params *params)
5209{
5210	struct ecore_queue_sp_obj *o = params->q_obj;
5211
5212	return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
5213			     o->cids[ECORE_PRIMARY_CID_INDEX], 0,
5214			     ETH_CONNECTION_TYPE);
5215}
5216
5217static inline int ecore_queue_send_cmd_cmn(struct bxe_softc *sc,
5218					struct ecore_queue_state_params *params)
5219{
5220	switch (params->cmd) {
5221	case ECORE_Q_CMD_INIT:
5222		return ecore_q_init(sc, params);
5223	case ECORE_Q_CMD_SETUP_TX_ONLY:
5224		return ecore_q_send_setup_tx_only(sc, params);
5225	case ECORE_Q_CMD_DEACTIVATE:
5226		return ecore_q_send_deactivate(sc, params);
5227	case ECORE_Q_CMD_ACTIVATE:
5228		return ecore_q_send_activate(sc, params);
5229	case ECORE_Q_CMD_UPDATE:
5230		return ecore_q_send_update(sc, params);
5231	case ECORE_Q_CMD_UPDATE_TPA:
5232		return ecore_q_send_update_tpa(sc, params);
5233	case ECORE_Q_CMD_HALT:
5234		return ecore_q_send_halt(sc, params);
5235	case ECORE_Q_CMD_CFC_DEL:
5236		return ecore_q_send_cfc_del(sc, params);
5237	case ECORE_Q_CMD_TERMINATE:
5238		return ecore_q_send_terminate(sc, params);
5239	case ECORE_Q_CMD_EMPTY:
5240		return ecore_q_send_empty(sc, params);
5241	default:
5242		ECORE_ERR("Unknown command: %d\n", params->cmd);
5243		return ECORE_INVAL;
5244	}
5245}
5246
5247static int ecore_queue_send_cmd_e1x(struct bxe_softc *sc,
5248				    struct ecore_queue_state_params *params)
5249{
5250	switch (params->cmd) {
5251	case ECORE_Q_CMD_SETUP:
5252		return ecore_q_send_setup_e1x(sc, params);
5253	case ECORE_Q_CMD_INIT:
5254	case ECORE_Q_CMD_SETUP_TX_ONLY:
5255	case ECORE_Q_CMD_DEACTIVATE:
5256	case ECORE_Q_CMD_ACTIVATE:
5257	case ECORE_Q_CMD_UPDATE:
5258	case ECORE_Q_CMD_UPDATE_TPA:
5259	case ECORE_Q_CMD_HALT:
5260	case ECORE_Q_CMD_CFC_DEL:
5261	case ECORE_Q_CMD_TERMINATE:
5262	case ECORE_Q_CMD_EMPTY:
5263		return ecore_queue_send_cmd_cmn(sc, params);
5264	default:
5265		ECORE_ERR("Unknown command: %d\n", params->cmd);
5266		return ECORE_INVAL;
5267	}
5268}
5269
5270static int ecore_queue_send_cmd_e2(struct bxe_softc *sc,
5271				   struct ecore_queue_state_params *params)
5272{
5273	switch (params->cmd) {
5274	case ECORE_Q_CMD_SETUP:
5275		return ecore_q_send_setup_e2(sc, params);
5276	case ECORE_Q_CMD_INIT:
5277	case ECORE_Q_CMD_SETUP_TX_ONLY:
5278	case ECORE_Q_CMD_DEACTIVATE:
5279	case ECORE_Q_CMD_ACTIVATE:
5280	case ECORE_Q_CMD_UPDATE:
5281	case ECORE_Q_CMD_UPDATE_TPA:
5282	case ECORE_Q_CMD_HALT:
5283	case ECORE_Q_CMD_CFC_DEL:
5284	case ECORE_Q_CMD_TERMINATE:
5285	case ECORE_Q_CMD_EMPTY:
5286		return ecore_queue_send_cmd_cmn(sc, params);
5287	default:
5288		ECORE_ERR("Unknown command: %d\n", params->cmd);
5289		return ECORE_INVAL;
5290	}
5291}
5292
5293/**
5294 * ecore_queue_chk_transition - check state machine of a regular Queue
5295 *
5296 * @sc:		device handle
5297 * @o:
5298 * @params:
5299 *
5300 * (not Forwarding)
5301 * It both checks if the requested command is legal in a current
5302 * state and, if it's legal, sets a `next_state' in the object
5303 * that will be used in the completion flow to set the `state'
5304 * of the object.
5305 *
5306 * returns 0 if a requested command is a legal transition,
5307 *         ECORE_INVAL otherwise.
5308 */
5309static int ecore_queue_chk_transition(struct bxe_softc *sc,
5310				      struct ecore_queue_sp_obj *o,
5311				      struct ecore_queue_state_params *params)
5312{
5313	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5314	enum ecore_queue_cmd cmd = params->cmd;
5315	struct ecore_queue_update_params *update_params =
5316		 &params->params.update;
5317	uint8_t next_tx_only = o->num_tx_only;
5318
5319	/* Forget all pending for completion commands if a driver only state
5320	 * transition has been requested.
5321	 */
5322	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5323		o->pending = 0;
5324		o->next_state = ECORE_Q_STATE_MAX;
5325	}
5326
5327	/* Don't allow a next state transition if we are in the middle of
5328	 * the previous one.
5329	 */
5330	if (o->pending) {
5331		ECORE_ERR("Blocking transition since pending was %lx\n",
5332			  o->pending);
5333		return ECORE_BUSY;
5334	}
5335
5336	switch (state) {
5337	case ECORE_Q_STATE_RESET:
5338		if (cmd == ECORE_Q_CMD_INIT)
5339			next_state = ECORE_Q_STATE_INITIALIZED;
5340
5341		break;
5342	case ECORE_Q_STATE_INITIALIZED:
5343		if (cmd == ECORE_Q_CMD_SETUP) {
5344			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5345					   &params->params.setup.flags))
5346				next_state = ECORE_Q_STATE_ACTIVE;
5347			else
5348				next_state = ECORE_Q_STATE_INACTIVE;
5349		}
5350
5351		break;
5352	case ECORE_Q_STATE_ACTIVE:
5353		if (cmd == ECORE_Q_CMD_DEACTIVATE)
5354			next_state = ECORE_Q_STATE_INACTIVE;
5355
5356		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5357			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5358			next_state = ECORE_Q_STATE_ACTIVE;
5359
5360		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5361			next_state = ECORE_Q_STATE_MULTI_COS;
5362			next_tx_only = 1;
5363		}
5364
5365		else if (cmd == ECORE_Q_CMD_HALT)
5366			next_state = ECORE_Q_STATE_STOPPED;
5367
5368		else if (cmd == ECORE_Q_CMD_UPDATE) {
5369			/* If "active" state change is requested, update the
5370			 *  state accordingly.
5371			 */
5372			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5373					   &update_params->update_flags) &&
5374			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5375					    &update_params->update_flags))
5376				next_state = ECORE_Q_STATE_INACTIVE;
5377			else
5378				next_state = ECORE_Q_STATE_ACTIVE;
5379		}
5380
5381		break;
5382	case ECORE_Q_STATE_MULTI_COS:
5383		if (cmd == ECORE_Q_CMD_TERMINATE)
5384			next_state = ECORE_Q_STATE_MCOS_TERMINATED;
5385
5386		else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5387			next_state = ECORE_Q_STATE_MULTI_COS;
5388			next_tx_only = o->num_tx_only + 1;
5389		}
5390
5391		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5392			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5393			next_state = ECORE_Q_STATE_MULTI_COS;
5394
5395		else if (cmd == ECORE_Q_CMD_UPDATE) {
5396			/* If "active" state change is requested, update the
5397			 *  state accordingly.
5398			 */
5399			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5400					   &update_params->update_flags) &&
5401			    !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5402					    &update_params->update_flags))
5403				next_state = ECORE_Q_STATE_INACTIVE;
5404			else
5405				next_state = ECORE_Q_STATE_MULTI_COS;
5406		}
5407
5408		break;
5409	case ECORE_Q_STATE_MCOS_TERMINATED:
5410		if (cmd == ECORE_Q_CMD_CFC_DEL) {
5411			next_tx_only = o->num_tx_only - 1;
5412			if (next_tx_only == 0)
5413				next_state = ECORE_Q_STATE_ACTIVE;
5414			else
5415				next_state = ECORE_Q_STATE_MULTI_COS;
5416		}
5417
5418		break;
5419	case ECORE_Q_STATE_INACTIVE:
5420		if (cmd == ECORE_Q_CMD_ACTIVATE)
5421			next_state = ECORE_Q_STATE_ACTIVE;
5422
5423		else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5424			 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5425			next_state = ECORE_Q_STATE_INACTIVE;
5426
5427		else if (cmd == ECORE_Q_CMD_HALT)
5428			next_state = ECORE_Q_STATE_STOPPED;
5429
5430		else if (cmd == ECORE_Q_CMD_UPDATE) {
5431			/* If "active" state change is requested, update the
5432			 * state accordingly.
5433			 */
5434			if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5435					   &update_params->update_flags) &&
5436			    ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5437					   &update_params->update_flags)){
5438				if (o->num_tx_only == 0)
5439					next_state = ECORE_Q_STATE_ACTIVE;
5440				else /* tx only queues exist for this queue */
5441					next_state = ECORE_Q_STATE_MULTI_COS;
5442			} else
5443				next_state = ECORE_Q_STATE_INACTIVE;
5444		}
5445
5446		break;
5447	case ECORE_Q_STATE_STOPPED:
5448		if (cmd == ECORE_Q_CMD_TERMINATE)
5449			next_state = ECORE_Q_STATE_TERMINATED;
5450
5451		break;
5452	case ECORE_Q_STATE_TERMINATED:
5453		if (cmd == ECORE_Q_CMD_CFC_DEL)
5454			next_state = ECORE_Q_STATE_RESET;
5455
5456		break;
5457	default:
5458		ECORE_ERR("Illegal state: %d\n", state);
5459	}
5460
5461	/* Transition is assured */
5462	if (next_state != ECORE_Q_STATE_MAX) {
5463		ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5464			  state, cmd, next_state);
5465		o->next_state = next_state;
5466		o->next_tx_only = next_tx_only;
5467		return ECORE_SUCCESS;
5468	}
5469
5470	ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5471
5472	return ECORE_INVAL;
5473}
5474
5475/**
5476 * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
5477 *
5478 * @sc:		device handle
5479 * @o:
5480 * @params:
5481 *
5482 * It both checks if the requested command is legal in a current
5483 * state and, if it's legal, sets a `next_state' in the object
5484 * that will be used in the completion flow to set the `state'
5485 * of the object.
5486 *
5487 * returns 0 if a requested command is a legal transition,
5488 *         ECORE_INVAL otherwise.
5489 */
5490static int ecore_queue_chk_fwd_transition(struct bxe_softc *sc,
5491					  struct ecore_queue_sp_obj *o,
5492					struct ecore_queue_state_params *params)
5493{
5494	enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5495	enum ecore_queue_cmd cmd = params->cmd;
5496
5497	switch (state) {
5498	case ECORE_Q_STATE_RESET:
5499		if (cmd == ECORE_Q_CMD_INIT)
5500			next_state = ECORE_Q_STATE_INITIALIZED;
5501
5502		break;
5503	case ECORE_Q_STATE_INITIALIZED:
5504		if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5505			if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5506					   &params->params.tx_only.flags))
5507				next_state = ECORE_Q_STATE_ACTIVE;
5508			else
5509				next_state = ECORE_Q_STATE_INACTIVE;
5510		}
5511
5512		break;
5513	case ECORE_Q_STATE_ACTIVE:
5514	case ECORE_Q_STATE_INACTIVE:
5515		if (cmd == ECORE_Q_CMD_CFC_DEL)
5516			next_state = ECORE_Q_STATE_RESET;
5517
5518		break;
5519	default:
5520		ECORE_ERR("Illegal state: %d\n", state);
5521	}
5522
5523	/* Transition is assured */
5524	if (next_state != ECORE_Q_STATE_MAX) {
5525		ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5526			  state, cmd, next_state);
5527		o->next_state = next_state;
5528		return ECORE_SUCCESS;
5529	}
5530
5531	ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5532	return ECORE_INVAL;
5533}
5534
5535void ecore_init_queue_obj(struct bxe_softc *sc,
5536			  struct ecore_queue_sp_obj *obj,
5537			  uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt, uint8_t func_id,
5538			  void *rdata,
5539			  ecore_dma_addr_t rdata_mapping, unsigned long type)
5540{
5541	ECORE_MEMSET(obj, 0, sizeof(*obj));
5542
5543	/* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
5544	ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
5545
5546	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5547	obj->max_cos = cid_cnt;
5548	obj->cl_id = cl_id;
5549	obj->func_id = func_id;
5550	obj->rdata = rdata;
5551	obj->rdata_mapping = rdata_mapping;
5552	obj->type = type;
5553	obj->next_state = ECORE_Q_STATE_MAX;
5554
5555	if (CHIP_IS_E1x(sc))
5556		obj->send_cmd = ecore_queue_send_cmd_e1x;
5557	else
5558		obj->send_cmd = ecore_queue_send_cmd_e2;
5559
5560	if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
5561		obj->check_transition = ecore_queue_chk_fwd_transition;
5562	else
5563	obj->check_transition = ecore_queue_chk_transition;
5564
5565	obj->complete_cmd = ecore_queue_comp_cmd;
5566	obj->wait_comp = ecore_queue_wait_comp;
5567	obj->set_pending = ecore_queue_set_pending;
5568}
5569
5570/* return a queue object's logical state*/
5571int ecore_get_q_logical_state(struct bxe_softc *sc,
5572			       struct ecore_queue_sp_obj *obj)
5573{
5574	switch (obj->state) {
5575	case ECORE_Q_STATE_ACTIVE:
5576	case ECORE_Q_STATE_MULTI_COS:
5577		return ECORE_Q_LOGICAL_STATE_ACTIVE;
5578	case ECORE_Q_STATE_RESET:
5579	case ECORE_Q_STATE_INITIALIZED:
5580	case ECORE_Q_STATE_MCOS_TERMINATED:
5581	case ECORE_Q_STATE_INACTIVE:
5582	case ECORE_Q_STATE_STOPPED:
5583	case ECORE_Q_STATE_TERMINATED:
5584	case ECORE_Q_STATE_FLRED:
5585		return ECORE_Q_LOGICAL_STATE_STOPPED;
5586	default:
5587		return ECORE_INVAL;
5588	}
5589}
5590
5591/********************** Function state object *********************************/
5592enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc,
5593					   struct ecore_func_sp_obj *o)
5594{
5595	/* in the middle of transaction - return INVALID state */
5596	if (o->pending)
5597		return ECORE_F_STATE_MAX;
5598
5599	/* unsure the order of reading of o->pending and o->state
5600	 * o->pending should be read first
5601	 */
5602	rmb();
5603
5604	return o->state;
5605}
5606
5607static int ecore_func_wait_comp(struct bxe_softc *sc,
5608				struct ecore_func_sp_obj *o,
5609				enum ecore_func_cmd cmd)
5610{
5611	return ecore_state_wait(sc, cmd, &o->pending);
5612}
5613
5614/**
5615 * ecore_func_state_change_comp - complete the state machine transition
5616 *
5617 * @sc:		device handle
5618 * @o:
5619 * @cmd:
5620 *
5621 * Called on state change transition. Completes the state
5622 * machine transition only - no HW interaction.
5623 */
5624static inline int ecore_func_state_change_comp(struct bxe_softc *sc,
5625					       struct ecore_func_sp_obj *o,
5626					       enum ecore_func_cmd cmd)
5627{
5628	unsigned long cur_pending = o->pending;
5629
5630	if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
5631		ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5632			  cmd, ECORE_FUNC_ID(sc), o->state,
5633			  cur_pending, o->next_state);
5634		return ECORE_INVAL;
5635	}
5636
5637	ECORE_MSG(sc,
5638		  "Completing command %d for func %d, setting state to %d\n",
5639		  cmd, ECORE_FUNC_ID(sc), o->next_state);
5640
5641	o->state = o->next_state;
5642	o->next_state = ECORE_F_STATE_MAX;
5643
5644	/* It's important that o->state and o->next_state are
5645	 * updated before o->pending.
5646	 */
5647	wmb();
5648
5649	ECORE_CLEAR_BIT(cmd, &o->pending);
5650	ECORE_SMP_MB_AFTER_CLEAR_BIT();
5651
5652	return ECORE_SUCCESS;
5653}
5654
5655/**
5656 * ecore_func_comp_cmd - complete the state change command
5657 *
5658 * @sc:		device handle
5659 * @o:
5660 * @cmd:
5661 *
5662 * Checks that the arrived completion is expected.
5663 */
5664static int ecore_func_comp_cmd(struct bxe_softc *sc,
5665			       struct ecore_func_sp_obj *o,
5666			       enum ecore_func_cmd cmd)
5667{
5668	/* Complete the state machine part first, check if it's a
5669	 * legal completion.
5670	 */
5671	int rc = ecore_func_state_change_comp(sc, o, cmd);
5672	return rc;
5673}
5674
5675/**
5676 * ecore_func_chk_transition - perform function state machine transition
5677 *
5678 * @sc:		device handle
5679 * @o:
5680 * @params:
5681 *
5682 * It both checks if the requested command is legal in a current
5683 * state and, if it's legal, sets a `next_state' in the object
5684 * that will be used in the completion flow to set the `state'
5685 * of the object.
5686 *
5687 * returns 0 if a requested command is a legal transition,
5688 *         ECORE_INVAL otherwise.
5689 */
5690static int ecore_func_chk_transition(struct bxe_softc *sc,
5691				     struct ecore_func_sp_obj *o,
5692				     struct ecore_func_state_params *params)
5693{
5694	enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
5695	enum ecore_func_cmd cmd = params->cmd;
5696
5697	/* Forget all pending for completion commands if a driver only state
5698	 * transition has been requested.
5699	 */
5700	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5701		o->pending = 0;
5702		o->next_state = ECORE_F_STATE_MAX;
5703	}
5704
5705	/* Don't allow a next state transition if we are in the middle of
5706	 * the previous one.
5707	 */
5708	if (o->pending)
5709		return ECORE_BUSY;
5710
5711	switch (state) {
5712	case ECORE_F_STATE_RESET:
5713		if (cmd == ECORE_F_CMD_HW_INIT)
5714			next_state = ECORE_F_STATE_INITIALIZED;
5715
5716		break;
5717	case ECORE_F_STATE_INITIALIZED:
5718		if (cmd == ECORE_F_CMD_START)
5719			next_state = ECORE_F_STATE_STARTED;
5720
5721		else if (cmd == ECORE_F_CMD_HW_RESET)
5722			next_state = ECORE_F_STATE_RESET;
5723
5724		break;
5725	case ECORE_F_STATE_STARTED:
5726		if (cmd == ECORE_F_CMD_STOP)
5727			next_state = ECORE_F_STATE_INITIALIZED;
5728		/* afex ramrods can be sent only in started mode, and only
5729		 * if not pending for function_stop ramrod completion
5730		 * for these events - next state remained STARTED.
5731		 */
5732		else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
5733			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5734			next_state = ECORE_F_STATE_STARTED;
5735
5736		else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
5737			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5738			next_state = ECORE_F_STATE_STARTED;
5739
5740		/* Switch_update ramrod can be sent in either started or
5741		 * tx_stopped state, and it doesn't change the state.
5742		 */
5743		else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
5744			 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5745			next_state = ECORE_F_STATE_STARTED;
5746
5747		else if (cmd == ECORE_F_CMD_TX_STOP)
5748			next_state = ECORE_F_STATE_TX_STOPPED;
5749
5750		break;
5751	case ECORE_F_STATE_TX_STOPPED:
5752		if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
5753		    (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5754			next_state = ECORE_F_STATE_TX_STOPPED;
5755
5756		else if (cmd == ECORE_F_CMD_TX_START)
5757			next_state = ECORE_F_STATE_STARTED;
5758
5759		break;
5760	default:
5761		ECORE_ERR("Unknown state: %d\n", state);
5762	}
5763
5764	/* Transition is assured */
5765	if (next_state != ECORE_F_STATE_MAX) {
5766		ECORE_MSG(sc, "Good function state transition: %d(%d)->%d\n",
5767			  state, cmd, next_state);
5768		o->next_state = next_state;
5769		return ECORE_SUCCESS;
5770	}
5771
5772	ECORE_MSG(sc, "Bad function state transition request: %d %d\n",
5773		  state, cmd);
5774
5775	return ECORE_INVAL;
5776}
5777
5778/**
5779 * ecore_func_init_func - performs HW init at function stage
5780 *
5781 * @sc:		device handle
5782 * @drv:
5783 *
5784 * Init HW when the current phase is
5785 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5786 * HW blocks.
5787 */
5788static inline int ecore_func_init_func(struct bxe_softc *sc,
5789				       const struct ecore_func_sp_drv_ops *drv)
5790{
5791	return drv->init_hw_func(sc);
5792}
5793
5794/**
5795 * ecore_func_init_port - performs HW init at port stage
5796 *
5797 * @sc:		device handle
5798 * @drv:
5799 *
5800 * Init HW when the current phase is
5801 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5802 * FUNCTION-only HW blocks.
5803 *
5804 */
5805static inline int ecore_func_init_port(struct bxe_softc *sc,
5806				       const struct ecore_func_sp_drv_ops *drv)
5807{
5808	int rc = drv->init_hw_port(sc);
5809	if (rc)
5810		return rc;
5811
5812	return ecore_func_init_func(sc, drv);
5813}
5814
5815/**
5816 * ecore_func_init_cmn_chip - performs HW init at chip-common stage
5817 *
5818 * @sc:		device handle
5819 * @drv:
5820 *
5821 * Init HW when the current phase is
5822 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5823 * PORT-only and FUNCTION-only HW blocks.
5824 */
5825static inline int ecore_func_init_cmn_chip(struct bxe_softc *sc,
5826					const struct ecore_func_sp_drv_ops *drv)
5827{
5828	int rc = drv->init_hw_cmn_chip(sc);
5829	if (rc)
5830		return rc;
5831
5832	return ecore_func_init_port(sc, drv);
5833}
5834
5835/**
5836 * ecore_func_init_cmn - performs HW init at common stage
5837 *
5838 * @sc:		device handle
5839 * @drv:
5840 *
5841 * Init HW when the current phase is
5842 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
5843 * PORT-only and FUNCTION-only HW blocks.
5844 */
5845static inline int ecore_func_init_cmn(struct bxe_softc *sc,
5846				      const struct ecore_func_sp_drv_ops *drv)
5847{
5848	int rc = drv->init_hw_cmn(sc);
5849	if (rc)
5850		return rc;
5851
5852	return ecore_func_init_port(sc, drv);
5853}
5854
5855static int ecore_func_hw_init(struct bxe_softc *sc,
5856			      struct ecore_func_state_params *params)
5857{
5858	uint32_t load_code = params->params.hw_init.load_phase;
5859	struct ecore_func_sp_obj *o = params->f_obj;
5860	const struct ecore_func_sp_drv_ops *drv = o->drv;
5861	int rc = 0;
5862
5863	ECORE_MSG(sc, "function %d  load_code %x\n",
5864		  ECORE_ABS_FUNC_ID(sc), load_code);
5865
5866	/* Prepare buffers for unzipping the FW */
5867	rc = drv->gunzip_init(sc);
5868	if (rc)
5869		return rc;
5870
5871	/* Prepare FW */
5872	rc = drv->init_fw(sc);
5873	if (rc) {
5874		ECORE_ERR("Error loading firmware\n");
5875		goto init_err;
5876	}
5877
5878	/* Handle the beginning of COMMON_XXX pases separately... */
5879	switch (load_code) {
5880	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5881		rc = ecore_func_init_cmn_chip(sc, drv);
5882		if (rc)
5883			goto init_err;
5884
5885		break;
5886	case FW_MSG_CODE_DRV_LOAD_COMMON:
5887		rc = ecore_func_init_cmn(sc, drv);
5888		if (rc)
5889			goto init_err;
5890
5891		break;
5892	case FW_MSG_CODE_DRV_LOAD_PORT:
5893		rc = ecore_func_init_port(sc, drv);
5894		if (rc)
5895			goto init_err;
5896
5897		break;
5898	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5899		rc = ecore_func_init_func(sc, drv);
5900		if (rc)
5901			goto init_err;
5902
5903		break;
5904	default:
5905		ECORE_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5906		rc = ECORE_INVAL;
5907	}
5908
5909init_err:
5910	drv->gunzip_end(sc);
5911
5912	/* In case of success, complete the command immediately: no ramrods
5913	 * have been sent.
5914	 */
5915	if (!rc)
5916		o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
5917
5918	return rc;
5919}
5920
5921/**
5922 * ecore_func_reset_func - reset HW at function stage
5923 *
5924 * @sc:		device handle
5925 * @drv:
5926 *
5927 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5928 * FUNCTION-only HW blocks.
5929 */
5930static inline void ecore_func_reset_func(struct bxe_softc *sc,
5931					const struct ecore_func_sp_drv_ops *drv)
5932{
5933	drv->reset_hw_func(sc);
5934}
5935
5936/**
5937 * ecore_func_reset_port - reser HW at port stage
5938 *
5939 * @sc:		device handle
5940 * @drv:
5941 *
5942 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5943 * FUNCTION-only and PORT-only HW blocks.
5944 *
5945 *                 !!!IMPORTANT!!!
5946 *
5947 * It's important to call reset_port before reset_func() as the last thing
5948 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5949 * makes impossible any DMAE transactions.
5950 */
5951static inline void ecore_func_reset_port(struct bxe_softc *sc,
5952					const struct ecore_func_sp_drv_ops *drv)
5953{
5954	drv->reset_hw_port(sc);
5955	ecore_func_reset_func(sc, drv);
5956}
5957
5958/**
5959 * ecore_func_reset_cmn - reser HW at common stage
5960 *
5961 * @sc:		device handle
5962 * @drv:
5963 *
5964 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5965 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5966 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5967 */
5968static inline void ecore_func_reset_cmn(struct bxe_softc *sc,
5969					const struct ecore_func_sp_drv_ops *drv)
5970{
5971	ecore_func_reset_port(sc, drv);
5972	drv->reset_hw_cmn(sc);
5973}
5974
5975static inline int ecore_func_hw_reset(struct bxe_softc *sc,
5976				      struct ecore_func_state_params *params)
5977{
5978	uint32_t reset_phase = params->params.hw_reset.reset_phase;
5979	struct ecore_func_sp_obj *o = params->f_obj;
5980	const struct ecore_func_sp_drv_ops *drv = o->drv;
5981
5982	ECORE_MSG(sc, "function %d  reset_phase %x\n", ECORE_ABS_FUNC_ID(sc),
5983		  reset_phase);
5984
5985	switch (reset_phase) {
5986	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5987		ecore_func_reset_cmn(sc, drv);
5988		break;
5989	case FW_MSG_CODE_DRV_UNLOAD_PORT:
5990		ecore_func_reset_port(sc, drv);
5991		break;
5992	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5993		ecore_func_reset_func(sc, drv);
5994		break;
5995	default:
5996		ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n",
5997			  reset_phase);
5998		break;
5999	}
6000
6001	/* Complete the command immediately: no ramrods have been sent. */
6002	o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
6003
6004	return ECORE_SUCCESS;
6005}
6006
6007static inline int ecore_func_send_start(struct bxe_softc *sc,
6008					struct ecore_func_state_params *params)
6009{
6010	struct ecore_func_sp_obj *o = params->f_obj;
6011	struct function_start_data *rdata =
6012		(struct function_start_data *)o->rdata;
6013	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6014	struct ecore_func_start_params *start_params = &params->params.start;
6015
6016	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6017
6018	/* Fill the ramrod data with provided parameters */
6019	rdata->function_mode	= (uint8_t)start_params->mf_mode;
6020	rdata->sd_vlan_tag	= ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
6021	rdata->path_id		= ECORE_PATH_ID(sc);
6022	rdata->network_cos_mode	= start_params->network_cos_mode;
6023	rdata->gre_tunnel_mode	= start_params->gre_tunnel_mode;
6024	rdata->gre_tunnel_rss	= start_params->gre_tunnel_rss;
6025
6026	/*
6027	 *  No need for an explicit memory barrier here as long we would
6028	 *  need to ensure the ordering of writing to the SPQ element
6029	 *  and updating of the SPQ producer which involves a memory
6030	 *  read and we will have to put a full memory barrier there
6031	 *  (inside ecore_sp_post()).
6032	 */
6033
6034	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
6035			     data_mapping, NONE_CONNECTION_TYPE);
6036}
6037
6038static inline int ecore_func_send_switch_update(struct bxe_softc *sc,
6039					struct ecore_func_state_params *params)
6040{
6041	struct ecore_func_sp_obj *o = params->f_obj;
6042	struct function_update_data *rdata =
6043		(struct function_update_data *)o->rdata;
6044	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6045	struct ecore_func_switch_update_params *switch_update_params =
6046		&params->params.switch_update;
6047
6048	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6049
6050	/* Fill the ramrod data with provided parameters */
6051	rdata->tx_switch_suspend_change_flg = 1;
6052	rdata->tx_switch_suspend = switch_update_params->suspend;
6053	rdata->echo = SWITCH_UPDATE;
6054
6055	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6056			     data_mapping, NONE_CONNECTION_TYPE);
6057}
6058
6059static inline int ecore_func_send_afex_update(struct bxe_softc *sc,
6060					 struct ecore_func_state_params *params)
6061{
6062	struct ecore_func_sp_obj *o = params->f_obj;
6063	struct function_update_data *rdata =
6064		(struct function_update_data *)o->afex_rdata;
6065	ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
6066	struct ecore_func_afex_update_params *afex_update_params =
6067		&params->params.afex_update;
6068
6069	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6070
6071	/* Fill the ramrod data with provided parameters */
6072	rdata->vif_id_change_flg = 1;
6073	rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
6074	rdata->afex_default_vlan_change_flg = 1;
6075	rdata->afex_default_vlan =
6076		ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
6077	rdata->allowed_priorities_change_flg = 1;
6078	rdata->allowed_priorities = afex_update_params->allowed_priorities;
6079	rdata->echo = AFEX_UPDATE;
6080
6081	/*  No need for an explicit memory barrier here as long we would
6082	 *  need to ensure the ordering of writing to the SPQ element
6083	 *  and updating of the SPQ producer which involves a memory
6084	 *  read and we will have to put a full memory barrier there
6085	 *  (inside ecore_sp_post()).
6086	 */
6087	ECORE_MSG(sc,
6088		  "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
6089		  rdata->vif_id,
6090		  rdata->afex_default_vlan, rdata->allowed_priorities);
6091
6092	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6093			     data_mapping, NONE_CONNECTION_TYPE);
6094}
6095
6096static
6097inline int ecore_func_send_afex_viflists(struct bxe_softc *sc,
6098					 struct ecore_func_state_params *params)
6099{
6100	struct ecore_func_sp_obj *o = params->f_obj;
6101	struct afex_vif_list_ramrod_data *rdata =
6102		(struct afex_vif_list_ramrod_data *)o->afex_rdata;
6103	struct ecore_func_afex_viflists_params *afex_vif_params =
6104		&params->params.afex_viflists;
6105	uint64_t *p_rdata = (uint64_t *)rdata;
6106
6107	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6108
6109	/* Fill the ramrod data with provided parameters */
6110	rdata->vif_list_index = ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
6111	rdata->func_bit_map          = afex_vif_params->func_bit_map;
6112	rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
6113	rdata->func_to_clear         = afex_vif_params->func_to_clear;
6114
6115	/* send in echo type of sub command */
6116	rdata->echo = afex_vif_params->afex_vif_list_command;
6117
6118	/*  No need for an explicit memory barrier here as long we would
6119	 *  need to ensure the ordering of writing to the SPQ element
6120	 *  and updating of the SPQ producer which involves a memory
6121	 *  read and we will have to put a full memory barrier there
6122	 *  (inside ecore_sp_post()).
6123	 */
6124
6125	ECORE_MSG(sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
6126		  rdata->afex_vif_list_command, rdata->vif_list_index,
6127		  rdata->func_bit_map, rdata->func_to_clear);
6128
6129	/* this ramrod sends data directly and not through DMA mapping */
6130	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
6131			     *p_rdata, NONE_CONNECTION_TYPE);
6132}
6133
6134static inline int ecore_func_send_stop(struct bxe_softc *sc,
6135				       struct ecore_func_state_params *params)
6136{
6137	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
6138			     NONE_CONNECTION_TYPE);
6139}
6140
6141static inline int ecore_func_send_tx_stop(struct bxe_softc *sc,
6142				       struct ecore_func_state_params *params)
6143{
6144	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
6145			     NONE_CONNECTION_TYPE);
6146}
6147static inline int ecore_func_send_tx_start(struct bxe_softc *sc,
6148				       struct ecore_func_state_params *params)
6149{
6150	struct ecore_func_sp_obj *o = params->f_obj;
6151	struct flow_control_configuration *rdata =
6152		(struct flow_control_configuration *)o->rdata;
6153	ecore_dma_addr_t data_mapping = o->rdata_mapping;
6154	struct ecore_func_tx_start_params *tx_start_params =
6155		&params->params.tx_start;
6156	int i;
6157
6158	ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6159
6160	rdata->dcb_enabled = tx_start_params->dcb_enabled;
6161	rdata->dcb_version = tx_start_params->dcb_version;
6162	rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
6163
6164	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6165		rdata->traffic_type_to_priority_cos[i] =
6166			tx_start_params->traffic_type_to_priority_cos[i];
6167
6168	return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6169			     data_mapping, NONE_CONNECTION_TYPE);
6170}
6171
6172static int ecore_func_send_cmd(struct bxe_softc *sc,
6173			       struct ecore_func_state_params *params)
6174{
6175	switch (params->cmd) {
6176	case ECORE_F_CMD_HW_INIT:
6177		return ecore_func_hw_init(sc, params);
6178	case ECORE_F_CMD_START:
6179		return ecore_func_send_start(sc, params);
6180	case ECORE_F_CMD_STOP:
6181		return ecore_func_send_stop(sc, params);
6182	case ECORE_F_CMD_HW_RESET:
6183		return ecore_func_hw_reset(sc, params);
6184	case ECORE_F_CMD_AFEX_UPDATE:
6185		return ecore_func_send_afex_update(sc, params);
6186	case ECORE_F_CMD_AFEX_VIFLISTS:
6187		return ecore_func_send_afex_viflists(sc, params);
6188	case ECORE_F_CMD_TX_STOP:
6189		return ecore_func_send_tx_stop(sc, params);
6190	case ECORE_F_CMD_TX_START:
6191		return ecore_func_send_tx_start(sc, params);
6192	case ECORE_F_CMD_SWITCH_UPDATE:
6193		return ecore_func_send_switch_update(sc, params);
6194	default:
6195		ECORE_ERR("Unknown command: %d\n", params->cmd);
6196		return ECORE_INVAL;
6197	}
6198}
6199
6200void ecore_init_func_obj(struct bxe_softc *sc,
6201			 struct ecore_func_sp_obj *obj,
6202			 void *rdata, ecore_dma_addr_t rdata_mapping,
6203			 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
6204			 struct ecore_func_sp_drv_ops *drv_iface)
6205{
6206	ECORE_MEMSET(obj, 0, sizeof(*obj));
6207
6208	ECORE_MUTEX_INIT(&obj->one_pending_mutex);
6209
6210	obj->rdata = rdata;
6211	obj->rdata_mapping = rdata_mapping;
6212	obj->afex_rdata = afex_rdata;
6213	obj->afex_rdata_mapping = afex_rdata_mapping;
6214	obj->send_cmd = ecore_func_send_cmd;
6215	obj->check_transition = ecore_func_chk_transition;
6216	obj->complete_cmd = ecore_func_comp_cmd;
6217	obj->wait_comp = ecore_func_wait_comp;
6218	obj->drv = drv_iface;
6219}
6220
6221/**
6222 * ecore_func_state_change - perform Function state change transition
6223 *
6224 * @sc:		device handle
6225 * @params:	parameters to perform the transaction
6226 *
6227 * returns 0 in case of successfully completed transition,
6228 *         negative error code in case of failure, positive
6229 *         (EBUSY) value if there is a completion to that is
6230 *         still pending (possible only if RAMROD_COMP_WAIT is
6231 *         not set in params->ramrod_flags for asynchronous
6232 *         commands).
6233 */
6234int ecore_func_state_change(struct bxe_softc *sc,
6235			    struct ecore_func_state_params *params)
6236{
6237	struct ecore_func_sp_obj *o = params->f_obj;
6238	int rc, cnt = 300;
6239	enum ecore_func_cmd cmd = params->cmd;
6240	unsigned long *pending = &o->pending;
6241
6242	ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6243
6244	/* Check that the requested transition is legal */
6245	rc = o->check_transition(sc, o, params);
6246	if ((rc == ECORE_BUSY) &&
6247	    (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
6248		while ((rc == ECORE_BUSY) && (--cnt > 0)) {
6249			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6250			ECORE_MSLEEP(10);
6251			ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6252			rc = o->check_transition(sc, o, params);
6253		}
6254		if (rc == ECORE_BUSY) {
6255			ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6256			ECORE_ERR("timeout waiting for previous ramrod completion\n");
6257			return rc;
6258		}
6259	} else if (rc) {
6260		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6261		return rc;
6262	}
6263
6264	/* Set "pending" bit */
6265	ECORE_SET_BIT(cmd, pending);
6266
6267	/* Don't send a command if only driver cleanup was requested */
6268	if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6269		ecore_func_state_change_comp(sc, o, cmd);
6270		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6271	} else {
6272		/* Send a ramrod */
6273		rc = o->send_cmd(sc, params);
6274
6275		ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6276
6277		if (rc) {
6278			o->next_state = ECORE_F_STATE_MAX;
6279			ECORE_CLEAR_BIT(cmd, pending);
6280			ECORE_SMP_MB_AFTER_CLEAR_BIT();
6281			return rc;
6282		}
6283
6284		if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6285			rc = o->wait_comp(sc, o, cmd);
6286			if (rc)
6287				return rc;
6288
6289			return ECORE_SUCCESS;
6290		}
6291	}
6292
6293	return ECORE_RET_PENDING(cmd, pending);
6294}
6295