1/******************************************************************************
2
 © 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc.
4 All rights reserved.
5
6 This is proprietary source code of Freescale Semiconductor Inc.,
7 and its use is subject to the NetComm Device Drivers EULA.
8 The copyright notice above does not evidence any actual or intended
9 publication of such source code.
10
11 ALTERNATIVELY, redistribution and use in source and binary forms, with
12 or without modification, are permitted provided that the following
13 conditions are met:
14     * Redistributions of source code must retain the above copyright
15       notice, this list of conditions and the following disclaimer.
16     * Redistributions in binary form must reproduce the above copyright
17       notice, this list of conditions and the following disclaimer in the
18       documentation and/or other materials provided with the distribution.
19     * Neither the name of Freescale Semiconductor nor the
20       names of its contributors may be used to endorse or promote products
21       derived from this software without specific prior written permission.
22
23 THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34
35 **************************************************************************/
36/******************************************************************************
37 @File          qm.c
38
39 @Description   QM & Portal implementation
40*//***************************************************************************/
41#include <sys/cdefs.h>
42#include <sys/types.h>
43#include <machine/atomic.h>
44
45#include "error_ext.h"
46#include "std_ext.h"
47#include "string_ext.h"
48#include "mm_ext.h"
49#include "qm.h"
50#include "qman_low.h"
51
52#include <machine/vmparam.h>
53
54/****************************************/
55/*       static functions               */
56/****************************************/
57
58#define SLOW_POLL_IDLE   1000
59#define SLOW_POLL_BUSY   10
60
61/*
62 * Context entries are 32-bit.  The qman driver uses the pointer to the queue as
63 * its context, and the pointer is 64-byte aligned, per the XX_MallocSmart()
64 * call.  Take advantage of this fact to shove a 64-bit kernel pointer into a
65 * 32-bit context integer, and back.
66 *
 * XXX: This depends on the fact that VM_MAX_KERNEL_ADDRESS is less than a
 * 35-bit offset from VM_MIN_KERNEL_ADDRESS (a 32-bit context shifted left by
 * 3 bits).  If this ever changes, this needs to be updated.
70 */
71CTASSERT((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) < (1ULL << 35));
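/*
 * For illustration (addresses assumed): an 8-byte-aligned object at
 * VM_MIN_KERNEL_ADDRESS + 0x1000 encodes to the 32-bit context 0x200
 * (0x1000 >> 3), and ptr_from_aligned_int(0x200) recovers the same pointer.
 */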
72static inline uint32_t
73aligned_int_from_ptr(const void *p)
74{
75	uintptr_t ctx;
76
77	ctx = (uintptr_t)p;
78	KASSERT(ctx >= VM_MIN_KERNEL_ADDRESS, ("%p is too low!\n", p));
79	ctx -= VM_MIN_KERNEL_ADDRESS;
80	KASSERT((ctx & 0x07) == 0, ("Pointer %p is not 8-byte aligned!\n", p));
81
82	return (ctx >> 3);
83}
84
85static inline void *
86ptr_from_aligned_int(uint32_t ctx)
87{
88	uintptr_t p;
89
90	p = ctx;
91	p = VM_MIN_KERNEL_ADDRESS + (p << 3);
92
93	return ((void *)p);
94}
95
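/*
 * Issue a volatile dequeue command (VDQCR) on a parked or retired FQ: the
 * caller-supplied VDQCR flags are merged with the FQID and written to the
 * portal; QMAN_FQ_STATE_VDQCR is set here and cleared by the dequeue loop
 * once the command expires.
 */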
96static t_Error qman_volatile_dequeue(t_QmPortal     *p_QmPortal,
97                                     struct qman_fq *p_Fq,
98                                     uint32_t       vdqcr)
99{
100    ASSERT_COND((p_Fq->state == qman_fq_state_parked) ||
101                (p_Fq->state == qman_fq_state_retired));
102    ASSERT_COND(!(vdqcr & QM_VDQCR_FQID_MASK));
103    ASSERT_COND(!(p_Fq->flags & QMAN_FQ_STATE_VDQCR));
104
105    vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | p_Fq->fqid;
106    NCSW_PLOCK(p_QmPortal);
107    FQLOCK(p_Fq);
108    p_Fq->flags |= QMAN_FQ_STATE_VDQCR;
109    qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, vdqcr);
110    FQUNLOCK(p_Fq);
111    PUNLOCK(p_QmPortal);
112
113    return E_OK;
114}
115
116static const char *mcr_result_str(uint8_t result)
117{
118    switch (result) {
119    case QM_MCR_RESULT_NULL:
120        return "QM_MCR_RESULT_NULL";
121    case QM_MCR_RESULT_OK:
122        return "QM_MCR_RESULT_OK";
123    case QM_MCR_RESULT_ERR_FQID:
124        return "QM_MCR_RESULT_ERR_FQID";
125    case QM_MCR_RESULT_ERR_FQSTATE:
126        return "QM_MCR_RESULT_ERR_FQSTATE";
127    case QM_MCR_RESULT_ERR_NOTEMPTY:
128        return "QM_MCR_RESULT_ERR_NOTEMPTY";
129    case QM_MCR_RESULT_PENDING:
130        return "QM_MCR_RESULT_PENDING";
131    }
132    return "<unknown MCR result>";
133}
134
135static t_Error qman_create_fq(t_QmPortal        *p_QmPortal,
136                              uint32_t          fqid,
137                              uint32_t          flags,
138                              struct qman_fq    *p_Fq)
139{
140    struct qm_fqd fqd;
141    struct qm_mcr_queryfq_np np;
142    struct qm_mc_command *p_Mcc;
143    struct qm_mc_result *p_Mcr;
144
145    p_Fq->fqid = fqid;
146    p_Fq->flags = flags;
147    p_Fq->state = qman_fq_state_oos;
148    p_Fq->cgr_groupid = 0;
149    if (!(flags & QMAN_FQ_FLAG_RECOVER) ||
150            (flags & QMAN_FQ_FLAG_NO_MODIFY))
151        return E_OK;
152    /* Everything else is RECOVER support */
153    NCSW_PLOCK(p_QmPortal);
154    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
155    p_Mcc->queryfq.fqid = fqid;
156    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ);
157    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
158    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
159    if (p_Mcr->result != QM_MCR_RESULT_OK) {
160        PUNLOCK(p_QmPortal);
161        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QUERYFQ failed: %s", mcr_result_str(p_Mcr->result)));
162    }
163    fqd = p_Mcr->queryfq.fqd;
164    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
165    p_Mcc->queryfq_np.fqid = fqid;
166    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP);
167    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
168    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
169    if (p_Mcr->result != QM_MCR_RESULT_OK) {
170        PUNLOCK(p_QmPortal);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QUERYFQ_NP failed: %s", mcr_result_str(p_Mcr->result)));
172    }
173    np = p_Mcr->queryfq_np;
174    /* Phew, have queryfq and queryfq_np results, stitch together
175     * the FQ object from those. */
176    p_Fq->cgr_groupid = fqd.cgid;
177    switch (np.state & QM_MCR_NP_STATE_MASK) {
178    case QM_MCR_NP_STATE_OOS:
179        break;
180    case QM_MCR_NP_STATE_RETIRED:
181        p_Fq->state = qman_fq_state_retired;
182        if (np.frm_cnt)
183            p_Fq->flags |= QMAN_FQ_STATE_NE;
184        break;
185    case QM_MCR_NP_STATE_TEN_SCHED:
186    case QM_MCR_NP_STATE_TRU_SCHED:
187    case QM_MCR_NP_STATE_ACTIVE:
188        p_Fq->state = qman_fq_state_sched;
189        if (np.state & QM_MCR_NP_STATE_R)
190            p_Fq->flags |= QMAN_FQ_STATE_CHANGING;
191        break;
192    case QM_MCR_NP_STATE_PARKED:
193        p_Fq->state = qman_fq_state_parked;
194        break;
195    default:
196        ASSERT_COND(FALSE);
197    }
198    if (fqd.fq_ctrl & QM_FQCTRL_CGE)
        p_Fq->flags |= QMAN_FQ_STATE_CGR_EN;
200    PUNLOCK(p_QmPortal);
201
202    return E_OK;
203}
204
205static void qman_destroy_fq(struct qman_fq *p_Fq, uint32_t flags)
206{
207    /* We don't need to lock the FQ as it is a pre-condition that the FQ be
208     * quiesced. Instead, run some checks. */
209    UNUSED(flags);
210    switch (p_Fq->state) {
211    case qman_fq_state_parked:
        ASSERT_COND(flags & QMAN_FQ_DESTROY_PARKED);
        /* FALLTHROUGH */
213    case qman_fq_state_oos:
214        return;
215    default:
216        break;
217    }
218    ASSERT_COND(FALSE);
219}
220
221static t_Error qman_init_fq(t_QmPortal          *p_QmPortal,
222                            struct qman_fq      *p_Fq,
223                            uint32_t            flags,
224                            struct qm_mcc_initfq *p_Opts)
225{
226    struct qm_mc_command    *p_Mcc;
227    struct qm_mc_result     *p_Mcr;
228    uint8_t res, myverb = (uint8_t)((flags & QMAN_INITFQ_FLAG_SCHED) ?
229        QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED);
230
231    SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_oos) ||
232                              (p_Fq->state == qman_fq_state_parked),
233                              E_INVALID_STATE);
234
235    if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
236        return ERROR_CODE(E_INVALID_VALUE);
237    /* Issue an INITFQ_[PARKED|SCHED] management command */
238    NCSW_PLOCK(p_QmPortal);
239    FQLOCK(p_Fq);
240    if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
241            ((p_Fq->state != qman_fq_state_oos) &&
242                (p_Fq->state != qman_fq_state_parked))) {
243        FQUNLOCK(p_Fq);
244        PUNLOCK(p_QmPortal);
245        return ERROR_CODE(E_BUSY);
246    }
247    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
248    Mem2IOCpy32((void*)&p_Mcc->initfq, p_Opts, sizeof(struct qm_mcc_initfq));
249    qm_mc_commit(p_QmPortal->p_LowQmPortal, myverb);
250    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
251    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == myverb);
252    res = p_Mcr->result;
253    if (res != QM_MCR_RESULT_OK) {
254        FQUNLOCK(p_Fq);
255        PUNLOCK(p_QmPortal);
256        RETURN_ERROR(MINOR, E_INVALID_STATE,("INITFQ failed: %s", mcr_result_str(res)));
257    }
258
259    if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_FQCTRL) {
260        if (p_Mcc->initfq.fqd.fq_ctrl & QM_FQCTRL_CGE)
261            p_Fq->flags |= QMAN_FQ_STATE_CGR_EN;
262        else
263            p_Fq->flags &= ~QMAN_FQ_STATE_CGR_EN;
264    }
265    if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_CGID)
266        p_Fq->cgr_groupid = p_Mcc->initfq.fqd.cgid;
267    p_Fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
268            qman_fq_state_sched : qman_fq_state_parked;
269    FQUNLOCK(p_Fq);
270    PUNLOCK(p_QmPortal);
271    return E_OK;
272}
273
274static t_Error qman_retire_fq(t_QmPortal        *p_QmPortal,
275                              struct qman_fq    *p_Fq,
276                              uint32_t          *p_Flags,
277                              bool              drain)
278{
279    struct qm_mc_command    *p_Mcc;
280    struct qm_mc_result     *p_Mcr;
281    t_Error                 err = E_OK;
282    uint8_t                 res;
283
284    SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_parked) ||
285                              (p_Fq->state == qman_fq_state_sched),
286                              E_INVALID_STATE);
287
288    if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
289        return E_INVALID_VALUE;
290    NCSW_PLOCK(p_QmPortal);
291    FQLOCK(p_Fq);
292    if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
293            (p_Fq->state == qman_fq_state_retired) ||
294                (p_Fq->state == qman_fq_state_oos)) {
295        err = E_BUSY;
296        goto out;
297    }
298    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
299    p_Mcc->alterfq.fqid = p_Fq->fqid;
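    /* When draining, stash the FQ pointer (encoded by aligned_int_from_ptr())
     * in context_b so that later DQRR entries and MR messages can be mapped
     * back to this FQ via ptr_from_aligned_int(). */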
300    if (drain)
301        p_Mcc->alterfq.context_b = aligned_int_from_ptr(p_Fq);
302    qm_mc_commit(p_QmPortal->p_LowQmPortal,
303                 (uint8_t)((drain)?QM_MCC_VERB_ALTER_RETIRE_CTXB:QM_MCC_VERB_ALTER_RETIRE));
304    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) ==
                ((drain) ? QM_MCR_VERB_ALTER_RETIRE_CTXB : QM_MCR_VERB_ALTER_RETIRE));
307    res = p_Mcr->result;
308    if (res == QM_MCR_RESULT_OK)
309    {
310        /* Process 'fq' right away, we'll ignore FQRNI */
311        if (p_Mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
312            p_Fq->flags |= QMAN_FQ_STATE_NE;
313        if (p_Mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
314            p_Fq->flags |= QMAN_FQ_STATE_ORL;
315        p_Fq->state = qman_fq_state_retired;
316    }
317    else if (res == QM_MCR_RESULT_PENDING)
318        p_Fq->flags |= QMAN_FQ_STATE_CHANGING;
319    else {
320        XX_Print("ALTER_RETIRE failed: %s\n",
321                mcr_result_str(res));
322        err = E_INVALID_STATE;
323    }
324    if (p_Flags)
325        *p_Flags = p_Fq->flags;
326out:
327    FQUNLOCK(p_Fq);
328    PUNLOCK(p_QmPortal);
329    return err;
330}
331
332static t_Error qman_oos_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
333{
334    struct qm_mc_command    *p_Mcc;
335    struct qm_mc_result     *p_Mcr;
336    uint8_t                 res;
337
338    ASSERT_COND(p_Fq->state == qman_fq_state_retired);
339    if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
340        return ERROR_CODE(E_INVALID_VALUE);
341    NCSW_PLOCK(p_QmPortal);
342    FQLOCK(p_Fq);
343    if ((p_Fq->flags & QMAN_FQ_STATE_BLOCKOOS) ||
344            (p_Fq->state != qman_fq_state_retired)) {
345        FQUNLOCK(p_Fq);
346        PUNLOCK(p_QmPortal);
347        return ERROR_CODE(E_BUSY);
348    }
349    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
350    p_Mcc->alterfq.fqid = p_Fq->fqid;
351    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_OOS);
352    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
353    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
354    res = p_Mcr->result;
355    if (res != QM_MCR_RESULT_OK) {
356        FQUNLOCK(p_Fq);
357        PUNLOCK(p_QmPortal);
358        RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_OOS failed: %s\n", mcr_result_str(res)));
359    }
360    p_Fq->state = qman_fq_state_oos;
361
362    FQUNLOCK(p_Fq);
363    PUNLOCK(p_QmPortal);
364    return E_OK;
365}
366
367static t_Error qman_schedule_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
368{
369    struct qm_mc_command    *p_Mcc;
370    struct qm_mc_result     *p_Mcr;
371    uint8_t                 res;
372
373    ASSERT_COND(p_Fq->state == qman_fq_state_parked);
374    if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)
375        return ERROR_CODE(E_INVALID_VALUE);
376    /* Issue a ALTERFQ_SCHED management command */
377    NCSW_PLOCK(p_QmPortal);
378    FQLOCK(p_Fq);
379    if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) ||
380            (p_Fq->state != qman_fq_state_parked)) {
381        FQUNLOCK(p_Fq);
382        PUNLOCK(p_QmPortal);
383        return ERROR_CODE(E_BUSY);
384    }
385    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
386    p_Mcc->alterfq.fqid = p_Fq->fqid;
387    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_SCHED);
388    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
389    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
390    res = p_Mcr->result;
391    if (res != QM_MCR_RESULT_OK) {
392        FQUNLOCK(p_Fq);
393        PUNLOCK(p_QmPortal);
394        RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_SCHED failed: %s\n", mcr_result_str(res)));
395    }
396    p_Fq->state = qman_fq_state_sched;
397
398    FQUNLOCK(p_Fq);
399    PUNLOCK(p_QmPortal);
400    return E_OK;
401}
402
403/* Inline helper to reduce nesting in LoopMessageRing() */
404static __inline__ void fq_state_change(struct qman_fq *p_Fq,
405                                       struct qm_mr_entry *p_Msg,
406                                       uint8_t verb)
407{
408    FQLOCK(p_Fq);
409    switch(verb) {
410        case QM_MR_VERB_FQRL:
411            ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_ORL);
412            p_Fq->flags &= ~QMAN_FQ_STATE_ORL;
413            break;
414        case QM_MR_VERB_FQRN:
415            ASSERT_COND((p_Fq->state == qman_fq_state_parked) ||
416                (p_Fq->state == qman_fq_state_sched));
417            ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING);
418            p_Fq->flags &= ~QMAN_FQ_STATE_CHANGING;
419            if (p_Msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
420                p_Fq->flags |= QMAN_FQ_STATE_NE;
421            if (p_Msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
422                p_Fq->flags |= QMAN_FQ_STATE_ORL;
423            p_Fq->state = qman_fq_state_retired;
424            break;
425        case QM_MR_VERB_FQPN:
426            ASSERT_COND(p_Fq->state == qman_fq_state_sched);
427            ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING);
428            p_Fq->state = qman_fq_state_parked;
429    }
430    FQUNLOCK(p_Fq);
431}
432
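/*
 * Called once an FQ in the FQR has fully drained: mark it in p_DrainedFqs and,
 * when every FQ of the FQR is accounted for, move any still-retired FQs out of
 * service, free the FQ objects and, if a completion callback was registered,
 * release the FQR itself.
 */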
433static t_Error freeDrainedFq(struct qman_fq *p_Fq)
434{
435    t_QmFqr     *p_QmFqr;
436    uint32_t    i;
437
438    ASSERT_COND(p_Fq);
439    p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr;
440    ASSERT_COND(p_QmFqr);
441
442    ASSERT_COND(!p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset]);
443    p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset] = TRUE;
444    p_QmFqr->numOfDrainedFqids++;
445    if (p_QmFqr->numOfDrainedFqids == p_QmFqr->numOfFqids)
446    {
447        for (i=0;i<p_QmFqr->numOfFqids;i++)
448        {
449            if ((p_QmFqr->p_Fqs[i]->state == qman_fq_state_retired) &&
450                    (qman_oos_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i]) != E_OK))
451                RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!"));
452            qman_destroy_fq(p_QmFqr->p_Fqs[i], 0);
453            XX_FreeSmart(p_QmFqr->p_Fqs[i]);
454        }
455        XX_Free(p_QmFqr->p_DrainedFqs);
456        p_QmFqr->p_DrainedFqs = NULL;
457
458        if (p_QmFqr->f_CompletionCB)
459        {
460            p_QmFqr->f_CompletionCB(p_QmFqr->h_App, p_QmFqr);
461            XX_Free(p_QmFqr->p_Fqs);
462            if (p_QmFqr->fqidBase)
463                QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
464            XX_Free(p_QmFqr);
465        }
466    }
467
468    return E_OK;
469}
470
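/*
 * Drain a retired FQ: if it is known to be non-empty, issue a volatile dequeue
 * that runs until the queue is empty; otherwise it can be freed right away.
 */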
471static t_Error drainRetiredFq(struct qman_fq *p_Fq)
472{
473    t_QmFqr     *p_QmFqr;
474
475    ASSERT_COND(p_Fq);
476    p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr;
477    ASSERT_COND(p_QmFqr);
478
479    if (p_Fq->flags & QMAN_FQ_STATE_NE)
480    {
481        if (qman_volatile_dequeue(p_QmFqr->h_QmPortal, p_Fq,
482                                (QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_TILLEMPTY)) != E_OK)
484            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("drain with volatile failed"));
485        return E_OK;
486    }
487    else
488        return freeDrainedFq(p_Fq);
489}
490
491static e_RxStoreResponse drainCB(t_Handle h_App,
492                                 t_Handle h_QmFqr,
493                                 t_Handle h_QmPortal,
494                                 uint32_t fqidOffset,
495                                 t_DpaaFD *p_Frame)
496{
497    UNUSED(h_App);
498    UNUSED(h_QmFqr);
499    UNUSED(h_QmPortal);
500    UNUSED(fqidOffset);
501    UNUSED(p_Frame);
502
503    DBG(TRACE,("got fd for fqid %d", ((t_QmFqr *)h_QmFqr)->fqidBase + fqidOffset));
504    return e_RX_STORE_RESPONSE_CONTINUE;
505}
506
507static void cb_ern_dcErn(t_Handle                   h_App,
508                         t_Handle                   h_QmPortal,
509                         struct qman_fq             *p_Fq,
510                         const struct qm_mr_entry   *p_Msg)
511{
512    static int cnt = 0;
513    UNUSED(p_Fq);
514    UNUSED(p_Msg);
515    UNUSED(h_App);
516    UNUSED(h_QmPortal);
517
518    XX_Print("cb_ern_dcErn_fqs() unimplemented %d\n", ++cnt);
519}
520
521static void cb_fqs(t_Handle                   h_App,
522                   t_Handle                   h_QmPortal,
523                   struct qman_fq             *p_Fq,
524                   const struct qm_mr_entry   *p_Msg)
525{
526    UNUSED(p_Msg);
527    UNUSED(h_App);
528    UNUSED(h_QmPortal);
529
530    if (p_Fq->state == qman_fq_state_retired &&
531        !(p_Fq->flags & QMAN_FQ_STATE_ORL))
532        drainRetiredFq(p_Fq);
533}
534
535static void null_cb_mr(t_Handle                   h_App,
536                       t_Handle                   h_QmPortal,
537                       struct qman_fq             *p_Fq,
538                       const struct qm_mr_entry   *p_Msg)
539{
540    t_QmPortal      *p_QmPortal = (t_QmPortal *)h_QmPortal;
541
542    UNUSED(p_Fq);UNUSED(h_App);
543
544    if ((p_Msg->verb & QM_MR_VERB_DC_ERN) == QM_MR_VERB_DC_ERN)
545        XX_Print("Ignoring unowned MR frame on cpu %d, dc-portal 0x%02x.\n",
546                 p_QmPortal->p_LowQmPortal->config.cpu,p_Msg->dcern.portal);
547    else
548        XX_Print("Ignoring unowned MR frame on cpu %d, verb 0x%02x.\n",
549                 p_QmPortal->p_LowQmPortal->config.cpu,p_Msg->verb);
550}
551
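/*
 * Slow-path portal servicing: handle congestion-state change interrupts
 * (CSCI), enqueue-ring interrupts (EQRI) and message-ring entries (MRI),
 * dispatching ERN and FQ-state messages to the owning FQ or to the portal's
 * default callbacks.  Returns the slow-path status bits from 'is'.
 */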
552static uint32_t LoopMessageRing(t_QmPortal *p_QmPortal, uint32_t is)
553{
554    struct qm_mr_entry          *p_Msg;
555
556    if (is & QM_PIRQ_CSCI) {
557        struct qm_mc_result *p_Mcr;
558        struct qman_cgrs    tmp;
559        uint32_t            mask;
560        unsigned int        i, j;
561
562        NCSW_PLOCK(p_QmPortal);
563        qm_mc_start(p_QmPortal->p_LowQmPortal);
564        qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCONGESTION);
565        while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
566
567        /* cgrs[0] is the portal mask for its cg's, cgrs[1] is the
568           previous state of cg's */
569        for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++)
570        {
            /* get current state */
572            tmp.q.__state[i] = p_Mcr->querycongestion.state.__state[i];
573            /* keep only cg's that are registered for this portal */
574            tmp.q.__state[i] &= p_QmPortal->cgrs[0].q.__state[i];
575            /* handle only cg's that changed their state from previous exception */
576            tmp.q.__state[i] ^= p_QmPortal->cgrs[1].q.__state[i];
577            /* update previous */
578            p_QmPortal->cgrs[1].q.__state[i] = p_Mcr->querycongestion.state.__state[i];
579        }
580        PUNLOCK(p_QmPortal);
581
582        /* if in interrupt */
583        /* call the callback routines for any CG with a changed state */
584        for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++)
585            for(j=0, mask = 0x80000000; j<32 ; j++, mask>>=1)
586            {
587                if(tmp.q.__state[i] & mask)
588                {
589                    t_QmCg *p_QmCg = (t_QmCg *)(p_QmPortal->cgsHandles[i*32 + j]);
590                    if(p_QmCg->f_Exception)
591                        p_QmCg->f_Exception(p_QmCg->h_App, e_QM_EX_CG_STATE_CHANGE);
592                }
593            }
594
595    }
596
597
598    if (is & QM_PIRQ_EQRI) {
599        NCSW_PLOCK(p_QmPortal);
600        qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
601        qm_eqcr_set_ithresh(p_QmPortal->p_LowQmPortal, 0);
602        PUNLOCK(p_QmPortal);
603    }
604
605    if (is & QM_PIRQ_MRI) {
606mr_loop:
607        qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal);
608        p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal);
609        if (p_Msg) {
610            struct qman_fq  *p_FqFqs  = ptr_from_aligned_int(p_Msg->fq.contextB);
611            struct qman_fq  *p_FqErn  = ptr_from_aligned_int(p_Msg->ern.tag);
612            uint8_t         verb    =(uint8_t)(p_Msg->verb & QM_MR_VERB_TYPE_MASK);
613            t_QmRejectedFrameInfo   rejectedFrameInfo;
614
615            memset(&rejectedFrameInfo, 0, sizeof(t_QmRejectedFrameInfo));
616            if (!(verb & QM_MR_VERB_DC_ERN))
617            {
618                switch(p_Msg->ern.rc)
619                {
620                    case(QM_MR_RC_CGR_TAILDROP):
621                        rejectedFrameInfo.rejectionCode = e_QM_RC_CG_TAILDROP;
622                        rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
623                        break;
624                    case(QM_MR_RC_WRED):
625                        rejectedFrameInfo.rejectionCode = e_QM_RC_CG_WRED;
626                        rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
627                        break;
628                    case(QM_MR_RC_FQ_TAILDROP):
629                        rejectedFrameInfo.rejectionCode = e_QM_RC_FQ_TAILDROP;
630                        rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid;
631                        break;
632                    case(QM_MR_RC_ERROR):
633                        break;
634                    default:
635                        REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("Unknown rejection code"));
636                }
637                if (!p_FqErn)
638                    p_QmPortal->p_NullCB->ern(p_QmPortal->h_App, NULL, p_QmPortal, 0, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo);
639                else
640                    p_FqErn->cb.ern(p_FqErn->h_App, p_FqErn->h_QmFqr, p_QmPortal, p_FqErn->fqidOffset, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo);
641            } else if (verb == QM_MR_VERB_DC_ERN)
642            {
643                if (!p_FqErn)
644                    p_QmPortal->p_NullCB->dc_ern(NULL, p_QmPortal, NULL, p_Msg);
645                else
646                    p_FqErn->cb.dc_ern(p_FqErn->h_App, p_QmPortal, p_FqErn, p_Msg);
647            } else
648            {
649                if (verb == QM_MR_VERB_FQRNI)
650                    ; /* we drop FQRNIs on the floor */
651                else if (!p_FqFqs)
652                            p_QmPortal->p_NullCB->fqs(NULL, p_QmPortal, NULL, p_Msg);
653                else if ((verb == QM_MR_VERB_FQRN) ||
654                         (verb == QM_MR_VERB_FQRL) ||
655                         (verb == QM_MR_VERB_FQPN))
656                {
657                    fq_state_change(p_FqFqs, p_Msg, verb);
658                    p_FqFqs->cb.fqs(p_FqFqs->h_App, p_QmPortal, p_FqFqs, p_Msg);
659                }
660            }
661            qm_mr_next(p_QmPortal->p_LowQmPortal);
662            qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1);
663
664            goto mr_loop;
665        }
666    }
667
668    return is & (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
669}
670
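/*
 * Fast-path servicing of the dequeue response ring (DQRR): map each entry back
 * to its FQ through contextB, invoke the FQ's (or the portal's default) dqrr
 * callback, and consume the entry via DCA or CCI depending on the portal
 * options.  Stops when the ring is empty or a callback asks to pause.
 */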
671static void LoopDequeueRing(t_Handle h_QmPortal)
672{
673    struct qm_dqrr_entry        *p_Dq;
674    struct qman_fq              *p_Fq;
675    enum qman_cb_dqrr_result    res = qman_cb_dqrr_consume;
676    e_RxStoreResponse           tmpRes;
677    t_QmPortal                  *p_QmPortal = (t_QmPortal *)h_QmPortal;
678    int                         prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);
679
680    while (res != qman_cb_dqrr_pause)
681    {
682        if (prefetch)
683            qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
684        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
685        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
686        if (!p_Dq)
687            break;
688	p_Fq = ptr_from_aligned_int(p_Dq->contextB);
689        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
690            /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
691             * to check for clearing it when doing volatile dequeues. It's
692             * one less thing to check in the critical path (SDQCR). */
693            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
694            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
695                res = qman_cb_dqrr_pause;
696            /* Check for VDQCR completion */
697            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
698                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
699            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
700            {
701                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
702                freeDrainedFq(p_Fq);
703            }
704        }
705        else
706        {
707            /* Interpret 'dq' from the owner's perspective. */
708            /* use portal default handlers */
709            ASSERT_COND(p_Dq->fqid);
710            if (p_Fq)
711            {
712                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
713                                       p_Fq->h_QmFqr,
714                                       p_QmPortal,
715                                       p_Fq->fqidOffset,
716                                       (t_DpaaFD*)&p_Dq->fd);
717                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
718                    res = qman_cb_dqrr_pause;
719                else if (p_Fq->state == qman_fq_state_waiting_parked)
720                    res = qman_cb_dqrr_park;
721            }
722            else
723            {
724                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
725                                                    NULL,
726                                                    p_QmPortal,
727                                                    p_Dq->fqid,
728                                                    (t_DpaaFD*)&p_Dq->fd);
729                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
730                    res = qman_cb_dqrr_pause;
731            }
732        }
733
734        /* Parking isn't possible unless HELDACTIVE was set. NB,
735         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
736         * check for HELDACTIVE to cover both. */
737        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
738                    (res != qman_cb_dqrr_park));
739        if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) {
740            /* Defer just means "skip it, I'll consume it myself later on" */
741            if (res != qman_cb_dqrr_defer)
742                qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
743                                           p_Dq,
744                                           (res == qman_cb_dqrr_park));
745            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
746        } else {
747            if (res == qman_cb_dqrr_park)
748                /* The only thing to do for non-DCA is the park-request */
749                qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal);
750            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
751            qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
752        }
753    }
754}
755
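/* Variant of LoopDequeueRing() for portals that consume via DCA only; the
 * per-entry checks of the portal options are dropped from the loop. */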
756static void LoopDequeueRingDcaOptimized(t_Handle h_QmPortal)
757{
758    struct qm_dqrr_entry        *p_Dq;
759    struct qman_fq              *p_Fq;
760    enum qman_cb_dqrr_result    res = qman_cb_dqrr_consume;
761    e_RxStoreResponse           tmpRes;
762    t_QmPortal                  *p_QmPortal = (t_QmPortal *)h_QmPortal;
763
764    while (res != qman_cb_dqrr_pause)
765    {
766        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
767        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
768        if (!p_Dq)
769            break;
770	p_Fq = ptr_from_aligned_int(p_Dq->contextB);
771        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
772            /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
773             * to check for clearing it when doing volatile dequeues. It's
774             * one less thing to check in the critical path (SDQCR). */
775            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
776            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
777                res = qman_cb_dqrr_pause;
778            /* Check for VDQCR completion */
779            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
780                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
781            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
782            {
783                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
784                freeDrainedFq(p_Fq);
785            }
786        }
787        else
788        {
789            /* Interpret 'dq' from the owner's perspective. */
790            /* use portal default handlers */
791            ASSERT_COND(p_Dq->fqid);
792            if (p_Fq)
793            {
794                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
795                                       p_Fq->h_QmFqr,
796                                       p_QmPortal,
797                                       p_Fq->fqidOffset,
798                                       (t_DpaaFD*)&p_Dq->fd);
799                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
800                    res = qman_cb_dqrr_pause;
801                else if (p_Fq->state == qman_fq_state_waiting_parked)
802                    res = qman_cb_dqrr_park;
803            }
804            else
805            {
806                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
807                                                    NULL,
808                                                    p_QmPortal,
809                                                    p_Dq->fqid,
810                                                    (t_DpaaFD*)&p_Dq->fd);
811                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
812                    res = qman_cb_dqrr_pause;
813            }
814        }
815
816        /* Parking isn't possible unless HELDACTIVE was set. NB,
817         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
818         * check for HELDACTIVE to cover both. */
819        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
820                (res != qman_cb_dqrr_park));
821        /* Defer just means "skip it, I'll consume it myself later on" */
822        if (res != qman_cb_dqrr_defer)
823            qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
824                                       p_Dq,
825                                       (res == qman_cb_dqrr_park));
826        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
827    }
828}
829
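/* Variant of LoopDequeueRing() for portals that consume via CCI only, again
 * with the per-entry checks of the portal options removed. */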
830static void LoopDequeueRingOptimized(t_Handle h_QmPortal)
831{
832    struct qm_dqrr_entry        *p_Dq;
833    struct qman_fq              *p_Fq;
834    enum qman_cb_dqrr_result    res = qman_cb_dqrr_consume;
835    e_RxStoreResponse           tmpRes;
836    t_QmPortal                  *p_QmPortal = (t_QmPortal *)h_QmPortal;
837
838    while (res != qman_cb_dqrr_pause)
839    {
840        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
841        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
842        if (!p_Dq)
843            break;
844	p_Fq = ptr_from_aligned_int(p_Dq->contextB);
845        if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
846            /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
847             * to check for clearing it when doing volatile dequeues. It's
848             * one less thing to check in the critical path (SDQCR). */
849            tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd);
850            if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
851                res = qman_cb_dqrr_pause;
852            /* Check for VDQCR completion */
853            if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
854                p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR;
855            if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY)
856            {
857                p_Fq->flags &= ~QMAN_FQ_STATE_NE;
858                freeDrainedFq(p_Fq);
859            }
860        }
861        else
862        {
863            /* Interpret 'dq' from the owner's perspective. */
864            /* use portal default handlers */
865            ASSERT_COND(p_Dq->fqid);
866            if (p_Fq)
867            {
868                tmpRes = p_Fq->cb.dqrr(p_Fq->h_App,
869                                       p_Fq->h_QmFqr,
870                                       p_QmPortal,
871                                       p_Fq->fqidOffset,
872                                       (t_DpaaFD*)&p_Dq->fd);
873                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
874                    res = qman_cb_dqrr_pause;
875                else if (p_Fq->state == qman_fq_state_waiting_parked)
876                    res = qman_cb_dqrr_park;
877            }
878            else
879            {
880                tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App,
881                                                    NULL,
882                                                    p_QmPortal,
883                                                    p_Dq->fqid,
884                                                    (t_DpaaFD*)&p_Dq->fd);
885                if (tmpRes == e_RX_STORE_RESPONSE_PAUSE)
886                    res = qman_cb_dqrr_pause;
887            }
888        }
889
890        /* Parking isn't possible unless HELDACTIVE was set. NB,
891         * FORCEELIGIBLE implies HELDACTIVE, so we only need to
892         * check for HELDACTIVE to cover both. */
893        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
894                (res != qman_cb_dqrr_park));
895        if (res == qman_cb_dqrr_park)
896            /* The only thing to do for non-DCA is the park-request */
897            qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal);
898        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
899        qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
900    }
901}
902
903/* Portal interrupt handler */
904static void portal_isr(void *ptr)
905{
906    t_QmPortal  *p_QmPortal = ptr;
907    uint32_t    event = 0;
908    uint32_t    enableEvents = qm_isr_enable_read(p_QmPortal->p_LowQmPortal);
909
910    DBG(TRACE, ("software-portal %d got interrupt", p_QmPortal->p_LowQmPortal->config.cpu));
911
912    event |= (qm_isr_status_read(p_QmPortal->p_LowQmPortal) &
913            enableEvents);
914
915    qm_isr_status_clear(p_QmPortal->p_LowQmPortal, event);
916    /* Only do fast-path handling if it's required */
917    if (/*(event & QM_PIRQ_DQRI) &&*/
918        (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_FAST))
919        p_QmPortal->f_LoopDequeueRingCB(p_QmPortal);
920    if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_SLOW)
921        LoopMessageRing(p_QmPortal, event);
922}
923
924
925static t_Error qman_query_fq_np(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq, struct qm_mcr_queryfq_np *p_Np)
926{
927    struct qm_mc_command    *p_Mcc;
928    struct qm_mc_result     *p_Mcr;
929    uint8_t                 res;
930
931    NCSW_PLOCK(p_QmPortal);
932    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
933    p_Mcc->queryfq_np.fqid = p_Fq->fqid;
934    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP);
935    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
936    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
937    res = p_Mcr->result;
938    if (res == QM_MCR_RESULT_OK)
939        *p_Np = p_Mcr->queryfq_np;
940    PUNLOCK(p_QmPortal);
941    if (res != QM_MCR_RESULT_OK)
942        RETURN_ERROR(MINOR, E_INVALID_STATE, ("QUERYFQ_NP failed: %s\n", mcr_result_str(res)));
943    return E_OK;
944}
945
946static uint8_t QmCgGetCgId(t_Handle h_QmCg)
947{
948   t_QmCg *p_QmCg = (t_QmCg *)h_QmCg;
949
950   return p_QmCg->id;
951
952}
953
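/*
 * Allocate and initialize 'count' consecutive FQs starting at 'fqid'.  A
 * single INITFQ covers the whole range (fq_opts.count = count - 1, so only
 * p_Fqs[0] is passed to qman_init_fq()); the driver-side qman_fq objects for
 * FQs 1..count-1 are then cloned from the first one with their FQIDs adjusted.
 */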
954static t_Error qm_new_fq(t_QmPortal                         *p_QmPortal,
955                         uint32_t                           fqid,
956                         uint32_t                           fqidOffset,
957                         uint32_t                           channel,
958                         uint32_t                           wqid,
959                         uint16_t                           count,
960                         uint32_t                           flags,
961                         t_QmFqrCongestionAvoidanceParams   *p_CgParams,
962                         t_QmContextA                       *p_ContextA,
963                         t_QmContextB                       *p_ContextB,
964                         bool                               initParked,
965                         t_Handle                           h_QmFqr,
966                         struct qman_fq                     **p_Fqs)
967{
968    struct qman_fq          *p_Fq = NULL;
969    struct qm_mcc_initfq    fq_opts;
970    uint32_t                i;
971    t_Error                 err = E_OK;
972    int         gap, tmp;
973    uint32_t    tmpA, tmpN, ta=0, tn=0, initFqFlag;
974
975    ASSERT_COND(p_QmPortal);
976    ASSERT_COND(count);
977
978    for(i=0;i<count;i++)
979    {
980        p_Fq = (struct qman_fq *)XX_MallocSmart(sizeof(struct qman_fq), 0, 64);
981        if (!p_Fq)
982            RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!"));
983        memset(p_Fq, 0, sizeof(struct qman_fq));
984        p_Fq->cb.dqrr     = p_QmPortal->f_DfltFrame;
985        p_Fq->cb.ern      = p_QmPortal->f_RejectedFrame;
986        p_Fq->cb.dc_ern   = cb_ern_dcErn;
987        p_Fq->cb.fqs      = cb_fqs;
988        p_Fq->h_App       = p_QmPortal->h_App;
989        p_Fq->h_QmFqr     = h_QmFqr;
990        p_Fq->fqidOffset  = fqidOffset;
991        p_Fqs[i] = p_Fq;
992        if ((err = qman_create_fq(p_QmPortal,(uint32_t)(fqid + i), 0, p_Fqs[i])) != E_OK)
993            break;
994    }
995
996    if (err != E_OK)
997    {
998        for(i=0;i<count;i++)
999            if (p_Fqs[i])
1000            {
1001                XX_FreeSmart(p_Fqs[i]);
1002                p_Fqs[i] = NULL;
1003            }
1004        RETURN_ERROR(MINOR, err, ("Failed to create Fqs"));
1005    }
1006
1007    memset(&fq_opts,0,sizeof(fq_opts));
1008    fq_opts.fqid = fqid;
1009    fq_opts.count = (uint16_t)(count-1);
1010    fq_opts.we_mask |= QM_INITFQ_WE_DESTWQ;
1011    fq_opts.fqd.dest.channel = channel;
1012    fq_opts.fqd.dest.wq = wqid;
1013    fq_opts.we_mask |= QM_INITFQ_WE_FQCTRL;
1014    fq_opts.fqd.fq_ctrl = (uint16_t)flags;
1015
1016    if ((flags & QM_FQCTRL_CGE) || (flags & QM_FQCTRL_TDE))
1017        ASSERT_COND(p_CgParams);
1018
1019    if(flags & QM_FQCTRL_CGE)
1020    {
1021        ASSERT_COND(p_CgParams->h_QmCg);
1022
        /* CG OAC and FQ TD may not be configured at the same time. If both are
           required, we configure the CG first and the FQ TD later - see below. */
1025        fq_opts.fqd.cgid = QmCgGetCgId(p_CgParams->h_QmCg);
1026        fq_opts.we_mask |= QM_INITFQ_WE_CGID;
1027        if(p_CgParams->overheadAccountingLength)
1028        {
1029            fq_opts.we_mask |= QM_INITFQ_WE_OAC;
1030            fq_opts.we_mask &= ~QM_INITFQ_WE_TDTHRESH;
1031            fq_opts.fqd.td_thresh = (uint16_t)(QM_FQD_TD_THRESH_OAC_EN | p_CgParams->overheadAccountingLength);
1032        }
1033    }
1034    if((flags & QM_FQCTRL_TDE) && (!p_CgParams->overheadAccountingLength))
1035    {
1036        ASSERT_COND(p_CgParams->fqTailDropThreshold);
1037
1038        fq_opts.we_mask |= QM_INITFQ_WE_TDTHRESH;
1039
1040            /* express thresh as ta*2^tn */
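            /* e.g. (illustrative numbers): a requested threshold of 3000 is
               approximated as ta=47, tn=6, i.e. 47 * 2^6 = 3008. */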
1041            gap = (int)p_CgParams->fqTailDropThreshold;
1042            for (tmpA=0 ; tmpA<256; tmpA++ )
1043                for (tmpN=0 ; tmpN<32; tmpN++ )
1044                {
1045                    tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<<tmpN)));
1046                    if (tmp < gap)
1047                    {
1048                       ta = tmpA;
1049                       tn = tmpN;
1050                       gap = tmp;
1051                    }
1052                }
1053            fq_opts.fqd.td.exp = tn;
1054            fq_opts.fqd.td.mant = ta;
1055    }
1056
1057    if (p_ContextA)
1058    {
1059        fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTA;
1060        memcpy((void*)&fq_opts.fqd.context_a, p_ContextA, sizeof(t_QmContextA));
1061    }
1062    /* If this FQ will not be used for tx, we can use contextB field */
1063    if (fq_opts.fqd.dest.channel < e_QM_FQ_CHANNEL_FMAN0_SP0)
1064    {
1065            fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTB;
1066            fq_opts.fqd.context_b = aligned_int_from_ptr(p_Fqs[0]);
1067    }
1068    else if (p_ContextB) /* Tx-Queue */
1069    {
1070        fq_opts.we_mask |= QM_INITFQ_WE_CONTEXTB;
1071        memcpy((void*)&fq_opts.fqd.context_b, p_ContextB, sizeof(t_QmContextB));
1072    }
1073
1074    if((flags & QM_FQCTRL_TDE) && (p_CgParams->overheadAccountingLength))
1075        initFqFlag = 0;
1076    else
1077        initFqFlag = (uint32_t)(initParked?0:QMAN_INITFQ_FLAG_SCHED);
1078
1079    if ((err = qman_init_fq(p_QmPortal, p_Fqs[0], initFqFlag, &fq_opts)) != E_OK)
1080    {
1081        for(i=0;i<count;i++)
1082            if (p_Fqs[i])
1083            {
1084                XX_FreeSmart(p_Fqs[i]);
1085                p_Fqs[i] = NULL;
1086            }
1087        RETURN_ERROR(MINOR, err, ("Failed to init Fqs [%d-%d]", fqid, fqid+count-1));
1088    }
1089
1090    /* if both CG OAC and FQ TD are needed, we call qman_init_fq again, this time for the FQ TD only */
1091    if((flags & QM_FQCTRL_TDE) && (p_CgParams->overheadAccountingLength))
1092    {
1093        ASSERT_COND(p_CgParams->fqTailDropThreshold);
1094
1095        fq_opts.we_mask = QM_INITFQ_WE_TDTHRESH;
1096
1097        /* express thresh as ta*2^tn */
1098        gap = (int)p_CgParams->fqTailDropThreshold;
1099        for (tmpA=0 ; tmpA<256; tmpA++ )
1100            for (tmpN=0 ; tmpN<32; tmpN++ )
1101            {
1102                tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<<tmpN)));
1103                if (tmp < gap)
1104                {
1105                   ta = tmpA;
1106                   tn = tmpN;
1107                   gap = tmp;
1108                }
1109            }
1110        fq_opts.fqd.td.exp = tn;
1111        fq_opts.fqd.td.mant = ta;
1112        if ((err = qman_init_fq(p_QmPortal, p_Fqs[0], (uint32_t)(initParked?0:QMAN_INITFQ_FLAG_SCHED), &fq_opts)) != E_OK)
1113        {
1114            for(i=0;i<count;i++)
1115                if (p_Fqs[i])
1116                {
1117                    XX_FreeSmart(p_Fqs[i]);
1118                    p_Fqs[i] = NULL;
1119                }
1120            RETURN_ERROR(MINOR, err, ("Failed to init Fqs"));
1121        }
1122    }
1123
1124
1125    for(i=1;i<count;i++)
1126    {
1127        memcpy(p_Fqs[i], p_Fqs[0], sizeof(struct qman_fq));
1128        p_Fqs[i]->fqid += i;
1129    }
1130
1131    return err;
1132}
1133
1134
1135static t_Error qm_free_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
1136{
1137    uint32_t flags=0;
1138
1139    if (qman_retire_fq(p_QmPortal, p_Fq, &flags, false) != E_OK)
1140        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!"));
1141
1142    if (flags & QMAN_FQ_STATE_CHANGING)
1143        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("fq %d currently in use, will be retired", p_Fq->fqid));
1144
1145    if (flags & QMAN_FQ_STATE_NE)
1146        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed;" \
1147                                          "Frame Queue Not Empty, Need to dequeue"));
1148
1149    if (qman_oos_fq(p_QmPortal, p_Fq) != E_OK)
1150        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!"));
1151
1152    qman_destroy_fq(p_Fq,0);
1153
1154    return E_OK;
1155}
1156
1157static void qman_disable_portal(t_QmPortal *p_QmPortal)
1158{
1159    NCSW_PLOCK(p_QmPortal);
1160    if (!(p_QmPortal->disable_count++))
1161        qm_dqrr_set_maxfill(p_QmPortal->p_LowQmPortal, 0);
1162    PUNLOCK(p_QmPortal);
1163}
1164
1165
1166/* quiesce SDQCR/VDQCR, then drain till h/w wraps up anything it
1167 * was doing (5ms is more than enough to ensure it's done). */
1168static void clean_dqrr_mr(t_QmPortal *p_QmPortal)
1169{
1170    struct qm_dqrr_entry    *p_Dq;
1171    struct qm_mr_entry      *p_Msg;
1172    int                     idle = 0;
1173
1174    qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, 0);
1175    qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, 0);
1176drain_loop:
1177    qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
1178    qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
1179    qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal);
1180    p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
1181    p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal);
1182    if (p_Dq) {
1183        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
1184        qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
1185    }
    if (p_Msg) {
        qm_mr_next(p_QmPortal->p_LowQmPortal);
        qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1);
    }
    if (!p_Dq && !p_Msg) {
        if (++idle < 5) {
            XX_UDelay(1000);
            goto drain_loop;
        }
    } else {
        idle = 0;
        goto drain_loop;
    }
1199}
1200
1201static t_Error qman_create_portal(t_QmPortal *p_QmPortal,
1202                                   uint32_t flags,
1203                                   uint32_t sdqcrFlags,
1204                                   uint8_t  dqrrSize)
1205{
1206    const struct qm_portal_config   *p_Config = &(p_QmPortal->p_LowQmPortal->config);
1207    int                             ret = 0;
1208    t_Error                         err;
1209    uint32_t                        isdr;
1210
1211    if ((err = qm_eqcr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalEqcrCCE)) != E_OK)
1212        RETURN_ERROR(MINOR, err, ("Qman EQCR initialization failed\n"));
1213
1214    if (qm_dqrr_init(p_QmPortal->p_LowQmPortal,
1215                     sdqcrFlags ? e_QmPortalDequeuePushMode : e_QmPortalDequeuePullMode,
1216                     e_QmPortalPVB,
1217                     (flags & QMAN_PORTAL_FLAG_DCA) ? e_QmPortalDqrrDCA : e_QmPortalDqrrCCI,
1218                     dqrrSize,
1219                     (flags & QMAN_PORTAL_FLAG_RSTASH) ? 1 : 0,
1220                     (flags & QMAN_PORTAL_FLAG_DSTASH) ? 1 : 0)) {
1221        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR initialization failed"));
1222        goto fail_dqrr;
1223    }
1224
1225    if (qm_mr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalMrCCI)) {
1226        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR initialization failed"));
1227        goto fail_mr;
1228    }
1229    if (qm_mc_init(p_QmPortal->p_LowQmPortal)) {
1230        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MC initialization failed"));
1231        goto fail_mc;
1232    }
1233    if (qm_isr_init(p_QmPortal->p_LowQmPortal)) {
1234        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("ISR initialization failed"));
1235        goto fail_isr;
1236    }
1237    /* static interrupt-gating controls */
1238    qm_dqrr_set_ithresh(p_QmPortal->p_LowQmPortal, 12);
1239    qm_mr_set_ithresh(p_QmPortal->p_LowQmPortal, 4);
1240    qm_isr_set_iperiod(p_QmPortal->p_LowQmPortal, 100);
1241    p_QmPortal->options = flags;
1242    isdr = 0xffffffff;
1243    qm_isr_status_clear(p_QmPortal->p_LowQmPortal, 0xffffffff);
1244    qm_isr_enable_write(p_QmPortal->p_LowQmPortal, DEFAULT_portalExceptions);
1245    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
1246    if (flags & QMAN_PORTAL_FLAG_IRQ)
1247    {
1248        XX_SetIntr(p_Config->irq, portal_isr, p_QmPortal);
1249        XX_EnableIntr(p_Config->irq);
1250        qm_isr_uninhibit(p_QmPortal->p_LowQmPortal);
1251    } else
1252        /* without IRQ, we can't block */
1253        flags &= ~QMAN_PORTAL_FLAG_WAIT;
1254    /* Need EQCR to be empty before continuing */
1255    isdr ^= QM_PIRQ_EQCI;
1256    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
1257    ret = qm_eqcr_get_fill(p_QmPortal->p_LowQmPortal);
1258    if (ret) {
1259        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("EQCR unclean"));
1260        goto fail_eqcr_empty;
1261    }
1262    isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
1263    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
1264    if (qm_dqrr_current(p_QmPortal->p_LowQmPortal) != NULL)
1265    {
1266        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR unclean"));
        goto fail_dqrr_mr_empty;
1268    }
1269    if (qm_mr_current(p_QmPortal->p_LowQmPortal) != NULL)
1270    {
1271        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR unclean"));
        goto fail_dqrr_mr_empty;
1273    }
1274    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, 0);
1275    qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags);
1276    return E_OK;
1277fail_dqrr_mr_empty:
1278fail_eqcr_empty:
1279    qm_isr_finish(p_QmPortal->p_LowQmPortal);
1280fail_isr:
1281    qm_mc_finish(p_QmPortal->p_LowQmPortal);
1282fail_mc:
1283    qm_mr_finish(p_QmPortal->p_LowQmPortal);
1284fail_mr:
1285    qm_dqrr_finish(p_QmPortal->p_LowQmPortal);
1286fail_dqrr:
1287    qm_eqcr_finish(p_QmPortal->p_LowQmPortal);
1288    return ERROR_CODE(E_INVALID_STATE);
1289}
1290
1291static void qman_destroy_portal(t_QmPortal *p_QmPortal)
1292{
1293    /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
1294     * something related to QM_PIRQ_EQCI, this may need fixing. */
1295    qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
1296    if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ)
1297    {
1298        XX_DisableIntr(p_QmPortal->p_LowQmPortal->config.irq);
1299        XX_FreeIntr(p_QmPortal->p_LowQmPortal->config.irq);
1300    }
1301    qm_isr_finish(p_QmPortal->p_LowQmPortal);
1302    qm_mc_finish(p_QmPortal->p_LowQmPortal);
1303    qm_mr_finish(p_QmPortal->p_LowQmPortal);
1304    qm_dqrr_finish(p_QmPortal->p_LowQmPortal);
1305    qm_eqcr_finish(p_QmPortal->p_LowQmPortal);
1306}
1307
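/*
 * Begin an EQCR entry: refresh our view of the consumer index (prefetch at the
 * availability threshold, full update below it) before asking the low-level
 * portal for the next free enqueue slot; callers treat a NULL return as a
 * full ring.
 */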
1308static inline struct qm_eqcr_entry *try_eq_start(t_QmPortal *p_QmPortal)
1309{
1310    struct qm_eqcr_entry    *p_Eq;
1311    uint8_t                 avail;
1312
1313    avail = qm_eqcr_get_avail(p_QmPortal->p_LowQmPortal);
1314    if (avail == EQCR_THRESH)
1315        qmPortalEqcrCcePrefetch(p_QmPortal->p_LowQmPortal);
1316    else if (avail < EQCR_THRESH)
1317            qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
1318    p_Eq = qm_eqcr_start(p_QmPortal->p_LowQmPortal);
1319
1320    return p_Eq;
1321}
1322
1323
1324static t_Error qman_orp_update(t_QmPortal   *p_QmPortal,
1325                               uint32_t     orpId,
1326                               uint16_t     orpSeqnum,
1327                               uint32_t     flags)
1328{
1329    struct qm_eqcr_entry *p_Eq;
1330
1331    NCSW_PLOCK(p_QmPortal);
1332    p_Eq = try_eq_start(p_QmPortal);
1333    if (!p_Eq)
1334    {
1335        PUNLOCK(p_QmPortal);
1336        return ERROR_CODE(E_BUSY);
1337    }
1338
1339    if (flags & QMAN_ENQUEUE_FLAG_NESN)
1340        orpSeqnum |= QM_EQCR_SEQNUM_NESN;
1341    else
1342        /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */
1343        orpSeqnum &= ~QM_EQCR_SEQNUM_NESN;
1344    p_Eq->seqnum  = orpSeqnum;
1345    p_Eq->orp     = orpId;
    qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal, (uint8_t)QM_EQCR_VERB_ORP);
1347
1348    PUNLOCK(p_QmPortal);
1349    return E_OK;
1350}
1351
1352static __inline__ t_Error CheckStashParams(t_QmFqrParams *p_QmFqrParams)
1353{
1354    ASSERT_COND(p_QmFqrParams);
1355
1356    if (p_QmFqrParams->stashingParams.frameAnnotationSize > QM_CONTEXTA_MAX_STASH_SIZE)
1357        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Annotation Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
1358    if (p_QmFqrParams->stashingParams.frameDataSize > QM_CONTEXTA_MAX_STASH_SIZE)
1359        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Data Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
1360    if (p_QmFqrParams->stashingParams.fqContextSize > QM_CONTEXTA_MAX_STASH_SIZE)
1361        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Context Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE));
1362    if (p_QmFqrParams->stashingParams.fqContextSize)
1363    {
        if (!p_QmFqrParams->stashingParams.fqContextAddr)
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address must be given"));
        if (!IS_ALIGNED(p_QmFqrParams->stashingParams.fqContextAddr, CACHELINE_SIZE))
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address must be aligned to %d", CACHELINE_SIZE));
        if (p_QmFqrParams->stashingParams.fqContextAddr & 0xffffff0000000000LL)
            RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address must fit in 40 bits"));
1370    }
1371
1372    return E_OK;
1373}
1374
1375static t_Error QmPortalRegisterCg(t_Handle h_QmPortal, t_Handle h_QmCg, uint8_t  cgId)
1376{
1377    t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1378
1379    /* cgrs[0] is the mask of registered CG's*/
1380    if(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32)))
1381        RETURN_ERROR(MINOR, E_BUSY, ("CG already used"));
1382
1383    p_QmPortal->cgrs[0].q.__state[cgId/32] |=  0x80000000 >> (cgId % 32);
1384    p_QmPortal->cgsHandles[cgId] = h_QmCg;
1385
1386    return E_OK;
1387}
1388
1389static t_Error QmPortalUnregisterCg(t_Handle h_QmPortal, uint8_t  cgId)
1390{
1391    t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1392
1393    /* cgrs[0] is the mask of registered CG's*/
1394    if(!(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32))))
1395        RETURN_ERROR(MINOR, E_BUSY, ("CG is not in use"));
1396
    p_QmPortal->cgrs[0].q.__state[cgId/32] &= ~(0x80000000 >> (cgId % 32));
1398    p_QmPortal->cgsHandles[cgId] = NULL;
1399
1400    return E_OK;
1401}
1402
1403static e_DpaaSwPortal QmPortalGetSwPortalId(t_Handle h_QmPortal)
1404{
1405    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;
1406
1407    return (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu;
1408}
1409
1410static t_Error CalcWredCurve(t_QmCgWredCurve *p_WredCurve, uint32_t  *p_CurveWord)
1411{
1412    uint32_t    maxP, roundDown, roundUp, tmpA, tmpN;
1413    uint32_t    ma=0, mn=0, slope, sa=0, sn=0, pn;
1414    int         pres = 1000;
1415    int         gap, tmp;
1416
1417/*  TODO - change maxTh to uint64_t?
1418   if(p_WredCurve->maxTh > (1<<39))
1419        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh is not in range"));*/
1420
1421    /* express maxTh as ma*2^mn */
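    /* e.g. maxTh = 12288 can be expressed exactly as 3*2^12 (illustrative;
       the search below picks whichever (ma, mn) pair minimizes the error) */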
1422     gap = (int)p_WredCurve->maxTh;
1423     for (tmpA=0 ; tmpA<256; tmpA++ )
1424         for (tmpN=0 ; tmpN<32; tmpN++ )
1425         {
1426             tmp = ABS((int)(p_WredCurve->maxTh - tmpA*(1<<tmpN)));
1427             if (tmp < gap)
1428             {
1429                ma = tmpA;
1430                mn = tmpN;
1431                gap = tmp;
1432             }
1433         }
1434     ASSERT_COND(ma <256);
1435     ASSERT_COND(mn <32);
1436     p_WredCurve->maxTh = ma*(1<<mn);
1437
1438     if(p_WredCurve->maxTh <= p_WredCurve->minTh)
1439        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh must be larger than minTh"));
1440     if(p_WredCurve->probabilityDenominator > 64)
1441        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("probabilityDenominator must be in the range 1-64"));
1442
1443    /* first we translate from Cisco probabilityDenominator
1444       to 256 fixed denominator, result must be divisible by 4. */
1445    /* we multiply by a fixed value to get better accuracy (without
1446       using floating point) */
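    /*
     * Worked example (illustrative): probabilityDenominator = 64 gives
     * maxP = 256*1000/64 = 4000, already a multiple of 4*pres, so after
     * dividing by pres we get maxP = 4 and pn = 4/4 - 1 = 0, i.e. a drop
     * probability of 4/256 = 1/64.  Likewise probabilityDenominator = 2
     * gives maxP = 128 and pn = 31, i.e. 128/256 = 1/2.
     */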
1447    maxP = (uint32_t)(256*1000/p_WredCurve->probabilityDenominator);
1448    if (maxP % (4*pres))
1449    {
1450        roundDown  = maxP - (maxP % (4*pres));
1451        roundUp = roundDown + 4*pres;
1452        if((roundUp - maxP) > (maxP - roundDown))
1453            maxP = roundDown;
1454        else
1455            maxP = roundUp;
1456    }
1457    maxP = maxP/pres;
1458    ASSERT_COND(maxP <= 256);
1459    pn = (uint8_t)(maxP/4 - 1);
1460
1461    if(maxP >= (p_WredCurve->maxTh - p_WredCurve->minTh))
1462        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Due to probabilityDenominator selected, maxTh-minTh must be larger than %d", maxP));
1463
1464    pres = 1000000;
1465    slope = maxP*pres/(p_WredCurve->maxTh - p_WredCurve->minTh);
1466    /* express slope as sa/2^sn */
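    /*
     * Worked example (illustrative): with maxP = 128 and
     * maxTh - minTh = 2048 the slope value is 128*pres/2048 = 62500, which
     * is matched exactly by sa = 64, sn = 10 (64/2^10 = 1/16 = 128/2048).
     */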
1467    gap = (int)slope;
1468    for (tmpA=(uint32_t)(64*pres) ; tmpA<128*pres; tmpA += pres )
1469        for (tmpN=7 ; tmpN<64; tmpN++ )
1470        {
1471            tmp = ABS((int)(slope - tmpA/((uint64_t)1<<tmpN)));
1472            if (tmp < gap)
1473            {
1474               sa = tmpA;
1475               sn = tmpN;
1476               gap = tmp;
1477            }
1478        }
1479    sa = sa/pres;
1480    ASSERT_COND(sa<128 && sa>=64);
1482    ASSERT_COND(sn<64 && sn>=7);
1483
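    /*
     * Pack the curve word; the field widths follow from the asserts above:
     * ma occupies bits 31-24, mn bits 23-19, sa bits 18-12, sn bits 11-6,
     * and pn bits 5-0.
     */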
1484    *p_CurveWord = ((ma << 24) |
1485                    (mn << 19) |
1486                    (sa << 12) |
1487                    (sn << 6) |
1488                    pn);
1489
1490    return E_OK;
1491}
1492
1493static t_Error QmPortalPullFrame(t_Handle h_QmPortal, uint32_t pdqcr, t_DpaaFD *p_Frame)
1494{
1495    t_QmPortal              *p_QmPortal = (t_QmPortal *)h_QmPortal;
1496    struct qm_dqrr_entry    *p_Dq;
1497    struct qman_fq          *p_Fq;
1498    int                     prefetch;
1499    uint32_t                *p_Dst, *p_Src;
1500
1501    ASSERT_COND(p_QmPortal);
1502    ASSERT_COND(p_Frame);
1503    SANITY_CHECK_RETURN_ERROR(p_QmPortal->pullMode, E_INVALID_STATE);
1504
1505    NCSW_PLOCK(p_QmPortal);
1506
1507    qm_dqrr_pdqcr_set(p_QmPortal->p_LowQmPortal, pdqcr);
1508    mb();
1509    while (qm_dqrr_pdqcr_get(p_QmPortal->p_LowQmPortal)) ;
1510
1511    prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);
1512    while(TRUE)
1513    {
1514        if (prefetch)
1515            qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
1516        qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
1517        p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
1518        if (!p_Dq)
1519            continue;
1520        p_Fq = ptr_from_aligned_int(p_Dq->contextB);
1521        ASSERT_COND(p_Dq->fqid);
1522        p_Dst = (uint32_t *)p_Frame;
1523        p_Src = (uint32_t *)&p_Dq->fd;
1524        p_Dst[0] = p_Src[0];
1525        p_Dst[1] = p_Src[1];
1526        p_Dst[2] = p_Src[2];
1527        p_Dst[3] = p_Src[3];
1528        if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA)
1529        {
1530            qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
1531                                       p_Dq,
1532                                       false);
1533            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
1534        }
1535        else
1536        {
1537            qm_dqrr_next(p_QmPortal->p_LowQmPortal);
1538            qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
1539        }
1540        break;
1541    }
1542
1543    PUNLOCK(p_QmPortal);
1544
1545    if (!(p_Dq->stat & QM_DQRR_STAT_FD_VALID))
1546        return ERROR_CODE(E_EMPTY);
1547
1548    return E_OK;
1549}
1550
1551
1552/****************************************/
1553/*       API Init unit functions        */
1554/****************************************/
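/*
 * Typical portal bring-up sketch (illustrative only; the parameter values and
 * callback names used here are assumptions, not requirements of the API):
 *
 *     t_QmPortalParam param;
 *     t_Handle        h_Portal;
 *
 *     memset(&param, 0, sizeof(param));
 *     param.ceBaseAddress   = ceAddr;        (cache-enabled portal window)
 *     param.ciBaseAddress   = ciAddr;        (cache-inhibited portal window)
 *     param.h_Qm            = h_Qm;
 *     param.swPortalId      = 0;
 *     param.irq             = NO_IRQ;        (polled operation)
 *     param.f_DfltFrame     = MyDfltFrameCB;
 *     param.f_RejectedFrame = MyRejectedFrameCB;
 *     param.h_App           = h_MyApp;
 *
 *     h_Portal = QM_PORTAL_Config(&param);
 *     if (h_Portal)
 *     {
 *         QM_PORTAL_ConfigPullMode(h_Portal, FALSE);
 *         if (QM_PORTAL_Init(h_Portal) != E_OK)
 *             QM_PORTAL_Free(h_Portal);
 *     }
 */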
1555t_Handle QM_PORTAL_Config(t_QmPortalParam *p_QmPortalParam)
1556{
1557    t_QmPortal          *p_QmPortal;
1558    uint32_t            i;
1559
1560    SANITY_CHECK_RETURN_VALUE(p_QmPortalParam, E_INVALID_HANDLE, NULL);
1561    SANITY_CHECK_RETURN_VALUE(p_QmPortalParam->swPortalId < DPAA_MAX_NUM_OF_SW_PORTALS, E_INVALID_VALUE, 0);
1562
1563    p_QmPortal = (t_QmPortal *)XX_Malloc(sizeof(t_QmPortal));
1564    if (!p_QmPortal)
1565    {
1566        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal obj!!!"));
1567        return NULL;
1568    }
1569    memset(p_QmPortal, 0, sizeof(t_QmPortal));
1570
1571    p_QmPortal->p_LowQmPortal = (struct qm_portal *)XX_Malloc(sizeof(struct qm_portal));
1572    if (!p_QmPortal->p_LowQmPortal)
1573    {
1574        XX_Free(p_QmPortal);
1575        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Low qm p_QmPortal obj!!!"));
1576        return NULL;
1577    }
1578    memset(p_QmPortal->p_LowQmPortal, 0, sizeof(struct qm_portal));
1579
1580    p_QmPortal->p_QmPortalDriverParams = (t_QmPortalDriverParams *)XX_Malloc(sizeof(t_QmPortalDriverParams));
1581    if (!p_QmPortal->p_QmPortalDriverParams)
1582    {
1583        XX_Free(p_QmPortal->p_LowQmPortal);
1584        XX_Free(p_QmPortal);
1585        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal driver parameters"));
1586        return NULL;
1587    }
1588    memset(p_QmPortal->p_QmPortalDriverParams, 0, sizeof(t_QmPortalDriverParams));
1589
1590    p_QmPortal->p_LowQmPortal->addr.addr_ce = UINT_TO_PTR(p_QmPortalParam->ceBaseAddress);
1591    p_QmPortal->p_LowQmPortal->addr.addr_ci = UINT_TO_PTR(p_QmPortalParam->ciBaseAddress);
1592    p_QmPortal->p_LowQmPortal->config.irq = p_QmPortalParam->irq;
1593    p_QmPortal->p_LowQmPortal->config.bound = 0;
1594    p_QmPortal->p_LowQmPortal->config.cpu = (int)p_QmPortalParam->swPortalId;
1595    p_QmPortal->p_LowQmPortal->config.channel = (e_QmFQChannel)(e_QM_FQ_CHANNEL_SWPORTAL0 + p_QmPortalParam->swPortalId);
1596    p_QmPortal->p_LowQmPortal->bind_lock = XX_InitSpinlock();
1597
1598    p_QmPortal->h_Qm                = p_QmPortalParam->h_Qm;
1599    p_QmPortal->f_DfltFrame         = p_QmPortalParam->f_DfltFrame;
1600    p_QmPortal->f_RejectedFrame     = p_QmPortalParam->f_RejectedFrame;
1601    p_QmPortal->h_App               = p_QmPortalParam->h_App;
1602
1603    p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset           = p_QmPortalParam->fdLiodnOffset;
1604    p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode          = DEFAULT_dequeueDcaMode;
1605    p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames  = DEFAULT_dequeueUpToThreeFrames;
1606    p_QmPortal->p_QmPortalDriverParams->commandType             = DEFAULT_dequeueCommandType;
1607    p_QmPortal->p_QmPortalDriverParams->userToken               = DEFAULT_dequeueUserToken;
1608    p_QmPortal->p_QmPortalDriverParams->specifiedWq             = DEFAULT_dequeueSpecifiedWq;
1609    p_QmPortal->p_QmPortalDriverParams->dedicatedChannel        = DEFAULT_dequeueDedicatedChannel;
1610    p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels =
1611        DEFAULT_dequeueDedicatedChannelHasPrecedenceOverPoolChannels;
1612    p_QmPortal->p_QmPortalDriverParams->poolChannelId           = DEFAULT_dequeuePoolChannelId;
1613    p_QmPortal->p_QmPortalDriverParams->wqId                    = DEFAULT_dequeueWqId;
1614    for (i=0;i<QM_MAX_NUM_OF_POOL_CHANNELS;i++)
1615        p_QmPortal->p_QmPortalDriverParams->poolChannels[i] = FALSE;
1616    p_QmPortal->p_QmPortalDriverParams->dqrrSize                = DEFAULT_dqrrSize;
1617    p_QmPortal->p_QmPortalDriverParams->pullMode                = DEFAULT_pullMode;
1618
1619    return p_QmPortal;
1620}
1621
1622t_Error QM_PORTAL_Init(t_Handle h_QmPortal)
1623{
1624    t_QmPortal                          *p_QmPortal = (t_QmPortal *)h_QmPortal;
1625    uint32_t                            i, flags=0, sdqcrFlags=0;
1626    t_Error                             err;
1627    t_QmInterModulePortalInitParams     qmParams;
1628
1629    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1630    SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_INVALID_HANDLE);
1631
1632    memset(&qmParams, 0, sizeof(qmParams));
1633    qmParams.portalId       = (uint8_t)p_QmPortal->p_LowQmPortal->config.cpu;
1634    qmParams.liodn          = p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset;
1635    qmParams.dqrrLiodn      = p_QmPortal->p_QmPortalDriverParams->dqrrLiodn;
1636    qmParams.fdFqLiodn      = p_QmPortal->p_QmPortalDriverParams->fdFqLiodn;
1637    qmParams.stashDestQueue = p_QmPortal->p_QmPortalDriverParams->stashDestQueue;
1638    if ((err = QmGetSetPortalParams(p_QmPortal->h_Qm, &qmParams)) != E_OK)
1639        RETURN_ERROR(MAJOR, err, NO_MSG);
1640
1641    flags = (uint32_t)(((p_QmPortal->p_LowQmPortal->config.irq == NO_IRQ) ?
1642            0 :
1643            (QMAN_PORTAL_FLAG_IRQ |
1644             QMAN_PORTAL_FLAG_IRQ_FAST |
1645             QMAN_PORTAL_FLAG_IRQ_SLOW)));
1646    flags |= ((p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode) ? QMAN_PORTAL_FLAG_DCA : 0);
1647    flags |= (p_QmPortal->p_QmPortalDriverParams->dqrr)?QMAN_PORTAL_FLAG_RSTASH:0;
1648    flags |= (p_QmPortal->p_QmPortalDriverParams->fdFq)?QMAN_PORTAL_FLAG_DSTASH:0;
1649
1650    p_QmPortal->pullMode = p_QmPortal->p_QmPortalDriverParams->pullMode;
1651    if (!p_QmPortal->pullMode)
1652    {
1653        sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames) ? QM_SDQCR_COUNT_UPTO3 : QM_SDQCR_COUNT_EXACT1;
1654        sdqcrFlags |= QM_SDQCR_TOKEN_SET(p_QmPortal->p_QmPortalDriverParams->userToken);
1655        sdqcrFlags |= QM_SDQCR_TYPE_SET(p_QmPortal->p_QmPortalDriverParams->commandType);
1656        if (!p_QmPortal->p_QmPortalDriverParams->specifiedWq)
1657        {
1658            /* sdqcrFlags |= QM_SDQCR_SOURCE_CHANNELS;*/ /* removed as the macro is '0' */
1659            sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels) ? QM_SDQCR_DEDICATED_PRECEDENCE : 0;
1660            sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ? QM_SDQCR_CHANNELS_DEDICATED : 0;
1661            for (i=0;i<QM_MAX_NUM_OF_POOL_CHANNELS;i++)
1662                sdqcrFlags |= ((p_QmPortal->p_QmPortalDriverParams->poolChannels[i]) ?
1663                     QM_SDQCR_CHANNELS_POOL(i+1) : 0);
1664        }
1665        else
1666        {
1667            sdqcrFlags |= QM_SDQCR_SOURCE_SPECIFICWQ;
1668            sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ?
1669                            QM_SDQCR_SPECIFICWQ_DEDICATED : QM_SDQCR_SPECIFICWQ_POOL(p_QmPortal->p_QmPortalDriverParams->poolChannelId);
1670            sdqcrFlags |= QM_SDQCR_SPECIFICWQ_WQ(p_QmPortal->p_QmPortalDriverParams->wqId);
1671        }
1672    }
1673    if ((flags & QMAN_PORTAL_FLAG_RSTASH) && (flags & QMAN_PORTAL_FLAG_DCA))
1674        p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingDcaOptimized;
1675    else if ((flags & QMAN_PORTAL_FLAG_RSTASH) && !(flags & QMAN_PORTAL_FLAG_DCA))
1676        p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingOptimized;
1677    else
1678        p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRing;
1679
1680    if ((!p_QmPortal->f_RejectedFrame) || (!p_QmPortal->f_DfltFrame))
1681        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_RejectedFrame or f_DfltFrame callback not provided"));
1682
1683    p_QmPortal->p_NullCB = (struct qman_fq_cb *)XX_Malloc(sizeof(struct qman_fq_cb));
1684    if (!p_QmPortal->p_NullCB)
1685        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FQ Null CB obj!!!"));
1686    memset(p_QmPortal->p_NullCB, 0, sizeof(struct qman_fq_cb));
1687
1688    p_QmPortal->p_NullCB->dqrr      = p_QmPortal->f_DfltFrame;
1689    p_QmPortal->p_NullCB->ern       = p_QmPortal->f_RejectedFrame;
1690    p_QmPortal->p_NullCB->dc_ern    = p_QmPortal->p_NullCB->fqs = null_cb_mr;
1691
1692    if (qman_create_portal(p_QmPortal, flags, sdqcrFlags, p_QmPortal->p_QmPortalDriverParams->dqrrSize) != E_OK)
1693    {
1694        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("create portal failed"));
1695    }
1696
1697    QmSetPortalHandle(p_QmPortal->h_Qm, (t_Handle)p_QmPortal, (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu);
1698    XX_Free(p_QmPortal->p_QmPortalDriverParams);
1699    p_QmPortal->p_QmPortalDriverParams = NULL;
1700
1701    DBG(TRACE, ("Qman-Portal %d @ %p:%p",
1702                p_QmPortal->p_LowQmPortal->config.cpu,
1703                p_QmPortal->p_LowQmPortal->addr.addr_ce,
1704                p_QmPortal->p_LowQmPortal->addr.addr_ci
1705                ));
1706
1707    DBG(TRACE, ("Qman-Portal %d phys @ 0x%016llx:0x%016llx",
1708                p_QmPortal->p_LowQmPortal->config.cpu,
1709                (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ce),
1710                (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ci)
1711                ));
1712
1713    return E_OK;
1714}
1715
1716t_Error QM_PORTAL_Free(t_Handle h_QmPortal)
1717{
1718    t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1719
1720    if (!p_QmPortal)
1721       return ERROR_CODE(E_INVALID_HANDLE);
1722
1723    ASSERT_COND(p_QmPortal->p_LowQmPortal);
1724    QmSetPortalHandle(p_QmPortal->h_Qm, NULL, (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu);
1725    qman_destroy_portal(p_QmPortal);
1726    if (p_QmPortal->p_NullCB)
1727        XX_Free(p_QmPortal->p_NullCB);
1728
1729    if (p_QmPortal->p_LowQmPortal->bind_lock)
1730        XX_FreeSpinlock(p_QmPortal->p_LowQmPortal->bind_lock);
1731    if(p_QmPortal->p_QmPortalDriverParams)
1732        XX_Free(p_QmPortal->p_QmPortalDriverParams);
1733    XX_Free(p_QmPortal->p_LowQmPortal);
1734    XX_Free(p_QmPortal);
1735
1736    return E_OK;
1737}
1738
1739t_Error QM_PORTAL_ConfigDcaMode(t_Handle h_QmPortal, bool enable)
1740{
1741    t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1742
1743    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1744    SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_INVALID_HANDLE);
1745
1746    p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode = enable;
1747
1748    return E_OK;
1749}
1750
1751t_Error QM_PORTAL_ConfigStash(t_Handle h_QmPortal, t_QmPortalStashParam *p_StashParams)
1752{
1753    t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1754
1755    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1756    SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_NULL_POINTER);
1757    SANITY_CHECK_RETURN_ERROR(p_StashParams, E_NULL_POINTER);
1758
1759    p_QmPortal->p_QmPortalDriverParams->stashDestQueue  = p_StashParams->stashDestQueue;
1760    p_QmPortal->p_QmPortalDriverParams->dqrrLiodn       = p_StashParams->dqrrLiodn;
1761    p_QmPortal->p_QmPortalDriverParams->fdFqLiodn       = p_StashParams->fdFqLiodn;
1762    p_QmPortal->p_QmPortalDriverParams->eqcr            = p_StashParams->eqcr;
1763    p_QmPortal->p_QmPortalDriverParams->eqcrHighPri     = p_StashParams->eqcrHighPri;
1764    p_QmPortal->p_QmPortalDriverParams->dqrr            = p_StashParams->dqrr;
1765    p_QmPortal->p_QmPortalDriverParams->dqrrHighPri     = p_StashParams->dqrrHighPri;
1766    p_QmPortal->p_QmPortalDriverParams->fdFq            = p_StashParams->fdFq;
1767    p_QmPortal->p_QmPortalDriverParams->fdFqHighPri     = p_StashParams->fdFqHighPri;
1768    p_QmPortal->p_QmPortalDriverParams->fdFqDrop        = p_StashParams->fdFqDrop;
1769
1770    return E_OK;
1771}
1772
1773
1774t_Error QM_PORTAL_ConfigPullMode(t_Handle h_QmPortal, bool pullMode)
1775{
1776    t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1777
1778    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1779    SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_NULL_POINTER);
1780
1781    p_QmPortal->p_QmPortalDriverParams->pullMode  = pullMode;
1782
1783    return E_OK;
1784}
1785
1786t_Error QM_PORTAL_AddPoolChannel(t_Handle h_QmPortal, uint8_t poolChannelId)
1787{
1788    t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1789    uint32_t    sdqcrFlags;
1790
1791    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1792    SANITY_CHECK_RETURN_ERROR((poolChannelId < QM_MAX_NUM_OF_POOL_CHANNELS), E_INVALID_VALUE);
1793
1794    sdqcrFlags = qm_dqrr_sdqcr_get(p_QmPortal->p_LowQmPortal);
1795    sdqcrFlags |= QM_SDQCR_CHANNELS_POOL(poolChannelId+1);
1796    qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags);
1797
1798    return E_OK;
1799}
1800
1801t_Error QM_PORTAL_Poll(t_Handle h_QmPortal, e_QmPortalPollSource source)
1802{
1803    t_QmPortal  *p_QmPortal = (t_QmPortal *)h_QmPortal;
1804
1805    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1806
1807    NCSW_PLOCK(p_QmPortal);
1808
1809    if ((source == e_QM_PORTAL_POLL_SOURCE_CONTROL_FRAMES) ||
1810        (source == e_QM_PORTAL_POLL_SOURCE_BOTH))
1811    {
1812        uint32_t is = qm_isr_status_read(p_QmPortal->p_LowQmPortal);
1813        uint32_t active = LoopMessageRing(p_QmPortal, is);
1814        if (active)
1815            qm_isr_status_clear(p_QmPortal->p_LowQmPortal, active);
1816    }
1817    if ((source == e_QM_PORTAL_POLL_SOURCE_DATA_FRAMES) ||
1818        (source == e_QM_PORTAL_POLL_SOURCE_BOTH))
1819        p_QmPortal->f_LoopDequeueRingCB((t_Handle)p_QmPortal);
1820
1821    PUNLOCK(p_QmPortal);
1822
1823    return E_OK;
1824}
1825
1826t_Error QM_PORTAL_PollFrame(t_Handle h_QmPortal, t_QmPortalFrameInfo *p_frameInfo)
1827{
1828    t_QmPortal              *p_QmPortal     = (t_QmPortal *)h_QmPortal;
1829    struct qm_dqrr_entry    *p_Dq;
1830    struct qman_fq          *p_Fq;
1831    int                     prefetch;
1832
1833    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
1834    SANITY_CHECK_RETURN_ERROR(p_frameInfo, E_NULL_POINTER);
1835
1836    NCSW_PLOCK(p_QmPortal);
1837
1838    prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);
1839    if (prefetch)
1840        qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
1841    qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
1842    p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
1843    if (!p_Dq)
1844    {
1845        PUNLOCK(p_QmPortal);
1846        return ERROR_CODE(E_EMPTY);
1847    }
1848    p_Fq = ptr_from_aligned_int(p_Dq->contextB);
1849    ASSERT_COND(p_Dq->fqid);
1850    if (p_Fq)
1851    {
1852        p_frameInfo->h_App = p_Fq->h_App;
1853        p_frameInfo->h_QmFqr = p_Fq->h_QmFqr;
1854        p_frameInfo->fqidOffset = p_Fq->fqidOffset;
1855        memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD));
1856    }
1857    else
1858    {
1859        p_frameInfo->h_App = p_QmPortal->h_App;
1860        p_frameInfo->h_QmFqr = NULL;
1861        p_frameInfo->fqidOffset = p_Dq->fqid;
1862        memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD));
1863    }
1864    if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) {
1865        qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal,
1866                                   p_Dq,
1867                                   false);
1868        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
1869    } else {
1870        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
1871        qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
1872    }
1873
1874    PUNLOCK(p_QmPortal);
1875
1876    return E_OK;
1877}
1878
1879
1880t_Handle QM_FQR_Create(t_QmFqrParams *p_QmFqrParams)
1881{
1882    t_QmFqr             *p_QmFqr;
1883    uint32_t            i, flags = 0;
1884    u_QmFqdContextA     cnxtA;
1885
1886    SANITY_CHECK_RETURN_VALUE(p_QmFqrParams, E_INVALID_HANDLE, NULL);
1887    SANITY_CHECK_RETURN_VALUE(p_QmFqrParams->h_Qm, E_INVALID_HANDLE, NULL);
1888
1889    if (p_QmFqrParams->shadowMode &&
1890        (!p_QmFqrParams->useForce || p_QmFqrParams->numOfFqids != 1))
1891    {
1892        REPORT_ERROR(MAJOR, E_CONFLICT, ("shadowMode must be used with useForce and numOfFqids==1!!!"));
1893        return NULL;
1894    }
1895
1896    p_QmFqr = (t_QmFqr *)XX_MallocSmart(sizeof(t_QmFqr), 0, 64);
1897    if (!p_QmFqr)
1898    {
1899        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQR obj!!!"));
1900        return NULL;
1901    }
1902    memset(p_QmFqr, 0, sizeof(t_QmFqr));
1903
1904    p_QmFqr->h_Qm       = p_QmFqrParams->h_Qm;
1905    p_QmFqr->h_QmPortal = p_QmFqrParams->h_QmPortal;
1906    p_QmFqr->shadowMode = p_QmFqrParams->shadowMode;
1907    p_QmFqr->numOfFqids = (p_QmFqrParams->useForce && !p_QmFqrParams->numOfFqids) ?
1908                              1 : p_QmFqrParams->numOfFqids;
1909
1910    if (!p_QmFqr->h_QmPortal)
1911    {
1912        p_QmFqr->h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
1913        SANITY_CHECK_RETURN_VALUE(p_QmFqr->h_QmPortal, E_INVALID_HANDLE, NULL);
1914    }
1915
1916    p_QmFqr->p_Fqs = (struct qman_fq **)XX_Malloc(sizeof(struct qman_fq *) * p_QmFqr->numOfFqids);
1917    if (!p_QmFqr->p_Fqs)
1918    {
1919        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQs obj!!!"));
1920        QM_FQR_Free(p_QmFqr);
1921        return NULL;
1922    }
1923    memset(p_QmFqr->p_Fqs, 0, sizeof(struct qman_fq *) * p_QmFqr->numOfFqids);
1924
1925    if (p_QmFqr->shadowMode)
1926    {
1927        struct qman_fq          *p_Fq = NULL;
1928
1929        p_QmFqr->fqidBase = p_QmFqrParams->qs.frcQ.fqid;
1930        p_Fq = (struct qman_fq *)XX_MallocSmart(sizeof(struct qman_fq), 0, 64);
1931        if (!p_Fq)
1932        {
1933            REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!"));
1934            QM_FQR_Free(p_QmFqr);
1935            return NULL;
1936        }
1937        memset(p_Fq, 0, sizeof(struct qman_fq));
1938        p_Fq->cb.dqrr     = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_DfltFrame;
1939        p_Fq->cb.ern      = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_RejectedFrame;
1940        p_Fq->cb.dc_ern   = cb_ern_dcErn;
1941        p_Fq->cb.fqs      = cb_fqs;
1942        p_Fq->h_App       = ((t_QmPortal*)p_QmFqr->h_QmPortal)->h_App;
1943        p_Fq->h_QmFqr     = p_QmFqr;
1944        p_Fq->state       = qman_fq_state_sched;
1945        p_Fq->fqid        = p_QmFqr->fqidBase;
1946        p_QmFqr->p_Fqs[0] = p_Fq;
1947    }
1948    else
1949    {
1950        p_QmFqr->channel    = p_QmFqrParams->channel;
1951        p_QmFqr->workQueue  = p_QmFqrParams->wq;
1952
1953        p_QmFqr->fqidBase = QmFqidGet(p_QmFqr->h_Qm,
1954                                      p_QmFqr->numOfFqids,
1955                                      p_QmFqrParams->qs.nonFrcQs.align,
1956                                      p_QmFqrParams->useForce,
1957                                      p_QmFqrParams->qs.frcQ.fqid);
1958        if (p_QmFqr->fqidBase == (uint32_t)ILLEGAL_BASE)
1959        {
1960            REPORT_ERROR(CRITICAL,E_INVALID_STATE,("can't allocate a fqid"));
1961            QM_FQR_Free(p_QmFqr);
1962            return NULL;
1963        }
1964
1965        if(p_QmFqrParams->congestionAvoidanceEnable &&
1966            (p_QmFqrParams->congestionAvoidanceParams.h_QmCg == NULL) &&
1967            (p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold == 0))
1968        {
1969            REPORT_ERROR(CRITICAL,E_INVALID_STATE,("NULL congestion group handle and no FQ Threshold"));
1970            QM_FQR_Free(p_QmFqr);
1971            return NULL;
1972        }
1973        if(p_QmFqrParams->congestionAvoidanceEnable)
1974        {
1975            if(p_QmFqrParams->congestionAvoidanceParams.h_QmCg)
1976                flags |= QM_FQCTRL_CGE;
1977            if(p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold)
1978                flags |= QM_FQCTRL_TDE;
1979        }
1980
1981    /*
1982        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_ORP : 0;
1983        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_CPCSTASH : 0;
1984        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_FORCESFDR : 0;
1985        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_AVOIDBLOCK : 0;
1986    */
1987        flags |= (p_QmFqrParams->holdActive)    ? QM_FQCTRL_HOLDACTIVE : 0;
1988        flags |= (p_QmFqrParams->preferInCache) ? QM_FQCTRL_LOCKINCACHE : 0;
1989
1990        if (p_QmFqrParams->useContextAForStash)
1991        {
1992            if (CheckStashParams(p_QmFqrParams) != E_OK)
1993            {
1994                REPORT_ERROR(CRITICAL,E_INVALID_STATE,NO_MSG);
1995                QM_FQR_Free(p_QmFqr);
1996                return NULL;
1997            }
1998
1999            memset(&cnxtA, 0, sizeof(cnxtA));
2000            cnxtA.stashing.annotation_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameAnnotationSize, CACHELINE_SIZE);
2001            cnxtA.stashing.data_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameDataSize, CACHELINE_SIZE);
2002            cnxtA.stashing.context_cl = DIV_CEIL(p_QmFqrParams->stashingParams.fqContextSize, CACHELINE_SIZE);
2003            cnxtA.context_hi = (uint8_t)((p_QmFqrParams->stashingParams.fqContextAddr >> 32) & 0xff);
2004            cnxtA.context_lo = (uint32_t)(p_QmFqrParams->stashingParams.fqContextAddr);
2005            flags |= QM_FQCTRL_CTXASTASHING;
2006        }
2007
2008        for(i=0;i<p_QmFqr->numOfFqids;i++)
2009            if (qm_new_fq(p_QmFqr->h_QmPortal,
2010                          p_QmFqr->fqidBase+i,
2011                          i,
2012                          p_QmFqr->channel,
2013                          p_QmFqr->workQueue,
2014                          1/*p_QmFqr->numOfFqids*/,
2015                          flags,
2016                          (p_QmFqrParams->congestionAvoidanceEnable ?
2017                              &p_QmFqrParams->congestionAvoidanceParams : NULL),
2018                          p_QmFqrParams->useContextAForStash ?
2019                              (t_QmContextA *)&cnxtA : p_QmFqrParams->p_ContextA,
2020                          p_QmFqrParams->p_ContextB,
2021                          p_QmFqrParams->initParked,
2022                          p_QmFqr,
2023                          &p_QmFqr->p_Fqs[i]) != E_OK)
2024            {
2025                QM_FQR_Free(p_QmFqr);
2026                return NULL;
2027            }
2028    }
2029    return p_QmFqr;
2030}
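/*
 * Minimal FQR usage sketch (illustrative only; the channel, callback and
 * application-handle names are assumptions, and fd is assumed to be filled
 * in by the caller before enqueueing):
 *
 *     t_QmFqrParams fqrParams;
 *     t_Handle      h_Fqr;
 *     t_DpaaFD      fd;
 *
 *     memset(&fqrParams, 0, sizeof(fqrParams));
 *     fqrParams.h_Qm       = h_Qm;
 *     fqrParams.numOfFqids = 1;
 *     fqrParams.channel    = myChannel;
 *     fqrParams.wq         = 3;
 *
 *     h_Fqr = QM_FQR_Create(&fqrParams);
 *     if (h_Fqr)
 *     {
 *         QM_FQR_RegisterCB(h_Fqr, MyFrameCB, h_MyApp);
 *         QM_FQR_Enqueue(h_Fqr, NULL, 0, &fd);
 *         QM_FQR_Free(h_Fqr);
 *     }
 */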
2031
2032t_Error  QM_FQR_Free(t_Handle h_QmFqr)
2033{
2034    t_QmFqr     *p_QmFqr    = (t_QmFqr *)h_QmFqr;
2035    uint32_t    i;
2036
2037    if (!p_QmFqr)
2038        return ERROR_CODE(E_INVALID_HANDLE);
2039
2040    if (p_QmFqr->p_Fqs)
2041    {
2042        for (i=0;i<p_QmFqr->numOfFqids;i++)
2043            if (p_QmFqr->p_Fqs[i])
2044            {
2045                if (!p_QmFqr->shadowMode)
2046                    qm_free_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i]);
2047                XX_FreeSmart(p_QmFqr->p_Fqs[i]);
2048            }
2049        XX_Free(p_QmFqr->p_Fqs);
2050    }
2051
2052    if (!p_QmFqr->shadowMode && p_QmFqr->fqidBase)
2053        QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
2054
2055    XX_FreeSmart(p_QmFqr);
2056
2057    return E_OK;
2058}
2059
2060t_Error  QM_FQR_FreeWDrain(t_Handle                     h_QmFqr,
2061                           t_QmFqrDrainedCompletionCB   *f_CompletionCB,
2062                           bool                         deliverFrame,
2063                           t_QmReceivedFrameCallback    *f_CallBack,
2064                           t_Handle                     h_App)
2065{
2066    t_QmFqr     *p_QmFqr    = (t_QmFqr *)h_QmFqr;
2067    uint32_t    i;
2068
2069    if (!p_QmFqr)
2070        return ERROR_CODE(E_INVALID_HANDLE);
2071
2072    if (p_QmFqr->shadowMode)
2073        RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("QM_FQR_FreeWDrain can't be called for a shadow FQR!!! Call QM_FQR_Free instead"));
2074
2075    p_QmFqr->p_DrainedFqs = (bool *)XX_Malloc(sizeof(bool) * p_QmFqr->numOfFqids);
2076    if (!p_QmFqr->p_DrainedFqs)
2077        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("QM Drained-FQs obj!!! Try QM_FQR_Free without draining"));
2078    memset(p_QmFqr->p_DrainedFqs, 0, sizeof(bool) * p_QmFqr->numOfFqids);
2079
2080    if (f_CompletionCB)
2081    {
2082        p_QmFqr->f_CompletionCB = f_CompletionCB;
2083        p_QmFqr->h_App          = h_App;
2084    }
2085
2086    if (deliverFrame)
2087    {
2088        if (!f_CallBack)
2089        {
2090            REPORT_ERROR(MAJOR, E_NULL_POINTER, ("f_CallBack must be given."));
2091            XX_Free(p_QmFqr->p_DrainedFqs);
2092            return ERROR_CODE(E_NULL_POINTER);
2093        }
2094        QM_FQR_RegisterCB(p_QmFqr, f_CallBack, h_App);
2095    }
2096    else
2097        QM_FQR_RegisterCB(p_QmFqr, drainCB, h_App);
2098
2099    for (i=0;i<p_QmFqr->numOfFqids;i++)
2100    {
2101        if (qman_retire_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i], 0, true) != E_OK)
2102            RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!"));
2103
2104        if (p_QmFqr->p_Fqs[i]->flags & QMAN_FQ_STATE_CHANGING)
2105            DBG(INFO, ("fq %d currently in use, will be retired", p_QmFqr->p_Fqs[i]->fqid));
2106        else
2107            drainRetiredFq(p_QmFqr->p_Fqs[i]);
2108    }
2109
2110    if (!p_QmFqr->f_CompletionCB)
2111    {
2112        while(p_QmFqr->p_DrainedFqs) ;
2113        DBG(TRACE, ("QM-FQR with base %d completed", p_QmFqr->fqidBase));
2114        XX_Free(p_QmFqr->p_Fqs); /* allocated with XX_Malloc() */
2115        if (p_QmFqr->fqidBase)
2116            QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase);
2117        XX_FreeSmart(p_QmFqr);
2118    }
2119
2120    return E_OK;
2121}
2122
2123t_Error QM_FQR_RegisterCB(t_Handle h_QmFqr, t_QmReceivedFrameCallback *f_CallBack, t_Handle h_App)
2124{
2125    t_QmFqr     *p_QmFqr = (t_QmFqr *)h_QmFqr;
2126    int         i;
2127
2128    SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2129
2130    for (i=0;i<p_QmFqr->numOfFqids;i++)
2131    {
2132        p_QmFqr->p_Fqs[i]->cb.dqrr = f_CallBack;
2133        p_QmFqr->p_Fqs[i]->h_App   = h_App;
2134    }
2135
2136    return E_OK;
2137}
2138
2139t_Error QM_FQR_Enqueue(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame)
2140{
2141    t_QmFqr                 *p_QmFqr = (t_QmFqr *)h_QmFqr;
2142    t_QmPortal              *p_QmPortal;
2143    struct qm_eqcr_entry    *p_Eq;
2144    uint32_t                *p_Dst, *p_Src;
2145    const struct qman_fq    *p_Fq;
2146
2147    SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2148    SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);
2149
2150    if (!h_QmPortal)
2151    {
2152        SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE);
2153        h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
2154        SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
2155    }
2156    p_QmPortal = (t_QmPortal *)h_QmPortal;
2157
2158    p_Fq = p_QmFqr->p_Fqs[fqidOffset];
2159
2160#ifdef QM_CHECKING
2161    if (p_Fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE)
2162        RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG);
2163    if ((!(p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)) &&
2164        ((p_Fq->state == qman_fq_state_retired) ||
2165         (p_Fq->state == qman_fq_state_oos)))
2166        return ERROR_CODE(E_BUSY);
2167#endif /* QM_CHECKING */
2168
2169    NCSW_PLOCK(p_QmPortal);
2170    p_Eq = try_eq_start(p_QmPortal);
2171    if (!p_Eq)
2172    {
2173        PUNLOCK(p_QmPortal);
2174        return ERROR_CODE(E_BUSY);
2175    }
2176
2177    p_Eq->fqid = p_Fq->fqid;
2178    p_Eq->tag = aligned_int_from_ptr(p_Fq);
2179    /* gcc does a dreadful job of the following;
2180     *  eq->fd = *fd;
2181     * It causes the entire function to save/restore a wider range of
2182     * registers, and comes up with instruction-waste galore. This will do
2183     * until we can rework the function for better code-generation. */
2184    p_Dst = (uint32_t *)&p_Eq->fd;
2185    p_Src = (uint32_t *)p_Frame;
2186    p_Dst[0] = p_Src[0];
2187    p_Dst[1] = p_Src[1];
2188    p_Dst[2] = p_Src[2];
2189    p_Dst[3] = p_Src[3];
2190
2191    qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal,
2192                          (uint8_t)(QM_EQCR_VERB_CMD_ENQUEUE/* |
2193                          (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))*/));
2194    PUNLOCK(p_QmPortal);
2195
2196    return E_OK;
2197}
2198
2199
2200t_Error QM_FQR_PullFrame(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame)
2201{
2202    t_QmFqr                 *p_QmFqr = (t_QmFqr *)h_QmFqr;
2203    uint32_t                pdqcr = 0;
2204
2205    SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2206    SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);
2207    SANITY_CHECK_RETURN_ERROR(p_Frame, E_NULL_POINTER);
2208    SANITY_CHECK_RETURN_ERROR((p_QmFqr->p_Fqs[fqidOffset]->state == qman_fq_state_oos) ||
2209                              (p_QmFqr->p_Fqs[fqidOffset]->state == qman_fq_state_parked),
2210                              E_INVALID_STATE);
2211    if (!h_QmPortal)
2212    {
2213        SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE);
2214        h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
2215        SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
2216    }
2217
2218    pdqcr |= QM_PDQCR_MODE_UNSCHEDULED;
2219    pdqcr |= QM_PDQCR_FQID(p_QmFqr->p_Fqs[fqidOffset]->fqid);
2220    return QmPortalPullFrame(h_QmPortal, pdqcr, p_Frame);
2221}
2222
2223t_Error QM_FQR_Resume(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset)
2224{
2225    t_QmFqr     *p_QmFqr = (t_QmFqr *)h_QmFqr;
2226
2227    SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2228    SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);
2229
2230    if (!h_QmPortal)
2231    {
2232        SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE);
2233        h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
2234        SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE);
2235    }
2236    return qman_schedule_fq(h_QmPortal, p_QmFqr->p_Fqs[fqidOffset]);
2237}
2238
2239t_Error  QM_FQR_Suspend(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset)
2240{
2241    t_QmFqr     *p_QmFqr = (t_QmFqr *)h_QmFqr;
2242
2243    SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE);
2244    SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE);
2245    SANITY_CHECK_RETURN_ERROR((p_QmFqr->p_Fqs[fqidOffset]->flags & QM_FQCTRL_HOLDACTIVE), E_INVALID_STATE);
2246
2247    UNUSED(h_QmPortal);
2248    p_QmFqr->p_Fqs[fqidOffset]->state = qman_fq_state_waiting_parked;
2249
2250    return E_OK;
2251}
2252
2253uint32_t QM_FQR_GetFqid(t_Handle h_QmFqr)
2254{
2255    t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr;
2256
2257    SANITY_CHECK_RETURN_VALUE(p_QmFqr, E_INVALID_HANDLE, 0);
2258
2259    return p_QmFqr->fqidBase;
2260}
2261
2262uint32_t QM_FQR_GetCounter(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, e_QmFqrCounters counter)
2263{
2264    t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr;
2265    struct qm_mcr_queryfq_np    queryfq_np;
2266
2267    SANITY_CHECK_RETURN_VALUE(p_QmFqr, E_INVALID_HANDLE, 0);
2268    SANITY_CHECK_RETURN_VALUE((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE, 0);
2269
2270    if (!h_QmPortal)
2271    {
2272        SANITY_CHECK_RETURN_VALUE(p_QmFqr->h_Qm, E_INVALID_HANDLE, 0);
2273        h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm);
2274        SANITY_CHECK_RETURN_VALUE(h_QmPortal, E_INVALID_HANDLE, 0);
2275    }
2276    if (qman_query_fq_np(h_QmPortal, p_QmFqr->p_Fqs[fqidOffset], &queryfq_np) != E_OK)
2277        return 0;
2278    switch (counter)
2279    {
2280        case e_QM_FQR_COUNTERS_FRAME :
2281            return queryfq_np.frm_cnt;
2282        case e_QM_FQR_COUNTERS_BYTE :
2283            return queryfq_np.byte_cnt;
2284        default :
2285            break;
2286    }
2287    /* should never get here */
2288    ASSERT_COND(FALSE);
2289
2290    return 0;
2291}
2292
2293
2294t_Handle QM_CG_Create(t_QmCgParams *p_CgParams)
2295{
2296    t_QmCg                          *p_QmCg;
2297    t_QmPortal                      *p_QmPortal;
2298    t_Error                         err;
2299    uint32_t                        wredParams;
2300    uint32_t                        tmpA, tmpN, ta=0, tn=0;
2301    int                             gap, tmp;
2302    struct qm_mc_command            *p_Mcc;
2303    struct qm_mc_result             *p_Mcr;
2304
2305    SANITY_CHECK_RETURN_VALUE(p_CgParams, E_INVALID_HANDLE, NULL);
2306    SANITY_CHECK_RETURN_VALUE(p_CgParams->h_Qm, E_INVALID_HANDLE, NULL);
2307
2308    if(p_CgParams->notifyDcPortal &&
2309       ((p_CgParams->dcPortalId == e_DPAA_DCPORTAL2) || (p_CgParams->dcPortalId == e_DPAA_DCPORTAL3)))
2310    {
2311        REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("notifyDcPortal is invalid for this DC Portal"));
2312        return NULL;
2313    }
2314
2315    if (!p_CgParams->h_QmPortal)
2316    {
2317        p_QmPortal = QmGetPortalHandle(p_CgParams->h_Qm);
2318        SANITY_CHECK_RETURN_VALUE(p_QmPortal, E_INVALID_STATE, NULL);
2319    }
2320    else
2321        p_QmPortal = p_CgParams->h_QmPortal;
2322
2323    p_QmCg = (t_QmCg *)XX_Malloc(sizeof(t_QmCg));
2324    if (!p_QmCg)
2325    {
2326        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM CG obj!!!"));
2327        return NULL;
2328    }
2329    memset(p_QmCg, 0, sizeof(t_QmCg));
2330
2331    /* build CG struct */
2332    p_QmCg->h_Qm        = p_CgParams->h_Qm;
2333    p_QmCg->h_QmPortal  = p_QmPortal;
2334    p_QmCg->h_App       = p_CgParams->h_App;
2335    err = QmGetCgId(p_CgParams->h_Qm, &p_QmCg->id);
2336    if (err)
2337    {
2338        XX_Free(p_QmCg);
2339        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmGetCgId failed"));
2340        return NULL;
2341    }
2342
2343    NCSW_PLOCK(p_QmPortal);
2344    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2345    p_Mcc->initcgr.cgid = p_QmCg->id;
2346
2347    err = QmPortalRegisterCg(p_QmPortal, p_QmCg, p_QmCg->id);
2348    if (err)
2349    {
2350        XX_Free(p_QmCg);
2351        PUNLOCK(p_QmPortal);
2352        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalRegisterCg failed"));
2353        return NULL;
2354    }
2355
2356    /*  Build CGR command */
2357    {
2358#ifdef QM_CGS_NO_FRAME_MODE
2359    t_QmRevisionInfo    revInfo;
2360
2361    QmGetRevision(p_QmCg->h_Qm, &revInfo);
2362
2363    if (!((revInfo.majorRev == 1) && (revInfo.minorRev == 0)))
2364#endif /* QM_CGS_NO_FRAME_MODE */
2365        if (p_CgParams->frameCount)
2366        {
2367            p_Mcc->initcgr.we_mask |= QM_CGR_WE_MODE;
2368            p_Mcc->initcgr.cgr.frame_mode = QM_CGR_EN;
2369        }
2370    }
2371
2372    if (p_CgParams->wredEnable)
2373    {
2374        if (p_CgParams->wredParams.enableGreen)
2375        {
2376            err = CalcWredCurve(&p_CgParams->wredParams.greenCurve, &wredParams);
2377            if(err)
2378            {
2379                XX_Free(p_QmCg);
2380                PUNLOCK(p_QmPortal);
2381                REPORT_ERROR(MAJOR, err, NO_MSG);
2382                return NULL;
2383            }
2384            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G;
2385            p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN;
2386            p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams;
2387        }
2388        if (p_CgParams->wredParams.enableYellow)
2389        {
2390            err = CalcWredCurve(&p_CgParams->wredParams.yellowCurve, &wredParams);
2391            if(err)
2392            {
2393                XX_Free(p_QmCg);
2394                PUNLOCK(p_QmPortal);
2395                REPORT_ERROR(MAJOR, err, NO_MSG);
2396                return NULL;
2397            }
2398            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y;
2399            p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN;
2400            p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams;
2401        }
2402        if (p_CgParams->wredParams.enableRed)
2403        {
2404            err = CalcWredCurve(&p_CgParams->wredParams.redCurve, &wredParams);
2405            if(err)
2406            {
2407                XX_Free(p_QmCg);
2408                PUNLOCK(p_QmPortal);
2409                REPORT_ERROR(MAJOR, err, NO_MSG);
2410                return NULL;
2411            }
2412            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R;
2413            p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN;
2414            p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams;
2415        }
2416    }
2417
2418    if (p_CgParams->tailDropEnable)
2419    {
2420        if (!p_CgParams->threshold)
2421        {
2422            XX_Free(p_QmCg);
2423            PUNLOCK(p_QmPortal);
2424            REPORT_ERROR(MINOR, E_INVALID_STATE, ("threshold must be configured if tailDropEnable is set"));
2425            return NULL;
2426        }
2427        p_Mcc->initcgr.cgr.cstd_en = QM_CGR_EN;
2428        p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
2429    }
2430
2431    if (p_CgParams->threshold)
2432    {
2433        p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES;
2434        p_QmCg->f_Exception = p_CgParams->f_Exception;
2435        if (p_QmCg->f_Exception || p_CgParams->notifyDcPortal)
2436        {
2437            p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN;
2438            p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSCN_EN | QM_CGR_WE_CSCN_TARG;
2439            /* target the SW portal when an exception callback is provided, and/or the DC portal when DC-portal notification was requested */
2440            p_Mcc->initcgr.cgr.cscn_targ = 0;
2441            if (p_QmCg->f_Exception)
2442                p_Mcc->initcgr.cgr.cscn_targ = (uint32_t)QM_CGR_TARGET_SWP(QmPortalGetSwPortalId(p_QmCg->h_QmPortal));
2443            if (p_CgParams->notifyDcPortal)
2444                p_Mcc->initcgr.cgr.cscn_targ |= (uint32_t)QM_CGR_TARGET_DCP(p_CgParams->dcPortalId);
2445        }
2446
2447        /* express thresh as ta*2^tn */
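        /* e.g. a threshold of 196608 is exactly 3*2^16, giving TA = 3, Tn = 16
           (illustrative; the search below minimizes the approximation error) */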
2448        gap = (int)p_CgParams->threshold;
2449        for (tmpA=0 ; tmpA<256; tmpA++ )
2450            for (tmpN=0 ; tmpN<32; tmpN++ )
2451            {
2452                tmp = ABS((int)(p_CgParams->threshold - tmpA*(1<<tmpN)));
2453                if (tmp < gap)
2454                {
2455                   ta = tmpA;
2456                   tn = tmpN;
2457                   gap = tmp;
2458                }
2459            }
2460        p_Mcc->initcgr.cgr.cs_thres.TA = ta;
2461        p_Mcc->initcgr.cgr.cs_thres.Tn = tn;
2462    }
2463    else if(p_CgParams->f_Exception)
2464    {
2465        XX_Free(p_QmCg);
2466        PUNLOCK(p_QmPortal);
2467        REPORT_ERROR(MINOR, E_INVALID_STATE, ("No threshold configured, but f_Exception defined"));
2468        return NULL;
2469    }
2470
2471    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_INITCGR);
2472    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2473    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_INITCGR);
2474    if (p_Mcr->result != QM_MCR_RESULT_OK)
2475    {
2476        XX_Free(p_QmCg);
2477        PUNLOCK(p_QmPortal);
2478        REPORT_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result)));
2479        return NULL;
2480    }
2481    PUNLOCK(p_QmPortal);
2482
2483    return p_QmCg;
2484}
2485
2486t_Error QM_CG_Free(t_Handle h_QmCg)
2487{
2488
2489    t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2490    t_Error                 err;
2491    struct qm_mc_command    *p_Mcc;
2492    struct qm_mc_result     *p_Mcr;
2493    t_QmPortal              *p_QmPortal;
2494
2495    SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2496
2497    p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2498
2499    NCSW_PLOCK(p_QmPortal);
2500    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2501    p_Mcc->initcgr.cgid = p_QmCg->id;
2502    p_Mcc->initcgr.we_mask = QM_CGR_WE_MASK;
2503
2504    err = QmFreeCgId(p_QmCg->h_Qm, p_QmCg->id);
2505    if(err)
2506    {
2507        XX_Free(p_QmCg);
2508        PUNLOCK(p_QmPortal);
2509        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmFreeCgId failed"));
2510    }
2511
2512    err = QmPortalUnregisterCg(p_QmCg->h_QmPortal, p_QmCg->id);
2513    if(err)
2514    {
2515        XX_Free(p_QmCg);
2516        PUNLOCK(p_QmPortal);
2517        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalUnregisterCg failed"));
2518    }
2519
2520    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2521    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2522    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2523    if (p_Mcr->result != QM_MCR_RESULT_OK)
2524    {
2525        PUNLOCK(p_QmPortal);
2526        RETURN_ERROR(MINOR, E_INVALID_STATE, ("MODIFYCGR failed: %s", mcr_result_str(p_Mcr->result)));
2527    }
2528    PUNLOCK(p_QmPortal);
2529
2530    XX_Free(p_QmCg);
2531
2532    return E_OK;
2533}
2534
2535t_Error QM_CG_SetException(t_Handle h_QmCg, e_QmExceptions exception, bool enable)
2536{
2537    t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2538    struct qm_mc_command    *p_Mcc;
2539    struct qm_mc_result     *p_Mcr;
2540    t_QmPortal              *p_QmPortal;
2541
2542    SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2543
2544    p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2545    if (!p_QmCg->f_Exception)
2546        RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Either threshold or exception callback was not configured."));
2547
2548    NCSW_PLOCK(p_QmPortal);
2549    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2550    p_Mcc->initcgr.cgid = p_QmCg->id;
2551    p_Mcc->initcgr.we_mask = QM_CGR_WE_CSCN_EN;
2552
2553    if(exception == e_QM_EX_CG_STATE_CHANGE)
2554    {
2555        if(enable)
2556            p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN;
2557    }
2558    else
2559    {
2560        PUNLOCK(p_QmPortal);
2561        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal exception"));
2562    }
2563
2564    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2565    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2566    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2567    if (p_Mcr->result != QM_MCR_RESULT_OK)
2568    {
2569        PUNLOCK(p_QmPortal);
2570        RETURN_ERROR(MINOR, E_INVALID_STATE, ("MODIFYCGR failed: %s", mcr_result_str(p_Mcr->result)));
2571    }
2572    PUNLOCK(p_QmPortal);
2573
2574    return E_OK;
2575}
2576
2577t_Error QM_CG_ModifyWredCurve(t_Handle h_QmCg, t_QmCgModifyWredParams *p_QmCgModifyParams)
2578{
2579    t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2580    uint32_t                wredParams;
2581    struct qm_mc_command    *p_Mcc;
2582    struct qm_mc_result     *p_Mcr;
2583    t_QmPortal              *p_QmPortal;
2584    t_Error                 err = E_OK;
2585
2586    SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2587
2588    p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2589
2590    NCSW_PLOCK(p_QmPortal);
2591    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2592    p_Mcc->initcgr.cgid = p_QmCg->id;
2593
2594    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR);
2595    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2596    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2597    if (p_Mcr->result != QM_MCR_RESULT_OK)
2598    {
2599        PUNLOCK(p_QmPortal);
2600        RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: %s", mcr_result_str(p_Mcr->result)));
2601    }
2602
2603    switch(p_QmCgModifyParams->color)
2604    {
2605        case(e_QM_CG_COLOR_GREEN):
2606            if(!p_Mcr->querycgr.cgr.wr_en_g)
2607            {
2608                PUNLOCK(p_QmPortal);
2609                RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for green"));
2610            }
2611            break;
2612        case(e_QM_CG_COLOR_YELLOW):
2613            if(!p_Mcr->querycgr.cgr.wr_en_y)
2614            {
2615                PUNLOCK(p_QmPortal);
2616                RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for yellow"));
2617            }
2618            break;
2619        case(e_QM_CG_COLOR_RED):
2620            if(!p_Mcr->querycgr.cgr.wr_en_r)
2621            {
2622                PUNLOCK(p_QmPortal);
2623                RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for red"));
2624            }
2625            break;
2626    }
2627
2628    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2629    p_Mcc->initcgr.cgid = p_QmCg->id;
2630
2631    switch(p_QmCgModifyParams->color)
2632    {
2633        case(e_QM_CG_COLOR_GREEN):
2634            err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
2635            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G;
2636            p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN;
2637            p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams;
2638            break;
2639        case(e_QM_CG_COLOR_YELLOW):
2640            err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
2641            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y;
2642            p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN;
2643            p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams;
2644            break;
2645        case(e_QM_CG_COLOR_RED):
2646            err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams);
2647            p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R;
2648            p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN;
2649            p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams;
2650            break;
2651    }
2652    if (err)
2653    {
2654        PUNLOCK(p_QmPortal);
2655        RETURN_ERROR(MINOR, err, NO_MSG);
2656    }
2657
2658    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2659    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2660    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2661    if (p_Mcr->result != QM_MCR_RESULT_OK)
2662    {
2663        PUNLOCK(p_QmPortal);
2664        RETURN_ERROR(MINOR, E_INVALID_STATE, ("MODIFYCGR failed: %s", mcr_result_str(p_Mcr->result)));
2665    }
2666    PUNLOCK(p_QmPortal);
2667
2668    return E_OK;
2669}
2670
2671t_Error QM_CG_ModifyTailDropThreshold(t_Handle h_QmCg, uint32_t threshold)
2672{
2673    t_QmCg                  *p_QmCg = (t_QmCg *)h_QmCg;
2674    struct qm_mc_command    *p_Mcc;
2675    struct qm_mc_result     *p_Mcr;
2676    t_QmPortal              *p_QmPortal;
2677    uint32_t                tmpA, tmpN, ta=0, tn=0;
2678    int                     gap, tmp;
2679
2680    SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE);
2681
2682    p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal;
2683
2684    NCSW_PLOCK(p_QmPortal);
2685    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2686    p_Mcc->initcgr.cgid = p_QmCg->id;
2687
2688    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR);
2689    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2690    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2691    if (p_Mcr->result != QM_MCR_RESULT_OK)
2692    {
2693        PUNLOCK(p_QmPortal);
2694        RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: %s", mcr_result_str(p_Mcr->result)));
2695    }
2696
2697    if(!p_Mcr->querycgr.cgr.cstd_en)
2698    {
2699        PUNLOCK(p_QmPortal);
2700        RETURN_ERROR(MINOR, E_INVALID_STATE, ("Tail Drop is not enabled!"));
2701    }
2702
2703    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
2704    p_Mcc->initcgr.cgid = p_QmCg->id;
2705    p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES;
2706
2707    /* express thresh as ta*2^tn */
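    /* e.g. a threshold of 40960 is exactly 10*2^12, giving TA = 10, Tn = 12 (illustrative) */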
2708    gap = (int)threshold;
2709    for (tmpA=0 ; tmpA<256; tmpA++ )
2710        for (tmpN=0 ; tmpN<32; tmpN++ )
2711        {
2712            tmp = ABS((int)(threshold - tmpA*(1<<tmpN)));
2713            if (tmp < gap)
2714            {
2715               ta = tmpA;
2716               tn = tmpN;
2717               gap = tmp;
2718            }
2719        }
2720    p_Mcc->initcgr.cgr.cs_thres.TA = ta;
2721    p_Mcc->initcgr.cgr.cs_thres.Tn = tn;
2722
2723    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR);
2724    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
2725    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR);
2726    if (p_Mcr->result != QM_MCR_RESULT_OK)
2727    {
2728        PUNLOCK(p_QmPortal);
2729        RETURN_ERROR(MINOR, E_INVALID_STATE, ("MODIFYCGR failed: %s", mcr_result_str(p_Mcr->result)));
2730    }
2731    PUNLOCK(p_QmPortal);
2732
2733    return E_OK;
2734}
2735
2736