1/***********************license start***************
2 * Copyright (c) 2003-2011  Cavium Inc. (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Inc. nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41/**
42 * @file
43 *
44 * Interface to the hardware Packet Order / Work unit.
45 *
46 * New, starting with SDK 1.7.0, cvmx-pow supports a number of
47 * extended consistency checks. The define
48 * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
49 * internal state checks to find common programming errors. If
 * CVMX_ENABLE_POW_CHECKS is not defined, the checks are enabled
 * by default. For example, cvmx-pow checks for the following
52 * program errors or POW state inconsistency.
53 * - Requesting a POW operation with an active tag switch in
54 *   progress.
55 * - Waiting for a tag switch to complete for an excessively
56 *   long period. This is normally a sign of an error in locking
57 *   causing deadlock.
58 * - Illegal tag switches from NULL_NULL.
59 * - Illegal tag switches from NULL.
60 * - Illegal deschedule request.
61 * - WQE pointer not matching the one attached to the core by
62 *   the POW.
63 *
64 * <hr>$Revision: 70030 $<hr>
65 */
66
67#ifndef __CVMX_POW_H__
68#define __CVMX_POW_H__
69
70#include "cvmx-scratch.h"
71#include "cvmx-wqe.h"
72
73#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
74#include <asm/octeon/cvmx-sso-defs.h>
75#else
76#include "cvmx-warn.h"
77#endif
78
79#ifdef  __cplusplus
80extern "C" {
81#endif
82
83#if defined(__FreeBSD__) && defined(_KERNEL)
84    /*
85     * For the FreeBSD kernel, have POW consistency checks depend on
86     * the setting of INVARIANTS.
87     */
88    #ifndef CVMX_ENABLE_POW_CHECKS
89        #ifdef INVARIANTS
90            #define CVMX_ENABLE_POW_CHECKS 1
91        #else
92            #define CVMX_ENABLE_POW_CHECKS 0
93        #endif
94    #endif
95#else
    /* Default to having all POW consistency checks turned on */
97    #ifndef CVMX_ENABLE_POW_CHECKS
98        #define CVMX_ENABLE_POW_CHECKS 1
99    #endif
100#endif
101
102/**
103 * Wait flag values for pow functions.
104 */
typedef enum {
    CVMX_POW_WAIT = 1,    /**< Stall the get-work load until work is available */
    CVMX_POW_NO_WAIT = 0, /**< Return immediately, possibly reporting no work */
} cvmx_pow_wait_t;
110
111/**
112 *  POW tag operations.  These are used in the data stored to the POW.
113 */
typedef enum {
    /**
     * Switch the tag (only) for this PP.
     *  - the previous tag should be non-NULL in this case
     *  - a tag switch response is required
     *  - fields used: op, type, tag
     */
    CVMX_POW_TAG_OP_SWTAG = 0L,
    /**
     * Switch the tag for this PP, with full information.
     *  - this should be used when the previous tag is NULL
     *  - a tag switch response is required
     *  - fields used: address, op, grp, type, tag
     */
    CVMX_POW_TAG_OP_SWTAG_FULL = 1L,
    /**
     * Switch the tag (and/or group) for this PP and de-schedule.
     *  - it is OK to keep the tag the same and only change the group
     *  - fields used: op, no_sched, grp, type, tag
     */
    CVMX_POW_TAG_OP_SWTAG_DESCH = 2L,
    /**
     * Just de-schedule.
     *  - fields used: op, no_sched
     */
    CVMX_POW_TAG_OP_DESCH = 3L,
    /**
     * Create an entirely new work queue entry.
     *  - fields used: address, op, qos, grp, type, tag
     */
    CVMX_POW_TAG_OP_ADDWQ = 4L,
    /**
     * Just update the work queue pointer and grp for this PP.
     *  - fields used: address, op, grp
     */
    CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,
    /**
     * Set the no_sched bit on the de-schedule list.
     *  - does nothing if the selected entry is not on the de-schedule list
     *  - does nothing if the stored work queue pointer does not match the
     *    address field
     *  - fields used: address, index, op
     *
     * Before issuing a *_NSCHED operation, SW must guarantee that all
     * prior deschedules and set/clr NSCHED operations are complete and
     * all prior switches are complete. The hardware provides the opsdone
     * bit and swdone bit for SW polling. After issuing a *_NSCHED
     * operation, SW must guarantee that the set/clr NSCHED is complete
     * before any subsequent operations.
     */
    CVMX_POW_TAG_OP_SET_NSCHED = 6L,
    /**
     * Clear the no_sched bit on the de-schedule list.
     *  - does nothing if the selected entry is not on the de-schedule list
     *  - does nothing if the stored work queue pointer does not match the
     *    address field
     *  - fields used: address, index, op
     *
     * The same ordering constraints as for CVMX_POW_TAG_OP_SET_NSCHED
     * apply: all prior deschedules, set/clr NSCHED operations, and
     * switches must be complete before issuing this operation, and the
     * clear must be complete before any subsequent operations.
     */
    CVMX_POW_TAG_OP_CLR_NSCHED = 7L,
    /** Do nothing. */
    CVMX_POW_TAG_OP_NOP = 15L
} cvmx_pow_tag_op_t;
155
156/**
157 * This structure defines the store data on a store to POW
158 */
typedef union
{
    uint64_t u64;
    /* Pre-68XX layout (13-bit index, 4-bit grp, 3-bit type). */
    struct
    {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t              no_sched  : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
        uint64_t                unused  : 2;
        uint64_t                 index  :13; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
        cvmx_pow_tag_op_t          op   : 4; /**< the operation to perform */
        uint64_t                unused2 : 2;
        uint64_t                   qos  : 3; /**< the QOS level for the packet. qos is only used for CVMX_POW_TAG_OP_ADDWQ */
        uint64_t                   grp  : 4; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
        cvmx_pow_tag_type_t        type : 3; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
        uint64_t                   tag  :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
#else
        /* Same fields as the big-endian layout above, listed LSB-first. */
        uint64_t                   tag  :32;
        cvmx_pow_tag_type_t        type : 3;
        uint64_t                   grp  : 4;
        uint64_t                   qos  : 3;
        uint64_t                unused2 : 2;
        cvmx_pow_tag_op_t          op   : 4;
        uint64_t                 index  :13;
        uint64_t                unused  : 2;
        uint64_t              no_sched  : 1;
#endif
    } s_cn38xx;
    /* 68XX layout for SET/CLR_NSCHED-style ops (11-bit index, 6-bit grp, 2-bit type). */
    struct {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t              no_sched  : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
        cvmx_pow_tag_op_t          op   : 4; /**< the operation to perform */
        uint64_t               unused1  : 4;
        uint64_t                 index  :11; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
        uint64_t               unused2  : 1;
        uint64_t                   grp  : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
        uint64_t               unused3  : 3;
        cvmx_pow_tag_type_t        type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
        uint64_t                   tag  :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
#else
        /* Same fields as the big-endian layout above, listed LSB-first. */
        uint64_t                   tag  :32;
        cvmx_pow_tag_type_t        type : 2;
        uint64_t               unused3  : 3;
        uint64_t                   grp  : 6;
        uint64_t               unused2  : 1;
        uint64_t                 index  :11;
        uint64_t               unused1  : 4;
        cvmx_pow_tag_op_t          op   : 4;
        uint64_t              no_sched  : 1;
#endif
    } s_cn68xx_clr;
    /* 68XX layout for CVMX_POW_TAG_OP_ADDWQ (carries a qos field instead of index). */
    struct {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t              no_sched  : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
        cvmx_pow_tag_op_t          op   : 4; /**< the operation to perform */
        uint64_t               unused1  : 12;
        uint64_t                    qos : 3; /**< the QOS level for the packet. qos is only used for CVMX_POW_TAG_OP_ADDWQ */
        uint64_t               unused2  : 1;
        uint64_t                   grp  : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
        uint64_t               unused3  : 3;
        cvmx_pow_tag_type_t        type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
        uint64_t                   tag  :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
#else
        /* Same fields as the big-endian layout above, listed LSB-first. */
        uint64_t                   tag  :32;
        cvmx_pow_tag_type_t        type : 2;
        uint64_t               unused3  : 3;
        uint64_t                   grp  : 6;
        uint64_t               unused2  : 1;
        uint64_t                   qos  : 3;
        uint64_t               unused1  : 12;
        cvmx_pow_tag_op_t          op   : 4;
        uint64_t              no_sched  : 1;
#endif
    } s_cn68xx_add;
    /* 68XX layout for the remaining tag operations (no index/qos field). */
    struct {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t              no_sched  : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
        cvmx_pow_tag_op_t          op   : 4; /**< the operation to perform */
        uint64_t               unused1  : 16;
        uint64_t                   grp  : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
        uint64_t               unused3  : 3;
        cvmx_pow_tag_type_t        type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
        uint64_t                   tag  :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
#else
        /* Same fields as the big-endian layout above, listed LSB-first. */
        uint64_t                   tag  :32;
        cvmx_pow_tag_type_t        type : 2;
        uint64_t               unused3  : 3;
        uint64_t                   grp  : 6;
        uint64_t               unused1  : 16;
        cvmx_pow_tag_op_t          op   : 4;
        uint64_t              no_sched  : 1;
#endif
    } s_cn68xx_other;

} cvmx_pow_tag_req_t;
253
/**
 * Snapshot of a POW entry's tag information.
 */
typedef struct {
    uint32_t tag;      /**< 32-bit tag value */
    uint16_t index;    /**< POW entry index */
    uint8_t  grp;      /**< group of the entry */
    uint8_t  tag_type; /**< tag type of the entry */
} cvmx_pow_tag_info_t;
260
261/**
262 * This structure describes the address to load stuff from POW
263 */
typedef union
{
    uint64_t u64;

    /**
     * Address for new work request loads (did<2:0> == 0)
     */
    struct
    {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t    mem_region      : 2;    /**< Mips64 address region. Should be CVMX_IO_SEG */
        uint64_t    reserved_49_61  : 13;   /**< Must be zero */
        uint64_t    is_io           : 1;    /**< Must be one */
        uint64_t    did             : 8;    /**< the ID of POW -- did<2:0> == 0 in this case */
        uint64_t    reserved_4_39   : 36;   /**< Must be zero */
        uint64_t    wait            : 1;    /**< If set, don't return load response until work is available */
        uint64_t    reserved_0_2    : 3;    /**< Must be zero */
#else
        uint64_t    reserved_0_2    : 3;
        uint64_t    wait            : 1;
        uint64_t    reserved_4_39   : 36;
        uint64_t    did             : 8;
        uint64_t    is_io           : 1;
        uint64_t    reserved_49_61  : 13;
        uint64_t    mem_region      : 2;
#endif
    } swork;

    /**
     * Address for loads to get POW internal status
     */
    struct
    {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t    mem_region      : 2;    /**< Mips64 address region. Should be CVMX_IO_SEG */
        uint64_t    reserved_49_61  : 13;   /**< Must be zero */
        uint64_t    is_io           : 1;    /**< Must be one */
        uint64_t    did             : 8;    /**< the ID of POW -- did<2:0> == 1 in this case */
        uint64_t    reserved_10_39  : 30;   /**< Must be zero */
        uint64_t    coreid          : 4;    /**< The core id to get status for */
        uint64_t    get_rev         : 1;    /**< If set and get_cur is set, return reverse tag-list pointer rather than forward tag-list pointer */
        uint64_t    get_cur         : 1;    /**< If set, return current status rather than pending status */
        uint64_t    get_wqp         : 1;    /**< If set, get the work-queue pointer rather than tag/type */
        uint64_t    reserved_0_2    : 3;    /**< Must be zero */
#else
        uint64_t    reserved_0_2    : 3;
        uint64_t    get_wqp         : 1;
        uint64_t    get_cur         : 1;
        uint64_t    get_rev         : 1;
        uint64_t    coreid          : 4;
        uint64_t    reserved_10_39  : 30;
        uint64_t    did             : 8;
        uint64_t    is_io           : 1;
        uint64_t    reserved_49_61  : 13;
        uint64_t    mem_region      : 2;
#endif
    } sstatus;

    /**
     * Address for loads to get 68XX SS0 internal status
     */
    struct
    {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t    mem_region      : 2;    /**< Mips64 address region. Should be CVMX_IO_SEG */
        uint64_t    reserved_49_61  : 13;   /**< Must be zero */
        uint64_t    is_io           : 1;    /**< Must be one */
        uint64_t    did             : 8;    /**< the ID of POW -- did<2:0> == 1 in this case */
        uint64_t    reserved_14_39  : 26;   /**< Must be zero */
        uint64_t    coreid          : 5;    /**< The core id to get status for */
        uint64_t    reserved_6_8    : 3;
        uint64_t    opcode          : 3;    /**< Status operation */
        uint64_t    reserved_0_2    : 3;    /**< Must be zero */
#else
        uint64_t    reserved_0_2    : 3;
        uint64_t    opcode          : 3;
        uint64_t    reserved_6_8    : 3;
        uint64_t    coreid          : 5;
        uint64_t    reserved_14_39  : 26;
        uint64_t    did             : 8;
        uint64_t    is_io           : 1;
        uint64_t    reserved_49_61  : 13;
        uint64_t    mem_region      : 2;
#endif
    } sstatus_cn68xx;

    /**
     * Address for memory loads to get POW internal state
     */
    struct
    {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t    mem_region      : 2;    /**< Mips64 address region. Should be CVMX_IO_SEG */
        uint64_t    reserved_49_61  : 13;   /**< Must be zero */
        uint64_t    is_io           : 1;    /**< Must be one */
        uint64_t    did             : 8;    /**< the ID of POW -- did<2:0> == 2 in this case */
        uint64_t    reserved_16_39  : 24;   /**< Must be zero */
        uint64_t    index           : 11;   /**< POW memory index */
        uint64_t    get_des         : 1;    /**< If set, return deschedule information rather than the standard
                                                response for work-queue index (invalid if the work-queue entry is not on the
                                                deschedule list). */
        uint64_t    get_wqp         : 1;    /**< If set, get the work-queue pointer rather than tag/type (no effect when get_des set). */
        uint64_t    reserved_0_2    : 3;    /**< Must be zero */
#else
        uint64_t    reserved_0_2    : 3;
        uint64_t    get_wqp         : 1;
        uint64_t    get_des         : 1;
        uint64_t    index           : 11;
        uint64_t    reserved_16_39  : 24;
        uint64_t    did             : 8;
        uint64_t    is_io           : 1;
        uint64_t    reserved_49_61  : 13;
        uint64_t    mem_region      : 2;
#endif
    } smemload;

    /**
     * Address for memory loads to get SSO internal state
     */
    struct
    {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t    mem_region      : 2;    /**< Mips64 address region. Should be CVMX_IO_SEG */
        uint64_t    reserved_49_61  : 13;   /**< Must be zero */
        uint64_t    is_io           : 1;    /**< Must be one */
        uint64_t    did             : 8;    /**< the ID of SSO - did<2:0> == 2 in this case */
        uint64_t    reserved_20_39  : 20;   /**< Must be zero */
        uint64_t    index           : 11;   /**< SSO memory index */
        uint64_t    reserved_6_8    : 3;    /**< Must be zero */
        uint64_t    opcode          : 3;    /**< Read TAG/WQ pointer/pending tag/next pointer */
        uint64_t    reserved_0_2    : 3;    /**< Must be zero */
#else
        uint64_t    reserved_0_2    : 3;
        uint64_t    opcode          : 3;
        uint64_t    reserved_6_8    : 3;    /* renamed from reserved_3_5: opcode occupies bits 3..5, so this pad
                                               covers bits 6..8 and must match the big-endian field name above */
        uint64_t    index           : 11;
        uint64_t    reserved_20_39  : 20;
        uint64_t    did             : 8;
        uint64_t    is_io           : 1;
        uint64_t    reserved_49_61  : 13;
        uint64_t    mem_region      : 2;
#endif
    } smemload_cn68xx;

    /**
     * Address for index/pointer loads
     */
    struct
    {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t    mem_region      : 2;    /**< Mips64 address region. Should be CVMX_IO_SEG */
        uint64_t    reserved_49_61  : 13;   /**< Must be zero */
        uint64_t    is_io           : 1;    /**< Must be one */
        uint64_t    did             : 8;    /**< the ID of POW -- did<2:0> == 3 in this case */
        uint64_t    reserved_9_39   : 31;   /**< Must be zero */
        uint64_t    qosgrp          : 4;    /**< when {get_rmt ==0 AND get_des_get_tail == 0}, this field selects one of
                                                eight POW internal-input queues (0-7), one per QOS level; values 8-15 are
                                                illegal in this case;
                                                when {get_rmt ==0 AND get_des_get_tail == 1}, this field selects one of
                                                16 deschedule lists (per group);
                                                when get_rmt ==1, this field selects one of 16 memory-input queue lists.
                                                The two memory-input queue lists associated with each QOS level are:
                                                - qosgrp = 0, qosgrp = 8:      QOS0
                                                - qosgrp = 1, qosgrp = 9:      QOS1
                                                - qosgrp = 2, qosgrp = 10:     QOS2
                                                - qosgrp = 3, qosgrp = 11:     QOS3
                                                - qosgrp = 4, qosgrp = 12:     QOS4
                                                - qosgrp = 5, qosgrp = 13:     QOS5
                                                - qosgrp = 6, qosgrp = 14:     QOS6
                                                - qosgrp = 7, qosgrp = 15:     QOS7 */
        uint64_t    get_des_get_tail: 1;    /**< If set and get_rmt is clear, return deschedule list indexes
                                                rather than indexes for the specified qos level; if set and get_rmt is set, return
                                                the tail pointer rather than the head pointer for the specified qos level. */
        uint64_t    get_rmt         : 1;    /**< If set, return remote pointers rather than the local indexes for the specified qos level. */
        uint64_t    reserved_0_2    : 3;    /**< Must be zero */
#else
        uint64_t    reserved_0_2    : 3;
        uint64_t    get_rmt         : 1;
        uint64_t    get_des_get_tail: 1;
        uint64_t    qosgrp          : 4;
        uint64_t    reserved_9_39   : 31;
        uint64_t    did             : 8;
        uint64_t    is_io           : 1;
        uint64_t    reserved_49_61  : 13;
        uint64_t    mem_region      : 2;
#endif
    } sindexload;

    /**
     * Address for a Index/Pointer loads to get SSO internal state
     */
    struct
    {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t    mem_region      : 2;    /**< Mips64 address region. Should be CVMX_IO_SEG */
        uint64_t    reserved_49_61  : 13;   /**< Must be zero */
        uint64_t    is_io           : 1;    /**< Must be one */
        uint64_t    did             : 8;    /**< the ID of SSO - did<2:0> == 2 in this case.
                                                 NOTE(review): '== 2' may be a copy/paste from smemload_cn68xx --
                                                 the POW sindexload above uses did<2:0> == 3; confirm against the
                                                 SSO hardware reference manual. */
        uint64_t    reserved_15_39  : 25;   /**< Must be zero */
        uint64_t    qos_grp         : 6;    /**< When opcode = IPL_IQ, this field specifies IQ (or QOS).
                                                 When opcode = IPL_DESCHED, this field specifies the group.
                                                 This field is reserved for all other opcodes. */
        uint64_t    reserved_6_8    : 3;    /**< Must be zero */
        uint64_t    opcode          : 3;    /**< Read TAG/WQ pointer/pending tag/next pointer */
        uint64_t    reserved_0_2    : 3;    /**< Must be zero */
#else
        uint64_t    reserved_0_2    : 3;
        uint64_t    opcode          : 3;
        uint64_t    reserved_6_8    : 3;    /* renamed from reserved_3_5: opcode occupies bits 3..5, so this pad
                                               covers bits 6..8 and must match the big-endian field name above */
        uint64_t    qos_grp         : 6;
        uint64_t    reserved_15_39  : 25;
        uint64_t    did             : 8;
        uint64_t    is_io           : 1;
        uint64_t    reserved_49_61  : 13;
        uint64_t    mem_region      : 2;
#endif
    } sindexload_cn68xx;

    /**
     * address for NULL_RD request (did<2:0> == 4)
     * when this is read, HW attempts to change the state to NULL if it is NULL_NULL
     * (the hardware cannot switch from NULL_NULL to NULL if a POW entry is not available -
     * software may need to recover by finishing another piece of work before a POW
     * entry can ever become available.)
     */
    struct
    {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t    mem_region      : 2;    /**< Mips64 address region. Should be CVMX_IO_SEG */
        uint64_t    reserved_49_61  : 13;   /**< Must be zero */
        uint64_t    is_io           : 1;    /**< Must be one */
        uint64_t    did             : 8;    /**< the ID of POW -- did<2:0> == 4 in this case */
        uint64_t    reserved_0_39   : 40;   /**< Must be zero */
#else
        uint64_t    reserved_0_39   : 40;
        uint64_t    did             : 8;
        uint64_t    is_io           : 1;
        uint64_t    reserved_49_61  : 13;
        uint64_t    mem_region      : 2;
#endif
    } snull_rd;
} cvmx_pow_load_addr_t;
506
507/**
508 * This structure defines the response to a load/SENDSINGLE to POW (except CSR reads)
509 */
510typedef union
511{
512    uint64_t u64;
513
514    /**
515     * Response to new work request loads
516     */
517    struct
518    {
519#ifdef __BIG_ENDIAN_BITFIELD
520        uint64_t    no_work          : 1;   /**< Set when no new work queue entry was returned.
521                                                If there was de-scheduled work, the HW will definitely
522                                                return it. When this bit is set, it could mean
523                                                either mean:
524                                                - There was no work, or
525                                                - There was no work that the HW could find. This
526                                                    case can happen, regardless of the wait bit value
527                                                    in the original request, when there is work
528                                                    in the IQ's that is too deep down the list. */
529        uint64_t    reserved_40_62   : 23;  /**< Must be zero */
530        uint64_t    addr             : 40;  /**< 36 in O1 -- the work queue pointer */
531#else
532        uint64_t    addr             : 40;
533        uint64_t    reserved_40_62   : 23;
534        uint64_t    no_work          : 1;
535#endif
536    } s_work;
537
538    /**
539     * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
540     */
541    struct
542    {
543#ifdef __BIG_ENDIAN_BITFIELD
544        uint64_t    reserved_62_63  : 2;
545        uint64_t    pend_switch     : 1;    /**< Set when there is a pending non-NULL SWTAG or
546                                                SWTAG_FULL, and the POW entry has not left the list for the original tag. */
547        uint64_t    pend_switch_full: 1;    /**< Set when SWTAG_FULL and pend_switch is set. */
548        uint64_t    pend_switch_null: 1;    /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */
549        uint64_t    pend_desched    : 1;    /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
550        uint64_t    pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */
551        uint64_t    pend_nosched    : 1;    /**< Set when nosched is desired and pend_desched is set. */
552        uint64_t    pend_new_work   : 1;    /**< Set when there is a pending GET_WORK. */
553        uint64_t    pend_new_work_wait: 1;  /**< When pend_new_work is set, this bit indicates that the wait bit was set. */
554        uint64_t    pend_null_rd    : 1;    /**< Set when there is a pending NULL_RD. */
555        uint64_t    pend_nosched_clr: 1;    /**< Set when there is a pending CLR_NSCHED. */
556        uint64_t    reserved_51     : 1;
557        uint64_t    pend_index      : 11;   /**< This is the index when pend_nosched_clr is set. */
558        uint64_t    pend_grp        : 4;    /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */
559        uint64_t    reserved_34_35  : 2;
560        uint64_t    pend_type       : 2;    /**< This is the tag type when pend_switch or (pend_desched AND pend_desched_switch) are set. */
561        uint64_t    pend_tag        : 32;   /**< - this is the tag when pend_switch or (pend_desched AND pend_desched_switch) are set. */
562#else
563        uint64_t    pend_tag        : 32;
564        uint64_t    pend_type       : 2;
565        uint64_t    reserved_34_35  : 2;
566        uint64_t    pend_grp        : 4;
567        uint64_t    pend_index      : 11;
568        uint64_t    reserved_51     : 1;
569        uint64_t    pend_nosched_clr: 1;
570        uint64_t    pend_null_rd    : 1;
571        uint64_t    pend_new_work_wait: 1;
572        uint64_t    pend_new_work   : 1;
573        uint64_t    pend_nosched    : 1;
574        uint64_t    pend_desched_switch: 1;
575        uint64_t    pend_desched    : 1;
576        uint64_t    pend_switch_null: 1;
577        uint64_t    pend_switch_full: 1;
578        uint64_t    pend_switch     : 1;
579        uint64_t    reserved_62_63  : 2;
580#endif
581    } s_sstatus0;
582
583    /**
584     * Result for a SSO Status Load (when opcode is SL_PENDTAG)
585     */
586    struct
587    {
588#ifdef __BIG_ENDIAN_BITFIELD
589        uint64_t    pend_switch     : 1;    /**< Set when there is a pending non-UNSCHEDULED SWTAG or
590                                                SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
591        uint64_t    pend_get_work   : 1;    /**< Set when there is a pending GET_WORK */
592        uint64_t    pend_get_work_wait: 1;  /**< when pend_get_work is set, this biit indicates that the
593                                                 wait bit was set. */
594        uint64_t    pend_nosched    : 1;    /**< Set when nosched is desired and pend_desched is set. */
595        uint64_t    pend_nosched_clr: 1;    /**< Set when there is a pending CLR_NSCHED. */
596        uint64_t    pend_desched    : 1;    /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
597        uint64_t    pend_alloc_we   : 1;    /**< Set when there is a pending ALLOC_WE. */
598        uint64_t    reserved_48_56  : 9;
599        uint64_t    pend_index      : 11;   /**< This is the index when pend_nosched_clr is set. */
600        uint64_t    reserved_34_36  : 3;
601        uint64_t    pend_type       : 2;    /**< This is the tag type when pend_switch is set. */
602        uint64_t    pend_tag        : 32;   /**< This is the tag when pend_switch is set. */
603#else
604        uint64_t    pend_tag        : 32;
605        uint64_t    pend_type       : 2;
606        uint64_t    reserved_34_36  : 3;
607        uint64_t    pend_index      : 11;
608        uint64_t    reserved_48_56  : 9;
609        uint64_t    pend_alloc_we   : 1;
610        uint64_t    pend_desched    : 1;
611        uint64_t    pend_nosched_clr: 1;
612        uint64_t    pend_nosched    : 1;
613        uint64_t    pend_get_work_wait: 1;
614        uint64_t    pend_get_work   : 1;
615        uint64_t    pend_switch     : 1;
616#endif
617    } s_sstatus0_cn68xx;
618
619    /**
620     * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
621     */
622    struct
623    {
624#ifdef __BIG_ENDIAN_BITFIELD
625        uint64_t    reserved_62_63  : 2;
626        uint64_t    pend_switch     : 1;    /**< Set when there is a pending non-NULL SWTAG or
627                                                SWTAG_FULL, and the POW entry has not left the list for the original tag. */
628        uint64_t    pend_switch_full: 1;    /**< Set when SWTAG_FULL and pend_switch is set. */
629        uint64_t    pend_switch_null: 1;    /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */
630        uint64_t    pend_desched    : 1;    /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
631        uint64_t    pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */
632        uint64_t    pend_nosched    : 1;    /**< Set when nosched is desired and pend_desched is set. */
633        uint64_t    pend_new_work   : 1;    /**< Set when there is a pending GET_WORK. */
634        uint64_t    pend_new_work_wait: 1;  /**< When pend_new_work is set, this bit indicates that the wait bit was set. */
635        uint64_t    pend_null_rd    : 1;    /**< Set when there is a pending NULL_RD. */
636        uint64_t    pend_nosched_clr: 1;    /**< Set when there is a pending CLR_NSCHED. */
637        uint64_t    reserved_51     : 1;
638        uint64_t    pend_index      : 11;   /**< This is the index when pend_nosched_clr is set. */
639        uint64_t    pend_grp        : 4;    /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */
640        uint64_t    pend_wqp        : 36;   /**< This is the wqp when pend_nosched_clr is set. */
641#else
642        uint64_t    pend_wqp        : 36;
643        uint64_t    pend_grp        : 4;
644        uint64_t    pend_index      : 11;
645        uint64_t    reserved_51     : 1;
646        uint64_t    pend_nosched_clr: 1;
647        uint64_t    pend_null_rd    : 1;
648        uint64_t    pend_new_work_wait: 1;
649        uint64_t    pend_new_work   : 1;
650        uint64_t    pend_nosched    : 1;
651        uint64_t    pend_desched_switch: 1;
652        uint64_t    pend_desched    : 1;
653        uint64_t    pend_switch_null: 1;
654        uint64_t    pend_switch_full: 1;
655        uint64_t    pend_switch     : 1;
656        uint64_t    reserved_62_63  : 2;
657#endif
658    } s_sstatus1;
659
660    /**
661     * Result for a SSO Status Load (when opcode is SL_PENDWQP)
662     */
663    struct
664    {
665#ifdef __BIG_ENDIAN_BITFIELD
666        uint64_t    pend_switch     : 1;    /**< Set when there is a pending non-UNSCHEDULED SWTAG or
667                                                SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
668        uint64_t    pend_get_work   : 1;    /**< Set when there is a pending GET_WORK */
669        uint64_t    pend_get_work_wait: 1;  /**< when pend_get_work is set, this biit indicates that the
670                                                 wait bit was set. */
671        uint64_t    pend_nosched    : 1;    /**< Set when nosched is desired and pend_desched is set. */
672        uint64_t    pend_nosched_clr: 1;    /**< Set when there is a pending CLR_NSCHED. */
673        uint64_t    pend_desched    : 1;    /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
674        uint64_t    pend_alloc_we   : 1;    /**< Set when there is a pending ALLOC_WE. */
675        uint64_t    reserved_51_56  : 6;
676        uint64_t    pend_index      : 11;   /**< This is the index when pend_nosched_clr is set. */
677        uint64_t    reserved_38_39  : 2;
678        uint64_t    pend_wqp        : 38;   /**< This is the wqp when pend_nosched_clr is set. */
679#else
680        uint64_t    pend_wqp        : 38;
681        uint64_t    reserved_38_39  : 2;
682        uint64_t    pend_index      : 11;
683        uint64_t    reserved_51_56  : 6;
684        uint64_t    pend_alloc_we   : 1;
685        uint64_t    pend_desched    : 1;
686        uint64_t    pend_nosched_clr: 1;
687        uint64_t    pend_nosched    : 1;
688        uint64_t    pend_get_work_wait: 1;
689        uint64_t    pend_get_work   : 1;
690        uint64_t    pend_switch     : 1;
691#endif
692    } s_sstatus1_cn68xx;
693
694    /**
695     * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==0)
696     */
697    struct
698    {
699#ifdef __BIG_ENDIAN_BITFIELD
700        uint64_t    reserved_62_63  : 2;
701        uint64_t    link_index      : 11;    /**< Points to the next POW entry in the tag list when tail == 0 (and
702                                                tag_type is not NULL or NULL_NULL). */
703        uint64_t    index           : 11;   /**< The POW entry attached to the core. */
704        uint64_t    grp             : 4;    /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
705        uint64_t    head            : 1;    /**< Set when this POW entry is at the head of its tag list (also set when in
706                                                the NULL or NULL_NULL state). */
707        uint64_t    tail            : 1;    /**< Set when this POW entry is at the tail of its tag list (also set when in the
708                                                NULL or NULL_NULL state). */
709        uint64_t    tag_type        : 2;    /**< The tag type attached to the core (updated when new tag list
710                                                entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
711        uint64_t    tag             : 32;   /**< The tag attached to the core (updated when new tag list entered on
712                                                SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
713#else
714        uint64_t    tag             : 32;
715        uint64_t    tag_type        : 2;
716        uint64_t    tail            : 1;
717        uint64_t    head            : 1;
718        uint64_t    grp             : 4;
719        uint64_t    index           : 11;
720        uint64_t    link_index      : 11;
721        uint64_t    reserved_62_63  : 2;
722#endif
723    } s_sstatus2;
724
725    /**
726     * Result for a SSO Status Load (when opcode is SL_TAG)
727     */
728    struct
729    {
730#ifdef __BIG_ENDIAN_BITFIELD
731        uint64_t    reserved_57_63  : 7;
732        uint64_t    index           : 11;   /**< The SSO entry attached to the core. */
733        uint64_t    reserved_45     : 1;
734        uint64_t    grp             : 6;    /**< The group attached to the core (updated when new tag list entered on
735                                                 SWTAG_FULL). */
736        uint64_t    head            : 1;    /**< Set when this SSO entry is at the head of its tag list (also set when in the
737                                                 UNSCHEDULED or EMPTY state). */
738        uint64_t    tail            : 1;    /**< Set when this SSO entry is at the tail of its tag list (also set when in the
739                                                 UNSCHEDULED or EMPTY state). */
740        uint64_t    reserved_34_36  : 3;
741        uint64_t    tag_type        : 2;    /**< The tag type attached to the core (updated when new tag list entered
742                                                 on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
743        uint64_t    tag             : 32;   /**< The tag attached to the core (updated when new tag list entered on SWTAG,
744                                                 SWTAG_FULL, or SWTAG_DESCHED). */
745#else
746        uint64_t    tag             : 32;
747        uint64_t    tag_type        : 2;
748        uint64_t    reserved_34_36  : 3;
749        uint64_t    tail            : 1;
750        uint64_t    head            : 1;
751        uint64_t    grp             : 6;
752        uint64_t    reserved_45     : 1;
753        uint64_t    index           : 11;
754        uint64_t    reserved_57_63  : 7;
755#endif
756    } s_sstatus2_cn68xx;
757
758    /**
759     * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
760     */
761    struct
762    {
763#ifdef __BIG_ENDIAN_BITFIELD
764        uint64_t    reserved_62_63  : 2;
765        uint64_t    revlink_index   : 11;   /**< Points to the prior POW entry in the tag list when head == 0
766                                                (and tag_type is not NULL or NULL_NULL). This field is unpredictable
767                                                when the core's state is NULL or NULL_NULL. */
768        uint64_t    index           : 11;   /**< The POW entry attached to the core. */
769        uint64_t    grp             : 4;    /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
770        uint64_t    head            : 1;    /**< Set when this POW entry is at the head of its tag list (also set when in
771                                                the NULL or NULL_NULL state). */
772        uint64_t    tail            : 1;    /**< Set when this POW entry is at the tail of its tag list (also set when in the
773                                                NULL or NULL_NULL state). */
774        uint64_t    tag_type        : 2;    /**< The tag type attached to the core (updated when new tag list
775                                                entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
776        uint64_t    tag             : 32;   /**< The tag attached to the core (updated when new tag list entered on
777                                                SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
778#else
779        uint64_t    tag             : 32;
780        uint64_t    tag_type        : 2;
781        uint64_t    tail            : 1;
782        uint64_t    head            : 1;
783        uint64_t    grp             : 4;
784        uint64_t    index           : 11;
785        uint64_t    revlink_index   : 11;
786        uint64_t    reserved_62_63  : 2;
787#endif
788    } s_sstatus3;
789
790    /**
791     * Result for a SSO Status Load (when opcode is SL_WQP)
792     */
793    struct
794    {
795#ifdef __BIG_ENDIAN_BITFIELD
796        uint64_t    reserved_58_63  : 6;
797        uint64_t    index           : 11;   /**< The SSO entry attached to the core. */
798        uint64_t    reserved_46     : 1;
799        uint64_t    grp             : 6;    /**< The group attached to the core (updated when new tag list entered on
800                                                 SWTAG_FULL). */
801        uint64_t    reserved_38_39  : 2;
802        uint64_t    wqp             : 38;   /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
803#else
804        uint64_t    wqp             : 38;
805        uint64_t    reserved_38_39  : 2;
806        uint64_t    grp             : 6;
807        uint64_t    reserved_46     : 1;
808        uint64_t    index           : 11;
809        uint64_t    reserved_58_63  : 6;
810#endif
811    } s_sstatus3_cn68xx;
812
813    /**
814     * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==0)
815     */
816    struct
817    {
818#ifdef __BIG_ENDIAN_BITFIELD
819        uint64_t    reserved_62_63  : 2;
820        uint64_t    link_index      : 11;    /**< Points to the next POW entry in the tag list when tail == 0 (and
821                                                tag_type is not NULL or NULL_NULL). */
822        uint64_t    index           : 11;   /**< The POW entry attached to the core. */
823        uint64_t    grp             : 4;    /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
824        uint64_t    wqp             : 36;   /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
825#else
826        uint64_t    wqp             : 36;
827        uint64_t    grp             : 4;
828        uint64_t    index           : 11;
829        uint64_t    link_index      : 11;
830        uint64_t    reserved_62_63  : 2;
831#endif
832    } s_sstatus4;
833
834    /**
835     * Result for a SSO Status Load (when opcode is SL_LINKS)
836     */
837    struct
838    {
839#ifdef __BIG_ENDIAN_BITFIELD
840        uint64_t    reserved_46_63  : 18;
841        uint64_t    index           : 11;   /**< The SSO entry attached to the core. */
842        uint64_t    reserved_34     : 1;
843        uint64_t    grp             : 6;    /**< The group attached to the core (updated when new tag list entered on
844                                                 SWTAG_FULL). */
845        uint64_t    head            : 1;    /**< Set when this SSO entry is at the head of its tag list (also set when in the
846                                                 UNSCHEDULED or EMPTY state). */
847        uint64_t    tail            : 1;    /**< Set when this SSO entry is at the tail of its tag list (also set when in the
848                                                 UNSCHEDULED or EMPTY state). */
849        uint64_t    reserved_24_25  : 2;
850        uint64_t    revlink_index   : 11;   /**< Points to the prior SSO entry in the tag list when head==0 (and tag_type is not UNSCHEDULED or EMPTY). */
851        uint64_t    reserved_11_12  : 2;
852        uint64_t    link_index      : 11;   /**< Points to the next SSO entry in the tag list when tail==0 (and tag_type is not UNSCHEDULDED or EMPTY). */
853#else
854        uint64_t    link_index      : 11;
855        uint64_t    reserved_11_12  : 2;
856        uint64_t    revlink_index   : 11;
857        uint64_t    reserved_24_25  : 2;
858        uint64_t    tail            : 1;
859        uint64_t    head            : 1;
860        uint64_t    grp             : 6;
861        uint64_t    reserved_34     : 1;
862        uint64_t    index           : 11;
863        uint64_t    reserved_46_63  : 18;
864#endif
865    } s_sstatus4_cn68xx;
866
867    /**
868     * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==1)
869     */
870    struct
871    {
872#ifdef __BIG_ENDIAN_BITFIELD
873        uint64_t    reserved_62_63  : 2;
874        uint64_t    revlink_index   : 11;   /**< Points to the prior POW entry in the tag list when head == 0
875                                                (and tag_type is not NULL or NULL_NULL). This field is unpredictable
876                                                when the core's state is NULL or NULL_NULL. */
877        uint64_t    index           : 11;   /**< The POW entry attached to the core. */
878        uint64_t    grp             : 4;    /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
879        uint64_t    wqp             : 36;   /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
880#else
881        uint64_t    wqp             : 36;
882        uint64_t    grp             : 4;
883        uint64_t    index           : 11;
884        uint64_t    revlink_index   : 11;
885        uint64_t    reserved_62_63  : 2;
886#endif
887    } s_sstatus5;
888
889    /**
890     * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
891     */
892    struct
893    {
894#ifdef __BIG_ENDIAN_BITFIELD
895        uint64_t    reserved_51_63  : 13;
896        uint64_t    next_index      : 11;    /**< The next entry in the input, free, descheduled_head list
897                                                (unpredictable if entry is the tail of the list). */
898        uint64_t    grp             : 4;    /**< The group of the POW entry. */
899        uint64_t    reserved_35     : 1;
900        uint64_t    tail            : 1;    /**< Set when this POW entry is at the tail of its tag list (also set when in the
901                                                NULL or NULL_NULL state). */
902        uint64_t    tag_type        : 2;    /**< The tag type of the POW entry. */
903        uint64_t    tag             : 32;   /**< The tag of the POW entry. */
904#else
905        uint64_t    tag             : 32;
906        uint64_t    tag_type        : 2;
907        uint64_t    tail            : 1;
908        uint64_t    reserved_35     : 1;
909        uint64_t    grp             : 4;
910        uint64_t    next_index      : 11;
911        uint64_t    reserved_51_63  : 13;
912#endif
913    } s_smemload0;
914
915    /**
916     * Result For SSO Memory Load (opcode is ML_TAG)
917     */
918    struct
919    {
920#ifdef __BIG_ENDIAN_BITFIELD
921        uint64_t    reserved_38_63  : 26;
922        uint64_t    tail            : 1;    /**< Set when this SSO entry is at the tail of its tag list (also set when in the
923                                                NULL or NULL_NULL state). */
924        uint64_t    reserved_34_36  : 3;
925        uint64_t    tag_type        : 2;    /**< The tag type of the SSO entry. */
926        uint64_t    tag             : 32;   /**< The tag of the SSO entry. */
927#else
928        uint64_t    tag             : 32;
929        uint64_t    tag_type        : 2;
930        uint64_t    reserved_34_36  : 3;
931        uint64_t    tail            : 1;
932        uint64_t    reserved_38_63  : 26;
933#endif
934    } s_smemload0_cn68xx;
935
936    /**
937     * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
938     */
939    struct
940    {
941#ifdef __BIG_ENDIAN_BITFIELD
942        uint64_t    reserved_51_63  : 13;
943        uint64_t    next_index      : 11;    /**< The next entry in the input, free, descheduled_head list
944                                                (unpredictable if entry is the tail of the list). */
945        uint64_t    grp             : 4;    /**< The group of the POW entry. */
946        uint64_t    wqp             : 36;   /**< The WQP held in the POW entry. */
947#else
948        uint64_t    wqp             : 36;
949        uint64_t    grp             : 4;
950        uint64_t    next_index      : 11;
951        uint64_t    reserved_51_63  : 13;
952#endif
953    } s_smemload1;
954
955    /**
956     * Result For SSO Memory Load (opcode is ML_WQPGRP)
957     */
958    struct
959    {
960#ifdef __BIG_ENDIAN_BITFIELD
961        uint64_t    reserved_48_63  : 16;
962        uint64_t    nosched         : 1;    /**< The nosched bit for the SSO entry. */
963        uint64_t    reserved_46     : 1;
964        uint64_t    grp             : 6;    /**< The group of the SSO entry. */
965        uint64_t    reserved_38_39  : 2;
966        uint64_t    wqp             : 38;   /**< The WQP held in the SSO entry. */
967#else
968        uint64_t    wqp             : 38;
969        uint64_t    reserved_38_39  : 2;
970        uint64_t    grp             : 6;
971        uint64_t    reserved_46     : 1;
972        uint64_t    nosched         : 1;
973        uint64_t    reserved_51_63  : 16;
974#endif
975    } s_smemload1_cn68xx;
976
977    /**
978     * Result For POW Memory Load (get_des == 1)
979     */
980    struct
981    {
982#ifdef __BIG_ENDIAN_BITFIELD
983        uint64_t    reserved_51_63  : 13;
984        uint64_t    fwd_index       : 11;   /**< The next entry in the tag list connected to the descheduled head. */
985        uint64_t    grp             : 4;    /**< The group of the POW entry. */
986        uint64_t    nosched         : 1;    /**< The nosched bit for the POW entry. */
987        uint64_t    pend_switch     : 1;    /**< There is a pending tag switch */
988        uint64_t    pend_type       : 2;    /**< The next tag type for the new tag list when pend_switch is set. */
989        uint64_t    pend_tag        : 32;   /**< The next tag for the new tag list when pend_switch is set. */
990#else
991        uint64_t    pend_tag        : 32;
992        uint64_t    pend_type       : 2;
993        uint64_t    pend_switch     : 1;
994        uint64_t    nosched         : 1;
995        uint64_t    grp             : 4;
996        uint64_t    fwd_index       : 11;
997        uint64_t    reserved_51_63  : 13;
998#endif
999    } s_smemload2;
1000
1001    /**
1002     * Result For SSO Memory Load (opcode is ML_PENTAG)
1003     */
1004    struct
1005    {
1006#ifdef __BIG_ENDIAN_BITFIELD
1007        uint64_t    reserved_38_63  : 26;
1008        uint64_t    pend_switch     : 1;    /**< Set when there is a pending non-UNSCHEDULED SWTAG or
1009                                                 SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
1010        uint64_t    reserved_34_36  : 3;
1011        uint64_t    pend_type       : 2;    /**< The next tag type for the new tag list when pend_switch is set. */
1012        uint64_t    pend_tag        : 32;   /**< The next tag for the new tag list when pend_switch is set. */
1013#else
1014        uint64_t    pend_tag        : 32;
1015        uint64_t    pend_type       : 2;
1016        uint64_t    reserved_34_36  : 3;
1017        uint64_t    pend_switch     : 1;
1018        uint64_t    reserved_38_63  : 26;
1019#endif
1020    } s_smemload2_cn68xx;
1021
1022    /**
1023     * Result For SSO Memory Load (opcode is ML_LINKS)
1024     */
1025    struct
1026    {
1027#ifdef __BIG_ENDIAN_BITFIELD
1028        uint64_t    reserved_24_63  : 40;
1029        uint64_t    fwd_index       : 11;   /**< The next entry in the tag list connected to the descheduled head. */
1030        uint64_t    reserved_11_12  : 2;
1031        uint64_t    next_index      : 11;   /**< The next entry in the input, free, descheduled_head list
1032                                                 (unpredicatble if entry is the tail of the list). */
1033#else
1034        uint64_t    next_index      : 11;
1035        uint64_t    reserved_11_12  : 2;
1036        uint64_t    fwd_index       : 11;
1037        uint64_t    reserved_24_63  : 40;
1038#endif
1039    } s_smemload3_cn68xx;
1040
1041    /**
1042     * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
1043     */
1044    struct
1045    {
1046#ifdef __BIG_ENDIAN_BITFIELD
1047        uint64_t    reserved_52_63  : 12;
1048        uint64_t    free_val        : 1;    /**< - set when there is one or more POW entries on the free list. */
1049        uint64_t    free_one        : 1;    /**< - set when there is exactly one POW entry on the free list. */
1050        uint64_t    reserved_49     : 1;
1051        uint64_t    free_head       : 11;   /**< - when free_val is set, indicates the first entry on the free list. */
1052        uint64_t    reserved_37     : 1;
1053        uint64_t    free_tail       : 11;   /**< - when free_val is set, indicates the last entry on the free list. */
1054        uint64_t    loc_val         : 1;    /**< - set when there is one or more POW entries on the input Q list selected by qosgrp. */
1055        uint64_t    loc_one         : 1;    /**< - set when there is exactly one POW entry on the input Q list selected by qosgrp. */
1056        uint64_t    reserved_23     : 1;
1057        uint64_t    loc_head        : 11;   /**< - when loc_val is set, indicates the first entry on the input Q list selected by qosgrp. */
1058        uint64_t    reserved_11     : 1;
1059        uint64_t    loc_tail        : 11;   /**< - when loc_val is set, indicates the last entry on the input Q list selected by qosgrp. */
1060#else
1061        uint64_t    loc_tail        : 11;
1062        uint64_t    reserved_11     : 1;
1063        uint64_t    loc_head        : 11;
1064        uint64_t    reserved_23     : 1;
1065        uint64_t    loc_one         : 1;
1066        uint64_t    loc_val         : 1;
1067        uint64_t    free_tail       : 11;
1068        uint64_t    reserved_37     : 1;
1069        uint64_t    free_head       : 11;
1070        uint64_t    reserved_49     : 1;
1071        uint64_t    free_one        : 1;
1072        uint64_t    free_val        : 1;
1073        uint64_t    reserved_52_63  : 12;
1074#endif
1075    } sindexload0;
1076
1077    /**
1078     * Result for SSO Index/Pointer Load(opcode == IPL_IQ/IPL_DESCHED/IPL_NOSCHED)
1079     */
1080    struct
1081    {
1082#ifdef __BIG_ENDIAN_BITFIELD
1083        uint64_t    reserved_28_63  : 36;
1084        uint64_t    queue_val       : 1;    /**< - If set, one or more valid entries are in the queue. */
1085        uint64_t    queue_one       : 1;    /**< - If set, exactly one valid entry is in the queue. */
1086        uint64_t    reserved_24_25  : 2;
1087        uint64_t    queue_head      : 11;   /**< - Index of entry at the head of the queue. */
1088        uint64_t    reserved_11_12  : 2;
1089        uint64_t    queue_tail      : 11;   /**< - Index of entry at the tail of the queue. */
1090#else
1091        uint64_t    queue_tail      : 11;
1092        uint64_t    reserved_11_12  : 2;
1093        uint64_t    queue_head      : 11;
1094        uint64_t    reserved_24_25  : 2;
1095        uint64_t    queue_one       : 1;
1096        uint64_t    queue_val       : 1;
1097        uint64_t    reserved_28_63  : 36;
1098#endif
1099    } sindexload0_cn68xx;
1100
1101    /**
1102     * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
1103     */
1104    struct
1105    {
1106#ifdef __BIG_ENDIAN_BITFIELD
1107        uint64_t    reserved_52_63  : 12;
1108        uint64_t    nosched_val     : 1;    /**< - set when there is one or more POW entries on the nosched list. */
1109        uint64_t    nosched_one     : 1;    /**< - set when there is exactly one POW entry on the nosched list. */
1110        uint64_t    reserved_49     : 1;
1111        uint64_t    nosched_head    : 11;    /**< - when nosched_val is set, indicates the first entry on the nosched list. */
1112        uint64_t    reserved_37     : 1;
1113        uint64_t    nosched_tail    : 11;    /**< - when nosched_val is set, indicates the last entry on the nosched list. */
1114        uint64_t    des_val         : 1;    /**< - set when there is one or more descheduled heads on the descheduled list selected by qosgrp. */
1115        uint64_t    des_one         : 1;    /**< - set when there is exactly one descheduled head on the descheduled list selected by qosgrp. */
1116        uint64_t    reserved_23     : 1;
1117        uint64_t    des_head        : 11;    /**< - when des_val is set, indicates the first descheduled head on the descheduled list selected by qosgrp. */
1118        uint64_t    reserved_11     : 1;
1119        uint64_t    des_tail        : 11;    /**< - when des_val is set, indicates the last descheduled head on the descheduled list selected by qosgrp. */
1120#else
1121        uint64_t    des_tail        : 11;
1122        uint64_t    reserved_11     : 1;
1123        uint64_t    des_head        : 11;
1124        uint64_t    reserved_23     : 1;
1125        uint64_t    des_one         : 1;
1126        uint64_t    des_val         : 1;
1127        uint64_t    nosched_tail    : 11;
1128        uint64_t    reserved_37     : 1;
1129        uint64_t    nosched_head    : 11;
1130        uint64_t    reserved_49     : 1;
1131        uint64_t    nosched_one     : 1;
1132        uint64_t    nosched_val     : 1;
1133        uint64_t    reserved_52_63  : 12;
1134#endif
1135    } sindexload1;
1136
1137    /**
1138     * Result for SSO Index/Pointer Load(opcode == IPL_FREE0/IPL_FREE1/IPL_FREE2)
1139     */
1140    struct
1141    {
1142#ifdef __BIG_ENDIAN_BITFIELD
1143        uint64_t    reserved_60_63  : 4;
1144        uint64_t    qnum_head       : 2;    /**< - Subqueue with current head */
1145        uint64_t    qnum_tail       : 2;    /**< - Subqueue with current tail */
1146        uint64_t    reserved_28_55  : 28;
1147        uint64_t    queue_val       : 1;    /**< - If set, one or more valid entries are in the queue. */
1148        uint64_t    queue_one       : 1;    /**< - If set, exactly one valid entry is in the queue. */
1149        uint64_t    reserved_24_25  : 2;
1150        uint64_t    queue_head      : 11;   /**< - Index of entry at the head of the queue. */
1151        uint64_t    reserved_11_12  : 2;
1152        uint64_t    queue_tail      : 11;   /**< - Index of entry at the tail of the queue. */
1153#else
1154        uint64_t    queue_tail      : 11;
1155        uint64_t    reserved_11_12  : 2;
1156        uint64_t    queue_head      : 11;
1157        uint64_t    reserved_24_25  : 2;
1158        uint64_t    queue_one       : 1;
1159        uint64_t    queue_val       : 1;
1160        uint64_t    reserved_28_55  : 28;
1161        uint64_t    qnum_tail       : 2;
1162        uint64_t    qnum_head       : 2;
1163        uint64_t    reserved_60_63  : 4;
1164#endif
1165    } sindexload1_cn68xx;
1166
1167    /**
1168     * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
1169     */
1170    struct
1171    {
1172#ifdef __BIG_ENDIAN_BITFIELD
1173        uint64_t    reserved_39_63  : 25;
1174        uint64_t    rmt_is_head     : 1;    /**< Set when this DRAM list is the current head (i.e. is the next to
1175                                                be reloaded when the POW hardware reloads a POW entry from DRAM). The
1176                                                POW hardware alternates between the two DRAM lists associated with a QOS
1177                                                level when it reloads work from DRAM into the POW unit. */
1178        uint64_t    rmt_val         : 1;    /**< Set when the DRAM portion of the input Q list selected by qosgrp
1179                                                contains one or more pieces of work. */
1180        uint64_t    rmt_one         : 1;    /**< Set when the DRAM portion of the input Q list selected by qosgrp
1181                                                contains exactly one piece of work. */
1182        uint64_t    rmt_head        : 36;   /**< When rmt_val is set, indicates the first piece of work on the
1183                                                DRAM input Q list selected by qosgrp. */
1184#else
1185        uint64_t    rmt_head        : 36;
1186        uint64_t    rmt_one         : 1;
1187        uint64_t    rmt_val         : 1;
1188        uint64_t    rmt_is_head     : 1;
1189        uint64_t    reserved_39_63  : 25;
1190#endif
1191    } sindexload2;
1192
1193    /**
1194     * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 1)
1195     */
1196    struct
1197    {
1198#ifdef __BIG_ENDIAN_BITFIELD
1199        uint64_t    reserved_39_63  : 25;
1200        uint64_t    rmt_is_head     : 1;    /**< - set when this DRAM list is the current head (i.e. is the next to
1201                                                be reloaded when the POW hardware reloads a POW entry from DRAM). The
1202                                                POW hardware alternates between the two DRAM lists associated with a QOS
1203                                                level when it reloads work from DRAM into the POW unit. */
1204        uint64_t    rmt_val         : 1;    /**< - set when the DRAM portion of the input Q list selected by qosgrp
1205                                                contains one or more pieces of work. */
1206        uint64_t    rmt_one         : 1;    /**< - set when the DRAM portion of the input Q list selected by qosgrp
1207                                                contains exactly one piece of work. */
1208        uint64_t    rmt_tail        : 36;   /**< - when rmt_val is set, indicates the last piece of work on the DRAM
1209                                                input Q list selected by qosgrp. */
1210#else
1211        uint64_t    rmt_tail        : 36;
1212        uint64_t    rmt_one         : 1;
1213        uint64_t    rmt_val         : 1;
1214        uint64_t    rmt_is_head     : 1;
1215        uint64_t    reserved_39_63  : 25;
1216#endif
1217    } sindexload3;
1218
1219    /**
1220     * Response to NULL_RD request loads
1221     */
1222    struct
1223    {
1224#ifdef __BIG_ENDIAN_BITFIELD
1225        uint64_t    unused  : 62;
1226        uint64_t    state    : 2;  /**< of type cvmx_pow_tag_type_t. state is one of the following:
1227                                        - CVMX_POW_TAG_TYPE_ORDERED
1228                                        - CVMX_POW_TAG_TYPE_ATOMIC
1229                                        - CVMX_POW_TAG_TYPE_NULL
1230                                        - CVMX_POW_TAG_TYPE_NULL_NULL */
1231#else
1232        uint64_t    state    : 2;
1233        uint64_t    unused  : 62;
1234#endif
1235    } s_null_rd;
1236
1237} cvmx_pow_tag_load_resp_t;
1238
/**
 * CN68XX SSO tag status load response (sindexload-style), consumed by
 * cvmx_pow_get_current_tag() when OCTEON_FEATURE_CN68XX_WQE is present.
 */
typedef union {
    uint64_t u64;
    struct {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t    reserved_57_63  : 7;
        uint64_t    index           : 11;   /**< POW entry index held by the core (valid unless tag type is NULL_NULL) */
        uint64_t    reserved_45     : 1;
        uint64_t    grp             : 6;    /**< group of the current work */
        uint64_t    head            : 1;    /* NOTE(review): presumably head-of-order-list flag — confirm against HRM */
        uint64_t    tail            : 1;    /* NOTE(review): presumably tail-of-order-list flag — confirm against HRM */
        uint64_t    reserved_34_36  : 3;
        uint64_t    tag_type        : 2;    /**< current tag type, of type cvmx_pow_tag_type_t */
        uint64_t    tag             : 32;   /**< current 32-bit tag value */
#else
        /* Little-endian bit order: same fields as above, reversed */
        uint64_t    tag             : 32;
        uint64_t    tag_type        : 2;
        uint64_t    reserved_34_36  : 3;
        uint64_t    tail            : 1;
        uint64_t    head            : 1;
        uint64_t    grp             : 6;
        uint64_t    reserved_45     : 1;
        uint64_t    index           : 11;
        uint64_t    reserved_57_63  : 7;
#endif
    } s;
} cvmx_pow_sl_tag_resp_t;
1265
1266/**
1267 * This structure describes the address used for stores to the POW.
1268 *  The store address is meaningful on stores to the POW.  The hardware assumes that an aligned
1269 *  64-bit store was used for all these stores.
1270 *  Note the assumption that the work queue entry is aligned on an 8-byte
1271 *  boundary (since the low-order 3 address bits must be zero).
1272 *  Note that not all fields are used by all operations.
1273 *
1274 *  NOTE: The following is the behavior of the pending switch bit at the PP
1275 *       for POW stores (i.e. when did<7:3> == 0xc)
 *     - did<2:0> == 0      => pending switch bit is set
 *     - did<2:0> == 1      => no effect on the pending switch bit
 *     - did<2:0> == 3      => pending switch bit is cleared
 *     - did<2:0> == 7      => no effect on the pending switch bit
 *     - did<2:0> == others => must not be used
 *     - No other loads/stores have an effect on the pending switch bit
 *     - The switch bus from POW can clear the pending switch bit
1283 *
 *  NOTE: did<2:0> == 2 is used by the HW for a special single-cycle ADDWQ command
 *  (that only contains the pointer). SW must never use did<2:0> == 2.
1286 */
typedef union
{
    /**
     * Unsigned 64 bit integer representation of store address
     */
    uint64_t u64;

    struct
    {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t    mem_reg         : 2;    /**< Memory region.  Should be CVMX_IO_SEG in most cases */
        uint64_t    reserved_49_61  : 13;   /**< Must be zero */
        uint64_t    is_io           : 1;    /**< Must be one */
        uint64_t    did             : 8;    /**< Device ID of POW.  Note that different sub-dids are used. */
        uint64_t    reserved_36_39  : 4;    /**< Must be zero */
        uint64_t    addr            : 36;   /**< Address field. addr<2:0> must be zero */
#else
        /* Little-endian bit order: same fields as above, reversed */
        uint64_t    addr            : 36;
        uint64_t    reserved_36_39  : 4;
        uint64_t    did             : 8;
        uint64_t    is_io           : 1;
        uint64_t    reserved_49_61  : 13;
        uint64_t    mem_reg         : 2;
#endif
    } stag;
} cvmx_pow_tag_store_addr_t;
1313
1314/**
1315 * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
1316 */
typedef union
{
    uint64_t u64;

    struct
    {
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t    scraddr : 8;    /**< the (64-bit word) location in scratchpad to write to (if len != 0) */
        uint64_t    len     : 8;    /**< the number of words in the response (0 => no response) */
        uint64_t    did     : 8;    /**< the ID of the device on the non-coherent bus */
        uint64_t    unused  :36;
        uint64_t    wait    : 1;    /**< if set, don't return load response until work is available */
        uint64_t    unused2 : 3;
#else
        /* Little-endian bit order: same fields as above, reversed */
        uint64_t    unused2 : 3;
        uint64_t    wait    : 1;
        uint64_t    unused  :36;
        uint64_t    did     : 8;
        uint64_t    len     : 8;
        uint64_t    scraddr : 8;
#endif
    } s;

} cvmx_pow_iobdma_store_t;
1341
1342
1343/* CSR typedefs have been moved to cvmx-pow-defs.h */
1344
1345/**
1346 * Get the POW tag for this core. This returns the current
1347 * tag type, tag, group, and POW entry index associated with
1348 * this core. Index is only valid if the tag type isn't NULL_NULL.
1349 * If a tag switch is pending this routine returns the tag before
1350 * the tag switch, not after.
1351 *
1352 * @return Current tag
1353 */
1354static inline cvmx_pow_tag_info_t cvmx_pow_get_current_tag(void)
1355{
1356    cvmx_pow_load_addr_t load_addr;
1357    cvmx_pow_tag_info_t result;
1358
1359    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1360        cvmx_pow_sl_tag_resp_t load_resp;
1361        load_addr.u64 = 0;
1362        load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
1363        load_addr.sstatus_cn68xx.is_io = 1;
1364        load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
1365        load_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();
1366        load_addr.sstatus_cn68xx.opcode = 3;
1367        load_resp.u64 = cvmx_read_csr(load_addr.u64);
1368        result.grp = load_resp.s.grp;
1369        result.index = load_resp.s.index;
1370        result.tag_type = load_resp.s.tag_type;
1371        result.tag = load_resp.s.tag;
1372    } else {
1373        cvmx_pow_tag_load_resp_t load_resp;
1374        load_addr.u64 = 0;
1375        load_addr.sstatus.mem_region = CVMX_IO_SEG;
1376        load_addr.sstatus.is_io = 1;
1377        load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
1378        load_addr.sstatus.coreid = cvmx_get_core_num();
1379        load_addr.sstatus.get_cur = 1;
1380        load_resp.u64 = cvmx_read_csr(load_addr.u64);
1381        result.grp = load_resp.s_sstatus2.grp;
1382        result.index = load_resp.s_sstatus2.index;
1383        result.tag_type = load_resp.s_sstatus2.tag_type;
1384        result.tag = load_resp.s_sstatus2.tag;
1385    }
1386    return result;
1387}
1388
1389/**
1390 * Get the POW WQE for this core. This returns the work queue
1391 * entry currently associated with this core.
1392 *
1393 * @return WQE pointer
1394 */
1395static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
1396{
1397    cvmx_pow_load_addr_t load_addr;
1398    cvmx_pow_tag_load_resp_t load_resp;
1399
1400    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1401        load_addr.u64 = 0;
1402        load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
1403        load_addr.sstatus_cn68xx.is_io = 1;
1404        load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
1405        load_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();
1406        load_addr.sstatus_cn68xx.opcode = 3;
1407        load_resp.u64 = cvmx_read_csr(load_addr.u64);
1408        if (load_resp.s_sstatus3_cn68xx.wqp)
1409            return (cvmx_wqe_t*)cvmx_phys_to_ptr(load_resp.s_sstatus3_cn68xx.wqp);
1410        else
1411            return (cvmx_wqe_t*)0;
1412    } else {
1413        load_addr.u64 = 0;
1414        load_addr.sstatus.mem_region = CVMX_IO_SEG;
1415        load_addr.sstatus.is_io = 1;
1416        load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
1417        load_addr.sstatus.coreid = cvmx_get_core_num();
1418        load_addr.sstatus.get_cur = 1;
1419        load_addr.sstatus.get_wqp = 1;
1420        load_resp.u64 = cvmx_read_csr(load_addr.u64);
1421        return (cvmx_wqe_t*)cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
1422    }
1423}
1424
1425
1426/**
1427 * @INTERNAL
1428 * Print a warning if a tag switch is pending for this core
1429 *
1430 * @param function Function name checking for a pending tag switch
1431 */
static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
{
    uint64_t switch_complete;
    /* Read the core's tag-switch completion bit; zero means a switch is
    ** still in flight and the named caller is being used incorrectly. */
    CVMX_MF_CHORD(switch_complete);
    cvmx_warn_if(!switch_complete, "%s called with tag switch in progress\n", function);
}
1438
1439
1440/**
1441 * Waits for a tag switch to complete by polling the completion bit.
1442 * Note that switches to NULL complete immediately and do not need
1443 * to be waited for.
1444 */
static inline void cvmx_pow_tag_sw_wait(void)
{
    /* Warn if the switch has not completed after roughly 2^31 cycles */
    const uint64_t MAX_CYCLES = 1ull<<31;
    uint64_t switch_complete;
    uint64_t start_cycle = cvmx_get_cycle();
    while (1)
    {
        /* Poll the tag-switch completion bit for this core */
        CVMX_MF_CHORD(switch_complete);
        if (cvmx_unlikely(switch_complete))
            break;
        if (cvmx_unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES))
        {
            cvmx_dprintf("WARNING: Tag switch is taking a long time, possible deadlock\n");
            /* Make start_cycle + MAX_CYCLES wrap to UINT64_MAX so the
            ** timeout test above can never fire again: the warning is
            ** printed at most once per call while polling continues. */
            start_cycle = -MAX_CYCLES-1;
        }
    }
}
1462
1463
1464/**
1465 * Synchronous work request.  Requests work from the POW.
1466 * This function does NOT wait for previous tag switches to complete,
1467 * so the caller must ensure that there is not a pending tag switch.
1468 *
 * @param wait   When set, call stalls until work becomes available, or times out.
1470 *               If not set, returns immediately.
1471 *
1472 * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
1473 */
1474static inline cvmx_wqe_t * cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t wait)
1475{
1476    cvmx_pow_load_addr_t ptr;
1477    cvmx_pow_tag_load_resp_t result;
1478
1479    if (CVMX_ENABLE_POW_CHECKS)
1480        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1481
1482    ptr.u64 = 0;
1483    ptr.swork.mem_region = CVMX_IO_SEG;
1484    ptr.swork.is_io = 1;
1485    ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
1486    ptr.swork.wait = wait;
1487
1488    result.u64 = cvmx_read_csr(ptr.u64);
1489
1490    if (result.s_work.no_work)
1491        return NULL;
1492    else
1493        return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
1494}
1495
1496
1497/**
1498 * Synchronous work request.  Requests work from the POW.
1499 * This function waits for any previous tag switch to complete before
1500 * requesting the new work.
1501 *
 * @param wait   When set, call stalls until work becomes available, or times out.
1503 *               If not set, returns immediately.
1504 *
1505 * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
1506 */
1507static inline cvmx_wqe_t * cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
1508{
1509    if (CVMX_ENABLE_POW_CHECKS)
1510        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1511
1512    /* Must not have a switch pending when requesting work */
1513    cvmx_pow_tag_sw_wait();
1514    return(cvmx_pow_work_request_sync_nocheck(wait));
1515
1516}
1517
1518
1519/**
1520 * Synchronous null_rd request.  Requests a switch out of NULL_NULL POW state.
1521 * This function waits for any previous tag switch to complete before
1522 * requesting the null_rd.
1523 *
1524 * @return Returns the POW state of type cvmx_pow_tag_type_t.
1525 */
1526static inline cvmx_pow_tag_type_t cvmx_pow_work_request_null_rd(void)
1527{
1528    cvmx_pow_load_addr_t ptr;
1529    cvmx_pow_tag_load_resp_t result;
1530
1531    if (CVMX_ENABLE_POW_CHECKS)
1532        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1533
1534    /* Must not have a switch pending when requesting work */
1535    cvmx_pow_tag_sw_wait();
1536
1537    ptr.u64 = 0;
1538    ptr.snull_rd.mem_region = CVMX_IO_SEG;
1539    ptr.snull_rd.is_io = 1;
1540    ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
1541
1542    result.u64 = cvmx_read_csr(ptr.u64);
1543
1544    return (cvmx_pow_tag_type_t)result.s_null_rd.state;
1545}
1546
1547
1548/**
1549 * Asynchronous work request.  Work is requested from the POW unit, and should later
1550 * be checked with function cvmx_pow_work_response_async.
1551 * This function does NOT wait for previous tag switches to complete,
1552 * so the caller must ensure that there is not a pending tag switch.
1553 *
1554 * @param scr_addr Scratch memory address that response will be returned to,
1555 *                  which is either a valid WQE, or a response with the invalid bit set.
1556 *                  Byte address, must be 8 byte aligned.
1557 * @param wait      1 to cause response to wait for work to become available (or timeout)
1558 *                  0 to cause response to return immediately
1559 */
1560static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, cvmx_pow_wait_t wait)
1561{
1562    cvmx_pow_iobdma_store_t data;
1563
1564    if (CVMX_ENABLE_POW_CHECKS)
1565        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1566
1567    /* scr_addr must be 8 byte aligned */
1568    data.u64 = 0;
1569    data.s.scraddr = scr_addr >> 3;
1570    data.s.len = 1;
1571    data.s.did = CVMX_OCT_DID_TAG_SWTAG;
1572    data.s.wait = wait;
1573    cvmx_send_single(data.u64);
1574}
1575/**
1576 * Asynchronous work request.  Work is requested from the POW unit, and should later
1577 * be checked with function cvmx_pow_work_response_async.
1578 * This function waits for any previous tag switch to complete before
1579 * requesting the new work.
1580 *
1581 * @param scr_addr Scratch memory address that response will be returned to,
1582 *                  which is either a valid WQE, or a response with the invalid bit set.
1583 *                  Byte address, must be 8 byte aligned.
1584 * @param wait      1 to cause response to wait for work to become available (or timeout)
1585 *                  0 to cause response to return immediately
1586 */
1587static inline void cvmx_pow_work_request_async(int scr_addr, cvmx_pow_wait_t wait)
1588{
1589    if (CVMX_ENABLE_POW_CHECKS)
1590        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1591
1592    /* Must not have a switch pending when requesting work */
1593    cvmx_pow_tag_sw_wait();
1594    cvmx_pow_work_request_async_nocheck(scr_addr, wait);
1595}
1596
1597
1598/**
1599 * Gets result of asynchronous work request.  Performs a IOBDMA sync
1600 * to wait for the response.
1601 *
1602 * @param scr_addr Scratch memory address to get result from
1603 *                  Byte address, must be 8 byte aligned.
1604 * @return Returns the WQE from the scratch register, or NULL if no work was available.
1605 */
1606static inline cvmx_wqe_t * cvmx_pow_work_response_async(int scr_addr)
1607{
1608    cvmx_pow_tag_load_resp_t result;
1609
1610    CVMX_SYNCIOBDMA;
1611    result.u64 = cvmx_scratch_read64(scr_addr);
1612
1613    if (result.s_work.no_work)
1614        return NULL;
1615    else
1616        return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
1617}
1618
1619
1620/**
1621 * Checks if a work queue entry pointer returned by a work
1622 * request is valid.  It may be invalid due to no work
1623 * being available or due to a timeout.
1624 *
1625 * @param wqe_ptr pointer to a work queue entry returned by the POW
1626 *
1627 * @return 0 if pointer is valid
1628 *         1 if invalid (no work was returned)
1629 */
1630static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
1631{
1632    return (wqe_ptr == NULL);
1633}
1634
1635
1636
1637/**
1638 * Starts a tag switch to the provided tag value and tag type.  Completion for
1639 * the tag switch must be checked for separately.
1640 * This function does NOT update the
1641 * work queue entry in dram to match tag value and type, so the application must
1642 * keep track of these if they are important to the application.
1643 * This tag switch command must not be used for switches to NULL, as the tag
1644 * switch pending bit will be set by the switch request, but never cleared by the
1645 * hardware.
1646 *
1647 * NOTE: This should not be used when switching from a NULL tag.  Use
1648 * cvmx_pow_tag_sw_full() instead.
1649 *
1650 * This function does no checks, so the caller must ensure that any previous tag
1651 * switch has completed.
1652 *
1653 * @param tag      new tag value
1654 * @param tag_type new tag type (ordered or atomic)
1655 */
1656static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type)
1657{
1658    cvmx_addr_t ptr;
1659    cvmx_pow_tag_req_t tag_req;
1660
1661    if (CVMX_ENABLE_POW_CHECKS)
1662    {
1663        cvmx_pow_tag_info_t current_tag;
1664        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1665        current_tag = cvmx_pow_get_current_tag();
1666        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1667        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag\n", __FUNCTION__);
1668        cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__);
1669        cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__);
1670    }
1671
1672    /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1673    ** once the WQE is in flight.  See hardware manual for complete details.
1674    ** It is the application's responsibility to keep track of the current tag
1675    ** value if that is important.
1676    */
1677
1678    tag_req.u64 = 0;
1679    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1680        tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;
1681        tag_req.s_cn68xx_other.tag = tag;
1682        tag_req.s_cn68xx_other.type = tag_type;
1683    } else {
1684        tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;
1685        tag_req.s_cn38xx.tag = tag;
1686        tag_req.s_cn38xx.type = tag_type;
1687    }
1688
1689    ptr.u64 = 0;
1690    ptr.sio.mem_region = CVMX_IO_SEG;
1691    ptr.sio.is_io = 1;
1692    ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
1693
1694    /* once this store arrives at POW, it will attempt the switch
1695       software must wait for the switch to complete separately */
1696    cvmx_write_io(ptr.u64, tag_req.u64);
1697}
1698
1699
1700/**
1701 * Starts a tag switch to the provided tag value and tag type.  Completion for
1702 * the tag switch must be checked for separately.
1703 * This function does NOT update the
1704 * work queue entry in dram to match tag value and type, so the application must
1705 * keep track of these if they are important to the application.
1706 * This tag switch command must not be used for switches to NULL, as the tag
1707 * switch pending bit will be set by the switch request, but never cleared by the
1708 * hardware.
1709 *
1710 * NOTE: This should not be used when switching from a NULL tag.  Use
1711 * cvmx_pow_tag_sw_full() instead.
1712 *
1713 * This function waits for any previous tag switch to complete, and also
1714 * displays an error on tag switches to NULL.
1715 *
1716 * @param tag      new tag value
1717 * @param tag_type new tag type (ordered or atomic)
1718 */
1719static inline void cvmx_pow_tag_sw(uint32_t tag, cvmx_pow_tag_type_t tag_type)
1720{
1721    if (CVMX_ENABLE_POW_CHECKS)
1722        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1723
1724    /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1725    ** once the WQE is in flight.  See hardware manual for complete details.
1726    ** It is the application's responsibility to keep track of the current tag
1727    ** value if that is important.
1728    */
1729
1730    /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1731    ** if a previous switch is still pending.  */
1732    cvmx_pow_tag_sw_wait();
1733    cvmx_pow_tag_sw_nocheck(tag, tag_type);
1734}
1735
1736
1737/**
1738 * Starts a tag switch to the provided tag value and tag type.  Completion for
1739 * the tag switch must be checked for separately.
1740 * This function does NOT update the
1741 * work queue entry in dram to match tag value and type, so the application must
1742 * keep track of these if they are important to the application.
1743 * This tag switch command must not be used for switches to NULL, as the tag
1744 * switch pending bit will be set by the switch request, but never cleared by the
1745 * hardware.
1746 *
1747 * This function must be used for tag switches from NULL.
1748 *
1749 * This function does no checks, so the caller must ensure that any previous tag
1750 * switch has completed.
1751 *
1752 * @param wqp      pointer to work queue entry to submit.  This entry is updated to match the other parameters
1753 * @param tag      tag value to be assigned to work queue entry
1754 * @param tag_type type of tag
1755 * @param group      group value for the work queue entry.
1756 */
1757static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group)
1758{
1759    cvmx_addr_t ptr;
1760    cvmx_pow_tag_req_t tag_req;
1761
1762    if (CVMX_ENABLE_POW_CHECKS)
1763    {
1764        cvmx_pow_tag_info_t current_tag;
1765        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1766        current_tag = cvmx_pow_get_current_tag();
1767        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1768        cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__);
1769        cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__);
1770        if ((wqp != cvmx_phys_to_ptr(0x80)) && cvmx_pow_get_current_wqp())
1771            cvmx_warn_if(wqp != cvmx_pow_get_current_wqp(), "%s passed WQE(%p) doesn't match the address in the POW(%p)\n", __FUNCTION__, wqp, cvmx_pow_get_current_wqp());
1772    }
1773
1774    /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1775    ** once the WQE is in flight.  See hardware manual for complete details.
1776    ** It is the application's responsibility to keep track of the current tag
1777    ** value if that is important.
1778    */
1779
1780    tag_req.u64 = 0;
1781    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1782        tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_FULL;
1783        tag_req.s_cn68xx_other.tag = tag;
1784        tag_req.s_cn68xx_other.type = tag_type;
1785        tag_req.s_cn68xx_other.grp = group;
1786    } else {
1787        tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_FULL;
1788        tag_req.s_cn38xx.tag = tag;
1789        tag_req.s_cn38xx.type = tag_type;
1790        tag_req.s_cn38xx.grp = group;
1791    }
1792
1793    ptr.u64 = 0;
1794    ptr.sio.mem_region = CVMX_IO_SEG;
1795    ptr.sio.is_io = 1;
1796    ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
1797    ptr.sio.offset = CAST64(wqp);
1798
1799    /* once this store arrives at POW, it will attempt the switch
1800       software must wait for the switch to complete separately */
1801    cvmx_write_io(ptr.u64, tag_req.u64);
1802}
1803
1804
1805/**
1806 * Starts a tag switch to the provided tag value and tag type.  Completion for
1807 * the tag switch must be checked for separately.
1808 * This function does NOT update the
1809 * work queue entry in dram to match tag value and type, so the application must
1810 * keep track of these if they are important to the application.
1811 * This tag switch command must not be used for switches to NULL, as the tag
1812 * switch pending bit will be set by the switch request, but never cleared by the
1813 * hardware.
1814 *
1815 * This function must be used for tag switches from NULL.
1816 *
1817 * This function waits for any pending tag switches to complete
1818 * before requesting the tag switch.
1819 *
1820 * @param wqp      pointer to work queue entry to submit.  This entry is updated to match the other parameters
1821 * @param tag      tag value to be assigned to work queue entry
1822 * @param tag_type type of tag
1823 * @param group      group value for the work queue entry.
1824 */
1825static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group)
1826{
1827    if (CVMX_ENABLE_POW_CHECKS)
1828        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1829
1830    /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1831    ** if a previous switch is still pending.  */
1832    cvmx_pow_tag_sw_wait();
1833    cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
1834}
1835
1836
1837/**
1838 * Switch to a NULL tag, which ends any ordering or
1839 * synchronization provided by the POW for the current
1840 * work queue entry.  This operation completes immediately,
1841 * so completion should not be waited for.
1842 * This function does NOT wait for previous tag switches to complete,
1843 * so the caller must ensure that any previous tag switches have completed.
1844 */
1845static inline void cvmx_pow_tag_sw_null_nocheck(void)
1846{
1847    cvmx_addr_t ptr;
1848    cvmx_pow_tag_req_t tag_req;
1849
1850    if (CVMX_ENABLE_POW_CHECKS)
1851    {
1852        cvmx_pow_tag_info_t current_tag;
1853        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1854        current_tag = cvmx_pow_get_current_tag();
1855        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1856        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called when we already have a NULL tag\n", __FUNCTION__);
1857    }
1858
1859    tag_req.u64 = 0;
1860    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1861        tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;
1862        tag_req.s_cn68xx_other.type = CVMX_POW_TAG_TYPE_NULL;
1863    } else {
1864        tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;
1865        tag_req.s_cn38xx.type = CVMX_POW_TAG_TYPE_NULL;
1866    }
1867
1868
1869    ptr.u64 = 0;
1870    ptr.sio.mem_region = CVMX_IO_SEG;
1871    ptr.sio.is_io = 1;
1872    ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
1873
1874
1875    cvmx_write_io(ptr.u64, tag_req.u64);
1876
1877    /* switch to NULL completes immediately */
1878}
1879
1880/**
1881 * Switch to a NULL tag, which ends any ordering or
1882 * synchronization provided by the POW for the current
 * work queue entry.  This operation completes immediately,
1884 * so completion should not be waited for.
1885 * This function waits for any pending tag switches to complete
1886 * before requesting the switch to NULL.
1887 */
1888static inline void cvmx_pow_tag_sw_null(void)
1889{
1890    if (CVMX_ENABLE_POW_CHECKS)
1891        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1892
1893    /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1894    ** if a previous switch is still pending.  */
1895    cvmx_pow_tag_sw_wait();
1896    cvmx_pow_tag_sw_null_nocheck();
1897
1898    /* switch to NULL completes immediately */
1899}
1900
1901
1902
1903/**
1904 * Submits work to an input queue.  This function updates the work queue entry in DRAM to match
1905 * the arguments given.
1906 * Note that the tag provided is for the work queue entry submitted, and is unrelated to the tag that
1907 * the core currently holds.
1908 *
1909 * @param wqp      pointer to work queue entry to submit.  This entry is updated to match the other parameters
1910 * @param tag      tag value to be assigned to work queue entry
1911 * @param tag_type type of tag
1912 * @param qos      Input queue to add to.
1913 * @param grp      group value for the work queue entry.
1914 */
static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t qos, uint64_t grp)
{
    cvmx_addr_t ptr;
    cvmx_pow_tag_req_t tag_req;

    tag_req.u64 = 0;

    /* Update the WQE in DRAM first: unlike a tag switch, ADDWQ may cause
    ** the POW to read the entry from memory. */
    wqp->word1.s.tag = tag;
    wqp->word1.s.tag_type = tag_type;

    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
        /* Reset all reserved bits */
        wqp->word1.cn68xx.zero_0 = 0;
        wqp->word1.cn68xx.zero_1 = 0;
        wqp->word1.cn68xx.zero_2 = 0;
        wqp->word1.cn68xx.qos = qos;
        wqp->word1.cn68xx.grp = grp;

        tag_req.s_cn68xx_add.op = CVMX_POW_TAG_OP_ADDWQ;
        tag_req.s_cn68xx_add.type = tag_type;
        tag_req.s_cn68xx_add.tag = tag;
        tag_req.s_cn68xx_add.qos = qos;
        tag_req.s_cn68xx_add.grp = grp;
    } else {
        /* Reset all reserved bits */
        wqp->word1.cn38xx.zero_2 = 0;
        wqp->word1.cn38xx.qos = qos;
        wqp->word1.cn38xx.grp = grp;

        tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_ADDWQ;
        tag_req.s_cn38xx.type = tag_type;
        tag_req.s_cn38xx.tag = tag;
        tag_req.s_cn38xx.qos = qos;
        tag_req.s_cn38xx.grp = grp;
    }

    /* The WQE physical address rides in the offset field of the store */
    ptr.u64 = 0;
    ptr.sio.mem_region = CVMX_IO_SEG;
    ptr.sio.is_io = 1;
    ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
    ptr.sio.offset = cvmx_ptr_to_phys(wqp);

    /* SYNC write to memory before the work submit.  This is necessary
    ** as POW may read values from DRAM at this time */
    CVMX_SYNCWS;
    cvmx_write_io(ptr.u64, tag_req.u64);
}
1962
1963
1964
1965/**
1966 * This function sets the group mask for a core.  The group mask
1967 * indicates which groups each core will accept work from. There are
1968 * 16 groups.
1969 *
1970 * @param core_num   core to apply mask to
1971 * @param mask   Group mask. There are 16 groups, so only bits 0-15 are valid,
1972 *               representing groups 0-15.
1973 *               Each 1 bit in the mask enables the core to accept work from
1974 *               the corresponding group.
1975 */
1976static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
1977{
1978
1979    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
1980    {
1981        cvmx_sso_ppx_grp_msk_t grp_msk;
1982        grp_msk.s.grp_msk = mask;
1983        cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(core_num), grp_msk.u64);
1984    }
1985    else
1986    {
1987        cvmx_pow_pp_grp_mskx_t grp_msk;
1988        grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
1989        grp_msk.s.grp_msk = mask;
1990        cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
1991    }
1992}
1993
1994/**
1995 * This function sets POW static priorities for a core. Each input queue has
1996 * an associated priority value.
1997 *
1998 * @param core_num   core to apply priorities to
1999 * @param priority   Vector of 8 priorities, one per POW Input Queue (0-7).
2000 *                   Highest priority is 0 and lowest is 7. A priority value
2001 *                   of 0xF instructs POW to skip the Input Queue when
2002 *                   scheduling to this specific core.
2003 *                   NOTE: priorities should not have gaps in values, meaning
2004 *                         {0,1,1,1,1,1,1,1} is a valid configuration while
2005 *                         {0,2,2,2,2,2,2,2} is not.
2006 */
2007static inline void cvmx_pow_set_priority(uint64_t core_num, const uint8_t priority[])
2008{
2009    if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
2010        return;
2011
2012    /* Detect gaps between priorities and flag error */
2013    {
2014        int i;
2015        uint32_t prio_mask = 0;
2016
2017        for(i=0; i<8; i++)
2018            if (priority[i] != 0xF)
2019                prio_mask |= 1<<priority[i];
2020
2021        if ( prio_mask ^ ((1<<cvmx_pop(prio_mask)) - 1))
2022        {
2023            cvmx_dprintf("ERROR: POW static priorities should be contiguous (0x%llx)\n", (unsigned long long)prio_mask);
2024            return;
2025        }
2026    }
2027
2028    /* POW priorities are supported on CN5xxx and later */
2029    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
2030    {
2031        cvmx_sso_ppx_qos_pri_t qos_pri;
2032
2033        qos_pri.u64 = cvmx_read_csr(CVMX_SSO_PPX_QOS_PRI(core_num));
2034        qos_pri.s.qos0_pri = priority[0];
2035        qos_pri.s.qos1_pri = priority[1];
2036        qos_pri.s.qos2_pri = priority[2];
2037        qos_pri.s.qos3_pri = priority[3];
2038        qos_pri.s.qos4_pri = priority[4];
2039        qos_pri.s.qos5_pri = priority[5];
2040        qos_pri.s.qos6_pri = priority[6];
2041        qos_pri.s.qos7_pri = priority[7];
2042        cvmx_write_csr(CVMX_SSO_PPX_QOS_PRI(core_num), qos_pri.u64);
2043    }
2044    else
2045    {
2046        cvmx_pow_pp_grp_mskx_t grp_msk;
2047
2048        grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
2049        grp_msk.s.qos0_pri = priority[0];
2050        grp_msk.s.qos1_pri = priority[1];
2051        grp_msk.s.qos2_pri = priority[2];
2052        grp_msk.s.qos3_pri = priority[3];
2053        grp_msk.s.qos4_pri = priority[4];
2054        grp_msk.s.qos5_pri = priority[5];
2055        grp_msk.s.qos6_pri = priority[6];
2056        grp_msk.s.qos7_pri = priority[7];
2057
2058        cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
2059    }
2060}
2061
2062/**
2063 * Performs a tag switch and then an immediate deschedule. This completes
2064 * immediately, so completion must not be waited for.  This function does NOT
2065 * update the wqe in DRAM to match arguments.
2066 *
2067 * This function does NOT wait for any prior tag switches to complete, so the
2068 * calling code must do this.
2069 *
2070 * Note the following CAVEAT of the Octeon HW behavior when
2071 * re-scheduling DE-SCHEDULEd items whose (next) state is
2072 * ORDERED:
2073 *   - If there are no switches pending at the time that the
2074 *     HW executes the de-schedule, the HW will only re-schedule
2075 *     the head of the FIFO associated with the given tag. This
2076 *     means that in many respects, the HW treats this ORDERED
2077 *     tag as an ATOMIC tag. Note that in the SWTAG_DESCH
2078 *     case (to an ORDERED tag), the HW will do the switch
2079 *     before the deschedule whenever it is possible to do
2080 *     the switch immediately, so it may often look like
2081 *     this case.
2082 *   - If there is a pending switch to ORDERED at the time
2083 *     the HW executes the de-schedule, the HW will perform
2084 *     the switch at the time it re-schedules, and will be
2085 *     able to reschedule any/all of the entries with the
2086 *     same tag.
2087 * Due to this behavior, the RECOMMENDATION to software is
2088 * that they have a (next) state of ATOMIC when they
2089 * DE-SCHEDULE. If an ORDERED tag is what was really desired,
2090 * SW can choose to immediately switch to an ORDERED tag
2091 * after the work (that has an ATOMIC tag) is re-scheduled.
2092 * Note that since there are never any tag switches pending
2093 * when the HW re-schedules, this switch can be IMMEDIATE upon
2094 * the reception of the pointer during the re-schedule.
2095 *
2096 * @param tag      New tag value
2097 * @param tag_type New tag type
2098 * @param group    New group value
2099 * @param no_sched Control whether this work queue entry will be rescheduled.
2100 *                 - 1 : don't schedule this work
2101 *                 - 0 : allow this work to be scheduled.
2102 */
2103static inline void cvmx_pow_tag_sw_desched_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched)
2104{
2105    cvmx_addr_t ptr;
2106    cvmx_pow_tag_req_t tag_req;
2107
2108    if (CVMX_ENABLE_POW_CHECKS)
2109    {
2110        cvmx_pow_tag_info_t current_tag;
2111        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
2112        current_tag = cvmx_pow_get_current_tag();
2113        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
2114        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not allowed from NULL state\n", __FUNCTION__);
2115        cvmx_warn_if((current_tag.tag_type != CVMX_POW_TAG_TYPE_ATOMIC) && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC), "%s called where neither the before or after tag is ATOMIC\n", __FUNCTION__);
2116    }
2117
2118    tag_req.u64 = 0;
2119    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
2120        tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
2121        tag_req.s_cn68xx_other.tag = tag;
2122        tag_req.s_cn68xx_other.type = tag_type;
2123        tag_req.s_cn68xx_other.grp = group;
2124        tag_req.s_cn68xx_other.no_sched = no_sched;
2125    } else {
2126        tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
2127        tag_req.s_cn38xx.tag = tag;
2128        tag_req.s_cn38xx.type = tag_type;
2129        tag_req.s_cn38xx.grp = group;
2130        tag_req.s_cn38xx.no_sched = no_sched;
2131    }
2132
2133    ptr.u64 = 0;
2134    ptr.sio.mem_region = CVMX_IO_SEG;
2135    ptr.sio.is_io = 1;
2136    ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
2137
2138    cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */
2139}
2140/**
2141 * Performs a tag switch and then an immediate deschedule. This completes
2142 * immediately, so completion must not be waited for.  This function does NOT
2143 * update the wqe in DRAM to match arguments.
2144 *
2145 * This function waits for any prior tag switches to complete, so the
2146 * calling code may call this function with a pending tag switch.
2147 *
2148 * Note the following CAVEAT of the Octeon HW behavior when
2149 * re-scheduling DE-SCHEDULEd items whose (next) state is
2150 * ORDERED:
2151 *   - If there are no switches pending at the time that the
2152 *     HW executes the de-schedule, the HW will only re-schedule
2153 *     the head of the FIFO associated with the given tag. This
2154 *     means that in many respects, the HW treats this ORDERED
2155 *     tag as an ATOMIC tag. Note that in the SWTAG_DESCH
2156 *     case (to an ORDERED tag), the HW will do the switch
2157 *     before the deschedule whenever it is possible to do
2158 *     the switch immediately, so it may often look like
2159 *     this case.
2160 *   - If there is a pending switch to ORDERED at the time
2161 *     the HW executes the de-schedule, the HW will perform
2162 *     the switch at the time it re-schedules, and will be
2163 *     able to reschedule any/all of the entries with the
2164 *     same tag.
2165 * Due to this behavior, the RECOMMENDATION to software is
2166 * that they have a (next) state of ATOMIC when they
2167 * DE-SCHEDULE. If an ORDERED tag is what was really desired,
2168 * SW can choose to immediately switch to an ORDERED tag
2169 * after the work (that has an ATOMIC tag) is re-scheduled.
2170 * Note that since there are never any tag switches pending
2171 * when the HW re-schedules, this switch can be IMMEDIATE upon
2172 * the reception of the pointer during the re-schedule.
2173 *
2174 * @param tag      New tag value
2175 * @param tag_type New tag type
2176 * @param group    New group value
2177 * @param no_sched Control whether this work queue entry will be rescheduled.
2178 *                 - 1 : don't schedule this work
2179 *                 - 0 : allow this work to be scheduled.
2180 */
2181static inline void cvmx_pow_tag_sw_desched(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched)
2182{
2183    if (CVMX_ENABLE_POW_CHECKS)
2184        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
2185
2186    /* Need to make sure any writes to the work queue entry are complete */
2187    CVMX_SYNCWS;
2188    /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
2189    ** if a previous switch is still pending.  */
2190    cvmx_pow_tag_sw_wait();
2191    cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
2192}
2193
2194
2195
2196
2197
2198/**
2199 * Descchedules the current work queue entry.
2200 *
2201 * @param no_sched no schedule flag value to be set on the work queue entry.  If this is set
2202 *                 the entry will not be rescheduled.
2203 */
2204static inline void cvmx_pow_desched(uint64_t no_sched)
2205{
2206    cvmx_addr_t ptr;
2207    cvmx_pow_tag_req_t tag_req;
2208
2209    if (CVMX_ENABLE_POW_CHECKS)
2210    {
2211        cvmx_pow_tag_info_t current_tag;
2212        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
2213        current_tag = cvmx_pow_get_current_tag();
2214        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
2215        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not expected from NULL state\n", __FUNCTION__);
2216    }
2217
2218    /* Need to make sure any writes to the work queue entry are complete */
2219    CVMX_SYNCWS;
2220
2221    tag_req.u64 = 0;
2222    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
2223        tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_DESCH;
2224        tag_req.s_cn68xx_other.no_sched = no_sched;
2225    } else {
2226        tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_DESCH;
2227        tag_req.s_cn38xx.no_sched = no_sched;
2228    }
2229
2230    ptr.u64 = 0;
2231    ptr.sio.mem_region = CVMX_IO_SEG;
2232    ptr.sio.is_io = 1;
2233    ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
2234
2235    cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */
2236}
2237
2238
2239
2240
2241
2242
2243
2244/***********************************************************************************************
2245** Define usage of bits within the 32 bit tag values.
2246***********************************************************************************************/
2247
2248/*
2249 * Number of bits of the tag used by software.  The SW bits
 * are always a contiguous block of the high bits, starting at bit 31.
2251 * The hardware bits are always the low bits.  By default, the top 8 bits
2252 * of the tag are reserved for software, and the low 24 are set by the IPD unit.
2253 */
2254#define CVMX_TAG_SW_BITS    (8)
2255#define CVMX_TAG_SW_SHIFT   (32 - CVMX_TAG_SW_BITS)
2256
2257/* Below is the list of values for the top 8 bits of the tag. */
2258#define CVMX_TAG_SW_BITS_INTERNAL  0x1  /* Tag values with top byte of this value are reserved for internal executive uses */
2259/* The executive divides the remaining 24 bits as follows:
2260**  * the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
**  * the lower 16 bits (bits 15 - 0 of the tag) are the value within the subgroup
2262** Note that this section describes the format of tags generated by software - refer to the
2263** hardware documentation for a description of the tags values generated by the packet input
2264** hardware.
2265** Subgroups are defined here */
2266#define CVMX_TAG_SUBGROUP_MASK  0xFFFF /* Mask for the value portion of the tag */
2267#define CVMX_TAG_SUBGROUP_SHIFT 16
2268#define CVMX_TAG_SUBGROUP_PKO  0x1
2269
2270
2271/* End of executive tag subgroup definitions */
2272
2273/* The remaining values software bit values 0x2 - 0xff are available for application use */
2274
2275
2276
2277/**
2278 * This function creates a 32 bit tag value from the two values provided.
2279 *
2280 * @param sw_bits The upper bits (number depends on configuration) are set to this value.  The remainder of
2281 *                bits are set by the hw_bits parameter.
2282 * @param hw_bits The lower bits (number depends on configuration) are set to this value.  The remainder of
2283 *                bits are set by the sw_bits parameter.
2284 *
2285 * @return 32 bit value of the combined hw and sw bits.
2286 */
2287static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
2288{
2289    return((((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) << CVMX_TAG_SW_SHIFT) | (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS))));
2290}
2291/**
2292 * Extracts the bits allocated for software use from the tag
2293 *
2294 * @param tag    32 bit tag value
2295 *
2296 * @return N bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define
2297 */
2298static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
2299{
2300    return((tag >> (32 - CVMX_TAG_SW_BITS)) & cvmx_build_mask(CVMX_TAG_SW_BITS));
2301}
2302/**
2303 *
2304 * Extracts the bits allocated for hardware use from the tag
2305 *
2306 * @param tag    32 bit tag value
2307 *
2308 * @return (32 - N) bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define
2309 */
2310static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
2311{
2312    return(tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
2313}
2314
2315/**
2316 * Store the current POW internal state into the supplied
2317 * buffer. It is recommended that you pass a buffer of at least
2318 * 128KB. The format of the capture may change based on SDK
2319 * version and Octeon chip.
2320 *
2321 * @param buffer Buffer to store capture into
2322 * @param buffer_size
2323 *               The size of the supplied buffer
2324 *
 * @return Zero on success, negative on failure
2326 */
2327extern int cvmx_pow_capture(void *buffer, int buffer_size);
2328
2329/**
2330 * Dump a POW capture to the console in a human readable format.
2331 *
2332 * @param buffer POW capture from cvmx_pow_capture()
2333 * @param buffer_size
2334 *               Size of the buffer
2335 */
2336extern void cvmx_pow_display(void *buffer, int buffer_size);
2337
2338/**
2339 * Return the number of POW entries supported by this chip
2340 *
2341 * @return Number of POW entries
2342 */
2343extern int cvmx_pow_get_num_entries(void);
2344
2345
2346#ifdef  __cplusplus
2347}
2348#endif
2349
2350#endif  /* __CVMX_POW_H__ */
2351