1/***********************license start*************** 2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights 3 * reserved. 4 * 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * * Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 13 * * Redistributions in binary form must reproduce the above 14 * copyright notice, this list of conditions and the following 15 * disclaimer in the documentation and/or other materials provided 16 * with the distribution. 17 18 * * Neither the name of Cavium Networks nor the names of 19 * its contributors may be used to endorse or promote products 20 * derived from this software without specific prior written 21 * permission. 22 23 * This Software, including technical data, may be subject to U.S. export control 24 * laws, including the U.S. Export Administration Act and its associated 25 * regulations, and may be subject to export or import regulations in other 26 * countries. 27 28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" 29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR 30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO 31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR 32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM 33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, 34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF 35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR 36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR 37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. 38 ***********************license end**************************************/ 39 40 41/** 42 * @file 43 * 44 * Interface to the hardware Packet Order / Work unit. 45 * 46 * New, starting with SDK 1.7.0, cvmx-pow supports a number of 47 * extended consistency checks. The define 48 * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW 49 * internal state checks to find common programming errors. If 50 * CVMX_ENABLE_POW_CHECKS is not defined, checks are by default 51 * enabled. For example, cvmx-pow will check for the following 52 * program errors or POW state inconsistency. 53 * - Requesting a POW operation with an active tag switch in 54 * progress. 55 * - Waiting for a tag switch to complete for an excessively 56 * long period. This is normally a sign of an error in locking 57 * causing deadlock. 58 * - Illegal tag switches from NULL_NULL. 59 * - Illegal tag switches from NULL. 60 * - Illegal deschedule request. 61 * - WQE pointer not matching the one attached to the core by 62 * the POW. 63 * 64 * <hr>$Revision: 49448 $<hr> 65 */ 66 67#ifndef __CVMX_POW_H__ 68#define __CVMX_POW_H__ 69 70#include "cvmx-scratch.h" 71#include "cvmx-wqe.h" 72 73#ifndef CVMX_BUILD_FOR_LINUX_KERNEL 74#include "cvmx-warn.h" 75#endif 76 77#ifdef __cplusplus 78extern "C" { 79#endif 80 81/* Default to having all POW constancy checks turned on */ 82#ifndef CVMX_ENABLE_POW_CHECKS 83#define CVMX_ENABLE_POW_CHECKS 1 84#endif 85 86/** 87 * Wait flag values for pow functions. 88 */ 89typedef enum 90{ 91 CVMX_POW_WAIT = 1, 92 CVMX_POW_NO_WAIT = 0, 93} cvmx_pow_wait_t; 94 95/** 96 * POW tag operations. These are used in the data stored to the POW. 97 */ 98typedef enum 99{ 100 CVMX_POW_TAG_OP_SWTAG = 0L, /**< switch the tag (only) for this PP 101 - the previous tag should be non-NULL in this case 102 - tag switch response required 103 - fields used: op, type, tag */ 104 CVMX_POW_TAG_OP_SWTAG_FULL = 1L, /**< switch the tag for this PP, with full information 105 - this should be used when the previous tag is NULL 106 - tag switch response required 107 - fields used: address, op, grp, type, tag */ 108 CVMX_POW_TAG_OP_SWTAG_DESCH = 2L, /**< switch the tag (and/or group) for this PP and de-schedule 109 - OK to keep the tag the same and only change the group 110 - fields used: op, no_sched, grp, type, tag */ 111 CVMX_POW_TAG_OP_DESCH = 3L, /**< just de-schedule 112 - fields used: op, no_sched */ 113 CVMX_POW_TAG_OP_ADDWQ = 4L, /**< create an entirely new work queue entry 114 - fields used: address, op, qos, grp, type, tag */ 115 CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,/**< just update the work queue pointer and grp for this PP 116 - fields used: address, op, grp */ 117 CVMX_POW_TAG_OP_SET_NSCHED = 6L, /**< set the no_sched bit on the de-schedule list 118 - does nothing if the selected entry is not on the de-schedule list 119 - does nothing if the stored work queue pointer does not match the address field 120 - fields used: address, index, op 121 Before issuing a *_NSCHED operation, SW must guarantee that all 122 prior deschedules and set/clr NSCHED operations are complete and all 123 prior switches are complete. The hardware provides the opsdone bit 124 and swdone bit for SW polling. After issuing a *_NSCHED operation, 125 SW must guarantee that the set/clr NSCHED is complete before 126 any subsequent operations. */ 127 CVMX_POW_TAG_OP_CLR_NSCHED = 7L, /**< clears the no_sched bit on the de-schedule list 128 - does nothing if the selected entry is not on the de-schedule list 129 - does nothing if the stored work queue pointer does not match the address field 130 - fields used: address, index, op 131 Before issuing a *_NSCHED operation, SW must guarantee that all 132 prior deschedules and set/clr NSCHED operations are complete and all 133 prior switches are complete. The hardware provides the opsdone bit 134 and swdone bit for SW polling. After issuing a *_NSCHED operation, 135 SW must guarantee that the set/clr NSCHED is complete before 136 any subsequent operations. */ 137 CVMX_POW_TAG_OP_NOP = 15L /**< do nothing */ 138} cvmx_pow_tag_op_t; 139 140/** 141 * This structure defines the store data on a store to POW 142 */ 143typedef union 144{ 145 uint64_t u64; 146 struct 147 { 148#if __BYTE_ORDER == __BIG_ENDIAN 149 uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */ 150 uint64_t unused : 2; 151 uint64_t index :13; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */ 152 cvmx_pow_tag_op_t op : 4; /**< the operation to perform */ 153 uint64_t unused2 : 2; 154 uint64_t qos : 3; /**< the QOS level for the packet. qos is only used for CVMX_POW_TAG_OP_ADDWQ */ 155 uint64_t grp : 4; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */ 156 cvmx_pow_tag_type_t type : 3; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */ 157 uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */ 158#else 159 uint64_t tag :32; 160 cvmx_pow_tag_type_t type : 3; 161 uint64_t grp : 4; 162 uint64_t qos : 3; 163 uint64_t unused2 : 2; 164 cvmx_pow_tag_op_t op : 4; 165 uint64_t index :13; 166 uint64_t unused : 2; 167 uint64_t no_sched : 1; 168#endif 169 } s; 170} cvmx_pow_tag_req_t; 171 172/** 173 * This structure describes the address to load stuff from POW 174 */ 175typedef union 176{ 177 uint64_t u64; 178 179 /** 180 * Address for new work request loads (did<2:0> == 0) 181 */ 182 struct 183 { 184#if __BYTE_ORDER == __BIG_ENDIAN 185 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */ 186 uint64_t reserved_49_61 : 13; /**< Must be zero */ 187 uint64_t is_io : 1; /**< Must be one */ 188 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 0 in this case */ 189 uint64_t reserved_4_39 : 36; /**< Must be zero */ 190 uint64_t wait : 1; /**< If set, don't return load response until work is available */ 191 uint64_t reserved_0_2 : 3; /**< Must be zero */ 192#else 193 uint64_t reserved_0_2 : 3; 194 uint64_t wait : 1; 195 uint64_t reserved_4_39 : 36; 196 uint64_t did : 8; 197 uint64_t is_io : 1; 198 uint64_t reserved_49_61 : 13; 199 uint64_t mem_region : 2; 200#endif 201 } swork; 202 203 /** 204 * Address for loads to get POW internal status 205 */ 206 struct 207 { 208#if __BYTE_ORDER == __BIG_ENDIAN 209 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */ 210 uint64_t reserved_49_61 : 13; /**< Must be zero */ 211 uint64_t is_io : 1; /**< Must be one */ 212 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 1 in this case */ 213 uint64_t reserved_10_39 : 30; /**< Must be zero */ 214 uint64_t coreid : 4; /**< The core id to get status for */ 215 uint64_t get_rev : 1; /**< If set and get_cur is set, return reverse tag-list pointer rather than forward tag-list pointer */ 216 uint64_t get_cur : 1; /**< If set, return current status rather than pending status */ 217 uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type */ 218 uint64_t reserved_0_2 : 3; /**< Must be zero */ 219#else 220 uint64_t reserved_0_2 : 3; 221 uint64_t get_wqp : 1; 222 uint64_t get_cur : 1; 223 uint64_t get_rev : 1; 224 uint64_t coreid : 4; 225 uint64_t reserved_10_39 : 30; 226 uint64_t did : 8; 227 uint64_t is_io : 1; 228 uint64_t reserved_49_61 : 13; 229 uint64_t mem_region : 2; 230#endif 231 } sstatus; 232 233 /** 234 * Address for memory loads to get POW internal state 235 */ 236 struct 237 { 238#if __BYTE_ORDER == __BIG_ENDIAN 239 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */ 240 uint64_t reserved_49_61 : 13; /**< Must be zero */ 241 uint64_t is_io : 1; /**< Must be one */ 242 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 2 in this case */ 243 uint64_t reserved_16_39 : 24; /**< Must be zero */ 244 uint64_t index : 11; /**< POW memory index */ 245 uint64_t get_des : 1; /**< If set, return deschedule information rather than the standard 246 response for work-queue index (invalid if the work-queue entry is not on the 247 deschedule list). */ 248 uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type (no effect when get_des set). */ 249 uint64_t reserved_0_2 : 3; /**< Must be zero */ 250#else 251 uint64_t reserved_0_2 : 3; 252 uint64_t get_wqp : 1; 253 uint64_t get_des : 1; 254 uint64_t index : 11; 255 uint64_t reserved_16_39 : 24; 256 uint64_t did : 8; 257 uint64_t is_io : 1; 258 uint64_t reserved_49_61 : 13; 259 uint64_t mem_region : 2; 260#endif 261 } smemload; 262 263 /** 264 * Address for index/pointer loads 265 */ 266 struct 267 { 268#if __BYTE_ORDER == __BIG_ENDIAN 269 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */ 270 uint64_t reserved_49_61 : 13; /**< Must be zero */ 271 uint64_t is_io : 1; /**< Must be one */ 272 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 3 in this case */ 273 uint64_t reserved_9_39 : 31; /**< Must be zero */ 274 uint64_t qosgrp : 4; /**< when {get_rmt ==0 AND get_des_get_tail == 0}, this field selects one of 275 eight POW internal-input queues (0-7), one per QOS level; values 8-15 are 276 illegal in this case; 277 when {get_rmt ==0 AND get_des_get_tail == 1}, this field selects one of 278 16 deschedule lists (per group); 279 when get_rmt ==1, this field selects one of 16 memory-input queue lists. 280 The two memory-input queue lists associated with each QOS level are: 281 - qosgrp = 0, qosgrp = 8: QOS0 282 - qosgrp = 1, qosgrp = 9: QOS1 283 - qosgrp = 2, qosgrp = 10: QOS2 284 - qosgrp = 3, qosgrp = 11: QOS3 285 - qosgrp = 4, qosgrp = 12: QOS4 286 - qosgrp = 5, qosgrp = 13: QOS5 287 - qosgrp = 6, qosgrp = 14: QOS6 288 - qosgrp = 7, qosgrp = 15: QOS7 */ 289 uint64_t get_des_get_tail: 1; /**< If set and get_rmt is clear, return deschedule list indexes 290 rather than indexes for the specified qos level; if set and get_rmt is set, return 291 the tail pointer rather than the head pointer for the specified qos level. */ 292 uint64_t get_rmt : 1; /**< If set, return remote pointers rather than the local indexes for the specified qos level. */ 293 uint64_t reserved_0_2 : 3; /**< Must be zero */ 294#else 295 uint64_t reserved_0_2 : 3; 296 uint64_t get_rmt : 1; 297 uint64_t get_des_get_tail: 1; 298 uint64_t qosgrp : 4; 299 uint64_t reserved_9_39 : 31; 300 uint64_t did : 8; 301 uint64_t is_io : 1; 302 uint64_t reserved_49_61 : 13; 303 uint64_t mem_region : 2; 304#endif 305 } sindexload; 306 307 /** 308 * address for NULL_RD request (did<2:0> == 4) 309 * when this is read, HW attempts to change the state to NULL if it is NULL_NULL 310 * (the hardware cannot switch from NULL_NULL to NULL if a POW entry is not available - 311 * software may need to recover by finishing another piece of work before a POW 312 * entry can ever become available.) 313 */ 314 struct 315 { 316#if __BYTE_ORDER == __BIG_ENDIAN 317 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */ 318 uint64_t reserved_49_61 : 13; /**< Must be zero */ 319 uint64_t is_io : 1; /**< Must be one */ 320 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 4 in this case */ 321 uint64_t reserved_0_39 : 40; /**< Must be zero */ 322#else 323 uint64_t reserved_0_39 : 40; 324 uint64_t did : 8; 325 uint64_t is_io : 1; 326 uint64_t reserved_49_61 : 13; 327 uint64_t mem_region : 2; 328#endif 329 } snull_rd; 330} cvmx_pow_load_addr_t; 331 332/** 333 * This structure defines the response to a load/SENDSINGLE to POW (except CSR reads) 334 */ 335typedef union 336{ 337 uint64_t u64; 338 339 /** 340 * Response to new work request loads 341 */ 342 struct 343 { 344#if __BYTE_ORDER == __BIG_ENDIAN 345 uint64_t no_work : 1; /**< Set when no new work queue entry was returned. 346 If there was de-scheduled work, the HW will definitely 347 return it. When this bit is set, it could mean 348 either mean: 349 - There was no work, or 350 - There was no work that the HW could find. This 351 case can happen, regardless of the wait bit value 352 in the original request, when there is work 353 in the IQ's that is too deep down the list. */ 354 uint64_t reserved_40_62 : 23; /**< Must be zero */ 355 uint64_t addr : 40; /**< 36 in O1 -- the work queue pointer */ 356#else 357 uint64_t addr : 40; 358 uint64_t reserved_40_62 : 23; 359 uint64_t no_work : 1; 360#endif 361 } s_work; 362 363 /** 364 * Result for a POW Status Load (when get_cur==0 and get_wqp==0) 365 */ 366 struct 367 { 368#if __BYTE_ORDER == __BIG_ENDIAN 369 uint64_t reserved_62_63 : 2; 370 uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or 371 SWTAG_FULL, and the POW entry has not left the list for the original tag. */ 372 uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. */ 373 uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */ 374 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */ 375 uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */ 376 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */ 377 uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */ 378 uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */ 379 uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */ 380 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */ 381 uint64_t reserved_51 : 1; 382 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */ 383 uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */ 384 uint64_t reserved_34_35 : 2; 385 uint64_t pend_type : 2; /**< This is the tag type when pend_switch or (pend_desched AND pend_desched_switch) are set. */ 386 uint64_t pend_tag : 32; /**< - this is the tag when pend_switch or (pend_desched AND pend_desched_switch) are set. */ 387#else 388 uint64_t pend_tag : 32; 389 uint64_t pend_type : 2; 390 uint64_t reserved_34_35 : 2; 391 uint64_t pend_grp : 4; 392 uint64_t pend_index : 11; 393 uint64_t reserved_51 : 1; 394 uint64_t pend_nosched_clr: 1; 395 uint64_t pend_null_rd : 1; 396 uint64_t pend_new_work_wait: 1; 397 uint64_t pend_new_work : 1; 398 uint64_t pend_nosched : 1; 399 uint64_t pend_desched_switch: 1; 400 uint64_t pend_desched : 1; 401 uint64_t pend_switch_null: 1; 402 uint64_t pend_switch_full: 1; 403 uint64_t pend_switch : 1; 404 uint64_t reserved_62_63 : 2; 405#endif 406 } s_sstatus0; 407 408 /** 409 * Result for a POW Status Load (when get_cur==0 and get_wqp==1) 410 */ 411 struct 412 { 413#if __BYTE_ORDER == __BIG_ENDIAN 414 uint64_t reserved_62_63 : 2; 415 uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or 416 SWTAG_FULL, and the POW entry has not left the list for the original tag. */ 417 uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. */ 418 uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */ 419 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */ 420 uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */ 421 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */ 422 uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */ 423 uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */ 424 uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */ 425 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */ 426 uint64_t reserved_51 : 1; 427 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */ 428 uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */ 429 uint64_t pend_wqp : 36; /**< This is the wqp when pend_nosched_clr is set. */ 430#else 431 uint64_t pend_wqp : 36; 432 uint64_t pend_grp : 4; 433 uint64_t pend_index : 11; 434 uint64_t reserved_51 : 1; 435 uint64_t pend_nosched_clr: 1; 436 uint64_t pend_null_rd : 1; 437 uint64_t pend_new_work_wait: 1; 438 uint64_t pend_new_work : 1; 439 uint64_t pend_nosched : 1; 440 uint64_t pend_desched_switch: 1; 441 uint64_t pend_desched : 1; 442 uint64_t pend_switch_null: 1; 443 uint64_t pend_switch_full: 1; 444 uint64_t pend_switch : 1; 445 uint64_t reserved_62_63 : 2; 446#endif 447 } s_sstatus1; 448 449 /** 450 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==0) 451 */ 452 struct 453 { 454#if __BYTE_ORDER == __BIG_ENDIAN 455 uint64_t reserved_62_63 : 2; 456 uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and 457 tag_type is not NULL or NULL_NULL). */ 458 uint64_t index : 11; /**< The POW entry attached to the core. */ 459 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */ 460 uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in 461 the NULL or NULL_NULL state). */ 462 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the 463 NULL or NULL_NULL state). */ 464 uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list 465 entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */ 466 uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on 467 SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */ 468#else 469 uint64_t tag : 32; 470 uint64_t tag_type : 2; 471 uint64_t tail : 1; 472 uint64_t head : 1; 473 uint64_t grp : 4; 474 uint64_t index : 11; 475 uint64_t link_index : 11; 476 uint64_t reserved_62_63 : 2; 477#endif 478 } s_sstatus2; 479 480 /** 481 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1) 482 */ 483 struct 484 { 485#if __BYTE_ORDER == __BIG_ENDIAN 486 uint64_t reserved_62_63 : 2; 487 uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0 488 (and tag_type is not NULL or NULL_NULL). This field is unpredictable 489 when the core's state is NULL or NULL_NULL. */ 490 uint64_t index : 11; /**< The POW entry attached to the core. */ 491 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */ 492 uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in 493 the NULL or NULL_NULL state). */ 494 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the 495 NULL or NULL_NULL state). */ 496 uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list 497 entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */ 498 uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on 499 SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */ 500#else 501 uint64_t tag : 32; 502 uint64_t tag_type : 2; 503 uint64_t tail : 1; 504 uint64_t head : 1; 505 uint64_t grp : 4; 506 uint64_t index : 11; 507 uint64_t revlink_index : 11; 508 uint64_t reserved_62_63 : 2; 509#endif 510 } s_sstatus3; 511 512 /** 513 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==0) 514 */ 515 struct 516 { 517#if __BYTE_ORDER == __BIG_ENDIAN 518 uint64_t reserved_62_63 : 2; 519 uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and 520 tag_type is not NULL or NULL_NULL). */ 521 uint64_t index : 11; /**< The POW entry attached to the core. */ 522 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */ 523 uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */ 524#else 525 uint64_t wqp : 36; 526 uint64_t grp : 4; 527 uint64_t index : 11; 528 uint64_t link_index : 11; 529 uint64_t reserved_62_63 : 2; 530#endif 531 } s_sstatus4; 532 533 /** 534 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==1) 535 */ 536 struct 537 { 538#if __BYTE_ORDER == __BIG_ENDIAN 539 uint64_t reserved_62_63 : 2; 540 uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0 541 (and tag_type is not NULL or NULL_NULL). This field is unpredictable 542 when the core's state is NULL or NULL_NULL. */ 543 uint64_t index : 11; /**< The POW entry attached to the core. */ 544 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */ 545 uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */ 546#else 547 uint64_t wqp : 36; 548 uint64_t grp : 4; 549 uint64_t index : 11; 550 uint64_t revlink_index : 11; 551 uint64_t reserved_62_63 : 2; 552#endif 553 } s_sstatus5; 554 555 /** 556 * Result For POW Memory Load (get_des == 0 and get_wqp == 0) 557 */ 558 struct 559 { 560#if __BYTE_ORDER == __BIG_ENDIAN 561 uint64_t reserved_51_63 : 13; 562 uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list 563 (unpredictable if entry is the tail of the list). */ 564 uint64_t grp : 4; /**< The group of the POW entry. */ 565 uint64_t reserved_35 : 1; 566 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the 567 NULL or NULL_NULL state). */ 568 uint64_t tag_type : 2; /**< The tag type of the POW entry. */ 569 uint64_t tag : 32; /**< The tag of the POW entry. */ 570#else 571 uint64_t tag : 32; 572 uint64_t tag_type : 2; 573 uint64_t tail : 1; 574 uint64_t reserved_35 : 1; 575 uint64_t grp : 4; 576 uint64_t next_index : 11; 577 uint64_t reserved_51_63 : 13; 578#endif 579 } s_smemload0; 580 581 /** 582 * Result For POW Memory Load (get_des == 0 and get_wqp == 1) 583 */ 584 struct 585 { 586#if __BYTE_ORDER == __BIG_ENDIAN 587 uint64_t reserved_51_63 : 13; 588 uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list 589 (unpredictable if entry is the tail of the list). */ 590 uint64_t grp : 4; /**< The group of the POW entry. */ 591 uint64_t wqp : 36; /**< The WQP held in the POW entry. */ 592#else 593 uint64_t wqp : 36; 594 uint64_t grp : 4; 595 uint64_t next_index : 11; 596 uint64_t reserved_51_63 : 13; 597#endif 598 } s_smemload1; 599 600 /** 601 * Result For POW Memory Load (get_des == 1) 602 */ 603 struct 604 { 605#if __BYTE_ORDER == __BIG_ENDIAN 606 uint64_t reserved_51_63 : 13; 607 uint64_t fwd_index : 11; /**< The next entry in the tag list connected to the descheduled head. */ 608 uint64_t grp : 4; /**< The group of the POW entry. */ 609 uint64_t nosched : 1; /**< The nosched bit for the POW entry. */ 610 uint64_t pend_switch : 1; /**< There is a pending tag switch */ 611 uint64_t pend_type : 2; /**< The next tag type for the new tag list when pend_switch is set. */ 612 uint64_t pend_tag : 32; /**< The next tag for the new tag list when pend_switch is set. */ 613#else 614 uint64_t pend_tag : 32; 615 uint64_t pend_type : 2; 616 uint64_t pend_switch : 1; 617 uint64_t nosched : 1; 618 uint64_t grp : 4; 619 uint64_t fwd_index : 11; 620 uint64_t reserved_51_63 : 13; 621#endif 622 } s_smemload2; 623 624 /** 625 * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0) 626 */ 627 struct 628 { 629#if __BYTE_ORDER == __BIG_ENDIAN 630 uint64_t reserved_52_63 : 12; 631 uint64_t free_val : 1; /**< - set when there is one or more POW entries on the free list. */ 632 uint64_t free_one : 1; /**< - set when there is exactly one POW entry on the free list. */ 633 uint64_t reserved_49 : 1; 634 uint64_t free_head : 11; /**< - when free_val is set, indicates the first entry on the free list. */ 635 uint64_t reserved_37 : 1; 636 uint64_t free_tail : 11; /**< - when free_val is set, indicates the last entry on the free list. */ 637 uint64_t loc_val : 1; /**< - set when there is one or more POW entries on the input Q list selected by qosgrp. */ 638 uint64_t loc_one : 1; /**< - set when there is exactly one POW entry on the input Q list selected by qosgrp. */ 639 uint64_t reserved_23 : 1; 640 uint64_t loc_head : 11; /**< - when loc_val is set, indicates the first entry on the input Q list selected by qosgrp. */ 641 uint64_t reserved_11 : 1; 642 uint64_t loc_tail : 11; /**< - when loc_val is set, indicates the last entry on the input Q list selected by qosgrp. */ 643#else 644 uint64_t loc_tail : 11; 645 uint64_t reserved_11 : 1; 646 uint64_t loc_head : 11; 647 uint64_t reserved_23 : 1; 648 uint64_t loc_one : 1; 649 uint64_t loc_val : 1; 650 uint64_t free_tail : 11; 651 uint64_t reserved_37 : 1; 652 uint64_t free_head : 11; 653 uint64_t reserved_49 : 1; 654 uint64_t free_one : 1; 655 uint64_t free_val : 1; 656 uint64_t reserved_52_63 : 12; 657#endif 658 } sindexload0; 659 660 /** 661 * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1) 662 */ 663 struct 664 { 665#if __BYTE_ORDER == __BIG_ENDIAN 666 uint64_t reserved_52_63 : 12; 667 uint64_t nosched_val : 1; /**< - set when there is one or more POW entries on the nosched list. */ 668 uint64_t nosched_one : 1; /**< - set when there is exactly one POW entry on the nosched list. */ 669 uint64_t reserved_49 : 1; 670 uint64_t nosched_head : 11; /**< - when nosched_val is set, indicates the first entry on the nosched list. */ 671 uint64_t reserved_37 : 1; 672 uint64_t nosched_tail : 11; /**< - when nosched_val is set, indicates the last entry on the nosched list. */ 673 uint64_t des_val : 1; /**< - set when there is one or more descheduled heads on the descheduled list selected by qosgrp. */ 674 uint64_t des_one : 1; /**< - set when there is exactly one descheduled head on the descheduled list selected by qosgrp. */ 675 uint64_t reserved_23 : 1; 676 uint64_t des_head : 11; /**< - when des_val is set, indicates the first descheduled head on the descheduled list selected by qosgrp. */ 677 uint64_t reserved_11 : 1; 678 uint64_t des_tail : 11; /**< - when des_val is set, indicates the last descheduled head on the descheduled list selected by qosgrp. */ 679#else 680 uint64_t des_tail : 11; 681 uint64_t reserved_11 : 1; 682 uint64_t des_head : 11; 683 uint64_t reserved_23 : 1; 684 uint64_t des_one : 1; 685 uint64_t des_val : 1; 686 uint64_t nosched_tail : 11; 687 uint64_t reserved_37 : 1; 688 uint64_t nosched_head : 11; 689 uint64_t reserved_49 : 1; 690 uint64_t nosched_one : 1; 691 uint64_t nosched_val : 1; 692 uint64_t reserved_52_63 : 12; 693#endif 694 } sindexload1; 695 696 /** 697 * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0) 698 */ 699 struct 700 { 701#if __BYTE_ORDER == __BIG_ENDIAN 702 uint64_t reserved_39_63 : 25; 703 uint64_t rmt_is_head : 1; /**< Set when this DRAM list is the current head (i.e. is the next to 704 be reloaded when the POW hardware reloads a POW entry from DRAM). The 705 POW hardware alternates between the two DRAM lists associated with a QOS 706 level when it reloads work from DRAM into the POW unit. */ 707 uint64_t rmt_val : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp 708 contains one or more pieces of work. */ 709 uint64_t rmt_one : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp 710 contains exactly one piece of work. */ 711 uint64_t rmt_head : 36; /**< When rmt_val is set, indicates the first piece of work on the 712 DRAM input Q list selected by qosgrp. */ 713#else 714 uint64_t rmt_head : 36; 715 uint64_t rmt_one : 1; 716 uint64_t rmt_val : 1; 717 uint64_t rmt_is_head : 1; 718 uint64_t reserved_39_63 : 25; 719#endif 720 } sindexload2; 721 722 /** 723 * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 1) 724 */ 725 struct 726 { 727#if __BYTE_ORDER == __BIG_ENDIAN 728 uint64_t reserved_39_63 : 25; 729 uint64_t rmt_is_head : 1; /**< - set when this DRAM list is the current head (i.e. is the next to 730 be reloaded when the POW hardware reloads a POW entry from DRAM). The 731 POW hardware alternates between the two DRAM lists associated with a QOS 732 level when it reloads work from DRAM into the POW unit. */ 733 uint64_t rmt_val : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp 734 contains one or more pieces of work. */ 735 uint64_t rmt_one : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp 736 contains exactly one piece of work. */ 737 uint64_t rmt_tail : 36; /**< - when rmt_val is set, indicates the last piece of work on the DRAM 738 input Q list selected by qosgrp. */ 739#else 740 uint64_t rmt_tail : 36; 741 uint64_t rmt_one : 1; 742 uint64_t rmt_val : 1; 743 uint64_t rmt_is_head : 1; 744 uint64_t reserved_39_63 : 25; 745#endif 746 } sindexload3; 747 748 /** 749 * Response to NULL_RD request loads 750 */ 751 struct 752 { 753#if __BYTE_ORDER == __BIG_ENDIAN 754 uint64_t unused : 62; 755 uint64_t state : 2; /**< of type cvmx_pow_tag_type_t. state is one of the following: 756 - CVMX_POW_TAG_TYPE_ORDERED 757 - CVMX_POW_TAG_TYPE_ATOMIC 758 - CVMX_POW_TAG_TYPE_NULL 759 - CVMX_POW_TAG_TYPE_NULL_NULL */ 760#else 761 uint64_t state : 2; 762 uint64_t unused : 62; 763#endif 764 } s_null_rd; 765 766} cvmx_pow_tag_load_resp_t; 767 768/** 769 * This structure describes the address used for stores to the POW. 770 * The store address is meaningful on stores to the POW. The hardware assumes that an aligned 771 * 64-bit store was used for all these stores. 772 * Note the assumption that the work queue entry is aligned on an 8-byte 773 * boundary (since the low-order 3 address bits must be zero). 774 * Note that not all fields are used by all operations. 775 * 776 * NOTE: The following is the behavior of the pending switch bit at the PP 777 * for POW stores (i.e. when did<7:3> == 0xc) 778 * - did<2:0> == 0 => pending switch bit is set 779 * - did<2:0> == 1 => no affect on the pending switch bit 780 * - did<2:0> == 3 => pending switch bit is cleared 781 * - did<2:0> == 7 => no affect on the pending switch bit 782 * - did<2:0> == others => must not be used 783 * - No other loads/stores have an affect on the pending switch bit 784 * - The switch bus from POW can clear the pending switch bit 785 * 786 * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle ADDWQ command 787 * that only contains the pointer). SW must never use did<2:0> == 2. 788 */ 789typedef union 790{ 791 /** 792 * Unsigned 64 bit integer representation of store address 793 */ 794 uint64_t u64; 795 796 struct 797 { 798#if __BYTE_ORDER == __BIG_ENDIAN 799 uint64_t mem_reg : 2; /**< Memory region. Should be CVMX_IO_SEG in most cases */ 800 uint64_t reserved_49_61 : 13; /**< Must be zero */ 801 uint64_t is_io : 1; /**< Must be one */ 802 uint64_t did : 8; /**< Device ID of POW. Note that different sub-dids are used. */ 803 uint64_t reserved_36_39 : 4; /**< Must be zero */ 804 uint64_t addr : 36; /**< Address field. addr<2:0> must be zero */ 805#else 806 uint64_t addr : 36; 807 uint64_t reserved_36_39 : 4; 808 uint64_t did : 8; 809 uint64_t is_io : 1; 810 uint64_t reserved_49_61 : 13; 811 uint64_t mem_reg : 2; 812#endif 813 } stag; 814} cvmx_pow_tag_store_addr_t; 815 816/** 817 * decode of the store data when an IOBDMA SENDSINGLE is sent to POW 818 */ 819typedef union 820{ 821 uint64_t u64; 822 823 struct 824 { 825#if __BYTE_ORDER == __BIG_ENDIAN 826 uint64_t scraddr : 8; /**< the (64-bit word) location in scratchpad to write to (if len != 0) */ 827 uint64_t len : 8; /**< the number of words in the response (0 => no response) */ 828 uint64_t did : 8; /**< the ID of the device on the non-coherent bus */ 829 uint64_t unused :36; 830 uint64_t wait : 1; /**< if set, don't return load response until work is available */ 831 uint64_t unused2 : 3; 832#else 833 uint64_t unused2 : 3; 834 uint64_t wait : 1; 835 uint64_t unused :36; 836 uint64_t did : 8; 837 uint64_t len : 8; 838 uint64_t scraddr : 8; 839#endif 840 } s; 841 842} cvmx_pow_iobdma_store_t; 843 844 845/* CSR typedefs have been moved to cvmx-pow-defs.h */ 846 847/** 848 * Get the POW tag for this core. This returns the current 849 * tag type, tag, group, and POW entry index associated with 850 * this core. Index is only valid if the tag type isn't NULL_NULL. 851 * If a tag switch is pending this routine returns the tag before 852 * the tag switch, not after. 853 * 854 * @return Current tag 855 */ 856static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void) 857{ 858 cvmx_pow_load_addr_t load_addr; 859 cvmx_pow_tag_load_resp_t load_resp; 860 cvmx_pow_tag_req_t result; 861 862 load_addr.u64 = 0; 863 load_addr.sstatus.mem_region = CVMX_IO_SEG; 864 load_addr.sstatus.is_io = 1; 865 load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1; 866 load_addr.sstatus.coreid = cvmx_get_core_num(); 867 load_addr.sstatus.get_cur = 1; 868 load_resp.u64 = cvmx_read_csr(load_addr.u64); 869 result.u64 = 0; 870 result.s.grp = load_resp.s_sstatus2.grp; 871 result.s.index = load_resp.s_sstatus2.index; 872 result.s.type = (cvmx_pow_tag_type_t)load_resp.s_sstatus2.tag_type; 873 result.s.tag = load_resp.s_sstatus2.tag; 874 return result; 875} 876 877 878/** 879 * Get the POW WQE for this core. This returns the work queue 880 * entry currently associated with this core. 881 * 882 * @return WQE pointer 883 */ 884static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void) 885{ 886 cvmx_pow_load_addr_t load_addr; 887 cvmx_pow_tag_load_resp_t load_resp; 888 889 load_addr.u64 = 0; 890 load_addr.sstatus.mem_region = CVMX_IO_SEG; 891 load_addr.sstatus.is_io = 1; 892 load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1; 893 load_addr.sstatus.coreid = cvmx_get_core_num(); 894 load_addr.sstatus.get_cur = 1; 895 load_addr.sstatus.get_wqp = 1; 896 load_resp.u64 = cvmx_read_csr(load_addr.u64); 897 return (cvmx_wqe_t*)cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp); 898} 899 900 901/** 902 * @INTERNAL 903 * Print a warning if a tag switch is pending for this core 904 * 905 * @param function Function name checking for a pending tag switch 906 */ 907static inline void __cvmx_pow_warn_if_pending_switch(const char *function) 908{ 909 uint64_t switch_complete; 910 CVMX_MF_CHORD(switch_complete); 911 cvmx_warn_if(!switch_complete, "%s called with tag switch in progress\n", function); 912} 913 914 915/** 916 * Waits for a tag switch to complete by polling the completion bit. 917 * Note that switches to NULL complete immediately and do not need 918 * to be waited for. 919 */ 920static inline void cvmx_pow_tag_sw_wait(void) 921{ 922 const uint64_t MAX_CYCLES = 1ull<<31; 923 uint64_t switch_complete; 924 uint64_t start_cycle = cvmx_get_cycle(); 925 while (1) 926 { 927 CVMX_MF_CHORD(switch_complete); 928 if (cvmx_unlikely(switch_complete)) 929 break; 930 if (cvmx_unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) 931 { 932 cvmx_dprintf("WARNING: Tag switch is taking a long time, possible deadlock\n"); 933 start_cycle = -MAX_CYCLES-1; 934 } 935 } 936} 937 938 939/** 940 * Synchronous work request. Requests work from the POW. 941 * This function does NOT wait for previous tag switches to complete, 942 * so the caller must ensure that there is not a pending tag switch. 943 * 944 * @param wait When set, call stalls until work becomes avaiable, or times out. 945 * If not set, returns immediately. 946 * 947 * @return Returns the WQE pointer from POW. Returns NULL if no work was available. 948 */ 949static inline cvmx_wqe_t * cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t wait) 950{ 951 cvmx_pow_load_addr_t ptr; 952 cvmx_pow_tag_load_resp_t result; 953 954 if (CVMX_ENABLE_POW_CHECKS) 955 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 956 957 ptr.u64 = 0; 958 ptr.swork.mem_region = CVMX_IO_SEG; 959 ptr.swork.is_io = 1; 960 ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG; 961 ptr.swork.wait = wait; 962 963 result.u64 = cvmx_read_csr(ptr.u64); 964 965 if (result.s_work.no_work) 966 return NULL; 967 else 968 return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr); 969} 970 971 972/** 973 * Synchronous work request. Requests work from the POW. 974 * This function waits for any previous tag switch to complete before 975 * requesting the new work. 976 * 977 * @param wait When set, call stalls until work becomes avaiable, or times out. 978 * If not set, returns immediately. 979 * 980 * @return Returns the WQE pointer from POW. Returns NULL if no work was available. 981 */ 982static inline cvmx_wqe_t * cvmx_pow_work_request_sync(cvmx_pow_wait_t wait) 983{ 984 if (CVMX_ENABLE_POW_CHECKS) 985 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 986 987 /* Must not have a switch pending when requesting work */ 988 cvmx_pow_tag_sw_wait(); 989 return(cvmx_pow_work_request_sync_nocheck(wait)); 990 991} 992 993 994/** 995 * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state. 996 * This function waits for any previous tag switch to complete before 997 * requesting the null_rd. 998 * 999 * @return Returns the POW state of type cvmx_pow_tag_type_t. 1000 */ 1001static inline cvmx_pow_tag_type_t cvmx_pow_work_request_null_rd(void) 1002{ 1003 cvmx_pow_load_addr_t ptr; 1004 cvmx_pow_tag_load_resp_t result; 1005 1006 if (CVMX_ENABLE_POW_CHECKS) 1007 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1008 1009 /* Must not have a switch pending when requesting work */ 1010 cvmx_pow_tag_sw_wait(); 1011 1012 ptr.u64 = 0; 1013 ptr.snull_rd.mem_region = CVMX_IO_SEG; 1014 ptr.snull_rd.is_io = 1; 1015 ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD; 1016 1017 result.u64 = cvmx_read_csr(ptr.u64); 1018 1019 return (cvmx_pow_tag_type_t)result.s_null_rd.state; 1020} 1021 1022 1023/** 1024 * Asynchronous work request. Work is requested from the POW unit, and should later 1025 * be checked with function cvmx_pow_work_response_async. 1026 * This function does NOT wait for previous tag switches to complete, 1027 * so the caller must ensure that there is not a pending tag switch. 1028 * 1029 * @param scr_addr Scratch memory address that response will be returned to, 1030 * which is either a valid WQE, or a response with the invalid bit set. 1031 * Byte address, must be 8 byte aligned. 1032 * @param wait 1 to cause response to wait for work to become available (or timeout) 1033 * 0 to cause response to return immediately 1034 */ 1035static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, cvmx_pow_wait_t wait) 1036{ 1037 cvmx_pow_iobdma_store_t data; 1038 1039 if (CVMX_ENABLE_POW_CHECKS) 1040 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1041 1042 /* scr_addr must be 8 byte aligned */ 1043 data.s.scraddr = scr_addr >> 3; 1044 data.s.len = 1; 1045 data.s.did = CVMX_OCT_DID_TAG_SWTAG; 1046 data.s.wait = wait; 1047 cvmx_send_single(data.u64); 1048} 1049/** 1050 * Asynchronous work request. Work is requested from the POW unit, and should later 1051 * be checked with function cvmx_pow_work_response_async. 1052 * This function waits for any previous tag switch to complete before 1053 * requesting the new work. 1054 * 1055 * @param scr_addr Scratch memory address that response will be returned to, 1056 * which is either a valid WQE, or a response with the invalid bit set. 1057 * Byte address, must be 8 byte aligned. 1058 * @param wait 1 to cause response to wait for work to become available (or timeout) 1059 * 0 to cause response to return immediately 1060 */ 1061static inline void cvmx_pow_work_request_async(int scr_addr, cvmx_pow_wait_t wait) 1062{ 1063 if (CVMX_ENABLE_POW_CHECKS) 1064 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1065 1066 /* Must not have a switch pending when requesting work */ 1067 cvmx_pow_tag_sw_wait(); 1068 cvmx_pow_work_request_async_nocheck(scr_addr, wait); 1069} 1070 1071 1072/** 1073 * Gets result of asynchronous work request. Performs a IOBDMA sync 1074 * to wait for the response. 1075 * 1076 * @param scr_addr Scratch memory address to get result from 1077 * Byte address, must be 8 byte aligned. 1078 * @return Returns the WQE from the scratch register, or NULL if no work was available. 1079 */ 1080static inline cvmx_wqe_t * cvmx_pow_work_response_async(int scr_addr) 1081{ 1082 cvmx_pow_tag_load_resp_t result; 1083 1084 CVMX_SYNCIOBDMA; 1085 result.u64 = cvmx_scratch_read64(scr_addr); 1086 1087 if (result.s_work.no_work) 1088 return NULL; 1089 else 1090 return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr); 1091} 1092 1093 1094/** 1095 * Checks if a work queue entry pointer returned by a work 1096 * request is valid. It may be invalid due to no work 1097 * being available or due to a timeout. 1098 * 1099 * @param wqe_ptr pointer to a work queue entry returned by the POW 1100 * 1101 * @return 0 if pointer is valid 1102 * 1 if invalid (no work was returned) 1103 */ 1104static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr) 1105{ 1106 return (wqe_ptr == NULL); 1107} 1108 1109 1110 1111/** 1112 * Starts a tag switch to the provided tag value and tag type. Completion for 1113 * the tag switch must be checked for separately. 1114 * This function does NOT update the 1115 * work queue entry in dram to match tag value and type, so the application must 1116 * keep track of these if they are important to the application. 1117 * This tag switch command must not be used for switches to NULL, as the tag 1118 * switch pending bit will be set by the switch request, but never cleared by the 1119 * hardware. 1120 * 1121 * NOTE: This should not be used when switching from a NULL tag. Use 1122 * cvmx_pow_tag_sw_full() instead. 1123 * 1124 * This function does no checks, so the caller must ensure that any previous tag 1125 * switch has completed. 1126 * 1127 * @param tag new tag value 1128 * @param tag_type new tag type (ordered or atomic) 1129 */ 1130static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type) 1131{ 1132 cvmx_addr_t ptr; 1133 cvmx_pow_tag_req_t tag_req; 1134 1135 if (CVMX_ENABLE_POW_CHECKS) 1136 { 1137 cvmx_pow_tag_req_t current_tag; 1138 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1139 current_tag = cvmx_pow_get_current_tag(); 1140 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__); 1141 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag\n", __FUNCTION__); 1142 cvmx_warn_if((current_tag.s.type == tag_type) && (current_tag.s.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__); 1143 cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__); 1144 } 1145 1146 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM 1147 ** once the WQE is in flight. See hardware manual for complete details. 1148 ** It is the application's responsibility to keep track of the current tag 1149 ** value if that is important. 1150 */ 1151 1152 1153 tag_req.u64 = 0; 1154 tag_req.s.op = CVMX_POW_TAG_OP_SWTAG; 1155 tag_req.s.tag = tag; 1156 tag_req.s.type = tag_type; 1157 1158 ptr.u64 = 0; 1159 ptr.sio.mem_region = CVMX_IO_SEG; 1160 ptr.sio.is_io = 1; 1161 ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG; 1162 1163 /* once this store arrives at POW, it will attempt the switch 1164 software must wait for the switch to complete separately */ 1165 cvmx_write_io(ptr.u64, tag_req.u64); 1166} 1167 1168 1169/** 1170 * Starts a tag switch to the provided tag value and tag type. Completion for 1171 * the tag switch must be checked for separately. 1172 * This function does NOT update the 1173 * work queue entry in dram to match tag value and type, so the application must 1174 * keep track of these if they are important to the application. 1175 * This tag switch command must not be used for switches to NULL, as the tag 1176 * switch pending bit will be set by the switch request, but never cleared by the 1177 * hardware. 1178 * 1179 * NOTE: This should not be used when switching from a NULL tag. Use 1180 * cvmx_pow_tag_sw_full() instead. 1181 * 1182 * This function waits for any previous tag switch to complete, and also 1183 * displays an error on tag switches to NULL. 1184 * 1185 * @param tag new tag value 1186 * @param tag_type new tag type (ordered or atomic) 1187 */ 1188static inline void cvmx_pow_tag_sw(uint32_t tag, cvmx_pow_tag_type_t tag_type) 1189{ 1190 if (CVMX_ENABLE_POW_CHECKS) 1191 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1192 1193 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM 1194 ** once the WQE is in flight. See hardware manual for complete details. 1195 ** It is the application's responsibility to keep track of the current tag 1196 ** value if that is important. 1197 */ 1198 1199 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started 1200 ** if a previous switch is still pending. */ 1201 cvmx_pow_tag_sw_wait(); 1202 cvmx_pow_tag_sw_nocheck(tag, tag_type); 1203} 1204 1205 1206/** 1207 * Starts a tag switch to the provided tag value and tag type. Completion for 1208 * the tag switch must be checked for separately. 1209 * This function does NOT update the 1210 * work queue entry in dram to match tag value and type, so the application must 1211 * keep track of these if they are important to the application. 1212 * This tag switch command must not be used for switches to NULL, as the tag 1213 * switch pending bit will be set by the switch request, but never cleared by the 1214 * hardware. 1215 * 1216 * This function must be used for tag switches from NULL. 1217 * 1218 * This function does no checks, so the caller must ensure that any previous tag 1219 * switch has completed. 1220 * 1221 * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters 1222 * @param tag tag value to be assigned to work queue entry 1223 * @param tag_type type of tag 1224 * @param group group value for the work queue entry. 1225 */ 1226static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group) 1227{ 1228 cvmx_addr_t ptr; 1229 cvmx_pow_tag_req_t tag_req; 1230 1231 if (CVMX_ENABLE_POW_CHECKS) 1232 { 1233 cvmx_pow_tag_req_t current_tag; 1234 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1235 current_tag = cvmx_pow_get_current_tag(); 1236 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__); 1237 cvmx_warn_if((current_tag.s.type == tag_type) && (current_tag.s.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__); 1238 cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__); 1239 if (wqp != cvmx_phys_to_ptr(0x80)) 1240 cvmx_warn_if(wqp != cvmx_pow_get_current_wqp(), "%s passed WQE(%p) doesn't match the address in the POW(%p)\n", __FUNCTION__, wqp, cvmx_pow_get_current_wqp()); 1241 } 1242 1243 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM 1244 ** once the WQE is in flight. See hardware manual for complete details. 1245 ** It is the application's responsibility to keep track of the current tag 1246 ** value if that is important. 1247 */ 1248 1249 tag_req.u64 = 0; 1250 tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL; 1251 tag_req.s.tag = tag; 1252 tag_req.s.type = tag_type; 1253 tag_req.s.grp = group; 1254 1255 ptr.u64 = 0; 1256 ptr.sio.mem_region = CVMX_IO_SEG; 1257 ptr.sio.is_io = 1; 1258 ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG; 1259 ptr.sio.offset = CAST64(wqp); 1260 1261 /* once this store arrives at POW, it will attempt the switch 1262 software must wait for the switch to complete separately */ 1263 cvmx_write_io(ptr.u64, tag_req.u64); 1264} 1265 1266 1267/** 1268 * Starts a tag switch to the provided tag value and tag type. Completion for 1269 * the tag switch must be checked for separately. 1270 * This function does NOT update the 1271 * work queue entry in dram to match tag value and type, so the application must 1272 * keep track of these if they are important to the application. 1273 * This tag switch command must not be used for switches to NULL, as the tag 1274 * switch pending bit will be set by the switch request, but never cleared by the 1275 * hardware. 1276 * 1277 * This function must be used for tag switches from NULL. 1278 * 1279 * This function waits for any pending tag switches to complete 1280 * before requesting the tag switch. 1281 * 1282 * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters 1283 * @param tag tag value to be assigned to work queue entry 1284 * @param tag_type type of tag 1285 * @param group group value for the work queue entry. 1286 */ 1287static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group) 1288{ 1289 if (CVMX_ENABLE_POW_CHECKS) 1290 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1291 1292 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started 1293 ** if a previous switch is still pending. */ 1294 cvmx_pow_tag_sw_wait(); 1295 cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group); 1296} 1297 1298 1299/** 1300 * Switch to a NULL tag, which ends any ordering or 1301 * synchronization provided by the POW for the current 1302 * work queue entry. This operation completes immediatly, 1303 * so completetion should not be waited for. 1304 * This function does NOT wait for previous tag switches to complete, 1305 * so the caller must ensure that any previous tag switches have completed. 1306 */ 1307static inline void cvmx_pow_tag_sw_null_nocheck(void) 1308{ 1309 cvmx_addr_t ptr; 1310 cvmx_pow_tag_req_t tag_req; 1311 1312 if (CVMX_ENABLE_POW_CHECKS) 1313 { 1314 cvmx_pow_tag_req_t current_tag; 1315 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1316 current_tag = cvmx_pow_get_current_tag(); 1317 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__); 1318 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called when we already have a NULL tag\n", __FUNCTION__); 1319 } 1320 1321 tag_req.u64 = 0; 1322 tag_req.s.op = CVMX_POW_TAG_OP_SWTAG; 1323 tag_req.s.type = CVMX_POW_TAG_TYPE_NULL; 1324 1325 1326 ptr.u64 = 0; 1327 ptr.sio.mem_region = CVMX_IO_SEG; 1328 ptr.sio.is_io = 1; 1329 ptr.sio.did = CVMX_OCT_DID_TAG_TAG1; 1330 1331 1332 cvmx_write_io(ptr.u64, tag_req.u64); 1333 1334 /* switch to NULL completes immediately */ 1335} 1336 1337/** 1338 * Switch to a NULL tag, which ends any ordering or 1339 * synchronization provided by the POW for the current 1340 * work queue entry. This operation completes immediatly, 1341 * so completetion should not be waited for. 1342 * This function waits for any pending tag switches to complete 1343 * before requesting the switch to NULL. 1344 */ 1345static inline void cvmx_pow_tag_sw_null(void) 1346{ 1347 if (CVMX_ENABLE_POW_CHECKS) 1348 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1349 1350 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started 1351 ** if a previous switch is still pending. */ 1352 cvmx_pow_tag_sw_wait(); 1353 cvmx_pow_tag_sw_null_nocheck(); 1354 1355 /* switch to NULL completes immediately */ 1356} 1357 1358 1359 1360/** 1361 * Submits work to an input queue. This function updates the work queue entry in DRAM to match 1362 * the arguments given. 1363 * Note that the tag provided is for the work queue entry submitted, and is unrelated to the tag that 1364 * the core currently holds. 1365 * 1366 * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters 1367 * @param tag tag value to be assigned to work queue entry 1368 * @param tag_type type of tag 1369 * @param qos Input queue to add to. 1370 * @param grp group value for the work queue entry. 1371 */ 1372static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t qos, uint64_t grp) 1373{ 1374 cvmx_addr_t ptr; 1375 cvmx_pow_tag_req_t tag_req; 1376 1377 wqp->qos = qos; 1378 wqp->tag = tag; 1379 wqp->tag_type = tag_type; 1380 wqp->grp = grp; 1381 1382 tag_req.u64 = 0; 1383 tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ; 1384 tag_req.s.type = tag_type; 1385 tag_req.s.tag = tag; 1386 tag_req.s.qos = qos; 1387 tag_req.s.grp = grp; 1388 1389 1390 ptr.u64 = 0; 1391 ptr.sio.mem_region = CVMX_IO_SEG; 1392 ptr.sio.is_io = 1; 1393 ptr.sio.did = CVMX_OCT_DID_TAG_TAG1; 1394 ptr.sio.offset = cvmx_ptr_to_phys(wqp); 1395 1396 /* SYNC write to memory before the work submit. This is necessary 1397 ** as POW may read values from DRAM at this time */ 1398 CVMX_SYNCWS; 1399 cvmx_write_io(ptr.u64, tag_req.u64); 1400} 1401 1402 1403 1404/** 1405 * This function sets the group mask for a core. The group mask 1406 * indicates which groups each core will accept work from. There are 1407 * 16 groups. 1408 * 1409 * @param core_num core to apply mask to 1410 * @param mask Group mask. There are 16 groups, so only bits 0-15 are valid, 1411 * representing groups 0-15. 1412 * Each 1 bit in the mask enables the core to accept work from 1413 * the corresponding group. 1414 */ 1415static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask) 1416{ 1417 cvmx_pow_pp_grp_mskx_t grp_msk; 1418 1419 grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num)); 1420 grp_msk.s.grp_msk = mask; 1421 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64); 1422} 1423 1424/** 1425 * This function sets POW static priorities for a core. Each input queue has 1426 * an associated priority value. 1427 * 1428 * @param core_num core to apply priorities to 1429 * @param priority Vector of 8 priorities, one per POW Input Queue (0-7). 1430 * Highest priority is 0 and lowest is 7. A priority value 1431 * of 0xF instructs POW to skip the Input Queue when 1432 * scheduling to this specific core. 1433 * NOTE: priorities should not have gaps in values, meaning 1434 * {0,1,1,1,1,1,1,1} is a valid configuration while 1435 * {0,2,2,2,2,2,2,2} is not. 1436 */ 1437static inline void cvmx_pow_set_priority(uint64_t core_num, const uint8_t priority[]) 1438{ 1439 /* POW priorities are supported on CN5xxx and later */ 1440 if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) 1441 { 1442 cvmx_pow_pp_grp_mskx_t grp_msk; 1443 1444 grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num)); 1445 grp_msk.s.qos0_pri = priority[0]; 1446 grp_msk.s.qos1_pri = priority[1]; 1447 grp_msk.s.qos2_pri = priority[2]; 1448 grp_msk.s.qos3_pri = priority[3]; 1449 grp_msk.s.qos4_pri = priority[4]; 1450 grp_msk.s.qos5_pri = priority[5]; 1451 grp_msk.s.qos6_pri = priority[6]; 1452 grp_msk.s.qos7_pri = priority[7]; 1453 1454 /* Detect gaps between priorities and flag error */ 1455 { 1456 int i; 1457 uint32_t prio_mask = 0; 1458 1459 for(i=0; i<8; i++) 1460 if (priority[i] != 0xF) 1461 prio_mask |= 1<<priority[i]; 1462 1463 if ( prio_mask ^ ((1<<cvmx_pop(prio_mask)) - 1)) 1464 { 1465 cvmx_dprintf("ERROR: POW static priorities should be contiguous (0x%llx)\n", (unsigned long long)prio_mask); 1466 return; 1467 } 1468 } 1469 1470 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64); 1471 } 1472} 1473 1474/** 1475 * Performs a tag switch and then an immediate deschedule. This completes 1476 * immediatly, so completion must not be waited for. This function does NOT 1477 * update the wqe in DRAM to match arguments. 1478 * 1479 * This function does NOT wait for any prior tag switches to complete, so the 1480 * calling code must do this. 1481 * 1482 * Note the following CAVEAT of the Octeon HW behavior when 1483 * re-scheduling DE-SCHEDULEd items whose (next) state is 1484 * ORDERED: 1485 * - If there are no switches pending at the time that the 1486 * HW executes the de-schedule, the HW will only re-schedule 1487 * the head of the FIFO associated with the given tag. This 1488 * means that in many respects, the HW treats this ORDERED 1489 * tag as an ATOMIC tag. Note that in the SWTAG_DESCH 1490 * case (to an ORDERED tag), the HW will do the switch 1491 * before the deschedule whenever it is possible to do 1492 * the switch immediately, so it may often look like 1493 * this case. 1494 * - If there is a pending switch to ORDERED at the time 1495 * the HW executes the de-schedule, the HW will perform 1496 * the switch at the time it re-schedules, and will be 1497 * able to reschedule any/all of the entries with the 1498 * same tag. 1499 * Due to this behavior, the RECOMMENDATION to software is 1500 * that they have a (next) state of ATOMIC when they 1501 * DE-SCHEDULE. If an ORDERED tag is what was really desired, 1502 * SW can choose to immediately switch to an ORDERED tag 1503 * after the work (that has an ATOMIC tag) is re-scheduled. 1504 * Note that since there are never any tag switches pending 1505 * when the HW re-schedules, this switch can be IMMEDIATE upon 1506 * the reception of the pointer during the re-schedule. 1507 * 1508 * @param tag New tag value 1509 * @param tag_type New tag type 1510 * @param group New group value 1511 * @param no_sched Control whether this work queue entry will be rescheduled. 1512 * - 1 : don't schedule this work 1513 * - 0 : allow this work to be scheduled. 1514 */ 1515static inline void cvmx_pow_tag_sw_desched_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched) 1516{ 1517 cvmx_addr_t ptr; 1518 cvmx_pow_tag_req_t tag_req; 1519 1520 if (CVMX_ENABLE_POW_CHECKS) 1521 { 1522 cvmx_pow_tag_req_t current_tag; 1523 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1524 current_tag = cvmx_pow_get_current_tag(); 1525 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__); 1526 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not allowed from NULL state\n", __FUNCTION__); 1527 cvmx_warn_if((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC) && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC), "%s called where neither the before or after tag is ATOMIC\n", __FUNCTION__); 1528 } 1529 1530 tag_req.u64 = 0; 1531 tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH; 1532 tag_req.s.tag = tag; 1533 tag_req.s.type = tag_type; 1534 tag_req.s.grp = group; 1535 tag_req.s.no_sched = no_sched; 1536 1537 ptr.u64 = 0; 1538 ptr.sio.mem_region = CVMX_IO_SEG; 1539 ptr.sio.is_io = 1; 1540 ptr.sio.did = CVMX_OCT_DID_TAG_TAG3; 1541 1542 cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */ 1543} 1544/** 1545 * Performs a tag switch and then an immediate deschedule. This completes 1546 * immediatly, so completion must not be waited for. This function does NOT 1547 * update the wqe in DRAM to match arguments. 1548 * 1549 * This function waits for any prior tag switches to complete, so the 1550 * calling code may call this function with a pending tag switch. 1551 * 1552 * Note the following CAVEAT of the Octeon HW behavior when 1553 * re-scheduling DE-SCHEDULEd items whose (next) state is 1554 * ORDERED: 1555 * - If there are no switches pending at the time that the 1556 * HW executes the de-schedule, the HW will only re-schedule 1557 * the head of the FIFO associated with the given tag. This 1558 * means that in many respects, the HW treats this ORDERED 1559 * tag as an ATOMIC tag. Note that in the SWTAG_DESCH 1560 * case (to an ORDERED tag), the HW will do the switch 1561 * before the deschedule whenever it is possible to do 1562 * the switch immediately, so it may often look like 1563 * this case. 1564 * - If there is a pending switch to ORDERED at the time 1565 * the HW executes the de-schedule, the HW will perform 1566 * the switch at the time it re-schedules, and will be 1567 * able to reschedule any/all of the entries with the 1568 * same tag. 1569 * Due to this behavior, the RECOMMENDATION to software is 1570 * that they have a (next) state of ATOMIC when they 1571 * DE-SCHEDULE. If an ORDERED tag is what was really desired, 1572 * SW can choose to immediately switch to an ORDERED tag 1573 * after the work (that has an ATOMIC tag) is re-scheduled. 1574 * Note that since there are never any tag switches pending 1575 * when the HW re-schedules, this switch can be IMMEDIATE upon 1576 * the reception of the pointer during the re-schedule. 1577 * 1578 * @param tag New tag value 1579 * @param tag_type New tag type 1580 * @param group New group value 1581 * @param no_sched Control whether this work queue entry will be rescheduled. 1582 * - 1 : don't schedule this work 1583 * - 0 : allow this work to be scheduled. 1584 */ 1585static inline void cvmx_pow_tag_sw_desched(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched) 1586{ 1587 if (CVMX_ENABLE_POW_CHECKS) 1588 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1589 1590 /* Need to make sure any writes to the work queue entry are complete */ 1591 CVMX_SYNCWS; 1592 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started 1593 ** if a previous switch is still pending. */ 1594 cvmx_pow_tag_sw_wait(); 1595 cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched); 1596} 1597 1598 1599 1600 1601 1602/** 1603 * Descchedules the current work queue entry. 1604 * 1605 * @param no_sched no schedule flag value to be set on the work queue entry. If this is set 1606 * the entry will not be rescheduled. 1607 */ 1608static inline void cvmx_pow_desched(uint64_t no_sched) 1609{ 1610 cvmx_addr_t ptr; 1611 cvmx_pow_tag_req_t tag_req; 1612 1613 if (CVMX_ENABLE_POW_CHECKS) 1614 { 1615 cvmx_pow_tag_req_t current_tag; 1616 __cvmx_pow_warn_if_pending_switch(__FUNCTION__); 1617 current_tag = cvmx_pow_get_current_tag(); 1618 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__); 1619 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not expected from NULL state\n", __FUNCTION__); 1620 } 1621 1622 /* Need to make sure any writes to the work queue entry are complete */ 1623 CVMX_SYNCWS; 1624 1625 tag_req.u64 = 0; 1626 tag_req.s.op = CVMX_POW_TAG_OP_DESCH; 1627 tag_req.s.no_sched = no_sched; 1628 1629 ptr.u64 = 0; 1630 ptr.sio.mem_region = CVMX_IO_SEG; 1631 ptr.sio.is_io = 1; 1632 ptr.sio.did = CVMX_OCT_DID_TAG_TAG3; 1633 1634 cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */ 1635} 1636 1637 1638 1639 1640 1641 1642 1643/*********************************************************************************************** 1644** Define usage of bits within the 32 bit tag values. 1645***********************************************************************************************/ 1646 1647/* 1648 * Number of bits of the tag used by software. The SW bits 1649 * are always a contiguous block of the high starting at bit 31. 1650 * The hardware bits are always the low bits. By default, the top 8 bits 1651 * of the tag are reserved for software, and the low 24 are set by the IPD unit. 1652 */ 1653#define CVMX_TAG_SW_BITS (8) 1654#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS) 1655 1656/* Below is the list of values for the top 8 bits of the tag. */ 1657#define CVMX_TAG_SW_BITS_INTERNAL 0x1 /* Tag values with top byte of this value are reserved for internal executive uses */ 1658/* The executive divides the remaining 24 bits as follows: 1659** * the upper 8 bits (bits 23 - 16 of the tag) define a subgroup 1660** * the lower 16 bits (bits 15 - 0 of the tag) define are the value with the subgroup 1661** Note that this section describes the format of tags generated by software - refer to the 1662** hardware documentation for a description of the tags values generated by the packet input 1663** hardware. 1664** Subgroups are defined here */ 1665#define CVMX_TAG_SUBGROUP_MASK 0xFFFF /* Mask for the value portion of the tag */ 1666#define CVMX_TAG_SUBGROUP_SHIFT 16 1667#define CVMX_TAG_SUBGROUP_PKO 0x1 1668 1669 1670/* End of executive tag subgroup definitions */ 1671 1672/* The remaining values software bit values 0x2 - 0xff are available for application use */ 1673 1674 1675 1676/** 1677 * This function creates a 32 bit tag value from the two values provided. 1678 * 1679 * @param sw_bits The upper bits (number depends on configuration) are set to this value. The remainder of 1680 * bits are set by the hw_bits parameter. 1681 * @param hw_bits The lower bits (number depends on configuration) are set to this value. The remainder of 1682 * bits are set by the sw_bits parameter. 1683 * 1684 * @return 32 bit value of the combined hw and sw bits. 1685 */ 1686static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits) 1687{ 1688 return((((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) << CVMX_TAG_SW_SHIFT) | (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS)))); 1689} 1690/** 1691 * Extracts the bits allocated for software use from the tag 1692 * 1693 * @param tag 32 bit tag value 1694 * 1695 * @return N bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define 1696 */ 1697static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag) 1698{ 1699 return((tag >> (32 - CVMX_TAG_SW_BITS)) & cvmx_build_mask(CVMX_TAG_SW_BITS)); 1700} 1701/** 1702 * 1703 * Extracts the bits allocated for hardware use from the tag 1704 * 1705 * @param tag 32 bit tag value 1706 * 1707 * @return (32 - N) bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define 1708 */ 1709static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag) 1710{ 1711 return(tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS)); 1712} 1713 1714/** 1715 * Store the current POW internal state into the supplied 1716 * buffer. It is recommended that you pass a buffer of at least 1717 * 128KB. The format of the capture may change based on SDK 1718 * version and Octeon chip. 1719 * 1720 * @param buffer Buffer to store capture into 1721 * @param buffer_size 1722 * The size of the supplied buffer 1723 * 1724 * @return Zero on sucess, negative on failure 1725 */ 1726extern int cvmx_pow_capture(void *buffer, int buffer_size); 1727 1728/** 1729 * Dump a POW capture to the console in a human readable format. 1730 * 1731 * @param buffer POW capture from cvmx_pow_capture() 1732 * @param buffer_size 1733 * Size of the buffer 1734 */ 1735extern void cvmx_pow_display(void *buffer, int buffer_size); 1736 1737/** 1738 * Return the number of POW entries supported by this chip 1739 * 1740 * @return Number of POW entries 1741 */ 1742extern int cvmx_pow_get_num_entries(void); 1743 1744 1745#ifdef __cplusplus 1746} 1747#endif 1748 1749#endif /* __CVMX_POW_H__ */ 1750