/* cvmx-pow.c revision 210286 */
/***********************license start***************
 * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *
 *     * Neither the name of Cavium Networks nor the names of
 *       its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written
 *       permission.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
33218885Sdim * 34218885Sdim * 35218885Sdim * For any questions regarding licensing please contact marketing@caviumnetworks.com 36261991Sdim * 37261991Sdim ***********************license end**************************************/ 38276479Sdim 39218885Sdim 40276479Sdim 41276479Sdim 42276479Sdim 43276479Sdim 44276479Sdim/** 45218885Sdim * @file 46218885Sdim * 47218885Sdim * Interface to the hardware Packet Order / Work unit. 48218885Sdim * 49280031Sdim * <hr>$Revision: 29727 $<hr> 50 */ 51 52#include "cvmx.h" 53#include "cvmx-pow.h" 54 55/** 56 * @INTERNAL 57 * This structure stores the internal POW state captured by 58 * cvmx_pow_capture(). It is purposely not exposed to the user 59 * since the format may change without notice. 60 */ 61typedef struct 62{ 63 cvmx_pow_tag_load_resp_t sstatus[16][8]; 64 cvmx_pow_tag_load_resp_t smemload[2048][3]; 65 cvmx_pow_tag_load_resp_t sindexload[16][4]; 66} __cvmx_pow_dump_t; 67 68typedef enum 69{ 70 CVMX_POW_LIST_UNKNOWN=0, 71 CVMX_POW_LIST_FREE=1, 72 CVMX_POW_LIST_INPUT=2, 73 CVMX_POW_LIST_CORE=CVMX_POW_LIST_INPUT+8, 74 CVMX_POW_LIST_DESCHED=CVMX_POW_LIST_CORE+16, 75 CVMX_POW_LIST_NOSCHED=CVMX_POW_LIST_DESCHED+16, 76} __cvmx_pow_list_types_t; 77 78static const char *__cvmx_pow_list_names[] = { 79 "Unknown", 80 "Free List", 81 "Queue 0", "Queue 1", "Queue 2", "Queue 3", 82 "Queue 4", "Queue 5", "Queue 6", "Queue 7", 83 "Core 0", "Core 1", "Core 2", "Core 3", 84 "Core 4", "Core 5", "Core 6", "Core 7", 85 "Core 8", "Core 9", "Core 10", "Core 11", 86 "Core 12", "Core 13", "Core 14", "Core 15", 87 "Desched 0", "Desched 1", "Desched 2", "Desched 3", 88 "Desched 4", "Desched 5", "Desched 6", "Desched 7", 89 "Desched 8", "Desched 9", "Desched 10", "Desched 11", 90 "Desched 12", "Desched 13", "Desched 14", "Desched 15", 91 "Nosched 0", "Nosched 1", "Nosched 2", "Nosched 3", 92 "Nosched 4", "Nosched 5", "Nosched 6", "Nosched 7", 93 "Nosched 8", "Nosched 9", "Nosched 10", "Nosched 11", 94 "Nosched 12", "Nosched 13", "Nosched 14", "Nosched 
15" 95}; 96 97 98/** 99 * Return the number of POW entries supported by this chip 100 * 101 * @return Number of POW entries 102 */ 103int cvmx_pow_get_num_entries(void) 104{ 105 if (OCTEON_IS_MODEL(OCTEON_CN30XX)) 106 return 64; 107 else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) 108 return 256; 109 else if (OCTEON_IS_MODEL(OCTEON_CN52XX)) 110 return 512; 111 else 112 return 2048; 113} 114 115 116/** 117 * Store the current POW internal state into the supplied 118 * buffer. It is recommended that you pass a buffer of at least 119 * 128KB. The format of the capture may change based on SDK 120 * version and Octeon chip. 121 * 122 * @param buffer Buffer to store capture into 123 * @param buffer_size 124 * The size of the supplied buffer 125 * 126 * @return Zero on sucess, negative on failure 127 */ 128int cvmx_pow_capture(void *buffer, int buffer_size) 129{ 130 __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer; 131 int num_cores; 132 int num_pow_entries = cvmx_pow_get_num_entries(); 133 int core; 134 int index; 135 int bits; 136 137 if (buffer_size < (int)sizeof(__cvmx_pow_dump_t)) 138 { 139 cvmx_dprintf("cvmx_pow_capture: Buffer too small\n"); 140 return -1; 141 } 142 143 num_cores = cvmx_octeon_num_cores(); 144 145 /* Read all core related state */ 146 for (core=0; core<num_cores; core++) 147 { 148 cvmx_pow_load_addr_t load_addr; 149 load_addr.u64 = 0; 150 load_addr.sstatus.mem_region = CVMX_IO_SEG; 151 load_addr.sstatus.is_io = 1; 152 load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1; 153 load_addr.sstatus.coreid = core; 154 for (bits=0; bits<8; bits++) 155 { 156 load_addr.sstatus.get_rev = (bits & 1) != 0; 157 load_addr.sstatus.get_cur = (bits & 2) != 0; 158 load_addr.sstatus.get_wqp = (bits & 4) != 0; 159 if ((load_addr.sstatus.get_cur == 0) && load_addr.sstatus.get_rev) 160 dump->sstatus[core][bits].u64 = -1; 161 else 162 dump->sstatus[core][bits].u64 = cvmx_read_csr(load_addr.u64); 163 } 164 } 165 166 /* Read all internal POW entries 
*/ 167 for (index=0; index<num_pow_entries; index++) 168 { 169 cvmx_pow_load_addr_t load_addr; 170 load_addr.u64 = 0; 171 load_addr.smemload.mem_region = CVMX_IO_SEG; 172 load_addr.smemload.is_io = 1; 173 load_addr.smemload.did = CVMX_OCT_DID_TAG_TAG2; 174 load_addr.smemload.index = index; 175 for (bits=0; bits<3; bits++) 176 { 177 load_addr.smemload.get_des = (bits & 1) != 0; 178 load_addr.smemload.get_wqp = (bits & 2) != 0; 179 dump->smemload[index][bits].u64 = cvmx_read_csr(load_addr.u64); 180 } 181 } 182 183 /* Read all group and queue pointers */ 184 for (index=0; index<16; index++) 185 { 186 cvmx_pow_load_addr_t load_addr; 187 load_addr.u64 = 0; 188 load_addr.sindexload.mem_region = CVMX_IO_SEG; 189 load_addr.sindexload.is_io = 1; 190 load_addr.sindexload.did = CVMX_OCT_DID_TAG_TAG3; 191 load_addr.sindexload.qosgrp = index; 192 for (bits=0; bits<4; bits++) 193 { 194 load_addr.sindexload.get_rmt = (bits & 1) != 0; 195 load_addr.sindexload.get_des_get_tail = (bits & 2) != 0; 196 /* The first pass only has 8 valid index values */ 197 if ((load_addr.sindexload.get_rmt == 0) && 198 (load_addr.sindexload.get_des_get_tail == 0) && 199 (index >= 8)) 200 dump->sindexload[index][bits].u64 = -1; 201 else 202 dump->sindexload[index][bits].u64 = cvmx_read_csr(load_addr.u64); 203 } 204 } 205 return 0; 206} 207 208 209/** 210 * Function to display a POW internal queue to the user 211 * 212 * @param name User visible name for the queue 213 * @param name_param Parameter for printf in creating the name 214 * @param valid Set if the queue contains any elements 215 * @param has_one Set if the queue contains exactly one element 216 * @param head The head pointer 217 * @param tail The tail pointer 218 */ 219static void __cvmx_pow_display_list(const char *name, int name_param, int valid, int has_one, uint64_t head, uint64_t tail) 220{ 221 printf(name, name_param); 222 printf(": "); 223 if (valid) 224 { 225 if (has_one) 226 printf("One element index=%llu(0x%llx)\n", CAST64(head), 
CAST64(head)); 227 else 228 printf("Multiple elements head=%llu(0x%llx) tail=%llu(0x%llx)\n", CAST64(head), CAST64(head), CAST64(tail), CAST64(tail)); 229 } 230 else 231 printf("Empty\n"); 232} 233 234 235/** 236 * Mark which list a POW entry is on. Print a warning message if the 237 * entry is already on a list. This happens if the POW changed while 238 * the capture was running. 239 * 240 * @param entry_num Entry number to mark 241 * @param entry_type List type 242 * @param entry_list Array to store marks 243 * 244 * @return Zero on success, negative if already on a list 245 */ 246static int __cvmx_pow_entry_mark_list(int entry_num, __cvmx_pow_list_types_t entry_type, uint8_t entry_list[]) 247{ 248 if (entry_list[entry_num] == 0) 249 { 250 entry_list[entry_num] = entry_type; 251 return 0; 252 } 253 else 254 { 255 printf("\nWARNING: Entry %d already on list %s, but we tried to add it to %s\n", 256 entry_num, __cvmx_pow_list_names[entry_list[entry_num]], __cvmx_pow_list_names[entry_type]); 257 return -1; 258 } 259} 260 261 262/** 263 * Display a list and mark all elements on the list as belonging to 264 * the list. 
265 * 266 * @param entry_type Type of the list to display and mark 267 * @param dump POW capture data 268 * @param entry_list Array to store marks in 269 * @param valid Set if the queue contains any elements 270 * @param has_one Set if the queue contains exactly one element 271 * @param head The head pointer 272 * @param tail The tail pointer 273 */ 274static void __cvmx_pow_display_list_and_walk(__cvmx_pow_list_types_t entry_type, 275 __cvmx_pow_dump_t *dump, uint8_t entry_list[], 276 int valid, int has_one, uint64_t head, uint64_t tail) 277{ 278 __cvmx_pow_display_list(__cvmx_pow_list_names[entry_type], 0, valid, has_one, head, tail); 279 if (valid) 280 { 281 if (has_one) 282 __cvmx_pow_entry_mark_list(head, entry_type, entry_list); 283 else 284 { 285 while (head != tail) 286 { 287 if (__cvmx_pow_entry_mark_list(head, entry_type, entry_list)) 288 break; 289 head = dump->smemload[head][0].s_smemload0.next_index; 290 } 291 __cvmx_pow_entry_mark_list(tail, entry_type, entry_list); 292 } 293 } 294} 295 296 297/** 298 * Dump a POW capture to the console in a human readable format. 
299 * 300 * @param buffer POW capture from cvmx_pow_capture() 301 * @param buffer_size 302 * Size of the buffer 303 */ 304void cvmx_pow_display(void *buffer, int buffer_size) 305{ 306 __cvmx_pow_dump_t *dump = (__cvmx_pow_dump_t*)buffer; 307 int num_pow_entries = cvmx_pow_get_num_entries(); 308 int num_cores; 309 int core; 310 int index; 311 uint8_t entry_list[2048]; 312 313 if (buffer_size < (int)sizeof(__cvmx_pow_dump_t)) 314 { 315 cvmx_dprintf("cvmx_pow_dump: Buffer too small\n"); 316 return; 317 } 318 319 memset(entry_list, 0, sizeof(entry_list)); 320 num_cores = cvmx_octeon_num_cores(); 321 322 printf("POW Display Start\n"); 323 324 /* Print the free list info */ 325 __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_FREE, dump, entry_list, 326 dump->sindexload[0][0].sindexload0.free_val, 327 dump->sindexload[0][0].sindexload0.free_one, 328 dump->sindexload[0][0].sindexload0.free_head, 329 dump->sindexload[0][0].sindexload0.free_tail); 330 331 /* Print the core state */ 332 for (core=0; core<num_cores; core++) 333 { 334 const int bit_rev = 1; 335 const int bit_cur = 2; 336 const int bit_wqp = 4; 337 printf("Core %d State: tag=%s,0x%08x", core, 338 OCT_TAG_TYPE_STRING(dump->sstatus[core][bit_cur].s_sstatus2.tag_type), 339 dump->sstatus[core][bit_cur].s_sstatus2.tag); 340 if (dump->sstatus[core][bit_cur].s_sstatus2.tag_type != CVMX_POW_TAG_TYPE_NULL_NULL) 341 { 342 __cvmx_pow_entry_mark_list(dump->sstatus[core][bit_cur].s_sstatus2.index, CVMX_POW_LIST_CORE + core, entry_list); 343 printf(" grp=%d", dump->sstatus[core][bit_cur].s_sstatus2.grp); 344 printf(" wqp=0x%016llx", CAST64(dump->sstatus[core][bit_cur|bit_wqp].s_sstatus4.wqp)); 345 printf(" index=%d", dump->sstatus[core][bit_cur].s_sstatus2.index); 346 if (dump->sstatus[core][bit_cur].s_sstatus2.head) 347 printf(" head"); 348 else 349 printf(" prev=%d", dump->sstatus[core][bit_cur|bit_rev].s_sstatus3.revlink_index); 350 if (dump->sstatus[core][bit_cur].s_sstatus2.tail) 351 printf(" tail"); 352 else 353 printf(" 
next=%d", dump->sstatus[core][bit_cur].s_sstatus2.link_index); 354 } 355 356 if (dump->sstatus[core][0].s_sstatus0.pend_switch) 357 { 358 printf(" pend_switch=%d", dump->sstatus[core][0].s_sstatus0.pend_switch); 359 printf(" pend_switch_full=%d", dump->sstatus[core][0].s_sstatus0.pend_switch_full); 360 printf(" pend_switch_null=%d", dump->sstatus[core][0].s_sstatus0.pend_switch_null); 361 } 362 363 if (dump->sstatus[core][0].s_sstatus0.pend_desched) 364 { 365 printf(" pend_desched=%d", dump->sstatus[core][0].s_sstatus0.pend_desched); 366 printf(" pend_desched_switch=%d", dump->sstatus[core][0].s_sstatus0.pend_desched_switch); 367 printf(" pend_nosched=%d", dump->sstatus[core][0].s_sstatus0.pend_nosched); 368 if (dump->sstatus[core][0].s_sstatus0.pend_desched_switch) 369 printf(" pend_grp=%d", dump->sstatus[core][0].s_sstatus0.pend_grp); 370 } 371 372 if (dump->sstatus[core][0].s_sstatus0.pend_new_work) 373 { 374 if (dump->sstatus[core][0].s_sstatus0.pend_new_work_wait) 375 printf(" (Waiting for work)"); 376 else 377 printf(" (Getting work)"); 378 } 379 if (dump->sstatus[core][0].s_sstatus0.pend_null_rd) 380 printf(" pend_null_rd=%d", dump->sstatus[core][0].s_sstatus0.pend_null_rd); 381 if (dump->sstatus[core][0].s_sstatus0.pend_nosched_clr) 382 { 383 printf(" pend_nosched_clr=%d", dump->sstatus[core][0].s_sstatus0.pend_nosched_clr); 384 printf(" pend_index=%d", dump->sstatus[core][0].s_sstatus0.pend_index); 385 } 386 if (dump->sstatus[core][0].s_sstatus0.pend_switch || 387 (dump->sstatus[core][0].s_sstatus0.pend_desched && 388 dump->sstatus[core][0].s_sstatus0.pend_desched_switch)) 389 { 390 printf(" pending tag=%s,0x%08x", 391 OCT_TAG_TYPE_STRING(dump->sstatus[core][0].s_sstatus0.pend_type), 392 dump->sstatus[core][0].s_sstatus0.pend_tag); 393 } 394 if (dump->sstatus[core][0].s_sstatus0.pend_nosched_clr) 395 printf(" pend_wqp=0x%016llx\n", CAST64(dump->sstatus[core][bit_wqp].s_sstatus1.pend_wqp)); 396 printf("\n"); 397 } 398 399 /* Print out the state of the 16 
deschedule lists. Each group has two 400 lists. One for entries marked noshed, the other for normal 401 deschedules */ 402 for (index=0; index<16; index++) 403 { 404 __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_NOSCHED + index, dump, entry_list, 405 dump->sindexload[index][2].sindexload1.nosched_val, 406 dump->sindexload[index][2].sindexload1.nosched_one, 407 dump->sindexload[index][2].sindexload1.nosched_head, 408 dump->sindexload[index][2].sindexload1.nosched_tail); 409 __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_DESCHED + index, dump, entry_list, 410 dump->sindexload[index][2].sindexload1.des_val, 411 dump->sindexload[index][2].sindexload1.des_one, 412 dump->sindexload[index][2].sindexload1.des_head, 413 dump->sindexload[index][2].sindexload1.des_tail); 414 } 415 416 /* Print out the state of the 8 internal input queues */ 417 for (index=0; index<8; index++) 418 { 419 __cvmx_pow_display_list_and_walk(CVMX_POW_LIST_INPUT + index, dump, entry_list, 420 dump->sindexload[index][0].sindexload0.loc_val, 421 dump->sindexload[index][0].sindexload0.loc_one, 422 dump->sindexload[index][0].sindexload0.loc_head, 423 dump->sindexload[index][0].sindexload0.loc_tail); 424 } 425 426 /* Print out the state of the 16 memory queues */ 427 for (index=0; index<8; index++) 428 { 429 const char *name; 430 if (dump->sindexload[index][1].sindexload2.rmt_is_head) 431 name = "Queue %da Memory (is head)"; 432 else 433 name = "Queue %da Memory"; 434 __cvmx_pow_display_list(name, index, 435 dump->sindexload[index][1].sindexload2.rmt_val, 436 dump->sindexload[index][1].sindexload2.rmt_one, 437 dump->sindexload[index][1].sindexload2.rmt_head, 438 dump->sindexload[index][3].sindexload3.rmt_tail); 439 if (dump->sindexload[index+8][1].sindexload2.rmt_is_head) 440 name = "Queue %db Memory (is head)"; 441 else 442 name = "Queue %db Memory"; 443 __cvmx_pow_display_list(name, index, 444 dump->sindexload[index+8][1].sindexload2.rmt_val, 445 dump->sindexload[index+8][1].sindexload2.rmt_one, 446 
dump->sindexload[index+8][1].sindexload2.rmt_head, 447 dump->sindexload[index+8][3].sindexload3.rmt_tail); 448 } 449 450 /* Print out each of the internal POW entries. Each entry has a tag, group, 451 wqe, and possibly a next pointer. The next pointer is only valid if this 452 entry isn't make as a tail */ 453 for (index=0; index<num_pow_entries; index++) 454 { 455 printf("Entry %d(%-10s): tag=%s,0x%08x grp=%d wqp=0x%016llx", index, 456 __cvmx_pow_list_names[entry_list[index]], 457 OCT_TAG_TYPE_STRING(dump->smemload[index][0].s_smemload0.tag_type), 458 dump->smemload[index][0].s_smemload0.tag, 459 dump->smemload[index][0].s_smemload0.grp, 460 CAST64(dump->smemload[index][2].s_smemload1.wqp)); 461 if (dump->smemload[index][0].s_smemload0.tail) 462 printf(" tail"); 463 else 464 printf(" next=%d", dump->smemload[index][0].s_smemload0.next_index); 465 if (entry_list[index] >= CVMX_POW_LIST_DESCHED) 466 { 467 printf(" prev=%d", dump->smemload[index][1].s_smemload2.fwd_index); 468 printf(" nosched=%d", dump->smemload[index][1].s_smemload2.nosched); 469 if (dump->smemload[index][1].s_smemload2.pend_switch) 470 { 471 printf(" pending tag=%s,0x%08x", 472 OCT_TAG_TYPE_STRING(dump->smemload[index][1].s_smemload2.pend_type), 473 dump->smemload[index][1].s_smemload2.pend_tag); 474 } 475 } 476 printf("\n"); 477 } 478 479 printf("POW Display End\n"); 480} 481 482