1/*- 2 * Copyright (c) 2002-2007 Neterion, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

#include <dev/nxge/include/xgehal-device.h>
#include <dev/nxge/include/xgehal-channel.h>
#include <dev/nxge/include/xgehal-fifo.h>
#include <dev/nxge/include/xgehal-ring.h>
#include <dev/nxge/include/xgehal-driver.h>
#include <dev/nxge/include/xgehal-mgmt.h>

/* Signature patterns written to adapter memory during initialization. */
#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
#define END_SIGN    0x0

#ifdef XGE_HAL_HERC_EMULATION
#undef XGE_HAL_PROCESS_LINK_INT_IN_ISR
#endif

/*
 * Jenkins hash key length (in bytes).
 */
#define XGE_HAL_JHASH_MSG_LEN 50

/*
 * mix(a,b,c) - the mixing step used in the Jenkins hash algorithm.
 */
#define mix(a,b,c) { \
	a -= b; a -= c; a ^= (c>>13); \
	b -= c; b -= a; b ^= (a<<8); \
	c -= a; c -= b; c ^= (b>>13); \
	a -= b; a -= c; a ^= (c>>12); \
	b -= c; b -= a; b ^= (a<<16); \
	c -= a; c -= b; c ^= (b>>5); \
	a -= b; a -= c; a ^= (c>>3); \
	b -= c; b -= a; b ^= (a<<10); \
	c -= a; c -= b; c ^= (b>>15); \
}


/*
 * __hal_device_event_queued
 * @data: pointer to xge_hal_device_t structure
 * @event_type: type code of the event that was queued
 *
 * Will be called when a new event has been successfully queued.
 * Forwards the notification to the ULD's event_queued callback,
 * if one was registered.
 */
void
__hal_device_event_queued(void *data, int event_type)
{
	xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC);
	if (g_xge_hal_driver->uld_callbacks.event_queued) {
	    g_xge_hal_driver->uld_callbacks.event_queued(data, event_type);
	}
}

/*
 * __hal_pio_mem_write32_upper
 *
 * Endian-aware implementation of xge_os_pio_mem_write32().
 * Since Xframe has 64bit registers, we differentiate upper and lower
 * parts.
 */
void
__hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr)
{
#if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
	xge_os_pio_mem_write32(pdev, regh, val, addr);
#else
	xge_os_pio_mem_write32(pdev, regh, val, (void *)((char *)addr + 4));
#endif
}

/*
 * __hal_pio_mem_write32_lower
 *
 * Endian-aware implementation of xge_os_pio_mem_write32().
 * Since Xframe has 64bit registers, we differentiate upper and lower
 * parts.
 */
void
__hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val,
	            void *addr)
{
#if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
	xge_os_pio_mem_write32(pdev, regh, val,
	                   (void *) ((char *)addr + 4));
#else
	xge_os_pio_mem_write32(pdev, regh, val, addr);
#endif
}

/*
 * __hal_device_register_poll
 * @hldev: pointer to xge_hal_device_t structure
 * @reg: register to poll for
 * @op: 0 - bit reset, 1 - bit set
 * @mask: mask for logical "and" condition based on %op
 * @max_millis: maximum time to try to poll in milliseconds
 *
 * Will poll certain register for specified amount of time.
 * Will poll until masked bit is not cleared.
 */
xge_hal_status_e
__hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg,
	        int op, u64 mask, int max_millis)
{
	u64 val64;
	int i = 0;
	xge_hal_status_e ret = XGE_HAL_FAIL;

	xge_os_udelay(10);

	/* First phase: poll every 100us for roughly the first millisecond. */
	do {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
	    if (op == 0 && !(val64 & mask))
	        return XGE_HAL_OK;
	    else if (op == 1 && (val64 & mask) == mask)
	        return XGE_HAL_OK;
	    xge_os_udelay(100);
	} while (++i <= 9);

	/* Second phase: back off to 1ms polls until max_millis expires.
	 * Note that 'i' deliberately carries over from the first phase. */
	do {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
	    if (op == 0 && !(val64 & mask))
	        return XGE_HAL_OK;
	    else if (op == 1 && (val64 & mask) == mask)
	        return XGE_HAL_OK;
	    xge_os_udelay(1000);
	} while (++i < max_millis);

	return ret;
}

/*
 * __hal_device_wait_quiescent
 * @hldev: the device
 * @hw_status: hw_status in case of error
 *
 * Will wait until device is quiescent for some blocks.
 */
static xge_hal_status_e
__hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* poll and wait first */
#ifdef XGE_HAL_HERC_EMULATION
	/* Emulation does not assert P_PLL_LOCK, so it is left out here. */
	(void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
	        (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
	         XGE_HAL_ADAPTER_STATUS_RDMA_READY |
	         XGE_HAL_ADAPTER_STATUS_PFC_READY |
	         XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
	         XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
	         XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
	         XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
	         XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK),
	        XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
#else
	(void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
	        (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
	         XGE_HAL_ADAPTER_STATUS_RDMA_READY |
	         XGE_HAL_ADAPTER_STATUS_PFC_READY |
	         XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
	         XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
	         XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
	         XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
	         XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK |
	         XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK),
	        XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
#endif

	return xge_hal_device_status(hldev, hw_status);
}

/**
 * xge_hal_device_is_slot_freeze
 * @devh: the device
 *
 * Returns non-zero if the slot is freezed.
 * The determination is made based on the adapter_status
 * register which will never give all FFs, unless PCI read
 * cannot go through.
 */
int
xge_hal_device_is_slot_freeze(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u16 device_id;
	u64 adapter_status =
	        xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                      &bar0->adapter_status);
	/* A PCI config read of the device id is used as a second probe:
	 * it also returns all-ones when the slot is frozen. */
	xge_os_pci_read16(hldev->pdev,hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, device_id),
	        &device_id);
#ifdef TX_DEBUG
	if (adapter_status == XGE_HAL_ALL_FOXES)
	{
	    u64 dummy;
	    dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                      &bar0->pcc_enable);
	    printf(">>> Slot is frozen!\n");
	    brkpoint(0);
	}
#endif
	return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff));
}


/*
 * __hal_device_led_actifity_fix
 * @hldev: pointer to xge_hal_device_t structure
 *
 * SXE-002: Configure link and activity LED to turn it off
 */
static void
__hal_device_led_actifity_fix(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u16 subid;
	u64 val64;

	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid);

	/*
	 * In the case of Herc, there is a new register named beacon control
	 * is added which was not present in Xena.
	 * Beacon control register in Herc is at the same offset as
	 * gpio control register in Xena. It means they are one and same in
	 * the case of Xena. Also, gpio control register offset in Herc and
	 * Xena is different.
	 * The current register map represents Herc(It means we have
	 * both beacon and gpio control registers in register map).
	 * WRT transition from Xena to Herc, all the code in Xena which was
	 * using gpio control register for LED handling would have to
	 * use beacon control register in Herc and the rest of the code
	 * which uses gpio control in Xena would use the same register
	 * in Herc.
	 * WRT LED handling(following code), In the case of Herc, beacon
	 * control register has to be used. This is applicable for Xena also,
	 * since it represents the gpio control register in Xena.
	 */
	if ((subid & 0xFF) >= 0x07) {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                      &bar0->beacon_control);
	    val64 |= 0x0000800000000000ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                   val64, &bar0->beacon_control);
	    val64 = 0x0411040400000000ULL;
	    /* NOTE(review): 0x2700 is a raw BAR0 offset not present in the
	     * register map structure - presumably an undocumented LED
	     * control register; confirm against the Xframe manual. */
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                   (void *) ((u8 *)bar0 + 0x2700));
	}
}

/* Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.
 */
static u64 xena_fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};

/*
 * __hal_device_xena_fix_mac
 * @hldev: HAL device handle.
 *
 * Fix for all "FFs" MAC address problems observed on Alpha platforms.
 */
static void
__hal_device_xena_fix_mac(xge_hal_device_t *hldev)
{
	int i = 0;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/*
	 * In the case of Herc, there is a new register named beacon control
	 * is added which was not present in Xena.
	 * Beacon control register in Herc is at the same offset as
	 * gpio control register in Xena. It means they are one and same in
	 * the case of Xena. Also, gpio control register offset in Herc and
	 * Xena is different.
	 * The current register map represents Herc(It means we have
	 * both beacon and gpio control registers in register map).
	 * WRT transition from Xena to Herc, all the code in Xena which was
	 * using gpio control register for LED handling would have to
	 * use beacon control register in Herc and the rest of the code
	 * which uses gpio control in Xena would use the same register
	 * in Herc.
	 * In the following code(xena_fix_mac), beacon control register has
	 * to be used in the case of Xena, since it represents gpio control
	 * register. In the case of Herc, there is no change required.
	 */
	while (xena_fix_mac[i] != END_SIGN) {
	    /* Replay the workaround write sequence, one value per ms. */
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            xena_fix_mac[i++], &bar0->beacon_control);
	    xge_os_mdelay(1);
	}
}

/*
 * xge_hal_device_bcast_enable
 * @hldev: HAL device handle.
 *
 * Enable receiving broadcasts.
 * The host must first write RMAC_CFG_KEY "key"
 * register, and then - MAC_CFG register.
 */
void
xge_hal_device_bcast_enable(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->mac_cfg);
	val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE;

	/* Unlock MAC_CFG by writing the key first, then update it.
	 * Only the upper 32 bits are rewritten; the broadcast-enable bit
	 * resides there. */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);

	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	      (u32)(val64 >> 32), &bar0->mac_cfg);

	/* NOTE(review): the message reports the configured rmac_bcast_en
	 * flag, not the bit just set above - confirm this is intended. */
	xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
	    (unsigned long long)val64,
	    hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
}

/*
 * xge_hal_device_bcast_disable
 * @hldev: HAL device handle.
 *
 * Disable receiving broadcasts.
 * The host must first write RMAC_CFG_KEY "key"
 * register, and then - MAC_CFG register.
 */
void
xge_hal_device_bcast_disable(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->mac_cfg);

	val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE);
	/* Unlock MAC_CFG by writing the key first, then update it. */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	     XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);

	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	      (u32)(val64 >> 32), &bar0->mac_cfg);

	/* NOTE(review): the message reports the configured rmac_bcast_en
	 * flag, not the bit just cleared above - confirm this is intended. */
	xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
	    (unsigned long long)val64,
	    hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
}

/*
 * __hal_device_shared_splits_configure
 * @hldev: HAL device handle.
 *
 * TxDMA will stop Read request if the number of read split had exceeded
 * the limit set by shared_splits
 */
static void
__hal_device_shared_splits_configure(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                  &bar0->pic_control);
	val64 |=
	XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	             &bar0->pic_control);
	xge_debug_device(XGE_TRACE, "%s", "shared splits configured");
}

/*
 * __hal_device_rmac_padding_configure
 * @hldev: HAL device handle.
 *
 * Configure RMAC frame padding. Depends on configuration, it
 * can be send to host or removed by MAC.
 */
static void
__hal_device_rmac_padding_configure(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/* MAC_CFG is protected; write the unlock key before updating it. */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	         XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                  &bar0->mac_cfg);
	val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE );
	val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE );
	val64 |= XGE_HAL_MAC_CFG_TMAC_APPEND_PAD;

	/*
	 * If the RTH enable bit is not set, strip the FCS
	 */
	if (!hldev->config.rth_en ||
	    !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	             &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) {
	    val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS;
	}

	val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD );
	val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM;

	/* Only the upper half of MAC_CFG carries the bits changed above. */
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	        (u32)(val64 >> 32), (char*)&bar0->mac_cfg);
	xge_os_mdelay(1);

	xge_debug_device(XGE_TRACE,
	          "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured",
	          (unsigned long long)val64);
}

/*
 *
 * __hal_device_pause_frames_configure
 * @hldev: HAL device handle.
 *
 * Set Pause threshold.
 *
 * Pause frame is generated if the amount of data outstanding
 * on any queue exceeded the ratio of
 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
 */
static void
__hal_device_pause_frames_configure(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	int i;
	u64 val64;

	/* Media-dependent default thresholds for all eight queues. */
	switch (hldev->config.mac.media) {
	    case XGE_HAL_MEDIA_SR:
	    case XGE_HAL_MEDIA_SW:
	        val64=0xfffbfffbfffbfffbULL;
	        break;
	    case XGE_HAL_MEDIA_LR:
	    case XGE_HAL_MEDIA_LW:
	        val64=0xffbbffbbffbbffbbULL;
	        break;
	    case XGE_HAL_MEDIA_ER:
	    case XGE_HAL_MEDIA_EW:
	    default:
	        val64=0xffbbffbbffbbffbbULL;
	        break;
	}

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        val64, &bar0->mc_pause_thresh_q0q3);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        val64, &bar0->mc_pause_thresh_q4q7);

	/* Set the time value to be inserted in the pause frame generated
	 * by Xframe */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                  &bar0->rmac_pause_cfg);
	if (hldev->config.mac.rmac_pause_gen_en)
	    val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN;
	else
	    val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN);
	if (hldev->config.mac.rmac_pause_rcv_en)
	    val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN;
	else
	    val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN);
	val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	             &bar0->rmac_pause_cfg);

	/* Replicate the configured q0q3 threshold into each of the four
	 * 16-bit lanes (high byte fixed at 0xFF). */
	val64 = 0;
	for (i = 0; i<4; i++) {
	    val64 |=
	         (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3)
	                    <<(i*2*8));
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	             &bar0->mc_pause_thresh_q0q3);

	/* Same for queues 4..7. */
	val64 = 0;
	for (i = 0; i<4; i++) {
	    val64 |=
	         (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7)
	                    <<(i*2*8));
	}
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	             &bar0->mc_pause_thresh_q4q7);
	xge_debug_device(XGE_TRACE, "%s", "pause frames configured");
}

/*
 * Herc's clock rate doubled, unless the slot is 33MHz.
 * Returns @time_ival scaled accordingly for the detected card/bus.
 */
unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev,
	              unsigned int time_ival)
{
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
	    return time_ival;

	xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC);

	if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN &&
	    hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ)
	    time_ival *= 2;

	return time_ival;
}


/*
 * __hal_device_bus_master_disable
 * @hldev: HAL device handle.
 *
 * Disable bus mastership.
 */
static void
__hal_device_bus_master_disable (xge_hal_device_t *hldev)
{
	u16 cmd;
	u16 bus_master = 4;	/* PCI command register Bus Master bit */

	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
	cmd &= ~bus_master;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	         xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
}

/*
 * __hal_device_bus_master_enable
 * @hldev: HAL device handle.
 *
 * Enable bus mastership.
 */
static void
__hal_device_bus_master_enable (xge_hal_device_t *hldev)
{
	u16 cmd;
	u16 bus_master = 4;	/* PCI command register Bus Master bit */

	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);

	/* already enabled? do nothing */
	if (cmd & bus_master)
	    return;

	cmd |= bus_master;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	         xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
}
/*
 * __hal_device_intr_mgmt
 * @hldev: HAL device handle.
 * @mask: mask indicating which Intr block must be modified.
 * @flag: if true - enable, otherwise - disable interrupts.
 *
 * Disable or enable device interrupts. Mask is used to specify
 * which hardware blocks should produce interrupts. For details
 * please refer to Xframe User Guide.
 */
static void
__hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64 = 0, temp64 = 0;
	u64 gim, gim_saved;

	/* general_int_mask is updated locally and written back only once
	 * at the end, and only if it actually changed. */
	gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev,
	              hldev->regh0, &bar0->general_int_mask);

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) {
	    /* Enable PIC Intrs in the general intr mask register */
	    val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/;
	    if (flag) {
	        gim &= ~((u64) val64);
	        temp64 = xge_os_pio_mem_read64(hldev->pdev,
	                  hldev->regh0, &bar0->pic_int_mask);

	        temp64 &= ~XGE_HAL_PIC_INT_TX;
#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	        if (xge_hal_device_check_id(hldev) ==
	                        XGE_HAL_CARD_HERC) {
	            temp64 &= ~XGE_HAL_PIC_INT_MISC;
	        }
#endif
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       temp64, &bar0->pic_int_mask);
#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	        if (xge_hal_device_check_id(hldev) ==
	                        XGE_HAL_CARD_HERC) {
	            /*
	             * Unmask only Link Up interrupt
	             */
	            temp64 = xge_os_pio_mem_read64(hldev->pdev,
	                hldev->regh0, &bar0->misc_int_mask);
	            temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
	            xge_os_pio_mem_write64(hldev->pdev,
	                hldev->regh0, temp64,
	                &bar0->misc_int_mask);
	            xge_debug_device(XGE_TRACE,
	                "unmask link up flag "XGE_OS_LLXFMT,
	                (unsigned long long)temp64);
	        }
#endif
	    } else { /* flag == 0 */

#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	        if (xge_hal_device_check_id(hldev) ==
	                        XGE_HAL_CARD_HERC) {
	            /*
	             * Mask both Link Up and Down interrupts
	             */
	            temp64 = xge_os_pio_mem_read64(hldev->pdev,
	                hldev->regh0, &bar0->misc_int_mask);
	            temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
	            temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
	            xge_os_pio_mem_write64(hldev->pdev,
	                hldev->regh0, temp64,
	                &bar0->misc_int_mask);
	            xge_debug_device(XGE_TRACE,
	                "mask link up/down flag "XGE_OS_LLXFMT,
	                (unsigned long long)temp64);
	        }
#endif
	        /* Disable PIC Intrs in the general intr mask
	         * register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       XGE_HAL_ALL_INTRS_DIS,
	                       &bar0->pic_int_mask);
	        gim |= val64;
	    }
	}

	/* DMA Interrupts */
	/* Enabling/Disabling Tx DMA interrupts */
	if (mask & XGE_HAL_TX_DMA_INTR) {
	    /* Enable TxDMA Intrs in the general intr mask register */
	    val64 = XGE_HAL_TXDMA_INT_M;
	    if (flag) {
	        gim &= ~((u64) val64);
	        /* Enable all TxDMA interrupts */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       0x0, &bar0->txdma_int_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       0x0, &bar0->pfc_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       0x0, &bar0->tda_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       0x0, &bar0->pcc_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       0x0, &bar0->tti_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       0x0, &bar0->lso_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       0x0, &bar0->tpa_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       0x0, &bar0->sm_err_mask);

	    } else { /* flag == 0 */

	        /* Disable TxDMA Intrs in the general intr mask
	         * register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       XGE_HAL_ALL_INTRS_DIS,
	                       &bar0->txdma_int_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       XGE_HAL_ALL_INTRS_DIS,
	                       &bar0->pfc_err_mask);

	        gim |= val64;
	    }
	}

	/* Enabling/Disabling Rx DMA interrupts */
	if (mask & XGE_HAL_RX_DMA_INTR) {
	    /* Enable RxDMA Intrs in the general intr mask register */
	    val64 = XGE_HAL_RXDMA_INT_M;
	    if (flag) {

	        gim &= ~((u64) val64);
	        /* All RxDMA block interrupts are disabled for now
	         * TODO */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       XGE_HAL_ALL_INTRS_DIS,
	                       &bar0->rxdma_int_mask);

	    } else { /* flag == 0 */

	        /* Disable RxDMA Intrs in the general intr mask
	         * register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       XGE_HAL_ALL_INTRS_DIS,
	                       &bar0->rxdma_int_mask);

	        gim |= val64;
	    }
	}

	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) {
	    val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M;
	    if (flag) {

	        gim &= ~((u64) val64);

	        /* All MAC block error inter. are disabled for now. */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	           XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	           XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);

	    } else { /* flag == 0 */

	        /* Disable MAC Intrs in the general intr mask
	         * register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	           XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	           XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);

	        gim |= val64;
	    }
	}

	/* XGXS Interrupts */
	if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) {
	    val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M;
	    if (flag) {

	        gim &= ~((u64) val64);
	        /* All XGXS block error interrupts are disabled for now
	         * TODO */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	           XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);

	    } else { /* flag == 0 */

	        /* Disable MC Intrs in the general intr mask register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	           XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);

	        gim |= val64;
	    }
	}

	/* Memory Controller(MC) interrupts */
	if (mask & XGE_HAL_MC_INTR) {
	    val64 = XGE_HAL_MC_INT_M;
	    if (flag) {

	        gim &= ~((u64) val64);

	        /* Enable all MC blocks error interrupts */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                     0x0ULL, &bar0->mc_int_mask);

	    } else { /* flag == 0 */

	        /* Disable MC Intrs in the general intr mask
	         * register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                     XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask);

	        gim |= val64;
	    }
	}


	/* Tx traffic interrupts */
	if (mask & XGE_HAL_TX_TRAFFIC_INTR) {
	    val64 = XGE_HAL_TXTRAFFIC_INT_M;
	    if (flag) {

	        gim &= ~((u64) val64);

	        /* Enable all the Tx side interrupts */
	        /* '0' Enables all 64 TX interrupt levels. */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
	                       &bar0->tx_traffic_mask);

	    } else { /* flag == 0 */

	        /* Disable Tx Traffic Intrs in the general intr mask
	         * register. */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       XGE_HAL_ALL_INTRS_DIS,
	                       &bar0->tx_traffic_mask);
	        gim |= val64;
	    }
	}

	/* Rx traffic interrupts */
	if (mask & XGE_HAL_RX_TRAFFIC_INTR) {
	    val64 = XGE_HAL_RXTRAFFIC_INT_M;
	    if (flag) {
	        gim &= ~((u64) val64);
	        /* '0' Enables all 8 RX interrupt levels. */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
	                       &bar0->rx_traffic_mask);

	    } else { /* flag == 0 */

	        /* Disable Rx Traffic Intrs in the general intr mask
	         * register.
	         */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                       XGE_HAL_ALL_INTRS_DIS,
	                       &bar0->rx_traffic_mask);

	        gim |= val64;
	    }
	}

	/* Sched Timer interrupt */
	if (mask & XGE_HAL_SCHED_INTR) {
	    if (flag) {
	        temp64 = xge_os_pio_mem_read64(hldev->pdev,
	                hldev->regh0, &bar0->txpic_int_mask);
	        temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR;
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                temp64, &bar0->txpic_int_mask);

	        xge_hal_device_sched_timer(hldev,
	                hldev->config.sched_timer_us,
	                hldev->config.sched_timer_one_shot);
	    } else {
	        temp64 = xge_os_pio_mem_read64(hldev->pdev,
	                hldev->regh0, &bar0->txpic_int_mask);
	        temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR;

	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                temp64, &bar0->txpic_int_mask);

	        xge_hal_device_sched_timer(hldev,
	                XGE_HAL_SCHED_TIMER_DISABLED,
	                XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE);
	    }
	}

	if (gim != gim_saved) {
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim,
	        &bar0->general_int_mask);
	    xge_debug_device(XGE_TRACE, "general_int_mask updated "
	         XGE_OS_LLXFMT" => "XGE_OS_LLXFMT,
	        (unsigned long long)gim_saved, (unsigned long long)gim);
	}
}

/*
 * __hal_device_bimodal_configure
 * @hldev: HAL device handle.
 *
 * Bimodal parameters initialization.
903 */ 904static void 905__hal_device_bimodal_configure(xge_hal_device_t *hldev) 906{ 907 int i; 908 909 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) { 910 xge_hal_tti_config_t *tti; 911 xge_hal_rti_config_t *rti; 912 913 if (!hldev->config.ring.queue[i].configured) 914 continue; 915 rti = &hldev->config.ring.queue[i].rti; 916 tti = &hldev->bimodal_tti[i]; 917 918 tti->enabled = 1; 919 tti->urange_a = hldev->bimodal_urange_a_en * 10; 920 tti->urange_b = 20; 921 tti->urange_c = 30; 922 tti->ufc_a = hldev->bimodal_urange_a_en * 8; 923 tti->ufc_b = 16; 924 tti->ufc_c = 32; 925 tti->ufc_d = 64; 926 tti->timer_val_us = hldev->bimodal_timer_val_us; 927 tti->timer_ac_en = 1; 928 tti->timer_ci_en = 0; 929 930 rti->urange_a = 10; 931 rti->urange_b = 20; 932 rti->urange_c = 30; 933 rti->ufc_a = 1; /* <= for netpipe type of tests */ 934 rti->ufc_b = 4; 935 rti->ufc_c = 4; 936 rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */ 937 rti->timer_ac_en = 1; 938 rti->timer_val_us = 5; /* for optimal bus efficiency usage */ 939 } 940} 941 942/* 943 * __hal_device_tti_apply 944 * @hldev: HAL device handle. 945 * 946 * apply TTI configuration. 
 */
static xge_hal_status_e
__hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti,
	          int num, int runtime)
{
	u64 val64, data1 = 0, data2 = 0;
	xge_hal_pci_bar0_t *bar0;

	/* At runtime the ISR-mapped BAR0 must be used. */
	if (runtime)
	    bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	else
	    bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	if (tti->timer_val_us) {
	    unsigned int tx_interval;

	    /* Convert microseconds into timer ticks when the PCI bus
	     * frequency is known; otherwise use the raw value. */
	    if (hldev->config.pci_freq_mherz) {
	        tx_interval = hldev->config.pci_freq_mherz *
	                tti->timer_val_us / 64;
	        tx_interval =
	            __hal_fix_time_ival_herc(hldev,
	                         tx_interval);
	    } else {
	        tx_interval = tti->timer_val_us;
	    }
	    data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval);
	    if (tti->timer_ac_en) {
	        data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN;
	    }
	    if (tti->timer_ci_en) {
	        data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN;
	    }

	    if (!runtime) {
	        xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s",
	              num, tx_interval, tti->timer_ci_en ?
	              "enabled": "disabled");
	    }
	}

	if (tti->urange_a ||
	    tti->urange_b ||
	    tti->urange_c ||
	    tti->ufc_a ||
	    tti->ufc_b ||
	    tti->ufc_c ||
	    tti->ufc_d ) {
	    data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) |
	         XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) |
	         XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c);

	    data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) |
	         XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) |
	         XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) |
	         XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d);
	}

	/* Write both data words, reading each back to flush the posted
	 * write, then issue a memory barrier before the command strobe. */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
	             &bar0->tti_data1_mem);
	(void)xge_os_pio_mem_read64(hldev->pdev,
	      hldev->regh0, &bar0->tti_data1_mem);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
	             &bar0->tti_data2_mem);
	(void)xge_os_pio_mem_read64(hldev->pdev,
	      hldev->regh0, &bar0->tti_data2_mem);
	xge_os_wmb();

	val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD |
	XGE_HAL_TTI_CMD_MEM_OFFSET(num);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	             &bar0->tti_command_mem);

	/* At init time wait for the strobe to clear; at runtime the caller
	 * cannot sleep, so let it retry on EXECUTING. */
	if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem,
	       0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
	       XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    /* upper layer may require to repeat */
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	if (!runtime) {
	    xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x"
	       XGE_OS_LLXFMT, num,
	       (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
	       hldev->regh0, &bar0->tti_data1_mem));
	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_tti_configure
 * @hldev: HAL device handle.
 *
 * TTI Initialization.
 * Initialize Transmit Traffic Interrupt Scheme.
 */
static xge_hal_status_e
__hal_device_tti_configure(xge_hal_device_t *hldev, int runtime)
{
	int i;

	/* Apply every enabled TTI entry of every configured FIFO. */
	for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
	    int j;

	    if (!hldev->config.fifo.queue[i].configured)
	        continue;

	    for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
	        xge_hal_status_e status;

	        if (!hldev->config.fifo.queue[i].tti[j].enabled)
	            continue;

	        /* at least some TTI enabled. Record it. */
	        hldev->tti_enabled = 1;

	        status = __hal_device_tti_apply(hldev,
	            &hldev->config.fifo.queue[i].tti[j],
	            i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime);
	        if (status != XGE_HAL_OK)
	            return status;
	    }
	}

	/* processing bimodal TTIs */
	for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
	    xge_hal_status_e status;

	    if (!hldev->bimodal_tti[i].enabled)
	        continue;

	    /* at least some bimodal TTI enabled. Record it. */
	    hldev->tti_enabled = 1;

	    status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i],
	        XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime);
	    if (status != XGE_HAL_OK)
	        return status;

	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_rti_configure
 * @hldev: HAL device handle.
 * @runtime: non-zero when reconfiguring on the runtime (ISR-safe) path.
 *
 * RTI Initialization.
 * Initialize Receive Traffic Interrupt Scheme.
 */
xge_hal_status_e
__hal_device_rti_configure(xge_hal_device_t *hldev, int runtime)
{
	xge_hal_pci_bar0_t *bar0;
	u64 val64, data1 = 0, data2 = 0;
	int i;

	if (runtime) {
	    /*
	     * we don't want to re-configure RTI in case when
	     * bimodal interrupts are in use. Instead reconfigure TTI
	     * with new RTI values.
	     */
	    if (hldev->config.bimodal_interrupts) {
	        __hal_device_bimodal_configure(hldev);
	        return __hal_device_tti_configure(hldev, 1);
	    }
	    bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	} else
	    bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* NOTE(review): data1/data2 are accumulated across ring
	 * iterations rather than reset per ring — confirm intended. */
	for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
	    xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti;

	    if (!hldev->config.ring.queue[i].configured)
	        continue;

	    if (rti->timer_val_us) {
	        unsigned int rx_interval;

	        /* Convert microseconds to ticks (freq/8 when the PCI
	         * frequency is known, plus the Herc fixup). */
	        if (hldev->config.pci_freq_mherz) {
	            rx_interval = hldev->config.pci_freq_mherz *
	                rti->timer_val_us / 8;
	            rx_interval =
	                __hal_fix_time_ival_herc(hldev,
	                    rx_interval);
	        } else {
	            rx_interval = rti->timer_val_us;
	        }
	        data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval);
	        if (rti->timer_ac_en) {
	            data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN;
	        }
	        /* RX continuous-interrupt enable is unconditional. */
	        data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN;
	    }

	    if (rti->urange_a ||
	        rti->urange_b ||
	        rti->urange_c ||
	        rti->ufc_a ||
	        rti->ufc_b ||
	        rti->ufc_c ||
	        rti->ufc_d) {
	        data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) |
	            XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) |
	            XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c);

	        data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) |
	             XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) |
	             XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) |
	             XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d);
	    }

	    /* Write data words; read-backs presumably flush the posted
	     * writes before the command strobe — keep the order. */
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
	            &bar0->rti_data1_mem);
	    (void)xge_os_pio_mem_read64(hldev->pdev,
	          hldev->regh0, &bar0->rti_data1_mem);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
	            &bar0->rti_data2_mem);
	    (void)xge_os_pio_mem_read64(hldev->pdev,
	          hldev->regh0, &bar0->rti_data2_mem);
	    xge_os_wmb();

	    /* Issue the write-enable strobe for RTI entry i. */
	    val64 = XGE_HAL_RTI_CMD_MEM_WE |
	    XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD;
	    val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->rti_command_mem);

	    /* On the runtime path we do not busy-wait. */
	    if (!runtime && __hal_device_register_poll(hldev,
	        &bar0->rti_command_mem, 0,
	        XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	        /* upper layer may require to repeat */
	        return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	    }

	    if (!runtime) {
	        xge_debug_device(XGE_TRACE,
	          "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT,
	          i,
	          (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
	          hldev->regh0, &bar0->rti_data1_mem));
	    }
	}

	return XGE_HAL_OK;
}


/* Constants to be programmed into the Xena's registers to configure
 * the XAUI. */
static u64 default_xena_mdio_cfg[] = {
	/* Reset PMA PLL */
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100008000E4ULL,
	/* Remove Reset from PMA PLL */
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100000000E4ULL,
	END_SIGN
};

/* Herc needs no MDIO programming — table is empty. */
static u64 default_herc_mdio_cfg[] = {
	END_SIGN
};

static u64 default_xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D93500E4ULL, 0x8001051500000000ULL,
	0x80010515000000E0ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F21000E4ULL,
	/* Set PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515B20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515B20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515B20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515B20000E4ULL,
	SWITCH_SIGN,
	/* Remove PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515F20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515F20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515F20000E4ULL,
	END_SIGN
};

/*
static u64 default_herc_dtx_cfg[] = {
	0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
	0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
*/

static u64 default_herc_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,

	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,

	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,

	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};


/*
 * __hal_serial_mem_write64 - write a 64-bit value as two ordered
 * 32-bit halves (upper first), with a write barrier between them and
 * a 1 ms settle delay after — used for XAUI register programming.
 */
void
__hal_serial_mem_write64(xge_hal_device_t *hldev, u64 value, u64 *reg)
{
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	        (u32)(value>>32), reg);
	xge_os_wmb();
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
	        (u32)value, reg);
	xge_os_wmb();
	xge_os_mdelay(1);
}

/*
 * __hal_serial_mem_read64 - 64-bit register read followed by a 1 ms
 * settle delay.
 */
u64
__hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg)
{
	u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	          reg);
	xge_os_mdelay(1);
	return val64;
}

/*
 * __hal_device_xaui_configure
 * @hldev: HAL device handle.
 *
 * Configure XAUI Interface of Xena.
 *
 * To Configure the Xena's XAUI, one has to write a series
 * of 64 bit values into two registers in a particular
 * sequence.
 *           Hence a macro 'SWITCH_SIGN' has been defined
 * which will be defined in the array of configuration values
 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
 * to switch writing from one register to another. We continue
 * writing these values until we encounter the 'END_SIGN' macro.
 * For example, After making a series of 21 writes into
 * dtx_control register the 'SWITCH_SIGN' appears and hence we
 * start writing into mdio_control until we encounter END_SIGN.
 */
static void
__hal_device_xaui_configure(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	int mdio_cnt = 0, dtx_cnt = 0;
	u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL;

	/* Select the per-chip configuration tables. */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
	    default_dtx_cfg = default_xena_dtx_cfg;
	    default_mdio_cfg = default_xena_mdio_cfg;
	} else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	    default_dtx_cfg = default_herc_dtx_cfg;
	    default_mdio_cfg = default_herc_mdio_cfg;
	} else {
	    /* Unknown card id: assert (default_dtx_cfg is NULL here)
	     * and bail out without touching the hardware. */
	    xge_assert(default_dtx_cfg);
	    return;
	}

	/* Alternate between the two tables on SWITCH_SIGN entries until
	 * both are positioned at END_SIGN. */
	do {
	    dtx_cfg:
	    while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
	        if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
	            dtx_cnt++;
	            goto mdio_cfg;
	        }
	        __hal_serial_mem_write64(hldev, default_dtx_cfg[dtx_cnt],
	                   &bar0->dtx_control);
	        dtx_cnt++;
	    }
	    mdio_cfg:
	    while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
	        if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
	            mdio_cnt++;
	            goto dtx_cfg;
	        }
	        __hal_serial_mem_write64(hldev, default_mdio_cfg[mdio_cnt],
	                 &bar0->mdio_control);
	        mdio_cnt++;
	    }
	} while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
	       (default_mdio_cfg[mdio_cnt] == END_SIGN)) );

	xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured");
}

/*
 * __hal_device_mac_link_util_set
 * @hldev: HAL device handle.
 *
 * Set sampling rate to calculate link utilization.
 */
static void
__hal_device_mac_link_util_set(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/* Combine the configured TX and RX sampling periods into one
	 * mac_link_util register write. */
	val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL(
	        hldev->config.mac.tmac_util_period) |
	    XGE_HAL_MAC_RX_LINK_UTIL_VAL(
	        hldev->config.mac.rmac_util_period);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	              &bar0->mac_link_util);
	xge_debug_device(XGE_TRACE, "%s",
	          "bandwidth link utilization configured");
}

/*
 * __hal_device_set_swapper
 * @hldev: HAL device handle.
 *
 * Set the Xframe's byte "swapper" in accordance with
 * endianness of the host.
 */
xge_hal_status_e
__hal_device_set_swapper(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * from 32bit errata:
	 *
	 * The SWAPPER_CONTROL register determines how the adapter accesses
	 * host memory as well as how it responds to read and write requests
	 * from the host system. Writes to this register should be performed
	 * carefully, since the byte swappers could reverse the order of bytes.
	 * When configuring this register keep in mind that writes to the PIF
	 * read and write swappers could reverse the order of the upper and
	 * lower 32-bit words. This means that the driver may have to write
	 * to the upper 32 bits of the SWAPPER_CONTROL twice in order to
	 * configure the entire register. */

	/*
	 * The device by default set to a big endian format, so a big endian
	 * driver need not set anything.
	 */

#if defined(XGE_HAL_CUSTOM_HW_SWAPPER)

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	              0xffffffffffffffffULL, &bar0->swapper_ctrl);

	val64 = XGE_HAL_CUSTOM_HW_SWAPPER;

	xge_os_wmb();
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	              &bar0->swapper_ctrl);

	xge_debug_device(XGE_TRACE, "using custom HW swapper 0x"XGE_OS_LLXFMT,
	        (unsigned long long)val64);

#elif !defined(XGE_OS_HOST_BIG_ENDIAN)

	/*
	 * Initially we enable all bits to make it accessible by the driver,
	 * then we selectively enable only those bits that we want to set.
	 * i.e. force swapper to swap for the first time since second write
	 * will overwrite with the final settings.
	 *
	 * Use only for little endian platforms.
	 */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	          0xffffffffffffffffULL, &bar0->swapper_ctrl);
	xge_os_wmb();
	val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE |
	     XGE_HAL_SWAPPER_CTRL_PIF_R_SE |
	     XGE_HAL_SWAPPER_CTRL_PIF_W_FE |
	     XGE_HAL_SWAPPER_CTRL_PIF_W_SE |
	     XGE_HAL_SWAPPER_CTRL_RTH_FE |
	     XGE_HAL_SWAPPER_CTRL_RTH_SE |
	     XGE_HAL_SWAPPER_CTRL_TXP_FE |
	     XGE_HAL_SWAPPER_CTRL_TXP_SE |
	     XGE_HAL_SWAPPER_CTRL_TXD_R_FE |
	     XGE_HAL_SWAPPER_CTRL_TXD_R_SE |
	     XGE_HAL_SWAPPER_CTRL_TXD_W_FE |
	     XGE_HAL_SWAPPER_CTRL_TXD_W_SE |
	     XGE_HAL_SWAPPER_CTRL_TXF_R_FE |
	     XGE_HAL_SWAPPER_CTRL_RXD_R_FE |
	     XGE_HAL_SWAPPER_CTRL_RXD_R_SE |
	     XGE_HAL_SWAPPER_CTRL_RXD_W_FE |
	     XGE_HAL_SWAPPER_CTRL_RXD_W_SE |
	     XGE_HAL_SWAPPER_CTRL_RXF_W_FE |
	     XGE_HAL_SWAPPER_CTRL_XMSI_FE |
	     XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE);

	/*
	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
	    val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE;
	} */
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
	                     &bar0->swapper_ctrl);
	xge_os_wmb();
	/* The upper 32 bits are deliberately written twice — see the
	 * 32bit errata note above. */
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
	                     &bar0->swapper_ctrl);
	xge_os_wmb();
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
	                     &bar0->swapper_ctrl);
	xge_debug_device(XGE_TRACE, "%s", "using little endian set");
#endif

	/* Verifying if endian settings are accurate by reading a feedback
	 * register. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                  &bar0->pif_rd_swapper_fb);
	if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) {
	    xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT,
	          (unsigned long long) val64);
	    return XGE_HAL_ERR_SWAPPER_CTRL;
	}

	xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled");

	return XGE_HAL_OK;
}

/*
 * __hal_device_rts_mac_configure - Configure RTS steering based on
 * destination mac address.
 * @hldev: HAL device handle.
 *
 */
xge_hal_status_e
__hal_device_rts_mac_configure(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/* No-op unless MAC-based steering is enabled in the config. */
	if (!hldev->config.rts_mac_en) {
	    return XGE_HAL_OK;
	}

	/*
	 * Set the receive traffic steering mode from default(classic)
	 * to enhanced.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                  &bar0->rts_ctrl);
	val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	               val64, &bar0->rts_ctrl);
	return XGE_HAL_OK;
}

/*
 * __hal_device_rts_port_configure - Configure RTS steering based on
 * destination or source port number.
 * @hldev: HAL device handle.
1512 * 1513 */ 1514xge_hal_status_e 1515__hal_device_rts_port_configure(xge_hal_device_t *hldev) 1516{ 1517 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1518 u64 val64; 1519 int rnum; 1520 1521 if (!hldev->config.rts_port_en) { 1522 return XGE_HAL_OK; 1523 } 1524 1525 /* 1526 * Set the receive traffic steering mode from default(classic) 1527 * to enhanced. 1528 */ 1529 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1530 &bar0->rts_ctrl); 1531 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; 1532 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1533 val64, &bar0->rts_ctrl); 1534 1535 /* 1536 * Initiate port steering according to per-ring configuration 1537 */ 1538 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) { 1539 int pnum; 1540 xge_hal_ring_queue_t *queue = &hldev->config.ring.queue[rnum]; 1541 1542 if (!queue->configured || queue->rts_port_en) 1543 continue; 1544 1545 for (pnum = 0; pnum < XGE_HAL_MAX_STEERABLE_PORTS; pnum++) { 1546 xge_hal_rts_port_t *port = &queue->rts_ports[pnum]; 1547 1548 /* 1549 * Skip and clear empty ports 1550 */ 1551 if (!port->num) { 1552 /* 1553 * Clear CAM memory 1554 */ 1555 xge_os_pio_mem_write64(hldev->pdev, 1556 hldev->regh0, 0ULL, 1557 &bar0->rts_pn_cam_data); 1558 1559 val64 = BIT(7) | BIT(15); 1560 } else { 1561 /* 1562 * Assign new Port values according 1563 * to configuration 1564 */ 1565 val64 = vBIT(port->num,8,16) | 1566 vBIT(rnum,37,3) | BIT(63); 1567 if (port->src) 1568 val64 = BIT(47); 1569 if (!port->udp) 1570 val64 = BIT(7); 1571 xge_os_pio_mem_write64(hldev->pdev, 1572 hldev->regh0, val64, 1573 &bar0->rts_pn_cam_data); 1574 1575 val64 = BIT(7) | BIT(15) | vBIT(pnum,24,8); 1576 } 1577 1578 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1579 val64, &bar0->rts_pn_cam_ctrl); 1580 1581 /* poll until done */ 1582 if (__hal_device_register_poll(hldev, 1583 &bar0->rts_pn_cam_ctrl, 0, 1584 XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED, 1585 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != 1586 
XGE_HAL_OK) { 1587 /* upper layer may require to repeat */ 1588 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1589 } 1590 } 1591 } 1592 return XGE_HAL_OK; 1593} 1594 1595/* 1596 * __hal_device_rts_qos_configure - Configure RTS steering based on 1597 * qos. 1598 * @hldev: HAL device handle. 1599 * 1600 */ 1601xge_hal_status_e 1602__hal_device_rts_qos_configure(xge_hal_device_t *hldev) 1603{ 1604 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1605 u64 val64; 1606 int j, rx_ring_num; 1607 1608 if (!hldev->config.rts_qos_en) { 1609 return XGE_HAL_OK; 1610 } 1611 1612 /* First clear the RTS_DS_MEM_DATA */ 1613 val64 = 0; 1614 for (j = 0; j < 64; j++ ) 1615 { 1616 /* First clear the value */ 1617 val64 = XGE_HAL_RTS_DS_MEM_DATA(0); 1618 1619 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1620 &bar0->rts_ds_mem_data); 1621 1622 val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE | 1623 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD | 1624 XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j ); 1625 1626 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1627 &bar0->rts_ds_mem_ctrl); 1628 1629 1630 /* poll until done */ 1631 if (__hal_device_register_poll(hldev, 1632 &bar0->rts_ds_mem_ctrl, 0, 1633 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED, 1634 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1635 /* upper layer may require to repeat */ 1636 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1637 } 1638 1639 } 1640 1641 rx_ring_num = 0; 1642 for (j = 0; j < XGE_HAL_MAX_RING_NUM; j++) { 1643 if (hldev->config.ring.queue[j].configured) 1644 rx_ring_num++; 1645 } 1646 1647 switch (rx_ring_num) { 1648 case 1: 1649 val64 = 0x0; 1650 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1651 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1652 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1653 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 
	                           &bar0->rx_w_round_robin_3);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
	    break;
	case 2:
	    val64 = 0x0001000100010001ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
	    val64 = 0x0001000100000000ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
	    break;
	case 3:
	    val64 = 0x0001020001020001ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
	    val64 = 0x0200010200010200ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
	    val64 = 0x0102000102000102ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
	    val64 = 0x0001020001020001ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
	    val64 = 0x0200010200000000ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
	    break;
	case 4:
	    val64 = 0x0001020300010203ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
	    val64 = 0x0001020300000000ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
	    break;
	case 5:
	    val64 = 0x0001020304000102ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
	    val64 = 0x0304000102030400ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
	    val64 = 0x0102030400010203ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
	    val64 = 0x0400010203040001ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
	    val64 = 0x0203040000000000ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
	    break;
	case 6:
	    val64 = 0x0001020304050001ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
	    val64 = 0x0203040500010203ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
	    val64 = 0x0405000102030405ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
	    val64 = 0x0001020304050001ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
	    val64 = 0x0203040500000000ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
	    break;
	case 7:
	    val64 = 0x0001020304050600ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
	    val64 = 0x0102030405060001ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
	    val64 = 0x0203040506000102ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
	    val64 = 0x0304050600010203ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
	    val64 = 0x0405060000000000ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
	    break;
	case 8:
	    val64 = 0x0001020304050607ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
	    val64 = 0x0001020300000000ULL;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
	    break;
	}

	return XGE_HAL_OK;
}

/*
 * xge__hal_device_rts_mac_enable
 *
 * @devh: HAL device handle.
 * @index: index number where the MAC addr will be stored
 * @macaddr: MAC address
 *
 * - Enable RTS steering for the given MAC address. This function has to be
 * called with lock acquired.
 *
 * NOTE:
 * 1. ULD has to call this function with the index value which
 *    satisfies the following condition:
 *	ring_num = (index % 8)
 * 2.ULD also needs to make sure that the index is not
 *   occupied by any MAC address. If that index has any MAC address
 *   it will be overwritten and HAL will not check for it.
 *
 */
xge_hal_status_e
xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr)
{
	int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
	xge_hal_status_e status;

	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

	/* Herc supports a larger MAC address table. */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	    max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;

	if ( index >= max_addr )
	    return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;

	/*
	 * Set the MAC address at the given location marked by index.
	 */
	status = xge_hal_device_macaddr_set(hldev, index, macaddr);
	if (status != XGE_HAL_OK) {
	    xge_debug_device(XGE_ERR, "%s",
	          "Not able to set the mac addr");
	    return status;
	}

	return xge_hal_device_rts_section_enable(hldev, index);
}

/*
 * xge__hal_device_rts_mac_disable
 * @hldev: HAL device handle.
 * @index: index number where to disable the MAC addr
 *
 * Disable RTS Steering based on the MAC address.
 * This function should be called with lock acquired.
 *
 */
xge_hal_status_e
xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index)
{
	xge_hal_status_e status;
	/* Disabling an entry is done by programming the broadcast
	 * address into that slot. */
	u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;

	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

	xge_debug_ll(XGE_TRACE, "the index value is %d ", index);

	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	    max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;

	if ( index >= max_addr )
	    return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;

	/*
	 * Disable MAC address @ given index location
	 */
	status = xge_hal_device_macaddr_set(hldev, index, macaddr);
	if (status != XGE_HAL_OK) {
	    xge_debug_device(XGE_ERR, "%s",
	          "Not able to set the mac addr");
	    return status;
	}

	return XGE_HAL_OK;
}


/*
 * __hal_device_rth_configure - Configure RTH for the device
 * @hldev: HAL device handle.
 *
 * Using IT (Indirection Table).
 */
xge_hal_status_e
__hal_device_rth_it_configure(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	int rings[XGE_HAL_MAX_RING_NUM]={0};
	int rnum;
	int rmax;
	int buckets_num;
	int bucket;

	/* No-op unless RTH is enabled in the config. */
	if (!hldev->config.rth_en) {
	    return XGE_HAL_OK;
	}

	/*
	 * Set the receive traffic steering mode from default(classic)
	 * to enhanced.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                  &bar0->rts_ctrl);
	val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	               val64, &bar0->rts_ctrl);

	buckets_num = (1 << hldev->config.rth_bucket_size);

	/* Collect the rings that are configured and RTH-enabled. */
	rmax=0;
	for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
	    if (hldev->config.ring.queue[rnum].configured &&
	            hldev->config.ring.queue[rnum].rth_en)
	            rings[rmax++] = rnum;
	}

	rnum = 0;
	/* for starters: fill in all the buckets with rings "equally" */
	for (bucket = 0; bucket < buckets_num; bucket++) {

	    if (rnum == rmax)
	       rnum = 0;

	    /* write data */
	    val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
	            XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	             &bar0->rts_rth_map_mem_data);

	    /* execute */
	    val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
	            XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
	            XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	             &bar0->rts_rth_map_mem_ctrl);

	    /* poll until done */
	    if (__hal_device_register_poll(hldev,
	        &bar0->rts_rth_map_mem_ctrl, 0,
	        XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	        return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	    }

	    rnum++;
	}

	/* Enable RTH for all supported IPv4/IPv6 TCP/UDP hash variants. */
	val64 = XGE_HAL_RTS_RTH_EN;
	val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size);
	val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN | XGE_HAL_RTS_RTH_IPV4_EN |
	     XGE_HAL_RTS_RTH_TCP_IPV6_EN |XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN |
	     XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	             &bar0->rts_rth_cfg);

	xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d",
	          hldev->config.rth_bucket_size);

	return XGE_HAL_OK;
}


/*
 * __hal_spdm_entry_add - Add a new entry to the SPDM table.
 *
 * Add a new entry to the SPDM table
 *
 * This function add a new entry to the SPDM table.
 *
 * Note:
 * This function should be called with spdm_lock.
 *
 * See also: xge_hal_spdm_entry_add , xge_hal_spdm_entry_remove.
 */
static xge_hal_status_e
__hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip,
	    xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp,
	    u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	u64 spdm_line_arr[8];
	u8 line_no;

	/*
	 * Clear the SPDM READY bit
	 */
	val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	               &bar0->rxpic_int_reg);

	xge_debug_device(XGE_TRACE,
	        "L4 SP %x:DP %x: hash %x tgt_queue %d ",
	        l4_sp, l4_dp, jhash_value, tgt_queue);

	xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr));

	/*
	 * Construct the SPDM entry.
	 */
	spdm_line_arr[0] = vBIT(l4_sp,0,16) |
	           vBIT(l4_dp,16,32) |
	           vBIT(tgt_queue,53,3) |
	           vBIT(is_tcp,59,1) |
	           vBIT(is_ipv4,63,1);


	if (is_ipv4) {
	    spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) |
	               vBIT(dst_ip->ipv4.addr,32,32);

	} else {
	    /* IPv6 source and destination addresses occupy two 64-bit
	     * lines each. */
	    xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8);
	    xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8);
	    xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8);
	    xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8);
	}

	spdm_line_arr[7] = vBIT(jhash_value,0,32) |
	            BIT(63); /* entry enable bit */

	/*
	 * Add the entry to the SPDM table
	 */
	for(line_no = 0; line_no < 8; line_no++) {
	    /* Each SPDM entry is 64 bytes (8 lines of 8 bytes). */
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            spdm_line_arr[line_no],
	            (void *)((char *)hldev->spdm_mem_base +
	                    (spdm_entry * 64) +
	                    (line_no * 8)));
	}

	/*
	 * Wait for the operation to be completed.
	 */
	if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
	        XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	/*
	 * Add this information to a local SPDM table. The purpose of
	 * maintaining a local SPDM table is to avoid a search in the
	 * adapter SPDM table for spdm entry lookup which is very costly
	 * in terms of time.
	 */
	hldev->spdm_table[spdm_entry]->in_use = 1;
	xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip,
	        sizeof(xge_hal_ipaddr_t));
	xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip,
	        sizeof(xge_hal_ipaddr_t));
	hldev->spdm_table[spdm_entry]->l4_sp = l4_sp;
	hldev->spdm_table[spdm_entry]->l4_dp = l4_dp;
	hldev->spdm_table[spdm_entry]->is_tcp = is_tcp;
	hldev->spdm_table[spdm_entry]->is_ipv4 = is_ipv4;
	hldev->spdm_table[spdm_entry]->tgt_queue = tgt_queue;
	hldev->spdm_table[spdm_entry]->jhash_value = jhash_value;
	hldev->spdm_table[spdm_entry]->spdm_entry = spdm_entry;

	return XGE_HAL_OK;
}

/*
 * __hal_device_rth_spdm_configure - Configure RTH for the device
 * @hldev: HAL device handle.
 *
 * Using SPDM (Socket-Pair Direct Match).
 */
xge_hal_status_e
__hal_device_rth_spdm_configure(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
	u64 val64;
	u8 spdm_bar_num;
	u32 spdm_bar_offset;
	int spdm_table_size;
	int i;

	/* No-op unless SPDM-based RTH is enabled in the config. */
	if (!hldev->config.rth_spdm_en) {
	    return XGE_HAL_OK;
	}

	/*
	 * Retrieve the base address of SPDM Table.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev,
	            hldev->regh0, &bar0->spdm_bir_offset);

	spdm_bar_num    = XGE_HAL_SPDM_PCI_BAR_NUM(val64);
	spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64);


	/*
	 * spdm_bar_num specifies the PCI bar num register used to
	 * address the memory space. spdm_bar_offset specifies the offset
	 * of the SPDM memory within the bar num memory space.
2044 */ 2045 switch (spdm_bar_num) { 2046 case 0: 2047 { 2048 hldev->spdm_mem_base = (char *)bar0 + 2049 (spdm_bar_offset * 8); 2050 break; 2051 } 2052 case 1: 2053 { 2054 char *bar1 = (char *)hldev->bar1; 2055 hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8); 2056 break; 2057 } 2058 default: 2059 xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1))); 2060 } 2061 2062 /* 2063 * Retrieve the size of SPDM table(number of entries). 2064 */ 2065 val64 = xge_os_pio_mem_read64(hldev->pdev, 2066 hldev->regh0, &bar0->spdm_structure); 2067 hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64); 2068 2069 2070 spdm_table_size = hldev->spdm_max_entries * 2071 sizeof(xge_hal_spdm_entry_t); 2072 if (hldev->spdm_table == NULL) { 2073 void *mem; 2074 2075 /* 2076 * Allocate memory to hold the copy of SPDM table. 2077 */ 2078 if ((hldev->spdm_table = (xge_hal_spdm_entry_t **) 2079 xge_os_malloc( 2080 hldev->pdev, 2081 (sizeof(xge_hal_spdm_entry_t *) * 2082 hldev->spdm_max_entries))) == NULL) { 2083 return XGE_HAL_ERR_OUT_OF_MEMORY; 2084 } 2085 2086 if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL) 2087 { 2088 xge_os_free(hldev->pdev, hldev->spdm_table, 2089 (sizeof(xge_hal_spdm_entry_t *) * 2090 hldev->spdm_max_entries)); 2091 return XGE_HAL_ERR_OUT_OF_MEMORY; 2092 } 2093 2094 xge_os_memzero(mem, spdm_table_size); 2095 for (i = 0; i < hldev->spdm_max_entries; i++) { 2096 hldev->spdm_table[i] = (xge_hal_spdm_entry_t *) 2097 ((char *)mem + 2098 i * sizeof(xge_hal_spdm_entry_t)); 2099 } 2100 xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev); 2101 } else { 2102 /* 2103 * We are here because the host driver tries to 2104 * do a soft reset on the device. 2105 * Since the device soft reset clears the SPDM table, copy 2106 * the entries from the local SPDM table to the actual one. 
2107 */ 2108 xge_os_spin_lock(&hldev->spdm_lock); 2109 for (i = 0; i < hldev->spdm_max_entries; i++) { 2110 xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i]; 2111 2112 if (spdm_entry->in_use) { 2113 if (__hal_spdm_entry_add(hldev, 2114 &spdm_entry->src_ip, 2115 &spdm_entry->dst_ip, 2116 spdm_entry->l4_sp, 2117 spdm_entry->l4_dp, 2118 spdm_entry->is_tcp, 2119 spdm_entry->is_ipv4, 2120 spdm_entry->tgt_queue, 2121 spdm_entry->jhash_value, 2122 spdm_entry->spdm_entry) 2123 != XGE_HAL_OK) { 2124 /* Log an warning */ 2125 xge_debug_device(XGE_ERR, 2126 "SPDM table update from local" 2127 " memory failed"); 2128 } 2129 } 2130 } 2131 xge_os_spin_unlock(&hldev->spdm_lock); 2132 } 2133 2134 /* 2135 * Set the receive traffic steering mode from default(classic) 2136 * to enhanced. 2137 */ 2138 val64 = xge_os_pio_mem_read64(hldev->pdev, 2139 hldev->regh0, &bar0->rts_ctrl); 2140 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; 2141 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2142 val64, &bar0->rts_ctrl); 2143 2144 /* 2145 * We may not need to configure rts_rth_jhash_cfg register as the 2146 * default values are good enough to calculate the hash. 2147 */ 2148 2149 /* 2150 * As of now, set all the rth mask registers to zero. TODO. 2151 */ 2152 for(i = 0; i < 5; i++) { 2153 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2154 0, &bar0->rts_rth_hash_mask[i]); 2155 } 2156 2157 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2158 0, &bar0->rts_rth_hash_mask_5); 2159 2160 if (hldev->config.rth_spdm_use_l4) { 2161 val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4; 2162 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2163 val64, &bar0->rts_rth_status); 2164 } 2165 2166 val64 = XGE_HAL_RTS_RTH_EN; 2167 val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN; 2168 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2169 &bar0->rts_rth_cfg); 2170 2171 2172 return XGE_HAL_OK; 2173} 2174 2175/* 2176 * __hal_device_pci_init 2177 * @hldev: HAL device handle. 
 *
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
 */
static void
__hal_device_pci_init(xge_hal_device_t *hldev)
{
	int i, pcisize = 0;
	u16 cmd = 0;
	u8 val;

	/* Store PCI device ID and revision for future references where in we
	 * decide Xena revision using PCI sub system ID */
	xge_os_pci_read16(hldev->pdev,hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, device_id),
	        &hldev->device_id);
	xge_os_pci_read8(hldev->pdev,hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, revision),
	        &hldev->revision);

	/* Config-space size to save depends on the adapter generation;
	 * pcisize stays 0 (nothing saved) for unrecognized cards. */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	    pcisize = XGE_HAL_PCISIZE_HERC;
	else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
	    pcisize = XGE_HAL_PCISIZE_XENA;

	/* save original PCI config space to restore it on device_terminate() */
	for (i = 0; i < pcisize; i++) {
	    xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
	                (u32*)&hldev->pci_config_space_bios + i);
	}

	/* Set the PErr Response bit and SERR in PCI command register
	 * (0x140 = bit 6 "Parity Error Response" | bit 8 "SERR# Enable"). */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
	cmd |= 0x140;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	         xge_offsetof(xge_hal_pci_config_le_t, command), cmd);

	/* Set user specified value for the PCI Latency Timer */
	if (hldev->config.latency_timer &&
	    hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
	    xge_os_pci_write8(hldev->pdev, hldev->cfgh,
	             xge_offsetof(xge_hal_pci_config_le_t,
	             latency_timer),
	             (u8)hldev->config.latency_timer);
	}
	/* Read back latency timer to reflect it into user level */
	xge_os_pci_read8(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val);
	hldev->config.latency_timer = val;

	/* Enable Data Parity Error Recovery in PCI-X command register
	 * (bit 0). */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
	cmd |= 1;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	         xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);

	/* Set MMRB count (Maximum Memory Read Byte Count, bits 3:2)
	 * in PCI-X command register. */
	if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) {
	    cmd &= 0xFFF3;
	    cmd |= hldev->config.mmrb_count << 2;
	    xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	           xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
	           cmd);
	}
	/* Read back MMRB count to reflect it into user level */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
	        &cmd);
	cmd &= 0x000C;
	hldev->config.mmrb_count = cmd>>2;

	/* Setting Maximum outstanding splits (PCI-X command bits 6:4)
	 * based on system type. */
	if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) {
	    xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
	        &cmd);
	    cmd &= 0xFF8F;
	    cmd |= hldev->config.max_splits_trans << 4;
	    xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
	        cmd);
	}

	/* Read back max split trans to reflect it into user level */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
	cmd &= 0x0070;
	hldev->config.max_splits_trans = cmd>>4;

	/* Forcibly disabling relaxed ordering capability of the card
	 * (clear bit 1 of the PCI-X command register). */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
	cmd &= 0xFFFD;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	         xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);

	/* save PCI config space for future resets */
	for (i = 0; i < pcisize; i++) {
	    xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
	                (u32*)&hldev->pci_config_space + i);
	}
}

/*
 * __hal_device_pci_info_get - Get PCI bus informations such as width,
 * frequency and mode.
 * @devh: HAL device handle.
 * @pci_mode: pointer to a variable of enumerated type
 *        xge_hal_pci_mode_e{}.
 * @bus_frequency: pointer to a variable of enumerated type
 *        xge_hal_pci_bus_frequency_e{}.
 * @bus_width: pointer to a variable of enumerated type
 *        xge_hal_pci_bus_width_e{}.
 *
 * Get pci mode, frequency, and PCI bus width.
 *
 * Returns: one of the xge_hal_status_e{} enumerated types.
 * XGE_HAL_OK - for success.
 * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card.
 * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card.
 *
 * See Also: xge_hal_pci_mode_e, xge_hal_pci_bus_frequency_e,
 * xge_hal_pci_bus_width_e.
 */
static xge_hal_status_e
__hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
	    xge_hal_pci_bus_frequency_e *bus_frequency,
	    xge_hal_pci_bus_width_e *bus_width)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_status_e rc_status = XGE_HAL_OK;
	xge_hal_card_e card_id = xge_hal_device_check_id (devh);

#ifdef XGE_HAL_HERC_EMULATION
	/* Emulation platform: hard-code 66MHz PCI (bus width is not set). */
	hldev->config.pci_freq_mherz =
	    XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
	*bus_frequency =
	    XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
	*pci_mode = XGE_HAL_PCI_66MHZ_MODE;
#else
	if (card_id == XGE_HAL_CARD_HERC) {
	    /* Herc reports bus width and mode in its pci_info register;
	     * the mode field lives in the top nibble (bits 63:60). */
	    xge_hal_pci_bar0_t *bar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	    u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->pci_info);
	    if (XGE_HAL_PCI_32_BIT & pci_info)
	        *bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT;
	    else
	        *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
	    switch((pci_info & XGE_HAL_PCI_INFO)>>60)
	    {
	        case XGE_HAL_PCI_33MHZ_MODE:
	             *bus_frequency =
	                 XGE_HAL_PCI_BUS_FREQUENCY_33MHZ;
	             *pci_mode = XGE_HAL_PCI_33MHZ_MODE;
	             break;
	        case XGE_HAL_PCI_66MHZ_MODE:
	             *bus_frequency =
	                 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
	             *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
	             break;
	        case XGE_HAL_PCIX_M1_66MHZ_MODE:
	             *bus_frequency =
	                 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
	             *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE;
	             break;
	        case XGE_HAL_PCIX_M1_100MHZ_MODE:
	             *bus_frequency =
	                 XGE_HAL_PCI_BUS_FREQUENCY_100MHZ;
	             *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE;
	             break;
	        case XGE_HAL_PCIX_M1_133MHZ_MODE:
	             *bus_frequency =
	                 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
	             *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE;
	             break;
	        /* PCI-X Mode 2 doubles the effective data rate, hence
	         * the reported frequency is twice the clock below. */
	        case XGE_HAL_PCIX_M2_66MHZ_MODE:
	             *bus_frequency =
	                 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
	             *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE;
	             break;
	        case XGE_HAL_PCIX_M2_100MHZ_MODE:
	             *bus_frequency =
	                 XGE_HAL_PCI_BUS_FREQUENCY_200MHZ;
	             *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE;
	             break;
	        case XGE_HAL_PCIX_M2_133MHZ_MODE:
	             *bus_frequency =
	                 XGE_HAL_PCI_BUS_FREQUENCY_266MHZ;
	             *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE;
	             break;
	        case XGE_HAL_PCIX_M1_RESERVED:
	        case XGE_HAL_PCIX_M1_66MHZ_NS:
	        case XGE_HAL_PCIX_M1_100MHZ_NS:
	        case XGE_HAL_PCIX_M1_133MHZ_NS:
	        case XGE_HAL_PCIX_M2_RESERVED:
	        case XGE_HAL_PCIX_533_RESERVED:
	        default:
	             rc_status = XGE_HAL_ERR_INVALID_PCI_INFO;
	             xge_debug_device(XGE_ERR,
	                 "invalid pci info "XGE_OS_LLXFMT,
	                 (unsigned long long)pci_info);
	             break;
	    }
	    if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO)
	        xge_debug_device(XGE_TRACE, "PCI info: mode %d width "
	            "%d frequency %d", *pci_mode, *bus_width,
	            *bus_frequency);
	    if (hldev->config.pci_freq_mherz ==
	            XGE_HAL_DEFAULT_USE_HARDCODE) {
	        hldev->config.pci_freq_mherz = *bus_frequency;
	    }
	}
	/* for XENA, we report PCI mode, only. PCI bus frequency, and bus width
	 * are set to unknown */
	else if (card_id == XGE_HAL_CARD_XENA) {
	    u32 pcix_status;
	    u8 dev_num, bus_num;
	    /* initialize defaults for XENA */
	    *bus_frequency  = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
	    *bus_width  = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
	    xge_os_pci_read32(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, pcix_status),
	        &pcix_status);
	    dev_num = (u8)((pcix_status & 0xF8) >> 3);
	    bus_num = (u8)((pcix_status & 0xFF00) >> 8);
	    /* dev_num/bus_num both zero is taken to mean a conventional
	     * PCI (non-PCI-X) slot. */
	    if (dev_num == 0 && bus_num == 0)
	        *pci_mode = XGE_HAL_PCI_BASIC_MODE;
	    else
	        *pci_mode = XGE_HAL_PCIX_BASIC_MODE;
	    xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode);
	    if (hldev->config.pci_freq_mherz ==
	            XGE_HAL_DEFAULT_USE_HARDCODE) {
	        /*
	         * There is no way to detect BUS frequency on Xena,
	         * so, in case of automatic configuration we hopelessly
	         * assume 133MHZ.
	         */
	        hldev->config.pci_freq_mherz =
	            XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
	    }
	} else if (card_id == XGE_HAL_CARD_TITAN) {
	    /* Titan: fixed 64-bit / 250MHz bus. */
	    *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
	    *bus_frequency  = XGE_HAL_PCI_BUS_FREQUENCY_250MHZ;
	    if (hldev->config.pci_freq_mherz ==
	            XGE_HAL_DEFAULT_USE_HARDCODE) {
	        hldev->config.pci_freq_mherz = *bus_frequency;
	    }
	} else{
	    rc_status =  XGE_HAL_ERR_BAD_DEVICE_ID;
	    xge_debug_device(XGE_ERR, "invalid device id %d", card_id);
	}
#endif

	return rc_status;
}

/*
 * __hal_device_handle_link_up_ind
 * @hldev: HAL device handle.
 *
 * Link up indication handler. The function is invoked by HAL when
 * Xframe indicates that the link is up for programmable amount of time.
 */
static int
__hal_device_handle_link_up_ind(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * If the previous link state is not down, return.
 */
	if (hldev->link_state == XGE_HAL_LINK_UP) {
#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	    /* Already up: on Herc keep the link-up interrupt masked and
	     * the link-down interrupt unmasked. */
	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
	        val64 = xge_os_pio_mem_read64(
	            hldev->pdev, hldev->regh0,
	            &bar0->misc_int_mask);
	        val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
	        val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                val64, &bar0->misc_int_mask);
	    }
#endif
	    xge_debug_device(XGE_TRACE,
	        "link up indication while link is up, ignoring..");
	    return 0;
	}

	/* Now re-enable it as due to noise, hardware turned it off */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->adapter_control);
	val64 |= XGE_HAL_ADAPTER_CNTL_EN;
	/* NOTE(review): this clears the ADAPTER_ECC_EN bit although the
	 * original comment here said "ECC enable" -- confirm bit polarity. */
	val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	            &bar0->adapter_control);

	/* Turn on the Laser */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->adapter_control);
	val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON |
	        XGE_HAL_ADAPTER_LED_ON);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	            &bar0->adapter_control);

#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->adapter_status);
	    if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
	             XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) {
	        xge_debug_device(XGE_TRACE, "%s",
	            "fail to transition link to up...");
	        return 0;
	    }
	    else {
	        /*
	         * Mask the Link Up interrupt and unmask the Link Down
	         * interrupt.
	         */
	        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->misc_int_mask);
	        val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
	        val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	            &bar0->misc_int_mask);
	        xge_debug_device(XGE_TRACE, "calling link up..");
	        hldev->link_state = XGE_HAL_LINK_UP;

	        /* notify ULD */
	        if (g_xge_hal_driver->uld_callbacks.link_up) {
	            g_xge_hal_driver->uld_callbacks.link_up(
	                hldev->upper_layer_info);
	        }
	        return 1;
	    }
	}
#endif
	xge_os_mdelay(1);
	/* Debounce: wait for the RMAC fault bits to clear before
	 * declaring the link up. */
	if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
	        (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
	        XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
	        XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {

	    /* notify ULD */
	    (void) xge_queue_produce_context(hldev->queueh,
	                     XGE_HAL_EVENT_LINK_IS_UP,
	                     hldev);
	    /* link is up after been enabled */
	    return 1;
	} else {
	    xge_debug_device(XGE_TRACE, "%s",
	        "fail to transition link to up...");
	    return 0;
	}
}

/*
 * __hal_device_handle_link_down_ind
 * @hldev: HAL device handle.
 *
 * Link down indication handler. The function is invoked by HAL when
 * Xframe indicates that the link is down.
 */
static int
__hal_device_handle_link_down_ind(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * If the previous link state is not up, return.
 */
	if (hldev->link_state == XGE_HAL_LINK_DOWN) {
#ifdef  XGE_HAL_PROCESS_LINK_INT_IN_ISR
	    /* Already down: on Herc keep the link-down interrupt masked
	     * and the link-up interrupt unmasked. */
	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
	        val64 = xge_os_pio_mem_read64(
	            hldev->pdev, hldev->regh0,
	            &bar0->misc_int_mask);
	        val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
	        val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                val64, &bar0->misc_int_mask);
	    }
#endif
	    xge_debug_device(XGE_TRACE,
	        "link down indication while link is down, ignoring..");
	    return 0;
	}
	xge_os_mdelay(1);

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	              &bar0->adapter_control);

	/* try to debounce the link only if the adapter is enabled. */
	if (val64 & XGE_HAL_ADAPTER_CNTL_EN) {
	    if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
	            (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
	            XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
	            XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
	        xge_debug_device(XGE_TRACE,
	            "link is actually up (possible noisy link?), ignoring.");
	        return(0);
	    }
	}

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	              &bar0->adapter_control);
	/* turn off LED */
	val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	             &bar0->adapter_control);

#ifdef  XGE_HAL_PROCESS_LINK_INT_IN_ISR
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	    /*
	     * Mask the Link Down interrupt and unmask the Link up
	     * interrupt
	     */
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->misc_int_mask);
	    val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
	    val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->misc_int_mask);

	    /* link is down */
	    xge_debug_device(XGE_TRACE, "calling link down..");
	    hldev->link_state = XGE_HAL_LINK_DOWN;

	    /* notify ULD */
	    if (g_xge_hal_driver->uld_callbacks.link_down) {
	        g_xge_hal_driver->uld_callbacks.link_down(
	                hldev->upper_layer_info);
	    }
	    return 1;
	}
#endif
	/* notify ULD */
	(void) xge_queue_produce_context(hldev->queueh,
	                 XGE_HAL_EVENT_LINK_IS_DOWN,
	                 hldev);
	/* link is down */
	return 1;
}
/*
 * __hal_device_handle_link_state_change
 * @hldev: HAL device handle.
 *
 * Link state change handler. The function is invoked by HAL when
 * Xframe indicates link state change condition. The code here makes sure to
 * 1) ignore redundant state change indications;
 * 2) execute link-up sequence, and handle the failure to bring the link up;
 * 3) generate XGE_HAL_LINK_UP/DOWN event for the subsequent handling by
 *    upper-layer driver (ULD).
 */
static int
__hal_device_handle_link_state_change(xge_hal_device_t *hldev)
{
	u64 hw_status;
	int hw_link_state;
	int retcode;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	int i = 0;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	              &bar0->adapter_control);

	/* If the adapter is not enabled but the hal thinks we are in the up
	 * state then transition to the down state.
	 */
	if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) &&
	     (hldev->link_state == XGE_HAL_LINK_UP) ) {
	    return(__hal_device_handle_link_down_ind(hldev));
	}

	do {
	    xge_os_mdelay(1);
	    (void) xge_hal_device_status(hldev, &hw_status);
	    hw_link_state = (hw_status &
	        (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
	        XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ?
	        XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP;

	    /* check if the current link state is still considered
	     * to be changed.
	     * This way we will make sure that this is
	     * not a noise which needs to be filtered out */
	    if (hldev->link_state == hw_link_state)
	        break;
	} while (i++ < hldev->config.link_valid_cnt);

	/* If the current link state is same as previous, just return */
	if (hldev->link_state == hw_link_state)
	    retcode = 0;
	/* detected state change */
	else if (hw_link_state == XGE_HAL_LINK_UP)
	    retcode = __hal_device_handle_link_up_ind(hldev);
	else
	    retcode = __hal_device_handle_link_down_ind(hldev);
	return retcode;
}

/*
 * __hal_device_handle_serr - System (fatal) error handler.
 * @hldev: HAL device handle.
 * @reg: name of the register whose read produced @value (used for logging).
 * @value: value read from that register.
 *
 * Bumps the SERR statistics counter, optionally dumps the device state,
 * and queues an XGE_HAL_EVENT_SERR event (carrying @value) toward the ULD.
 */
static void
__hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value)
{
	hldev->stats.sw_dev_err_stats.serr_cnt++;
	if (hldev->config.dump_on_serr) {
#ifdef XGE_HAL_USE_MGMT_AUX
	    (void) xge_hal_aux_device_dump(hldev);
#endif
	}

	(void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev,
	           1, sizeof(u64), (void *)&value);

	xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
	          (unsigned long long) value);
}

/*
 * __hal_device_handle_eccerr - ECC error handler.
 * @hldev: HAL device handle.
 * @reg: name of the register whose read produced @value (used for logging).
 * @value: value read from that register.
 *
 * Optionally dumps the device state; queues an XGE_HAL_EVENT_ECCERR event
 * only on Xena.
 */
static void
__hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value)
{
	if (hldev->config.dump_on_eccerr) {
#ifdef XGE_HAL_USE_MGMT_AUX
	    (void) xge_hal_aux_device_dump(hldev);
#endif
	}

	/* Herc smart enough to recover on its own! */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
	    (void) xge_queue_produce(hldev->queueh,
	           XGE_HAL_EVENT_ECCERR, hldev,
	           1, sizeof(u64), (void *)&value);
	}

	xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
	                  (unsigned long long) value);
}

/*
 * __hal_device_handle_parityerr - Parity error handler.
 * @hldev: HAL device handle.
 * @reg: name of the register whose read produced @value (used for logging).
 * @value: value read from that register.
 *
 * Optionally dumps the device state and queues an XGE_HAL_EVENT_PARITYERR
 * event toward the ULD.
 */
static void
__hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value)
{
	if (hldev->config.dump_on_parityerr) {
#ifdef XGE_HAL_USE_MGMT_AUX
	    (void) xge_hal_aux_device_dump(hldev);
#endif
	}
	(void) xge_queue_produce_context(hldev->queueh,
	        XGE_HAL_EVENT_PARITYERR, hldev);

	xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
	                  (unsigned long long) value);
}

/*
 * __hal_device_handle_targetabort - PCI target-abort handler.
 * @hldev: HAL device handle.
 *
 * Queues an XGE_HAL_EVENT_TARGETABORT event toward the ULD.
 */
static void
__hal_device_handle_targetabort(xge_hal_device_t *hldev)
{
	(void) xge_queue_produce_context(hldev->queueh,
	        XGE_HAL_EVENT_TARGETABORT, hldev);
}


/*
 * __hal_device_hw_initialize
 * @hldev: HAL device handle.
 *
 * Initialize Xframe hardware.
 */
static xge_hal_status_e
__hal_device_hw_initialize(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	xge_hal_status_e status;
	u64 val64;

	/* Set proper endian settings and verify the same by reading the PIF
	 * Feed-back register. */
	status = __hal_device_set_swapper(hldev);
	if (status != XGE_HAL_OK) {
	    return status;
	}

	/* update the pci mode, frequency, and width */
	if (__hal_device_pci_info_get(hldev, &hldev->pci_mode,
	    &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){
	    hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE;
	    hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
	    hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
	    /*
	     * FIXME: this cannot happen.
2783 * But if it happens we cannot continue just like that 2784 */ 2785 xge_debug_device(XGE_ERR, "unable to get pci info"); 2786 } 2787 2788 if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) || 2789 (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) || 2790 (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) { 2791 /* PCI optimization: set TxReqTimeOut 2792 * register (0x800+0x120) to 0x1ff or 2793 * something close to this. 2794 * Note: not to be used for PCI-X! */ 2795 2796 val64 = XGE_HAL_TXREQTO_VAL(0x1FF); 2797 val64 |= XGE_HAL_TXREQTO_EN; 2798 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2799 &bar0->txreqtimeout); 2800 2801 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, 2802 &bar0->read_retry_delay); 2803 2804 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, 2805 &bar0->write_retry_delay); 2806 2807 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode"); 2808 } 2809 2810 if (hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_266MHZ || 2811 hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_250MHZ) { 2812 2813 /* Optimizing for PCI-X 266/250 */ 2814 2815 val64 = XGE_HAL_TXREQTO_VAL(0x7F); 2816 val64 |= XGE_HAL_TXREQTO_EN; 2817 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2818 &bar0->txreqtimeout); 2819 2820 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI-X 266/250 modes"); 2821 } 2822 2823 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2824 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL, 2825 &bar0->read_retry_delay); 2826 2827 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL, 2828 &bar0->write_retry_delay); 2829 } 2830 2831 /* added this to set the no of bytes used to update lso_bytes_sent 2832 returned TxD0 */ 2833 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2834 &bar0->pic_control_2); 2835 val64 &= ~XGE_HAL_TXD_WRITE_BC(0x2); 2836 val64 |= XGE_HAL_TXD_WRITE_BC(0x4); 2837 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2838 &bar0->pic_control_2); 
2839 /* added this to clear the EOI_RESET field while leaving XGXS_RESET 2840 * in reset, then a 1-second delay */ 2841 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2842 XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset); 2843 xge_os_mdelay(1000); 2844 2845 /* Clear the XGXS_RESET field of the SW_RESET register in order to 2846 * release the XGXS from reset. Its reset value is 0xA5; write 0x00 2847 * to activate the XGXS. The core requires a minimum 500 us reset.*/ 2848 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset); 2849 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2850 &bar0->sw_reset); 2851 xge_os_mdelay(1); 2852 2853 /* read registers in all blocks */ 2854 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2855 &bar0->mac_int_mask); 2856 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2857 &bar0->mc_int_mask); 2858 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2859 &bar0->xgxs_int_mask); 2860 2861 /* set default MTU and steer based on length*/ 2862 __hal_ring_mtu_set(hldev, hldev->config.mtu+22); // Alway set 22 bytes extra for steering to work 2863 2864 if (hldev->config.mac.rmac_bcast_en) { 2865 xge_hal_device_bcast_enable(hldev); 2866 } else { 2867 xge_hal_device_bcast_disable(hldev); 2868 } 2869 2870#ifndef XGE_HAL_HERC_EMULATION 2871 __hal_device_xaui_configure(hldev); 2872#endif 2873 __hal_device_mac_link_util_set(hldev); 2874 2875 __hal_device_mac_link_util_set(hldev); 2876 2877 /* 2878 * Keep its PCI REQ# line asserted during a write 2879 * transaction up to the end of the transaction 2880 */ 2881 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2882 &bar0->misc_control); 2883 2884 val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN; 2885 2886 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2887 val64, &bar0->misc_control); 2888 2889 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2890 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2891 &bar0->misc_control); 2892 2893 val64 |= 
XGE_HAL_MISC_CONTROL_LINK_FAULT; 2894 2895 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2896 val64, &bar0->misc_control); 2897 } 2898 2899 /* 2900 * bimodal interrupts is when all Rx traffic interrupts 2901 * will go to TTI, so we need to adjust RTI settings and 2902 * use adaptive TTI timer. We need to make sure RTI is 2903 * properly configured to sane value which will not 2904 * distrupt bimodal behavior. 2905 */ 2906 if (hldev->config.bimodal_interrupts) { 2907 int i; 2908 2909 /* force polling_cnt to be "0", otherwise 2910 * IRQ workload statistics will be screwed. This could 2911 * be worked out in TXPIC handler later. */ 2912 hldev->config.isr_polling_cnt = 0; 2913 hldev->config.sched_timer_us = 10000; 2914 2915 /* disable all TTI < 56 */ 2916 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) { 2917 int j; 2918 if (!hldev->config.fifo.queue[i].configured) 2919 continue; 2920 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) { 2921 if (hldev->config.fifo.queue[i].tti[j].enabled) 2922 hldev->config.fifo.queue[i].tti[j].enabled = 0; 2923 } 2924 } 2925 2926 /* now configure bimodal interrupts */ 2927 __hal_device_bimodal_configure(hldev); 2928 } 2929 2930 status = __hal_device_tti_configure(hldev, 0); 2931 if (status != XGE_HAL_OK) 2932 return status; 2933 2934 status = __hal_device_rti_configure(hldev, 0); 2935 if (status != XGE_HAL_OK) 2936 return status; 2937 2938 status = __hal_device_rth_it_configure(hldev); 2939 if (status != XGE_HAL_OK) 2940 return status; 2941 2942 status = __hal_device_rth_spdm_configure(hldev); 2943 if (status != XGE_HAL_OK) 2944 return status; 2945 2946 status = __hal_device_rts_mac_configure(hldev); 2947 if (status != XGE_HAL_OK) { 2948 xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed "); 2949 return status; 2950 } 2951 2952 status = __hal_device_rts_port_configure(hldev); 2953 if (status != XGE_HAL_OK) { 2954 xge_debug_device(XGE_ERR, "__hal_device_rts_port_configure Failed "); 2955 return status; 2956 } 2957 2958 status = 
__hal_device_rts_qos_configure(hldev); 2959 if (status != XGE_HAL_OK) { 2960 xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed "); 2961 return status; 2962 } 2963 2964 __hal_device_pause_frames_configure(hldev); 2965 __hal_device_rmac_padding_configure(hldev); 2966 __hal_device_shared_splits_configure(hldev); 2967 2968 /* make sure all interrupts going to be disabled at the moment */ 2969 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0); 2970 2971 /* SXE-008 Transmit DMA arbitration issue */ 2972 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA && 2973 hldev->revision < 4) { 2974 xge_os_pio_mem_write64(hldev->pdev,hldev->regh0, 2975 XGE_HAL_ADAPTER_PCC_ENABLE_FOUR, 2976 &bar0->pcc_enable); 2977 } 2978#if 0 // Removing temporarily as FreeBSD is seeing lower performance 2979 // attributable to this fix. 2980 /* SXE-2-010 */ 2981 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2982 /* Turn off the ECC error reporting for RLDRAM interface */ 2983 if ((status = xge_hal_fix_rldram_ecc_error(hldev)) != XGE_HAL_OK) 2984 return status; 2985 } 2986#endif 2987 __hal_fifo_hw_initialize(hldev); 2988 __hal_ring_hw_initialize(hldev); 2989 2990 if (__hal_device_wait_quiescent(hldev, &val64)) { 2991 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 2992 } 2993 2994 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1, 2995 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT, 2996 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 2997 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!"); 2998 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 2999 } 3000 3001 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent", 3002 (unsigned long long)(ulong_t)hldev); 3003 3004 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX || 3005 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI) { 3006 /* 3007 * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL 3008 * is disabled. 
3009 */ 3010 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3011 &bar0->pic_control); 3012 val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT); 3013 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 3014 &bar0->pic_control); 3015 } 3016 3017 hldev->hw_is_initialized = 1; 3018 hldev->terminating = 0; 3019 return XGE_HAL_OK; 3020} 3021 3022/* 3023 * __hal_device_reset - Reset device only. 3024 * @hldev: HAL device handle. 3025 * 3026 * Reset the device, and subsequently restore 3027 * the previously saved PCI configuration space. 3028 */ 3029#define XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT 50 3030static xge_hal_status_e 3031__hal_device_reset(xge_hal_device_t *hldev) 3032{ 3033 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3034 int i, j, swap_done, pcisize = 0; 3035 u64 val64, rawval = 0ULL; 3036 3037 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { 3038 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 3039 if ( hldev->bar2 ) { 3040 u64 *msix_vetor_table = (u64 *)hldev->bar2; 3041 3042 // 2 64bit words for each entry 3043 for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; 3044 i++) { 3045 hldev->msix_vector_table[i] = 3046 xge_os_pio_mem_read64(hldev->pdev, 3047 hldev->regh2, &msix_vetor_table[i]); 3048 } 3049 } 3050 } 3051 } 3052 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3053 &bar0->pif_rd_swapper_fb); 3054 swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB); 3055 3056 if (swap_done) { 3057 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 3058 (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset); 3059 } else { 3060 u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32); 3061#if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN) 3062 /* swap it */ 3063 val = (((val & (u32)0x000000ffUL) << 24) | 3064 ((val & (u32)0x0000ff00UL) << 8) | 3065 ((val & (u32)0x00ff0000UL) >> 8) | 3066 ((val & (u32)0xff000000UL) >> 24)); 3067#endif 3068 xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val, 3069 
&bar0->sw_reset); 3070 } 3071 3072 pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)? 3073 XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA; 3074 3075 xge_os_mdelay(20); /* Wait for 20 ms after reset */ 3076 3077 { 3078 /* Poll for no more than 1 second */ 3079 for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++) 3080 { 3081 for (j = 0; j < pcisize; j++) { 3082 xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4, 3083 *((u32*)&hldev->pci_config_space + j)); 3084 } 3085 3086 xge_os_pci_read16(hldev->pdev,hldev->cfgh, 3087 xge_offsetof(xge_hal_pci_config_le_t, device_id), 3088 &hldev->device_id); 3089 3090 if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN) 3091 break; 3092 xge_os_mdelay(20); 3093 } 3094 } 3095 3096 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN) 3097 { 3098 xge_debug_device(XGE_ERR, "device reset failed"); 3099 return XGE_HAL_ERR_RESET_FAILED; 3100 } 3101 3102 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 3103 int cnt = 0; 3104 3105 rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC; 3106 pcisize = XGE_HAL_PCISIZE_HERC; 3107 xge_os_mdelay(1); 3108 do { 3109 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3110 &bar0->sw_reset); 3111 if (val64 != rawval) { 3112 break; 3113 } 3114 cnt++; 3115 xge_os_mdelay(1); /* Wait for 1ms before retry */ 3116 } while(cnt < 20); 3117 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 3118 rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA; 3119 pcisize = XGE_HAL_PCISIZE_XENA; 3120 xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS); 3121 } 3122 3123 /* Restore MSI-X vector table */ 3124 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { 3125 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 3126 if ( hldev->bar2 ) { 3127 /* 3128 * 94: MSIXTable 00000004 ( BIR:4 Offset:0x0 ) 3129 * 98: PBATable 00000404 ( BIR:4 Offset:0x400 ) 3130 */ 3131 u64 *msix_vetor_table = (u64 *)hldev->bar2; 3132 3133 /* 2 64bit words for each entry */ 3134 for (i = 0; i < 
XGE_HAL_MAX_MSIX_MESSAGES * 2; 3135 i++) { 3136 xge_os_pio_mem_write64(hldev->pdev, 3137 hldev->regh2, 3138 hldev->msix_vector_table[i], 3139 &msix_vetor_table[i]); 3140 } 3141 } 3142 } 3143 } 3144 3145 hldev->link_state = XGE_HAL_LINK_DOWN; 3146 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3147 &bar0->sw_reset); 3148 3149 if (val64 != rawval) { 3150 xge_debug_device(XGE_ERR, "device has not been reset " 3151 "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT, 3152 (unsigned long long)val64, (unsigned long long)rawval); 3153 return XGE_HAL_ERR_RESET_FAILED; 3154 } 3155 3156 hldev->hw_is_initialized = 0; 3157 return XGE_HAL_OK; 3158} 3159 3160/* 3161 * __hal_device_poll - General private routine to poll the device. 3162 * @hldev: HAL device handle. 3163 * 3164 * Returns: one of the xge_hal_status_e{} enumerated types. 3165 * XGE_HAL_OK - for success. 3166 * XGE_HAL_ERR_CRITICAL - when encounters critical error. 3167 */ 3168static xge_hal_status_e 3169__hal_device_poll(xge_hal_device_t *hldev) 3170{ 3171 xge_hal_pci_bar0_t *bar0; 3172 u64 err_reg; 3173 3174 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 3175 3176 /* Handling SERR errors by forcing a H/W reset. 
*/ 3177 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3178 &bar0->serr_source); 3179 if (err_reg & XGE_HAL_SERR_SOURCE_ANY) { 3180 __hal_device_handle_serr(hldev, "serr_source", err_reg); 3181 return XGE_HAL_ERR_CRITICAL; 3182 } 3183 3184 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3185 &bar0->misc_int_reg); 3186 3187 if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) { 3188 hldev->stats.sw_dev_err_stats.parity_err_cnt++; 3189 __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg); 3190 return XGE_HAL_ERR_CRITICAL; 3191 } 3192 3193#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 3194 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) 3195#endif 3196 { 3197 3198 /* Handling link status change error Intr */ 3199 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3200 &bar0->mac_rmac_err_reg); 3201 if (__hal_device_handle_link_state_change(hldev)) 3202 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3203 err_reg, &bar0->mac_rmac_err_reg); 3204 } 3205 3206 if (hldev->inject_serr != 0) { 3207 err_reg = hldev->inject_serr; 3208 hldev->inject_serr = 0; 3209 __hal_device_handle_serr(hldev, "inject_serr", err_reg); 3210 return XGE_HAL_ERR_CRITICAL; 3211 } 3212 3213 if (hldev->inject_ecc != 0) { 3214 err_reg = hldev->inject_ecc; 3215 hldev->inject_ecc = 0; 3216 hldev->stats.sw_dev_err_stats.ecc_err_cnt++; 3217 __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg); 3218 return XGE_HAL_ERR_CRITICAL; 3219 } 3220 3221 if (hldev->inject_bad_tcode != 0) { 3222 u8 t_code = hldev->inject_bad_tcode; 3223 xge_hal_channel_t channel; 3224 xge_hal_fifo_txd_t txd; 3225 xge_hal_ring_rxd_1_t rxd; 3226 3227 channel.devh = hldev; 3228 3229 if (hldev->inject_bad_tcode_for_chan_type == 3230 XGE_HAL_CHANNEL_TYPE_FIFO) { 3231 channel.type = XGE_HAL_CHANNEL_TYPE_FIFO; 3232 3233 } else { 3234 channel.type = XGE_HAL_CHANNEL_TYPE_RING; 3235 } 3236 3237 hldev->inject_bad_tcode = 0; 3238 3239 if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO) 3240 return 
xge_hal_device_handle_tcode(&channel, &txd, 3241 t_code); 3242 else 3243 return xge_hal_device_handle_tcode(&channel, &rxd, 3244 t_code); 3245 } 3246 3247 return XGE_HAL_OK; 3248} 3249 3250/* 3251 * __hal_verify_pcc_idle - Verify All Enbled PCC are IDLE or not 3252 * @hldev: HAL device handle. 3253 * @adp_status: Adapter Status value 3254 * Usage: See xge_hal_device_enable{}. 3255 */ 3256xge_hal_status_e 3257__hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status) 3258{ 3259 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA && 3260 hldev->revision < 4) { 3261 /* 3262 * For Xena 1,2,3 we enable only 4 PCCs Due to 3263 * SXE-008 (Transmit DMA arbitration issue) 3264 */ 3265 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) 3266 != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) { 3267 xge_debug_device(XGE_TRACE, "%s", 3268 "PCC is not IDLE after adapter enabled!"); 3269 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 3270 } 3271 } else { 3272 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) != 3273 XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) { 3274 xge_debug_device(XGE_TRACE, "%s", 3275 "PCC is not IDLE after adapter enabled!"); 3276 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 3277 } 3278 } 3279 return XGE_HAL_OK; 3280} 3281 3282static void 3283__hal_update_bimodal(xge_hal_device_t *hldev, int ring_no) 3284{ 3285 int tval, d, iwl_avg, len_avg, bytes_avg, bytes_hist, d_hist; 3286 int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg; 3287 int iwl_cnt, i; 3288 3289#define _HIST_SIZE 50 /* 0.5 sec history */ 3290#define _HIST_ADJ_TIMER 1 3291#define _STEP 2 3292 3293 static int bytes_avg_history[_HIST_SIZE] = {0}; 3294 static int d_avg_history[_HIST_SIZE] = {0}; 3295 static int history_idx = 0; 3296 static int pstep = 1; 3297 static int hist_adj_timer = 0; 3298 3299 /* 3300 * tval - current value of this bimodal timer 3301 */ 3302 tval = hldev->bimodal_tti[ring_no].timer_val_us; 3303 3304 /* 3305 * d - how many interrupts we were getting since 
last 3306 * bimodal timer tick. 3307 */ 3308 d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt - 3309 hldev->bimodal_intr_cnt; 3310 3311 /* advance bimodal interrupt counter */ 3312 hldev->bimodal_intr_cnt = 3313 hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt; 3314 3315 /* 3316 * iwl_cnt - how many interrupts we've got since last 3317 * bimodal timer tick. 3318 */ 3319 iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ? 3320 hldev->irq_workload_rxcnt[ring_no] : 1); 3321 iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ? 3322 hldev->irq_workload_txcnt[ring_no] : 1); 3323 iwl_cnt = iwl_rxcnt + iwl_txcnt; 3324 3325 /* 3326 * we need to take hldev->config.isr_polling_cnt into account 3327 * but for some reason this line causing GCC to produce wrong 3328 * code on Solaris. As of now, if bimodal_interrupts is configured 3329 * hldev->config.isr_polling_cnt is forced to be "0". 3330 * 3331 * iwl_cnt = iwl_cnt / (hldev->config.isr_polling_cnt + 1); */ 3332 3333 /* 3334 * iwl_avg - how many RXDs on avarage been processed since 3335 * last bimodal timer tick. This indirectly includes 3336 * CPU utilizations. 3337 */ 3338 iwl_rxavg = hldev->irq_workload_rxd[ring_no] / iwl_rxcnt; 3339 iwl_txavg = hldev->irq_workload_txd[ring_no] / iwl_txcnt; 3340 iwl_avg = iwl_rxavg + iwl_txavg; 3341 iwl_avg = iwl_avg == 0 ? 1 : iwl_avg; 3342 3343 /* 3344 * len_avg - how many bytes on avarage been processed since 3345 * last bimodal timer tick. i.e. avarage frame size. 3346 */ 3347 len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] / 3348 (hldev->irq_workload_rxd[ring_no] ? 3349 hldev->irq_workload_rxd[ring_no] : 1); 3350 len_txavg = 1 + hldev->irq_workload_txlen[ring_no] / 3351 (hldev->irq_workload_txd[ring_no] ? 
3352 hldev->irq_workload_txd[ring_no] : 1); 3353 len_avg = len_rxavg + len_txavg; 3354 if (len_avg < 60) 3355 len_avg = 60; 3356 3357 /* align on low boundary */ 3358 if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us) 3359 tval = hldev->config.bimodal_timer_lo_us; 3360 3361 /* reset faster */ 3362 if (iwl_avg == 1) { 3363 tval = hldev->config.bimodal_timer_lo_us; 3364 /* reset history */ 3365 for (i = 0; i < _HIST_SIZE; i++) 3366 bytes_avg_history[i] = d_avg_history[i] = 0; 3367 history_idx = 0; 3368 pstep = 1; 3369 hist_adj_timer = 0; 3370 } 3371 3372 /* always try to ajust timer to the best throughput value */ 3373 bytes_avg = iwl_avg * len_avg; 3374 history_idx %= _HIST_SIZE; 3375 bytes_avg_history[history_idx] = bytes_avg; 3376 d_avg_history[history_idx] = d; 3377 history_idx++; 3378 d_hist = bytes_hist = 0; 3379 for (i = 0; i < _HIST_SIZE; i++) { 3380 /* do not re-configure until history is gathered */ 3381 if (!bytes_avg_history[i]) { 3382 tval = hldev->config.bimodal_timer_lo_us; 3383 goto _end; 3384 } 3385 bytes_hist += bytes_avg_history[i]; 3386 d_hist += d_avg_history[i]; 3387 } 3388 bytes_hist /= _HIST_SIZE; 3389 d_hist /= _HIST_SIZE; 3390 3391// xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d", 3392// d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg, 3393// d_hist*bytes_hist, pstep); 3394 3395 /* make an adaptive step */ 3396 if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) { 3397 pstep = !pstep; 3398 hist_adj_timer = 0; 3399 } 3400 3401 if (pstep && 3402 (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) { 3403 tval += _STEP; 3404 hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++; 3405 } else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) { 3406 tval -= _STEP; 3407 hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++; 3408 } 3409 3410 /* enable TTI range A for better latencies */ 3411 hldev->bimodal_urange_a_en = 0; 3412 if (tval <= 
hldev->config.bimodal_timer_lo_us && iwl_avg > 2) 3413 hldev->bimodal_urange_a_en = 1; 3414 3415_end: 3416 /* reset workload statistics counters */ 3417 hldev->irq_workload_rxcnt[ring_no] = 0; 3418 hldev->irq_workload_rxd[ring_no] = 0; 3419 hldev->irq_workload_rxlen[ring_no] = 0; 3420 hldev->irq_workload_txcnt[ring_no] = 0; 3421 hldev->irq_workload_txd[ring_no] = 0; 3422 hldev->irq_workload_txlen[ring_no] = 0; 3423 3424 /* reconfigure TTI56 + ring_no with new timer value */ 3425 hldev->bimodal_timer_val_us = tval; 3426 (void) __hal_device_rti_configure(hldev, 1); 3427} 3428 3429static void 3430__hal_update_rxufca(xge_hal_device_t *hldev, int ring_no) 3431{ 3432 int ufc, ic, i; 3433 3434 ufc = hldev->config.ring.queue[ring_no].rti.ufc_a; 3435 ic = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt; 3436 3437 /* urange_a adaptive coalescing */ 3438 if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) { 3439 if (ic > hldev->rxufca_intr_thres) { 3440 if (ufc < hldev->config.rxufca_hi_lim) { 3441 ufc += 1; 3442 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) 3443 hldev->config.ring.queue[i].rti.ufc_a = ufc; 3444 (void) __hal_device_rti_configure(hldev, 1); 3445 hldev->stats.sw_dev_info_stats. 3446 rxufca_hi_adjust_cnt++; 3447 } 3448 hldev->rxufca_intr_thres = ic + 3449 hldev->config.rxufca_intr_thres; /* def: 30 */ 3450 } else { 3451 if (ufc > hldev->config.rxufca_lo_lim) { 3452 ufc -= 1; 3453 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) 3454 hldev->config.ring.queue[i].rti.ufc_a = ufc; 3455 (void) __hal_device_rti_configure(hldev, 1); 3456 hldev->stats.sw_dev_info_stats. 3457 rxufca_lo_adjust_cnt++; 3458 } 3459 } 3460 hldev->rxufca_lbolt_time = hldev->rxufca_lbolt + 3461 hldev->config.rxufca_lbolt_period; 3462 } 3463 hldev->rxufca_lbolt++; 3464} 3465 3466/* 3467 * __hal_device_handle_mc - Handle MC interrupt reason 3468 * @hldev: HAL device handle. 
 * @reason: interrupt reason
 */
xge_hal_status_e
__hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64;

	/* Bail out early if the memory-controller interrupt is not
	 * actually asserted. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mc_int_status);
	if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT))
	    return XGE_HAL_OK;

	/* Latch the error cause and write the same value back to
	 * acknowledge it (NOTE(review): presumably write-1-to-clear —
	 * confirm against the Xframe register specification). */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mc_err_reg);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    val64, &isrbar0->mc_err_reg);

	/* Single-bit (correctable) ECC errors. The ITQ/RLD banks are
	 * checked only on non-Xena (i.e. Herc) cards. */
	if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L ||
	    val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U ||
	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 ||
	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 ||
	    (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
	    (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L ||
	    val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U ||
	    val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L ||
	    val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) {
	    hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++;
	    hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
	}

	/* Double-bit (uncorrectable) ECC errors; same bank layout as
	 * the single-bit check above. */
	if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L ||
	    val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U ||
	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 ||
	    (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
	    (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L ||
	    val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U ||
	    val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L ||
	    val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) {
	    hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++;
	    hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
	}

	/* Memory-controller state-machine error: counted only. */
	if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) {
	    hldev->stats.sw_dev_err_stats.sm_err_cnt++;
	}

	/* those two should result in device reset */
	if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) {
	    __hal_device_handle_eccerr(hldev, "mc_err_reg", val64);
	    return XGE_HAL_ERR_CRITICAL;
	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_pic - Handle non-traffic PIC interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 */
xge_hal_status_e
__hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64;

	/* For each asserted sub-source, read its interrupt register and
	 * write the value back to acknowledge it. */
	if (reason & XGE_HAL_PIC_INT_FLSH) {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &isrbar0->flsh_int_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            val64, &isrbar0->flsh_int_reg);
	    /* FIXME: handle register */
	}
	if (reason & XGE_HAL_PIC_INT_MDIO) {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &isrbar0->mdio_int_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            val64, &isrbar0->mdio_int_reg);
	    /* FIXME: handle register */
	}
	if (reason & XGE_HAL_PIC_INT_IIC) {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &isrbar0->iic_int_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            val64, &isrbar0->iic_int_reg);
	    /* FIXME: handle register */
	}
	if (reason & XGE_HAL_PIC_INT_MISC) {
	    val64 = xge_os_pio_mem_read64(hldev->pdev,
	            hldev->regh0, &isrbar0->misc_int_reg);
#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	    /* Herc-only: link state changes are delivered via misc_int
	     * and handled directly in the ISR. */
	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	        /* Check for Link interrupts. If both Link Up/Down
	         * bits are set, clear both and check adapter status
	         */
	        if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) &&
	            (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) {
	            u64 temp64;

	            xge_debug_device(XGE_TRACE,
	                "both link up and link down detected "XGE_OS_LLXFMT,
	                (unsigned long long)val64);

	            /* Ambiguous: acknowledge both bits and let the
	             * poll path settle the real link state. */
	            temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT |
	                  XGE_HAL_MISC_INT_REG_LINK_UP_INT);
	            xge_os_pio_mem_write64(hldev->pdev,
	                       hldev->regh0, temp64,
	                       &isrbar0->misc_int_reg);
	        }
	        else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) {
	            xge_debug_device(XGE_TRACE,
	                  "link up call request, misc_int "XGE_OS_LLXFMT,
	                  (unsigned long long)val64);
	            __hal_device_handle_link_up_ind(hldev);
	        }
	        else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){
	            xge_debug_device(XGE_TRACE,
	                  "link down request, misc_int "XGE_OS_LLXFMT,
	                  (unsigned long long)val64);
	            __hal_device_handle_link_down_ind(hldev);
	        }
	    } else
#endif
	    {
	        /* Default path: just acknowledge misc_int. */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                   val64, &isrbar0->misc_int_reg);
	    }
	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_txpic - Handle TxPIC interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 */
xge_hal_status_e
__hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_status_e status = XGE_HAL_OK;
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	volatile u64 val64;

	/* First dispatch the non-traffic PIC sub-sources (flash, MDIO,
	 * IIC, misc) to __hal_device_handle_pic(). */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->pic_int_status);
	if ( val64 & (XGE_HAL_PIC_INT_FLSH |
	         XGE_HAL_PIC_INT_MDIO |
	         XGE_HAL_PIC_INT_IIC |
	         XGE_HAL_PIC_INT_MISC) ) {
	    status = __hal_device_handle_pic(hldev, val64);
	    xge_os_wmb();
	}

	if (!(val64 & XGE_HAL_PIC_INT_TX))
	    return status;

	/* Latch and acknowledge the TxPIC interrupt register. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->txpic_int_reg);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    val64, &isrbar0->txpic_int_reg);
	xge_os_wmb();

	if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) {
	    int i;

	    /* Give the upper layer its periodic scheduling callback. */
	    if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL)
	        g_xge_hal_driver->uld_callbacks.sched_timer(
	                  hldev, hldev->upper_layer_info);
	    /*
	     * This feature implements adaptive receive interrupt
	     * coalescing. It is disabled by default. To enable it
	     * set hldev->config.rxufca_lo_lim to be not equal to
	     * hldev->config.rxufca_hi_lim.
	     *
	     * We are using HW timer for this feature, so the
	     * user needs to configure hldev->config.rxufca_lbolt_period
	     * which is essentially a time slice of timer.
	     *
	     * For those familiar with Linux, lbolt means jiffies
	     * of this timer. I.e. timer tick.
	     */
	    if (hldev->config.rxufca_lo_lim !=
	            hldev->config.rxufca_hi_lim &&
	        hldev->config.rxufca_lo_lim != 0) {
	        for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	            if (!hldev->config.ring.queue[i].configured)
	                continue;
	            if (hldev->config.ring.queue[i].rti.urange_a)
	                __hal_update_rxufca(hldev, i);
	        }
	    }

	    /*
	     * This feature implements adaptive TTI timer re-calculation
	     * based on host utilization, number of interrupts processed,
	     * number of RXD per tick and average length of packets per
	     * tick.
	     */
	    if (hldev->config.bimodal_interrupts) {
	        for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	            if (!hldev->config.ring.queue[i].configured)
	                continue;
	            if (hldev->bimodal_tti[i].enabled)
	                __hal_update_bimodal(hldev, i);
	        }
	    }
	}

	/* NOTE(review): `status` from __hal_device_handle_pic() is
	 * discarded on this path. Today handle_pic() always returns
	 * XGE_HAL_OK, so behavior is unchanged, but `return status;`
	 * would be more robust should that ever change — confirm. */
	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_txdma - Handle TxDMA interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 */
xge_hal_status_e
__hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64, temp64, err;

	/* For each asserted TxDMA sub-block: latch its error register,
	 * write the value back to acknowledge it, bump the per-block
	 * statistic, and on any fatal bit recover via full device reset.
	 *
	 * NOTE(review): each fatal check tests `val64` (the
	 * txdma_int_status snapshot) against `*_err_reg` bit masks;
	 * `err & temp64` looks like the intent — confirm against the
	 * Xframe register specification before changing. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->txdma_int_status);
	if (val64 & XGE_HAL_TXDMA_PFC_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->pfc_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        err, &isrbar0->pfc_err_reg);
	    hldev->stats.sw_dev_info_stats.pfc_err_cnt++;
	    temp64 = XGE_HAL_PFC_ECC_DB_ERR|XGE_HAL_PFC_SM_ERR_ALARM
	         |XGE_HAL_PFC_MISC_0_ERR|XGE_HAL_PFC_MISC_1_ERR
	         |XGE_HAL_PFC_PCIX_ERR;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_TDA_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->tda_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        err, &isrbar0->tda_err_reg);
	    hldev->stats.sw_dev_info_stats.tda_err_cnt++;
	    temp64 = XGE_HAL_TDA_Fn_ECC_DB_ERR|XGE_HAL_TDA_SM0_ERR_ALARM
	         |XGE_HAL_TDA_SM1_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_PCC_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->pcc_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        err, &isrbar0->pcc_err_reg);
	    hldev->stats.sw_dev_info_stats.pcc_err_cnt++;
	    temp64 = XGE_HAL_PCC_FB_ECC_DB_ERR|XGE_HAL_PCC_TXB_ECC_DB_ERR
	         |XGE_HAL_PCC_SM_ERR_ALARM|XGE_HAL_PCC_WR_ERR_ALARM
	         |XGE_HAL_PCC_N_SERR|XGE_HAL_PCC_6_COF_OV_ERR
	         |XGE_HAL_PCC_7_COF_OV_ERR|XGE_HAL_PCC_6_LSO_OV_ERR
	         |XGE_HAL_PCC_7_LSO_OV_ERR;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_TTI_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->tti_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        err, &isrbar0->tti_err_reg);
	    hldev->stats.sw_dev_info_stats.tti_err_cnt++;
	    temp64 = XGE_HAL_TTI_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_LSO_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->lso_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        err, &isrbar0->lso_err_reg);
	    hldev->stats.sw_dev_info_stats.lso_err_cnt++;
	    temp64 = XGE_HAL_LSO6_ABORT|XGE_HAL_LSO7_ABORT
	         |XGE_HAL_LSO6_SM_ERR_ALARM|XGE_HAL_LSO7_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_TPA_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->tpa_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        err, &isrbar0->tpa_err_reg);
	    hldev->stats.sw_dev_info_stats.tpa_err_cnt++;
	    temp64 = XGE_HAL_TPA_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_SM_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->sm_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        err, &isrbar0->sm_err_reg);
	    hldev->stats.sw_dev_info_stats.sm_err_cnt++;
	    temp64 = XGE_HAL_SM_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}

	return XGE_HAL_OK;

	/* Fatal error path: hard-reset the device, then bring it and its
	 * interrupts back up. */
reset : xge_hal_device_reset(hldev);
	xge_hal_device_enable(hldev);
	xge_hal_device_intr_enable(hldev);
	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_txmac - Handle TxMAC interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 */
xge_hal_status_e
__hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64, temp64;

	/* Ignore unless the TMAC interrupt is actually asserted. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mac_int_status);
	if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT))
	    return XGE_HAL_OK;

	/* Latch and acknowledge the TMAC error register. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mac_tmac_err_reg);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    val64, &isrbar0->mac_tmac_err_reg);
	hldev->stats.sw_dev_info_stats.mac_tmac_err_cnt++;
	/* Tx buffer overrun or TMAC state-machine error is fatal:
	 * recover via full device reset + re-enable. */
	temp64 = XGE_HAL_TMAC_TX_BUF_OVRN|XGE_HAL_TMAC_TX_SM_ERR;
	if (val64 & temp64) {
	    xge_hal_device_reset(hldev);
	    xge_hal_device_enable(hldev);
	    xge_hal_device_intr_enable(hldev);
	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_txxgxs - Handle TxXGXS interrupt reason
 * @hldev: HAL device handle.
3823 * @reason: interrupt reason 3824 */ 3825xge_hal_status_e 3826__hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason) 3827{ 3828 xge_hal_pci_bar0_t *isrbar0 = 3829 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3830 u64 val64, temp64; 3831 3832 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3833 &isrbar0->xgxs_int_status); 3834 if (!(val64 & XGE_HAL_XGXS_INT_STATUS_TXGXS)) 3835 return XGE_HAL_OK; 3836 3837 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3838 &isrbar0->xgxs_txgxs_err_reg); 3839 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3840 val64, &isrbar0->xgxs_txgxs_err_reg); 3841 hldev->stats.sw_dev_info_stats.xgxs_txgxs_err_cnt++; 3842 temp64 = XGE_HAL_TXGXS_ESTORE_UFLOW|XGE_HAL_TXGXS_TX_SM_ERR; 3843 if (val64 & temp64) { 3844 xge_hal_device_reset(hldev); 3845 xge_hal_device_enable(hldev); 3846 xge_hal_device_intr_enable(hldev); 3847 } 3848 3849 return XGE_HAL_OK; 3850} 3851 3852/* 3853 * __hal_device_handle_rxpic - Handle RxPIC interrupt reason 3854 * @hldev: HAL device handle. 3855 * @reason: interrupt reason 3856 */ 3857xge_hal_status_e 3858__hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason) 3859{ 3860 /* FIXME: handle register */ 3861 3862 return XGE_HAL_OK; 3863} 3864 3865/* 3866 * __hal_device_handle_rxdma - Handle RxDMA interrupt reason 3867 * @hldev: HAL device handle. 
 * @reason: interrupt reason
 */
xge_hal_status_e
__hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64, err, temp64;

	/* For each asserted RxDMA sub-block: latch its error register,
	 * write the value back to acknowledge it, bump the per-block
	 * statistic, and on any fatal bit recover via full device reset.
	 *
	 * NOTE(review): as in __hal_device_handle_txdma(), the fatal
	 * checks test `val64` (rxdma_int_status) against `*_err_reg`
	 * masks; `err & temp64` looks like the intent — confirm. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->rxdma_int_status);
	if (val64 & XGE_HAL_RXDMA_RC_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->rc_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        err, &isrbar0->rc_err_reg);
	    hldev->stats.sw_dev_info_stats.rc_err_cnt++;
	    temp64 = XGE_HAL_RC_PRCn_ECC_DB_ERR|XGE_HAL_RC_FTC_ECC_DB_ERR
	         |XGE_HAL_RC_PRCn_SM_ERR_ALARM
	         |XGE_HAL_RC_FTC_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_RXDMA_RPA_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->rpa_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        err, &isrbar0->rpa_err_reg);
	    hldev->stats.sw_dev_info_stats.rpa_err_cnt++;
	    temp64 = XGE_HAL_RPA_SM_ERR_ALARM|XGE_HAL_RPA_CREDIT_ERR;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_RXDMA_RDA_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->rda_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        err, &isrbar0->rda_err_reg);
	    hldev->stats.sw_dev_info_stats.rda_err_cnt++;
	    temp64 = XGE_HAL_RDA_RXDn_ECC_DB_ERR
	         |XGE_HAL_RDA_FRM_ECC_DB_N_AERR
	         |XGE_HAL_RDA_SM1_ERR_ALARM|XGE_HAL_RDA_SM0_ERR_ALARM
	         |XGE_HAL_RDA_RXD_ECC_DB_SERR;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_RXDMA_RTI_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->rti_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        err, &isrbar0->rti_err_reg);
	    hldev->stats.sw_dev_info_stats.rti_err_cnt++;
	    temp64 = XGE_HAL_RTI_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}

	return XGE_HAL_OK;

	/* Fatal error path: hard-reset the device, then bring it and its
	 * interrupts back up. */
reset : xge_hal_device_reset(hldev);
	xge_hal_device_enable(hldev);
	xge_hal_device_intr_enable(hldev);
	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_rxmac - Handle RxMAC interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 */
xge_hal_status_e
__hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64, temp64;

	/* Ignore unless the RMAC interrupt is actually asserted. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mac_int_status);
	if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT))
	    return XGE_HAL_OK;

	/* Latch and acknowledge the RMAC error register. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mac_rmac_err_reg);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    val64, &isrbar0->mac_rmac_err_reg);
	hldev->stats.sw_dev_info_stats.mac_rmac_err_cnt++;
	/* Rx buffer overrun or RMAC state-machine error is fatal:
	 * recover via full device reset + re-enable. */
	temp64 = XGE_HAL_RMAC_RX_BUFF_OVRN|XGE_HAL_RMAC_RX_SM_ERR;
	if (val64 & temp64) {
	    xge_hal_device_reset(hldev);
	    xge_hal_device_enable(hldev);
	    xge_hal_device_intr_enable(hldev);
	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_rxxgxs - Handle RxXGXS interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 */
xge_hal_status_e
__hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64, temp64;

	/* Nothing to do unless an RxXGXS interrupt is actually pending. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->xgxs_int_status);
	if (!(val64 & XGE_HAL_XGXS_INT_STATUS_RXGXS))
	    return XGE_HAL_OK;

	/* Read the error register and write the value back to clear it. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->xgxs_rxgxs_err_reg);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        val64, &isrbar0->xgxs_rxgxs_err_reg);
	hldev->stats.sw_dev_info_stats.xgxs_rxgxs_err_cnt++;

	/* Fatal RxXGXS errors require a full reset/re-enable cycle. */
	temp64 = XGE_HAL_RXGXS_ESTORE_OFLOW|XGE_HAL_RXGXS_RX_SM_ERR;
	if (val64 & temp64) {
	    xge_hal_device_reset(hldev);
	    xge_hal_device_enable(hldev);
	    xge_hal_device_intr_enable(hldev);
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_enable - Enable device.
 * @hldev: HAL device handle.
 *
 * Enable the specified device: bring up the link/interface.
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device
 * to a "quiescent" state.
 *
 * See also: xge_hal_status_e{}.
 *
 * Usage: See ex_open{}.
 */
xge_hal_status_e
xge_hal_device_enable(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	u64 adp_status;
	int i, j;

	if (!hldev->hw_is_initialized) {
	    xge_hal_status_e status;

	    status = __hal_device_hw_initialize(hldev);
	    if (status != XGE_HAL_OK) {
	        return status;
	    }
	}

	/*
	 * Not needed in most cases, i.e.
	 * when device_disable() is followed by reset -
	 * the latter copies back PCI config space, along with
	 * the bus mastership - see __hal_device_reset().
	 * However, there are/may-in-future be other cases, and
	 * does not hurt.
	 */
	__hal_device_bus_master_enable(hldev);

	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	    /*
	     * Configure the link stability period.
	     */
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->misc_control);
	    if (hldev->config.link_stability_period !=
	            XGE_HAL_DEFAULT_USE_HARDCODE) {

	        val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
	                hldev->config.link_stability_period);
	    } else {
	        /*
	         * Use the link stability period 1 ms as default
	         */
	        val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
	                XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD);
	    }
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            val64, &bar0->misc_control);

	    /*
	     * Clearing any possible Link up/down interrupts that
	     * could have popped up just before Enabling the card.
	     */
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->misc_int_reg);
	    if (val64) {
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                val64, &bar0->misc_int_reg);
	        xge_debug_device(XGE_TRACE, "%s","link state cleared");
	    }
	} else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
	    /*
	     * Clearing any possible Link state change interrupts that
	     * could have popped up just before Enabling the card.
	     */
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->mac_rmac_err_reg);
	    if (val64) {
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                val64, &bar0->mac_rmac_err_reg);
	        xge_debug_device(XGE_TRACE, "%s", "link state cleared");
	    }
	}

	if (__hal_device_wait_quiescent(hldev, &val64)) {
	    return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	}

	/* Enabling Laser. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->adapter_control);
	val64 |= XGE_HAL_ADAPTER_EOI_TX_ON;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->adapter_control);

	/* let link establish */
	xge_os_mdelay(1);

	/* set link down until poll() routine will set it up (maybe) */
	hldev->link_state = XGE_HAL_LINK_DOWN;

	/*
	 * If link is UP (adapter is connected) then enable the adapter.
	 * A set REMOTE_FAULT/LOCAL_FAULT bit means the link is down,
	 * in which case only the LED is turned off here.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->adapter_status);
	if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
	        XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->adapter_control);
	    val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
	} else {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->adapter_control);
	    val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON |
	            XGE_HAL_ADAPTER_LED_ON );
	}

	val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN;  /* adapter enable */
	/*
	 * NOTE(review): this clears XGE_HAL_ADAPTER_ECC_EN; the original
	 * comment said "ECC enable" - confirm intended bit polarity.
	 */
	val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);
	xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64,
	        &bar0->adapter_control);

	/* We spin here waiting for the Link to come up.
	 * This is the fix for the Link being unstable after the reset. */
	i = 0;
	j = 0;
	do
	{
	    adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->adapter_status);

	    /* Read the adapter control register for Adapter_enable bit */
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->adapter_control);
	    if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
	            XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) &&
	            (val64 & XGE_HAL_ADAPTER_CNTL_EN)) {
	        /* Link must stay good for link_valid_cnt consecutive polls. */
	        j++;
	        if (j >= hldev->config.link_valid_cnt) {
	            if (xge_hal_device_status(hldev, &adp_status) ==
	                    XGE_HAL_OK) {
	                if (__hal_verify_pcc_idle(hldev,
	                        adp_status) != XGE_HAL_OK) {
	                    return
	                        XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	                }
	                xge_debug_device(XGE_TRACE,
	                    "adp_status: "XGE_OS_LLXFMT
	                    ", link is up on "
	                    "adapter enable!",
	                    (unsigned long long)adp_status);
	                val64 = xge_os_pio_mem_read64(
	                        hldev->pdev,
	                        hldev->regh0,
	                        &bar0->adapter_control);
	                val64 = val64|
	                    (XGE_HAL_ADAPTER_EOI_TX_ON |
	                     XGE_HAL_ADAPTER_LED_ON );
	                xge_os_pio_mem_write64(hldev->pdev,
	                        hldev->regh0, val64,
	                        &bar0->adapter_control);
	                xge_os_mdelay(1);

	                val64 = xge_os_pio_mem_read64(
	                        hldev->pdev,
	                        hldev->regh0,
	                        &bar0->adapter_control);
	                break;    /* out of do-while loop */
	            } else {
	                return
	                    XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	            }
	        }
	    } else {
	        j = 0;  /* Reset the count */
	        /* Turn on the Laser */
	        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                &bar0->adapter_control);
	        val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON;
	        xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0,
	                val64, &bar0->adapter_control);

	        xge_os_mdelay(1);

	        /* Now re-enable it as due to noise, hardware
	         * turned it off */
	        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                &bar0->adapter_control);
	        val64 |= XGE_HAL_ADAPTER_CNTL_EN;
	        /* NOTE(review): clears ECC_EN, as above - confirm polarity */
	        val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	                &bar0->adapter_control);
	    }
	    xge_os_mdelay(1);  /* Sleep for 1 msec */
	    i++;
	} while (i < hldev->config.link_retry_cnt);

	__hal_device_led_actifity_fix(hldev);

#ifndef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	/* Here we are performing soft reset on XGXS to force link down.
	 * Since link is already up, we will get link state change
	 * poll notification after adapter is enabled */

	__hal_serial_mem_write64(hldev, 0x80010515001E0000ULL,
	        &bar0->dtx_control);
	(void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);

	__hal_serial_mem_write64(hldev, 0x80010515001E00E0ULL,
	        &bar0->dtx_control);
	(void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);

	__hal_serial_mem_write64(hldev, 0x80070515001F00E4ULL,
	        &bar0->dtx_control);
	(void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);

	xge_os_mdelay(100);  /* Sleep for 100 msec */
#else
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
#endif
	{
	    /*
	     * With some switches the link state change interrupt does not
	     * occur even though the xgxs reset is done as per SPN-006. So,
	     * poll the adapter status register and check if the link state
	     * is ok.
	     */
	    adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->adapter_status);
	    if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
	            XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
	    {
	        xge_debug_device(XGE_TRACE, "%s",
	            "enable device causing link state change ind..");
	        (void) __hal_device_handle_link_state_change(hldev);
	    }
	}

	if (hldev->config.stats_refresh_time_sec !=
	        XGE_HAL_STATS_REFRESH_DISABLE)
	    __hal_stats_enable(&hldev->stats);

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_disable - Disable Xframe adapter.
 * @hldev: Device handle.
 *
 * Disable this device. To gracefully reset the adapter, the host should:
 *
 * - call xge_hal_device_disable();
 *
 * - call xge_hal_device_intr_disable();
 *
 * - close all opened channels and clean up outstanding resources;
 *
 * - do some work (error recovery, change mtu, reset, etc);
 *
 * - call xge_hal_device_enable();
 *
 * - open channels, replenish RxDs, etc.
 *
 * - call xge_hal_device_intr_enable().
 *
 * Note: Disabling the device does _not_ include disabling of interrupts.
 * After disabling the device stops receiving new frames but those frames
 * that were already in the pipe will keep coming for some few milliseconds.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
 * a "quiescent" state.
 *
 * See also: xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_disable(xge_hal_device_t *hldev)
{
	xge_hal_status_e status = XGE_HAL_OK;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	xge_debug_device(XGE_TRACE, "%s", "turn off laser, cleanup hardware");

	/* Clear the adapter-enable bit: the NIC stops accepting frames. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->adapter_control);
	val64 = val64 & (~XGE_HAL_ADAPTER_CNTL_EN);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->adapter_control);

	/*
	 * Even if quiescence fails we continue the teardown; the caller
	 * is told via the returned status.
	 */
	if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) {
	    status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	}

	if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
	        XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
	        XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
	    status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	}

	if (hldev->config.stats_refresh_time_sec !=
	        XGE_HAL_STATS_REFRESH_DISABLE)
	    __hal_stats_disable(&hldev->stats);
#ifdef XGE_DEBUG_ASSERT
	else
	    xge_assert(!hldev->stats.is_enabled);
#endif

#ifndef XGE_HAL_DONT_DISABLE_BUS_MASTER_ON_STOP
	__hal_device_bus_master_disable(hldev);
#endif

	return status;
}

/**
 * xge_hal_device_reset - Reset device.
 * @hldev: HAL device handle.
 *
 * Soft-reset the device, reset the device stats except reset_cnt.
 *
 * After reset is done, will try to re-initialize HW.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized.
 * XGE_HAL_ERR_RESET_FAILED - Reset failed.
 *
 * See also: xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_reset(xge_hal_device_t *hldev)
{
	xge_hal_status_e status;

	/* save the soft reset counter: __hal_stats_soft_reset() below
	 * clears all stats, and this one must survive the reset */
	u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt;

	xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device", reset_cnt);

	if (!hldev->is_initialized)
	    return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED;

	/* actual "soft" reset of the adapter */
	status = __hal_device_reset(hldev);

	/* reset all stats including saved */
	__hal_stats_soft_reset(hldev, 1);

	/* increment reset counter */
	hldev->stats.sw_dev_info_stats.soft_reset_cnt = reset_cnt + 1;

	/* re-initialize rxufca_intr_thres */
	hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;

	hldev->reset_needed_after_close = 0;

	return status;
}

/**
 * xge_hal_device_status - Check whether Xframe hardware is ready for
 * operation.
 * @hldev: HAL device handle.
 * @hw_status: Xframe status register. Returned by HAL.
 *
 * Check whether Xframe hardware is ready for operation.
 * The checking includes TDMA, RDMA, PFC, PIC, MC_DRAM, and the rest
 * hardware functional blocks.
 *
 * Returns: XGE_HAL_OK if the device is ready for operation. Otherwise
 * returns XGE_HAL_FAIL. Also, fills in adapter status (in @hw_status).
 *
 * See also: xge_hal_status_e{}.
 * Usage: See ex_open{}.
 */
xge_hal_status_e
xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 tmp64;

	tmp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->adapter_status);

	/* Always report the raw status back, even on failure. */
	*hw_status = tmp64;

	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) {
	    xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!");
	    return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) {
	    xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!");
	    return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) {
	    xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!");
	    return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
	    xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!");
	    return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) {
	    xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!");
	    return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) {
	    xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!");
	    return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) {
	    xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!");
	    return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) {
	    xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!");
	    return XGE_HAL_FAIL;
	}
#ifndef XGE_HAL_HERC_EMULATION
	/*
	 * Andrew: in PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK) &&
	        xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	        hldev->pci_mode != XGE_HAL_PCI_33MHZ_MODE) {
	    xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!");
	    return XGE_HAL_FAIL;
	}
#endif

	return XGE_HAL_OK;
}

/*
 * __hal_device_msi_intr_endis - Enable (flag != 0) or disable (flag == 0)
 * MSI by toggling the MSI-enable bit (bit 0) of the PCI MSI message
 * control register.
 */
void
__hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag)
{
	u16 msi_control_reg;

	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t,
	        msi_control), &msi_control_reg);

	if (flag)
	    msi_control_reg |= 0x1;
	else
	    msi_control_reg &= ~0x1;

	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t,
	        msi_control), msi_control_reg);
}

/*
 * __hal_device_msix_intr_endis - Unmask (flag != 0) or mask (flag == 0)
 * the MSI-X vector of @channel in the xmsi_mask_reg; bit (63 - msix_idx)
 * corresponds to the vector.
 */
void
__hal_device_msix_intr_endis(xge_hal_device_t *hldev,
	        xge_hal_channel_t *channel, int flag)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->xmsi_mask_reg);

	/*
	 * NOTE(review): 1LL << (63 - msix_idx) shifts a signed value into
	 * the sign bit when msix_idx == 0; 1ULL would be cleaner - confirm
	 * msix_idx is never 0 here (callers skip vector 0).
	 */
	if (flag)
	    val64 &= ~(1LL << ( 63 - channel->msix_idx ));
	else
	    val64 |= (1LL << ( 63 - channel->msix_idx ));
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->xmsi_mask_reg);
}

/**
 * xge_hal_device_intr_enable - Enable Xframe interrupts.
 * @hldev: HAL device handle.
 *
 * Enable Xframe interrupts. The function is to be executed the last in
 * Xframe initialization sequence.
 *
 * See also: xge_hal_device_intr_disable()
 */
void
xge_hal_device_intr_enable(xge_hal_device_t *hldev)
{
	xge_list_t *item;
	u64 val64;

	/* PRC initialization and configuration */
	xge_list_for_each(item, &hldev->ring_channels) {
	    xge_hal_channel_h channel;
	    channel = xge_container_of(item, xge_hal_channel_t, item);
	    __hal_ring_prc_enable(channel);
	}

	/* enable traffic only interrupts */
	if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) {
	    /*
	     * make sure all interrupts going to be disabled if MSI
	     * is enabled.
	     */
	    __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
	} else {
	    /*
	     * Enable the Tx traffic interrupts only if the TTI feature is
	     * enabled.
	     */
	    val64 = 0;
	    if (hldev->tti_enabled)
	        val64 = XGE_HAL_TX_TRAFFIC_INTR;

	    if (!hldev->config.bimodal_interrupts)
	        val64 |= XGE_HAL_RX_TRAFFIC_INTR;

	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
	        val64 |= XGE_HAL_RX_TRAFFIC_INTR;

	    val64 |=XGE_HAL_TX_PIC_INTR |
	        XGE_HAL_MC_INTR |
	        XGE_HAL_TX_DMA_INTR |
	        (hldev->config.sched_timer_us !=
	        XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0);
	    __hal_device_intr_mgmt(hldev, val64, 1);
	}

	/*
	 * Enable MSI-X interrupts
	 */
	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {

	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	        /*
	         * To enable MSI-X, MSI also needs to be enabled,
	         * due to a bug in the herc NIC.
	         */
	        __hal_device_msi_intr_endis(hldev, 1);
	    }


	    /* Enable the MSI-X interrupt for each configured channel */
	    xge_list_for_each(item, &hldev->fifo_channels) {
	        xge_hal_channel_t *channel;

	        channel = xge_container_of(item,
	                xge_hal_channel_t, item);

	        /* 0 vector is reserved for alarms */
	        if (!channel->msix_idx)
	            continue;

	        __hal_device_msix_intr_endis(hldev, channel, 1);
	    }

	    xge_list_for_each(item, &hldev->ring_channels) {
	        xge_hal_channel_t *channel;

	        channel = xge_container_of(item,
	                xge_hal_channel_t, item);

	        /* 0 vector is reserved for alarms */
	        if (!channel->msix_idx)
	            continue;

	        __hal_device_msix_intr_endis(hldev, channel, 1);
	    }
	}

	xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled");
}


/**
 * xge_hal_device_intr_disable - Disable Xframe interrupts.
 * @hldev: HAL device handle.
 *
 * Disable Xframe interrupts.
 *
 * See also: xge_hal_device_intr_enable()
 */
void
xge_hal_device_intr_disable(xge_hal_device_t *hldev)
{
	xge_list_t *item;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {

	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	        /*
	         * To disable MSI-X, MSI also needs to be disabled,
	         * due to a bug in the herc NIC.
	         */
	        __hal_device_msi_intr_endis(hldev, 0);
	    }

	    /* Disable the MSI-X interrupt for each configured channel */
	    xge_list_for_each(item, &hldev->fifo_channels) {
	        xge_hal_channel_t *channel;

	        channel = xge_container_of(item,
	                xge_hal_channel_t, item);

	        /* 0 vector is reserved for alarms */
	        if (!channel->msix_idx)
	            continue;

	        __hal_device_msix_intr_endis(hldev, channel, 0);

	    }

	    xge_os_pio_mem_write64(hldev->pdev,
	        hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
	        &bar0->tx_traffic_mask);

	    xge_list_for_each(item, &hldev->ring_channels) {
	        xge_hal_channel_t *channel;

	        channel = xge_container_of(item,
	                xge_hal_channel_t, item);

	        /* 0 vector is reserved for alarms */
	        if (!channel->msix_idx)
	            continue;

	        __hal_device_msix_intr_endis(hldev, channel, 0);
	    }

	    xge_os_pio_mem_write64(hldev->pdev,
	        hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
	        &bar0->rx_traffic_mask);
	}

	/*
	 * Disable traffic only interrupts.
	 * Tx traffic interrupts are used only if the TTI feature is
	 * enabled.
	 */
	val64 = 0;
	if (hldev->tti_enabled)
	    val64 = XGE_HAL_TX_TRAFFIC_INTR;

	val64 |= XGE_HAL_RX_TRAFFIC_INTR |
	    XGE_HAL_TX_PIC_INTR |
	    XGE_HAL_MC_INTR |
	    (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ?
	    XGE_HAL_SCHED_INTR : 0);
	__hal_device_intr_mgmt(hldev, val64, 0);

	/* Mask everything in the general interrupt mask register. */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        0xFFFFFFFFFFFFFFFFULL,
	        &bar0->general_int_mask);


	/* disable all configured PRCs */
	xge_list_for_each(item, &hldev->ring_channels) {
	    xge_hal_channel_h channel;
	    channel = xge_container_of(item, xge_hal_channel_t, item);
	    __hal_ring_prc_disable(channel);
	}

	xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled");
}


/**
 * xge_hal_device_mcast_enable - Enable Xframe multicast addresses.
 * @hldev: HAL device handle.
 *
 * Enable Xframe multicast addresses.
 * Returns: XGE_HAL_OK on success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to enable mcast
 * feature within the time(timeout).
 *
 * See also: xge_hal_device_mcast_disable(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;
	int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;

	if (hldev == NULL)
	    return XGE_HAL_ERR_INVALID_DEVICE;

	/* already enabled - nothing to do */
	if (hldev->mcast_refcnt)
	    return XGE_HAL_OK;

	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	    mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;

	/*
	 * NOTE(review): refcnt is set before the command below is known to
	 * have completed; if the poll times out the state still reads
	 * "enabled" - confirm callers retry in that case.
	 */
	hldev->mcast_refcnt = 1;

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* Enable all Multicast addresses */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0x010203040506ULL),
	        &bar0->rmac_addr_data0_mem);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL),
	        &bar0->rmac_addr_data1_mem);
	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->rmac_addr_cmd_mem);

	if (__hal_device_register_poll(hldev,
	        &bar0->rmac_addr_cmd_mem, 0,
	        XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    /* upper layer may require to repeat */
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_mcast_disable - Disable Xframe multicast addresses.
 * @hldev: HAL device handle.
 *
 * Disable Xframe multicast addresses.
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to disable mcast
 * feature within the time(timeout).
 *
 * See also: xge_hal_device_mcast_enable(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;
	int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;

	if (hldev == NULL)
	    return XGE_HAL_ERR_INVALID_DEVICE;

	/* already disabled - nothing to do */
	if (hldev->mcast_refcnt == 0)
	    return XGE_HAL_OK;

	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	    mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;

	hldev->mcast_refcnt = 0;

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* Disable all Multicast addresses */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL),
	        &bar0->rmac_addr_data0_mem);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0),
	        &bar0->rmac_addr_data1_mem);

	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->rmac_addr_cmd_mem);

	if (__hal_device_register_poll(hldev,
	        &bar0->rmac_addr_cmd_mem, 0,
	        XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    /* upper layer may require to repeat */
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_promisc_enable - Enable promiscuous mode.
 * @hldev: HAL device handle.
 *
 * Enable promiscuous mode of Xframe operation.
 *
 * See also: xge_hal_device_promisc_disable().
 */
void
xge_hal_device_promisc_enable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;

	xge_assert(hldev);

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	if (!hldev->is_promisc) {
	    /* Put the NIC into promiscuous mode */
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->mac_cfg);
	    val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;

	    /* unlock the mac_cfg register before the partial write below */
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_RMAC_CFG_KEY(0x4C0D),
	            &bar0->rmac_cfg_key);

	    __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	            (u32)(val64 >> 32),
	            &bar0->mac_cfg);

	    hldev->is_promisc = 1;
	    xge_debug_device(XGE_TRACE,
	        "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled",
	        (unsigned long long)val64);
	}
}

/**
 * xge_hal_device_promisc_disable - Disable promiscuous mode.
 * @hldev: HAL device handle.
 *
 * Disable promiscuous mode of Xframe operation.
 *
 * See also: xge_hal_device_promisc_enable().
 */
void
xge_hal_device_promisc_disable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;

	xge_assert(hldev);

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	if (hldev->is_promisc) {
	    /* Remove the NIC from promiscuous mode */
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	            &bar0->mac_cfg);
	    val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;

	    /* unlock the mac_cfg register before the partial write below */
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_RMAC_CFG_KEY(0x4C0D),
	            &bar0->rmac_cfg_key);

	    __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	            (u32)(val64 >> 32),
	            &bar0->mac_cfg);

	    hldev->is_promisc = 0;
	    xge_debug_device(XGE_TRACE,
	        "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled",
	        (unsigned long long)val64);
	}
}

/**
 * xge_hal_device_macaddr_get - Get MAC addresses.
 * @hldev: HAL device handle.
 * @index: MAC address index, in the range from 0 to
 * XGE_HAL_MAX_MAC_ADDRESSES - 1.
 * @macaddr: MAC address. Returned by HAL.
 *
 * Retrieve one of the stored MAC addresses by reading non-volatile
 * memory on the chip.
 *
 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
 * address within the time(timeout).
 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
 *
 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index,
	        macaddr_t *macaddr)
{
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	int i;

	if (hldev == NULL) {
	    return XGE_HAL_ERR_INVALID_DEVICE;
	}

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) {
	    return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
	}

#ifdef XGE_HAL_HERC_EMULATION
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000010000000000,
	        &bar0->rmac_addr_data0_mem);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000,
	        &bar0->rmac_addr_data1_mem);
	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index));
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->rmac_addr_cmd_mem);

	/* poll until done */
	__hal_device_register_poll(hldev,
	        &bar0->rmac_addr_cmd_mem, 0,
	        XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);

#endif

	/* Issue a read command for the requested address slot. */
	val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
	        XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	        XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->rmac_addr_cmd_mem);

	if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
	        XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    /* upper layer may require to repeat */
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	/* The 6 address octets live in the top 48 bits, MSB first. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->rmac_addr_data0_mem);
	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
	    (*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8)));
	}

#ifdef XGE_HAL_HERC_EMULATION
	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
	    (*macaddr)[i] = (u8)0;
	}
	(*macaddr)[1] = (u8)1;

#endif

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_macaddr_set - Set MAC address.
 * @hldev: HAL device handle.
 * @index: MAC address index, in the range from 0 to
 * XGE_HAL_MAX_MAC_ADDRESSES - 1.
 * @macaddr: New MAC address to configure.
 *
 * Configure one of the available MAC address "slots".
 *
 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
 * address within the time(timeout).
 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
 *
 * See also: xge_hal_device_macaddr_get(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index,
	        macaddr_t macaddr)
{
	xge_hal_pci_bar0_t *bar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64, temp64;
	int i;

	if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
	    return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;

	/* Pack the 6 octets MSB-first into the low 48 bits of temp64. */
	temp64 = 0;
	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
	    temp64 |= macaddr[i];
	    temp64 <<= 8;
	}
	temp64 >>= 8;  /* undo the final shift of the loop */

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64),
	        &bar0->rmac_addr_data0_mem);

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL),
	        &bar0->rmac_addr_data1_mem);

	val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
	        XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	        XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->rmac_addr_cmd_mem);

	if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
	        XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    /* upper layer may require to repeat */
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_macaddr_clear - Clear MAC address.
 * @hldev: HAL device handle.
 * @index: MAC address index, in the range from 0 to
 * XGE_HAL_MAX_MAC_ADDRESSES - 1.
 *
 * Clear one of the available MAC address "slots" by writing the
 * all-ones (broadcast) pattern into it.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
 * address within the time(timeout).
 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
 *
 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_macaddr_clear(xge_hal_device_t *hldev, int index)
{
	xge_hal_status_e status;
	u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};

	status = xge_hal_device_macaddr_set(hldev, index, macaddr);
	if (status != XGE_HAL_OK) {
	    xge_debug_device(XGE_ERR, "%s",
	        "Not able to set the mac addr");
	    return status;
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_macaddr_find - Finds index in the rmac table.
 * @hldev: HAL device handle.
 * @wanted: Wanted MAC address.
 *
 * Returns the slot index of @wanted, or -1 if not found. Slot 0 is
 * skipped (search starts at index 1).
 *
 * See also: xge_hal_device_macaddr_set().
 */
int
xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted)
{
	int i;

	if (hldev == NULL) {
	    return XGE_HAL_ERR_INVALID_DEVICE;
	}

	for (i=1; i<XGE_HAL_MAX_MAC_ADDRESSES; i++) {
	    macaddr_t macaddr;
	    (void) xge_hal_device_macaddr_get(hldev, i, &macaddr);
	    if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) {
	        return i;
	    }
	}

	return -1;
}

/**
 * xge_hal_device_mtu_set - Set MTU.
 * @hldev: HAL device handle.
 * @new_mtu: New MTU size to configure.
 *
 * Set new MTU value.
Example, to use jumbo frames: 5068 * xge_hal_device_mtu_set(my_device, my_channel, 9600); 5069 * 5070 * Returns: XGE_HAL_OK on success. 5071 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control 5072 * register. 5073 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to initialize TTI/RTI 5074 * schemes. 5075 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to 5076 * a "quiescent" state. 5077 */ 5078xge_hal_status_e 5079xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu) 5080{ 5081 xge_hal_status_e status; 5082 5083 /* 5084 * reset needed if 1) new MTU differs, and 5085 * 2a) device was closed or 5086 * 2b) device is being upped for first time. 5087 */ 5088 if (hldev->config.mtu != new_mtu) { 5089 if (hldev->reset_needed_after_close || 5090 !hldev->mtu_first_time_set) { 5091 status = xge_hal_device_reset(hldev); 5092 if (status != XGE_HAL_OK) { 5093 xge_debug_device(XGE_TRACE, "%s", 5094 "fatal: can not reset the device"); 5095 return status; 5096 } 5097 } 5098 /* store the new MTU in device, reset will use it */ 5099 hldev->config.mtu = new_mtu; 5100 xge_debug_device(XGE_TRACE, "new MTU %d applied", 5101 new_mtu); 5102 } 5103 5104 if (!hldev->mtu_first_time_set) 5105 hldev->mtu_first_time_set = 1; 5106 5107 return XGE_HAL_OK; 5108} 5109 5110/** 5111 * xge_hal_device_initialize - Initialize Xframe device. 5112 * @hldev: HAL device handle. 5113 * @attr: pointer to xge_hal_device_attr_t structure 5114 * @device_config: Configuration to be _applied_ to the device, 5115 * For the Xframe configuration "knobs" please 5116 * refer to xge_hal_device_config_t and Xframe 5117 * User Guide. 5118 * 5119 * Initialize Xframe device. Note that all the arguments of this public API 5120 * are 'IN', including @hldev. Upper-layer driver (ULD) cooperates with 5121 * OS to find new Xframe device, locate its PCI and memory spaces. 
5122 * 5123 * When done, the ULD allocates sizeof(xge_hal_device_t) bytes for HAL 5124 * to enable the latter to perform Xframe hardware initialization. 5125 * 5126 * Returns: XGE_HAL_OK - success. 5127 * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - Driver is not initialized. 5128 * XGE_HAL_ERR_BAD_DEVICE_CONFIG - Device configuration params are not 5129 * valid. 5130 * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed. 5131 * XGE_HAL_ERR_BAD_SUBSYSTEM_ID - Device subsystem id is invalid. 5132 * XGE_HAL_ERR_INVALID_MAC_ADDRESS - Device mac address in not valid. 5133 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac 5134 * address within the time(timeout) or TTI/RTI initialization failed. 5135 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control. 5136 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT -Device is not queiscent. 5137 * 5138 * See also: xge_hal_device_terminate(), xge_hal_status_e{} 5139 * xge_hal_device_attr_t{}. 5140 */ 5141xge_hal_status_e 5142xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, 5143 xge_hal_device_config_t *device_config) 5144{ 5145 int i; 5146 xge_hal_status_e status; 5147 xge_hal_channel_t *channel; 5148 u16 subsys_device; 5149 u16 subsys_vendor; 5150 int total_dram_size, ring_auto_dram_cfg, left_dram_size; 5151 int total_dram_size_max = 0; 5152 5153 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing", 5154 (unsigned long long)(ulong_t)hldev); 5155 5156 /* sanity check */ 5157 if (g_xge_hal_driver == NULL || 5158 !g_xge_hal_driver->is_initialized) { 5159 return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED; 5160 } 5161 5162 xge_os_memzero(hldev, sizeof(xge_hal_device_t)); 5163 5164 /* 5165 * validate a common part of Xframe-I/II configuration 5166 * (and run check_card() later, once PCI inited - see below) 5167 */ 5168 status = __hal_device_config_check_common(device_config); 5169 if (status != XGE_HAL_OK) 5170 return status; 5171 5172 /* apply config */ 5173 
xge_os_memcpy(&hldev->config, device_config, 5174 sizeof(xge_hal_device_config_t)); 5175 5176 /* save original attr */ 5177 xge_os_memcpy(&hldev->orig_attr, attr, 5178 sizeof(xge_hal_device_attr_t)); 5179 5180 /* initialize rxufca_intr_thres */ 5181 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres; 5182 5183 hldev->regh0 = attr->regh0; 5184 hldev->regh1 = attr->regh1; 5185 hldev->regh2 = attr->regh2; 5186 hldev->isrbar0 = hldev->bar0 = attr->bar0; 5187 hldev->bar1 = attr->bar1; 5188 hldev->bar2 = attr->bar2; 5189 hldev->pdev = attr->pdev; 5190 hldev->irqh = attr->irqh; 5191 hldev->cfgh = attr->cfgh; 5192 5193 /* set initial bimodal timer for bimodal adaptive schema */ 5194 hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us; 5195 5196 hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh, 5197 g_xge_hal_driver->config.queue_size_initial, 5198 g_xge_hal_driver->config.queue_size_max, 5199 __hal_device_event_queued, hldev); 5200 if (hldev->queueh == NULL) 5201 return XGE_HAL_ERR_OUT_OF_MEMORY; 5202 5203 hldev->magic = XGE_HAL_MAGIC; 5204 5205 xge_assert(hldev->regh0); 5206 xge_assert(hldev->regh1); 5207 xge_assert(hldev->bar0); 5208 xge_assert(hldev->bar1); 5209 xge_assert(hldev->pdev); 5210 xge_assert(hldev->irqh); 5211 xge_assert(hldev->cfgh); 5212 5213 /* initialize some PCI/PCI-X fields of this PCI device. 
*/ 5214 __hal_device_pci_init(hldev); 5215 5216 /* 5217 * initlialize lists to properly handling a potential 5218 * terminate request 5219 */ 5220 xge_list_init(&hldev->free_channels); 5221 xge_list_init(&hldev->fifo_channels); 5222 xge_list_init(&hldev->ring_channels); 5223 5224 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 5225 /* fixups for xena */ 5226 hldev->config.rth_en = 0; 5227 hldev->config.rth_spdm_en = 0; 5228 hldev->config.rts_mac_en = 0; 5229 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA; 5230 5231 status = __hal_device_config_check_xena(device_config); 5232 if (status != XGE_HAL_OK) { 5233 xge_hal_device_terminate(hldev); 5234 return status; 5235 } 5236 if (hldev->config.bimodal_interrupts == 1) { 5237 xge_hal_device_terminate(hldev); 5238 return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED; 5239 } else if (hldev->config.bimodal_interrupts == 5240 XGE_HAL_DEFAULT_USE_HARDCODE) 5241 hldev->config.bimodal_interrupts = 0; 5242 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 5243 /* fixups for herc */ 5244 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC; 5245 status = __hal_device_config_check_herc(device_config); 5246 if (status != XGE_HAL_OK) { 5247 xge_hal_device_terminate(hldev); 5248 return status; 5249 } 5250 if (hldev->config.bimodal_interrupts == 5251 XGE_HAL_DEFAULT_USE_HARDCODE) 5252 hldev->config.bimodal_interrupts = 1; 5253 } else { 5254 xge_debug_device(XGE_ERR, 5255 "detected unknown device_id 0x%x", hldev->device_id); 5256 xge_hal_device_terminate(hldev); 5257 return XGE_HAL_ERR_BAD_DEVICE_ID; 5258 } 5259 5260 /* allocate and initialize FIFO types of channels according to 5261 * configuration */ 5262 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { 5263 if (!device_config->fifo.queue[i].configured) 5264 continue; 5265 5266 channel = __hal_channel_allocate(hldev, i, 5267 XGE_HAL_CHANNEL_TYPE_FIFO); 5268 if (channel == NULL) { 5269 xge_debug_device(XGE_ERR, 5270 "fifo: __hal_channel_allocate failed"); 
5271 xge_hal_device_terminate(hldev); 5272 return XGE_HAL_ERR_OUT_OF_MEMORY; 5273 } 5274 /* add new channel to the device */ 5275 xge_list_insert(&channel->item, &hldev->free_channels); 5276 } 5277 5278 /* 5279 * automatic DRAM adjustment 5280 */ 5281 total_dram_size = 0; 5282 ring_auto_dram_cfg = 0; 5283 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { 5284 if (!device_config->ring.queue[i].configured) 5285 continue; 5286 if (device_config->ring.queue[i].dram_size_mb == 5287 XGE_HAL_DEFAULT_USE_HARDCODE) { 5288 ring_auto_dram_cfg++; 5289 continue; 5290 } 5291 total_dram_size += device_config->ring.queue[i].dram_size_mb; 5292 } 5293 left_dram_size = total_dram_size_max - total_dram_size; 5294 if (left_dram_size < 0 || 5295 (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0)) { 5296 xge_debug_device(XGE_ERR, 5297 "ring config: exceeded DRAM size %d MB", 5298 total_dram_size_max); 5299 xge_hal_device_terminate(hldev); 5300 return XGE_HAL_BADCFG_RING_QUEUE_SIZE; 5301 } 5302 5303 /* 5304 * allocate and initialize RING types of channels according to 5305 * configuration 5306 */ 5307 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { 5308 if (!device_config->ring.queue[i].configured) 5309 continue; 5310 5311 if (device_config->ring.queue[i].dram_size_mb == 5312 XGE_HAL_DEFAULT_USE_HARDCODE) { 5313 hldev->config.ring.queue[i].dram_size_mb = 5314 device_config->ring.queue[i].dram_size_mb = 5315 left_dram_size / ring_auto_dram_cfg; 5316 } 5317 5318 channel = __hal_channel_allocate(hldev, i, 5319 XGE_HAL_CHANNEL_TYPE_RING); 5320 if (channel == NULL) { 5321 xge_debug_device(XGE_ERR, 5322 "ring: __hal_channel_allocate failed"); 5323 xge_hal_device_terminate(hldev); 5324 return XGE_HAL_ERR_OUT_OF_MEMORY; 5325 } 5326 /* add new channel to the device */ 5327 xge_list_insert(&channel->item, &hldev->free_channels); 5328 } 5329 5330 /* get subsystem IDs */ 5331 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 5332 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), 5333 
&subsys_device); 5334 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 5335 xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id), 5336 &subsys_vendor); 5337 xge_debug_device(XGE_TRACE, 5338 "subsystem_id %04x:%04x", 5339 subsys_vendor, subsys_device); 5340 5341 /* reset device initially */ 5342 (void) __hal_device_reset(hldev); 5343 5344 /* set host endian before, to assure proper action */ 5345 status = __hal_device_set_swapper(hldev); 5346 if (status != XGE_HAL_OK) { 5347 xge_debug_device(XGE_ERR, 5348 "__hal_device_set_swapper failed"); 5349 xge_hal_device_terminate(hldev); 5350 (void) __hal_device_reset(hldev); 5351 return status; 5352 } 5353 5354#ifndef XGE_HAL_HERC_EMULATION 5355 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) 5356 __hal_device_xena_fix_mac(hldev); 5357#endif 5358 5359 /* MAC address initialization. 5360 * For now only one mac address will be read and used. */ 5361 status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]); 5362 if (status != XGE_HAL_OK) { 5363 xge_debug_device(XGE_ERR, 5364 "xge_hal_device_macaddr_get failed"); 5365 xge_hal_device_terminate(hldev); 5366 return status; 5367 } 5368 5369 if (hldev->macaddr[0][0] == 0xFF && 5370 hldev->macaddr[0][1] == 0xFF && 5371 hldev->macaddr[0][2] == 0xFF && 5372 hldev->macaddr[0][3] == 0xFF && 5373 hldev->macaddr[0][4] == 0xFF && 5374 hldev->macaddr[0][5] == 0xFF) { 5375 xge_debug_device(XGE_ERR, 5376 "xge_hal_device_macaddr_get returns all FFs"); 5377 xge_hal_device_terminate(hldev); 5378 return XGE_HAL_ERR_INVALID_MAC_ADDRESS; 5379 } 5380 5381 xge_debug_device(XGE_TRACE, 5382 "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x", 5383 hldev->macaddr[0][0], hldev->macaddr[0][1], 5384 hldev->macaddr[0][2], hldev->macaddr[0][3], 5385 hldev->macaddr[0][4], hldev->macaddr[0][5]); 5386 5387 status = __hal_stats_initialize(&hldev->stats, hldev); 5388 if (status != XGE_HAL_OK) { 5389 xge_debug_device(XGE_ERR, 5390 "__hal_stats_initialize failed"); 5391 
xge_hal_device_terminate(hldev); 5392 return status; 5393 } 5394 5395 status = __hal_device_hw_initialize(hldev); 5396 if (status != XGE_HAL_OK) { 5397 xge_debug_device(XGE_ERR, 5398 "__hal_device_hw_initialize failed"); 5399 xge_hal_device_terminate(hldev); 5400 return status; 5401 } 5402 hldev->dump_buf=(char*)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE); 5403 if (hldev->dump_buf == NULL) { 5404 xge_debug_device(XGE_ERR, 5405 "__hal_device_hw_initialize failed"); 5406 xge_hal_device_terminate(hldev); 5407 return XGE_HAL_ERR_OUT_OF_MEMORY; 5408 } 5409 5410 5411 /* Xena-only: need to serialize fifo posts across all device fifos */ 5412#if defined(XGE_HAL_TX_MULTI_POST) 5413 xge_os_spin_lock_init(&hldev->xena_post_lock, hldev->pdev); 5414#elif defined(XGE_HAL_TX_MULTI_POST_IRQ) 5415 xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh); 5416#endif 5417 /* Getting VPD data */ 5418 __hal_device_get_vpd_data(hldev); 5419 5420 hldev->is_initialized = 1; 5421 5422 return XGE_HAL_OK; 5423} 5424 5425/** 5426 * xge_hal_device_terminating - Mark the device as 'terminating'. 5427 * @devh: HAL device handle. 5428 * 5429 * Mark the device as 'terminating', going to terminate. Can be used 5430 * to serialize termination with other running processes/contexts. 5431 * 5432 * See also: xge_hal_device_terminate(). 
5433 */ 5434void 5435xge_hal_device_terminating(xge_hal_device_h devh) 5436{ 5437 xge_hal_device_t *hldev = (xge_hal_device_t*)devh; 5438 xge_list_t *item; 5439 xge_hal_channel_t *channel; 5440#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ) 5441 unsigned long flags=0; 5442#endif 5443 5444 /* 5445 * go through each opened tx channel and aquire 5446 * lock, so it will serialize with HAL termination flag 5447 */ 5448 xge_list_for_each(item, &hldev->fifo_channels) { 5449 channel = xge_container_of(item, xge_hal_channel_t, item); 5450#if defined(XGE_HAL_TX_MULTI_RESERVE) 5451 xge_os_spin_lock(&channel->reserve_lock); 5452#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ) 5453 xge_os_spin_lock_irq(&channel->reserve_lock, flags); 5454#endif 5455 5456 channel->terminating = 1; 5457 5458#if defined(XGE_HAL_TX_MULTI_RESERVE) 5459 xge_os_spin_unlock(&channel->reserve_lock); 5460#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ) 5461 xge_os_spin_unlock_irq(&channel->reserve_lock, flags); 5462#endif 5463 } 5464 5465 hldev->terminating = 1; 5466} 5467 5468/** 5469 * xge_hal_device_terminate - Terminate Xframe device. 5470 * @hldev: HAL device handle. 5471 * 5472 * Terminate HAL device. 5473 * 5474 * See also: xge_hal_device_initialize(). 
 */
void
xge_hal_device_terminate(xge_hal_device_t *hldev)
{
	xge_assert(g_xge_hal_driver != NULL);
	xge_assert(hldev != NULL);
	xge_assert(hldev->magic == XGE_HAL_MAGIC);

	/* drain any events still pending on the device queue */
	xge_queue_flush(hldev->queueh);

	hldev->terminating = 1;
	hldev->is_initialized = 0;
	hldev->in_poll = 0;
	/* invalidate the magic so any later use of this handle asserts */
	hldev->magic = XGE_HAL_DEAD;

#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_lock_destroy(&hldev->xena_post_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev);
#endif

	xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating",
	         (unsigned long long)(ulong_t)hldev);

	/* all channels must already be closed (moved back to free list) */
	xge_assert(xge_list_is_empty(&hldev->fifo_channels));
	xge_assert(xge_list_is_empty(&hldev->ring_channels));

	if (hldev->stats.is_initialized) {
	    __hal_stats_terminate(&hldev->stats);
	}

	/* close if open and free all channels */
	while (!xge_list_is_empty(&hldev->free_channels)) {
	    xge_hal_channel_t *channel = (xge_hal_channel_t*)
	                hldev->free_channels.next;

	    xge_assert(!channel->is_open);
	    xge_list_remove(&channel->item);
	    __hal_channel_free(channel);
	}

	if (hldev->queueh) {
	    xge_queue_destroy(hldev->queueh);
	}

	/* spdm_table was allocated as an array of row pointers plus one
	 * contiguous block of entries anchored at spdm_table[0] */
	if (hldev->spdm_table) {
	    xge_os_free(hldev->pdev,
	          hldev->spdm_table[0],
	          (sizeof(xge_hal_spdm_entry_t) *
	            hldev->spdm_max_entries));
	    xge_os_free(hldev->pdev,
	          hldev->spdm_table,
	          (sizeof(xge_hal_spdm_entry_t *) *
	            hldev->spdm_max_entries));
	    xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev);
	    hldev->spdm_table = NULL;
	}

	if (hldev->dump_buf) {
	    xge_os_free(hldev->pdev, hldev->dump_buf,
	          XGE_HAL_DUMP_BUF_SIZE);
	    hldev->dump_buf = NULL;
	}

	/* restore the PCI config space captured at attach time so the
	 * BIOS-programmed values survive driver unload */
	if (hldev->device_id != 0) {
	    int j, pcisize;

	    pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
	               XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
	    for (j = 0; j < pcisize; j++) {
	        xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
	            *((u32*)&hldev->pci_config_space_bios + j));
	    }
	}
}
/**
 * __hal_device_get_vpd_data - Getting vpd_data.
 *
 * @hldev: HAL device handle.
 *
 * Getting product name and serial number from vpd capabilites structure
 *
 */
void
__hal_device_get_vpd_data(xge_hal_device_t *hldev)
{
	u8 * vpd_data;
	u8 data;
	int index = 0, count, fail = 0;
	u8 vpd_addr = XGE_HAL_CARD_XENA_VPD_ADDR;
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	    vpd_addr = XGE_HAL_CARD_HERC_VPD_ADDR;

	/* defaults, reported if the VPD read fails below */
	xge_os_strcpy((char *) hldev->vpd_data.product_name,
	          "10 Gigabit Ethernet Adapter");
	xge_os_strcpy((char *) hldev->vpd_data.serial_num, "not available");

	/* +16 slack: the 'SN' scan below can index a few bytes past
	 * XGE_HAL_VPD_BUFFER_SIZE */
	vpd_data = ( u8*) xge_os_malloc(hldev->pdev, XGE_HAL_VPD_BUFFER_SIZE + 16);
	if ( vpd_data == NULL )
	    return;

	/* read the VPD area 4 bytes at a time through the PCI VPD
	 * capability: write the address, poll the flag byte, read data */
	for (index = 0; index < XGE_HAL_VPD_BUFFER_SIZE; index +=4 ) {
	    xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 2), (u8)index);
	    xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 2), &data);
	    xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 3), 0);
	    for (count = 0; count < 5; count++ ) {
	        xge_os_mdelay(2);
	        xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 3), &data);
	        if (data == XGE_HAL_VPD_READ_COMPLETE)
	            break;
	    }

	    if (count >= 5) {
	        xge_os_printf("ERR, Reading VPD data failed");
	        fail = 1;
	        break;
	    }

	    xge_os_pci_read32(hldev->pdev, hldev->cfgh,(vpd_addr + 4),
	            (u32 *)&vpd_data[index]);
	}

	if(!fail) {

	    /* read serial number of adapter */
	    for (count = 0; count < XGE_HAL_VPD_BUFFER_SIZE; count++) {
	        /* NOTE(review): at count near the buffer end this reads
	         * count+1/count+2 and copies up to vpd_data[count+2]
	         * bytes from count+3 — relies on the +16 slack above;
	         * verify it cannot exceed it */
	        if ((vpd_data[count] == 'S') &&
	            (vpd_data[count + 1] == 'N') &&
	            (vpd_data[count + 2] < XGE_HAL_VPD_LENGTH)) {
	            memset(hldev->vpd_data.serial_num, 0, XGE_HAL_VPD_LENGTH);
	            memcpy(hldev->vpd_data.serial_num, &vpd_data[count + 3],
	                vpd_data[count + 2]);
	            break;
	        }
	    }

	    /* product name: length byte at vpd_data[1], text from [3].
	     * NOTE(review): only vpd_data[1] bytes are zeroed before the
	     * copy, so a shorter VPD name may leave trailing characters
	     * of the default string — confirm intended */
	    if (vpd_data[1] < XGE_HAL_VPD_LENGTH) {
	        memset(hldev->vpd_data.product_name, 0, vpd_data[1]);
	        memcpy(hldev->vpd_data.product_name, &vpd_data[3], vpd_data[1]);
	    }

	}

	xge_os_free(hldev->pdev, vpd_data, XGE_HAL_VPD_BUFFER_SIZE + 16);
}


/**
 * xge_hal_device_handle_tcode - Handle transfer code.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Xframe user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor, see xge_hal_fifo_dtr_next_completed() and
 * xge_hal_ring_dtr_next_completed().
 * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h.
 *
 * Returns: one of the xge_hal_status_e{} enumerated types.
 * XGE_HAL_OK - for success.
 * XGE_HAL_ERR_CRITICAL - when encounters critical error.
 */
xge_hal_status_e
xge_hal_device_handle_tcode (xge_hal_channel_h channelh,
	         xge_hal_dtr_h dtrh, u8 t_code)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;

	/* t_code indexes 16-entry per-code stats arrays below */
	if (t_code > 15) {
	    xge_os_printf("invalid t_code %d", t_code);
	    return XGE_HAL_OK;
	}

	if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
	    hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++;

#if defined(XGE_HAL_DEBUG_BAD_TCODE)
	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
	xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
	XGE_OS_LLXFMT":"XGE_OS_LLXFMT, txdp->control_1,
	txdp->control_2, txdp->buffer_pointer, txdp->host_control);
#endif

	    /* handle link "down" immediately without going through
	     * xge_hal_device_poll() routine. */
	    if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) {
	        /* link is down */
	        if (hldev->link_state != XGE_HAL_LINK_DOWN) {
	            xge_hal_pci_bar0_t *bar0 =
	            (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	            u64 val64;

	            hldev->link_state = XGE_HAL_LINK_DOWN;

	            val64 = xge_os_pio_mem_read64(hldev->pdev,
	                hldev->regh0, &bar0->adapter_control);

	            /* turn off LED */
	            val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
	            xge_os_pio_mem_write64(hldev->pdev,
	                    hldev->regh0, val64,
	                    &bar0->adapter_control);

	            /* notify the upper layer synchronously */
	            g_xge_hal_driver->uld_callbacks.link_down(
	                    hldev->upper_layer_info);
	        }
	    } else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER ||
	           t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) {
	        __hal_device_handle_targetabort(hldev);
	        return XGE_HAL_ERR_CRITICAL;
	    }
	    /* any other TX tcode: drop this packet only */
	    return XGE_HAL_ERR_PKT_DROP;
	} else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
	    hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++;

#if defined(XGE_HAL_DEBUG_BAD_TCODE)
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT
	        ":"XGE_OS_LLXFMT, rxdp->control_1,
	        rxdp->control_2, rxdp->buffer0_ptr,
	        rxdp->host_control);
#endif
	    if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) {
	        hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
	        __hal_device_handle_eccerr(hldev, "rxd_t_code",
	                       (u64)t_code);
	        return XGE_HAL_ERR_CRITICAL;
	    } else if (t_code == XGE_HAL_RXD_T_CODE_PARITY ||
	           t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) {
	        hldev->stats.sw_dev_err_stats.parity_err_cnt++;
	        __hal_device_handle_parityerr(hldev, "rxd_t_code",
	                          (u64)t_code);
	        return XGE_HAL_ERR_CRITICAL;
	    /* do not drop if detected unknown IPv6 extension */
	    } else if (t_code != XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO) {
	        return XGE_HAL_ERR_PKT_DROP;
	    }
	}
	return XGE_HAL_OK;
}

/**
 * xge_hal_device_link_state - Get link state.
 * @devh: HAL device handle.
 * @ls: Link state, see xge_hal_device_link_state_e{}.
 *
 * Get link state.
 * Returns: XGE_HAL_OK.
 * See also: xge_hal_device_link_state_e{}.
 */
xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh,
	        xge_hal_device_link_state_e *ls)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

	xge_assert(ls != NULL);
	/* returns the cached software link state, not a fresh read
	 * from the adapter */
	*ls = hldev->link_state;
	return XGE_HAL_OK;
}

/**
 * xge_hal_device_sched_timer - Configure scheduled device interrupt.
 * @devh: HAL device handle.
 * @interval_us: Time interval, in microseconds.
 * Unlike transmit and receive interrupts,
 * the scheduled interrupt is generated independently of
 * traffic, but purely based on time.
 * @one_shot: 1 - generate scheduled interrupt only once.
 * 0 - generate scheduled interrupt periodically at the specified
 * @interval_us interval.
 *
 * (Re-)configure scheduled interrupt.
Can be called at runtime to change 5750 * the setting, generate one-shot interrupts based on the resource and/or 5751 * traffic conditions, other purposes. 5752 * See also: xge_hal_device_config_t{}. 5753 */ 5754void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us, 5755 int one_shot) 5756{ 5757 u64 val64; 5758 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 5759 xge_hal_pci_bar0_t *bar0 = 5760 (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 5761 unsigned int interval = hldev->config.pci_freq_mherz * interval_us; 5762 5763 interval = __hal_fix_time_ival_herc(hldev, interval); 5764 5765 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 5766 &bar0->scheduled_int_ctrl); 5767 if (interval) { 5768 val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK; 5769 val64 |= XGE_HAL_SCHED_INT_PERIOD(interval); 5770 if (one_shot) { 5771 val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT; 5772 } 5773 val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN; 5774 } else { 5775 val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN; 5776 } 5777 5778 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 5779 val64, &bar0->scheduled_int_ctrl); 5780 5781 xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s", 5782 (unsigned long long)val64, 5783 interval ? "enabled" : "disabled"); 5784} 5785 5786/** 5787 * xge_hal_device_check_id - Verify device ID. 5788 * @devh: HAL device handle. 5789 * 5790 * Verify device ID. 5791 * Returns: one of the xge_hal_card_e{} enumerated types. 5792 * See also: xge_hal_card_e{}. 
5793 */ 5794xge_hal_card_e 5795xge_hal_device_check_id(xge_hal_device_h devh) 5796{ 5797 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 5798 switch (hldev->device_id) { 5799 case XGE_PCI_DEVICE_ID_XENA_1: 5800 case XGE_PCI_DEVICE_ID_XENA_2: 5801 return XGE_HAL_CARD_XENA; 5802 case XGE_PCI_DEVICE_ID_HERC_1: 5803 case XGE_PCI_DEVICE_ID_HERC_2: 5804 return XGE_HAL_CARD_HERC; 5805 case XGE_PCI_DEVICE_ID_TITAN_1: 5806 case XGE_PCI_DEVICE_ID_TITAN_2: 5807 return XGE_HAL_CARD_TITAN; 5808 default: 5809 return XGE_HAL_CARD_UNKNOWN; 5810 } 5811} 5812 5813/** 5814 * xge_hal_device_pci_info_get - Get PCI bus informations such as width, 5815 * frequency, and mode from previously stored values. 5816 * @devh: HAL device handle. 5817 * @pci_mode: pointer to a variable of enumerated type 5818 * xge_hal_pci_mode_e{}. 5819 * @bus_frequency: pointer to a variable of enumerated type 5820 * xge_hal_pci_bus_frequency_e{}. 5821 * @bus_width: pointer to a variable of enumerated type 5822 * xge_hal_pci_bus_width_e{}. 5823 * 5824 * Get pci mode, frequency, and PCI bus width. 5825 * Returns: one of the xge_hal_status_e{} enumerated types. 5826 * XGE_HAL_OK - for success. 5827 * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle. 5828 * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e. 
5829 */ 5830xge_hal_status_e 5831xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode, 5832 xge_hal_pci_bus_frequency_e *bus_frequency, 5833 xge_hal_pci_bus_width_e *bus_width) 5834{ 5835 xge_hal_status_e rc_status; 5836 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 5837 5838 if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) { 5839 rc_status = XGE_HAL_ERR_INVALID_DEVICE; 5840 xge_debug_device(XGE_ERR, 5841 "xge_hal_device_pci_info_get error, rc %d for device %p", 5842 rc_status, hldev); 5843 5844 return rc_status; 5845 } 5846 5847 *pci_mode = hldev->pci_mode; 5848 *bus_frequency = hldev->bus_frequency; 5849 *bus_width = hldev->bus_width; 5850 rc_status = XGE_HAL_OK; 5851 return rc_status; 5852} 5853 5854/** 5855 * xge_hal_reinitialize_hw 5856 * @hldev: private member of the device structure. 5857 * 5858 * This function will soft reset the NIC and re-initalize all the 5859 * I/O registers to the values they had after it's inital initialization 5860 * through the probe function. 5861 */ 5862int xge_hal_reinitialize_hw(xge_hal_device_t * hldev) 5863{ 5864 (void) xge_hal_device_reset(hldev); 5865 if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) { 5866 xge_hal_device_terminate(hldev); 5867 (void) __hal_device_reset(hldev); 5868 return 1; 5869 } 5870 return 0; 5871} 5872 5873 5874/* 5875 * __hal_read_spdm_entry_line 5876 * @hldev: pointer to xge_hal_device_t structure 5877 * @spdm_line: spdm line in the spdm entry to be read. 5878 * @spdm_entry: spdm entry of the spdm_line in the SPDM table. 5879 * @spdm_line_val: Contains the value stored in the spdm line. 5880 * 5881 * SPDM table contains upto a maximum of 256 spdm entries. 5882 * Each spdm entry contains 8 lines and each line stores 8 bytes. 5883 * This function reads the spdm line(addressed by @spdm_line) 5884 * of the spdm entry(addressed by @spdm_entry) in 5885 * the SPDM table. 
5886 */ 5887xge_hal_status_e 5888__hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line, 5889 u16 spdm_entry, u64 *spdm_line_val) 5890{ 5891 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 5892 u64 val64; 5893 5894 val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE | 5895 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) | 5896 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry); 5897 5898 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 5899 &bar0->rts_rth_spdm_mem_ctrl); 5900 5901 /* poll until done */ 5902 if (__hal_device_register_poll(hldev, 5903 &bar0->rts_rth_spdm_mem_ctrl, 0, 5904 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE, 5905 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 5906 5907 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 5908 } 5909 5910 *spdm_line_val = xge_os_pio_mem_read64(hldev->pdev, 5911 hldev->regh0, &bar0->rts_rth_spdm_mem_data); 5912 return XGE_HAL_OK; 5913} 5914 5915 5916/* 5917 * __hal_get_free_spdm_entry 5918 * @hldev: pointer to xge_hal_device_t structure 5919 * @spdm_entry: Contains an index to the unused spdm entry in the SPDM table. 5920 * 5921 * This function returns an index of unused spdm entry in the SPDM 5922 * table. 5923 */ 5924static xge_hal_status_e 5925__hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry) 5926{ 5927 xge_hal_status_e status; 5928 u64 spdm_line_val=0; 5929 5930 /* 5931 * Search in the local SPDM table for a free slot. 5932 */ 5933 *spdm_entry = 0; 5934 for(; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) { 5935 if (hldev->spdm_table[*spdm_entry]->in_use) { 5936 break; 5937 } 5938 } 5939 5940 if (*spdm_entry >= hldev->spdm_max_entries) { 5941 return XGE_HAL_ERR_SPDM_TABLE_FULL; 5942 } 5943 5944 /* 5945 * Make sure that the corresponding spdm entry in the SPDM 5946 * table is free. 5947 * Seventh line of the spdm entry contains information about 5948 * whether the entry is free or not. 
5949 */ 5950 if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry, 5951 &spdm_line_val)) != XGE_HAL_OK) { 5952 return status; 5953 } 5954 5955 /* BIT(63) in spdm_line 7 corresponds to entry_enable bit */ 5956 if ((spdm_line_val & BIT(63))) { 5957 /* 5958 * Log a warning 5959 */ 5960 xge_debug_device(XGE_ERR, "Local SPDM table is not " 5961 "consistent with the actual one for the spdm " 5962 "entry %d", *spdm_entry); 5963 return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT; 5964 } 5965 5966 return XGE_HAL_OK; 5967} 5968 5969 5970/* 5971 * __hal_calc_jhash - Calculate Jenkins hash. 5972 * @msg: Jenkins hash algorithm key. 5973 * @length: Length of the key. 5974 * @golden_ratio: Jenkins hash golden ratio. 5975 * @init_value: Jenkins hash initial value. 5976 * 5977 * This function implements the Jenkins based algorithm used for the 5978 * calculation of the RTH hash. 5979 * Returns: Jenkins hash value. 5980 * 5981 */ 5982static u32 5983__hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value) 5984{ 5985 5986 register u32 a,b,c,len; 5987 5988 /* 5989 * Set up the internal state 5990 */ 5991 len = length; 5992 a = b = golden_ratio; /* the golden ratio; an arbitrary value */ 5993 c = init_value; /* the previous hash value */ 5994 5995 /* handle most of the key */ 5996 while (len >= 12) 5997 { 5998 a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16) 5999 + ((u32)msg[3]<<24)); 6000 b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16) 6001 + ((u32)msg[7]<<24)); 6002 c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16) 6003 + ((u32)msg[11]<<24)); 6004 mix(a,b,c); 6005 msg += 12; len -= 12; 6006 } 6007 6008 /* handle the last 11 bytes */ 6009 c += length; 6010 switch(len) /* all the case statements fall through */ 6011 { 6012 case 11: c+= ((u32)msg[10]<<24); 6013 break; 6014 case 10: c+= ((u32)msg[9]<<16); 6015 break; 6016 case 9 : c+= ((u32)msg[8]<<8); 6017 break; 6018 /* the first byte of c is reserved for the length */ 6019 case 8 : b+= ((u32)msg[7]<<24); 
6020 break; 6021 case 7 : b+= ((u32)msg[6]<<16); 6022 break; 6023 case 6 : b+= ((u32)msg[5]<<8); 6024 break; 6025 case 5 : b+= msg[4]; 6026 break; 6027 case 4 : a+= ((u32)msg[3]<<24); 6028 break; 6029 case 3 : a+= ((u32)msg[2]<<16); 6030 break; 6031 case 2 : a+= ((u32)msg[1]<<8); 6032 break; 6033 case 1 : a+= msg[0]; 6034 break; 6035 /* case 0: nothing left to add */ 6036 } 6037 6038 mix(a,b,c); 6039 6040 /* report the result */ 6041 return c; 6042} 6043 6044 6045/** 6046 * xge_hal_spdm_entry_add - Add a new entry to the SPDM table. 6047 * @devh: HAL device handle. 6048 * @src_ip: Source ip address(IPv4/IPv6). 6049 * @dst_ip: Destination ip address(IPv4/IPv6). 6050 * @l4_sp: L4 source port. 6051 * @l4_dp: L4 destination port. 6052 * @is_tcp: Set to 1, if the protocol is TCP. 6053 * 0, if the protocol is UDP. 6054 * @is_ipv4: Set to 1, if the protocol is IPv4. 6055 * 0, if the protocol is IPv6. 6056 * @tgt_queue: Target queue to route the receive packet. 6057 * 6058 * This function add a new entry to the SPDM table. 6059 * 6060 * Returns: XGE_HAL_OK - success. 6061 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled. 6062 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry with in 6063 * the time(timeout). 6064 * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full. 6065 * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry. 6066 * 6067 * See also: xge_hal_spdm_entry_remove{}. 
 */
xge_hal_status_e
xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
	        xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
	        u8 is_tcp, u8 is_ipv4, u8 tgt_queue)
{

	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u32 jhash_value;            /* Jenkins hash of the flow key */
	u32 jhash_init_val;         /* hash seed read from the adapter */
	u32 jhash_golden_ratio;     /* golden ratio read from the adapter */
	u64 val64;
	int off;                    /* running length of the key in msg[] */
	u16 spdm_entry;             /* index of the free SPDM slot to use */
	u8 msg[XGE_HAL_JHASH_MSG_LEN];
	int ipaddr_len;             /* 4 for IPv4, 16 for IPv6 (bytes) */
	xge_hal_status_e status;


	if (!hldev->config.rth_spdm_en) {
	    return XGE_HAL_ERR_SPDM_NOT_ENABLED;
	}

	/* Reject target queues outside the valid ring range. */
	if ((tgt_queue < XGE_HAL_MIN_RING_NUM) ||
	    (tgt_queue > XGE_HAL_MAX_RING_NUM)) {
	    return XGE_HAL_ERR_SPDM_INVALID_ENTRY;
	}


	/*
	 * Calculate the jenkins hash.
	 */
	/*
	 * Create the Jenkins hash algorithm key.
	 * key = {L3SA, L3DA, L4SP, L4DP}, if SPDM is configured to
	 * use L4 information. Otherwise key = {L3SA, L3DA}.
	 */

	if (is_ipv4) {
	    ipaddr_len = 4;   // In bytes
	} else {
	    ipaddr_len = 16;
	}

	/*
	 * Jenkins hash algorithm expects the key in the big endian
	 * format. Since key is the byte array, memcpy won't work in the
	 * case of little endian. So, the current code extracts each
	 * byte starting from MSB and store it in the key.
	 */
	if (is_ipv4) {
	    /* src address bytes go to msg[0..3], dst to msg[4..7] */
	    for (off = 0; off < ipaddr_len; off++) {
	        u32 mask = vBIT32(0xff,(off*8),8);
	        int shift = 32-(off+1)*8;
	        msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift);
	        msg[off+ipaddr_len] =
	            (u8)((dst_ip->ipv4.addr & mask) >> shift);
	    }
	} else {
	    /* IPv6 addresses are stored as two u64 words; walk all
	     * 16 bytes, selecting word off/8 and byte off%8 in it. */
	    for (off = 0; off < ipaddr_len; off++) {
	        int loc = off % 8;
	        u64 mask = vBIT(0xff,(loc*8),8);
	        int shift = 64-(loc+1)*8;

	        msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask)
	                >> shift);
	        msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8]
	                & mask) >> shift);
	    }
	}

	off = (2*ipaddr_len);

	/* Optionally append L4 source/destination ports, MSB first. */
	if (hldev->config.rth_spdm_use_l4) {
	    msg[off] = (u8)((l4_sp & 0xff00) >> 8);
	    msg[off + 1] = (u8)(l4_sp & 0xff);
	    msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8);
	    msg[off + 3] = (u8)(l4_dp & 0xff);
	    off += 4;
	}

	/*
	 * Calculate jenkins hash for this configuration.
	 * The seed values live in rts_rth_jhash_cfg: golden ratio in the
	 * upper 32 bits, initial value in the lower 32 bits.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev,
	        hldev->regh0,
	        &bar0->rts_rth_jhash_cfg);
	jhash_golden_ratio = (u32)(val64 >> 32);
	jhash_init_val = (u32)(val64 & 0xffffffff);

	jhash_value = __hal_calc_jhash(msg, off,
	        jhash_golden_ratio,
	        jhash_init_val);

	xge_os_spin_lock(&hldev->spdm_lock);

	/*
	 * Locate a free slot in the SPDM table. To avoid a search in the
	 * actual SPDM table, which is very expensive in terms of time,
	 * we are maintaining a local copy of the table and the search for
	 * the free entry is performed in the local table.
	 */
	if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry))
	        != XGE_HAL_OK) {
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return status;
	}

	/*
	 * Add this entry to the SPDM table
	 */
	status = __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp,
	          is_tcp, is_ipv4, tgt_queue,
	          jhash_value, /* calculated jhash */
	          spdm_entry);

	xge_os_spin_unlock(&hldev->spdm_lock);

	return status;
}

/**
 * xge_hal_spdm_entry_remove - Remove an entry from the SPDM table.
 * @devh: HAL device handle.
 * @src_ip: Source ip address(IPv4/IPv6).
 * @dst_ip: Destination ip address(IPv4/IPv6).
 * @l4_sp: L4 source port.
 * @l4_dp: L4 destination port.
 * @is_tcp: Set to 1, if the protocol is TCP.
 *          0, if the protocol is UDP.
 * @is_ipv4: Set to 1, if the protocol is IPv4.
 *          0, if the protocol is IPv6.
 *
 * This function removes an entry from the SPDM table.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry within
 * the time(timeout).
 * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM
 * table.
 *
 * See also: xge_hal_spdm_entry_add{}.
 */
xge_hal_status_e
xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
	        xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
	        u8 is_tcp, u8 is_ipv4)
{

	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	u16 spdm_entry;              /* index of the matching local entry */
	xge_hal_status_e status;
	u64 spdm_line_arr[8];        /* the 8 64-bit lines of the hw entry */
	u8 line_no;
	u8 spdm_is_tcp;              /* fields decoded from hw line 0 ... */
	u8 spdm_is_ipv4;
	u16 spdm_l4_sp;
	u16 spdm_l4_dp;

	if (!hldev->config.rth_spdm_en) {
	    return XGE_HAL_ERR_SPDM_NOT_ENABLED;
	}

	xge_os_spin_lock(&hldev->spdm_lock);

	/*
	 * Poll the rxpic_int_reg register until spdm ready bit is set or
	 * timeout happens.
	 */
	if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
	        XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {

	    /* upper layer may require to repeat */
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	/*
	 * Clear the SPDM READY bit.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	              &bar0->rxpic_int_reg);
	val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	              &bar0->rxpic_int_reg);

	/*
	 * Search in the local SPDM table to get the index of the
	 * corresponding entry in the SPDM table.
	 */
	spdm_entry = 0;
	for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) {
	    /* Skip free entries and entries whose protocol/port
	     * tuple does not match the one being removed. */
	    if ((!hldev->spdm_table[spdm_entry]->in_use) ||
	        (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) ||
	        (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) ||
	        (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) ||
	        (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) {
	        continue;
	    }

	    /*
	     * Compare the src/dst IP addresses of source and target
	     */
	    if (is_ipv4) {
	        if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr
	             != src_ip->ipv4.addr) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr
	             != dst_ip->ipv4.addr)) {
	            continue;
	        }
	    } else {
	        /* IPv6 addresses are two u64 words each. */
	        if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0]
	             != src_ip->ipv6.addr[0]) ||
	            (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1]
	             != src_ip->ipv6.addr[1]) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0]
	             != dst_ip->ipv6.addr[0]) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1]
	             != dst_ip->ipv6.addr[1])) {
	            continue;
	        }
	    }
	    break;
	}

	if (spdm_entry >= hldev->spdm_max_entries) {
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND;
	}

	/*
	 * Retrieve the corresponding entry from the SPDM table and
	 * make sure that the data is consistent.
	 */
	for(line_no = 0; line_no < 8; line_no++) {

	    /*
	     * SPDM line 2,3,4 are valid only for IPv6 entry.
	     * SPDM line 5 & 6 are reserved. We don't have to
	     * read these entries in the above cases.
	     */
	    if (((is_ipv4) &&
	        ((line_no == 2)||(line_no == 3)||(line_no == 4))) ||
	         (line_no == 5) ||
	         (line_no == 6)) {
	        continue;
	    }

	    if ((status = __hal_read_spdm_entry_line(
	            hldev,
	            line_no,
	            spdm_entry,
	            &spdm_line_arr[line_no]))
	            != XGE_HAL_OK) {
	        xge_os_spin_unlock(&hldev->spdm_lock);
	        return status;
	    }
	}

	/*
	 * Seventh line of the spdm entry contains the entry_enable
	 * bit. Make sure that the entry_enable bit of this spdm entry
	 * is set.
	 * To remove an entry from the SPDM table, reset this
	 * bit.
	 */
	if (!(spdm_line_arr[7] & BIT(63))) {
	    /*
	     * Log a warning
	     */
	    xge_debug_device(XGE_ERR, "Local SPDM table is not "
	        "consistent with the actual one for the spdm "
	        "entry %d ", spdm_entry);
	    goto err_exit;
	}

	/*
	 * Retrieve the L4 SP/DP, src/dst ip addresses from the SPDM
	 * table and do a comparison.
	 */
	spdm_is_tcp = (u8)((spdm_line_arr[0] & BIT(59)) >> 4);
	spdm_is_ipv4 = (u8)(spdm_line_arr[0] & BIT(63));
	spdm_l4_sp = (u16)(spdm_line_arr[0] >> 48);
	spdm_l4_dp = (u16)((spdm_line_arr[0] >> 32) & 0xffff);


	if ((spdm_is_tcp != is_tcp) ||
	    (spdm_is_ipv4 != is_ipv4) ||
	    (spdm_l4_sp != l4_sp) ||
	    (spdm_l4_dp != l4_dp)) {
	    /*
	     * Log a warning
	     */
	    xge_debug_device(XGE_ERR, "Local SPDM table is not "
	        "consistent with the actual one for the spdm "
	        "entry %d ", spdm_entry);
	    goto err_exit;
	}

	if (is_ipv4) {
	    /* Upper 32 bits of spdm_line(64 bit) contains the
	     * src IPv4 address. Lower 32 bits of spdm_line
	     * contains the destination IPv4 address.
	     */
	    u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32);
	    u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff);

	    if ((temp_src_ip != src_ip->ipv4.addr) ||
	        (temp_dst_ip != dst_ip->ipv4.addr)) {
	        xge_debug_device(XGE_ERR, "Local SPDM table is not "
	            "consistent with the actual one for the spdm "
	            "entry %d ", spdm_entry);
	        goto err_exit;
	    }

	} else {
	    /*
	     * SPDM line 1 & 2 contains the src IPv6 address.
	     * SPDM line 3 & 4 contains the dst IPv6 address.
	     */
	    if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) ||
	        (spdm_line_arr[2] != src_ip->ipv6.addr[1]) ||
	        (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) ||
	        (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) {

	        /*
	         * Log a warning
	         */
	        xge_debug_device(XGE_ERR, "Local SPDM table is not "
	            "consistent with the actual one for the spdm "
	            "entry %d ", spdm_entry);
	        goto err_exit;
	    }
	}

	/*
	 * Reset the entry_enable bit to zero
	 */
	spdm_line_arr[7] &= ~BIT(63);

	/* Each SPDM entry is 64 bytes (8 lines x 8 bytes); write back
	 * line 7 of this entry into the SPDM memory region. */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        spdm_line_arr[7],
	        (void *)((char *)hldev->spdm_mem_base +
	        (spdm_entry * 64) + (7 * 8)));

	/*
	 * Wait for the operation to be completed.
	 */
	if (__hal_device_register_poll(hldev,
	        &bar0->rxpic_int_reg, 1,
	        XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	/*
	 * Make the corresponding spdm entry in the local SPDM table
	 * available for future use.
	 */
	hldev->spdm_table[spdm_entry]->in_use = 0;
	xge_os_spin_unlock(&hldev->spdm_lock);

	return XGE_HAL_OK;

err_exit:
	xge_os_spin_unlock(&hldev->spdm_lock);
	return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
}

/*
 * __hal_device_rti_set
 * @ring_qid: The post_qid of the ring.
 * @channel: HAL channel of the ring.
 *
 * This function stores the RTI value associated for the MSI and
 * also unmasks this particular RTI in the rti_mask register.
 */
static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel)
{
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
	u64 val64;

	/* Remember the RTI index on the channel only for MSI/MSI-X. */
	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
	    hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
	    channel->rti = (u8)ring_qid;

	/* Unmask this ring's receive-traffic interrupt bit. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->rx_traffic_mask);
	val64 &= ~BIT(ring_qid);
	xge_os_pio_mem_write64(hldev->pdev,
	        hldev->regh0, val64,
	        &bar0->rx_traffic_mask);
}

/*
 * __hal_device_tti_set
 * @fifo_qid: The post_qid of the FIFO.
 * @channel: HAL channel of the FIFO.
 *
 * This function stores the TTI value associated for the MSI and
 * also unmasks this particular TTI in the tti_mask register.
6476 */ 6477static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel) 6478{ 6479 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh; 6480 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 6481 u64 val64; 6482 6483 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI || 6484 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) 6485 channel->tti = (u8)fifo_qid; 6486 6487 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6488 &bar0->tx_traffic_mask); 6489 val64 &= ~BIT(fifo_qid); 6490 xge_os_pio_mem_write64(hldev->pdev, 6491 hldev->regh0, val64, 6492 &bar0->tx_traffic_mask); 6493} 6494 6495/** 6496 * xge_hal_channel_msi_set - Associate a RTI with a ring or TTI with a 6497 * FIFO for a given MSI. 6498 * @channelh: HAL channel handle. 6499 * @msi: MSI Number associated with the channel. 6500 * @msi_msg: The MSI message associated with the MSI number above. 6501 * 6502 * This API will associate a given channel (either Ring or FIFO) with the 6503 * given MSI number. It will alo program the Tx_Mat/Rx_Mat tables in the 6504 * hardware to indicate this association to the hardware. 
6505 */ 6506xge_hal_status_e 6507xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg) 6508{ 6509 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; 6510 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh; 6511 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 6512 u64 val64; 6513 6514 channel->msi_msg = msi_msg; 6515 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { 6516 int ring = channel->post_qid; 6517 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d," 6518 " MSI: %d", channel->msi_msg, ring, msi); 6519 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6520 &bar0->rx_mat); 6521 val64 |= XGE_HAL_SET_RX_MAT(ring, msi); 6522 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6523 &bar0->rx_mat); 6524 __hal_device_rti_set(ring, channel); 6525 } else { 6526 int fifo = channel->post_qid; 6527 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d," 6528 " MSI: %d", channel->msi_msg, fifo, msi); 6529 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6530 &bar0->tx_mat[0]); 6531 val64 |= XGE_HAL_SET_TX_MAT(fifo, msi); 6532 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6533 &bar0->tx_mat[0]); 6534 __hal_device_tti_set(fifo, channel); 6535 } 6536 6537 return XGE_HAL_OK; 6538} 6539 6540/** 6541 * xge_hal_mask_msix - Begin IRQ processing. 6542 * @hldev: HAL device handle. 6543 * @msi_id: MSI ID 6544 * 6545 * The function masks the msix interrupt for the given msi_id 6546 * 6547 * Note: 6548 * 6549 * Returns: 0, 6550 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range 6551 * status. 
6552 * See also: 6553 */ 6554xge_hal_status_e 6555xge_hal_mask_msix(xge_hal_device_h devh, int msi_id) 6556{ 6557 xge_hal_status_e status = XGE_HAL_OK; 6558 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 6559 u32 *bar2 = (u32 *)hldev->bar2; 6560 u32 val32; 6561 6562 xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES); 6563 6564 val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]); 6565 val32 |= 1; 6566 xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]); 6567 return status; 6568} 6569 6570/** 6571 * xge_hal_mask_msix - Begin IRQ processing. 6572 * @hldev: HAL device handle. 6573 * @msi_id: MSI ID 6574 * 6575 * The function masks the msix interrupt for the given msi_id 6576 * 6577 * Note: 6578 * 6579 * Returns: 0, 6580 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range 6581 * status. 6582 * See also: 6583 */ 6584xge_hal_status_e 6585xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id) 6586{ 6587 xge_hal_status_e status = XGE_HAL_OK; 6588 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 6589 u32 *bar2 = (u32 *)hldev->bar2; 6590 u32 val32; 6591 6592 xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES); 6593 6594 val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]); 6595 val32 &= ~1; 6596 xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]); 6597 return status; 6598} 6599 6600/* 6601 * __hal_set_msix_vals 6602 * @devh: HAL device handle. 6603 * @msix_value: 32bit MSI-X value transferred across PCI to @msix_address. 6604 * Filled in by this function. 6605 * @msix_address: 32bit MSI-X DMA address. 6606 * Filled in by this function. 6607 * @msix_idx: index that corresponds to the (@msix_value, @msix_address) 6608 * entry in the table of MSI-X (value, address) pairs. 6609 * 6610 * This function will program the hardware associating the given 6611 * address/value cobination to the specified msi number. 
6612 */ 6613static void __hal_set_msix_vals (xge_hal_device_h devh, 6614 u32 *msix_value, 6615 u64 *msix_addr, 6616 int msix_idx) 6617{ 6618 int cnt = 0; 6619 6620 xge_hal_device_t *hldev = (xge_hal_device_t*)devh; 6621 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 6622 u64 val64; 6623 6624 val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE; 6625 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 6626 (u32)(val64 >> 32), &bar0->xmsi_access); 6627 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, 6628 (u32)(val64), &bar0->xmsi_access); 6629 do { 6630 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6631 &bar0->xmsi_access); 6632 if (val64 & XGE_HAL_XMSI_STROBE) 6633 break; 6634 cnt++; 6635 xge_os_mdelay(20); 6636 } while(cnt < 5); 6637 *msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6638 &bar0->xmsi_data)); 6639 *msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6640 &bar0->xmsi_address); 6641} 6642 6643/** 6644 * xge_hal_channel_msix_set - Associate MSI-X with a channel. 6645 * @channelh: HAL channel handle. 6646 * @msix_idx: index that corresponds to a particular (@msix_value, 6647 * @msix_address) entry in the MSI-X table. 6648 * 6649 * This API associates a given channel (either Ring or FIFO) with the 6650 * given MSI-X number. It programs the Xframe's Tx_Mat/Rx_Mat tables 6651 * to indicate this association. 6652 */ 6653xge_hal_status_e 6654xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx) 6655{ 6656 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; 6657 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh; 6658 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 6659 u64 val64; 6660 6661 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { 6662 /* Currently Ring and RTI is one on one. 
*/ 6663 int ring = channel->post_qid; 6664 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6665 &bar0->rx_mat); 6666 val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx); 6667 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6668 &bar0->rx_mat); 6669 __hal_device_rti_set(ring, channel); 6670 hldev->config.fifo.queue[channel->post_qid].intr_vector = 6671 msix_idx; 6672 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { 6673 int fifo = channel->post_qid; 6674 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6675 &bar0->tx_mat[0]); 6676 val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx); 6677 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6678 &bar0->tx_mat[0]); 6679 __hal_device_tti_set(fifo, channel); 6680 hldev->config.ring.queue[channel->post_qid].intr_vector = 6681 msix_idx; 6682 } 6683 channel->msix_idx = msix_idx; 6684 __hal_set_msix_vals(hldev, &channel->msix_data, 6685 &channel->msix_address, 6686 channel->msix_idx); 6687 6688 return XGE_HAL_OK; 6689} 6690 6691#if defined(XGE_HAL_CONFIG_LRO) 6692/** 6693 * xge_hal_lro_terminate - Terminate lro resources. 6694 * @lro_scale: Amount of lro memory. 6695 * @hldev: Hal device structure. 6696 * 6697 */ 6698void 6699xge_hal_lro_terminate(u32 lro_scale, 6700 xge_hal_device_t *hldev) 6701{ 6702} 6703 6704/** 6705 * xge_hal_lro_init - Initiate lro resources. 6706 * @lro_scale: Amount of lro memory. 6707 * @hldev: Hal device structure. 6708 * Note: For time being I am using only one LRO per device. Later on size 6709 * will be increased. 
6710 */ 6711 6712xge_hal_status_e 6713xge_hal_lro_init(u32 lro_scale, 6714 xge_hal_device_t *hldev) 6715{ 6716 int i; 6717 6718 if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE) 6719 hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE; 6720 6721 if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE) 6722 hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN; 6723 6724 for (i=0; i < XGE_HAL_MAX_RING_NUM; i++) 6725 { 6726 xge_os_memzero(hldev->lro_desc[i].lro_pool, 6727 sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS); 6728 6729 hldev->lro_desc[i].lro_next_idx = 0; 6730 hldev->lro_desc[i].lro_recent = NULL; 6731 } 6732 6733 return XGE_HAL_OK; 6734} 6735#endif 6736 6737 6738/** 6739 * xge_hal_device_poll - HAL device "polling" entry point. 6740 * @devh: HAL device. 6741 * 6742 * HAL "polling" entry point. Note that this is part of HAL public API. 6743 * Upper-Layer driver _must_ periodically poll HAL via 6744 * xge_hal_device_poll(). 6745 * 6746 * HAL uses caller's execution context to serially process accumulated 6747 * slow-path events, such as link state changes and hardware error 6748 * indications. 6749 * 6750 * The rate of polling could be somewhere between 500us to 10ms, 6751 * depending on requirements (e.g., the requirement to support fail-over 6752 * could mean that 500us or even 100us polling interval need to be used). 6753 * 6754 * The need and motivation for external polling includes 6755 * 6756 * - remove the error-checking "burden" from the HAL interrupt handler 6757 * (see xge_hal_device_handle_irq()); 6758 * 6759 * - remove the potential source of portability issues by _not_ 6760 * implementing separate polling thread within HAL itself. 6761 * 6762 * See also: xge_hal_event_e{}, xge_hal_driver_config_t{}. 6763 * Usage: See ex_slow_path{}. 
 */
void
xge_hal_device_poll(xge_hal_device_h devh)
{
	/* Stack buffer large enough for a queue item plus its payload. */
	unsigned char item_buf[sizeof(xge_queue_item_t) +
	        XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
	xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
	xge_queue_status_e qstatus;
	xge_hal_status_e hstatus;
	int i = 0;
	int queue_has_critical_event = 0;
	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;

	xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
	         XGE_DEFAULT_EVENT_MAX_DATA_SIZE));

_again:
	/* Bail out if the device is gone or being torn down. */
	if (!hldev->is_initialized ||
	    hldev->terminating ||
	    hldev->magic != XGE_HAL_MAGIC)
	    return;

	if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000)
	{
	    /*
	     * Wait for an Hour
	     */
	    hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++;
	} else {
	    /*
	     * Logging Error messages in the excess temperature,
	     * Bias current, laser output for three cycle
	     */
	    __hal_updt_stats_xpak(hldev);
	    hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0;
	}

	if (!queue_has_critical_event)
	    queue_has_critical_event =
	        __queue_get_reset_critical(hldev->queueh);

	hldev->in_poll = 1;
	/* Consume at most XGE_HAL_DRIVER_QUEUE_CONSUME_MAX events per
	 * poll cycle, unless a critical event forces draining. */
	while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) {

	    qstatus = xge_queue_consume(hldev->queueh,
	              XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
	              item);
	    if (qstatus == XGE_QUEUE_IS_EMPTY)
	        break;

	    xge_debug_queue(XGE_TRACE,
	         "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x"
	         XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type,
	         (u64)(ulong_t)item->context);

	    /* Re-check device validity: a ULD callback in a previous
	     * iteration could have torn the device down. */
	    if (!hldev->is_initialized ||
	        hldev->magic != XGE_HAL_MAGIC) {
	        hldev->in_poll = 0;
	        return;
	    }

	    switch (item->event_type) {
	    case XGE_HAL_EVENT_LINK_IS_UP: {
	        /* Suppress link notifications while a critical event
	         * is pending. */
	        if (!queue_has_critical_event &&
	            g_xge_hal_driver->uld_callbacks.link_up) {
	            g_xge_hal_driver->uld_callbacks.link_up(
	                    hldev->upper_layer_info);
	            hldev->link_state = XGE_HAL_LINK_UP;
	        }
	    } break;
	    case XGE_HAL_EVENT_LINK_IS_DOWN: {
	        if (!queue_has_critical_event &&
	            g_xge_hal_driver->uld_callbacks.link_down) {
	            g_xge_hal_driver->uld_callbacks.link_down(
	                    hldev->upper_layer_info);
	            hldev->link_state = XGE_HAL_LINK_DOWN;
	        }
	    } break;
	    case XGE_HAL_EVENT_SERR:
	    case XGE_HAL_EVENT_ECCERR:
	    case XGE_HAL_EVENT_PARITYERR:
	    case XGE_HAL_EVENT_TARGETABORT:
	    case XGE_HAL_EVENT_SLOT_FREEZE: {
	        void *item_data = xge_queue_item_data(item);
	        xge_hal_event_e event_type = item->event_type;
	        u64 val64 = *((u64*)item_data);

	        /* A slot freeze supersedes any other critical event. */
	        if (event_type != XGE_HAL_EVENT_SLOT_FREEZE)
	            if (xge_hal_device_is_slot_freeze(hldev))
	                event_type = XGE_HAL_EVENT_SLOT_FREEZE;
	        if (g_xge_hal_driver->uld_callbacks.crit_err) {
	            g_xge_hal_driver->uld_callbacks.crit_err(
	                hldev->upper_layer_info,
	                event_type,
	                val64);
	            /* handle one critical event per poll cycle */
	            hldev->in_poll = 0;
	            return;
	        }
	    } break;
	    default: {
	        xge_debug_queue(XGE_TRACE,
	            "got non-HAL event %d",
	            item->event_type);
	    } break;
	    }

	    /* broadcast this event */
	    if (g_xge_hal_driver->uld_callbacks.event)
	        g_xge_hal_driver->uld_callbacks.event(item);
	}

	/* Give the ULD a chance to veto the hardware poll. */
	if (g_xge_hal_driver->uld_callbacks.before_device_poll) {
	    if (g_xge_hal_driver->uld_callbacks.before_device_poll(
	             hldev) != 0) {
	        hldev->in_poll = 0;
	        return;
	    }
	}

	hstatus = __hal_device_poll(hldev);
	if (g_xge_hal_driver->uld_callbacks.after_device_poll)
	    g_xge_hal_driver->uld_callbacks.after_device_poll(hldev);

	/*
	 * handle critical error right away:
	 * - walk the device queue again
	 * - drop non-critical events, if any
	 * - look for the 1st critical
	 */
	if (hstatus == XGE_HAL_ERR_CRITICAL) {
	    queue_has_critical_event = 1;
	    goto _again;
	}

	hldev->in_poll = 0;
}

/**
 * xge_hal_rts_rth_init - Set enhanced mode for RTS hashing.
 * @hldev: HAL device handle.
 *
 * This function is used to set the adapter to enhanced mode.
 *
 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
 */
void
xge_hal_rts_rth_init(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * Set the receive traffic steering mode from default(classic)
	 * to enhanced.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->rts_ctrl);
	val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        val64, &bar0->rts_ctrl);
}

/**
 * xge_hal_rts_rth_clr - Clear RTS hashing.
 * @hldev: HAL device handle.
 *
 * This function is used to clear all RTS hashing related stuff.
 * It brings the adapter out from enhanced mode to classic mode.
 * It also clears RTS_RTH_CFG register i.e clears hash type, function etc.
 *
 * See also: xge_hal_rts_rth_set(), xge_hal_rts_rth_itable_set().
 */
void
xge_hal_rts_rth_clr(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * Bring the receive traffic steering mode back from enhanced
	 * to the default (classic) mode.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->rts_ctrl);
	val64 &=  ~XGE_HAL_RTS_CTRL_ENHANCED_MODE;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        val64, &bar0->rts_ctrl);
	/* Clear the whole RTH configuration register. */
	val64 = 0;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->rts_rth_cfg);
}

/**
 * xge_hal_rts_rth_set - Set/configure RTS hashing.
 * @hldev: HAL device handle.
 * @def_q: default queue
 * @hash_type: hash type i.e TcpIpV4, TcpIpV6 etc.
6962 * @bucket_size: no of least significant bits to be used for hashing. 6963 * 6964 * Used to set/configure all RTS hashing related stuff. 6965 * - set the steering mode to enhanced. 6966 * - set hash function i.e algo selection. 6967 * - set the default queue. 6968 * 6969 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(). 6970 */ 6971void 6972xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type, 6973 u16 bucket_size) 6974{ 6975 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 6976 u64 val64; 6977 6978 val64 = XGE_HAL_RTS_DEFAULT_Q(def_q); 6979 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6980 &bar0->rts_default_q); 6981 6982 val64 = hash_type; 6983 val64 |= XGE_HAL_RTS_RTH_EN; 6984 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size); 6985 val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS; 6986 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6987 &bar0->rts_rth_cfg); 6988} 6989 6990/** 6991 * xge_hal_rts_rth_start - Start RTS hashing. 6992 * @hldev: HAL device handle. 6993 * 6994 * Used to Start RTS hashing . 6995 * 6996 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start. 6997 */ 6998void 6999xge_hal_rts_rth_start(xge_hal_device_t *hldev) 7000{ 7001 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 7002 u64 val64; 7003 7004 7005 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 7006 &bar0->rts_rth_cfg); 7007 val64 |= XGE_HAL_RTS_RTH_EN; 7008 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7009 &bar0->rts_rth_cfg); 7010} 7011 7012/** 7013 * xge_hal_rts_rth_stop - Stop the RTS hashing. 7014 * @hldev: HAL device handle. 7015 * 7016 * Used to Staop RTS hashing . 7017 * 7018 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start. 
7019 */ 7020void 7021xge_hal_rts_rth_stop(xge_hal_device_t *hldev) 7022{ 7023 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 7024 u64 val64; 7025 7026 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 7027 &bar0->rts_rth_cfg); 7028 val64 &= ~XGE_HAL_RTS_RTH_EN; 7029 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7030 &bar0->rts_rth_cfg); 7031} 7032 7033/** 7034 * xge_hal_rts_rth_itable_set - Set/configure indirection table (IT). 7035 * @hldev: HAL device handle. 7036 * @itable: Pointer to the indirection table 7037 * @itable_size: no of least significant bits to be used for hashing 7038 * 7039 * Used to set/configure indirection table. 7040 * It enables the required no of entries in the IT. 7041 * It adds entries to the IT. 7042 * 7043 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set(). 7044 */ 7045xge_hal_status_e 7046xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size) 7047{ 7048 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 7049 u64 val64; 7050 u32 idx; 7051 7052 for (idx = 0; idx < itable_size; idx++) { 7053 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN | 7054 XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]); 7055 7056 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7057 &bar0->rts_rth_map_mem_data); 7058 7059 /* execute */ 7060 val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE | 7061 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE | 7062 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx)); 7063 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7064 &bar0->rts_rth_map_mem_ctrl); 7065 7066 /* poll until done */ 7067 if (__hal_device_register_poll(hldev, 7068 &bar0->rts_rth_map_mem_ctrl, 0, 7069 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE, 7070 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 7071 /* upper layer may require to repeat */ 7072 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 7073 } 7074 } 7075 7076 return XGE_HAL_OK; 7077} 7078 7079 7080/** 7081 * xge_hal_device_rts_rth_key_set 
- Configure 40byte secret for hash calc. 7082 * 7083 * @hldev: HAL device handle. 7084 * @KeySize: Number of 64-bit words 7085 * @Key: upto 40-byte array of 8-bit values 7086 * This function configures the 40-byte secret which is used for hash 7087 * calculation. 7088 * 7089 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set(). 7090 */ 7091void 7092xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key) 7093{ 7094 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *) hldev->bar0; 7095 u64 val64; 7096 u32 entry, nreg, i; 7097 7098 entry = 0; 7099 nreg = 0; 7100 7101 while( KeySize ) { 7102 val64 = 0; 7103 for ( i = 0; i < 8 ; i++) { 7104 /* Prepare 64-bit word for 'nreg' containing 8 keys. */ 7105 if (i) 7106 val64 <<= 8; 7107 val64 |= Key[entry++]; 7108 } 7109 7110 KeySize--; 7111 7112 /* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/ 7113 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7114 &bar0->rts_rth_hash_mask[nreg++]); 7115 } 7116 7117 while( nreg < 5 ) { 7118 /* Clear the rest if key is less than 40 bytes */ 7119 val64 = 0; 7120 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7121 &bar0->rts_rth_hash_mask[nreg++]); 7122 } 7123} 7124 7125 7126/** 7127 * xge_hal_device_is_closed - Device is closed 7128 * 7129 * @devh: HAL device handle. 
 */
int
xge_hal_device_is_closed(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

	/* The device counts as closed once both per-device channel lists
	 * (fifo and ring) are empty, i.e. every channel has been torn down. */
	if (xge_list_is_empty(&hldev->fifo_channels) &&
	    xge_list_is_empty(&hldev->ring_channels))
	    return 1;

	return 0;
}

/*
 * xge_hal_device_rts_section_enable - Enable the RTS MAC section that
 * contains the given MAC-address slot.
 * @devh: HAL device handle.
 * @index: MAC address slot (0-based); must be below the per-card maximum.
 *
 * Each SECTn_EN bit in RTS_MAC_CFG covers a group of 32 address slots,
 * hence section = index / 32.  Returns XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES
 * when index is out of range, XGE_HAL_OK otherwise.
 */
xge_hal_status_e
xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index)
{
	u64 val64;
	int section;
	int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;

	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* Herc cards support a larger MAC-address table than Xena. */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	    max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;

	if ( index >= max_addr )
	    return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;

	/*
	 * Calculate the section value
	 */
	section = index / 32;

	xge_debug_device(XGE_TRACE, "the Section value is %d ", section);

	/* Read-modify-write so previously enabled sections stay enabled. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->rts_mac_cfg);
	switch(section)
	{
	    case 0:
	        val64 |=  XGE_HAL_RTS_MAC_SECT0_EN;
	        break;
	    case 1:
	        val64 |=  XGE_HAL_RTS_MAC_SECT1_EN;
	        break;
	    case 2:
	        val64 |=  XGE_HAL_RTS_MAC_SECT2_EN;
	        break;
	    case 3:
	        val64 |=  XGE_HAL_RTS_MAC_SECT3_EN;
	        break;
	    case 4:
	        val64 |=  XGE_HAL_RTS_MAC_SECT4_EN;
	        break;
	    case 5:
	        val64 |=  XGE_HAL_RTS_MAC_SECT5_EN;
	        break;
	    case 6:
	        val64 |=  XGE_HAL_RTS_MAC_SECT6_EN;
	        break;
	    case 7:
	        val64 |=  XGE_HAL_RTS_MAC_SECT7_EN;
	        break;
	    default:
	        /* NOTE(review): presumably unreachable while max_addr <= 256
	         * (index < max_addr implies section <= 7) — confirm the
	         * XGE_HAL_MAX_MAC_ADDRESSES* values.  On this path val64 is
	         * written back unchanged and XGE_HAL_OK is still returned. */
	        xge_debug_device(XGE_ERR, "Invalid Section value %d "
	                , section);
	}

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        val64, &bar0->rts_mac_cfg);
	return XGE_HAL_OK;
}


/**
 * xge_hal_fix_rldram_ecc_error
 * @hldev: private member of the device structure.
 *
 * SXE-02-010.
This function will turn OFF the ECC error reporting for the 7210 * interface bet'n external Micron RLDRAM II device and memory controller. 7211 * The error would have been reported in RLD_ECC_DB_ERR_L and RLD_ECC_DB_ERR_U 7212 * fields of MC_ERR_REG register. Issue reported by HP-Unix folks during the 7213 * qualification of Herc. 7214 */ 7215xge_hal_status_e 7216xge_hal_fix_rldram_ecc_error(xge_hal_device_t * hldev) 7217{ 7218 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 7219 u64 val64; 7220 7221 // Enter Test Mode. 7222 val64 = XGE_HAL_MC_RLDRAM_TEST_MODE; 7223 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7224 &bar0->mc_rldram_test_ctrl); 7225 7226 // Enable fg/bg tests. 7227 val64 = 0x0100000000000000ULL; 7228 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7229 &bar0->mc_driver); 7230 7231 // Enable RLDRAM configuration. 7232 val64 = 0x0000000000017B00ULL; 7233 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7234 &bar0->mc_rldram_mrs); 7235 7236 // Enable RLDRAM queues. 7237 val64 = 0x0000000001017B00ULL; 7238 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7239 &bar0->mc_rldram_mrs); 7240 7241 // Setup test ranges 7242 val64 = 0x00000000001E0100ULL; 7243 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7244 &bar0->mc_rldram_test_add); 7245 7246 val64 = 0x00000100001F0100ULL; 7247 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7248 &bar0->mc_rldram_test_add_bkg); 7249 // Start Reads. 
7250 val64 = 0x0001000000010000ULL; 7251 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7252 &bar0->mc_rldram_test_ctrl); 7253 7254 if (__hal_device_register_poll(hldev, &bar0->mc_rldram_test_ctrl, 1, 7255 XGE_HAL_MC_RLDRAM_TEST_DONE, 7256 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK){ 7257 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 7258 } 7259 7260 // Exit test mode 7261 val64 = 0x0000000000000000ULL; 7262 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7263 &bar0->mc_rldram_test_ctrl); 7264 7265 return XGE_HAL_OK; 7266} 7267