/*-
 * Copyright (c) 2002-2007 Neterion, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
25 * 26 * $FreeBSD$ 27 */ 28 29#include <dev/nxge/include/xgehal-device.h> 30#include <dev/nxge/include/xgehal-channel.h> 31#include <dev/nxge/include/xgehal-fifo.h> 32#include <dev/nxge/include/xgehal-ring.h> 33#include <dev/nxge/include/xgehal-driver.h> 34#include <dev/nxge/include/xgehal-mgmt.h> 35 36#define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL 37#define END_SIGN 0x0 38 39#ifdef XGE_HAL_HERC_EMULATION 40#undef XGE_HAL_PROCESS_LINK_INT_IN_ISR 41#endif 42 43/* 44 * Jenkins hash key length(in bytes) 45 */ 46#define XGE_HAL_JHASH_MSG_LEN 50 47 48/* 49 * mix(a,b,c) used in Jenkins hash algorithm 50 */ 51#define mix(a,b,c) { \ 52 a -= b; a -= c; a ^= (c>>13); \ 53 b -= c; b -= a; b ^= (a<<8); \ 54 c -= a; c -= b; c ^= (b>>13); \ 55 a -= b; a -= c; a ^= (c>>12); \ 56 b -= c; b -= a; b ^= (a<<16); \ 57 c -= a; c -= b; c ^= (b>>5); \ 58 a -= b; a -= c; a ^= (c>>3); \ 59 b -= c; b -= a; b ^= (a<<10); \ 60 c -= a; c -= b; c ^= (b>>15); \ 61} 62 63 64/* 65 * __hal_device_event_queued 66 * @data: pointer to xge_hal_device_t structure 67 * 68 * Will be called when new event succesfully queued. 69 */ 70void 71__hal_device_event_queued(void *data, int event_type) 72{ 73 xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC); 74 if (g_xge_hal_driver->uld_callbacks.event_queued) { 75 g_xge_hal_driver->uld_callbacks.event_queued(data, event_type); 76 } 77} 78 79/* 80 * __hal_pio_mem_write32_upper 81 * 82 * Endiann-aware implementation of xge_os_pio_mem_write32(). 83 * Since Xframe has 64bit registers, we differintiate uppper and lower 84 * parts. 85 */ 86void 87__hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr) 88{ 89#if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN) 90 xge_os_pio_mem_write32(pdev, regh, val, addr); 91#else 92 xge_os_pio_mem_write32(pdev, regh, val, (void *)((char *)addr + 4)); 93#endif 94} 95 96/* 97 * __hal_pio_mem_write32_upper 98 * 99 * Endiann-aware implementation of xge_os_pio_mem_write32(). 
100 * Since Xframe has 64bit registers, we differintiate uppper and lower 101 * parts. 102 */ 103void 104__hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val, 105 void *addr) 106{ 107#if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN) 108 xge_os_pio_mem_write32(pdev, regh, val, 109 (void *) ((char *)addr + 4)); 110#else 111 xge_os_pio_mem_write32(pdev, regh, val, addr); 112#endif 113} 114 115/* 116 * __hal_device_register_poll 117 * @hldev: pointer to xge_hal_device_t structure 118 * @reg: register to poll for 119 * @op: 0 - bit reset, 1 - bit set 120 * @mask: mask for logical "and" condition based on %op 121 * @max_millis: maximum time to try to poll in milliseconds 122 * 123 * Will poll certain register for specified amount of time. 124 * Will poll until masked bit is not cleared. 125 */ 126xge_hal_status_e 127__hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg, 128 int op, u64 mask, int max_millis) 129{ 130 u64 val64; 131 int i = 0; 132 xge_hal_status_e ret = XGE_HAL_FAIL; 133 134 xge_os_udelay(10); 135 136 do { 137 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg); 138 if (op == 0 && !(val64 & mask)) 139 return XGE_HAL_OK; 140 else if (op == 1 && (val64 & mask) == mask) 141 return XGE_HAL_OK; 142 xge_os_udelay(100); 143 } while (++i <= 9); 144 145 do { 146 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg); 147 if (op == 0 && !(val64 & mask)) 148 return XGE_HAL_OK; 149 else if (op == 1 && (val64 & mask) == mask) 150 return XGE_HAL_OK; 151 xge_os_udelay(1000); 152 } while (++i < max_millis); 153 154 return ret; 155} 156 157/* 158 * __hal_device_wait_quiescent 159 * @hldev: the device 160 * @hw_status: hw_status in case of error 161 * 162 * Will wait until device is quiescent for some blocks. 
163 */ 164static xge_hal_status_e 165__hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status) 166{ 167 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 168 169 /* poll and wait first */ 170#ifdef XGE_HAL_HERC_EMULATION 171 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1, 172 (XGE_HAL_ADAPTER_STATUS_TDMA_READY | 173 XGE_HAL_ADAPTER_STATUS_RDMA_READY | 174 XGE_HAL_ADAPTER_STATUS_PFC_READY | 175 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY | 176 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT | 177 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY | 178 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY | 179 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK), 180 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS); 181#else 182 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1, 183 (XGE_HAL_ADAPTER_STATUS_TDMA_READY | 184 XGE_HAL_ADAPTER_STATUS_RDMA_READY | 185 XGE_HAL_ADAPTER_STATUS_PFC_READY | 186 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY | 187 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT | 188 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY | 189 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY | 190 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK | 191 XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK), 192 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS); 193#endif 194 195 return xge_hal_device_status(hldev, hw_status); 196} 197 198/** 199 * xge_hal_device_is_slot_freeze 200 * @devh: the device 201 * 202 * Returns non-zero if the slot is freezed. 203 * The determination is made based on the adapter_status 204 * register which will never give all FFs, unless PCI read 205 * cannot go through. 
206 */ 207int 208xge_hal_device_is_slot_freeze(xge_hal_device_h devh) 209{ 210 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 211 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 212 u16 device_id; 213 u64 adapter_status = 214 xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 215 &bar0->adapter_status); 216 xge_os_pci_read16(hldev->pdev,hldev->cfgh, 217 xge_offsetof(xge_hal_pci_config_le_t, device_id), 218 &device_id); 219#ifdef TX_DEBUG 220 if (adapter_status == XGE_HAL_ALL_FOXES) 221 { 222 u64 dummy; 223 dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 224 &bar0->pcc_enable); 225 printf(">>> Slot is frozen!\n"); 226 brkpoint(0); 227 } 228#endif 229 return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff)); 230} 231 232 233/* 234 * __hal_device_led_actifity_fix 235 * @hldev: pointer to xge_hal_device_t structure 236 * 237 * SXE-002: Configure link and activity LED to turn it off 238 */ 239static void 240__hal_device_led_actifity_fix(xge_hal_device_t *hldev) 241{ 242 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 243 u16 subid; 244 u64 val64; 245 246 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 247 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid); 248 249 /* 250 * In the case of Herc, there is a new register named beacon control 251 * is added which was not present in Xena. 252 * Beacon control register in Herc is at the same offset as 253 * gpio control register in Xena. It means they are one and same in 254 * the case of Xena. Also, gpio control register offset in Herc and 255 * Xena is different. 256 * The current register map represents Herc(It means we have 257 * both beacon and gpio control registers in register map). 
258 * WRT transition from Xena to Herc, all the code in Xena which was 259 * using gpio control register for LED handling would have to 260 * use beacon control register in Herc and the rest of the code 261 * which uses gpio control in Xena would use the same register 262 * in Herc. 263 * WRT LED handling(following code), In the case of Herc, beacon 264 * control register has to be used. This is applicable for Xena also, 265 * since it represents the gpio control register in Xena. 266 */ 267 if ((subid & 0xFF) >= 0x07) { 268 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 269 &bar0->beacon_control); 270 val64 |= 0x0000800000000000ULL; 271 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 272 val64, &bar0->beacon_control); 273 val64 = 0x0411040400000000ULL; 274 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 275 (void *) ((u8 *)bar0 + 0x2700)); 276 } 277} 278 279/* Constants for Fixing the MacAddress problem seen mostly on 280 * Alpha machines. 281 */ 282static u64 xena_fix_mac[] = { 283 0x0060000000000000ULL, 0x0060600000000000ULL, 284 0x0040600000000000ULL, 0x0000600000000000ULL, 285 0x0020600000000000ULL, 0x0060600000000000ULL, 286 0x0020600000000000ULL, 0x0060600000000000ULL, 287 0x0020600000000000ULL, 0x0060600000000000ULL, 288 0x0020600000000000ULL, 0x0060600000000000ULL, 289 0x0020600000000000ULL, 0x0060600000000000ULL, 290 0x0020600000000000ULL, 0x0060600000000000ULL, 291 0x0020600000000000ULL, 0x0060600000000000ULL, 292 0x0020600000000000ULL, 0x0060600000000000ULL, 293 0x0020600000000000ULL, 0x0060600000000000ULL, 294 0x0020600000000000ULL, 0x0060600000000000ULL, 295 0x0020600000000000ULL, 0x0000600000000000ULL, 296 0x0040600000000000ULL, 0x0060600000000000ULL, 297 END_SIGN 298}; 299 300/* 301 * __hal_device_fix_mac 302 * @hldev: HAL device handle. 303 * 304 * Fix for all "FFs" MAC address problems observed on Alpha platforms. 
305 */ 306static void 307__hal_device_xena_fix_mac(xge_hal_device_t *hldev) 308{ 309 int i = 0; 310 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 311 312 /* 313 * In the case of Herc, there is a new register named beacon control 314 * is added which was not present in Xena. 315 * Beacon control register in Herc is at the same offset as 316 * gpio control register in Xena. It means they are one and same in 317 * the case of Xena. Also, gpio control register offset in Herc and 318 * Xena is different. 319 * The current register map represents Herc(It means we have 320 * both beacon and gpio control registers in register map). 321 * WRT transition from Xena to Herc, all the code in Xena which was 322 * using gpio control register for LED handling would have to 323 * use beacon control register in Herc and the rest of the code 324 * which uses gpio control in Xena would use the same register 325 * in Herc. 326 * In the following code(xena_fix_mac), beacon control register has 327 * to be used in the case of Xena, since it represents gpio control 328 * register. In the case of Herc, there is no change required. 329 */ 330 while (xena_fix_mac[i] != END_SIGN) { 331 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 332 xena_fix_mac[i++], &bar0->beacon_control); 333 xge_os_mdelay(1); 334 } 335} 336 337/* 338 * xge_hal_device_bcast_enable 339 * @hldev: HAL device handle. 340 * 341 * Enable receiving broadcasts. 342 * The host must first write RMAC_CFG_KEY "key" 343 * register, and then - MAC_CFG register. 
344 */ 345void 346xge_hal_device_bcast_enable(xge_hal_device_h devh) 347{ 348 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 349 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 350 u64 val64; 351 352 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 353 &bar0->mac_cfg); 354 val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE; 355 356 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 357 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 358 359 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 360 (u32)(val64 >> 32), &bar0->mac_cfg); 361 362 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s", 363 (unsigned long long)val64, 364 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled"); 365} 366 367/* 368 * xge_hal_device_bcast_disable 369 * @hldev: HAL device handle. 370 * 371 * Disable receiving broadcasts. 372 * The host must first write RMAC_CFG_KEY "key" 373 * register, and then - MAC_CFG register. 374 */ 375void 376xge_hal_device_bcast_disable(xge_hal_device_h devh) 377{ 378 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 379 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 380 u64 val64; 381 382 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 383 &bar0->mac_cfg); 384 385 val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE); 386 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 387 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 388 389 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 390 (u32)(val64 >> 32), &bar0->mac_cfg); 391 392 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s", 393 (unsigned long long)val64, 394 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled"); 395} 396 397/* 398 * __hal_device_shared_splits_configure 399 * @hldev: HAL device handle. 
400 * 401 * TxDMA will stop Read request if the number of read split had exceeded 402 * the limit set by shared_splits 403 */ 404static void 405__hal_device_shared_splits_configure(xge_hal_device_t *hldev) 406{ 407 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 408 u64 val64; 409 410 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 411 &bar0->pic_control); 412 val64 |= 413 XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits); 414 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 415 &bar0->pic_control); 416 xge_debug_device(XGE_TRACE, "%s", "shared splits configured"); 417} 418 419/* 420 * __hal_device_rmac_padding_configure 421 * @hldev: HAL device handle. 422 * 423 * Configure RMAC frame padding. Depends on configuration, it 424 * can be send to host or removed by MAC. 425 */ 426static void 427__hal_device_rmac_padding_configure(xge_hal_device_t *hldev) 428{ 429 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 430 u64 val64; 431 432 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 433 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); 434 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 435 &bar0->mac_cfg); 436 val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE ); 437 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE ); 438 val64 |= XGE_HAL_MAC_CFG_TMAC_APPEND_PAD; 439 440 /* 441 * If the RTH enable bit is not set, strip the FCS 442 */ 443 if (!hldev->config.rth_en || 444 !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 445 &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) { 446 val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS; 447 } 448 449 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD ); 450 val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM; 451 452 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 453 (u32)(val64 >> 32), (char*)&bar0->mac_cfg); 454 xge_os_mdelay(1); 455 456 xge_debug_device(XGE_TRACE, 457 "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured", 458 (unsigned long long)val64); 459} 460 461/* 462 * 
__hal_device_pause_frames_configure 463 * @hldev: HAL device handle. 464 * 465 * Set Pause threshold. 466 * 467 * Pause frame is generated if the amount of data outstanding 468 * on any queue exceeded the ratio of 469 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 470 */ 471static void 472__hal_device_pause_frames_configure(xge_hal_device_t *hldev) 473{ 474 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 475 int i; 476 u64 val64; 477 478 switch (hldev->config.mac.media) { 479 case XGE_HAL_MEDIA_SR: 480 case XGE_HAL_MEDIA_SW: 481 val64=0xfffbfffbfffbfffbULL; 482 break; 483 case XGE_HAL_MEDIA_LR: 484 case XGE_HAL_MEDIA_LW: 485 val64=0xffbbffbbffbbffbbULL; 486 break; 487 case XGE_HAL_MEDIA_ER: 488 case XGE_HAL_MEDIA_EW: 489 default: 490 val64=0xffbbffbbffbbffbbULL; 491 break; 492 } 493 494 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 495 val64, &bar0->mc_pause_thresh_q0q3); 496 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 497 val64, &bar0->mc_pause_thresh_q4q7); 498 499 /* Set the time value to be inserted in the pause frame generated 500 * by Xframe */ 501 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 502 &bar0->rmac_pause_cfg); 503 if (hldev->config.mac.rmac_pause_gen_en) 504 val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN; 505 else 506 val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN); 507 if (hldev->config.mac.rmac_pause_rcv_en) 508 val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN; 509 else 510 val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN); 511 val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff)); 512 val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time); 513 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 514 &bar0->rmac_pause_cfg); 515 516 val64 = 0; 517 for (i = 0; i<4; i++) { 518 val64 |= 519 (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3) 520 <<(i*2*8)); 521 } 522 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 523 &bar0->mc_pause_thresh_q0q3); 524 525 val64 = 0; 526 for (i = 0; i<4; i++) { 527 val64 |= 528 
(((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7) 529 <<(i*2*8)); 530 } 531 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 532 &bar0->mc_pause_thresh_q4q7); 533 xge_debug_device(XGE_TRACE, "%s", "pause frames configured"); 534} 535 536/* 537 * Herc's clock rate doubled, unless the slot is 33MHz. 538 */ 539unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev, 540 unsigned int time_ival) 541{ 542 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) 543 return time_ival; 544 545 xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC); 546 547 if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN && 548 hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ) 549 time_ival *= 2; 550 551 return time_ival; 552} 553 554 555/* 556 * __hal_device_bus_master_disable 557 * @hldev: HAL device handle. 558 * 559 * Disable bus mastership. 560 */ 561static void 562__hal_device_bus_master_disable (xge_hal_device_t *hldev) 563{ 564 u16 cmd; 565 u16 bus_master = 4; 566 567 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 568 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); 569 cmd &= ~bus_master; 570 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 571 xge_offsetof(xge_hal_pci_config_le_t, command), cmd); 572} 573 574/* 575 * __hal_device_bus_master_enable 576 * @hldev: HAL device handle. 577 * 578 * Disable bus mastership. 579 */ 580static void 581__hal_device_bus_master_enable (xge_hal_device_t *hldev) 582{ 583 u16 cmd; 584 u16 bus_master = 4; 585 586 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 587 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); 588 589 /* already enabled? do nothing */ 590 if (cmd & bus_master) 591 return; 592 593 cmd |= bus_master; 594 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 595 xge_offsetof(xge_hal_pci_config_le_t, command), cmd); 596} 597/* 598 * __hal_device_intr_mgmt 599 * @hldev: HAL device handle. 600 * @mask: mask indicating which Intr block must be modified. 
601 * @flag: if true - enable, otherwise - disable interrupts. 602 * 603 * Disable or enable device interrupts. Mask is used to specify 604 * which hardware blocks should produce interrupts. For details 605 * please refer to Xframe User Guide. 606 */ 607static void 608__hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag) 609{ 610 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 611 u64 val64 = 0, temp64 = 0; 612 u64 gim, gim_saved; 613 614 gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev, 615 hldev->regh0, &bar0->general_int_mask); 616 617 /* Top level interrupt classification */ 618 /* PIC Interrupts */ 619 if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) { 620 /* Enable PIC Intrs in the general intr mask register */ 621 val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/; 622 if (flag) { 623 gim &= ~((u64) val64); 624 temp64 = xge_os_pio_mem_read64(hldev->pdev, 625 hldev->regh0, &bar0->pic_int_mask); 626 627 temp64 &= ~XGE_HAL_PIC_INT_TX; 628#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 629 if (xge_hal_device_check_id(hldev) == 630 XGE_HAL_CARD_HERC) { 631 temp64 &= ~XGE_HAL_PIC_INT_MISC; 632 } 633#endif 634 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 635 temp64, &bar0->pic_int_mask); 636#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 637 if (xge_hal_device_check_id(hldev) == 638 XGE_HAL_CARD_HERC) { 639 /* 640 * Unmask only Link Up interrupt 641 */ 642 temp64 = xge_os_pio_mem_read64(hldev->pdev, 643 hldev->regh0, &bar0->misc_int_mask); 644 temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; 645 xge_os_pio_mem_write64(hldev->pdev, 646 hldev->regh0, temp64, 647 &bar0->misc_int_mask); 648 xge_debug_device(XGE_TRACE, 649 "unmask link up flag "XGE_OS_LLXFMT, 650 (unsigned long long)temp64); 651 } 652#endif 653 } else { /* flag == 0 */ 654 655#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 656 if (xge_hal_device_check_id(hldev) == 657 XGE_HAL_CARD_HERC) { 658 /* 659 * Mask both Link Up and Down interrupts 660 */ 661 temp64 = 
xge_os_pio_mem_read64(hldev->pdev, 662 hldev->regh0, &bar0->misc_int_mask); 663 temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; 664 temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 665 xge_os_pio_mem_write64(hldev->pdev, 666 hldev->regh0, temp64, 667 &bar0->misc_int_mask); 668 xge_debug_device(XGE_TRACE, 669 "mask link up/down flag "XGE_OS_LLXFMT, 670 (unsigned long long)temp64); 671 } 672#endif 673 /* Disable PIC Intrs in the general intr mask 674 * register */ 675 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 676 XGE_HAL_ALL_INTRS_DIS, 677 &bar0->pic_int_mask); 678 gim |= val64; 679 } 680 } 681 682 /* DMA Interrupts */ 683 /* Enabling/Disabling Tx DMA interrupts */ 684 if (mask & XGE_HAL_TX_DMA_INTR) { 685 /* Enable TxDMA Intrs in the general intr mask register */ 686 val64 = XGE_HAL_TXDMA_INT_M; 687 if (flag) { 688 gim &= ~((u64) val64); 689 /* Enable all TxDMA interrupts */ 690 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 691 0x0, &bar0->txdma_int_mask); 692 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 693 0x0, &bar0->pfc_err_mask); 694 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 695 0x0, &bar0->tda_err_mask); 696 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 697 0x0, &bar0->pcc_err_mask); 698 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 699 0x0, &bar0->tti_err_mask); 700 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 701 0x0, &bar0->lso_err_mask); 702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 703 0x0, &bar0->tpa_err_mask); 704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 705 0x0, &bar0->sm_err_mask); 706 707 } else { /* flag == 0 */ 708 709 /* Disable TxDMA Intrs in the general intr mask 710 * register */ 711 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 712 XGE_HAL_ALL_INTRS_DIS, 713 &bar0->txdma_int_mask); 714 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 715 XGE_HAL_ALL_INTRS_DIS, 716 &bar0->pfc_err_mask); 717 718 gim |= val64; 719 } 720 } 721 722 /* Enabling/Disabling Rx DMA interrupts */ 723 if (mask & 
XGE_HAL_RX_DMA_INTR) { 724 /* Enable RxDMA Intrs in the general intr mask register */ 725 val64 = XGE_HAL_RXDMA_INT_M; 726 if (flag) { 727 728 gim &= ~((u64) val64); 729 /* All RxDMA block interrupts are disabled for now 730 * TODO */ 731 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 732 XGE_HAL_ALL_INTRS_DIS, 733 &bar0->rxdma_int_mask); 734 735 } else { /* flag == 0 */ 736 737 /* Disable RxDMA Intrs in the general intr mask 738 * register */ 739 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 740 XGE_HAL_ALL_INTRS_DIS, 741 &bar0->rxdma_int_mask); 742 743 gim |= val64; 744 } 745 } 746 747 /* MAC Interrupts */ 748 /* Enabling/Disabling MAC interrupts */ 749 if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) { 750 val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M; 751 if (flag) { 752 753 gim &= ~((u64) val64); 754 755 /* All MAC block error inter. are disabled for now. */ 756 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 757 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask); 758 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 759 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask); 760 761 } else { /* flag == 0 */ 762 763 /* Disable MAC Intrs in the general intr mask 764 * register */ 765 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 766 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask); 767 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 768 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask); 769 770 gim |= val64; 771 } 772 } 773 774 /* XGXS Interrupts */ 775 if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) { 776 val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M; 777 if (flag) { 778 779 gim &= ~((u64) val64); 780 /* All XGXS block error interrupts are disabled for now 781 * TODO */ 782 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 783 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask); 784 785 } else { /* flag == 0 */ 786 787 /* Disable MC Intrs in the general intr mask register */ 788 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 789 
XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask); 790 791 gim |= val64; 792 } 793 } 794 795 /* Memory Controller(MC) interrupts */ 796 if (mask & XGE_HAL_MC_INTR) { 797 val64 = XGE_HAL_MC_INT_M; 798 if (flag) { 799 800 gim &= ~((u64) val64); 801 802 /* Enable all MC blocks error interrupts */ 803 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 804 0x0ULL, &bar0->mc_int_mask); 805 806 } else { /* flag == 0 */ 807 808 /* Disable MC Intrs in the general intr mask 809 * register */ 810 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 811 XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask); 812 813 gim |= val64; 814 } 815 } 816 817 818 /* Tx traffic interrupts */ 819 if (mask & XGE_HAL_TX_TRAFFIC_INTR) { 820 val64 = XGE_HAL_TXTRAFFIC_INT_M; 821 if (flag) { 822 823 gim &= ~((u64) val64); 824 825 /* Enable all the Tx side interrupts */ 826 /* '0' Enables all 64 TX interrupt levels. */ 827 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0, 828 &bar0->tx_traffic_mask); 829 830 } else { /* flag == 0 */ 831 832 /* Disable Tx Traffic Intrs in the general intr mask 833 * register. */ 834 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 835 XGE_HAL_ALL_INTRS_DIS, 836 &bar0->tx_traffic_mask); 837 gim |= val64; 838 } 839 } 840 841 /* Rx traffic interrupts */ 842 if (mask & XGE_HAL_RX_TRAFFIC_INTR) { 843 val64 = XGE_HAL_RXTRAFFIC_INT_M; 844 if (flag) { 845 gim &= ~((u64) val64); 846 /* '0' Enables all 8 RX interrupt levels. */ 847 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0, 848 &bar0->rx_traffic_mask); 849 850 } else { /* flag == 0 */ 851 852 /* Disable Rx Traffic Intrs in the general intr mask 853 * register. 
854 */ 855 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 856 XGE_HAL_ALL_INTRS_DIS, 857 &bar0->rx_traffic_mask); 858 859 gim |= val64; 860 } 861 } 862 863 /* Sched Timer interrupt */ 864 if (mask & XGE_HAL_SCHED_INTR) { 865 if (flag) { 866 temp64 = xge_os_pio_mem_read64(hldev->pdev, 867 hldev->regh0, &bar0->txpic_int_mask); 868 temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR; 869 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 870 temp64, &bar0->txpic_int_mask); 871 872 xge_hal_device_sched_timer(hldev, 873 hldev->config.sched_timer_us, 874 hldev->config.sched_timer_one_shot); 875 } else { 876 temp64 = xge_os_pio_mem_read64(hldev->pdev, 877 hldev->regh0, &bar0->txpic_int_mask); 878 temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR; 879 880 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 881 temp64, &bar0->txpic_int_mask); 882 883 xge_hal_device_sched_timer(hldev, 884 XGE_HAL_SCHED_TIMER_DISABLED, 885 XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE); 886 } 887 } 888 889 if (gim != gim_saved) { 890 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim, 891 &bar0->general_int_mask); 892 xge_debug_device(XGE_TRACE, "general_int_mask updated " 893 XGE_OS_LLXFMT" => "XGE_OS_LLXFMT, 894 (unsigned long long)gim_saved, (unsigned long long)gim); 895 } 896} 897 898/* 899 * __hal_device_bimodal_configure 900 * @hldev: HAL device handle. 901 * 902 * Bimodal parameters initialization. 
903 */ 904static void 905__hal_device_bimodal_configure(xge_hal_device_t *hldev) 906{ 907 int i; 908 909 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) { 910 xge_hal_tti_config_t *tti; 911 xge_hal_rti_config_t *rti; 912 913 if (!hldev->config.ring.queue[i].configured) 914 continue; 915 rti = &hldev->config.ring.queue[i].rti; 916 tti = &hldev->bimodal_tti[i]; 917 918 tti->enabled = 1; 919 tti->urange_a = hldev->bimodal_urange_a_en * 10; 920 tti->urange_b = 20; 921 tti->urange_c = 30; 922 tti->ufc_a = hldev->bimodal_urange_a_en * 8; 923 tti->ufc_b = 16; 924 tti->ufc_c = 32; 925 tti->ufc_d = 64; 926 tti->timer_val_us = hldev->bimodal_timer_val_us; 927 tti->timer_ac_en = 1; 928 tti->timer_ci_en = 0; 929 930 rti->urange_a = 10; 931 rti->urange_b = 20; 932 rti->urange_c = 30; 933 rti->ufc_a = 1; /* <= for netpipe type of tests */ 934 rti->ufc_b = 4; 935 rti->ufc_c = 4; 936 rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */ 937 rti->timer_ac_en = 1; 938 rti->timer_val_us = 5; /* for optimal bus efficiency usage */ 939 } 940} 941 942/* 943 * __hal_device_tti_apply 944 * @hldev: HAL device handle. 945 * 946 * apply TTI configuration. 
947 */ 948static xge_hal_status_e 949__hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti, 950 int num, int runtime) 951{ 952 u64 val64, data1 = 0, data2 = 0; 953 xge_hal_pci_bar0_t *bar0; 954 955 if (runtime) 956 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 957 else 958 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 959 960 if (tti->timer_val_us) { 961 unsigned int tx_interval; 962 963 if (hldev->config.pci_freq_mherz) { 964 tx_interval = hldev->config.pci_freq_mherz * 965 tti->timer_val_us / 64; 966 tx_interval = 967 __hal_fix_time_ival_herc(hldev, 968 tx_interval); 969 } else { 970 tx_interval = tti->timer_val_us; 971 } 972 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval); 973 if (tti->timer_ac_en) { 974 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN; 975 } 976 if (tti->timer_ci_en) { 977 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN; 978 } 979 980 if (!runtime) { 981 xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s", 982 num, tx_interval, tti->timer_ci_en ? 
983 "enabled": "disabled"); 984 } 985 } 986 987 if (tti->urange_a || 988 tti->urange_b || 989 tti->urange_c || 990 tti->ufc_a || 991 tti->ufc_b || 992 tti->ufc_c || 993 tti->ufc_d ) { 994 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) | 995 XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) | 996 XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c); 997 998 data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) | 999 XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) | 1000 XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) | 1001 XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d); 1002 } 1003 1004 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1, 1005 &bar0->tti_data1_mem); 1006 (void)xge_os_pio_mem_read64(hldev->pdev, 1007 hldev->regh0, &bar0->tti_data1_mem); 1008 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2, 1009 &bar0->tti_data2_mem); 1010 (void)xge_os_pio_mem_read64(hldev->pdev, 1011 hldev->regh0, &bar0->tti_data2_mem); 1012 xge_os_wmb(); 1013 1014 val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD | 1015 XGE_HAL_TTI_CMD_MEM_OFFSET(num); 1016 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1017 &bar0->tti_command_mem); 1018 1019 if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem, 1020 0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD, 1021 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1022 /* upper layer may require to repeat */ 1023 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1024 } 1025 1026 if (!runtime) { 1027 xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x" 1028 XGE_OS_LLXFMT, num, 1029 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev, 1030 hldev->regh0, &bar0->tti_data1_mem)); 1031 } 1032 1033 return XGE_HAL_OK; 1034} 1035 1036/* 1037 * __hal_device_tti_configure 1038 * @hldev: HAL device handle. 1039 * 1040 * TTI Initialization. 1041 * Initialize Transmit Traffic Interrupt Scheme. 
 */
static xge_hal_status_e
__hal_device_tti_configure(xge_hal_device_t *hldev, int runtime)
{
    int i;

    /* walk every configured FIFO and apply each enabled TTI; the TTI
     * memory index is linear: fifo * XGE_HAL_MAX_FIFO_TTI_NUM + tti */
    for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
	int j;

	if (!hldev->config.fifo.queue[i].configured)
	    continue;

	for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
	    xge_hal_status_e status;

	    if (!hldev->config.fifo.queue[i].tti[j].enabled)
	        continue;

	    /* at least some TTI enabled. Record it. */
	    hldev->tti_enabled = 1;

	    status = __hal_device_tti_apply(hldev,
	        &hldev->config.fifo.queue[i].tti[j],
	        i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime);
	    if (status != XGE_HAL_OK)
	        return status;
	}
    }

    /* processing bimodal TTIs: one per ring, placed after the per-FIFO
     * entries starting at XGE_HAL_MAX_FIFO_TTI_RING_0 */
    for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
	xge_hal_status_e status;

	if (!hldev->bimodal_tti[i].enabled)
	    continue;

	/* at least some bimodal TTI enabled. Record it. */
	hldev->tti_enabled = 1;

	status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i],
	    XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime);
	if (status != XGE_HAL_OK)
	    return status;

    }

    return XGE_HAL_OK;
}

/*
 * __hal_device_rti_configure
 * @hldev: HAL device handle.
 * @runtime: non-zero when reconfiguring a running device (ISR-safe path).
 *
 * RTI Initialization.
 * Initialize Receive Traffic Interrupt Scheme.
 */
xge_hal_status_e
__hal_device_rti_configure(xge_hal_device_t *hldev, int runtime)
{
    xge_hal_pci_bar0_t *bar0;
    u64 val64, data1 = 0, data2 = 0;
    int i;

    if (runtime) {
	/*
	 * we don't want to re-configure RTI in case when
	 * bimodal interrupts are in use. Instead reconfigure TTI
	 * with new RTI values.
	 */
	if (hldev->config.bimodal_interrupts) {
	    __hal_device_bimodal_configure(hldev);
	    return __hal_device_tti_configure(hldev, 1);
	}
	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
    } else
	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

    /* NOTE(review): data1/data2 are never reset inside this loop, so bits
     * accumulated for earlier rings carry over into later ones — looks
     * intentional only if per-ring RTI configs are identical; verify. */
    for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
	xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti;

	if (!hldev->config.ring.queue[i].configured)
	    continue;

	if (rti->timer_val_us) {
	    unsigned int rx_interval;

	    if (hldev->config.pci_freq_mherz) {
	        /* microseconds -> timer ticks; RX granularity is /8 */
	        rx_interval = hldev->config.pci_freq_mherz *
	            rti->timer_val_us / 8;
	        rx_interval =
	            __hal_fix_time_ival_herc(hldev,
	                rx_interval);
	    } else {
	        rx_interval = rti->timer_val_us;
	    }
	    data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval);
	    if (rti->timer_ac_en) {
	        data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN;
	    }
	    /* RX continuous-interrupt enable is always set here */
	    data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN;
	}

	if (rti->urange_a ||
	    rti->urange_b ||
	    rti->urange_c ||
	    rti->ufc_a ||
	    rti->ufc_b ||
	    rti->ufc_c ||
	    rti->ufc_d) {
	    data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) |
	        XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) |
	        XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c);

	    data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) |
	        XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) |
	        XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) |
	        XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d);
	}

	/* write data words with flush readbacks, then barrier before the
	 * command strobe (mirrors the TTI programming sequence) */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
	    &bar0->rti_data1_mem);
	(void)xge_os_pio_mem_read64(hldev->pdev,
	      hldev->regh0, &bar0->rti_data1_mem);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
	    &bar0->rti_data2_mem);
	(void)xge_os_pio_mem_read64(hldev->pdev,
	      hldev->regh0, &bar0->rti_data2_mem);
	xge_os_wmb();

	val64 = XGE_HAL_RTI_CMD_MEM_WE |
	XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD;
	val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rti_command_mem);

	/* init-time path waits for the strobe to complete */
	if (!runtime && __hal_device_register_poll(hldev,
	    &bar0->rti_command_mem, 0,
	    XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    /* upper layer may require to repeat */
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	if (!runtime) {
	    xge_debug_device(XGE_TRACE,
	      "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT,
	      i,
	      (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
	      hldev->regh0, &bar0->rti_data1_mem));
	}
    }

    return XGE_HAL_OK;
}


/* Constants to be programmed into the Xena's registers to configure
 * the XAUI. */
static u64 default_xena_mdio_cfg[] = {
    /* Reset PMA PLL */
    0xC001010000000000ULL, 0xC0010100000000E0ULL,
    0xC0010100008000E4ULL,
    /* Remove Reset from PMA PLL */
    0xC001010000000000ULL, 0xC0010100000000E0ULL,
    0xC0010100000000E4ULL,
    END_SIGN
};

/* Herc needs no MDIO programming — table is just the terminator */
static u64 default_herc_mdio_cfg[] = {
    END_SIGN
};

/* Xena DTX sequence; SWITCH_SIGN mid-table hands control to the MDIO
 * table in __hal_device_xaui_configure, END_SIGN terminates */
static u64 default_xena_dtx_cfg[] = {
    0x8000051500000000ULL, 0x80000515000000E0ULL,
    0x80000515D93500E4ULL, 0x8001051500000000ULL,
    0x80010515000000E0ULL, 0x80010515001E00E4ULL,
    0x8002051500000000ULL, 0x80020515000000E0ULL,
    0x80020515F21000E4ULL,
    /* Set PADLOOPBACKN */
    0x8002051500000000ULL, 0x80020515000000E0ULL,
    0x80020515B20000E4ULL, 0x8003051500000000ULL,
    0x80030515000000E0ULL, 0x80030515B20000E4ULL,
    0x8004051500000000ULL, 0x80040515000000E0ULL,
    0x80040515B20000E4ULL, 0x8005051500000000ULL,
    0x80050515000000E0ULL, 0x80050515B20000E4ULL,
    SWITCH_SIGN,
    /* Remove PADLOOPBACKN */
    0x8002051500000000ULL, 0x80020515000000E0ULL,
    0x80020515F20000E4ULL, 0x8003051500000000ULL,
    0x80030515000000E0ULL,
    0x80030515F20000E4ULL,
    0x8004051500000000ULL, 0x80040515000000E0ULL,
    0x80040515F20000E4ULL, 0x8005051500000000ULL,
    0x80050515000000E0ULL, 0x80050515F20000E4ULL,
    END_SIGN
};

/*
static u64 default_herc_dtx_cfg[] = {
    0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
    0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
    0x80010515003F0000ULL, 0x80010515003F00E0ULL,
    0x80010515003F0004ULL, 0x80010515003F00E4ULL,
    0x80020515F2100000ULL, 0x80020515F21000E0ULL,
    0x80020515F2100004ULL, 0x80020515F21000E4ULL,
    END_SIGN
};
*/

static u64 default_herc_dtx_cfg[] = {
    0x8000051536750000ULL, 0x80000515367500E0ULL,
    0x8000051536750004ULL, 0x80000515367500E4ULL,

    0x80010515003F0000ULL, 0x80010515003F00E0ULL,
    0x80010515003F0004ULL, 0x80010515003F00E4ULL,

    0x801205150D440000ULL, 0x801205150D4400E0ULL,
    0x801205150D440004ULL, 0x801205150D4400E4ULL,

    0x80020515F2100000ULL, 0x80020515F21000E0ULL,
    0x80020515F2100004ULL, 0x80020515F21000E4ULL,
    END_SIGN
};


/*
 * Serialized 64-bit register write: upper 32 bits first, then lower,
 * each followed by a write barrier, then a 1 ms settle delay.  Used for
 * the XAUI dtx/mdio control registers which must not see a single burst
 * 64-bit write.
 */
void
__hal_serial_mem_write64(xge_hal_device_t *hldev, u64 value, u64 *reg)
{
    __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	    (u32)(value>>32), reg);
    xge_os_wmb();
    __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
	    (u32)value, reg);
    xge_os_wmb();
    xge_os_mdelay(1);
}

/*
 * Serialized 64-bit register read with a 1 ms settle delay after the read.
 */
u64
__hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg)
{
    u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    reg);
    xge_os_mdelay(1);
    return val64;
}

/*
 * __hal_device_xaui_configure
 * @hldev: HAL device handle.
 *
 * Configure XAUI Interface of Xena.
 *
 * To Configure the Xena's XAUI, one has to write a series
 * of 64 bit values into two registers in a particular
 * sequence.
Hence a macro 'SWITCH_SIGN' has been defined 1296 * which will be defined in the array of configuration values 1297 * (default_dtx_cfg & default_mdio_cfg) at appropriate places 1298 * to switch writing from one regsiter to another. We continue 1299 * writing these values until we encounter the 'END_SIGN' macro. 1300 * For example, After making a series of 21 writes into 1301 * dtx_control register the 'SWITCH_SIGN' appears and hence we 1302 * start writing into mdio_control until we encounter END_SIGN. 1303 */ 1304static void 1305__hal_device_xaui_configure(xge_hal_device_t *hldev) 1306{ 1307 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1308 int mdio_cnt = 0, dtx_cnt = 0; 1309 u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL; 1310 1311 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 1312 default_dtx_cfg = default_xena_dtx_cfg; 1313 default_mdio_cfg = default_xena_mdio_cfg; 1314 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 1315 default_dtx_cfg = default_herc_dtx_cfg; 1316 default_mdio_cfg = default_herc_mdio_cfg; 1317 } else { 1318 xge_assert(default_dtx_cfg); 1319 return; 1320 } 1321 1322 do { 1323 dtx_cfg: 1324 while (default_dtx_cfg[dtx_cnt] != END_SIGN) { 1325 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) { 1326 dtx_cnt++; 1327 goto mdio_cfg; 1328 } 1329 __hal_serial_mem_write64(hldev, default_dtx_cfg[dtx_cnt], 1330 &bar0->dtx_control); 1331 dtx_cnt++; 1332 } 1333 mdio_cfg: 1334 while (default_mdio_cfg[mdio_cnt] != END_SIGN) { 1335 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) { 1336 mdio_cnt++; 1337 goto dtx_cfg; 1338 } 1339 __hal_serial_mem_write64(hldev, default_mdio_cfg[mdio_cnt], 1340 &bar0->mdio_control); 1341 mdio_cnt++; 1342 } 1343 } while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) && 1344 (default_mdio_cfg[mdio_cnt] == END_SIGN)) ); 1345 1346 xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured"); 1347} 1348 1349/* 1350 * __hal_device_mac_link_util_set 1351 * @hldev: HAL device 
 * handle.
 *
 * Set sampling rate to calculate link utilization.
 */
static void
__hal_device_mac_link_util_set(xge_hal_device_t *hldev)
{
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    u64 val64;

    /* program TX and RX utilization sampling periods from the MAC config */
    val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL(
	    hldev->config.mac.tmac_util_period) |
	XGE_HAL_MAC_RX_LINK_UTIL_VAL(
	    hldev->config.mac.rmac_util_period);
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	              &bar0->mac_link_util);
    xge_debug_device(XGE_TRACE, "%s",
	      "bandwidth link utilization configured");
}

/*
 * __hal_device_set_swapper
 * @hldev: HAL device handle.
 *
 * Set the Xframe's byte "swapper" in accordance with
 * endianness of the host.
 *
 * Returns XGE_HAL_OK on success, XGE_HAL_ERR_SWAPPER_CTRL when the
 * feedback register does not read back the expected pattern.
 */
xge_hal_status_e
__hal_device_set_swapper(xge_hal_device_t *hldev)
{
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    u64 val64;

    /*
     * from 32bit errata:
     *
     * The SWAPPER_CONTROL register determines how the adapter accesses
     * host memory as well as how it responds to read and write requests
     * from the host system. Writes to this register should be performed
     * carefully, since the byte swappers could reverse the order of bytes.
     * When configuring this register keep in mind that writes to the PIF
     * read and write swappers could reverse the order of the upper and
     * lower 32-bit words. This means that the driver may have to write
     * to the upper 32 bits of the SWAPPER_CONTROL twice in order to
     * configure the entire register. */

    /*
     * The device by default set to a big endian format, so a big endian
     * driver need not set anything.
     */

#if defined(XGE_HAL_CUSTOM_HW_SWAPPER)

    /* all-ones first so the register is reachable regardless of the
     * current swap setting, then the platform-supplied value */
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0xffffffffffffffffULL, &bar0->swapper_ctrl);

    val64 = XGE_HAL_CUSTOM_HW_SWAPPER;

    xge_os_wmb();
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->swapper_ctrl);

    xge_debug_device(XGE_TRACE, "using custom HW swapper 0x"XGE_OS_LLXFMT,
	        (unsigned long long)val64);

#elif !defined(XGE_OS_HOST_BIG_ENDIAN)

    /*
     * Initially we enable all bits to make it accessible by the driver,
     * then we selectively enable only those bits that we want to set.
     * i.e. force swapper to swap for the first time since second write
     * will overwrite with the final settings.
     *
     * Use only for little endian platforms.
     */
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0xffffffffffffffffULL, &bar0->swapper_ctrl);
    xge_os_wmb();
    val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE |
	 XGE_HAL_SWAPPER_CTRL_PIF_R_SE |
	 XGE_HAL_SWAPPER_CTRL_PIF_W_FE |
	 XGE_HAL_SWAPPER_CTRL_PIF_W_SE |
	 XGE_HAL_SWAPPER_CTRL_RTH_FE |
	 XGE_HAL_SWAPPER_CTRL_RTH_SE |
	 XGE_HAL_SWAPPER_CTRL_TXP_FE |
	 XGE_HAL_SWAPPER_CTRL_TXP_SE |
	 XGE_HAL_SWAPPER_CTRL_TXD_R_FE |
	 XGE_HAL_SWAPPER_CTRL_TXD_R_SE |
	 XGE_HAL_SWAPPER_CTRL_TXD_W_FE |
	 XGE_HAL_SWAPPER_CTRL_TXD_W_SE |
	 XGE_HAL_SWAPPER_CTRL_TXF_R_FE |
	 XGE_HAL_SWAPPER_CTRL_RXD_R_FE |
	 XGE_HAL_SWAPPER_CTRL_RXD_R_SE |
	 XGE_HAL_SWAPPER_CTRL_RXD_W_FE |
	 XGE_HAL_SWAPPER_CTRL_RXD_W_SE |
	 XGE_HAL_SWAPPER_CTRL_RXF_W_FE |
	 XGE_HAL_SWAPPER_CTRL_XMSI_FE |
	 XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE);

    /*
    if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
	val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE;
    } */
    __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
	                 &bar0->swapper_ctrl);
    xge_os_wmb();
    __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
	                 &bar0->swapper_ctrl);
    xge_os_wmb();
    /* upper half written twice on purpose — see the 32-bit errata note
     * above: the first upper write may land word-swapped */
    __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
	                 &bar0->swapper_ctrl);
    xge_debug_device(XGE_TRACE, "%s", "using little endian set");
#endif

    /* Verifying if endian settings are accurate by reading a feedback
     * register. */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	&bar0->pif_rd_swapper_fb);
    if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) {
	xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT,
	      (unsigned long long) val64);
	return XGE_HAL_ERR_SWAPPER_CTRL;
    }

    xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled");

    return XGE_HAL_OK;
}

/*
 * __hal_device_rts_mac_configure - Configure RTS steering based on
 * destination mac address.
 * @hldev: HAL device handle.
 *
 * No-op unless rts_mac_en is set in the device config; otherwise only
 * switches the receive-traffic-steering mode to enhanced (the per-address
 * CAM entries are programmed separately via xge_hal_device_rts_mac_enable).
 */
xge_hal_status_e
__hal_device_rts_mac_configure(xge_hal_device_t *hldev)
{
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    u64 val64;

    if (!hldev->config.rts_mac_en) {
	return XGE_HAL_OK;
    }

    /*
     * Set the receive traffic steering mode from default(classic)
     * to enhanced.
     */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->rts_ctrl);
    val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	     val64, &bar0->rts_ctrl);
    return XGE_HAL_OK;
}

/*
 * __hal_device_rts_port_configure - Configure RTS steering based on
 * destination or source port number.
 * @hldev: HAL device handle.
1512 * 1513 */ 1514xge_hal_status_e 1515__hal_device_rts_port_configure(xge_hal_device_t *hldev) 1516{ 1517 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1518 u64 val64; 1519 int rnum; 1520 1521 if (!hldev->config.rts_port_en) { 1522 return XGE_HAL_OK; 1523 } 1524 1525 /* 1526 * Set the receive traffic steering mode from default(classic) 1527 * to enhanced. 1528 */ 1529 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 1530 &bar0->rts_ctrl); 1531 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; 1532 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1533 val64, &bar0->rts_ctrl); 1534 1535 /* 1536 * Initiate port steering according to per-ring configuration 1537 */ 1538 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) { 1539 int pnum; 1540 xge_hal_ring_queue_t *queue = &hldev->config.ring.queue[rnum]; 1541 1542 if (!queue->configured || queue->rts_port_en) 1543 continue; 1544 1545 for (pnum = 0; pnum < XGE_HAL_MAX_STEERABLE_PORTS; pnum++) { 1546 xge_hal_rts_port_t *port = &queue->rts_ports[pnum]; 1547 1548 /* 1549 * Skip and clear empty ports 1550 */ 1551 if (!port->num) { 1552 /* 1553 * Clear CAM memory 1554 */ 1555 xge_os_pio_mem_write64(hldev->pdev, 1556 hldev->regh0, 0ULL, 1557 &bar0->rts_pn_cam_data); 1558 1559 val64 = BIT(7) | BIT(15); 1560 } else { 1561 /* 1562 * Assign new Port values according 1563 * to configuration 1564 */ 1565 val64 = vBIT(port->num,8,16) | 1566 vBIT(rnum,37,3) | BIT(63); 1567 if (port->src) 1568 val64 = BIT(47); 1569 if (!port->udp) 1570 val64 = BIT(7); 1571 xge_os_pio_mem_write64(hldev->pdev, 1572 hldev->regh0, val64, 1573 &bar0->rts_pn_cam_data); 1574 1575 val64 = BIT(7) | BIT(15) | vBIT(pnum,24,8); 1576 } 1577 1578 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 1579 val64, &bar0->rts_pn_cam_ctrl); 1580 1581 /* poll until done */ 1582 if (__hal_device_register_poll(hldev, 1583 &bar0->rts_pn_cam_ctrl, 0, 1584 XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED, 1585 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != 1586 
XGE_HAL_OK) { 1587 /* upper layer may require to repeat */ 1588 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1589 } 1590 } 1591 } 1592 return XGE_HAL_OK; 1593} 1594 1595/* 1596 * __hal_device_rts_qos_configure - Configure RTS steering based on 1597 * qos. 1598 * @hldev: HAL device handle. 1599 * 1600 */ 1601xge_hal_status_e 1602__hal_device_rts_qos_configure(xge_hal_device_t *hldev) 1603{ 1604 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 1605 u64 val64; 1606 int j, rx_ring_num; 1607 1608 if (!hldev->config.rts_qos_en) { 1609 return XGE_HAL_OK; 1610 } 1611 1612 /* First clear the RTS_DS_MEM_DATA */ 1613 val64 = 0; 1614 for (j = 0; j < 64; j++ ) 1615 { 1616 /* First clear the value */ 1617 val64 = XGE_HAL_RTS_DS_MEM_DATA(0); 1618 1619 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1620 &bar0->rts_ds_mem_data); 1621 1622 val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE | 1623 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD | 1624 XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j ); 1625 1626 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 1627 &bar0->rts_ds_mem_ctrl); 1628 1629 1630 /* poll until done */ 1631 if (__hal_device_register_poll(hldev, 1632 &bar0->rts_ds_mem_ctrl, 0, 1633 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED, 1634 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 1635 /* upper layer may require to repeat */ 1636 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 1637 } 1638 1639 } 1640 1641 rx_ring_num = 0; 1642 for (j = 0; j < XGE_HAL_MAX_RING_NUM; j++) { 1643 if (hldev->config.ring.queue[j].configured) 1644 rx_ring_num++; 1645 } 1646 1647 switch (rx_ring_num) { 1648 case 1: 1649 val64 = 0x0; 1650 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1651 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1652 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1653 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 
&bar0->rx_w_round_robin_3); 1654 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1655 break; 1656 case 2: 1657 val64 = 0x0001000100010001ULL; 1658 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1659 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1660 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1661 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1662 val64 = 0x0001000100000000ULL; 1663 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1664 break; 1665 case 3: 1666 val64 = 0x0001020001020001ULL; 1667 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1668 val64 = 0x0200010200010200ULL; 1669 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1670 val64 = 0x0102000102000102ULL; 1671 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1672 val64 = 0x0001020001020001ULL; 1673 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1674 val64 = 0x0200010200000000ULL; 1675 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1676 break; 1677 case 4: 1678 val64 = 0x0001020300010203ULL; 1679 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1680 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1681 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1682 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1683 val64 = 0x0001020300000000ULL; 1684 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1685 break; 1686 case 5: 1687 val64 = 0x0001020304000102ULL; 1688 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 
&bar0->rx_w_round_robin_0); 1689 val64 = 0x0304000102030400ULL; 1690 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1691 val64 = 0x0102030400010203ULL; 1692 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1693 val64 = 0x0400010203040001ULL; 1694 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1695 val64 = 0x0203040000000000ULL; 1696 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1697 break; 1698 case 6: 1699 val64 = 0x0001020304050001ULL; 1700 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1701 val64 = 0x0203040500010203ULL; 1702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1703 val64 = 0x0405000102030405ULL; 1704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1705 val64 = 0x0001020304050001ULL; 1706 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1707 val64 = 0x0203040500000000ULL; 1708 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1709 break; 1710 case 7: 1711 val64 = 0x0001020304050600ULL; 1712 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1713 val64 = 0x0102030405060001ULL; 1714 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); 1715 val64 = 0x0203040506000102ULL; 1716 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); 1717 val64 = 0x0304050600010203ULL; 1718 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); 1719 val64 = 0x0405060000000000ULL; 1720 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); 1721 break; 1722 case 8: 1723 val64 = 0x0001020304050607ULL; 1724 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); 1725 
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
	val64 = 0x0001020300000000ULL;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
	break;
    }

    return XGE_HAL_OK;
}

/*
 * xge__hal_device_rts_mac_enable
 *
 * @devh: HAL device handle.
 * @index: index number where the MAC addr will be stored
 * @macaddr: MAC address
 *
 * - Enable RTS steering for the given MAC address. This function has to be
 *  called with lock acquired.
 *
 * NOTE:
 * 1. ULD has to call this function with the index value which
 *    satisfies the following condition:
 *  ring_num = (index % 8)
 * 2.ULD also needs to make sure that the index is not
 *   occupied by any MAC address. If that index has any MAC address
 *   it will be overwritten and HAL will not check for it.
 *
 * Returns XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES when @index exceeds the
 * card's CAM capacity, the status of the underlying macaddr_set on
 * failure, or the result of enabling the RTS section.
 */
xge_hal_status_e
xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr)
{
    int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
    xge_hal_status_e status;

    xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

    /* Herc has a larger MAC-address CAM than Xena */
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;

    if ( index >= max_addr )
	return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;

    /*
     * Set the MAC address at the given location marked by index.
     */
    status = xge_hal_device_macaddr_set(hldev, index, macaddr);
    if (status != XGE_HAL_OK) {
	xge_debug_device(XGE_ERR, "%s",
	      "Not able to set the mac addr");
	return status;
    }

    return xge_hal_device_rts_section_enable(hldev, index);
}

/*
 * xge__hal_device_rts_mac_disable
 * @hldev: HAL device handle.
 * @index: index number where to disable the MAC addr
 *
 * Disable RTS Steering based on the MAC address.
 * This function should be called with lock acquired.
 *
 * Implementation note: "disabling" is done by overwriting the CAM slot
 * with the all-ones (broadcast) address via xge_hal_device_macaddr_set.
 */
xge_hal_status_e
xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index)
{
    xge_hal_status_e status;
    u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
    int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;

    xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

    xge_debug_ll(XGE_TRACE, "the index value is %d ", index);

    /* Herc has a larger MAC-address CAM than Xena */
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
	max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;

    if ( index >= max_addr )
	return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;

    /*
     * Disable MAC address @ given index location
     */
    status = xge_hal_device_macaddr_set(hldev, index, macaddr);
    if (status != XGE_HAL_OK) {
	xge_debug_device(XGE_ERR, "%s",
	      "Not able to set the mac addr");
	return status;
    }

    return XGE_HAL_OK;
}


/*
 * __hal_device_rth_configure - Configure RTH for the device
 * @hldev: HAL device handle.
 *
 * Using IT (Indirection Table).
 *
 * No-op unless rth_en is set.  Switches RTS to enhanced mode, fills the
 * 2^rth_bucket_size indirection-table buckets round-robin with the rings
 * that have rth_en set, then enables RTH hashing for all supported
 * IPv4/IPv6 TCP/UDP flow types.
 */
xge_hal_status_e
__hal_device_rth_it_configure(xge_hal_device_t *hldev)
{
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    u64 val64;
    int rings[XGE_HAL_MAX_RING_NUM]={0};
    int rnum;
    int rmax;
    int buckets_num;
    int bucket;

    if (!hldev->config.rth_en) {
	return XGE_HAL_OK;
    }

    /*
     * Set the receive traffic steering mode from default(classic)
     * to enhanced.
     */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	              &bar0->rts_ctrl);
    val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	             val64, &bar0->rts_ctrl);

    buckets_num = (1 << hldev->config.rth_bucket_size);

    /* collect the rings that participate in RTH */
    rmax=0;
    for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
	if (hldev->config.ring.queue[rnum].configured &&
	        hldev->config.ring.queue[rnum].rth_en)
	        rings[rmax++] = rnum;
    }

    rnum = 0;
    /* for starters: fill in all the buckets with rings "equally" */
    for (bucket = 0; bucket < buckets_num; bucket++) {

	if (rnum == rmax)
	   rnum = 0;

	/* write data */
	val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
	        XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	         &bar0->rts_rth_map_mem_data);

	/* execute */
	val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
	        XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
	        XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	         &bar0->rts_rth_map_mem_ctrl);

	/* poll until done */
	if (__hal_device_register_poll(hldev,
	    &bar0->rts_rth_map_mem_ctrl, 0,
	    XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    /* upper layer may require to repeat */
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	rnum++;
    }

    /* enable RTH for every hashable flow type */
    val64 = XGE_HAL_RTS_RTH_EN;
    val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size);
    val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN | XGE_HAL_RTS_RTH_IPV4_EN |
	XGE_HAL_RTS_RTH_TCP_IPV6_EN |XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN |
	XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN;

    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	         &bar0->rts_rth_cfg);

    xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d",
	      hldev->config.rth_bucket_size);

    return XGE_HAL_OK;
}


/*
 * __hal_spdm_entry_add - Add a new entry to the SPDM table.
 *
 * Add a new entry to the SPDM table
 *
 * This function add a new entry to the SPDM table.
 *
 * Note:
 * This function should be called with spdm_lock.
 *
 * See also: xge_hal_spdm_entry_add , xge_hal_spdm_entry_remove.
 */
static xge_hal_status_e
__hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip,
	    xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp,
	    u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry)
{
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    u64 val64;
    u64 spdm_line_arr[8];    /* one 64-byte SPDM entry = 8 lines of 8 bytes */
    u8 line_no;

    /*
     * Clear the SPDM READY bit; the adapter sets it again once the
     * entry write below has been consumed (polled at the end).
     */
    val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	          &bar0->rxpic_int_reg);

    xge_debug_device(XGE_TRACE,
	    "L4 SP %x:DP %x: hash %x tgt_queue %d ",
	    l4_sp, l4_dp, jhash_value, tgt_queue);

    xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr));

    /*
     * Construct the SPDM entry.
     */
    /* line 0: L4 ports, target queue, protocol and IP-version flags */
    spdm_line_arr[0] = vBIT(l4_sp,0,16) |
	        vBIT(l4_dp,16,32) |
	        vBIT(tgt_queue,53,3) |
	        vBIT(is_tcp,59,1) |
	        vBIT(is_ipv4,63,1);


    if (is_ipv4) {
	spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) |
	            vBIT(dst_ip->ipv4.addr,32,32);

    } else {
	/* IPv6: 128-bit source address in lines 1-2, destination in 3-4 */
	xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8);
	xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8);
	xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8);
	xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8);
    }

    /* line 7: pre-computed Jenkins hash plus entry-enable */
    spdm_line_arr[7] = vBIT(jhash_value,0,32) |
	        BIT(63); /* entry enable bit */

    /*
     * Add the entry to the SPDM table
     */
    for(line_no = 0; line_no < 8; line_no++) {
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        spdm_line_arr[line_no],
	        (void *)((char *)hldev->spdm_mem_base +
	                (spdm_entry * 64) +
	                (line_no * 8)));
    }

    /*
     * Wait for the operation to be completed.
     */
    if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
	    XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
    }

    /*
     * Add this information to a local SPDM table. The purpose of
     * maintaining a local SPDM table is to avoid a search in the
     * adapter SPDM table for spdm entry lookup which is very costly
     * in terms of time.
     */
    hldev->spdm_table[spdm_entry]->in_use = 1;
    xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip,
	    sizeof(xge_hal_ipaddr_t));
    xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip,
	    sizeof(xge_hal_ipaddr_t));
    hldev->spdm_table[spdm_entry]->l4_sp = l4_sp;
    hldev->spdm_table[spdm_entry]->l4_dp = l4_dp;
    hldev->spdm_table[spdm_entry]->is_tcp = is_tcp;
    hldev->spdm_table[spdm_entry]->is_ipv4 = is_ipv4;
    hldev->spdm_table[spdm_entry]->tgt_queue = tgt_queue;
    hldev->spdm_table[spdm_entry]->jhash_value = jhash_value;
    hldev->spdm_table[spdm_entry]->spdm_entry = spdm_entry;

    return XGE_HAL_OK;
}

/*
 * __hal_device_rth_spdm_configure - Configure RTH for the device
 * @hldev: HAL device handle.
 *
 * Using SPDM (Socket-Pair Direct Match).
 *
 * No-op unless rth_spdm_en is set.  Locates the SPDM memory window from
 * spdm_bir_offset, sizes the table from spdm_structure, allocates (first
 * call) or replays (after soft reset) the host-side shadow table, then
 * enables enhanced RTS mode and IPv4/TCP-IPv4 hashing.
 */
xge_hal_status_e
__hal_device_rth_spdm_configure(xge_hal_device_t *hldev)
{
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
    u64 val64;
    u8 spdm_bar_num;
    u32 spdm_bar_offset;
    int spdm_table_size;
    int i;

    if (!hldev->config.rth_spdm_en) {
	return XGE_HAL_OK;
    }

    /*
     * Retrieve the base address of SPDM Table.
     */
    val64 = xge_os_pio_mem_read64(hldev->pdev,
	    hldev->regh0, &bar0->spdm_bir_offset);

    spdm_bar_num = XGE_HAL_SPDM_PCI_BAR_NUM(val64);
    spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64);


    /*
     * spdm_bar_num specifies the PCI bar num register used to
     * address the memory space. spdm_bar_offset specifies the offset
     * of the SPDM memory with in the bar num memory space.
     */
    switch (spdm_bar_num) {
	case 0:
	{
	    hldev->spdm_mem_base = (char *)bar0 +
	                (spdm_bar_offset * 8);
	    break;
	}
	case 1:
	{
	    char *bar1 = (char *)hldev->bar1;
	    hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8);
	    break;
	}
	default:
	    /* NOTE(review): this condition is true whenever the default
	     * branch is taken, so the assert can never fire and
	     * spdm_mem_base is left unset — looks like the intent was to
	     * assert failure; verify */
	    xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1)));
    }

    /*
     * Retrieve the size of SPDM table(number of entries).
     */
    val64 = xge_os_pio_mem_read64(hldev->pdev,
	    hldev->regh0, &bar0->spdm_structure);
    hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64);


    spdm_table_size = hldev->spdm_max_entries *
	sizeof(xge_hal_spdm_entry_t);
    if (hldev->spdm_table == NULL) {
	void *mem;

	/*
	 * Allocate memory to hold the copy of SPDM table.
	 */
	if ((hldev->spdm_table = (xge_hal_spdm_entry_t **)
	            xge_os_malloc(
	             hldev->pdev,
	             (sizeof(xge_hal_spdm_entry_t *) *
	             hldev->spdm_max_entries))) == NULL) {
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL)
	{
	    /* unwind the pointer array on failure */
	    xge_os_free(hldev->pdev, hldev->spdm_table,
	          (sizeof(xge_hal_spdm_entry_t *) *
	            hldev->spdm_max_entries));
	    return XGE_HAL_ERR_OUT_OF_MEMORY;
	}

	/* carve the single allocation into per-entry pointers */
	xge_os_memzero(mem, spdm_table_size);
	for (i = 0; i < hldev->spdm_max_entries; i++) {
	    hldev->spdm_table[i] = (xge_hal_spdm_entry_t *)
	            ((char *)mem +
	            i * sizeof(xge_hal_spdm_entry_t));
	}
	xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev);
    } else {
	/*
	 * We are here because the host driver tries to
	 * do a soft reset on the device.
	 * Since the device soft reset clears the SPDM table, copy
	 * the entries from the local SPDM table to the actual one.
	 */
	xge_os_spin_lock(&hldev->spdm_lock);
	for (i = 0; i < hldev->spdm_max_entries; i++) {
	    xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i];

	    if (spdm_entry->in_use) {
	        if (__hal_spdm_entry_add(hldev,
	                     &spdm_entry->src_ip,
	                     &spdm_entry->dst_ip,
	                     spdm_entry->l4_sp,
	                     spdm_entry->l4_dp,
	                     spdm_entry->is_tcp,
	                     spdm_entry->is_ipv4,
	                     spdm_entry->tgt_queue,
	                     spdm_entry->jhash_value,
	                     spdm_entry->spdm_entry)
	                != XGE_HAL_OK) {
	            /* Log an warning */
	            xge_debug_device(XGE_ERR,
	                  "SPDM table update from local"
	                  " memory failed");
	        }
	    }
	}
	xge_os_spin_unlock(&hldev->spdm_lock);
    }

    /*
     * Set the receive traffic steering mode from default(classic)
     * to enhanced.
     */
    val64 = xge_os_pio_mem_read64(hldev->pdev,
	        hldev->regh0, &bar0->rts_ctrl);
    val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	     val64, &bar0->rts_ctrl);

    /*
     * We may not need to configure rts_rth_jhash_cfg register as the
     * default values are good enough to calculate the hash.
     */

    /*
     * As of now, set all the rth mask registers to zero. TODO.
     */
    for(i = 0; i < 5; i++) {
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            0, &bar0->rts_rth_hash_mask[i]);
    }

    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        0, &bar0->rts_rth_hash_mask_5);

    if (hldev->config.rth_spdm_use_l4) {
	val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            val64, &bar0->rts_rth_status);
    }

    val64 = XGE_HAL_RTS_RTH_EN;
    val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN;
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	         &bar0->rts_rth_cfg);


    return XGE_HAL_OK;
}

/*
 * __hal_device_pci_init
 * @hldev: HAL device handle.
2178 * 2179 * Initialize certain PCI/PCI-X configuration registers 2180 * with recommended values. Save config space for future hw resets. 2181 */ 2182static void 2183__hal_device_pci_init(xge_hal_device_t *hldev) 2184{ 2185 int i, pcisize = 0; 2186 u16 cmd = 0; 2187 u8 val; 2188 2189 /* Store PCI device ID and revision for future references where in we 2190 * decide Xena revision using PCI sub system ID */ 2191 xge_os_pci_read16(hldev->pdev,hldev->cfgh, 2192 xge_offsetof(xge_hal_pci_config_le_t, device_id), 2193 &hldev->device_id); 2194 xge_os_pci_read8(hldev->pdev,hldev->cfgh, 2195 xge_offsetof(xge_hal_pci_config_le_t, revision), 2196 &hldev->revision); 2197 2198 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) 2199 pcisize = XGE_HAL_PCISIZE_HERC; 2200 else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) 2201 pcisize = XGE_HAL_PCISIZE_XENA; 2202 2203 /* save original PCI config space to restore it on device_terminate() */ 2204 for (i = 0; i < pcisize; i++) { 2205 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4, 2206 (u32*)&hldev->pci_config_space_bios + i); 2207 } 2208 2209 /* Set the PErr Repconse bit and SERR in PCI command register. 
*/ 2210 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2211 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd); 2212 cmd |= 0x140; 2213 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 2214 xge_offsetof(xge_hal_pci_config_le_t, command), cmd); 2215 2216 /* Set user spcecified value for the PCI Latency Timer */ 2217 if (hldev->config.latency_timer && 2218 hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) { 2219 xge_os_pci_write8(hldev->pdev, hldev->cfgh, 2220 xge_offsetof(xge_hal_pci_config_le_t, 2221 latency_timer), 2222 (u8)hldev->config.latency_timer); 2223 } 2224 /* Read back latency timer to reflect it into user level */ 2225 xge_os_pci_read8(hldev->pdev, hldev->cfgh, 2226 xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val); 2227 hldev->config.latency_timer = val; 2228 2229 /* Enable Data Parity Error Recovery in PCI-X command register. */ 2230 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2231 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); 2232 cmd |= 1; 2233 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 2234 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd); 2235 2236 /* Set MMRB count in PCI-X command register. */ 2237 if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) { 2238 cmd &= 0xFFF3; 2239 cmd |= hldev->config.mmrb_count << 2; 2240 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 2241 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 2242 cmd); 2243 } 2244 /* Read back MMRB count to reflect it into user level */ 2245 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2246 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 2247 &cmd); 2248 cmd &= 0x000C; 2249 hldev->config.mmrb_count = cmd>>2; 2250 2251 /* Setting Maximum outstanding splits based on system type. 
*/ 2252 if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) { 2253 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2254 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 2255 &cmd); 2256 cmd &= 0xFF8F; 2257 cmd |= hldev->config.max_splits_trans << 4; 2258 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 2259 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), 2260 cmd); 2261 } 2262 2263 /* Read back max split trans to reflect it into user level */ 2264 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2265 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); 2266 cmd &= 0x0070; 2267 hldev->config.max_splits_trans = cmd>>4; 2268 2269 /* Forcibly disabling relaxed ordering capability of the card. */ 2270 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 2271 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd); 2272 cmd &= 0xFFFD; 2273 xge_os_pci_write16(hldev->pdev, hldev->cfgh, 2274 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd); 2275 2276 /* save PCI config space for future resets */ 2277 for (i = 0; i < pcisize; i++) { 2278 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4, 2279 (u32*)&hldev->pci_config_space + i); 2280 } 2281} 2282 2283/* 2284 * __hal_device_pci_info_get - Get PCI bus informations such as width, frequency 2285 * and mode. 2286 * @devh: HAL device handle. 2287 * @pci_mode: pointer to a variable of enumerated type 2288 * xge_hal_pci_mode_e{}. 2289 * @bus_frequency: pointer to a variable of enumerated type 2290 * xge_hal_pci_bus_frequency_e{}. 2291 * @bus_width: pointer to a variable of enumerated type 2292 * xge_hal_pci_bus_width_e{}. 2293 * 2294 * Get pci mode, frequency, and PCI bus width. 2295 * 2296 * Returns: one of the xge_hal_status_e{} enumerated types. 2297 * XGE_HAL_OK - for success. 2298 * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card. 2299 * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card. 2300 * 2301 * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e. 
2302 */ 2303static xge_hal_status_e 2304__hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode, 2305 xge_hal_pci_bus_frequency_e *bus_frequency, 2306 xge_hal_pci_bus_width_e *bus_width) 2307{ 2308 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 2309 xge_hal_status_e rc_status = XGE_HAL_OK; 2310 xge_hal_card_e card_id = xge_hal_device_check_id (devh); 2311 2312#ifdef XGE_HAL_HERC_EMULATION 2313 hldev->config.pci_freq_mherz = 2314 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2315 *bus_frequency = 2316 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2317 *pci_mode = XGE_HAL_PCI_66MHZ_MODE; 2318#else 2319 if (card_id == XGE_HAL_CARD_HERC) { 2320 xge_hal_pci_bar0_t *bar0 = 2321 (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2322 u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2323 &bar0->pci_info); 2324 if (XGE_HAL_PCI_32_BIT & pci_info) 2325 *bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT; 2326 else 2327 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT; 2328 switch((pci_info & XGE_HAL_PCI_INFO)>>60) 2329 { 2330 case XGE_HAL_PCI_33MHZ_MODE: 2331 *bus_frequency = 2332 XGE_HAL_PCI_BUS_FREQUENCY_33MHZ; 2333 *pci_mode = XGE_HAL_PCI_33MHZ_MODE; 2334 break; 2335 case XGE_HAL_PCI_66MHZ_MODE: 2336 *bus_frequency = 2337 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2338 *pci_mode = XGE_HAL_PCI_66MHZ_MODE; 2339 break; 2340 case XGE_HAL_PCIX_M1_66MHZ_MODE: 2341 *bus_frequency = 2342 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ; 2343 *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE; 2344 break; 2345 case XGE_HAL_PCIX_M1_100MHZ_MODE: 2346 *bus_frequency = 2347 XGE_HAL_PCI_BUS_FREQUENCY_100MHZ; 2348 *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE; 2349 break; 2350 case XGE_HAL_PCIX_M1_133MHZ_MODE: 2351 *bus_frequency = 2352 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; 2353 *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE; 2354 break; 2355 case XGE_HAL_PCIX_M2_66MHZ_MODE: 2356 *bus_frequency = 2357 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; 2358 *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE; 2359 break; 2360 case XGE_HAL_PCIX_M2_100MHZ_MODE: 2361 *bus_frequency = 
2362 XGE_HAL_PCI_BUS_FREQUENCY_200MHZ; 2363 *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE; 2364 break; 2365 case XGE_HAL_PCIX_M2_133MHZ_MODE: 2366 *bus_frequency = 2367 XGE_HAL_PCI_BUS_FREQUENCY_266MHZ; 2368 *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE; 2369 break; 2370 case XGE_HAL_PCIX_M1_RESERVED: 2371 case XGE_HAL_PCIX_M1_66MHZ_NS: 2372 case XGE_HAL_PCIX_M1_100MHZ_NS: 2373 case XGE_HAL_PCIX_M1_133MHZ_NS: 2374 case XGE_HAL_PCIX_M2_RESERVED: 2375 case XGE_HAL_PCIX_533_RESERVED: 2376 default: 2377 rc_status = XGE_HAL_ERR_INVALID_PCI_INFO; 2378 xge_debug_device(XGE_ERR, 2379 "invalid pci info "XGE_OS_LLXFMT, 2380 (unsigned long long)pci_info); 2381 break; 2382 } 2383 if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO) 2384 xge_debug_device(XGE_TRACE, "PCI info: mode %d width " 2385 "%d frequency %d", *pci_mode, *bus_width, 2386 *bus_frequency); 2387 if (hldev->config.pci_freq_mherz == 2388 XGE_HAL_DEFAULT_USE_HARDCODE) { 2389 hldev->config.pci_freq_mherz = *bus_frequency; 2390 } 2391 } 2392 /* for XENA, we report PCI mode, only. PCI bus frequency, and bus width 2393 * are set to unknown */ 2394 else if (card_id == XGE_HAL_CARD_XENA) { 2395 u32 pcix_status; 2396 u8 dev_num, bus_num; 2397 /* initialize defaults for XENA */ 2398 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN; 2399 *bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN; 2400 xge_os_pci_read32(hldev->pdev, hldev->cfgh, 2401 xge_offsetof(xge_hal_pci_config_le_t, pcix_status), 2402 &pcix_status); 2403 dev_num = (u8)((pcix_status & 0xF8) >> 3); 2404 bus_num = (u8)((pcix_status & 0xFF00) >> 8); 2405 if (dev_num == 0 && bus_num == 0) 2406 *pci_mode = XGE_HAL_PCI_BASIC_MODE; 2407 else 2408 *pci_mode = XGE_HAL_PCIX_BASIC_MODE; 2409 xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode); 2410 if (hldev->config.pci_freq_mherz == 2411 XGE_HAL_DEFAULT_USE_HARDCODE) { 2412 /* 2413 * There is no way to detect BUS frequency on Xena, 2414 * so, in case of automatic configuration we hopelessly 2415 * assume 133MHZ. 
2416 */ 2417 hldev->config.pci_freq_mherz = 2418 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ; 2419 } 2420 } else if (card_id == XGE_HAL_CARD_TITAN) { 2421 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT; 2422 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_250MHZ; 2423 if (hldev->config.pci_freq_mherz == 2424 XGE_HAL_DEFAULT_USE_HARDCODE) { 2425 hldev->config.pci_freq_mherz = *bus_frequency; 2426 } 2427 } else{ 2428 rc_status = XGE_HAL_ERR_BAD_DEVICE_ID; 2429 xge_debug_device(XGE_ERR, "invalid device id %d", card_id); 2430 } 2431#endif 2432 2433 return rc_status; 2434} 2435 2436/* 2437 * __hal_device_handle_link_up_ind 2438 * @hldev: HAL device handle. 2439 * 2440 * Link up indication handler. The function is invoked by HAL when 2441 * Xframe indicates that the link is up for programmable amount of time. 2442 */ 2443static int 2444__hal_device_handle_link_up_ind(xge_hal_device_t *hldev) 2445{ 2446 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2447 u64 val64; 2448 2449 /* 2450 * If the previous link state is not down, return. 
2451 */ 2452 if (hldev->link_state == XGE_HAL_LINK_UP) { 2453#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2454 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){ 2455 val64 = xge_os_pio_mem_read64( 2456 hldev->pdev, hldev->regh0, 2457 &bar0->misc_int_mask); 2458 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2459 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2460 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2461 val64, &bar0->misc_int_mask); 2462 } 2463#endif 2464 xge_debug_device(XGE_TRACE, 2465 "link up indication while link is up, ignoring.."); 2466 return 0; 2467 } 2468 2469 /* Now re-enable it as due to noise, hardware turned it off */ 2470 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2471 &bar0->adapter_control); 2472 val64 |= XGE_HAL_ADAPTER_CNTL_EN; 2473 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */ 2474 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2475 &bar0->adapter_control); 2476 2477 /* Turn on the Laser */ 2478 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2479 &bar0->adapter_control); 2480 val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON | 2481 XGE_HAL_ADAPTER_LED_ON); 2482 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2483 &bar0->adapter_control); 2484 2485#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2486 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2487 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2488 &bar0->adapter_status); 2489 if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2490 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) { 2491 xge_debug_device(XGE_TRACE, "%s", 2492 "fail to transition link to up..."); 2493 return 0; 2494 } 2495 else { 2496 /* 2497 * Mask the Link Up interrupt and unmask the Link Down 2498 * interrupt. 
2499 */ 2500 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2501 &bar0->misc_int_mask); 2502 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2503 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2504 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2505 &bar0->misc_int_mask); 2506 xge_debug_device(XGE_TRACE, "calling link up.."); 2507 hldev->link_state = XGE_HAL_LINK_UP; 2508 2509 /* notify ULD */ 2510 if (g_xge_hal_driver->uld_callbacks.link_up) { 2511 g_xge_hal_driver->uld_callbacks.link_up( 2512 hldev->upper_layer_info); 2513 } 2514 return 1; 2515 } 2516 } 2517#endif 2518 xge_os_mdelay(1); 2519 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0, 2520 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2521 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT), 2522 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) { 2523 2524 /* notify ULD */ 2525 (void) xge_queue_produce_context(hldev->queueh, 2526 XGE_HAL_EVENT_LINK_IS_UP, 2527 hldev); 2528 /* link is up after been enabled */ 2529 return 1; 2530 } else { 2531 xge_debug_device(XGE_TRACE, "%s", 2532 "fail to transition link to up..."); 2533 return 0; 2534 } 2535} 2536 2537/* 2538 * __hal_device_handle_link_down_ind 2539 * @hldev: HAL device handle. 2540 * 2541 * Link down indication handler. The function is invoked by HAL when 2542 * Xframe indicates that the link is down. 2543 */ 2544static int 2545__hal_device_handle_link_down_ind(xge_hal_device_t *hldev) 2546{ 2547 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2548 u64 val64; 2549 2550 /* 2551 * If the previous link state is not up, return. 
2552 */ 2553 if (hldev->link_state == XGE_HAL_LINK_DOWN) { 2554#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2555 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){ 2556 val64 = xge_os_pio_mem_read64( 2557 hldev->pdev, hldev->regh0, 2558 &bar0->misc_int_mask); 2559 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2560 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2561 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2562 val64, &bar0->misc_int_mask); 2563 } 2564#endif 2565 xge_debug_device(XGE_TRACE, 2566 "link down indication while link is down, ignoring.."); 2567 return 0; 2568 } 2569 xge_os_mdelay(1); 2570 2571 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2572 &bar0->adapter_control); 2573 2574 /* try to debounce the link only if the adapter is enabled. */ 2575 if (val64 & XGE_HAL_ADAPTER_CNTL_EN) { 2576 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0, 2577 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2578 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT), 2579 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) { 2580 xge_debug_device(XGE_TRACE, 2581 "link is actually up (possible noisy link?), ignoring."); 2582 return(0); 2583 } 2584 } 2585 2586 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2587 &bar0->adapter_control); 2588 /* turn off LED */ 2589 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); 2590 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2591 &bar0->adapter_control); 2592 2593#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR 2594 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2595 /* 2596 * Mask the Link Down interrupt and unmask the Link up 2597 * interrupt 2598 */ 2599 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2600 &bar0->misc_int_mask); 2601 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT; 2602 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT; 2603 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2604 &bar0->misc_int_mask); 2605 2606 /* link is down */ 2607 xge_debug_device(XGE_TRACE, "calling link 
down.."); 2608 hldev->link_state = XGE_HAL_LINK_DOWN; 2609 2610 /* notify ULD */ 2611 if (g_xge_hal_driver->uld_callbacks.link_down) { 2612 g_xge_hal_driver->uld_callbacks.link_down( 2613 hldev->upper_layer_info); 2614 } 2615 return 1; 2616 } 2617#endif 2618 /* notify ULD */ 2619 (void) xge_queue_produce_context(hldev->queueh, 2620 XGE_HAL_EVENT_LINK_IS_DOWN, 2621 hldev); 2622 /* link is down */ 2623 return 1; 2624} 2625/* 2626 * __hal_device_handle_link_state_change 2627 * @hldev: HAL device handle. 2628 * 2629 * Link state change handler. The function is invoked by HAL when 2630 * Xframe indicates link state change condition. The code here makes sure to 2631 * 1) ignore redundant state change indications; 2632 * 2) execute link-up sequence, and handle the failure to bring the link up; 2633 * 3) generate XGE_HAL_LINK_UP/DOWN event for the subsequent handling by 2634 * upper-layer driver (ULD). 2635 */ 2636static int 2637__hal_device_handle_link_state_change(xge_hal_device_t *hldev) 2638{ 2639 u64 hw_status; 2640 int hw_link_state; 2641 int retcode; 2642 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2643 u64 val64; 2644 int i = 0; 2645 2646 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2647 &bar0->adapter_control); 2648 2649 /* If the adapter is not enabled but the hal thinks we are in the up 2650 * state then transition to the down state. 2651 */ 2652 if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) && 2653 (hldev->link_state == XGE_HAL_LINK_UP) ) { 2654 return(__hal_device_handle_link_down_ind(hldev)); 2655 } 2656 2657 do { 2658 xge_os_mdelay(1); 2659 (void) xge_hal_device_status(hldev, &hw_status); 2660 hw_link_state = (hw_status & 2661 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | 2662 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ? 2663 XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP; 2664 2665 /* check if the current link state is still considered 2666 * to be changed. 
This way we will make sure that this is 2667 * not a noise which needs to be filtered out */ 2668 if (hldev->link_state == hw_link_state) 2669 break; 2670 } while (i++ < hldev->config.link_valid_cnt); 2671 2672 /* If the current link state is same as previous, just return */ 2673 if (hldev->link_state == hw_link_state) 2674 retcode = 0; 2675 /* detected state change */ 2676 else if (hw_link_state == XGE_HAL_LINK_UP) 2677 retcode = __hal_device_handle_link_up_ind(hldev); 2678 else 2679 retcode = __hal_device_handle_link_down_ind(hldev); 2680 return retcode; 2681} 2682 2683/* 2684 * 2685 */ 2686static void 2687__hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value) 2688{ 2689 hldev->stats.sw_dev_err_stats.serr_cnt++; 2690 if (hldev->config.dump_on_serr) { 2691#ifdef XGE_HAL_USE_MGMT_AUX 2692 (void) xge_hal_aux_device_dump(hldev); 2693#endif 2694 } 2695 2696 (void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev, 2697 1, sizeof(u64), (void *)&value); 2698 2699 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg, 2700 (unsigned long long) value); 2701} 2702 2703/* 2704 * 2705 */ 2706static void 2707__hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value) 2708{ 2709 if (hldev->config.dump_on_eccerr) { 2710#ifdef XGE_HAL_USE_MGMT_AUX 2711 (void) xge_hal_aux_device_dump(hldev); 2712#endif 2713 } 2714 2715 /* Herc smart enough to recover on its own! 
*/ 2716 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 2717 (void) xge_queue_produce(hldev->queueh, 2718 XGE_HAL_EVENT_ECCERR, hldev, 2719 1, sizeof(u64), (void *)&value); 2720 } 2721 2722 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg, 2723 (unsigned long long) value); 2724} 2725 2726/* 2727 * 2728 */ 2729static void 2730__hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value) 2731{ 2732 if (hldev->config.dump_on_parityerr) { 2733#ifdef XGE_HAL_USE_MGMT_AUX 2734 (void) xge_hal_aux_device_dump(hldev); 2735#endif 2736 } 2737 (void) xge_queue_produce_context(hldev->queueh, 2738 XGE_HAL_EVENT_PARITYERR, hldev); 2739 2740 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg, 2741 (unsigned long long) value); 2742} 2743 2744/* 2745 * 2746 */ 2747static void 2748__hal_device_handle_targetabort(xge_hal_device_t *hldev) 2749{ 2750 (void) xge_queue_produce_context(hldev->queueh, 2751 XGE_HAL_EVENT_TARGETABORT, hldev); 2752} 2753 2754 2755/* 2756 * __hal_device_hw_initialize 2757 * @hldev: HAL device handle. 2758 * 2759 * Initialize Xframe hardware. 2760 */ 2761static xge_hal_status_e 2762__hal_device_hw_initialize(xge_hal_device_t *hldev) 2763{ 2764 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 2765 xge_hal_status_e status; 2766 u64 val64; 2767 2768 /* Set proper endian settings and verify the same by reading the PIF 2769 * Feed-back register. */ 2770 status = __hal_device_set_swapper(hldev); 2771 if (status != XGE_HAL_OK) { 2772 return status; 2773 } 2774 2775 /* update the pci mode, frequency, and width */ 2776 if (__hal_device_pci_info_get(hldev, &hldev->pci_mode, 2777 &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){ 2778 hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE; 2779 hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN; 2780 hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN; 2781 /* 2782 * FIXME: this cannot happen. 
2783 * But if it happens we cannot continue just like that 2784 */ 2785 xge_debug_device(XGE_ERR, "unable to get pci info"); 2786 } 2787 2788 if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) || 2789 (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) || 2790 (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) { 2791 /* PCI optimization: set TxReqTimeOut 2792 * register (0x800+0x120) to 0x1ff or 2793 * something close to this. 2794 * Note: not to be used for PCI-X! */ 2795 2796 val64 = XGE_HAL_TXREQTO_VAL(0x1FF); 2797 val64 |= XGE_HAL_TXREQTO_EN; 2798 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2799 &bar0->txreqtimeout); 2800 2801 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, 2802 &bar0->read_retry_delay); 2803 2804 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, 2805 &bar0->write_retry_delay); 2806 2807 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode"); 2808 } 2809 2810 if (hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_266MHZ || 2811 hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_250MHZ) { 2812 2813 /* Optimizing for PCI-X 266/250 */ 2814 2815 val64 = XGE_HAL_TXREQTO_VAL(0x7F); 2816 val64 |= XGE_HAL_TXREQTO_EN; 2817 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2818 &bar0->txreqtimeout); 2819 2820 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI-X 266/250 modes"); 2821 } 2822 2823 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2824 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL, 2825 &bar0->read_retry_delay); 2826 2827 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL, 2828 &bar0->write_retry_delay); 2829 } 2830 2831 /* added this to set the no of bytes used to update lso_bytes_sent 2832 returned TxD0 */ 2833 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2834 &bar0->pic_control_2); 2835 val64 &= ~XGE_HAL_TXD_WRITE_BC(0x2); 2836 val64 |= XGE_HAL_TXD_WRITE_BC(0x4); 2837 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 2838 &bar0->pic_control_2); 
2839 /* added this to clear the EOI_RESET field while leaving XGXS_RESET 2840 * in reset, then a 1-second delay */ 2841 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2842 XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset); 2843 xge_os_mdelay(1000); 2844 2845 /* Clear the XGXS_RESET field of the SW_RESET register in order to 2846 * release the XGXS from reset. Its reset value is 0xA5; write 0x00 2847 * to activate the XGXS. The core requires a minimum 500 us reset.*/ 2848 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset); 2849 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2850 &bar0->sw_reset); 2851 xge_os_mdelay(1); 2852 2853 /* read registers in all blocks */ 2854 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2855 &bar0->mac_int_mask); 2856 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2857 &bar0->mc_int_mask); 2858 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2859 &bar0->xgxs_int_mask); 2860 2861 /* set default MTU and steer based on length*/ 2862 __hal_ring_mtu_set(hldev, hldev->config.mtu+22); // Alway set 22 bytes extra for steering to work 2863 2864 if (hldev->config.mac.rmac_bcast_en) { 2865 xge_hal_device_bcast_enable(hldev); 2866 } else { 2867 xge_hal_device_bcast_disable(hldev); 2868 } 2869 2870#ifndef XGE_HAL_HERC_EMULATION 2871 __hal_device_xaui_configure(hldev); 2872#endif 2873 __hal_device_mac_link_util_set(hldev); 2874 2875 __hal_device_mac_link_util_set(hldev); 2876 2877 /* 2878 * Keep its PCI REQ# line asserted during a write 2879 * transaction up to the end of the transaction 2880 */ 2881 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2882 &bar0->misc_control); 2883 2884 val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN; 2885 2886 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2887 val64, &bar0->misc_control); 2888 2889 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2890 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 2891 &bar0->misc_control); 2892 2893 val64 |= 
XGE_HAL_MISC_CONTROL_LINK_FAULT; 2894 2895 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 2896 val64, &bar0->misc_control); 2897 } 2898 2899 /* 2900 * bimodal interrupts is when all Rx traffic interrupts 2901 * will go to TTI, so we need to adjust RTI settings and 2902 * use adaptive TTI timer. We need to make sure RTI is 2903 * properly configured to sane value which will not 2904 * distrupt bimodal behavior. 2905 */ 2906 if (hldev->config.bimodal_interrupts) { 2907 int i; 2908 2909 /* force polling_cnt to be "0", otherwise 2910 * IRQ workload statistics will be screwed. This could 2911 * be worked out in TXPIC handler later. */ 2912 hldev->config.isr_polling_cnt = 0; 2913 hldev->config.sched_timer_us = 10000; 2914 2915 /* disable all TTI < 56 */ 2916 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) { 2917 int j; 2918 if (!hldev->config.fifo.queue[i].configured) 2919 continue; 2920 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) { 2921 if (hldev->config.fifo.queue[i].tti[j].enabled) 2922 hldev->config.fifo.queue[i].tti[j].enabled = 0; 2923 } 2924 } 2925 2926 /* now configure bimodal interrupts */ 2927 __hal_device_bimodal_configure(hldev); 2928 } 2929 2930 status = __hal_device_tti_configure(hldev, 0); 2931 if (status != XGE_HAL_OK) 2932 return status; 2933 2934 status = __hal_device_rti_configure(hldev, 0); 2935 if (status != XGE_HAL_OK) 2936 return status; 2937 2938 status = __hal_device_rth_it_configure(hldev); 2939 if (status != XGE_HAL_OK) 2940 return status; 2941 2942 status = __hal_device_rth_spdm_configure(hldev); 2943 if (status != XGE_HAL_OK) 2944 return status; 2945 2946 status = __hal_device_rts_mac_configure(hldev); 2947 if (status != XGE_HAL_OK) { 2948 xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed "); 2949 return status; 2950 } 2951 2952 status = __hal_device_rts_port_configure(hldev); 2953 if (status != XGE_HAL_OK) { 2954 xge_debug_device(XGE_ERR, "__hal_device_rts_port_configure Failed "); 2955 return status; 2956 } 2957 2958 status = 
__hal_device_rts_qos_configure(hldev); 2959 if (status != XGE_HAL_OK) { 2960 xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed "); 2961 return status; 2962 } 2963 2964 __hal_device_pause_frames_configure(hldev); 2965 __hal_device_rmac_padding_configure(hldev); 2966 __hal_device_shared_splits_configure(hldev); 2967 2968 /* make sure all interrupts going to be disabled at the moment */ 2969 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0); 2970 2971 /* SXE-008 Transmit DMA arbitration issue */ 2972 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA && 2973 hldev->revision < 4) { 2974 xge_os_pio_mem_write64(hldev->pdev,hldev->regh0, 2975 XGE_HAL_ADAPTER_PCC_ENABLE_FOUR, 2976 &bar0->pcc_enable); 2977 } 2978#if 0 // Removing temporarily as FreeBSD is seeing lower performance 2979 // attributable to this fix. 2980 /* SXE-2-010 */ 2981 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 2982 /* Turn off the ECC error reporting for RLDRAM interface */ 2983 if ((status = xge_hal_fix_rldram_ecc_error(hldev)) != XGE_HAL_OK) 2984 return status; 2985 } 2986#endif 2987 __hal_fifo_hw_initialize(hldev); 2988 __hal_ring_hw_initialize(hldev); 2989 2990 if (__hal_device_wait_quiescent(hldev, &val64)) { 2991 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 2992 } 2993 2994 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1, 2995 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT, 2996 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 2997 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!"); 2998 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; 2999 } 3000 3001 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent", 3002 (unsigned long long)(ulong_t)hldev); 3003 3004 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX || 3005 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI) { 3006 /* 3007 * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL 3008 * is disabled. 
     */
        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                    &bar0->pic_control);
        val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT);
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
                    &bar0->pic_control);
    }

    /* Mark the device ready; terminating is cleared so poll/ISR paths run. */
    hldev->hw_is_initialized = 1;
    hldev->terminating = 0;
    return XGE_HAL_OK;
}

/*
 * __hal_device_reset - Reset device only.
 * @hldev: HAL device handle.
 *
 * Reset the device, and subsequently restore
 * the previously saved PCI configuration space.
 *
 * Returns XGE_HAL_OK on success, or XGE_HAL_ERR_RESET_FAILED when the
 * adapter does not re-appear on the bus after the software reset.
 */
#define XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT 50
static xge_hal_status_e
__hal_device_reset(xge_hal_device_t *hldev)
{
    xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
    int i, j, swap_done, pcisize = 0;
    u64 val64, rawval = 0ULL;

    /*
     * Herc MSI-X: the vector table lives in BAR2 and is wiped by the
     * software reset, so save a shadow copy before resetting.
     * (Local name "msix_vetor_table" keeps the original spelling.)
     */
    if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
        if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
            if ( hldev->bar2 ) {
                u64 *msix_vetor_table = (u64 *)hldev->bar2;

                // 2 64bit words for each entry
                for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
                      i++) {
                    hldev->msix_vector_table[i] =
                        xge_os_pio_mem_read64(hldev->pdev,
                            hldev->regh2, &msix_vetor_table[i]);
                }
            }
        }
    }

    /*
     * Determine whether the PIF read-swapper has already been set up;
     * if not, the 32-bit reset value must be byte-swapped by hand
     * before it is written to the (64-bit) sw_reset register.
     */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                &bar0->pif_rd_swapper_fb);
    swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB);

    if (swap_done) {
        __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
            (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset);
    } else {
        u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32);
#if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN)
        /* swap it */
        val = (((val & (u32)0x000000ffUL) << 24) |
               ((val & (u32)0x0000ff00UL) << 8) |
               ((val & (u32)0x00ff0000UL) >> 8) |
               ((val & (u32)0xff000000UL) >> 24));
#endif
        xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val,
                    &bar0->sw_reset);
    }

    pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
               XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;

    xge_os_mdelay(20); /* Wait for 20 ms after reset */

    {
        /* Poll for no more than 1 second */
        /*
         * Re-write the saved PCI config space and then probe the
         * device id until the card answers again (or we give up after
         * XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT attempts).
         */
        for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++)
        {
            for (j = 0; j < pcisize; j++) {
                xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
                    *((u32*)&hldev->pci_config_space + j));
            }

            xge_os_pci_read16(hldev->pdev,hldev->cfgh,
                xge_offsetof(xge_hal_pci_config_le_t, device_id),
                &hldev->device_id);

            if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN)
                break;
            xge_os_mdelay(20);
        }
    }

    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN)
    {
        xge_debug_device(XGE_ERR, "device reset failed");
        return XGE_HAL_ERR_RESET_FAILED;
    }

    /*
     * Wait for sw_reset to move away from the card-specific raw reset
     * value: Herc is polled (up to ~20 ms); Xena just gets a fixed delay.
     */
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
        int cnt = 0;

        rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC;
        pcisize = XGE_HAL_PCISIZE_HERC;
        xge_os_mdelay(1);
        do {
            val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                        &bar0->sw_reset);
            if (val64 != rawval) {
                break;
            }
            cnt++;
            xge_os_mdelay(1); /* Wait for 1ms before retry */
        } while(cnt < 20);
    } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
        rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA;
        pcisize = XGE_HAL_PCISIZE_XENA;
        xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS);
    }

    /* Restore MSI-X vector table */
    if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
        if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
            if ( hldev->bar2 ) {
                /*
                 * 94: MSIXTable 00000004 ( BIR:4  Offset:0x0 )
                 * 98: PBATable  00000404 ( BIR:4  Offset:0x400 )
                 */
                u64 *msix_vetor_table = (u64 *)hldev->bar2;

                /* 2 64bit words for each entry */
                for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
                      i++) {
                    xge_os_pio_mem_write64(hldev->pdev,
                        hldev->regh2,
                        hldev->msix_vector_table[i],
                        &msix_vetor_table[i]);
                }
            }
        }
    }

    hldev->link_state = XGE_HAL_LINK_DOWN;
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                &bar0->sw_reset);

    if (val64 != rawval) {
        xge_debug_device(XGE_ERR, "device has not been reset "
            "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT,
            (unsigned long long)val64, (unsigned long long)rawval);
        return XGE_HAL_ERR_RESET_FAILED;
    }

    /* Force __hal_device_hw_initialize() to run on the next enable. */
    hldev->hw_is_initialized = 0;
    return XGE_HAL_OK;
}

/*
 * __hal_device_poll - General private routine to poll the device.
 * @hldev: HAL device handle.
 *
 * Returns: one of the xge_hal_status_e{} enumerated types.
 * XGE_HAL_OK           - for success.
 * XGE_HAL_ERR_CRITICAL - when encounters critical error.
 */
static xge_hal_status_e
__hal_device_poll(xge_hal_device_t *hldev)
{
    xge_hal_pci_bar0_t *bar0;
    u64 err_reg;

    bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

    /* Handling SERR errors by forcing a H/W reset.
     */
    err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                  &bar0->serr_source);
    if (err_reg & XGE_HAL_SERR_SOURCE_ANY) {
        __hal_device_handle_serr(hldev, "serr_source", err_reg);
        return XGE_HAL_ERR_CRITICAL;
    }

    err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                  &bar0->misc_int_reg);

    if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) {
        hldev->stats.sw_dev_err_stats.parity_err_cnt++;
        __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg);
        return XGE_HAL_ERR_CRITICAL;
    }

#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
#endif
    {

        /* Handling link status change error Intr */
        err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                      &bar0->mac_rmac_err_reg);
        if (__hal_device_handle_link_state_change(hldev))
            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                   err_reg, &bar0->mac_rmac_err_reg);
    }

    /*
     * The three inject_* fields below are test hooks: a debugger/ULD can
     * set them to simulate SERR, ECC and bad-tcode conditions; each is
     * consumed (cleared) on first use.
     */
    if (hldev->inject_serr != 0) {
        err_reg = hldev->inject_serr;
        hldev->inject_serr = 0;
        __hal_device_handle_serr(hldev, "inject_serr", err_reg);
        return XGE_HAL_ERR_CRITICAL;
    }

    if (hldev->inject_ecc != 0) {
        err_reg = hldev->inject_ecc;
        hldev->inject_ecc = 0;
        hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
        __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg);
        return XGE_HAL_ERR_CRITICAL;
    }

    if (hldev->inject_bad_tcode != 0) {
        u8 t_code = hldev->inject_bad_tcode;
        xge_hal_channel_t channel;
        xge_hal_fifo_txd_t txd;
        xge_hal_ring_rxd_1_t rxd;

        /* NOTE: only channel.devh/channel.type are initialized here;
         * xge_hal_device_handle_tcode() presumably reads no other
         * channel fields for injected t-codes — TODO confirm. */
        channel.devh = hldev;

        if (hldev->inject_bad_tcode_for_chan_type ==
            XGE_HAL_CHANNEL_TYPE_FIFO) {
            channel.type = XGE_HAL_CHANNEL_TYPE_FIFO;

        } else {
            channel.type = XGE_HAL_CHANNEL_TYPE_RING;
        }

        hldev->inject_bad_tcode = 0;

        if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO)
            return xge_hal_device_handle_tcode(&channel, &txd,
                             t_code);
        else
            return xge_hal_device_handle_tcode(&channel, &rxd,
                             t_code);
    }

    return XGE_HAL_OK;
}

/*
 * __hal_verify_pcc_idle - Verify All Enbled PCC are IDLE or not
 * @hldev: HAL device handle.
 * @adp_status: Adapter Status value
 * Usage: See xge_hal_device_enable{}.
 */
xge_hal_status_e
__hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status)
{
    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
        hldev->revision < 4) {
        /*
         * For Xena 1,2,3 we enable only 4 PCCs Due to
         * SXE-008 (Transmit DMA arbitration issue)
         */
        if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE)
            != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) {
            xge_debug_device(XGE_TRACE, "%s",
                "PCC is not IDLE after adapter enabled!");
            return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
        }
    } else {
        if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) !=
            XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) {
            xge_debug_device(XGE_TRACE, "%s",
                "PCC is not IDLE after adapter enabled!");
            return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
        }
    }
    return XGE_HAL_OK;
}

/*
 * __hal_update_bimodal - Adaptively re-tune the bimodal TTI timer for a ring.
 * @hldev: HAL device handle.
 * @ring_no: ring whose interrupt-workload counters are consumed.
 *
 * Called from the scheduled-timer path. Uses a 50-tick history of
 * (interrupt count x average bytes) to decide whether to step the timer
 * up or down, then reconfigures RTI with the new value.
 *
 * NOTE(review): the history/index/pstep state is function-static, i.e.
 * shared across all rings and devices — acceptable only as long as a
 * single ring uses bimodal tuning at a time.
 */
static void
__hal_update_bimodal(xge_hal_device_t *hldev, int ring_no)
{
    int tval, d, iwl_avg, len_avg, bytes_avg, bytes_hist, d_hist;
    int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg;
    int iwl_cnt, i;

#define _HIST_SIZE  50 /* 0.5 sec history */
#define _HIST_ADJ_TIMER 1
#define _STEP 2

    static int bytes_avg_history[_HIST_SIZE] = {0};
    static int d_avg_history[_HIST_SIZE] = {0};
    static int history_idx = 0;
    static int pstep = 1;
    static int hist_adj_timer = 0;

    /*
     * tval - current value of this bimodal timer
     */
    tval = hldev->bimodal_tti[ring_no].timer_val_us;

    /*
     * d - how many interrupts we were getting since last
     * bimodal timer tick.
     */
    d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt -
        hldev->bimodal_intr_cnt;

    /* advance bimodal interrupt counter */
    hldev->bimodal_intr_cnt =
        hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;

    /*
     * iwl_cnt - how many interrupts we've got since last
     * bimodal timer tick.
     */
    iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ?
            hldev->irq_workload_rxcnt[ring_no] : 1);
    iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ?
            hldev->irq_workload_txcnt[ring_no] : 1);
    iwl_cnt = iwl_rxcnt + iwl_txcnt;
    iwl_cnt = iwl_cnt; /* just to remove the lint warning */

    /*
     * we need to take hldev->config.isr_polling_cnt into account
     * but for some reason this line causing GCC to produce wrong
     * code on Solaris. As of now, if bimodal_interrupts is configured
     * hldev->config.isr_polling_cnt is forced to be "0".
     *
     * iwl_cnt = iwl_cnt / (hldev->config.isr_polling_cnt + 1); */

    /*
     * iwl_avg - how many RXDs on avarage been processed since
     * last bimodal timer tick. This indirectly includes
     * CPU utilizations.
     */
    iwl_rxavg = hldev->irq_workload_rxd[ring_no] / iwl_rxcnt;
    iwl_txavg = hldev->irq_workload_txd[ring_no] / iwl_txcnt;
    iwl_avg = iwl_rxavg + iwl_txavg;
    iwl_avg = iwl_avg == 0 ? 1 : iwl_avg;

    /*
     * len_avg - how many bytes on avarage been processed since
     * last bimodal timer tick. i.e. avarage frame size.
     */
    len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] /
            (hldev->irq_workload_rxd[ring_no] ?
            hldev->irq_workload_rxd[ring_no] : 1);
    len_txavg = 1 + hldev->irq_workload_txlen[ring_no] /
            (hldev->irq_workload_txd[ring_no] ?
            hldev->irq_workload_txd[ring_no] : 1);
    len_avg = len_rxavg + len_txavg;
    if (len_avg < 60)
        len_avg = 60;

    /* align on low boundary */
    if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us)
        tval = hldev->config.bimodal_timer_lo_us;

    /* reset faster */
    if (iwl_avg == 1) {
        tval = hldev->config.bimodal_timer_lo_us;
        /* reset history */
        for (i = 0; i < _HIST_SIZE; i++)
            bytes_avg_history[i] = d_avg_history[i] = 0;
        history_idx = 0;
        pstep = 1;
        hist_adj_timer = 0;
    }

    /* always try to ajust timer to the best throughput value */
    bytes_avg = iwl_avg * len_avg;
    history_idx %= _HIST_SIZE;
    bytes_avg_history[history_idx] = bytes_avg;
    d_avg_history[history_idx] = d;
    history_idx++;
    d_hist = bytes_hist = 0;
    for (i = 0; i < _HIST_SIZE; i++) {
        /* do not re-configure until history is gathered */
        if (!bytes_avg_history[i]) {
            tval = hldev->config.bimodal_timer_lo_us;
            goto _end;
        }
        bytes_hist += bytes_avg_history[i];
        d_hist += d_avg_history[i];
    }
    bytes_hist /= _HIST_SIZE;
    d_hist /= _HIST_SIZE;

//  xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d",
//      d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg,
//      d_hist*bytes_hist, pstep);

    /* make an adaptive step */
    if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) {
        pstep = !pstep;
        hist_adj_timer = 0;
    }

    if (pstep &&
        (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) {
        tval += _STEP;
        hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++;
    } else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) {
        tval -= _STEP;
        hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++;
    }

    /* enable TTI range A for better latencies */
    hldev->bimodal_urange_a_en = 0;
    if (tval <= hldev->config.bimodal_timer_lo_us && iwl_avg > 2)
        hldev->bimodal_urange_a_en = 1;

_end:
    /* reset workload statistics counters */
    hldev->irq_workload_rxcnt[ring_no] = 0;
    hldev->irq_workload_rxd[ring_no] = 0;
    hldev->irq_workload_rxlen[ring_no] = 0;
    hldev->irq_workload_txcnt[ring_no] = 0;
    hldev->irq_workload_txd[ring_no] = 0;
    hldev->irq_workload_txlen[ring_no] = 0;

    /* reconfigure TTI56 + ring_no with new timer value */
    hldev->bimodal_timer_val_us = tval;
    (void) __hal_device_rti_configure(hldev, 1);
}

/*
 * __hal_update_rxufca - Adaptive urange_a UFC coalescing for a ring.
 * @hldev: HAL device handle.
 * @ring_no: ring whose rti.ufc_a seeds the adjustment.
 *
 * Every rxufca_lbolt_period ticks, raises ufc_a (fewer interrupts) while
 * the rx interrupt rate exceeds the moving threshold, lowers it otherwise,
 * bounded by config.rxufca_lo_lim..rxufca_hi_lim. Note the new value is
 * applied to ALL rings, not just @ring_no.
 */
static void
__hal_update_rxufca(xge_hal_device_t *hldev, int ring_no)
{
    int ufc, ic, i;

    ufc = hldev->config.ring.queue[ring_no].rti.ufc_a;
    ic = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;

    /* urange_a adaptive coalescing */
    if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) {
        if (ic > hldev->rxufca_intr_thres) {
            if (ufc < hldev->config.rxufca_hi_lim) {
                ufc += 1;
                for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
                    hldev->config.ring.queue[i].rti.ufc_a = ufc;
                (void) __hal_device_rti_configure(hldev, 1);
                hldev->stats.sw_dev_info_stats.
                    rxufca_hi_adjust_cnt++;
            }
            hldev->rxufca_intr_thres = ic +
                hldev->config.rxufca_intr_thres; /* def: 30 */
        } else {
            if (ufc > hldev->config.rxufca_lo_lim) {
                ufc -= 1;
                for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
                    hldev->config.ring.queue[i].rti.ufc_a = ufc;
                (void) __hal_device_rti_configure(hldev, 1);
                hldev->stats.sw_dev_info_stats.
                    rxufca_lo_adjust_cnt++;
            }
        }
        hldev->rxufca_lbolt_time = hldev->rxufca_lbolt +
            hldev->config.rxufca_lbolt_period;
    }
    hldev->rxufca_lbolt++;
}

/*
 * __hal_device_handle_mc - Handle MC interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 *
 * Reads and clears mc_err_reg, classifies single/double ECC and state
 * machine errors into the sw_dev_err_stats counters. A double ECC error
 * on either MIRI bank is critical and returns XGE_HAL_ERR_CRITICAL so
 * the caller resets the device; everything else returns XGE_HAL_OK.
 */
xge_hal_status_e
__hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason)
{
    xge_hal_pci_bar0_t *isrbar0 =
        (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
    u64 val64;

    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                &isrbar0->mc_int_status);
    if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT))
        return XGE_HAL_OK;

    /* read-then-write-back clears the latched error bits */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                &isrbar0->mc_err_reg);
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                val64, &isrbar0->mc_err_reg);

    /* ITQ/RLD banks exist only on non-Xena (Herc/Titan) cards */
    if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L ||
        val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U ||
        val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 ||
        val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 ||
        (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
         (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L ||
          val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U ||
          val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L ||
          val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) {
        hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++;
        hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
    }

    if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L ||
        val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U ||
        val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
        val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 ||
        (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
         (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L ||
          val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U ||
          val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L ||
          val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) {
        hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++;
        hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
    }

    if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) {
        hldev->stats.sw_dev_err_stats.sm_err_cnt++;
    }

    /* those two should result in device reset */
    if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
        val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) {
        __hal_device_handle_eccerr(hldev, "mc_err_reg", val64);
        return XGE_HAL_ERR_CRITICAL;
    }

    return XGE_HAL_OK;
}

/*
 * __hal_device_handle_pic - Handle non-traffic PIC interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 *
 * Acknowledges FLSH/MDIO/IIC/MISC sub-interrupts by reading and writing
 * back the corresponding *_int_reg. Only the MISC link up/down bits get
 * real handling (Herc only, and only when link interrupts are processed
 * in the ISR); the other registers are acknowledged but otherwise
 * ignored (see FIXMEs). Always returns XGE_HAL_OK.
 */
xge_hal_status_e
__hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason)
{
    xge_hal_pci_bar0_t *isrbar0 =
        (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
    u64 val64;

    if (reason & XGE_HAL_PIC_INT_FLSH) {
        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                    &isrbar0->flsh_int_reg);
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                    val64, &isrbar0->flsh_int_reg);
        /* FIXME: handle register */
    }
    if (reason & XGE_HAL_PIC_INT_MDIO) {
        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                    &isrbar0->mdio_int_reg);
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                    val64, &isrbar0->mdio_int_reg);
        /* FIXME: handle register */
    }
    if (reason & XGE_HAL_PIC_INT_IIC) {
        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                    &isrbar0->iic_int_reg);
        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                    val64, &isrbar0->iic_int_reg);
        /* FIXME: handle register */
    }
    if (reason & XGE_HAL_PIC_INT_MISC) {
        val64 = xge_os_pio_mem_read64(hldev->pdev,
                hldev->regh0, &isrbar0->misc_int_reg);
#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
        if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
            /*  Check for Link interrupts. If both Link Up/Down
             *  bits are set, clear both and check adapter status
             */
            if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) &&
                (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) {
                u64 temp64;

                xge_debug_device(XGE_TRACE,
                    "both link up and link down detected "XGE_OS_LLXFMT,
                    (unsigned long long)val64);

                temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT |
                      XGE_HAL_MISC_INT_REG_LINK_UP_INT);
                xge_os_pio_mem_write64(hldev->pdev,
                           hldev->regh0, temp64,
                           &isrbar0->misc_int_reg);
            }
            else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) {
                xge_debug_device(XGE_TRACE,
                    "link up call request, misc_int "XGE_OS_LLXFMT,
                    (unsigned long long)val64);
                __hal_device_handle_link_up_ind(hldev);
            }
            else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){
                xge_debug_device(XGE_TRACE,
                    "link down request, misc_int "XGE_OS_LLXFMT,
                    (unsigned long long)val64);
                __hal_device_handle_link_down_ind(hldev);
            }
        } else
#endif
        {
            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                        val64, &isrbar0->misc_int_reg);
        }
    }

    return XGE_HAL_OK;
}

/*
 * __hal_device_handle_txpic - Handle TxPIC interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 */
xge_hal_status_e
__hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
{
    xge_hal_status_e status = XGE_HAL_OK;
    xge_hal_pci_bar0_t *isrbar0 =
        (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
    volatile u64 val64;

    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                &isrbar0->pic_int_status);
    if ( val64 & (XGE_HAL_PIC_INT_FLSH |
              XGE_HAL_PIC_INT_MDIO |
              XGE_HAL_PIC_INT_IIC |
              XGE_HAL_PIC_INT_MISC) ) {
        status =  __hal_device_handle_pic(hldev, val64);
        xge_os_wmb();
    }

    if (!(val64 & XGE_HAL_PIC_INT_TX))
        return status;

    /* acknowledge the TX PIC interrupt */
    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
                &isrbar0->txpic_int_reg);
    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
                val64, &isrbar0->txpic_int_reg);
    xge_os_wmb();

    if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) {
        int i;

        if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL)
            g_xge_hal_driver->uld_callbacks.sched_timer(
                      hldev, hldev->upper_layer_info);
        /*
         * This feature implements adaptive receive interrupt
         * coalecing. It is disabled by default. To enable it
         * set hldev->config.rxufca_lo_lim to be not equal to
         * hldev->config.rxufca_hi_lim.
         *
         * We are using HW timer for this feature, so
         * use needs to configure hldev->config.rxufca_lbolt_period
         * which is essentially a time slice of timer.
         *
         * For those who familiar with Linux, lbolt means jiffies
         * of this timer. I.e. timer tick.
3657 */ 3658 if (hldev->config.rxufca_lo_lim != 3659 hldev->config.rxufca_hi_lim && 3660 hldev->config.rxufca_lo_lim != 0) { 3661 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { 3662 if (!hldev->config.ring.queue[i].configured) 3663 continue; 3664 if (hldev->config.ring.queue[i].rti.urange_a) 3665 __hal_update_rxufca(hldev, i); 3666 } 3667 } 3668 3669 /* 3670 * This feature implements adaptive TTI timer re-calculation 3671 * based on host utilization, number of interrupt processed, 3672 * number of RXD per tick and avarage length of packets per 3673 * tick. 3674 */ 3675 if (hldev->config.bimodal_interrupts) { 3676 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { 3677 if (!hldev->config.ring.queue[i].configured) 3678 continue; 3679 if (hldev->bimodal_tti[i].enabled) 3680 __hal_update_bimodal(hldev, i); 3681 } 3682 } 3683 } 3684 3685 return XGE_HAL_OK; 3686} 3687 3688/* 3689 * __hal_device_handle_txdma - Handle TxDMA interrupt reason 3690 * @hldev: HAL device handle. 3691 * @reason: interrupt reason 3692 */ 3693xge_hal_status_e 3694__hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason) 3695{ 3696 xge_hal_pci_bar0_t *isrbar0 = 3697 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3698 u64 val64, temp64, err; 3699 3700 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3701 &isrbar0->txdma_int_status); 3702 if (val64 & XGE_HAL_TXDMA_PFC_INT) { 3703 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3704 &isrbar0->pfc_err_reg); 3705 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3706 err, &isrbar0->pfc_err_reg); 3707 hldev->stats.sw_dev_info_stats.pfc_err_cnt++; 3708 temp64 = XGE_HAL_PFC_ECC_DB_ERR|XGE_HAL_PFC_SM_ERR_ALARM 3709 |XGE_HAL_PFC_MISC_0_ERR|XGE_HAL_PFC_MISC_1_ERR 3710 |XGE_HAL_PFC_PCIX_ERR; 3711 if (val64 & temp64) 3712 goto reset; 3713 } 3714 if (val64 & XGE_HAL_TXDMA_TDA_INT) { 3715 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3716 &isrbar0->tda_err_reg); 3717 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3718 err, 
&isrbar0->tda_err_reg); 3719 hldev->stats.sw_dev_info_stats.tda_err_cnt++; 3720 temp64 = XGE_HAL_TDA_Fn_ECC_DB_ERR|XGE_HAL_TDA_SM0_ERR_ALARM 3721 |XGE_HAL_TDA_SM1_ERR_ALARM; 3722 if (val64 & temp64) 3723 goto reset; 3724 } 3725 if (val64 & XGE_HAL_TXDMA_PCC_INT) { 3726 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3727 &isrbar0->pcc_err_reg); 3728 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3729 err, &isrbar0->pcc_err_reg); 3730 hldev->stats.sw_dev_info_stats.pcc_err_cnt++; 3731 temp64 = XGE_HAL_PCC_FB_ECC_DB_ERR|XGE_HAL_PCC_TXB_ECC_DB_ERR 3732 |XGE_HAL_PCC_SM_ERR_ALARM|XGE_HAL_PCC_WR_ERR_ALARM 3733 |XGE_HAL_PCC_N_SERR|XGE_HAL_PCC_6_COF_OV_ERR 3734 |XGE_HAL_PCC_7_COF_OV_ERR|XGE_HAL_PCC_6_LSO_OV_ERR 3735 |XGE_HAL_PCC_7_LSO_OV_ERR; 3736 if (val64 & temp64) 3737 goto reset; 3738 } 3739 if (val64 & XGE_HAL_TXDMA_TTI_INT) { 3740 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3741 &isrbar0->tti_err_reg); 3742 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3743 err, &isrbar0->tti_err_reg); 3744 hldev->stats.sw_dev_info_stats.tti_err_cnt++; 3745 temp64 = XGE_HAL_TTI_SM_ERR_ALARM; 3746 if (val64 & temp64) 3747 goto reset; 3748 } 3749 if (val64 & XGE_HAL_TXDMA_LSO_INT) { 3750 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3751 &isrbar0->lso_err_reg); 3752 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3753 err, &isrbar0->lso_err_reg); 3754 hldev->stats.sw_dev_info_stats.lso_err_cnt++; 3755 temp64 = XGE_HAL_LSO6_ABORT|XGE_HAL_LSO7_ABORT 3756 |XGE_HAL_LSO6_SM_ERR_ALARM|XGE_HAL_LSO7_SM_ERR_ALARM; 3757 if (val64 & temp64) 3758 goto reset; 3759 } 3760 if (val64 & XGE_HAL_TXDMA_TPA_INT) { 3761 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3762 &isrbar0->tpa_err_reg); 3763 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3764 err, &isrbar0->tpa_err_reg); 3765 hldev->stats.sw_dev_info_stats.tpa_err_cnt++; 3766 temp64 = XGE_HAL_TPA_SM_ERR_ALARM; 3767 if (val64 & temp64) 3768 goto reset; 3769 } 3770 if (val64 & XGE_HAL_TXDMA_SM_INT) { 
3771 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3772 &isrbar0->sm_err_reg); 3773 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3774 err, &isrbar0->sm_err_reg); 3775 hldev->stats.sw_dev_info_stats.sm_err_cnt++; 3776 temp64 = XGE_HAL_SM_SM_ERR_ALARM; 3777 if (val64 & temp64) 3778 goto reset; 3779 } 3780 3781 return XGE_HAL_OK; 3782 3783reset : xge_hal_device_reset(hldev); 3784 xge_hal_device_enable(hldev); 3785 xge_hal_device_intr_enable(hldev); 3786 return XGE_HAL_OK; 3787} 3788 3789/* 3790 * __hal_device_handle_txmac - Handle TxMAC interrupt reason 3791 * @hldev: HAL device handle. 3792 * @reason: interrupt reason 3793 */ 3794xge_hal_status_e 3795__hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason) 3796{ 3797 xge_hal_pci_bar0_t *isrbar0 = 3798 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3799 u64 val64, temp64; 3800 3801 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3802 &isrbar0->mac_int_status); 3803 if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT)) 3804 return XGE_HAL_OK; 3805 3806 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3807 &isrbar0->mac_tmac_err_reg); 3808 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3809 val64, &isrbar0->mac_tmac_err_reg); 3810 hldev->stats.sw_dev_info_stats.mac_tmac_err_cnt++; 3811 temp64 = XGE_HAL_TMAC_TX_BUF_OVRN|XGE_HAL_TMAC_TX_SM_ERR; 3812 if (val64 & temp64) { 3813 xge_hal_device_reset(hldev); 3814 xge_hal_device_enable(hldev); 3815 xge_hal_device_intr_enable(hldev); 3816 } 3817 3818 return XGE_HAL_OK; 3819} 3820 3821/* 3822 * __hal_device_handle_txxgxs - Handle TxXGXS interrupt reason 3823 * @hldev: HAL device handle. 
3824 * @reason: interrupt reason 3825 */ 3826xge_hal_status_e 3827__hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason) 3828{ 3829 xge_hal_pci_bar0_t *isrbar0 = 3830 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3831 u64 val64, temp64; 3832 3833 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3834 &isrbar0->xgxs_int_status); 3835 if (!(val64 & XGE_HAL_XGXS_INT_STATUS_TXGXS)) 3836 return XGE_HAL_OK; 3837 3838 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3839 &isrbar0->xgxs_txgxs_err_reg); 3840 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3841 val64, &isrbar0->xgxs_txgxs_err_reg); 3842 hldev->stats.sw_dev_info_stats.xgxs_txgxs_err_cnt++; 3843 temp64 = XGE_HAL_TXGXS_ESTORE_UFLOW|XGE_HAL_TXGXS_TX_SM_ERR; 3844 if (val64 & temp64) { 3845 xge_hal_device_reset(hldev); 3846 xge_hal_device_enable(hldev); 3847 xge_hal_device_intr_enable(hldev); 3848 } 3849 3850 return XGE_HAL_OK; 3851} 3852 3853/* 3854 * __hal_device_handle_rxpic - Handle RxPIC interrupt reason 3855 * @hldev: HAL device handle. 3856 * @reason: interrupt reason 3857 */ 3858xge_hal_status_e 3859__hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason) 3860{ 3861 /* FIXME: handle register */ 3862 3863 return XGE_HAL_OK; 3864} 3865 3866/* 3867 * __hal_device_handle_rxdma - Handle RxDMA interrupt reason 3868 * @hldev: HAL device handle. 
3869 * @reason: interrupt reason 3870 */ 3871xge_hal_status_e 3872__hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason) 3873{ 3874 xge_hal_pci_bar0_t *isrbar0 = 3875 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3876 u64 val64, err, temp64; 3877 3878 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3879 &isrbar0->rxdma_int_status); 3880 if (val64 & XGE_HAL_RXDMA_RC_INT) { 3881 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3882 &isrbar0->rc_err_reg); 3883 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3884 err, &isrbar0->rc_err_reg); 3885 hldev->stats.sw_dev_info_stats.rc_err_cnt++; 3886 temp64 = XGE_HAL_RC_PRCn_ECC_DB_ERR|XGE_HAL_RC_FTC_ECC_DB_ERR 3887 |XGE_HAL_RC_PRCn_SM_ERR_ALARM 3888 |XGE_HAL_RC_FTC_SM_ERR_ALARM; 3889 if (val64 & temp64) 3890 goto reset; 3891 } 3892 if (val64 & XGE_HAL_RXDMA_RPA_INT) { 3893 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3894 &isrbar0->rpa_err_reg); 3895 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3896 err, &isrbar0->rpa_err_reg); 3897 hldev->stats.sw_dev_info_stats.rpa_err_cnt++; 3898 temp64 = XGE_HAL_RPA_SM_ERR_ALARM|XGE_HAL_RPA_CREDIT_ERR; 3899 if (val64 & temp64) 3900 goto reset; 3901 } 3902 if (val64 & XGE_HAL_RXDMA_RDA_INT) { 3903 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3904 &isrbar0->rda_err_reg); 3905 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3906 err, &isrbar0->rda_err_reg); 3907 hldev->stats.sw_dev_info_stats.rda_err_cnt++; 3908 temp64 = XGE_HAL_RDA_RXDn_ECC_DB_ERR 3909 |XGE_HAL_RDA_FRM_ECC_DB_N_AERR 3910 |XGE_HAL_RDA_SM1_ERR_ALARM|XGE_HAL_RDA_SM0_ERR_ALARM 3911 |XGE_HAL_RDA_RXD_ECC_DB_SERR; 3912 if (val64 & temp64) 3913 goto reset; 3914 } 3915 if (val64 & XGE_HAL_RXDMA_RTI_INT) { 3916 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3917 &isrbar0->rti_err_reg); 3918 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3919 err, &isrbar0->rti_err_reg); 3920 hldev->stats.sw_dev_info_stats.rti_err_cnt++; 3921 temp64 = XGE_HAL_RTI_SM_ERR_ALARM; 3922 if 
(val64 & temp64) 3923 goto reset; 3924 } 3925 3926 return XGE_HAL_OK; 3927 3928reset : xge_hal_device_reset(hldev); 3929 xge_hal_device_enable(hldev); 3930 xge_hal_device_intr_enable(hldev); 3931 return XGE_HAL_OK; 3932} 3933 3934/* 3935 * __hal_device_handle_rxmac - Handle RxMAC interrupt reason 3936 * @hldev: HAL device handle. 3937 * @reason: interrupt reason 3938 */ 3939xge_hal_status_e 3940__hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason) 3941{ 3942 xge_hal_pci_bar0_t *isrbar0 = 3943 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3944 u64 val64, temp64; 3945 3946 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3947 &isrbar0->mac_int_status); 3948 if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT)) 3949 return XGE_HAL_OK; 3950 3951 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3952 &isrbar0->mac_rmac_err_reg); 3953 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3954 val64, &isrbar0->mac_rmac_err_reg); 3955 hldev->stats.sw_dev_info_stats.mac_rmac_err_cnt++; 3956 temp64 = XGE_HAL_RMAC_RX_BUFF_OVRN|XGE_HAL_RMAC_RX_SM_ERR; 3957 if (val64 & temp64) { 3958 xge_hal_device_reset(hldev); 3959 xge_hal_device_enable(hldev); 3960 xge_hal_device_intr_enable(hldev); 3961 } 3962 3963 return XGE_HAL_OK; 3964} 3965 3966/* 3967 * __hal_device_handle_rxxgxs - Handle RxXGXS interrupt reason 3968 * @hldev: HAL device handle. 
3969 * @reason: interrupt reason 3970 */ 3971xge_hal_status_e 3972__hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason) 3973{ 3974 xge_hal_pci_bar0_t *isrbar0 = 3975 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; 3976 u64 val64, temp64; 3977 3978 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3979 &isrbar0->xgxs_int_status); 3980 if (!(val64 & XGE_HAL_XGXS_INT_STATUS_RXGXS)) 3981 return XGE_HAL_OK; 3982 3983 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 3984 &isrbar0->xgxs_rxgxs_err_reg); 3985 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 3986 val64, &isrbar0->xgxs_rxgxs_err_reg); 3987 hldev->stats.sw_dev_info_stats.xgxs_rxgxs_err_cnt++; 3988 temp64 = XGE_HAL_RXGXS_ESTORE_OFLOW|XGE_HAL_RXGXS_RX_SM_ERR; 3989 if (val64 & temp64) { 3990 xge_hal_device_reset(hldev); 3991 xge_hal_device_enable(hldev); 3992 xge_hal_device_intr_enable(hldev); 3993 } 3994 3995 return XGE_HAL_OK; 3996} 3997 3998/** 3999 * xge_hal_device_enable - Enable device. 4000 * @hldev: HAL device handle. 4001 * 4002 * Enable the specified device: bring up the link/interface. 4003 * Returns: XGE_HAL_OK - success. 4004 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device 4005 * to a "quiescent" state. 4006 * 4007 * See also: xge_hal_status_e{}. 4008 * 4009 * Usage: See ex_open{}. 4010 */ 4011xge_hal_status_e 4012xge_hal_device_enable(xge_hal_device_t *hldev) 4013{ 4014 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 4015 u64 val64; 4016 u64 adp_status; 4017 int i, j; 4018 4019 if (!hldev->hw_is_initialized) { 4020 xge_hal_status_e status; 4021 4022 status = __hal_device_hw_initialize(hldev); 4023 if (status != XGE_HAL_OK) { 4024 return status; 4025 } 4026 } 4027 4028 /* 4029 * Not needed in most cases, i.e. 4030 * when device_disable() is followed by reset - 4031 * the latter copies back PCI config space, along with 4032 * the bus mastership - see __hal_device_reset(). 
	 * However, there are/may-in-future be other cases, and
	 * does not hurt.
	 */
	__hal_device_bus_master_enable(hldev);

	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		/*
		 * Configure the link stability period.
		 * NOTE(review): the field is OR'ed into misc_control without
		 * clearing the previous value — confirm the register is
		 * zeroed at this point or the macro masks its field.
		 */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->misc_control);
		if (hldev->config.link_stability_period !=
		    XGE_HAL_DEFAULT_USE_HARDCODE) {

			val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
			    hldev->config.link_stability_period);
		} else {
			/*
			 * Use the link stability period 1 ms as default
			 */
			val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
			    XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD);
		}
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		    val64, &bar0->misc_control);

		/*
		 * Clearing any possible Link up/down interrupts that
		 * could have popped up just before Enabling the card.
		 */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->misc_int_reg);
		if (val64) {
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			    val64, &bar0->misc_int_reg);
			xge_debug_device(XGE_TRACE, "%s","link state cleared");
		}
	} else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
		/*
		 * Clearing any possible Link state change interrupts that
		 * could have popped up just before Enabling the card.
		 */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->mac_rmac_err_reg);
		if (val64) {
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			    val64, &bar0->mac_rmac_err_reg);
			xge_debug_device(XGE_TRACE, "%s", "link state cleared");
		}
	}

	if (__hal_device_wait_quiescent(hldev, &val64)) {
		return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	}

	/* Enabling Laser.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->adapter_control);
	val64 |= XGE_HAL_ADAPTER_EOI_TX_ON;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->adapter_control);

	/* let link establish */
	xge_os_mdelay(1);

	/* set link down until poll() routine will set it up (maybe) */
	hldev->link_state = XGE_HAL_LINK_DOWN;

	/* If link is UP (adapter is connected) then enable the adapter */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->adapter_status);
	if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
	    XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) {
		/* fault seen: keep the LED off */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->adapter_control);
		val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
	} else {
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->adapter_control);
		val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON |
		    XGE_HAL_ADAPTER_LED_ON );
	}

	val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN;	/* adapter enable */
	/* NOTE: clears ECC_EN — ECC reporting is turned OFF here (the
	 * original comment said "ECC enable", which contradicted the code) */
	val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);
	xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64,
	    &bar0->adapter_control);

	/* We spin here waiting for the Link to come up.
	 * This is the fix for the Link being unstable after the reset.
	 */
	i = 0;
	j = 0;
	do
	{
		adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->adapter_status);

		/* Read the adapter control register for Adapter_enable bit */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->adapter_control);
		if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
		    XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) &&
		    (val64 & XGE_HAL_ADAPTER_CNTL_EN)) {
			/* link looks good; require link_valid_cnt
			 * consecutive good polls before trusting it */
			j++;
			if (j >= hldev->config.link_valid_cnt) {
				if (xge_hal_device_status(hldev, &adp_status) ==
				    XGE_HAL_OK) {
					if (__hal_verify_pcc_idle(hldev,
					    adp_status) != XGE_HAL_OK) {
						return
						XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
					}
					xge_debug_device(XGE_TRACE,
					    "adp_status: "XGE_OS_LLXFMT
					    ", link is up on "
					    "adapter enable!",
					    (unsigned long long)adp_status);
					val64 = xge_os_pio_mem_read64(
					    hldev->pdev,
					    hldev->regh0,
					    &bar0->adapter_control);
					val64 = val64|
					    (XGE_HAL_ADAPTER_EOI_TX_ON |
					    XGE_HAL_ADAPTER_LED_ON );
					xge_os_pio_mem_write64(hldev->pdev,
					    hldev->regh0, val64,
					    &bar0->adapter_control);
					xge_os_mdelay(1);

					val64 = xge_os_pio_mem_read64(
					    hldev->pdev,
					    hldev->regh0,
					    &bar0->adapter_control);
					break;	/* out of the retry loop */
				} else {
					return
					XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
				}
			}
		} else {
			j = 0;	/* Reset the count */
			/* Turn on the Laser */
			val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			    &bar0->adapter_control);
			val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON;
			xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0,
			    val64, &bar0->adapter_control);

			xge_os_mdelay(1);

			/* Now re-enable it as due to noise, hardware
			 * turned it off */
			val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			    &bar0->adapter_control);
			val64 |= XGE_HAL_ADAPTER_CNTL_EN;
			/* clear ECC_EN — ECC reporting off (see note above) */
			val64 = val64 &
			    (~XGE_HAL_ADAPTER_ECC_EN);
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			    &bar0->adapter_control);
		}
		xge_os_mdelay(1);	/* Sleep for 1 msec */
		i++;
	} while (i < hldev->config.link_retry_cnt);

	__hal_device_led_actifity_fix(hldev);

#ifndef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	/* Here we are performing soft reset on XGXS to force link down.
	 * Since link is already up, we will get link state change
	 * poll notification after adapter is enabled */

	__hal_serial_mem_write64(hldev, 0x80010515001E0000ULL,
	    &bar0->dtx_control);
	(void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);

	__hal_serial_mem_write64(hldev, 0x80010515001E00E0ULL,
	    &bar0->dtx_control);
	(void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);

	__hal_serial_mem_write64(hldev, 0x80070515001F00E4ULL,
	    &bar0->dtx_control);
	(void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);

	xge_os_mdelay(100); /* Sleep for 100 msec (comment previously
			     * said 500, contradicting the argument) */
#else
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
#endif
	{
		/*
		 * With some switches the link state change interrupt does not
		 * occur even though the xgxs reset is done as per SPN-006. So,
		 * poll the adapter status register and check if the link state
		 * is ok.
		 */
		adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->adapter_status);
		if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
		    XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
		{
			xge_debug_device(XGE_TRACE, "%s",
			    "enable device causing link state change ind..");
			(void) __hal_device_handle_link_state_change(hldev);
		}
	}

	if (hldev->config.stats_refresh_time_sec !=
	    XGE_HAL_STATS_REFRESH_DISABLE)
		__hal_stats_enable(&hldev->stats);

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_disable - Disable Xframe adapter.
 * @hldev: Device handle.
 *
 * Disable this device. To gracefully reset the adapter, the host should:
 *
 * - call xge_hal_device_disable();
 *
 * - call xge_hal_device_intr_disable();
 *
 * - close all opened channels and clean up outstanding resources;
 *
 * - do some work (error recovery, change mtu, reset, etc);
 *
 * - call xge_hal_device_enable();
 *
 * - open channels, replenish RxDs, etc.
 *
 * - call xge_hal_device_intr_enable().
 *
 * Note: Disabling the device does _not_ include disabling of interrupts.
 * After disabling the device stops receiving new frames but those frames
 * that were already in the pipe will keep coming for some few milliseconds.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
 * a "quiescent" state.
 *
 * See also: xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_disable(xge_hal_device_t *hldev)
{
	xge_hal_status_e status = XGE_HAL_OK;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	xge_debug_device(XGE_TRACE, "%s", "turn off laser, cleanup hardware");

	/* Clear the adapter-enable bit. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->adapter_control);
	val64 = val64 & (~XGE_HAL_ADAPTER_CNTL_EN);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->adapter_control);

	/* Even on failure keep going with cleanup; just remember the
	 * error status for the caller. */
	if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) {
		status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	}

	if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
	    XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
	    XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
		status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	}

	if (hldev->config.stats_refresh_time_sec !=
	    XGE_HAL_STATS_REFRESH_DISABLE)
		__hal_stats_disable(&hldev->stats);
#ifdef XGE_DEBUG_ASSERT
	else
		xge_assert(!hldev->stats.is_enabled);
#endif

#ifndef XGE_HAL_DONT_DISABLE_BUS_MASTER_ON_STOP
	__hal_device_bus_master_disable(hldev);
#endif

	return status;
}

/**
 * xge_hal_device_reset - Reset device.
 * @hldev: HAL device handle.
 *
 * Soft-reset the device, reset the device stats except reset_cnt.
 *
 * After reset is done, will try to re-initialize HW.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized.
 * XGE_HAL_ERR_RESET_FAILED - Reset failed.
 *
 * See also: xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_reset(xge_hal_device_t *hldev)
{
	xge_hal_status_e status;

	/* save the soft reset counter: __hal_stats_soft_reset() below
	 * wipes the stats, so it is restored (incremented) afterwards */
	u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt;

	xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device", reset_cnt);

	if (!hldev->is_initialized)
		return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED;

	/* actual "soft" reset of the adapter */
	status = __hal_device_reset(hldev);

	/* reset all stats including saved */
	__hal_stats_soft_reset(hldev, 1);

	/* increment reset counter */
	hldev->stats.sw_dev_info_stats.soft_reset_cnt = reset_cnt + 1;

	/* re-initialize rxufca_intr_thres */
	hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;

	hldev->reset_needed_after_close = 0;

	return status;
}

/**
 * xge_hal_device_status - Check whether Xframe hardware is ready for
 * operation.
 * @hldev: HAL device handle.
 * @hw_status: Xframe status register. Returned by HAL.
 *
 * Check whether Xframe hardware is ready for operation.
 * The checking includes TDMA, RDMA, PFC, PIC, MC_DRAM, and the rest
 * hardware functional blocks.
 *
 * Returns: XGE_HAL_OK if the device is ready for operation. Otherwise
 * returns XGE_HAL_FAIL. Also, fills in adapter status (in @hw_status).
 *
 * See also: xge_hal_status_e{}.
 * Usage: See ex_open{}.
 */
xge_hal_status_e
xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 tmp64;

	tmp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->adapter_status);

	/* Always report the raw status to the caller, even on failure. */
	*hw_status = tmp64;

	/* Check each functional block in turn; fail on the first that
	 * is not ready. */
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) {
		xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) {
		xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) {
		xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) {
		xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) {
		xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) {
		xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) {
		xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!");
		return XGE_HAL_FAIL;
	}
#ifndef XGE_HAL_HERC_EMULATION
	/*
	 * Andrew: in PCI 33 mode, the P_PLL is not
	 * used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK) &&
	    xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	    hldev->pci_mode != XGE_HAL_PCI_33MHZ_MODE) {
		xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!");
		return XGE_HAL_FAIL;
	}
#endif

	return XGE_HAL_OK;
}

/*
 * __hal_device_msi_intr_endis - Enable/disable MSI in PCI config space.
 * @hldev: HAL device handle.
 * @flag: non-zero to set the MSI enable bit, zero to clear it.
 *
 * Read-modify-write of bit 0 of the MSI control word in the device's
 * PCI configuration space.
 */
void
__hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag)
{
	u16 msi_control_reg;

	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t,
	    msi_control), &msi_control_reg);

	if (flag)
		msi_control_reg |= 0x1;
	else
		msi_control_reg &= ~0x1;

	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t,
	    msi_control), msi_control_reg);
}

/*
 * __hal_device_msix_intr_endis - Mask/unmask one MSI-X vector.
 * @hldev: HAL device handle.
 * @channel: channel whose msix_idx selects the vector.
 * @flag: non-zero to unmask (clear the mask bit), zero to mask.
 *
 * The xmsi_mask_reg holds one mask bit per vector, MSB-first
 * (vector 0 is bit 63).
 */
void
__hal_device_msix_intr_endis(xge_hal_device_t *hldev,
	xge_hal_channel_t *channel, int flag)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->xmsi_mask_reg);

	if (flag)
		val64 &= ~(1LL << ( 63 - channel->msix_idx ));
	else
		val64 |= (1LL << ( 63 - channel->msix_idx ));
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->xmsi_mask_reg);
}

/**
 * xge_hal_device_intr_enable - Enable Xframe interrupts.
 * @hldev: HAL device handle.
 *
 * Enable Xframe interrupts. The function is to be executed the last in
 * Xframe initialization sequence.
 *
 * See also: xge_hal_device_intr_disable()
 */
void
xge_hal_device_intr_enable(xge_hal_device_t *hldev)
{
	xge_list_t *item;
	u64 val64;

	/* PRC initialization and configuration */
	xge_list_for_each(item, &hldev->ring_channels) {
		xge_hal_channel_h channel;
		channel = xge_container_of(item, xge_hal_channel_t, item);
		__hal_ring_prc_enable(channel);
	}

	/* enable traffic only interrupts */
	if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) {
		/*
		 * make sure all interrupts going to be disabled if MSI
		 * is enabled.
		 */
		__hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
	} else {
		/*
		 * Enable the Tx traffic interrupts only if the TTI feature is
		 * enabled.
		 */
		val64 = 0;
		if (hldev->tti_enabled)
			val64 = XGE_HAL_TX_TRAFFIC_INTR;

		if (!hldev->config.bimodal_interrupts)
			val64 |= XGE_HAL_RX_TRAFFIC_INTR;

		/* Xena always gets the Rx traffic interrupt (OR is
		 * idempotent if it was already set above). */
		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
			val64 |= XGE_HAL_RX_TRAFFIC_INTR;

		val64 |=XGE_HAL_TX_PIC_INTR |
		    XGE_HAL_MC_INTR |
		    XGE_HAL_TX_DMA_INTR |
		    (hldev->config.sched_timer_us !=
		    XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0);
		__hal_device_intr_mgmt(hldev, val64, 1);
	}

	/*
	 * Enable MSI-X interrupts
	 */
	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {

		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
			/*
			 * To enable MSI-X, MSI also needs to be enabled,
			 * due to a bug in the herc NIC.
			 */
			__hal_device_msi_intr_endis(hldev, 1);
		}


		/* Enable the MSI-X interrupt for each configured channel */
		xge_list_for_each(item, &hldev->fifo_channels) {
			xge_hal_channel_t *channel;

			channel = xge_container_of(item,
			    xge_hal_channel_t, item);

			/* 0 vector is reserved for alarms */
			if (!channel->msix_idx)
				continue;

			__hal_device_msix_intr_endis(hldev, channel, 1);
		}

		xge_list_for_each(item, &hldev->ring_channels) {
			xge_hal_channel_t *channel;

			channel = xge_container_of(item,
			    xge_hal_channel_t, item);

			/* 0 vector is reserved for alarms */
			if (!channel->msix_idx)
				continue;

			__hal_device_msix_intr_endis(hldev, channel, 1);
		}
	}

	xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled");
}


/**
 * xge_hal_device_intr_disable - Disable Xframe interrupts.
 * @hldev: HAL device handle.
 *
 * Disable Xframe interrupts.
 * (NOTE(review): an earlier version documented an @op argument; the
 * routine currently takes none and disables all interrupt sources.)
 *
 * See also: xge_hal_device_intr_enable()
 */
void
xge_hal_device_intr_disable(xge_hal_device_t *hldev)
{
	xge_list_t *item;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {

		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
			/*
			 * To disable MSI-X, MSI also needs to be disabled,
			 * due to a bug in the herc NIC.
			 */
			__hal_device_msi_intr_endis(hldev, 0);
		}

		/* Disable the MSI-X interrupt for each configured channel */
		xge_list_for_each(item, &hldev->fifo_channels) {
			xge_hal_channel_t *channel;

			channel = xge_container_of(item,
			    xge_hal_channel_t, item);

			/* 0 vector is reserved for alarms */
			if (!channel->msix_idx)
				continue;

			__hal_device_msix_intr_endis(hldev, channel, 0);

		}

		/* Mask all Tx traffic interrupts. */
		xge_os_pio_mem_write64(hldev->pdev,
		    hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
		    &bar0->tx_traffic_mask);

		xge_list_for_each(item, &hldev->ring_channels) {
			xge_hal_channel_t *channel;

			channel = xge_container_of(item,
			    xge_hal_channel_t, item);

			/* 0 vector is reserved for alarms */
			if (!channel->msix_idx)
				continue;

			__hal_device_msix_intr_endis(hldev, channel, 0);
		}

		/* Mask all Rx traffic interrupts. */
		xge_os_pio_mem_write64(hldev->pdev,
		    hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
		    &bar0->rx_traffic_mask);
	}

	/*
	 * Disable traffic only interrupts.
	 * Tx traffic interrupts are used only if the TTI feature is
	 * enabled.
	 */
	val64 = 0;
	if (hldev->tti_enabled)
		val64 = XGE_HAL_TX_TRAFFIC_INTR;

	val64 |= XGE_HAL_RX_TRAFFIC_INTR |
	    XGE_HAL_TX_PIC_INTR |
	    XGE_HAL_MC_INTR |
	    (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ?
	    XGE_HAL_SCHED_INTR : 0);
	__hal_device_intr_mgmt(hldev, val64, 0);

	/* Finally mask everything at the top level. */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0xFFFFFFFFFFFFFFFFULL,
	    &bar0->general_int_mask);


	/* disable all configured PRCs */
	xge_list_for_each(item, &hldev->ring_channels) {
		xge_hal_channel_h channel;
		channel = xge_container_of(item, xge_hal_channel_t, item);
		__hal_ring_prc_disable(channel);
	}

	xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled");
}


/**
 * xge_hal_device_mcast_enable - Enable Xframe multicast addresses.
 * @hldev: HAL device handle.
 *
 * Enable Xframe multicast addresses.
 * Returns: XGE_HAL_OK on success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to enable mcast
 * feature within the time(timeout).
 *
 * See also: xge_hal_device_mcast_disable(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;
	int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;

	if (hldev == NULL)
		return XGE_HAL_ERR_INVALID_DEVICE;

	/* already enabled - nothing to do */
	if (hldev->mcast_refcnt)
		return XGE_HAL_OK;

	/* Herc uses a different all-multicast slot in the RMAC table. */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;

	hldev->mcast_refcnt = 1;

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* Enable all Multicast addresses */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0x010203040506ULL),
	    &bar0->rmac_addr_data0_mem);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL),
	    &bar0->rmac_addr_data1_mem);
	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    val64,
	    &bar0->rmac_addr_cmd_mem);

	/* wait for the strobe command to complete */
	if (__hal_device_register_poll(hldev,
	    &bar0->rmac_addr_cmd_mem, 0,
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* upper layer may require to repeat */
		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_mcast_disable - Disable Xframe multicast addresses.
 * @hldev: HAL device handle.
 *
 * Disable Xframe multicast addresses.
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to disable mcast
 * feature within the time(timeout).
 *
 * See also: xge_hal_device_mcast_enable(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;
	int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;

	if (hldev == NULL)
		return XGE_HAL_ERR_INVALID_DEVICE;

	/* already disabled - nothing to do */
	if (hldev->mcast_refcnt == 0)
		return XGE_HAL_OK;

	/* Herc uses a different all-multicast slot in the RMAC table. */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;

	hldev->mcast_refcnt = 0;

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* Disable all Multicast addresses */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL),
	    &bar0->rmac_addr_data0_mem);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0),
	    &bar0->rmac_addr_data1_mem);

	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rmac_addr_cmd_mem);

	/* wait for the strobe command to complete */
	if (__hal_device_register_poll(hldev,
	    &bar0->rmac_addr_cmd_mem, 0,
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* upper layer may require to repeat */
		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_promisc_enable - Enable promiscuous mode.
 * @hldev: HAL device handle.
 *
 * Enable promiscuous mode of Xframe operation.
 *
 * See also: xge_hal_device_promisc_disable().
 */
void
xge_hal_device_promisc_enable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;

	xge_assert(hldev);

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	if (!hldev->is_promisc) {
		/* Put the NIC into promiscuous mode */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->mac_cfg);
		val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;

		/* Unlock mac_cfg by writing the RMAC key first. */
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		    XGE_HAL_RMAC_CFG_KEY(0x4C0D),
		    &bar0->rmac_cfg_key);

		/* Only the upper 32 bits of mac_cfg are written here. */
		__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
		    (u32)(val64 >> 32),
		    &bar0->mac_cfg);

		hldev->is_promisc = 1;
		xge_debug_device(XGE_TRACE,
		    "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled",
		    (unsigned long long)val64);
	}
}

/**
 * xge_hal_device_promisc_disable - Disable promiscuous mode.
 * @hldev: HAL device handle.
 *
 * Disable promiscuous mode of Xframe operation.
 *
 * See also: xge_hal_device_promisc_enable().
 */
void
xge_hal_device_promisc_disable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;

	xge_assert(hldev);

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	if (hldev->is_promisc) {
		/* Remove the NIC from promiscuous mode */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->mac_cfg);
		val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;

		/* Unlock mac_cfg by writing the RMAC key first. */
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		    XGE_HAL_RMAC_CFG_KEY(0x4C0D),
		    &bar0->rmac_cfg_key);

		/* Only the upper 32 bits of mac_cfg are written here. */
		__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
		    (u32)(val64 >> 32),
		    &bar0->mac_cfg);

		hldev->is_promisc = 0;
		xge_debug_device(XGE_TRACE,
		    "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled",
		    (unsigned long long)val64);
	}
}

/**
 * xge_hal_device_macaddr_get - Get MAC addresses.
 * @hldev: HAL device handle.
 * @index: MAC address index, in the range from 0 to
 * XGE_HAL_MAX_MAC_ADDRESSES.
 * @macaddr: MAC address. Returned by HAL.
 *
 * Retrieve one of the stored MAC addresses by reading non-volatile
 * memory on the chip.
 *
 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
 * address within the time(timeout).
 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
 *
 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index,
	macaddr_t *macaddr)
{
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	int i;

	if (hldev == NULL) {
		return XGE_HAL_ERR_INVALID_DEVICE;
	}

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) {
		return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
	}

#ifdef XGE_HAL_HERC_EMULATION
	/* Emulation-only priming write before the read command. */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000010000000000,
	    &bar0->rmac_addr_data0_mem);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000,
	    &bar0->rmac_addr_data1_mem);
	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index));
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rmac_addr_cmd_mem);

	/* poll until done */
	__hal_device_register_poll(hldev,
	    &bar0->rmac_addr_cmd_mem, 0,
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);

#endif

	/* Issue the read-strobe command for the requested slot. */
	val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rmac_addr_cmd_mem);

	if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* upper layer may require to repeat */
		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	/* Unpack the 6 address bytes from the most-significant end of
	 * the 64-bit data register. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->rmac_addr_data0_mem);
	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
		(*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8)));
	}

#ifdef XGE_HAL_HERC_EMULATION
	/* Emulation has no real NVRAM: fabricate 00:01:00:00:00:00. */
	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
		(*macaddr)[i] = (u8)0;
	}
	(*macaddr)[1] = (u8)1;

#endif

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_macaddr_set - Set MAC address.
 * @hldev: HAL device handle.
 * @index: MAC address index, in the range from 0 to
 * XGE_HAL_MAX_MAC_ADDRESSES.
 * @macaddr: New MAC address to configure.
 *
 * Configure one of the available MAC address "slots".
 *
 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
 * address within the time(timeout).
 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
 *
 * See also: xge_hal_device_macaddr_get(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index,
	macaddr_t macaddr)
{
	xge_hal_pci_bar0_t *bar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64, temp64;
	int i;

	if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
		return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;

	/* Pack the 6 address bytes into the low 48 bits, first byte
	 * most significant. */
	temp64 = 0;
	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
		temp64 |= macaddr[i];
		temp64 <<= 8;
	}
	temp64 >>= 8;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64),
	    &bar0->rmac_addr_data0_mem);

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL),
	    &bar0->rmac_addr_data1_mem);

	/* Issue the write-strobe command for the requested slot. */
	val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rmac_addr_cmd_mem);

	if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* upper layer may require to repeat */
		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_macaddr_clear - Clear MAC address.
 * @hldev: HAL device handle.
 * @index: MAC address index, in the range from 0 to
 * XGE_HAL_MAX_MAC_ADDRESSES.
 *
 * Clear one of the available MAC address "slots".
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
 * address within the time(timeout).
 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
 *
 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_macaddr_clear(xge_hal_device_t *hldev, int index)
{
	xge_hal_status_e status;
	/* "Clearing" writes the broadcast pattern into the slot. */
	u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};

	status = xge_hal_device_macaddr_set(hldev, index, macaddr);
	if (status != XGE_HAL_OK) {
		xge_debug_device(XGE_ERR, "%s",
		    "Not able to set the mac addr");
		return status;
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_macaddr_find - Finds index in the rmac table.
 * @hldev: HAL device handle.
 * @wanted: Wanted MAC address.
 *
 * Returns the slot index holding @wanted, or -1 if not found.
 * Slot 0 is skipped (it holds the primary address).
 *
 * See also: xge_hal_device_macaddr_set().
 */
int
xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted)
{
	int i;

	if (hldev == NULL) {
		/* NOTE(review): returns a status enum from an index-returning
		 * function; callers must not confuse it with a valid index */
		return XGE_HAL_ERR_INVALID_DEVICE;
	}

	for (i=1; i<XGE_HAL_MAX_MAC_ADDRESSES; i++) {
		macaddr_t macaddr;
		(void) xge_hal_device_macaddr_get(hldev, i, &macaddr);
		if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) {
			return i;
		}
	}

	return -1;
}

/**
 * xge_hal_device_mtu_set - Set MTU.
 * @hldev: HAL device handle.
 * @new_mtu: New MTU size to configure.
 *
 * Set new MTU value.
Example, to use jumbo frames: 5069 * xge_hal_device_mtu_set(my_device, my_channel, 9600); 5070 * 5071 * Returns: XGE_HAL_OK on success. 5072 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control 5073 * register. 5074 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to initialize TTI/RTI 5075 * schemes. 5076 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to 5077 * a "quiescent" state. 5078 */ 5079xge_hal_status_e 5080xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu) 5081{ 5082 xge_hal_status_e status; 5083 5084 /* 5085 * reset needed if 1) new MTU differs, and 5086 * 2a) device was closed or 5087 * 2b) device is being upped for first time. 5088 */ 5089 if (hldev->config.mtu != new_mtu) { 5090 if (hldev->reset_needed_after_close || 5091 !hldev->mtu_first_time_set) { 5092 status = xge_hal_device_reset(hldev); 5093 if (status != XGE_HAL_OK) { 5094 xge_debug_device(XGE_TRACE, "%s", 5095 "fatal: can not reset the device"); 5096 return status; 5097 } 5098 } 5099 /* store the new MTU in device, reset will use it */ 5100 hldev->config.mtu = new_mtu; 5101 xge_debug_device(XGE_TRACE, "new MTU %d applied", 5102 new_mtu); 5103 } 5104 5105 if (!hldev->mtu_first_time_set) 5106 hldev->mtu_first_time_set = 1; 5107 5108 return XGE_HAL_OK; 5109} 5110 5111/** 5112 * xge_hal_device_initialize - Initialize Xframe device. 5113 * @hldev: HAL device handle. 5114 * @attr: pointer to xge_hal_device_attr_t structure 5115 * @device_config: Configuration to be _applied_ to the device, 5116 * For the Xframe configuration "knobs" please 5117 * refer to xge_hal_device_config_t and Xframe 5118 * User Guide. 5119 * 5120 * Initialize Xframe device. Note that all the arguments of this public API 5121 * are 'IN', including @hldev. Upper-layer driver (ULD) cooperates with 5122 * OS to find new Xframe device, locate its PCI and memory spaces. 
5123 * 5124 * When done, the ULD allocates sizeof(xge_hal_device_t) bytes for HAL 5125 * to enable the latter to perform Xframe hardware initialization. 5126 * 5127 * Returns: XGE_HAL_OK - success. 5128 * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - Driver is not initialized. 5129 * XGE_HAL_ERR_BAD_DEVICE_CONFIG - Device configuration params are not 5130 * valid. 5131 * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed. 5132 * XGE_HAL_ERR_BAD_SUBSYSTEM_ID - Device subsystem id is invalid. 5133 * XGE_HAL_ERR_INVALID_MAC_ADDRESS - Device mac address in not valid. 5134 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac 5135 * address within the time(timeout) or TTI/RTI initialization failed. 5136 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control. 5137 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT -Device is not queiscent. 5138 * 5139 * See also: xge_hal_device_terminate(), xge_hal_status_e{} 5140 * xge_hal_device_attr_t{}. 5141 */ 5142xge_hal_status_e 5143xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, 5144 xge_hal_device_config_t *device_config) 5145{ 5146 int i; 5147 xge_hal_status_e status; 5148 xge_hal_channel_t *channel; 5149 u16 subsys_device; 5150 u16 subsys_vendor; 5151 int total_dram_size, ring_auto_dram_cfg, left_dram_size; 5152 int total_dram_size_max = 0; 5153 5154 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing", 5155 (unsigned long long)(ulong_t)hldev); 5156 5157 /* sanity check */ 5158 if (g_xge_hal_driver == NULL || 5159 !g_xge_hal_driver->is_initialized) { 5160 return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED; 5161 } 5162 5163 xge_os_memzero(hldev, sizeof(xge_hal_device_t)); 5164 5165 /* 5166 * validate a common part of Xframe-I/II configuration 5167 * (and run check_card() later, once PCI inited - see below) 5168 */ 5169 status = __hal_device_config_check_common(device_config); 5170 if (status != XGE_HAL_OK) 5171 return status; 5172 5173 /* apply config */ 5174 
xge_os_memcpy(&hldev->config, device_config, 5175 sizeof(xge_hal_device_config_t)); 5176 5177 /* save original attr */ 5178 xge_os_memcpy(&hldev->orig_attr, attr, 5179 sizeof(xge_hal_device_attr_t)); 5180 5181 /* initialize rxufca_intr_thres */ 5182 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres; 5183 5184 hldev->regh0 = attr->regh0; 5185 hldev->regh1 = attr->regh1; 5186 hldev->regh2 = attr->regh2; 5187 hldev->isrbar0 = hldev->bar0 = attr->bar0; 5188 hldev->bar1 = attr->bar1; 5189 hldev->bar2 = attr->bar2; 5190 hldev->pdev = attr->pdev; 5191 hldev->irqh = attr->irqh; 5192 hldev->cfgh = attr->cfgh; 5193 5194 /* set initial bimodal timer for bimodal adaptive schema */ 5195 hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us; 5196 5197 hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh, 5198 g_xge_hal_driver->config.queue_size_initial, 5199 g_xge_hal_driver->config.queue_size_max, 5200 __hal_device_event_queued, hldev); 5201 if (hldev->queueh == NULL) 5202 return XGE_HAL_ERR_OUT_OF_MEMORY; 5203 5204 hldev->magic = XGE_HAL_MAGIC; 5205 5206 xge_assert(hldev->regh0); 5207 xge_assert(hldev->regh1); 5208 xge_assert(hldev->bar0); 5209 xge_assert(hldev->bar1); 5210 xge_assert(hldev->pdev); 5211 xge_assert(hldev->irqh); 5212 xge_assert(hldev->cfgh); 5213 5214 /* initialize some PCI/PCI-X fields of this PCI device. 
*/ 5215 __hal_device_pci_init(hldev); 5216 5217 /* 5218 * initlialize lists to properly handling a potential 5219 * terminate request 5220 */ 5221 xge_list_init(&hldev->free_channels); 5222 xge_list_init(&hldev->fifo_channels); 5223 xge_list_init(&hldev->ring_channels); 5224 5225 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { 5226 /* fixups for xena */ 5227 hldev->config.rth_en = 0; 5228 hldev->config.rth_spdm_en = 0; 5229 hldev->config.rts_mac_en = 0; 5230 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA; 5231 5232 status = __hal_device_config_check_xena(device_config); 5233 if (status != XGE_HAL_OK) { 5234 xge_hal_device_terminate(hldev); 5235 return status; 5236 } 5237 if (hldev->config.bimodal_interrupts == 1) { 5238 xge_hal_device_terminate(hldev); 5239 return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED; 5240 } else if (hldev->config.bimodal_interrupts == 5241 XGE_HAL_DEFAULT_USE_HARDCODE) 5242 hldev->config.bimodal_interrupts = 0; 5243 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { 5244 /* fixups for herc */ 5245 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC; 5246 status = __hal_device_config_check_herc(device_config); 5247 if (status != XGE_HAL_OK) { 5248 xge_hal_device_terminate(hldev); 5249 return status; 5250 } 5251 if (hldev->config.bimodal_interrupts == 5252 XGE_HAL_DEFAULT_USE_HARDCODE) 5253 hldev->config.bimodal_interrupts = 1; 5254 } else { 5255 xge_debug_device(XGE_ERR, 5256 "detected unknown device_id 0x%x", hldev->device_id); 5257 xge_hal_device_terminate(hldev); 5258 return XGE_HAL_ERR_BAD_DEVICE_ID; 5259 } 5260 5261 /* allocate and initialize FIFO types of channels according to 5262 * configuration */ 5263 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { 5264 if (!device_config->fifo.queue[i].configured) 5265 continue; 5266 5267 channel = __hal_channel_allocate(hldev, i, 5268 XGE_HAL_CHANNEL_TYPE_FIFO); 5269 if (channel == NULL) { 5270 xge_debug_device(XGE_ERR, 5271 "fifo: __hal_channel_allocate failed"); 
5272 xge_hal_device_terminate(hldev); 5273 return XGE_HAL_ERR_OUT_OF_MEMORY; 5274 } 5275 /* add new channel to the device */ 5276 xge_list_insert(&channel->item, &hldev->free_channels); 5277 } 5278 5279 /* 5280 * automatic DRAM adjustment 5281 */ 5282 total_dram_size = 0; 5283 ring_auto_dram_cfg = 0; 5284 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { 5285 if (!device_config->ring.queue[i].configured) 5286 continue; 5287 if (device_config->ring.queue[i].dram_size_mb == 5288 XGE_HAL_DEFAULT_USE_HARDCODE) { 5289 ring_auto_dram_cfg++; 5290 continue; 5291 } 5292 total_dram_size += device_config->ring.queue[i].dram_size_mb; 5293 } 5294 left_dram_size = total_dram_size_max - total_dram_size; 5295 if (left_dram_size < 0 || 5296 (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0)) { 5297 xge_debug_device(XGE_ERR, 5298 "ring config: exceeded DRAM size %d MB", 5299 total_dram_size_max); 5300 xge_hal_device_terminate(hldev); 5301 return XGE_HAL_BADCFG_RING_QUEUE_SIZE; 5302 } 5303 5304 /* 5305 * allocate and initialize RING types of channels according to 5306 * configuration 5307 */ 5308 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { 5309 if (!device_config->ring.queue[i].configured) 5310 continue; 5311 5312 if (device_config->ring.queue[i].dram_size_mb == 5313 XGE_HAL_DEFAULT_USE_HARDCODE) { 5314 hldev->config.ring.queue[i].dram_size_mb = 5315 device_config->ring.queue[i].dram_size_mb = 5316 left_dram_size / ring_auto_dram_cfg; 5317 } 5318 5319 channel = __hal_channel_allocate(hldev, i, 5320 XGE_HAL_CHANNEL_TYPE_RING); 5321 if (channel == NULL) { 5322 xge_debug_device(XGE_ERR, 5323 "ring: __hal_channel_allocate failed"); 5324 xge_hal_device_terminate(hldev); 5325 return XGE_HAL_ERR_OUT_OF_MEMORY; 5326 } 5327 /* add new channel to the device */ 5328 xge_list_insert(&channel->item, &hldev->free_channels); 5329 } 5330 5331 /* get subsystem IDs */ 5332 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 5333 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), 5334 
&subsys_device); 5335 xge_os_pci_read16(hldev->pdev, hldev->cfgh, 5336 xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id), 5337 &subsys_vendor); 5338 xge_debug_device(XGE_TRACE, 5339 "subsystem_id %04x:%04x", 5340 subsys_vendor, subsys_device); 5341 5342 /* reset device initially */ 5343 (void) __hal_device_reset(hldev); 5344 5345 /* set host endian before, to assure proper action */ 5346 status = __hal_device_set_swapper(hldev); 5347 if (status != XGE_HAL_OK) { 5348 xge_debug_device(XGE_ERR, 5349 "__hal_device_set_swapper failed"); 5350 xge_hal_device_terminate(hldev); 5351 (void) __hal_device_reset(hldev); 5352 return status; 5353 } 5354 5355#ifndef XGE_HAL_HERC_EMULATION 5356 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) 5357 __hal_device_xena_fix_mac(hldev); 5358#endif 5359 5360 /* MAC address initialization. 5361 * For now only one mac address will be read and used. */ 5362 status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]); 5363 if (status != XGE_HAL_OK) { 5364 xge_debug_device(XGE_ERR, 5365 "xge_hal_device_macaddr_get failed"); 5366 xge_hal_device_terminate(hldev); 5367 return status; 5368 } 5369 5370 if (hldev->macaddr[0][0] == 0xFF && 5371 hldev->macaddr[0][1] == 0xFF && 5372 hldev->macaddr[0][2] == 0xFF && 5373 hldev->macaddr[0][3] == 0xFF && 5374 hldev->macaddr[0][4] == 0xFF && 5375 hldev->macaddr[0][5] == 0xFF) { 5376 xge_debug_device(XGE_ERR, 5377 "xge_hal_device_macaddr_get returns all FFs"); 5378 xge_hal_device_terminate(hldev); 5379 return XGE_HAL_ERR_INVALID_MAC_ADDRESS; 5380 } 5381 5382 xge_debug_device(XGE_TRACE, 5383 "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x", 5384 hldev->macaddr[0][0], hldev->macaddr[0][1], 5385 hldev->macaddr[0][2], hldev->macaddr[0][3], 5386 hldev->macaddr[0][4], hldev->macaddr[0][5]); 5387 5388 status = __hal_stats_initialize(&hldev->stats, hldev); 5389 if (status != XGE_HAL_OK) { 5390 xge_debug_device(XGE_ERR, 5391 "__hal_stats_initialize failed"); 5392 
xge_hal_device_terminate(hldev); 5393 return status; 5394 } 5395 5396 status = __hal_device_hw_initialize(hldev); 5397 if (status != XGE_HAL_OK) { 5398 xge_debug_device(XGE_ERR, 5399 "__hal_device_hw_initialize failed"); 5400 xge_hal_device_terminate(hldev); 5401 return status; 5402 } 5403 hldev->dump_buf=(char*)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE); 5404 if (hldev->dump_buf == NULL) { 5405 xge_debug_device(XGE_ERR, 5406 "__hal_device_hw_initialize failed"); 5407 xge_hal_device_terminate(hldev); 5408 return XGE_HAL_ERR_OUT_OF_MEMORY; 5409 } 5410 5411 5412 /* Xena-only: need to serialize fifo posts across all device fifos */ 5413#if defined(XGE_HAL_TX_MULTI_POST) 5414 xge_os_spin_lock_init(&hldev->xena_post_lock, hldev->pdev); 5415#elif defined(XGE_HAL_TX_MULTI_POST_IRQ) 5416 xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh); 5417#endif 5418 /* Getting VPD data */ 5419 __hal_device_get_vpd_data(hldev); 5420 5421 hldev->is_initialized = 1; 5422 5423 return XGE_HAL_OK; 5424} 5425 5426/** 5427 * xge_hal_device_terminating - Mark the device as 'terminating'. 5428 * @devh: HAL device handle. 5429 * 5430 * Mark the device as 'terminating', going to terminate. Can be used 5431 * to serialize termination with other running processes/contexts. 5432 * 5433 * See also: xge_hal_device_terminate(). 
 */
void
xge_hal_device_terminating(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
	xge_list_t *item;
	xge_hal_channel_t *channel;
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	unsigned long flags=0;
#endif

	/*
	 * go through each opened tx channel and acquire
	 * lock, so it will serialize with HAL termination flag
	 */
	xge_list_for_each(item, &hldev->fifo_channels) {
		channel = xge_container_of(item, xge_hal_channel_t, item);
#if defined(XGE_HAL_TX_MULTI_RESERVE)
		xge_os_spin_lock(&channel->reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
		xge_os_spin_lock_irq(&channel->reserve_lock, flags);
#endif

		/* set under reserve_lock so no concurrent reservation
		 * path can race with the flag flip */
		channel->terminating = 1;

#if defined(XGE_HAL_TX_MULTI_RESERVE)
		xge_os_spin_unlock(&channel->reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
		xge_os_spin_unlock_irq(&channel->reserve_lock, flags);
#endif
	}

	/* device-wide flag is set last, after every fifo channel is marked */
	hldev->terminating = 1;
}

/**
 * xge_hal_device_terminate - Terminate Xframe device.
 * @hldev: HAL device handle.
 *
 * Terminate HAL device.
 *
 * See also: xge_hal_device_initialize().
 */
void
xge_hal_device_terminate(xge_hal_device_t *hldev)
{
	xge_assert(g_xge_hal_driver != NULL);
	xge_assert(hldev != NULL);
	xge_assert(hldev->magic == XGE_HAL_MAGIC);

	xge_queue_flush(hldev->queueh);

	/* kill the magic first so any late caller trips the assert above */
	hldev->terminating = 1;
	hldev->is_initialized = 0;
	hldev->in_poll = 0;
	hldev->magic = XGE_HAL_DEAD;

#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_lock_destroy(&hldev->xena_post_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev);
#endif

	xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating",
			(unsigned long long)(ulong_t)hldev);

	/* all channels must have been closed (moved back to free list) */
	xge_assert(xge_list_is_empty(&hldev->fifo_channels));
	xge_assert(xge_list_is_empty(&hldev->ring_channels));

	if (hldev->stats.is_initialized) {
		__hal_stats_terminate(&hldev->stats);
	}

	/* close if open and free all channels */
	while (!xge_list_is_empty(&hldev->free_channels)) {
		xge_hal_channel_t *channel = (xge_hal_channel_t*)
					hldev->free_channels.next;

		xge_assert(!channel->is_open);
		xge_list_remove(&channel->item);
		__hal_channel_free(channel);
	}

	if (hldev->queueh) {
		xge_queue_destroy(hldev->queueh);
	}

	if (hldev->spdm_table) {
		/* entries were carved from one contiguous allocation whose
		 * base is recorded in slot 0; free it, then the pointer
		 * array itself */
		xge_os_free(hldev->pdev,
			  hldev->spdm_table[0],
			  (sizeof(xge_hal_spdm_entry_t) *
				hldev->spdm_max_entries));
		xge_os_free(hldev->pdev,
			  hldev->spdm_table,
			  (sizeof(xge_hal_spdm_entry_t *) *
				hldev->spdm_max_entries));
		xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev);
		hldev->spdm_table = NULL;
	}

	if (hldev->dump_buf) {
		xge_os_free(hldev->pdev, hldev->dump_buf,
			    XGE_HAL_DUMP_BUF_SIZE);
		hldev->dump_buf = NULL;
	}

	if (hldev->device_id != 0) {
		int j, pcisize;

		/* restore the PCI config space saved at BIOS/attach time,
		 * one 32-bit dword at a time */
		pcisize = (xge_hal_device_check_id(hldev) ==
			   XGE_HAL_CARD_HERC)?
			   XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
		for (j = 0; j < pcisize; j++) {
			xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
				*((u32*)&hldev->pci_config_space_bios + j));
		}
	}
}

/**
 * __hal_device_get_vpd_data - Getting vpd_data.
 *
 * @hldev: HAL device handle.
 *
 * Getting product name and serial number from vpd capabilites structure
 *
 */
void
__hal_device_get_vpd_data(xge_hal_device_t *hldev)
{
	u8 * vpd_data;
	u8 data;
	int index = 0, count, fail = 0;
	/* config-space offset of the VPD capability differs per card */
	u8 vpd_addr = XGE_HAL_CARD_XENA_VPD_ADDR;
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		vpd_addr = XGE_HAL_CARD_HERC_VPD_ADDR;

	/* defaults, reported when VPD cannot be read */
	xge_os_strcpy((char *) hldev->vpd_data.product_name,
		  "10 Gigabit Ethernet Adapter");
	xge_os_strcpy((char *) hldev->vpd_data.serial_num, "not available");

	/* +16 slack so the 'SN' scan below cannot run past the buffer */
	vpd_data = ( u8*) xge_os_malloc(hldev->pdev, XGE_HAL_VPD_BUFFER_SIZE + 16);
	if ( vpd_data == 0 )
		return;

	for (index = 0; index < XGE_HAL_VPD_BUFFER_SIZE; index +=4 ) {
		/* write the VPD address, then poll the flag byte until the
		 * hardware marks the 32-bit read as complete */
		xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 2), (u8)index);
		xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 2), &data);
		xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 3), 0);
		for (count = 0; count < 5; count++ ) {
			xge_os_mdelay(2);
			xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 3), &data);
			if (data == XGE_HAL_VPD_READ_COMPLETE)
				break;
		}

		if (count >= 5) {
			xge_os_printf("ERR, Reading VPD data failed");
			fail = 1;
			break;
		}

		xge_os_pci_read32(hldev->pdev, hldev->cfgh,(vpd_addr + 4),
			(u32 *)&vpd_data[index]);
	}

	if(!fail) {

		/* read serial number of adapter: scan for an 'S','N' tag
		 * followed by a one-byte length */
		for (count = 0; count < XGE_HAL_VPD_BUFFER_SIZE; count++) {
			if ((vpd_data[count] == 'S') &&
			    (vpd_data[count + 1] == 'N') &&
			    (vpd_data[count + 2] < XGE_HAL_VPD_LENGTH)) {
				memset(hldev->vpd_data.serial_num, 0, XGE_HAL_VPD_LENGTH);
				memcpy(hldev->vpd_data.serial_num, &vpd_data[count + 3],
					vpd_data[count + 2]);
				break;
			}
		}

		/* product name: length byte at offset 1, string at offset 3 */
		if (vpd_data[1] < XGE_HAL_VPD_LENGTH) {
			memset(hldev->vpd_data.product_name, 0, vpd_data[1]);
			memcpy(hldev->vpd_data.product_name, &vpd_data[3], vpd_data[1]);
		}

	}

	xge_os_free(hldev->pdev, vpd_data, XGE_HAL_VPD_BUFFER_SIZE + 16);
}


/**
 * xge_hal_device_handle_tcode - Handle transfer code.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Xframe user guide)
 *      "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor, see xge_hal_fifo_dtr_next_completed() and
 * xge_hal_ring_dtr_next_completed().
 * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h.
 *
 * Returns: one of the xge_hal_status_e{} enumerated types.
 * XGE_HAL_OK - for success.
 * XGE_HAL_ERR_CRITICAL - when encounters critical error.
 */
xge_hal_status_e
xge_hal_device_handle_tcode (xge_hal_channel_h channelh,
			   xge_hal_dtr_h dtrh, u8 t_code)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;

	/* t_code is a 4-bit field; anything larger is malformed and is
	 * only logged, not acted upon */
	if (t_code > 15) {
		xge_os_printf("invalid t_code %d", t_code);
		return XGE_HAL_OK;
	}

	if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
		/* per-tcode transmit error counter */
		hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++;

#if defined(XGE_HAL_DEBUG_BAD_TCODE)
	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
	xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
	XGE_OS_LLXFMT":"XGE_OS_LLXFMT, txdp->control_1,
	txdp->control_2, txdp->buffer_pointer,
	txdp->host_control);
#endif

		/* handle link "down" immediately without going through
		 * xge_hal_device_poll() routine. */
		if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) {
			/* link is down */
			if (hldev->link_state != XGE_HAL_LINK_DOWN) {
				xge_hal_pci_bar0_t *bar0 =
				(xge_hal_pci_bar0_t *)(void *)hldev->bar0;
				u64 val64;

				hldev->link_state = XGE_HAL_LINK_DOWN;

				val64 = xge_os_pio_mem_read64(hldev->pdev,
				    hldev->regh0, &bar0->adapter_control);

				/* turn off LED */
				val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
				xge_os_pio_mem_write64(hldev->pdev,
						hldev->regh0, val64,
						&bar0->adapter_control);

				/* notify the upper-layer driver */
				g_xge_hal_driver->uld_callbacks.link_down(
						hldev->upper_layer_info);
			}
		} else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER ||
			   t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) {
			__hal_device_handle_targetabort(hldev);
			return XGE_HAL_ERR_CRITICAL;
		}
		/* every non-critical fifo tcode means: drop this packet */
		return XGE_HAL_ERR_PKT_DROP;
	} else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
		/* per-tcode receive error counter */
		hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++;

#if defined(XGE_HAL_DEBUG_BAD_TCODE)
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT
		":"XGE_OS_LLXFMT, rxdp->control_1,
		rxdp->control_2, rxdp->buffer0_ptr,
		rxdp->host_control);
#endif
		if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) {
			hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
			__hal_device_handle_eccerr(hldev, "rxd_t_code",
						  (u64)t_code);
			return XGE_HAL_ERR_CRITICAL;
		} else if (t_code == XGE_HAL_RXD_T_CODE_PARITY ||
			   t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) {
			hldev->stats.sw_dev_err_stats.parity_err_cnt++;
			__hal_device_handle_parityerr(hldev, "rxd_t_code",
						     (u64)t_code);
			return XGE_HAL_ERR_CRITICAL;
		/* do not drop if detected unknown IPv6 extension */
		} else if (t_code != XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO) {
			return XGE_HAL_ERR_PKT_DROP;
		}
	}
	return XGE_HAL_OK;
}

/**
 * xge_hal_device_link_state - Get link state.
 * @devh: HAL device handle.
 * @ls: Link state, see xge_hal_device_link_state_e{}.
 *
 * Get link state.
 * Returns: XGE_HAL_OK.
 * See also: xge_hal_device_link_state_e{}.
 */
xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh,
		xge_hal_device_link_state_e *ls)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

	xge_assert(ls != NULL);
	/* report the cached software state; no register access here */
	*ls = hldev->link_state;
	return XGE_HAL_OK;
}

/**
 * xge_hal_device_sched_timer - Configure scheduled device interrupt.
 * @devh: HAL device handle.
 * @interval_us: Time interval, in microseconds.
 *            Unlike transmit and receive interrupts,
 *            the scheduled interrupt is generated independently of
 *            traffic, but purely based on time.
 * @one_shot: 1 - generate scheduled interrupt only once.
 *          0 - generate scheduled interrupt periodically at the specified
 *          @interval_us interval.
 *
 * (Re-)configure scheduled interrupt.
Can be called at runtime to change 5751 * the setting, generate one-shot interrupts based on the resource and/or 5752 * traffic conditions, other purposes. 5753 * See also: xge_hal_device_config_t{}. 5754 */ 5755void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us, 5756 int one_shot) 5757{ 5758 u64 val64; 5759 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 5760 xge_hal_pci_bar0_t *bar0 = 5761 (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 5762 unsigned int interval = hldev->config.pci_freq_mherz * interval_us; 5763 5764 interval = __hal_fix_time_ival_herc(hldev, interval); 5765 5766 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 5767 &bar0->scheduled_int_ctrl); 5768 if (interval) { 5769 val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK; 5770 val64 |= XGE_HAL_SCHED_INT_PERIOD(interval); 5771 if (one_shot) { 5772 val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT; 5773 } 5774 val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN; 5775 } else { 5776 val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN; 5777 } 5778 5779 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 5780 val64, &bar0->scheduled_int_ctrl); 5781 5782 xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s", 5783 (unsigned long long)val64, 5784 interval ? "enabled" : "disabled"); 5785} 5786 5787/** 5788 * xge_hal_device_check_id - Verify device ID. 5789 * @devh: HAL device handle. 5790 * 5791 * Verify device ID. 5792 * Returns: one of the xge_hal_card_e{} enumerated types. 5793 * See also: xge_hal_card_e{}. 
5794 */ 5795xge_hal_card_e 5796xge_hal_device_check_id(xge_hal_device_h devh) 5797{ 5798 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 5799 switch (hldev->device_id) { 5800 case XGE_PCI_DEVICE_ID_XENA_1: 5801 case XGE_PCI_DEVICE_ID_XENA_2: 5802 return XGE_HAL_CARD_XENA; 5803 case XGE_PCI_DEVICE_ID_HERC_1: 5804 case XGE_PCI_DEVICE_ID_HERC_2: 5805 return XGE_HAL_CARD_HERC; 5806 case XGE_PCI_DEVICE_ID_TITAN_1: 5807 case XGE_PCI_DEVICE_ID_TITAN_2: 5808 return XGE_HAL_CARD_TITAN; 5809 default: 5810 return XGE_HAL_CARD_UNKNOWN; 5811 } 5812} 5813 5814/** 5815 * xge_hal_device_pci_info_get - Get PCI bus informations such as width, 5816 * frequency, and mode from previously stored values. 5817 * @devh: HAL device handle. 5818 * @pci_mode: pointer to a variable of enumerated type 5819 * xge_hal_pci_mode_e{}. 5820 * @bus_frequency: pointer to a variable of enumerated type 5821 * xge_hal_pci_bus_frequency_e{}. 5822 * @bus_width: pointer to a variable of enumerated type 5823 * xge_hal_pci_bus_width_e{}. 5824 * 5825 * Get pci mode, frequency, and PCI bus width. 5826 * Returns: one of the xge_hal_status_e{} enumerated types. 5827 * XGE_HAL_OK - for success. 5828 * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle. 5829 * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e. 
5830 */ 5831xge_hal_status_e 5832xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode, 5833 xge_hal_pci_bus_frequency_e *bus_frequency, 5834 xge_hal_pci_bus_width_e *bus_width) 5835{ 5836 xge_hal_status_e rc_status; 5837 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 5838 5839 if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) { 5840 rc_status = XGE_HAL_ERR_INVALID_DEVICE; 5841 xge_debug_device(XGE_ERR, 5842 "xge_hal_device_pci_info_get error, rc %d for device %p", 5843 rc_status, hldev); 5844 5845 return rc_status; 5846 } 5847 5848 *pci_mode = hldev->pci_mode; 5849 *bus_frequency = hldev->bus_frequency; 5850 *bus_width = hldev->bus_width; 5851 rc_status = XGE_HAL_OK; 5852 return rc_status; 5853} 5854 5855/** 5856 * xge_hal_reinitialize_hw 5857 * @hldev: private member of the device structure. 5858 * 5859 * This function will soft reset the NIC and re-initalize all the 5860 * I/O registers to the values they had after it's inital initialization 5861 * through the probe function. 5862 */ 5863int xge_hal_reinitialize_hw(xge_hal_device_t * hldev) 5864{ 5865 (void) xge_hal_device_reset(hldev); 5866 if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) { 5867 xge_hal_device_terminate(hldev); 5868 (void) __hal_device_reset(hldev); 5869 return 1; 5870 } 5871 return 0; 5872} 5873 5874 5875/* 5876 * __hal_read_spdm_entry_line 5877 * @hldev: pointer to xge_hal_device_t structure 5878 * @spdm_line: spdm line in the spdm entry to be read. 5879 * @spdm_entry: spdm entry of the spdm_line in the SPDM table. 5880 * @spdm_line_val: Contains the value stored in the spdm line. 5881 * 5882 * SPDM table contains upto a maximum of 256 spdm entries. 5883 * Each spdm entry contains 8 lines and each line stores 8 bytes. 5884 * This function reads the spdm line(addressed by @spdm_line) 5885 * of the spdm entry(addressed by @spdm_entry) in 5886 * the SPDM table. 
5887 */ 5888xge_hal_status_e 5889__hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line, 5890 u16 spdm_entry, u64 *spdm_line_val) 5891{ 5892 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 5893 u64 val64; 5894 5895 val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE | 5896 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) | 5897 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry); 5898 5899 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 5900 &bar0->rts_rth_spdm_mem_ctrl); 5901 5902 /* poll until done */ 5903 if (__hal_device_register_poll(hldev, 5904 &bar0->rts_rth_spdm_mem_ctrl, 0, 5905 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE, 5906 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 5907 5908 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 5909 } 5910 5911 *spdm_line_val = xge_os_pio_mem_read64(hldev->pdev, 5912 hldev->regh0, &bar0->rts_rth_spdm_mem_data); 5913 return XGE_HAL_OK; 5914} 5915 5916 5917/* 5918 * __hal_get_free_spdm_entry 5919 * @hldev: pointer to xge_hal_device_t structure 5920 * @spdm_entry: Contains an index to the unused spdm entry in the SPDM table. 5921 * 5922 * This function returns an index of unused spdm entry in the SPDM 5923 * table. 5924 */ 5925static xge_hal_status_e 5926__hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry) 5927{ 5928 xge_hal_status_e status; 5929 u64 spdm_line_val=0; 5930 5931 /* 5932 * Search in the local SPDM table for a free slot. 5933 */ 5934 *spdm_entry = 0; 5935 for(; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) { 5936 if (hldev->spdm_table[*spdm_entry]->in_use) { 5937 break; 5938 } 5939 } 5940 5941 if (*spdm_entry >= hldev->spdm_max_entries) { 5942 return XGE_HAL_ERR_SPDM_TABLE_FULL; 5943 } 5944 5945 /* 5946 * Make sure that the corresponding spdm entry in the SPDM 5947 * table is free. 5948 * Seventh line of the spdm entry contains information about 5949 * whether the entry is free or not. 
5950 */ 5951 if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry, 5952 &spdm_line_val)) != XGE_HAL_OK) { 5953 return status; 5954 } 5955 5956 /* BIT(63) in spdm_line 7 corresponds to entry_enable bit */ 5957 if ((spdm_line_val & BIT(63))) { 5958 /* 5959 * Log a warning 5960 */ 5961 xge_debug_device(XGE_ERR, "Local SPDM table is not " 5962 "consistent with the actual one for the spdm " 5963 "entry %d", *spdm_entry); 5964 return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT; 5965 } 5966 5967 return XGE_HAL_OK; 5968} 5969 5970 5971/* 5972 * __hal_calc_jhash - Calculate Jenkins hash. 5973 * @msg: Jenkins hash algorithm key. 5974 * @length: Length of the key. 5975 * @golden_ratio: Jenkins hash golden ratio. 5976 * @init_value: Jenkins hash initial value. 5977 * 5978 * This function implements the Jenkins based algorithm used for the 5979 * calculation of the RTH hash. 5980 * Returns: Jenkins hash value. 5981 * 5982 */ 5983static u32 5984__hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value) 5985{ 5986 5987 register u32 a,b,c,len; 5988 5989 /* 5990 * Set up the internal state 5991 */ 5992 len = length; 5993 a = b = golden_ratio; /* the golden ratio; an arbitrary value */ 5994 c = init_value; /* the previous hash value */ 5995 5996 /* handle most of the key */ 5997 while (len >= 12) 5998 { 5999 a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16) 6000 + ((u32)msg[3]<<24)); 6001 b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16) 6002 + ((u32)msg[7]<<24)); 6003 c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16) 6004 + ((u32)msg[11]<<24)); 6005 mix(a,b,c); 6006 msg += 12; len -= 12; 6007 } 6008 6009 /* handle the last 11 bytes */ 6010 c += length; 6011 switch(len) /* all the case statements fall through */ 6012 { 6013 case 11: c+= ((u32)msg[10]<<24); 6014 break; 6015 case 10: c+= ((u32)msg[9]<<16); 6016 break; 6017 case 9 : c+= ((u32)msg[8]<<8); 6018 break; 6019 /* the first byte of c is reserved for the length */ 6020 case 8 : b+= ((u32)msg[7]<<24); 
6021 break; 6022 case 7 : b+= ((u32)msg[6]<<16); 6023 break; 6024 case 6 : b+= ((u32)msg[5]<<8); 6025 break; 6026 case 5 : b+= msg[4]; 6027 break; 6028 case 4 : a+= ((u32)msg[3]<<24); 6029 break; 6030 case 3 : a+= ((u32)msg[2]<<16); 6031 break; 6032 case 2 : a+= ((u32)msg[1]<<8); 6033 break; 6034 case 1 : a+= msg[0]; 6035 break; 6036 /* case 0: nothing left to add */ 6037 } 6038 6039 mix(a,b,c); 6040 6041 /* report the result */ 6042 return c; 6043} 6044 6045 6046/** 6047 * xge_hal_spdm_entry_add - Add a new entry to the SPDM table. 6048 * @devh: HAL device handle. 6049 * @src_ip: Source ip address(IPv4/IPv6). 6050 * @dst_ip: Destination ip address(IPv4/IPv6). 6051 * @l4_sp: L4 source port. 6052 * @l4_dp: L4 destination port. 6053 * @is_tcp: Set to 1, if the protocol is TCP. 6054 * 0, if the protocol is UDP. 6055 * @is_ipv4: Set to 1, if the protocol is IPv4. 6056 * 0, if the protocol is IPv6. 6057 * @tgt_queue: Target queue to route the receive packet. 6058 * 6059 * This function add a new entry to the SPDM table. 6060 * 6061 * Returns: XGE_HAL_OK - success. 6062 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled. 6063 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry with in 6064 * the time(timeout). 6065 * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full. 6066 * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry. 6067 * 6068 * See also: xge_hal_spdm_entry_remove{}. 
 */
xge_hal_status_e
xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
	        xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
	        u8 is_tcp, u8 is_ipv4, u8 tgt_queue)
{

	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u32 jhash_value;
	u32 jhash_init_val;
	u32 jhash_golden_ratio;
	u64 val64;
	int off;
	u16 spdm_entry;
	u8 msg[XGE_HAL_JHASH_MSG_LEN];
	int ipaddr_len;
	xge_hal_status_e status;


	if (!hldev->config.rth_spdm_en) {
	    return XGE_HAL_ERR_SPDM_NOT_ENABLED;
	}

	/* target queue must name a valid receive ring */
	if ((tgt_queue < XGE_HAL_MIN_RING_NUM) ||
	    (tgt_queue > XGE_HAL_MAX_RING_NUM)) {
	    return XGE_HAL_ERR_SPDM_INVALID_ENTRY;
	}


	/*
	 * Calculate the jenkins hash.
	 */
	/*
	 * Create the Jenkins hash algorithm key.
	 * key = {L3SA, L3DA, L4SP, L4DP}, if SPDM is configured to
	 * use L4 information. Otherwize key = {L3SA, L3DA}.
	 */

	if (is_ipv4) {
	    ipaddr_len = 4;   /* in bytes */
	} else {
	    ipaddr_len = 16;
	}

	/*
	 * Jenkins hash algorithm expects the key in the big endian
	 * format. Since key is the byte array, memcpy won't work in the
	 * case of little endian. So, the current code extracts each
	 * byte starting from MSB and store it in the key.
	 */
	if (is_ipv4) {
	    for (off = 0; off < ipaddr_len; off++) {
	        u32 mask = vBIT32(0xff,(off*8),8);
	        int shift = 32-(off+1)*8;
	        msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift);
	        msg[off+ipaddr_len] =
	            (u8)((dst_ip->ipv4.addr & mask) >> shift);
	    }
	} else {
	    /* IPv6: each address is two 64-bit words; walk all 16 bytes,
	     * switching words every 8 bytes */
	    for (off = 0; off < ipaddr_len; off++) {
	        int loc = off % 8;
	        u64 mask = vBIT(0xff,(loc*8),8);
	        int shift = 64-(loc+1)*8;

	        msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask)
	                >> shift);
	        msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8]
	                & mask) >> shift);
	    }
	}

	off = (2*ipaddr_len);

	/* optionally append L4 source/destination ports, MSB first */
	if (hldev->config.rth_spdm_use_l4) {
	    msg[off] = (u8)((l4_sp & 0xff00) >> 8);
	    msg[off + 1] = (u8)(l4_sp & 0xff);
	    msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8);
	    msg[off + 3] = (u8)(l4_dp & 0xff);
	    off += 4;
	}

	/*
	 * Calculate jenkins hash for this configuration
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev,
	            hldev->regh0,
	            &bar0->rts_rth_jhash_cfg);
	jhash_golden_ratio = (u32)(val64 >> 32);
	jhash_init_val = (u32)(val64 & 0xffffffff);

	jhash_value = __hal_calc_jhash(msg, off,
	                   jhash_golden_ratio,
	                   jhash_init_val);

	xge_os_spin_lock(&hldev->spdm_lock);

	/*
	 * Locate a free slot in the SPDM table. To avoid a seach in the
	 * actual SPDM table, which is very expensive in terms of time,
	 * we are maintaining a local copy of the table and the search for
	 * the free entry is performed in the local table.
	 */
	if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry))
	        != XGE_HAL_OK) {
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return status;
	}

	/*
	 * Add this entry to the SPDM table
	 */
	status =  __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp,
	                 is_tcp, is_ipv4, tgt_queue,
	                 jhash_value, /* calculated jhash */
	                 spdm_entry);

	xge_os_spin_unlock(&hldev->spdm_lock);

	return status;
}

/**
 * xge_hal_spdm_entry_remove - Remove an entry from the SPDM table.
 * @devh: HAL device handle.
 * @src_ip: Source ip address(IPv4/IPv6).
 * @dst_ip: Destination ip address(IPv4/IPv6).
 * @l4_sp: L4 source port.
 * @l4_dp: L4 destination port.
 * @is_tcp: Set to 1, if the protocol is TCP.
 *          0, if the protocol os UDP.
 * @is_ipv4: Set to 1, if the protocol is IPv4.
 *          0, if the protocol is IPv6.
 *
 * This function remove an entry from the SPDM table.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry with in
 * the time(timeout).
 * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM
 * table.
 *
 * See also: xge_hal_spdm_entry_add{}.
 */
xge_hal_status_e
xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
	        xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
	        u8 is_tcp, u8 is_ipv4)
{

	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	u16 spdm_entry;
	xge_hal_status_e status;
	u64 spdm_line_arr[8];   /* the 8 64-bit lines of one SPDM entry */
	u8 line_no;
	u8 spdm_is_tcp;
	u8 spdm_is_ipv4;
	u16 spdm_l4_sp;
	u16 spdm_l4_dp;

	if (!hldev->config.rth_spdm_en) {
	    return XGE_HAL_ERR_SPDM_NOT_ENABLED;
	}

	xge_os_spin_lock(&hldev->spdm_lock);

	/*
	 * Poll the rxpic_int_reg register until spdm ready bit is set or
	 * timeout happens.
	 */
	if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
	        XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {

	    /* upper layer may require to repeat */
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	/*
	 * Clear the SPDM READY bit.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                  &bar0->rxpic_int_reg);
	val64 &=  ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	              &bar0->rxpic_int_reg);

	/*
	 * Search in the local SPDM table to get the index of the
	 * corresponding entry in the SPDM table.
	 */
	spdm_entry = 0;
	for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) {
	    /* cheap field comparisons first; skip non-matching slots */
	    if ((!hldev->spdm_table[spdm_entry]->in_use) ||
	        (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) ||
	        (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) ||
	        (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) ||
	        (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) {
	        continue;
	    }

	    /*
	     * Compare the src/dst IP addresses of source and target
	     */
	    if (is_ipv4) {
	        if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr
	             != src_ip->ipv4.addr) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr
	             != dst_ip->ipv4.addr)) {
	            continue;
	        }
	    } else {
	        if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0]
	             != src_ip->ipv6.addr[0]) ||
	            (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1]
	             != src_ip->ipv6.addr[1]) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0]
	             != dst_ip->ipv6.addr[0]) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1]
	             != dst_ip->ipv6.addr[1])) {
	            continue;
	        }
	    }
	    break;
	}

	if (spdm_entry >= hldev->spdm_max_entries) {
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND;
	}

	/*
	 * Retrieve the corresponding entry from the SPDM table and
	 * make sure that the data is consistent.
	 */
	for(line_no = 0; line_no < 8; line_no++) {

	    /*
	     * SPDM line 2,3,4 are valid only for IPv6 entry.
	     * SPDM line 5 & 6 are reserved. We don't have to
	     * read these entries in the above cases.
	     */
	    if (((is_ipv4) &&
	        ((line_no == 2)||(line_no == 3)||(line_no == 4))) ||
	         (line_no == 5) ||
	         (line_no == 6)) {
	        continue;
	    }

	    if ((status = __hal_read_spdm_entry_line(
	                hldev,
	                line_no,
	                spdm_entry,
	                &spdm_line_arr[line_no]))
	                != XGE_HAL_OK) {
	        xge_os_spin_unlock(&hldev->spdm_lock);
	        return status;
	    }
	}

	/*
	 * Seventh line of the spdm entry contains the entry_enable
	 * bit. Make sure that the entry_enable bit of this spdm entry
	 * is set.
	 * To remove an entry from the SPDM table, reset this
	 * bit.
	 */
	if (!(spdm_line_arr[7] & BIT(63))) {
	    /*
	     * Log a warning
	     */
	    xge_debug_device(XGE_ERR, "Local SPDM table is not "
	          "consistent with the actual one for the spdm "
	          "entry %d ", spdm_entry);
	    goto err_exit;
	}

	/*
	 * Retreive the L4 SP/DP, src/dst ip addresses from the SPDM
	 * table and do a comparision.
	 */
	spdm_is_tcp = (u8)((spdm_line_arr[0] & BIT(59)) >> 4);
	spdm_is_ipv4 = (u8)(spdm_line_arr[0] & BIT(63));
	spdm_l4_sp = (u16)(spdm_line_arr[0] >> 48);
	spdm_l4_dp = (u16)((spdm_line_arr[0] >> 32) & 0xffff);


	if ((spdm_is_tcp != is_tcp) ||
	    (spdm_is_ipv4 != is_ipv4) ||
	    (spdm_l4_sp != l4_sp) ||
	    (spdm_l4_dp != l4_dp)) {
	    /*
	     * Log a warning
	     */
	    xge_debug_device(XGE_ERR, "Local SPDM table is not "
	        "consistent with the actual one for the spdm "
	        "entry %d ", spdm_entry);
	    goto err_exit;
	}

	if (is_ipv4) {
	    /* Upper 32 bits of spdm_line(64 bit) contains the
	     * src IPv4 address. Lower 32 bits of spdm_line
	     * contains the destination IPv4 address.
	     */
	    u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32);
	    u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff);

	    if ((temp_src_ip != src_ip->ipv4.addr) ||
	        (temp_dst_ip != dst_ip->ipv4.addr)) {
	        xge_debug_device(XGE_ERR, "Local SPDM table is not "
	            "consistent with the actual one for the spdm "
	            "entry %d ", spdm_entry);
	        goto err_exit;
	    }

	} else {
	    /*
	     * SPDM line 1 & 2 contains the src IPv6 address.
	     * SPDM line 3 & 4 contains the dst IPv6 address.
	     */
	    if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) ||
	        (spdm_line_arr[2] != src_ip->ipv6.addr[1]) ||
	        (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) ||
	        (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) {

	        /*
	         * Log a warning
	         */
	        xge_debug_device(XGE_ERR, "Local SPDM table is not "
	            "consistent with the actual one for the spdm "
	            "entry %d ", spdm_entry);
	        goto err_exit;
	    }
	}

	/*
	 * Reset the entry_enable bit to zero
	 */
	spdm_line_arr[7] &= ~BIT(63);

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            spdm_line_arr[7],
	            (void *)((char *)hldev->spdm_mem_base +
	            (spdm_entry * 64) + (7 * 8)));

	/*
	 * Wait for the operation to be completed.
	 */
	if (__hal_device_register_poll(hldev,
	       &bar0->rxpic_int_reg, 1,
	       XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
	       XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	/*
	 * Make the corresponding spdm entry in the local SPDM table
	 * available for future use.
	 */
	hldev->spdm_table[spdm_entry]->in_use = 0;
	xge_os_spin_unlock(&hldev->spdm_lock);

	return XGE_HAL_OK;

err_exit:
	xge_os_spin_unlock(&hldev->spdm_lock);
	return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
}

/*
 * __hal_device_rti_set
 * @ring: The post_qid of the ring.
 * @channel: HAL channel of the ring.
 *
 * This function stores the RTI value associated for the MSI and
 * also unmasks this particular RTI in the rti_mask register.
 */
static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel)
{
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
	u64 val64;

	/* remember the RTI only when running in MSI/MSI-X mode */
	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
	    hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
	    channel->rti = (u8)ring_qid;

	/* clear this ring's bit in the mask => unmask its interrupt */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->rx_traffic_mask);
	val64 &= ~BIT(ring_qid);
	xge_os_pio_mem_write64(hldev->pdev,
	    hldev->regh0, val64,
	    &bar0->rx_traffic_mask);
}

/*
 * __hal_device_tti_set
 * @ring: The post_qid of the FIFO.
 * @channel: HAL channel the FIFO.
 *
 * This function stores the TTI value associated for the MSI and
 * also unmasks this particular TTI in the tti_mask register.
6477 */ 6478static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel) 6479{ 6480 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh; 6481 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 6482 u64 val64; 6483 6484 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI || 6485 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) 6486 channel->tti = (u8)fifo_qid; 6487 6488 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6489 &bar0->tx_traffic_mask); 6490 val64 &= ~BIT(fifo_qid); 6491 xge_os_pio_mem_write64(hldev->pdev, 6492 hldev->regh0, val64, 6493 &bar0->tx_traffic_mask); 6494} 6495 6496/** 6497 * xge_hal_channel_msi_set - Associate a RTI with a ring or TTI with a 6498 * FIFO for a given MSI. 6499 * @channelh: HAL channel handle. 6500 * @msi: MSI Number associated with the channel. 6501 * @msi_msg: The MSI message associated with the MSI number above. 6502 * 6503 * This API will associate a given channel (either Ring or FIFO) with the 6504 * given MSI number. It will alo program the Tx_Mat/Rx_Mat tables in the 6505 * hardware to indicate this association to the hardware. 
6506 */ 6507xge_hal_status_e 6508xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg) 6509{ 6510 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; 6511 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh; 6512 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 6513 u64 val64; 6514 6515 channel->msi_msg = msi_msg; 6516 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { 6517 int ring = channel->post_qid; 6518 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d," 6519 " MSI: %d", channel->msi_msg, ring, msi); 6520 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6521 &bar0->rx_mat); 6522 val64 |= XGE_HAL_SET_RX_MAT(ring, msi); 6523 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6524 &bar0->rx_mat); 6525 __hal_device_rti_set(ring, channel); 6526 } else { 6527 int fifo = channel->post_qid; 6528 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d," 6529 " MSI: %d", channel->msi_msg, fifo, msi); 6530 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6531 &bar0->tx_mat[0]); 6532 val64 |= XGE_HAL_SET_TX_MAT(fifo, msi); 6533 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6534 &bar0->tx_mat[0]); 6535 __hal_device_tti_set(fifo, channel); 6536 } 6537 6538 return XGE_HAL_OK; 6539} 6540 6541/** 6542 * xge_hal_mask_msix - Begin IRQ processing. 6543 * @hldev: HAL device handle. 6544 * @msi_id: MSI ID 6545 * 6546 * The function masks the msix interrupt for the given msi_id 6547 * 6548 * Note: 6549 * 6550 * Returns: 0, 6551 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range 6552 * status. 
6553 * See also: 6554 */ 6555xge_hal_status_e 6556xge_hal_mask_msix(xge_hal_device_h devh, int msi_id) 6557{ 6558 xge_hal_status_e status = XGE_HAL_OK; 6559 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 6560 u32 *bar2 = (u32 *)hldev->bar2; 6561 u32 val32; 6562 6563 xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES); 6564 6565 val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]); 6566 val32 |= 1; 6567 xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]); 6568 return status; 6569} 6570 6571/** 6572 * xge_hal_mask_msix - Begin IRQ processing. 6573 * @hldev: HAL device handle. 6574 * @msi_id: MSI ID 6575 * 6576 * The function masks the msix interrupt for the given msi_id 6577 * 6578 * Note: 6579 * 6580 * Returns: 0, 6581 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range 6582 * status. 6583 * See also: 6584 */ 6585xge_hal_status_e 6586xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id) 6587{ 6588 xge_hal_status_e status = XGE_HAL_OK; 6589 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 6590 u32 *bar2 = (u32 *)hldev->bar2; 6591 u32 val32; 6592 6593 xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES); 6594 6595 val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]); 6596 val32 &= ~1; 6597 xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]); 6598 return status; 6599} 6600 6601/* 6602 * __hal_set_msix_vals 6603 * @devh: HAL device handle. 6604 * @msix_value: 32bit MSI-X value transferred across PCI to @msix_address. 6605 * Filled in by this function. 6606 * @msix_address: 32bit MSI-X DMA address. 6607 * Filled in by this function. 6608 * @msix_idx: index that corresponds to the (@msix_value, @msix_address) 6609 * entry in the table of MSI-X (value, address) pairs. 6610 * 6611 * This function will program the hardware associating the given 6612 * address/value cobination to the specified msi number. 
6613 */ 6614static void __hal_set_msix_vals (xge_hal_device_h devh, 6615 u32 *msix_value, 6616 u64 *msix_addr, 6617 int msix_idx) 6618{ 6619 int cnt = 0; 6620 6621 xge_hal_device_t *hldev = (xge_hal_device_t*)devh; 6622 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 6623 u64 val64; 6624 6625 val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE; 6626 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, 6627 (u32)(val64 >> 32), &bar0->xmsi_access); 6628 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, 6629 (u32)(val64), &bar0->xmsi_access); 6630 do { 6631 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6632 &bar0->xmsi_access); 6633 if (val64 & XGE_HAL_XMSI_STROBE) 6634 break; 6635 cnt++; 6636 xge_os_mdelay(20); 6637 } while(cnt < 5); 6638 *msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6639 &bar0->xmsi_data)); 6640 *msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6641 &bar0->xmsi_address); 6642} 6643 6644/** 6645 * xge_hal_channel_msix_set - Associate MSI-X with a channel. 6646 * @channelh: HAL channel handle. 6647 * @msix_idx: index that corresponds to a particular (@msix_value, 6648 * @msix_address) entry in the MSI-X table. 6649 * 6650 * This API associates a given channel (either Ring or FIFO) with the 6651 * given MSI-X number. It programs the Xframe's Tx_Mat/Rx_Mat tables 6652 * to indicate this association. 6653 */ 6654xge_hal_status_e 6655xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx) 6656{ 6657 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; 6658 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh; 6659 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 6660 u64 val64; 6661 6662 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { 6663 /* Currently Ring and RTI is one on one. 
*/ 6664 int ring = channel->post_qid; 6665 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6666 &bar0->rx_mat); 6667 val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx); 6668 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6669 &bar0->rx_mat); 6670 __hal_device_rti_set(ring, channel); 6671 hldev->config.fifo.queue[channel->post_qid].intr_vector = 6672 msix_idx; 6673 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { 6674 int fifo = channel->post_qid; 6675 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 6676 &bar0->tx_mat[0]); 6677 val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx); 6678 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6679 &bar0->tx_mat[0]); 6680 __hal_device_tti_set(fifo, channel); 6681 hldev->config.ring.queue[channel->post_qid].intr_vector = 6682 msix_idx; 6683 } 6684 channel->msix_idx = msix_idx; 6685 __hal_set_msix_vals(hldev, &channel->msix_data, 6686 &channel->msix_address, 6687 channel->msix_idx); 6688 6689 return XGE_HAL_OK; 6690} 6691 6692#if defined(XGE_HAL_CONFIG_LRO) 6693/** 6694 * xge_hal_lro_terminate - Terminate lro resources. 6695 * @lro_scale: Amount of lro memory. 6696 * @hldev: Hal device structure. 6697 * 6698 */ 6699void 6700xge_hal_lro_terminate(u32 lro_scale, 6701 xge_hal_device_t *hldev) 6702{ 6703} 6704 6705/** 6706 * xge_hal_lro_init - Initiate lro resources. 6707 * @lro_scale: Amount of lro memory. 6708 * @hldev: Hal device structure. 6709 * Note: For time being I am using only one LRO per device. Later on size 6710 * will be increased. 
6711 */ 6712 6713xge_hal_status_e 6714xge_hal_lro_init(u32 lro_scale, 6715 xge_hal_device_t *hldev) 6716{ 6717 int i; 6718 6719 if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE) 6720 hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE; 6721 6722 if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE) 6723 hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN; 6724 6725 for (i=0; i < XGE_HAL_MAX_RING_NUM; i++) 6726 { 6727 xge_os_memzero(hldev->lro_desc[i].lro_pool, 6728 sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS); 6729 6730 hldev->lro_desc[i].lro_next_idx = 0; 6731 hldev->lro_desc[i].lro_recent = NULL; 6732 } 6733 6734 return XGE_HAL_OK; 6735} 6736#endif 6737 6738 6739/** 6740 * xge_hal_device_poll - HAL device "polling" entry point. 6741 * @devh: HAL device. 6742 * 6743 * HAL "polling" entry point. Note that this is part of HAL public API. 6744 * Upper-Layer driver _must_ periodically poll HAL via 6745 * xge_hal_device_poll(). 6746 * 6747 * HAL uses caller's execution context to serially process accumulated 6748 * slow-path events, such as link state changes and hardware error 6749 * indications. 6750 * 6751 * The rate of polling could be somewhere between 500us to 10ms, 6752 * depending on requirements (e.g., the requirement to support fail-over 6753 * could mean that 500us or even 100us polling interval need to be used). 6754 * 6755 * The need and motivation for external polling includes 6756 * 6757 * - remove the error-checking "burden" from the HAL interrupt handler 6758 * (see xge_hal_device_handle_irq()); 6759 * 6760 * - remove the potential source of portability issues by _not_ 6761 * implementing separate polling thread within HAL itself. 6762 * 6763 * See also: xge_hal_event_e{}, xge_hal_driver_config_t{}. 6764 * Usage: See ex_slow_path{}. 
 */
void
xge_hal_device_poll(xge_hal_device_h devh)
{
	unsigned char item_buf[sizeof(xge_queue_item_t) +
	        XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
	xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
	xge_queue_status_e qstatus;
	xge_hal_status_e hstatus;
	int i = 0;
	int queue_has_critical_event = 0;
	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;

	xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
	             XGE_DEFAULT_EVENT_MAX_DATA_SIZE));

_again:
	/* bail out if the device is gone or being torn down */
	if (!hldev->is_initialized ||
	    hldev->terminating ||
	    hldev->magic != XGE_HAL_MAGIC)
	    return;

	if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000)
	{
	    /*
	     * Wait for an Hour
	     */
	    hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++;
	} else {
	    /*
	     * Logging Error messages in the excess temperature,
	     * Bias current, laser ouput for three cycle
	     */
	    __hal_updt_stats_xpak(hldev);
	    hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0;
	}

	if (!queue_has_critical_event)
	    queue_has_critical_event =
	        __queue_get_reset_critical(hldev->queueh);

	hldev->in_poll = 1;
	/* drain up to CONSUME_MAX queued events per poll cycle, or keep
	 * draining while a critical event is pending */
	while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) {

	    qstatus = xge_queue_consume(hldev->queueh,
	                XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
	                item);
	    if (qstatus == XGE_QUEUE_IS_EMPTY)
	        break;

	    xge_debug_queue(XGE_TRACE,
	         "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x"
	         XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type,
	         (u64)(ulong_t)item->context);

	    if (!hldev->is_initialized ||
	        hldev->magic != XGE_HAL_MAGIC) {
	        hldev->in_poll = 0;
	        return;
	    }

	    switch (item->event_type) {
	    case XGE_HAL_EVENT_LINK_IS_UP: {
	        /* link events are suppressed while a critical event
	         * is pending */
	        if (!queue_has_critical_event &&
	            g_xge_hal_driver->uld_callbacks.link_up) {
	            g_xge_hal_driver->uld_callbacks.link_up(
	                    hldev->upper_layer_info);
	            hldev->link_state = XGE_HAL_LINK_UP;
	        }
	    } break;
	    case XGE_HAL_EVENT_LINK_IS_DOWN: {
	        if (!queue_has_critical_event &&
	            g_xge_hal_driver->uld_callbacks.link_down) {
	            g_xge_hal_driver->uld_callbacks.link_down(
	                    hldev->upper_layer_info);
	            hldev->link_state = XGE_HAL_LINK_DOWN;
	        }
	    } break;
	    case XGE_HAL_EVENT_SERR:
	    case XGE_HAL_EVENT_ECCERR:
	    case XGE_HAL_EVENT_PARITYERR:
	    case XGE_HAL_EVENT_TARGETABORT:
	    case XGE_HAL_EVENT_SLOT_FREEZE: {
	        void *item_data = xge_queue_item_data(item);
	        xge_hal_event_e event_type = item->event_type;
	        u64 val64 = *((u64*)item_data);

	        /* a slot freeze supersedes any other critical event */
	        if (event_type != XGE_HAL_EVENT_SLOT_FREEZE)
	            if (xge_hal_device_is_slot_freeze(hldev))
	                event_type = XGE_HAL_EVENT_SLOT_FREEZE;
	        if (g_xge_hal_driver->uld_callbacks.crit_err) {
	            g_xge_hal_driver->uld_callbacks.crit_err(
	                    hldev->upper_layer_info,
	                    event_type,
	                    val64);
	            /* handle one critical event per poll cycle */
	            hldev->in_poll = 0;
	            return;
	        }
	    } break;
	    default: {
	        xge_debug_queue(XGE_TRACE,
	            "got non-HAL event %d",
	            item->event_type);
	    } break;
	    }

	    /* broadcast this event */
	    if (g_xge_hal_driver->uld_callbacks.event)
	        g_xge_hal_driver->uld_callbacks.event(item);
	}

	if (g_xge_hal_driver->uld_callbacks.before_device_poll) {
	    if (g_xge_hal_driver->uld_callbacks.before_device_poll(
	                     hldev) != 0) {
	        hldev->in_poll = 0;
	        return;
	    }
	}

	hstatus = __hal_device_poll(hldev);
	if (g_xge_hal_driver->uld_callbacks.after_device_poll)
	    g_xge_hal_driver->uld_callbacks.after_device_poll(hldev);

	/*
	 * handle critical error right away:
	 * - walk the device queue again
	 * - drop non-critical events, if any
	 * - look for the 1st critical
	 */
	if (hstatus == XGE_HAL_ERR_CRITICAL) {
	    queue_has_critical_event = 1;
	    goto _again;
	}

	hldev->in_poll = 0;
}

/**
 * xge_hal_rts_rth_init - Set enhanced mode for RTS hashing.
 * @hldev: HAL device handle.
 *
 * This function is used to set the adapter to enhanced mode.
 *
 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
 */
void
xge_hal_rts_rth_init(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * Set the receive traffic steering mode from default(classic)
	 * to enhanced.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                  &bar0->rts_ctrl);
	val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	               val64, &bar0->rts_ctrl);
}

/**
 * xge_hal_rts_rth_clr - Clear RTS hashing.
 * @hldev: HAL device handle.
 *
 * This function is used to clear all RTS hashing related stuff.
 * It brings the adapter out from enhanced mode to classic mode.
 * It also clears RTS_RTH_CFG register i.e clears hash type, function etc.
 *
 * See also: xge_hal_rts_rth_set(), xge_hal_rts_rth_itable_set().
 */
void
xge_hal_rts_rth_clr(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * Return the receive traffic steering mode from enhanced back
	 * to default(classic), then clear the whole RTH configuration.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                  &bar0->rts_ctrl);
	val64 &=  ~XGE_HAL_RTS_CTRL_ENHANCED_MODE;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	               val64, &bar0->rts_ctrl);
	val64 = 0;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	               &bar0->rts_rth_cfg);
}

/**
 * xge_hal_rts_rth_set - Set/configure RTS hashing.
 * @hldev: HAL device handle.
 * @def_q: default queue
 * @hash_type: hash type i.e TcpIpV4, TcpIpV6 etc.
6963 * @bucket_size: no of least significant bits to be used for hashing. 6964 * 6965 * Used to set/configure all RTS hashing related stuff. 6966 * - set the steering mode to enhanced. 6967 * - set hash function i.e algo selection. 6968 * - set the default queue. 6969 * 6970 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(). 6971 */ 6972void 6973xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type, 6974 u16 bucket_size) 6975{ 6976 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 6977 u64 val64; 6978 6979 val64 = XGE_HAL_RTS_DEFAULT_Q(def_q); 6980 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6981 &bar0->rts_default_q); 6982 6983 val64 = hash_type; 6984 val64 |= XGE_HAL_RTS_RTH_EN; 6985 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size); 6986 val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS; 6987 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 6988 &bar0->rts_rth_cfg); 6989} 6990 6991/** 6992 * xge_hal_rts_rth_start - Start RTS hashing. 6993 * @hldev: HAL device handle. 6994 * 6995 * Used to Start RTS hashing . 6996 * 6997 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start. 6998 */ 6999void 7000xge_hal_rts_rth_start(xge_hal_device_t *hldev) 7001{ 7002 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 7003 u64 val64; 7004 7005 7006 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 7007 &bar0->rts_rth_cfg); 7008 val64 |= XGE_HAL_RTS_RTH_EN; 7009 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7010 &bar0->rts_rth_cfg); 7011} 7012 7013/** 7014 * xge_hal_rts_rth_stop - Stop the RTS hashing. 7015 * @hldev: HAL device handle. 7016 * 7017 * Used to Staop RTS hashing . 7018 * 7019 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start. 
7020 */ 7021void 7022xge_hal_rts_rth_stop(xge_hal_device_t *hldev) 7023{ 7024 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 7025 u64 val64; 7026 7027 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 7028 &bar0->rts_rth_cfg); 7029 val64 &= ~XGE_HAL_RTS_RTH_EN; 7030 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7031 &bar0->rts_rth_cfg); 7032} 7033 7034/** 7035 * xge_hal_rts_rth_itable_set - Set/configure indirection table (IT). 7036 * @hldev: HAL device handle. 7037 * @itable: Pointer to the indirection table 7038 * @itable_size: no of least significant bits to be used for hashing 7039 * 7040 * Used to set/configure indirection table. 7041 * It enables the required no of entries in the IT. 7042 * It adds entries to the IT. 7043 * 7044 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set(). 7045 */ 7046xge_hal_status_e 7047xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size) 7048{ 7049 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 7050 u64 val64; 7051 u32 idx; 7052 7053 for (idx = 0; idx < itable_size; idx++) { 7054 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN | 7055 XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]); 7056 7057 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7058 &bar0->rts_rth_map_mem_data); 7059 7060 /* execute */ 7061 val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE | 7062 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE | 7063 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx)); 7064 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7065 &bar0->rts_rth_map_mem_ctrl); 7066 7067 /* poll until done */ 7068 if (__hal_device_register_poll(hldev, 7069 &bar0->rts_rth_map_mem_ctrl, 0, 7070 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE, 7071 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { 7072 /* upper layer may require to repeat */ 7073 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 7074 } 7075 } 7076 7077 return XGE_HAL_OK; 7078} 7079 7080 7081/** 7082 * xge_hal_device_rts_rth_key_set 
- Configure 40byte secret for hash calc. 7083 * 7084 * @hldev: HAL device handle. 7085 * @KeySize: Number of 64-bit words 7086 * @Key: upto 40-byte array of 8-bit values 7087 * This function configures the 40-byte secret which is used for hash 7088 * calculation. 7089 * 7090 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set(). 7091 */ 7092void 7093xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key) 7094{ 7095 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *) hldev->bar0; 7096 u64 val64; 7097 u32 entry, nreg, i; 7098 7099 entry = 0; 7100 nreg = 0; 7101 7102 while( KeySize ) { 7103 val64 = 0; 7104 for ( i = 0; i < 8 ; i++) { 7105 /* Prepare 64-bit word for 'nreg' containing 8 keys. */ 7106 if (i) 7107 val64 <<= 8; 7108 val64 |= Key[entry++]; 7109 } 7110 7111 KeySize--; 7112 7113 /* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/ 7114 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7115 &bar0->rts_rth_hash_mask[nreg++]); 7116 } 7117 7118 while( nreg < 5 ) { 7119 /* Clear the rest if key is less than 40 bytes */ 7120 val64 = 0; 7121 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7122 &bar0->rts_rth_hash_mask[nreg++]); 7123 } 7124} 7125 7126 7127/** 7128 * xge_hal_device_is_closed - Device is closed 7129 * 7130 * @devh: HAL device handle. 
7131 */ 7132int 7133xge_hal_device_is_closed(xge_hal_device_h devh) 7134{ 7135 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 7136 7137 if (xge_list_is_empty(&hldev->fifo_channels) && 7138 xge_list_is_empty(&hldev->ring_channels)) 7139 return 1; 7140 7141 return 0; 7142} 7143 7144xge_hal_status_e 7145xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index) 7146{ 7147 u64 val64; 7148 int section; 7149 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES; 7150 7151 xge_hal_device_t *hldev = (xge_hal_device_t *)devh; 7152 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; 7153 7154 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) 7155 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; 7156 7157 if ( index >= max_addr ) 7158 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; 7159 7160 /* 7161 * Calculate the section value 7162 */ 7163 section = index / 32; 7164 7165 xge_debug_device(XGE_TRACE, "the Section value is %d ", section); 7166 7167 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, 7168 &bar0->rts_mac_cfg); 7169 switch(section) 7170 { 7171 case 0: 7172 val64 |= XGE_HAL_RTS_MAC_SECT0_EN; 7173 break; 7174 case 1: 7175 val64 |= XGE_HAL_RTS_MAC_SECT1_EN; 7176 break; 7177 case 2: 7178 val64 |= XGE_HAL_RTS_MAC_SECT2_EN; 7179 break; 7180 case 3: 7181 val64 |= XGE_HAL_RTS_MAC_SECT3_EN; 7182 break; 7183 case 4: 7184 val64 |= XGE_HAL_RTS_MAC_SECT4_EN; 7185 break; 7186 case 5: 7187 val64 |= XGE_HAL_RTS_MAC_SECT5_EN; 7188 break; 7189 case 6: 7190 val64 |= XGE_HAL_RTS_MAC_SECT6_EN; 7191 break; 7192 case 7: 7193 val64 |= XGE_HAL_RTS_MAC_SECT7_EN; 7194 break; 7195 default: 7196 xge_debug_device(XGE_ERR, "Invalid Section value %d " 7197 , section); 7198 } 7199 7200 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 7201 val64, &bar0->rts_mac_cfg); 7202 return XGE_HAL_OK; 7203} 7204 7205 7206/** 7207 * xge_hal_fix_rldram_ecc_error 7208 * @hldev: private member of the device structure. 7209 * 7210 * SXE-02-010. 
This function will turn OFF the ECC error reporting for the 7211 * interface bet'n external Micron RLDRAM II device and memory controller. 7212 * The error would have been reported in RLD_ECC_DB_ERR_L and RLD_ECC_DB_ERR_U 7213 * fields of MC_ERR_REG register. Issue reported by HP-Unix folks during the 7214 * qualification of Herc. 7215 */ 7216xge_hal_status_e 7217xge_hal_fix_rldram_ecc_error(xge_hal_device_t * hldev) 7218{ 7219 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; 7220 u64 val64; 7221 7222 // Enter Test Mode. 7223 val64 = XGE_HAL_MC_RLDRAM_TEST_MODE; 7224 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7225 &bar0->mc_rldram_test_ctrl); 7226 7227 // Enable fg/bg tests. 7228 val64 = 0x0100000000000000ULL; 7229 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7230 &bar0->mc_driver); 7231 7232 // Enable RLDRAM configuration. 7233 val64 = 0x0000000000017B00ULL; 7234 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7235 &bar0->mc_rldram_mrs); 7236 7237 // Enable RLDRAM queues. 7238 val64 = 0x0000000001017B00ULL; 7239 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7240 &bar0->mc_rldram_mrs); 7241 7242 // Setup test ranges 7243 val64 = 0x00000000001E0100ULL; 7244 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7245 &bar0->mc_rldram_test_add); 7246 7247 val64 = 0x00000100001F0100ULL; 7248 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7249 &bar0->mc_rldram_test_add_bkg); 7250 // Start Reads. 
7251 val64 = 0x0001000000010000ULL; 7252 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7253 &bar0->mc_rldram_test_ctrl); 7254 7255 if (__hal_device_register_poll(hldev, &bar0->mc_rldram_test_ctrl, 1, 7256 XGE_HAL_MC_RLDRAM_TEST_DONE, 7257 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK){ 7258 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; 7259 } 7260 7261 // Exit test mode 7262 val64 = 0x0000000000000000ULL; 7263 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 7264 &bar0->mc_rldram_test_ctrl); 7265 7266 return XGE_HAL_OK; 7267} 7268