/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
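/* The _ORDER values encode the cache sizes as 8 << ORDER, which
 * efx_nic_init_common() below checks with BUILD_BUG_ON().  For
 * example, TX: 8 << 1 == 16 entries; RX: 8 << 3 == 64 entries.
 */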
/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this
 * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
int efx_nic_rx_xoff_thresh = -1;
module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");

/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this
 * watermark, send XON. Only used if TX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
int efx_nic_rx_xon_thresh = -1;
module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
	(0x00010200 + (_channel)->channel)

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return (((efx_qword_t *) (channel->eventq.addr)) + index);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1])));
}
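/* Illustrative example of the scheme above: efx_nic_init_eventq()
 * memsets the ring to 0xff, so an unused entry reads as
 * FFFFFFFF:FFFFFFFF and is "not present".  If the NIC's DMA write of
 * a new event has so far reached only one dword, the other dword is
 * still all-ones, so efx_event_present() still returns false and the
 * half-written event is picked up on a later poll.
 */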
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
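/* Worked example of the buffer table layout (illustrative): each
 * table entry maps one 4KB page, and FRF_AZ_BUF_ADR_FBUF holds the
 * page number, i.e. dma_addr >> 12.  A 32KB event queue therefore
 * occupies 8 consecutive entries; entry (index + i) is programmed
 * with (dma_addr + i * 4096) >> 12 by the loop above.
 */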
/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
			    buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
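/* Illustrative ring-index arithmetic: the free-running write_count is
 * reduced to a ring slot with EFX_TXQ_MASK.  Assuming EFX_TXQ_SIZE is
 * 1024 (so EFX_TXQ_MASK is 1023), a write_count of 1030 publishes
 * slot 1030 & 1023 == 6 through the doorbell above.
 */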
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */
	efx_notify_tx_desc(tx_queue);
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
		     EFX_TXQ_SIZE & EFX_TXQ_MASK);
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					EFX_TXQ_SIZE * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}
}
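/* A note on FRF_AZ_TX_DESCQ_SIZE (illustrative): txd.entries counts
 * 4KB buffer-table pages, not descriptors, so with 8-byte descriptors
 * a ring of EFX_TXQ_SIZE 1024 occupies 2 pages and __ffs(2) == 1 is
 * written to the field.  The BUILD_BUG_ON() in efx_nic_probe_tx()
 * keeps the ring between 512 and 4096 descriptors (encodings 0-3),
 * assuming the hardware reads the field as log2 of the page count.
 */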
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(rx_queue,
				  rx_queue->notified_count &
				  EFX_RXQ_MASK);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(rx_queue->efx, &reg,
			FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
}
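/* Illustrative: RX descriptors are built lazily.  If the refill path
 * has advanced added_count 32 entries past notified_count, the loop
 * above builds those 32 descriptors in one go and a single doorbell
 * write then publishes them all to the NIC.
 */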
int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
		     EFX_RXQ_SIZE & EFX_RXQ_MASK);
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					EFX_RXQ_SIZE * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  rx_queue->queue, rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      rx_queue->channel->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 rx_queue->queue);

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
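/* Illustrative use of efx_generate_event(): the self-test and refill
 * paths below (efx_nic_generate_test_event() and
 * efx_nic_generate_fill_event()) build a DRV_GEN_EV qword carrying a
 * per-channel magic number and hand it to this helper, so the event
 * arrives through the normal event queue path.
 */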
/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      EFX_TXQ_MASK);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
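/* Worked example of the batching arithmetic above (illustrative,
 * assuming EFX_TXQ_MASK is 1023): with read_count at 100 and a
 * completion event for descriptor 105, (105 - 100) & 1023 == 5
 * packets completed.  The mask also handles wrap: read_count 1022
 * and event pointer 2 give (2 - 1022) & 1023 == 4.
 */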
/* Detect errors included in the rx_ev_pkt_ok bit. */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 const efx_qword_t *event,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
{
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  "RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  rx_queue->queue, EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & EFX_RXQ_MASK;
	dropped = (index - expected) & EFX_RXQ_MASK;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}
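/* Illustrative: the dropped-event count uses the same modular
 * arithmetic as the TX path.  With EFX_RXQ_MASK of 1023, an expected
 * index of 1020 and an event index of 2 imply (2 - 1020) & 1023 == 6
 * descriptors were skipped before this event arrived.
 */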
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = &efx->rx_queue[channel->channel];

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
			likely(efx->rx_checksum_enabled) &&
			(rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
	} else {
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned code;

	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
		++channel->magic_count;
	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here */
		efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
}
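/* Illustrative magic-code layout: channel 3 recognises 0x00010103 as
 * its test event and 0x00010203 as its fill event (see the
 * EFX_CHANNEL_MAGIC_* macros at the top of this file); the two
 * ranges stay distinct for channel numbers below 256.
 */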
/* Global events are basically PHY events */
static void
efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
		/* Ignored */
		handled = true;
	}

	if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		efx->xmac_poll_required = true;
		handled = true;
	}

	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}

	if (!handled)
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown global event "
			  EFX_QWORD_FMT "\n", channel->channel,
			  EFX_QWORD_VAL(*event));
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
986 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 987 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 988 break; 989 default: 990 netif_vdbg(efx, hw, efx->net_dev, 991 "channel %d unknown driver event code %d " 992 "data %04x\n", channel->channel, ev_sub_code, 993 ev_sub_data); 994 break; 995 } 996} 997 998int efx_nic_process_eventq(struct efx_channel *channel, int budget) 999{ 1000 unsigned int read_ptr; 1001 efx_qword_t event, *p_event; 1002 int ev_code; 1003 int tx_packets = 0; 1004 int spent = 0; 1005 1006 read_ptr = channel->eventq_read_ptr; 1007 1008 for (;;) { 1009 p_event = efx_event(channel, read_ptr); 1010 event = *p_event; 1011 1012 if (!efx_event_present(&event)) 1013 /* End of events */ 1014 break; 1015 1016 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1017 "channel %d event is "EFX_QWORD_FMT"\n", 1018 channel->channel, EFX_QWORD_VAL(event)); 1019 1020 /* Clear this event by marking it all ones */ 1021 EFX_SET_QWORD(*p_event); 1022 1023 /* Increment read pointer */ 1024 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK; 1025 1026 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1027 1028 switch (ev_code) { 1029 case FSE_AZ_EV_CODE_RX_EV: 1030 efx_handle_rx_event(channel, &event); 1031 if (++spent == budget) 1032 goto out; 1033 break; 1034 case FSE_AZ_EV_CODE_TX_EV: 1035 tx_packets += efx_handle_tx_event(channel, &event); 1036 if (tx_packets >= EFX_TXQ_SIZE) { 1037 spent = budget; 1038 goto out; 1039 } 1040 break; 1041 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1042 efx_handle_generated_event(channel, &event); 1043 break; 1044 case FSE_AZ_EV_CODE_GLOBAL_EV: 1045 efx_handle_global_event(channel, &event); 1046 break; 1047 case FSE_AZ_EV_CODE_DRIVER_EV: 1048 efx_handle_driver_event(channel, &event); 1049 break; 1050 case FSE_CZ_EV_CODE_MCDI_EV: 1051 efx_mcdi_process_event(channel, &event); 1052 break; 1053 default: 1054 netif_err(channel->efx, hw, channel->efx->net_dev, 1055 "channel %d unknown event type %d (data " 1056 EFX_QWORD_FMT ")\n", channel->channel, 1057 ev_code, EFX_QWORD_VAL(event)); 1058 } 1059 } 1060 1061out: 1062 channel->eventq_read_ptr = read_ptr; 1063 return spent; 1064} 1065 1066 1067/* Allocate buffer table entries for event queue */ 1068int efx_nic_probe_eventq(struct efx_channel *channel) 1069{ 1070 struct efx_nic *efx = channel->efx; 1071 BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 || 1072 EFX_EVQ_SIZE & EFX_EVQ_MASK); 1073 return efx_alloc_special_buffer(efx, &channel->eventq, 1074 EFX_EVQ_SIZE * sizeof(efx_qword_t)); 1075} 1076 1077void efx_nic_init_eventq(struct efx_channel *channel) 1078{ 1079 efx_oword_t reg; 1080 struct efx_nic *efx = channel->efx; 1081 1082 netif_dbg(efx, hw, efx->net_dev, 1083 "channel %d event queue in special buffers %d-%d\n", 1084 channel->channel, channel->eventq.index, 1085 channel->eventq.index + channel->eventq.entries - 1); 1086 1087 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 1088 EFX_POPULATE_OWORD_3(reg, 1089 FRF_CZ_TIMER_Q_EN, 1, 1090 FRF_CZ_HOST_NOTIFY_MODE, 0, 1091 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); 1092 efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); 1093 } 1094 1095 /* Pin event queue buffer */ 1096 efx_init_special_buffer(efx, &channel->eventq); 1097 1098 /* Fill event queue with all ones (i.e. 
void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_nic_generate_test_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

void efx_nic_generate_fill_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/


static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = &efx->channel[0];
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx->tx_queue + ev_queue;
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx->rx_queue + ev_queue;
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}
/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_tx_queue(tx_queue, efx)
		efx_flush_tx_queue(tx_queue);

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_rx_queue(rx_queue, efx) {
			if (rx_queue->flushed == FLUSH_PENDING)
				++rx_pending;
		}
		efx_for_each_rx_queue(rx_queue, efx) {
			if (rx_pending == EFX_RX_FLUSH_COUNT)
				break;
			if (rx_queue->flushed == FLUSH_FAILED ||
			    rx_queue->flushed == FLUSH_NONE) {
				efx_flush_rx_queue(rx_queue);
				++rx_pending;
			}
		}
		efx_for_each_tx_queue(tx_queue, efx) {
			if (tx_queue->flushed != FLUSH_DONE)
				++tx_pending;
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed.  We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_tx_queue(tx_queue, efx) {
		if (tx_queue->flushed != FLUSH_DONE)
			netif_err(efx, hw, efx->net_dev,
				  "tx queue %d flush command timed out\n",
				  tx_queue->queue);
		tx_queue->flushed = FLUSH_DONE;
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		if (rx_queue->flushed != FLUSH_DONE)
			netif_err(efx, hw, efx->net_dev,
				  "rx queue %d flush command timed out\n",
				  rx_queue->queue);
		rx_queue->flushed = FLUSH_DONE;
	}

	return -ETIMEDOUT;
}
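/* Illustrative timing: with EFX_FLUSH_POLL_COUNT of 100 and
 * EFX_FLUSH_INTERVAL of 10 ms, the loop above waits up to roughly one
 * second for all flush-done events before giving up and returning
 * -ETIMEDOUT.
 */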
/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	if (queues & (1U << efx->fatal_irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}
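/* Illustrative: the ISR is a per-channel bitmap, so queues == 0x0005
 * schedules channels 0 and 2.  The bit at efx->fatal_irq_level is
 * reserved for fatal interrupts and is checked first, before the
 * bitmap is walked.
 */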
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	if (channel->channel == efx->fatal_irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}


/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
	}
}
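/* Illustrative: rx_indir_table[] is indexed by the low bits of the
 * packet's RSS hash and each row names an RX queue.  Assuming it is
 * filled elsewhere with an even spread such as i % n_rx_channels,
 * 4 RX channels would map rows 0,1,2,3,4,... to queues 0,1,2,3,0,...
 */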
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 channel->name, channel);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, channel);
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
			     efx->type->tx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
			     efx->type->rx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->fatal_irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->fatal_irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
}

/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
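/* Illustrative expansion: REGISTER_AB(GPIO_CTL) becomes
 * { FR_AB_GPIO_CTL, REGISTER_REVISION_A, REGISTER_REVISION_B },
 * i.e. { FR_AB_GPIO_CTL, 1, 2 }, so the dump code below can compare
 * a register's revision span against efx->type->revision.
 */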
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* The register buffer is allocated with slab, so we can't
	 * reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	/* RX_FILTER_TBL{0,1} is huge and not used by this driver */
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}
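/* Illustrative sizing: each matching single register contributes
 * sizeof(efx_oword_t) == 16 bytes, and each table row contributes
 * min(step, 16) bytes.  The 1024-row BUF_FULL_TBL entry above (step 8)
 * therefore adds 8KB to the dump, matching efx_nic_get_regs() below,
 * which copies min(step, 16) bytes per row.
 */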
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}