1/***************************************************************************** 2 * * 3 * File: sge.c * 4 * $Revision: 1.1.1.1 $ * 5 * $Date: 2007/08/03 18:52:46 $ * 6 * Description: * 7 * DMA engine. * 8 * part of the Chelsio 10Gb Ethernet Driver. * 9 * * 10 * This program is free software; you can redistribute it and/or modify * 11 * it under the terms of the GNU General Public License, version 2, as * 12 * published by the Free Software Foundation. * 13 * * 14 * You should have received a copy of the GNU General Public License along * 15 * with this program; if not, write to the Free Software Foundation, Inc., * 16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * 17 * * 18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * 19 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * 20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * 21 * * 22 * http://www.chelsio.com * 23 * * 24 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * 25 * All rights reserved. * 26 * * 27 * Maintainers: maintainers@chelsio.com * 28 * * 29 * Authors: Dimitrios Michailidis <dm@chelsio.com> * 30 * Tina Yang <tainay@chelsio.com> * 31 * Felix Marti <felix@chelsio.com> * 32 * Scott Bardone <sbardone@chelsio.com> * 33 * Kurt Ottaway <kottaway@chelsio.com> * 34 * Frank DiMambro <frank@chelsio.com> * 35 * * 36 * History: * 37 * * 38 ****************************************************************************/ 39 40#include "common.h" 41 42#include <linux/types.h> 43#include <linux/errno.h> 44#include <linux/pci.h> 45#include <linux/ktime.h> 46#include <linux/netdevice.h> 47#include <linux/etherdevice.h> 48#include <linux/if_vlan.h> 49#include <linux/skbuff.h> 50#include <linux/init.h> 51#include <linux/mm.h> 52#include <linux/tcp.h> 53#include <linux/ip.h> 54#include <linux/in.h> 55#include <linux/if_arp.h> 56 57#include "cpl5_cmd.h" 58#include "sge.h" 59#include "regs.h" 60#include "espi.h" 61 62/* This belongs in if_ether.h */ 63#define ETH_P_CPL5 0xf 64 65#define SGE_CMDQ_N 2 66#define SGE_FREELQ_N 2 67#define SGE_CMDQ0_E_N 1024 68#define SGE_CMDQ1_E_N 128 69#define SGE_FREEL_SIZE 4096 70#define SGE_JUMBO_FREEL_SIZE 512 71#define SGE_FREEL_REFILL_THRESH 16 72#define SGE_RESPQ_E_N 1024 73#define SGE_INTRTIMER_NRES 1000 74#define SGE_RX_SM_BUF_SIZE 1536 75#define SGE_TX_DESC_MAX_PLEN 16384 76 77#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4) 78 79/* 80 * Period of the TX buffer reclaim timer. This timer does not need to run 81 * frequently as TX buffers are usually reclaimed by new TX packets. 82 */ 83#define TX_RECLAIM_PERIOD (HZ / 4) 84 85#define M_CMD_LEN 0x7fffffff 86#define V_CMD_LEN(v) (v) 87#define G_CMD_LEN(v) ((v) & M_CMD_LEN) 88#define V_CMD_GEN1(v) ((v) << 31) 89#define V_CMD_GEN2(v) (v) 90#define F_CMD_DATAVALID (1 << 1) 91#define F_CMD_SOP (1 << 2) 92#define V_CMD_EOP(v) ((v) << 3) 93 94/* 95 * Command queue, receive buffer list, and response queue descriptors. 96 */ 97#if defined(__BIG_ENDIAN_BITFIELD) 98struct cmdQ_e { 99 u32 addr_lo; 100 u32 len_gen; 101 u32 flags; 102 u32 addr_hi; 103}; 104 105struct freelQ_e { 106 u32 addr_lo; 107 u32 len_gen; 108 u32 gen2; 109 u32 addr_hi; 110}; 111 112struct respQ_e { 113 u32 Qsleeping : 4; 114 u32 Cmdq1CreditReturn : 5; 115 u32 Cmdq1DmaComplete : 5; 116 u32 Cmdq0CreditReturn : 5; 117 u32 Cmdq0DmaComplete : 5; 118 u32 FreelistQid : 2; 119 u32 CreditValid : 1; 120 u32 DataValid : 1; 121 u32 Offload : 1; 122 u32 Eop : 1; 123 u32 Sop : 1; 124 u32 GenerationBit : 1; 125 u32 BufferLength; 126}; 127#elif defined(__LITTLE_ENDIAN_BITFIELD) 128struct cmdQ_e { 129 u32 len_gen; 130 u32 addr_lo; 131 u32 addr_hi; 132 u32 flags; 133}; 134 135struct freelQ_e { 136 u32 len_gen; 137 u32 addr_lo; 138 u32 addr_hi; 139 u32 gen2; 140}; 141 142struct respQ_e { 143 u32 BufferLength; 144 u32 GenerationBit : 1; 145 u32 Sop : 1; 146 u32 Eop : 1; 147 u32 Offload : 1; 148 u32 DataValid : 1; 149 u32 CreditValid : 1; 150 u32 FreelistQid : 2; 151 u32 Cmdq0DmaComplete : 5; 152 u32 Cmdq0CreditReturn : 5; 153 u32 Cmdq1DmaComplete : 5; 154 u32 Cmdq1CreditReturn : 5; 155 u32 Qsleeping : 4; 156} ; 157#endif 158 159/* 160 * SW Context Command and Freelist Queue Descriptors 161 */ 162struct cmdQ_ce { 163 struct sk_buff *skb; 164 DECLARE_PCI_UNMAP_ADDR(dma_addr); 165 DECLARE_PCI_UNMAP_LEN(dma_len); 166}; 167 168struct freelQ_ce { 169 struct sk_buff *skb; 170 DECLARE_PCI_UNMAP_ADDR(dma_addr); 171 DECLARE_PCI_UNMAP_LEN(dma_len); 172}; 173 174/* 175 * SW command, freelist and response rings 176 */ 177struct cmdQ { 178 unsigned long status; /* HW DMA fetch status */ 179 unsigned int in_use; /* # of in-use command descriptors */ 180 unsigned int size; /* # of descriptors */ 181 unsigned int processed; /* total # of descs HW has processed */ 182 unsigned int cleaned; /* total # of descs SW has reclaimed */ 183 unsigned int stop_thres; /* SW TX queue suspend threshold */ 184 u16 pidx; /* producer index (SW) */ 185 u16 cidx; /* consumer index (HW) */ 186 u8 genbit; /* current generation (=valid) bit */ 187 u8 sop; /* is next entry start of packet? */ 188 struct cmdQ_e *entries; /* HW command descriptor Q */ 189 struct cmdQ_ce *centries; /* SW command context descriptor Q */ 190 dma_addr_t dma_addr; /* DMA addr HW command descriptor Q */ 191 spinlock_t lock; /* Lock to protect cmdQ enqueuing */ 192}; 193 194struct freelQ { 195 unsigned int credits; /* # of available RX buffers */ 196 unsigned int size; /* free list capacity */ 197 u16 pidx; /* producer index (SW) */ 198 u16 cidx; /* consumer index (HW) */ 199 u16 rx_buffer_size; /* Buffer size on this free list */ 200 u16 dma_offset; /* DMA offset to align IP headers */ 201 u16 recycleq_idx; /* skb recycle q to use */ 202 u8 genbit; /* current generation (=valid) bit */ 203 struct freelQ_e *entries; /* HW freelist descriptor Q */ 204 struct freelQ_ce *centries; /* SW freelist context descriptor Q */ 205 dma_addr_t dma_addr; /* DMA addr HW freelist descriptor Q */ 206}; 207 208struct respQ { 209 unsigned int credits; /* credits to be returned to SGE */ 210 unsigned int size; /* # of response Q descriptors */ 211 u16 cidx; /* consumer index (SW) */ 212 u8 genbit; /* current generation(=valid) bit */ 213 struct respQ_e *entries; /* HW response descriptor Q */ 214 dma_addr_t dma_addr; /* DMA addr HW response descriptor Q */ 215}; 216 217/* Bit flags for cmdQ.status */ 218enum { 219 CMDQ_STAT_RUNNING = 1, /* fetch engine is running */ 220 CMDQ_STAT_LAST_PKT_DB = 2 /* last packet rung the doorbell */ 221}; 222 223/* T204 TX SW scheduler */ 224 225/* Per T204 TX port */ 226struct sched_port { 227 unsigned int avail; /* available bits - quota */ 228 unsigned int drain_bits_per_1024ns; /* drain rate */ 229 unsigned int speed; /* drain rate, mbps */ 230 unsigned int mtu; /* mtu size */ 231 struct sk_buff_head skbq; /* pending skbs */ 232}; 233 234/* Per T204 device */ 235struct sched { 236 ktime_t last_updated; /* last time quotas were computed */ 237 unsigned int max_avail; /* max bits to be sent to any port */ 238 unsigned int port; /* port index (round robin ports) */ 239 unsigned int num; /* num skbs in per port queues */ 240 struct sched_port p[MAX_NPORTS]; 241 struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */ 242}; 243static void restart_sched(unsigned long); 244 245 246/* 247 * Main SGE data structure 248 * 249 * Interrupts are handled by a single CPU and it is likely that on a MP system 250 * the application is migrated to another CPU. In that scenario, we try to 251 * seperate the RX(in irq context) and TX state in order to decrease memory 252 * contention. 253 */ 254struct sge { 255 struct adapter *adapter; /* adapter backpointer */ 256 struct net_device *netdev; /* netdevice backpointer */ 257 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */ 258 struct respQ respQ; /* response Q */ 259 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */ 260 unsigned int rx_pkt_pad; /* RX padding for L2 packets */ 261 unsigned int jumbo_fl; /* jumbo freelist Q index */ 262 unsigned int intrtimer_nres; /* no-resource interrupt timer */ 263 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */ 264 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ 265 struct timer_list espibug_timer; 266 unsigned long espibug_timeout; 267 struct sk_buff *espibug_skb[MAX_NPORTS]; 268 u32 sge_control; /* shadow value of sge control reg */ 269 struct sge_intr_counts stats; 270 struct sge_port_stats *port_stats[MAX_NPORTS]; 271 struct sched *tx_sched; 272 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; 273}; 274 275/* 276 * stop tasklet and free all pending skb's 277 */ 278static void tx_sched_stop(struct sge *sge) 279{ 280 struct sched *s = sge->tx_sched; 281 int i; 282 283 tasklet_kill(&s->sched_tsk); 284 285 for (i = 0; i < MAX_NPORTS; i++) 286 __skb_queue_purge(&s->p[s->port].skbq); 287} 288 289/* 290 * t1_sched_update_parms() is called when the MTU or link speed changes. It 291 * re-computes scheduler parameters to scope with the change. 292 */ 293unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, 294 unsigned int mtu, unsigned int speed) 295{ 296 struct sched *s = sge->tx_sched; 297 struct sched_port *p = &s->p[port]; 298 unsigned int max_avail_segs; 299 300 pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed); 301 if (speed) 302 p->speed = speed; 303 if (mtu) 304 p->mtu = mtu; 305 306 if (speed || mtu) { 307 unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40); 308 do_div(drain, (p->mtu + 50) * 1000); 309 p->drain_bits_per_1024ns = (unsigned int) drain; 310 311 if (p->speed < 1000) 312 p->drain_bits_per_1024ns = 313 90 * p->drain_bits_per_1024ns / 100; 314 } 315 316 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) { 317 p->drain_bits_per_1024ns -= 16; 318 s->max_avail = max(4096U, p->mtu + 16 + 14 + 4); 319 max_avail_segs = max(1U, 4096 / (p->mtu - 40)); 320 } else { 321 s->max_avail = 16384; 322 max_avail_segs = max(1U, 9000 / (p->mtu - 40)); 323 } 324 325 pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u " 326 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu, 327 p->speed, s->max_avail, max_avail_segs, 328 p->drain_bits_per_1024ns); 329 330 return max_avail_segs * (p->mtu - 40); 331} 332 333/* 334 * t1_sched_max_avail_bytes() tells the scheduler the maximum amount of 335 * data that can be pushed per port. 336 */ 337void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val) 338{ 339 struct sched *s = sge->tx_sched; 340 unsigned int i; 341 342 s->max_avail = val; 343 for (i = 0; i < MAX_NPORTS; i++) 344 t1_sched_update_parms(sge, i, 0, 0); 345} 346 347/* 348 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port 349 * is draining. 350 */ 351void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port, 352 unsigned int val) 353{ 354 struct sched *s = sge->tx_sched; 355 struct sched_port *p = &s->p[port]; 356 p->drain_bits_per_1024ns = val * 1024 / 1000; 357 t1_sched_update_parms(sge, port, 0, 0); 358} 359 360 361/* 362 * get_clock() implements a ns clock (see ktime_get) 363 */ 364static inline ktime_t get_clock(void) 365{ 366 struct timespec ts; 367 368 ktime_get_ts(&ts); 369 return timespec_to_ktime(ts); 370} 371 372/* 373 * tx_sched_init() allocates resources and does basic initialization. 374 */ 375static int tx_sched_init(struct sge *sge) 376{ 377 struct sched *s; 378 int i; 379 380 s = kzalloc(sizeof (struct sched), GFP_KERNEL); 381 if (!s) 382 return -ENOMEM; 383 384 pr_debug("tx_sched_init\n"); 385 tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge); 386 sge->tx_sched = s; 387 388 for (i = 0; i < MAX_NPORTS; i++) { 389 skb_queue_head_init(&s->p[i].skbq); 390 t1_sched_update_parms(sge, i, 1500, 1000); 391 } 392 393 return 0; 394} 395 396/* 397 * sched_update_avail() computes the delta since the last time it was called 398 * and updates the per port quota (number of bits that can be sent to the any 399 * port). 400 */ 401static inline int sched_update_avail(struct sge *sge) 402{ 403 struct sched *s = sge->tx_sched; 404 ktime_t now = get_clock(); 405 unsigned int i; 406 long long delta_time_ns; 407 408 delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated)); 409 410 pr_debug("sched_update_avail delta=%lld\n", delta_time_ns); 411 if (delta_time_ns < 15000) 412 return 0; 413 414 for (i = 0; i < MAX_NPORTS; i++) { 415 struct sched_port *p = &s->p[i]; 416 unsigned int delta_avail; 417 418 delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13; 419 p->avail = min(p->avail + delta_avail, s->max_avail); 420 } 421 422 s->last_updated = now; 423 424 return 1; 425} 426 427/* 428 * sched_skb() is called from two different places. In the tx path, any 429 * packet generating load on an output port will call sched_skb() 430 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq 431 * context (skb == NULL). 432 * The scheduler only returns a skb (which will then be sent) if the 433 * length of the skb is <= the current quota of the output port. 434 */ 435static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, 436 unsigned int credits) 437{ 438 struct sched *s = sge->tx_sched; 439 struct sk_buff_head *skbq; 440 unsigned int i, len, update = 1; 441 442 pr_debug("sched_skb %p\n", skb); 443 if (!skb) { 444 if (!s->num) 445 return NULL; 446 } else { 447 skbq = &s->p[skb->dev->if_port].skbq; 448 __skb_queue_tail(skbq, skb); 449 s->num++; 450 skb = NULL; 451 } 452 453 if (credits < MAX_SKB_FRAGS + 1) 454 goto out; 455 456again: 457 for (i = 0; i < MAX_NPORTS; i++) { 458 s->port = ++s->port & (MAX_NPORTS - 1); 459 skbq = &s->p[s->port].skbq; 460 461 skb = skb_peek(skbq); 462 463 if (!skb) 464 continue; 465 466 len = skb->len; 467 if (len <= s->p[s->port].avail) { 468 s->p[s->port].avail -= len; 469 s->num--; 470 __skb_unlink(skb, skbq); 471 goto out; 472 } 473 skb = NULL; 474 } 475 476 if (update-- && sched_update_avail(sge)) 477 goto again; 478 479out: 480 /* If there are more pending skbs, we use the hardware to schedule us 481 * again. 482 */ 483 if (s->num && !skb) { 484 struct cmdQ *q = &sge->cmdQ[0]; 485 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 486 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { 487 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 488 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); 489 } 490 } 491 pr_debug("sched_skb ret %p\n", skb); 492 493 return skb; 494} 495 496/* 497 * PIO to indicate that memory mapped Q contains valid descriptor(s). 498 */ 499static inline void doorbell_pio(struct adapter *adapter, u32 val) 500{ 501 wmb(); 502 writel(val, adapter->regs + A_SG_DOORBELL); 503} 504 505/* 506 * Frees all RX buffers on the freelist Q. The caller must make sure that 507 * the SGE is turned off before calling this function. 508 */ 509static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) 510{ 511 unsigned int cidx = q->cidx; 512 513 while (q->credits--) { 514 struct freelQ_ce *ce = &q->centries[cidx]; 515 516 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 517 pci_unmap_len(ce, dma_len), 518 PCI_DMA_FROMDEVICE); 519 dev_kfree_skb(ce->skb); 520 ce->skb = NULL; 521 if (++cidx == q->size) 522 cidx = 0; 523 } 524} 525 526/* 527 * Free RX free list and response queue resources. 528 */ 529static void free_rx_resources(struct sge *sge) 530{ 531 struct pci_dev *pdev = sge->adapter->pdev; 532 unsigned int size, i; 533 534 if (sge->respQ.entries) { 535 size = sizeof(struct respQ_e) * sge->respQ.size; 536 pci_free_consistent(pdev, size, sge->respQ.entries, 537 sge->respQ.dma_addr); 538 } 539 540 for (i = 0; i < SGE_FREELQ_N; i++) { 541 struct freelQ *q = &sge->freelQ[i]; 542 543 if (q->centries) { 544 free_freelQ_buffers(pdev, q); 545 kfree(q->centries); 546 } 547 if (q->entries) { 548 size = sizeof(struct freelQ_e) * q->size; 549 pci_free_consistent(pdev, size, q->entries, 550 q->dma_addr); 551 } 552 } 553} 554 555/* 556 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a 557 * response queue. 558 */ 559static int alloc_rx_resources(struct sge *sge, struct sge_params *p) 560{ 561 struct pci_dev *pdev = sge->adapter->pdev; 562 unsigned int size, i; 563 564 for (i = 0; i < SGE_FREELQ_N; i++) { 565 struct freelQ *q = &sge->freelQ[i]; 566 567 q->genbit = 1; 568 q->size = p->freelQ_size[i]; 569 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; 570 size = sizeof(struct freelQ_e) * q->size; 571 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); 572 if (!q->entries) 573 goto err_no_mem; 574 575 size = sizeof(struct freelQ_ce) * q->size; 576 q->centries = kzalloc(size, GFP_KERNEL); 577 if (!q->centries) 578 goto err_no_mem; 579 } 580 581 /* 582 * Calculate the buffer sizes for the two free lists. FL0 accommodates 583 * regular sized Ethernet frames, FL1 is sized not to exceed 16K, 584 * including all the sk_buff overhead. 585 * 586 * Note: For T2 FL0 and FL1 are reversed. 587 */ 588 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + 589 sizeof(struct cpl_rx_data) + 590 sge->freelQ[!sge->jumbo_fl].dma_offset; 591 592 size = (16 * 1024) - 593 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 594 595 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; 596 597 /* 598 * Setup which skb recycle Q should be used when recycling buffers from 599 * each free list. 600 */ 601 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; 602 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; 603 604 sge->respQ.genbit = 1; 605 sge->respQ.size = SGE_RESPQ_E_N; 606 sge->respQ.credits = 0; 607 size = sizeof(struct respQ_e) * sge->respQ.size; 608 sge->respQ.entries = 609 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); 610 if (!sge->respQ.entries) 611 goto err_no_mem; 612 return 0; 613 614err_no_mem: 615 free_rx_resources(sge); 616 return -ENOMEM; 617} 618 619/* 620 * Reclaims n TX descriptors and frees the buffers associated with them. 621 */ 622static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) 623{ 624 struct cmdQ_ce *ce; 625 struct pci_dev *pdev = sge->adapter->pdev; 626 unsigned int cidx = q->cidx; 627 628 q->in_use -= n; 629 ce = &q->centries[cidx]; 630 while (n--) { 631 if (likely(pci_unmap_len(ce, dma_len))) { 632 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 633 pci_unmap_len(ce, dma_len), 634 PCI_DMA_TODEVICE); 635 if (q->sop) 636 q->sop = 0; 637 } 638 if (ce->skb) { 639 dev_kfree_skb_any(ce->skb); 640 q->sop = 1; 641 } 642 ce++; 643 if (++cidx == q->size) { 644 cidx = 0; 645 ce = q->centries; 646 } 647 } 648 q->cidx = cidx; 649} 650 651/* 652 * Free TX resources. 653 * 654 * Assumes that SGE is stopped and all interrupts are disabled. 655 */ 656static void free_tx_resources(struct sge *sge) 657{ 658 struct pci_dev *pdev = sge->adapter->pdev; 659 unsigned int size, i; 660 661 for (i = 0; i < SGE_CMDQ_N; i++) { 662 struct cmdQ *q = &sge->cmdQ[i]; 663 664 if (q->centries) { 665 if (q->in_use) 666 free_cmdQ_buffers(sge, q, q->in_use); 667 kfree(q->centries); 668 } 669 if (q->entries) { 670 size = sizeof(struct cmdQ_e) * q->size; 671 pci_free_consistent(pdev, size, q->entries, 672 q->dma_addr); 673 } 674 } 675} 676 677/* 678 * Allocates basic TX resources, consisting of memory mapped command Qs. 679 */ 680static int alloc_tx_resources(struct sge *sge, struct sge_params *p) 681{ 682 struct pci_dev *pdev = sge->adapter->pdev; 683 unsigned int size, i; 684 685 for (i = 0; i < SGE_CMDQ_N; i++) { 686 struct cmdQ *q = &sge->cmdQ[i]; 687 688 q->genbit = 1; 689 q->sop = 1; 690 q->size = p->cmdQ_size[i]; 691 q->in_use = 0; 692 q->status = 0; 693 q->processed = q->cleaned = 0; 694 q->stop_thres = 0; 695 spin_lock_init(&q->lock); 696 size = sizeof(struct cmdQ_e) * q->size; 697 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); 698 if (!q->entries) 699 goto err_no_mem; 700 701 size = sizeof(struct cmdQ_ce) * q->size; 702 q->centries = kzalloc(size, GFP_KERNEL); 703 if (!q->centries) 704 goto err_no_mem; 705 } 706 707 /* 708 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE 709 * only. For queue 0 set the stop threshold so we can handle one more 710 * packet from each port, plus reserve an additional 24 entries for 711 * Ethernet packets only. Queue 1 never suspends nor do we reserve 712 * space for Ethernet packets. 713 */ 714 sge->cmdQ[0].stop_thres = sge->adapter->params.nports * 715 (MAX_SKB_FRAGS + 1); 716 return 0; 717 718err_no_mem: 719 free_tx_resources(sge); 720 return -ENOMEM; 721} 722 723static inline void setup_ring_params(struct adapter *adapter, u64 addr, 724 u32 size, int base_reg_lo, 725 int base_reg_hi, int size_reg) 726{ 727 writel((u32)addr, adapter->regs + base_reg_lo); 728 writel(addr >> 32, adapter->regs + base_reg_hi); 729 writel(size, adapter->regs + size_reg); 730} 731 732/* 733 * Enable/disable VLAN acceleration. 734 */ 735void t1_set_vlan_accel(struct adapter *adapter, int on_off) 736{ 737 struct sge *sge = adapter->sge; 738 739 sge->sge_control &= ~F_VLAN_XTRACT; 740 if (on_off) 741 sge->sge_control |= F_VLAN_XTRACT; 742 if (adapter->open_device_map) { 743 writel(sge->sge_control, adapter->regs + A_SG_CONTROL); 744 readl(adapter->regs + A_SG_CONTROL); /* flush */ 745 } 746} 747 748/* 749 * Programs the various SGE registers. However, the engine is not yet enabled, 750 * but sge->sge_control is setup and ready to go. 751 */ 752static void configure_sge(struct sge *sge, struct sge_params *p) 753{ 754 struct adapter *ap = sge->adapter; 755 756 writel(0, ap->regs + A_SG_CONTROL); 757 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, 758 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE); 759 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size, 760 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE); 761 setup_ring_params(ap, sge->freelQ[0].dma_addr, 762 sge->freelQ[0].size, A_SG_FL0BASELWR, 763 A_SG_FL0BASEUPR, A_SG_FL0SIZE); 764 setup_ring_params(ap, sge->freelQ[1].dma_addr, 765 sge->freelQ[1].size, A_SG_FL1BASELWR, 766 A_SG_FL1BASEUPR, A_SG_FL1SIZE); 767 768 /* The threshold comparison uses <. */ 769 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD); 770 771 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size, 772 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE); 773 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT); 774 775 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | 776 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE | 777 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE | 778 V_RX_PKT_OFFSET(sge->rx_pkt_pad); 779 780#if defined(__BIG_ENDIAN_BITFIELD) 781 sge->sge_control |= F_ENABLE_BIG_ENDIAN; 782#endif 783 784 /* Initialize no-resource timer */ 785 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap); 786 787 t1_sge_set_coalesce_params(sge, p); 788} 789 790/* 791 * Return the payload capacity of the jumbo free-list buffers. 792 */ 793static inline unsigned int jumbo_payload_capacity(const struct sge *sge) 794{ 795 return sge->freelQ[sge->jumbo_fl].rx_buffer_size - 796 sge->freelQ[sge->jumbo_fl].dma_offset - 797 sizeof(struct cpl_rx_data); 798} 799 800/* 801 * Frees all SGE related resources and the sge structure itself 802 */ 803void t1_sge_destroy(struct sge *sge) 804{ 805 int i; 806 807 for_each_port(sge->adapter, i) 808 free_percpu(sge->port_stats[i]); 809 810 kfree(sge->tx_sched); 811 free_tx_resources(sge); 812 free_rx_resources(sge); 813 kfree(sge); 814} 815 816/* 817 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist 818 * context Q) until the Q is full or alloc_skb fails. 819 * 820 * It is possible that the generation bits already match, indicating that the 821 * buffer is already valid and nothing needs to be done. This happens when we 822 * copied a received buffer into a new sk_buff during the interrupt processing. 823 * 824 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad), 825 * we specify a RX_OFFSET in order to make sure that the IP header is 4B 826 * aligned. 827 */ 828static void refill_free_list(struct sge *sge, struct freelQ *q) 829{ 830 struct pci_dev *pdev = sge->adapter->pdev; 831 struct freelQ_ce *ce = &q->centries[q->pidx]; 832 struct freelQ_e *e = &q->entries[q->pidx]; 833 unsigned int dma_len = q->rx_buffer_size - q->dma_offset; 834 835 while (q->credits < q->size) { 836 struct sk_buff *skb; 837 dma_addr_t mapping; 838 839 skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC); 840 if (!skb) 841 break; 842 843 skb_reserve(skb, q->dma_offset); 844 mapping = pci_map_single(pdev, skb->data, dma_len, 845 PCI_DMA_FROMDEVICE); 846 skb_reserve(skb, sge->rx_pkt_pad); 847 848 ce->skb = skb; 849 pci_unmap_addr_set(ce, dma_addr, mapping); 850 pci_unmap_len_set(ce, dma_len, dma_len); 851 e->addr_lo = (u32)mapping; 852 e->addr_hi = (u64)mapping >> 32; 853 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); 854 wmb(); 855 e->gen2 = V_CMD_GEN2(q->genbit); 856 857 e++; 858 ce++; 859 if (++q->pidx == q->size) { 860 q->pidx = 0; 861 q->genbit ^= 1; 862 ce = q->centries; 863 e = q->entries; 864 } 865 q->credits++; 866 } 867} 868 869/* 870 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4 871 * of both rings, we go into 'few interrupt mode' in order to give the system 872 * time to free up resources. 873 */ 874static void freelQs_empty(struct sge *sge) 875{ 876 struct adapter *adapter = sge->adapter; 877 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE); 878 u32 irqholdoff_reg; 879 880 refill_free_list(sge, &sge->freelQ[0]); 881 refill_free_list(sge, &sge->freelQ[1]); 882 883 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && 884 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { 885 irq_reg |= F_FL_EXHAUSTED; 886 irqholdoff_reg = sge->fixed_intrtimer; 887 } else { 888 /* Clear the F_FL_EXHAUSTED interrupts for now */ 889 irq_reg &= ~F_FL_EXHAUSTED; 890 irqholdoff_reg = sge->intrtimer_nres; 891 } 892 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER); 893 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE); 894 895 /* We reenable the Qs to force a freelist GTS interrupt later */ 896 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE); 897} 898 899#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA) 900#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) 901#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \ 902 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) 903 904/* 905 * Disable SGE Interrupts 906 */ 907void t1_sge_intr_disable(struct sge *sge) 908{ 909 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); 910 911 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); 912 writel(0, sge->adapter->regs + A_SG_INT_ENABLE); 913} 914 915/* 916 * Enable SGE interrupts. 917 */ 918void t1_sge_intr_enable(struct sge *sge) 919{ 920 u32 en = SGE_INT_ENABLE; 921 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); 922 923 if (sge->adapter->flags & TSO_CAPABLE) 924 en &= ~F_PACKET_TOO_BIG; 925 writel(en, sge->adapter->regs + A_SG_INT_ENABLE); 926 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); 927} 928 929/* 930 * Clear SGE interrupts. 931 */ 932void t1_sge_intr_clear(struct sge *sge) 933{ 934 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE); 935 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE); 936} 937 938/* 939 * SGE 'Error' interrupt handler 940 */ 941int t1_sge_intr_error_handler(struct sge *sge) 942{ 943 struct adapter *adapter = sge->adapter; 944 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE); 945 946 if (adapter->flags & TSO_CAPABLE) 947 cause &= ~F_PACKET_TOO_BIG; 948 if (cause & F_RESPQ_EXHAUSTED) 949 sge->stats.respQ_empty++; 950 if (cause & F_RESPQ_OVERFLOW) { 951 sge->stats.respQ_overflow++; 952 CH_ALERT("%s: SGE response queue overflow\n", 953 adapter->name); 954 } 955 if (cause & F_FL_EXHAUSTED) { 956 sge->stats.freelistQ_empty++; 957 freelQs_empty(sge); 958 } 959 if (cause & F_PACKET_TOO_BIG) { 960 sge->stats.pkt_too_big++; 961 CH_ALERT("%s: SGE max packet size exceeded\n", 962 adapter->name); 963 } 964 if (cause & F_PACKET_MISMATCH) { 965 sge->stats.pkt_mismatch++; 966 CH_ALERT("%s: SGE packet mismatch\n", adapter->name); 967 } 968 if (cause & SGE_INT_FATAL) 969 t1_fatal_err(adapter); 970 971 writel(cause, adapter->regs + A_SG_INT_CAUSE); 972 return 0; 973} 974 975const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) 976{ 977 return &sge->stats; 978} 979 980void t1_sge_get_port_stats(const struct sge *sge, int port, 981 struct sge_port_stats *ss) 982{ 983 int cpu; 984 985 memset(ss, 0, sizeof(*ss)); 986 for_each_possible_cpu(cpu) { 987 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); 988 989 ss->rx_packets += st->rx_packets; 990 ss->rx_cso_good += st->rx_cso_good; 991 ss->tx_packets += st->tx_packets; 992 ss->tx_cso += st->tx_cso; 993 ss->tx_tso += st->tx_tso; 994 ss->vlan_xtract += st->vlan_xtract; 995 ss->vlan_insert += st->vlan_insert; 996 } 997} 998 999/** 1000 * recycle_fl_buf - recycle a free list buffer 1001 * @fl: the free list 1002 * @idx: index of buffer to recycle 1003 * 1004 * Recycles the specified buffer on the given free list by adding it at 1005 * the next available slot on the list. 1006 */ 1007static void recycle_fl_buf(struct freelQ *fl, int idx) 1008{ 1009 struct freelQ_e *from = &fl->entries[idx]; 1010 struct freelQ_e *to = &fl->entries[fl->pidx]; 1011 1012 fl->centries[fl->pidx] = fl->centries[idx]; 1013 to->addr_lo = from->addr_lo; 1014 to->addr_hi = from->addr_hi; 1015 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit); 1016 wmb(); 1017 to->gen2 = V_CMD_GEN2(fl->genbit); 1018 fl->credits++; 1019 1020 if (++fl->pidx == fl->size) { 1021 fl->pidx = 0; 1022 fl->genbit ^= 1; 1023 } 1024} 1025 1026static int copybreak __read_mostly = 256; 1027module_param(copybreak, int, 0); 1028MODULE_PARM_DESC(copybreak, "Receive copy threshold"); 1029 1030/** 1031 * get_packet - return the next ingress packet buffer 1032 * @pdev: the PCI device that received the packet 1033 * @fl: the SGE free list holding the packet 1034 * @len: the actual packet length, excluding any SGE padding 1035 * @dma_pad: padding at beginning of buffer left by SGE DMA 1036 * @skb_pad: padding to be used if the packet is copied 1037 * @copy_thres: length threshold under which a packet should be copied 1038 * @drop_thres: # of remaining buffers before we start dropping packets 1039 * 1040 * Get the next packet from a free list and complete setup of the 1041 * sk_buff. If the packet is small we make a copy and recycle the 1042 * original buffer, otherwise we use the original buffer itself. If a 1043 * positive drop threshold is supplied packets are dropped and their 1044 * buffers recycled if (a) the number of remaining buffers is under the 1045 * threshold and the packet is too big to copy, or (b) the packet should 1046 * be copied but there is no memory for the copy. 1047 */ 1048static inline struct sk_buff *get_packet(struct pci_dev *pdev, 1049 struct freelQ *fl, unsigned int len) 1050{ 1051 struct sk_buff *skb; 1052 const struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1053 1054 if (len < copybreak) { 1055 skb = alloc_skb(len + 2, GFP_ATOMIC); 1056 if (!skb) 1057 goto use_orig_buf; 1058 1059 skb_reserve(skb, 2); /* align IP header */ 1060 skb_put(skb, len); 1061 pci_dma_sync_single_for_cpu(pdev, 1062 pci_unmap_addr(ce, dma_addr), 1063 pci_unmap_len(ce, dma_len), 1064 PCI_DMA_FROMDEVICE); 1065 skb_copy_from_linear_data(ce->skb, skb->data, len); 1066 pci_dma_sync_single_for_device(pdev, 1067 pci_unmap_addr(ce, dma_addr), 1068 pci_unmap_len(ce, dma_len), 1069 PCI_DMA_FROMDEVICE); 1070 recycle_fl_buf(fl, fl->cidx); 1071 return skb; 1072 } 1073 1074use_orig_buf: 1075 if (fl->credits < 2) { 1076 recycle_fl_buf(fl, fl->cidx); 1077 return NULL; 1078 } 1079 1080 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 1081 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1082 skb = ce->skb; 1083 prefetch(skb->data); 1084 1085 skb_put(skb, len); 1086 return skb; 1087} 1088 1089/** 1090 * unexpected_offload - handle an unexpected offload packet 1091 * @adapter: the adapter 1092 * @fl: the free list that received the packet 1093 * 1094 * Called when we receive an unexpected offload packet (e.g., the TOE 1095 * function is disabled or the card is a NIC). Prints a message and 1096 * recycles the buffer. 1097 */ 1098static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) 1099{ 1100 struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1101 struct sk_buff *skb = ce->skb; 1102 1103 pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr), 1104 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1105 CH_ERR("%s: unexpected offload packet, cmd %u\n", 1106 adapter->name, *skb->data); 1107 recycle_fl_buf(fl, fl->cidx); 1108} 1109 1110/* 1111 * T1/T2 SGE limits the maximum DMA size per TX descriptor to 1112 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the 1113 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner. 1114 * Note that the *_large_page_tx_descs stuff will be optimized out when 1115 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN. 1116 * 1117 * compute_large_page_descs() computes how many additional descriptors are 1118 * required to break down the stack's request. 1119 */ 1120static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) 1121{ 1122 unsigned int count = 0; 1123 1124 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { 1125 unsigned int nfrags = skb_shinfo(skb)->nr_frags; 1126 unsigned int i, len = skb->len - skb->data_len; 1127 while (len > SGE_TX_DESC_MAX_PLEN) { 1128 count++; 1129 len -= SGE_TX_DESC_MAX_PLEN; 1130 } 1131 for (i = 0; nfrags--; i++) { 1132 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1133 len = frag->size; 1134 while (len > SGE_TX_DESC_MAX_PLEN) { 1135 count++; 1136 len -= SGE_TX_DESC_MAX_PLEN; 1137 } 1138 } 1139 } 1140 return count; 1141} 1142 1143/* 1144 * Write a cmdQ entry. 1145 * 1146 * Since this function writes the 'flags' field, it must not be used to 1147 * write the first cmdQ entry. 1148 */ 1149static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping, 1150 unsigned int len, unsigned int gen, 1151 unsigned int eop) 1152{ 1153 if (unlikely(len > SGE_TX_DESC_MAX_PLEN)) 1154 BUG(); 1155 e->addr_lo = (u32)mapping; 1156 e->addr_hi = (u64)mapping >> 32; 1157 e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen); 1158 e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen); 1159} 1160 1161/* 1162 * See comment for previous function. 1163 * 1164 * write_tx_descs_large_page() writes additional SGE tx descriptors if 1165 * *desc_len exceeds HW's capability. 1166 */ 1167static inline unsigned int write_large_page_tx_descs(unsigned int pidx, 1168 struct cmdQ_e **e, 1169 struct cmdQ_ce **ce, 1170 unsigned int *gen, 1171 dma_addr_t *desc_mapping, 1172 unsigned int *desc_len, 1173 unsigned int nfrags, 1174 struct cmdQ *q) 1175{ 1176 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { 1177 struct cmdQ_e *e1 = *e; 1178 struct cmdQ_ce *ce1 = *ce; 1179 1180 while (*desc_len > SGE_TX_DESC_MAX_PLEN) { 1181 *desc_len -= SGE_TX_DESC_MAX_PLEN; 1182 write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN, 1183 *gen, nfrags == 0 && *desc_len == 0); 1184 ce1->skb = NULL; 1185 pci_unmap_len_set(ce1, dma_len, 0); 1186 *desc_mapping += SGE_TX_DESC_MAX_PLEN; 1187 if (*desc_len) { 1188 ce1++; 1189 e1++; 1190 if (++pidx == q->size) { 1191 pidx = 0; 1192 *gen ^= 1; 1193 ce1 = q->centries; 1194 e1 = q->entries; 1195 } 1196 } 1197 } 1198 *e = e1; 1199 *ce = ce1; 1200 } 1201 return pidx; 1202} 1203 1204/* 1205 * Write the command descriptors to transmit the given skb starting at 1206 * descriptor pidx with the given generation. 1207 */ 1208static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, 1209 unsigned int pidx, unsigned int gen, 1210 struct cmdQ *q) 1211{ 1212 dma_addr_t mapping, desc_mapping; 1213 struct cmdQ_e *e, *e1; 1214 struct cmdQ_ce *ce; 1215 unsigned int i, flags, first_desc_len, desc_len, 1216 nfrags = skb_shinfo(skb)->nr_frags; 1217 1218 e = e1 = &q->entries[pidx]; 1219 ce = &q->centries[pidx]; 1220 1221 mapping = pci_map_single(adapter->pdev, skb->data, 1222 skb->len - skb->data_len, PCI_DMA_TODEVICE); 1223 1224 desc_mapping = mapping; 1225 desc_len = skb->len - skb->data_len; 1226 1227 flags = F_CMD_DATAVALID | F_CMD_SOP | 1228 V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) | 1229 V_CMD_GEN2(gen); 1230 first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ? 1231 desc_len : SGE_TX_DESC_MAX_PLEN; 1232 e->addr_lo = (u32)desc_mapping; 1233 e->addr_hi = (u64)desc_mapping >> 32; 1234 e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen); 1235 ce->skb = NULL; 1236 pci_unmap_len_set(ce, dma_len, 0); 1237 1238 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN && 1239 desc_len > SGE_TX_DESC_MAX_PLEN) { 1240 desc_mapping += first_desc_len; 1241 desc_len -= first_desc_len; 1242 e1++; 1243 ce++; 1244 if (++pidx == q->size) { 1245 pidx = 0; 1246 gen ^= 1; 1247 e1 = q->entries; 1248 ce = q->centries; 1249 } 1250 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, 1251 &desc_mapping, &desc_len, 1252 nfrags, q); 1253 1254 if (likely(desc_len)) 1255 write_tx_desc(e1, desc_mapping, desc_len, gen, 1256 nfrags == 0); 1257 } 1258 1259 ce->skb = NULL; 1260 pci_unmap_addr_set(ce, dma_addr, mapping); 1261 pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len); 1262 1263 for (i = 0; nfrags--; i++) { 1264 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1265 e1++; 1266 ce++; 1267 if (++pidx == q->size) { 1268 pidx = 0; 1269 gen ^= 1; 1270 e1 = q->entries; 1271 ce = q->centries; 1272 } 1273 1274 mapping = pci_map_page(adapter->pdev, frag->page, 1275 frag->page_offset, frag->size, 1276 PCI_DMA_TODEVICE); 1277 desc_mapping = mapping; 1278 desc_len = frag->size; 1279 1280 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, 1281 &desc_mapping, &desc_len, 1282 nfrags, q); 1283 if (likely(desc_len)) 1284 write_tx_desc(e1, desc_mapping, desc_len, gen, 1285 nfrags == 0); 1286 ce->skb = NULL; 1287 pci_unmap_addr_set(ce, dma_addr, mapping); 1288 pci_unmap_len_set(ce, dma_len, frag->size); 1289 } 1290 ce->skb = skb; 1291 wmb(); 1292 e->flags = flags; 1293} 1294 1295/* 1296 * Clean up completed Tx buffers. 1297 */ 1298static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q) 1299{ 1300 unsigned int reclaim = q->processed - q->cleaned; 1301 1302 if (reclaim) { 1303 pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n", 1304 q->processed, q->cleaned); 1305 free_cmdQ_buffers(sge, q, reclaim); 1306 q->cleaned += reclaim; 1307 } 1308} 1309 1310/* 1311 * Called from tasklet. Checks the scheduler for any 1312 * pending skbs that can be sent. 1313 */ 1314static void restart_sched(unsigned long arg) 1315{ 1316 struct sge *sge = (struct sge *) arg; 1317 struct adapter *adapter = sge->adapter; 1318 struct cmdQ *q = &sge->cmdQ[0]; 1319 struct sk_buff *skb; 1320 unsigned int credits, queued_skb = 0; 1321 1322 spin_lock(&q->lock); 1323 reclaim_completed_tx(sge, q); 1324 1325 credits = q->size - q->in_use; 1326 pr_debug("restart_sched credits=%d\n", credits); 1327 while ((skb = sched_skb(sge, NULL, credits)) != NULL) { 1328 unsigned int genbit, pidx, count; 1329 count = 1 + skb_shinfo(skb)->nr_frags; 1330 count += compute_large_page_tx_descs(skb); 1331 q->in_use += count; 1332 genbit = q->genbit; 1333 pidx = q->pidx; 1334 q->pidx += count; 1335 if (q->pidx >= q->size) { 1336 q->pidx -= q->size; 1337 q->genbit ^= 1; 1338 } 1339 write_tx_descs(adapter, skb, pidx, genbit, q); 1340 credits = q->size - q->in_use; 1341 queued_skb = 1; 1342 } 1343 1344 if (queued_skb) { 1345 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 1346 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { 1347 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 1348 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); 1349 } 1350 } 1351 spin_unlock(&q->lock); 1352} 1353 1354/** 1355 * sge_rx - process an ingress ethernet packet 1356 * @sge: the sge structure 1357 * @fl: the free list that contains the packet buffer 1358 * @len: the packet length 1359 * 1360 * Process an ingress ethernet pakcet and deliver it to the stack. 1361 */ 1362static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) 1363{ 1364 struct sk_buff *skb; 1365 const struct cpl_rx_pkt *p; 1366 struct adapter *adapter = sge->adapter; 1367 struct sge_port_stats *st; 1368 1369 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad); 1370 if (unlikely(!skb)) { 1371 sge->stats.rx_drops++; 1372 return; 1373 } 1374 1375 p = (const struct cpl_rx_pkt *) skb->data; 1376 if (p->iff >= adapter->params.nports) { 1377 kfree_skb(skb); 1378 return; 1379 } 1380 __skb_pull(skb, sizeof(*p)); 1381 1382 skb->dev->last_rx = jiffies; 1383 st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); 1384 st->rx_packets++; 1385 1386 skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); 1387 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && 1388 skb->protocol == htons(ETH_P_IP) && 1389 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { 1390 ++st->rx_cso_good; 1391 skb->ip_summed = CHECKSUM_UNNECESSARY; 1392 } else 1393 skb->ip_summed = CHECKSUM_NONE; 1394 1395 if (unlikely(adapter->vlan_grp && p->vlan_valid)) { 1396 st->vlan_xtract++; 1397#ifdef CONFIG_CHELSIO_T1_NAPI 1398 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, 1399 ntohs(p->vlan)); 1400#else 1401 vlan_hwaccel_rx(skb, adapter->vlan_grp, 1402 ntohs(p->vlan)); 1403#endif 1404 } else { 1405#ifdef CONFIG_CHELSIO_T1_NAPI 1406 netif_receive_skb(skb); 1407#else 1408 netif_rx(skb); 1409#endif 1410 } 1411} 1412 1413/* 1414 * Returns true if a command queue has enough available descriptors that 1415 * we can resume Tx operation after temporarily disabling its packet queue. 1416 */ 1417static inline int enough_free_Tx_descs(const struct cmdQ *q) 1418{ 1419 unsigned int r = q->processed - q->cleaned; 1420 1421 return q->in_use - r < (q->size >> 1); 1422} 1423 1424/* 1425 * Called when sufficient space has become available in the SGE command queues 1426 * after the Tx packet schedulers have been suspended to restart the Tx path. 1427 */ 1428static void restart_tx_queues(struct sge *sge) 1429{ 1430 struct adapter *adap = sge->adapter; 1431 int i; 1432 1433 if (!enough_free_Tx_descs(&sge->cmdQ[0])) 1434 return; 1435 1436 for_each_port(adap, i) { 1437 struct net_device *nd = adap->port[i].dev; 1438 1439 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) && 1440 netif_running(nd)) { 1441 sge->stats.cmdQ_restarted[2]++; 1442 netif_wake_queue(nd); 1443 } 1444 } 1445} 1446 1447/* 1448 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 1449 * information. 1450 */ 1451static unsigned int update_tx_info(struct adapter *adapter, 1452 unsigned int flags, 1453 unsigned int pr0) 1454{ 1455 struct sge *sge = adapter->sge; 1456 struct cmdQ *cmdq = &sge->cmdQ[0]; 1457 1458 cmdq->processed += pr0; 1459 if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) { 1460 freelQs_empty(sge); 1461 flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE); 1462 } 1463 if (flags & F_CMDQ0_ENABLE) { 1464 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status); 1465 1466 if (cmdq->cleaned + cmdq->in_use != cmdq->processed && 1467 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) { 1468 set_bit(CMDQ_STAT_RUNNING, &cmdq->status); 1469 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); 1470 } 1471 if (sge->tx_sched) 1472 tasklet_hi_schedule(&sge->tx_sched->sched_tsk); 1473 1474 flags &= ~F_CMDQ0_ENABLE; 1475 } 1476 1477 if (unlikely(sge->stopped_tx_queues != 0)) 1478 restart_tx_queues(sge); 1479 1480 return flags; 1481} 1482 1483/* 1484 * Process SGE responses, up to the supplied budget. Returns the number of 1485 * responses processed. A negative budget is effectively unlimited. 1486 */ 1487static int process_responses(struct adapter *adapter, int budget) 1488{ 1489 struct sge *sge = adapter->sge; 1490 struct respQ *q = &sge->respQ; 1491 struct respQ_e *e = &q->entries[q->cidx]; 1492 int done = 0; 1493 unsigned int flags = 0; 1494 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; 1495 1496 while (done < budget && e->GenerationBit == q->genbit) { 1497 flags |= e->Qsleeping; 1498 1499 cmdq_processed[0] += e->Cmdq0CreditReturn; 1500 cmdq_processed[1] += e->Cmdq1CreditReturn; 1501 1502 /* We batch updates to the TX side to avoid cacheline 1503 * ping-pong of TX state information on MP where the sender 1504 * might run on a different CPU than this function... 1505 */ 1506 if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) { 1507 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1508 cmdq_processed[0] = 0; 1509 } 1510 1511 if (unlikely(cmdq_processed[1] > 16)) { 1512 sge->cmdQ[1].processed += cmdq_processed[1]; 1513 cmdq_processed[1] = 0; 1514 } 1515 1516 if (likely(e->DataValid)) { 1517 struct freelQ *fl = &sge->freelQ[e->FreelistQid]; 1518 1519 BUG_ON(!e->Sop || !e->Eop); 1520 if (unlikely(e->Offload)) 1521 unexpected_offload(adapter, fl); 1522 else 1523 sge_rx(sge, fl, e->BufferLength); 1524 1525 ++done; 1526 1527 /* 1528 * Note: this depends on each packet consuming a 1529 * single free-list buffer; cf. the BUG above. 1530 */ 1531 if (++fl->cidx == fl->size) 1532 fl->cidx = 0; 1533 prefetch(fl->centries[fl->cidx].skb); 1534 1535 if (unlikely(--fl->credits < 1536 fl->size - SGE_FREEL_REFILL_THRESH)) 1537 refill_free_list(sge, fl); 1538 } else 1539 sge->stats.pure_rsps++; 1540 1541 e++; 1542 if (unlikely(++q->cidx == q->size)) { 1543 q->cidx = 0; 1544 q->genbit ^= 1; 1545 e = q->entries; 1546 } 1547 prefetch(e); 1548 1549 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { 1550 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); 1551 q->credits = 0; 1552 } 1553 } 1554 1555 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1556 sge->cmdQ[1].processed += cmdq_processed[1]; 1557 1558 return done; 1559} 1560 1561static inline int responses_pending(const struct adapter *adapter) 1562{ 1563 const struct respQ *Q = &adapter->sge->respQ; 1564 const struct respQ_e *e = &Q->entries[Q->cidx]; 1565 1566 return (e->GenerationBit == Q->genbit); 1567} 1568 1569#ifdef CONFIG_CHELSIO_T1_NAPI 1570/* 1571 * A simpler version of process_responses() that handles only pure (i.e., 1572 * non data-carrying) responses. Such respones are too light-weight to justify 1573 * calling a softirq when using NAPI, so we handle them specially in hard 1574 * interrupt context. The function is called with a pointer to a response, 1575 * which the caller must ensure is a valid pure response. Returns 1 if it 1576 * encounters a valid data-carrying response, 0 otherwise. 1577 */ 1578static int process_pure_responses(struct adapter *adapter) 1579{ 1580 struct sge *sge = adapter->sge; 1581 struct respQ *q = &sge->respQ; 1582 struct respQ_e *e = &q->entries[q->cidx]; 1583 const struct freelQ *fl = &sge->freelQ[e->FreelistQid]; 1584 unsigned int flags = 0; 1585 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; 1586 1587 prefetch(fl->centries[fl->cidx].skb); 1588 if (e->DataValid) 1589 return 1; 1590 1591 do { 1592 flags |= e->Qsleeping; 1593 1594 cmdq_processed[0] += e->Cmdq0CreditReturn; 1595 cmdq_processed[1] += e->Cmdq1CreditReturn; 1596 1597 e++; 1598 if (unlikely(++q->cidx == q->size)) { 1599 q->cidx = 0; 1600 q->genbit ^= 1; 1601 e = q->entries; 1602 } 1603 prefetch(e); 1604 1605 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { 1606 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); 1607 q->credits = 0; 1608 } 1609 sge->stats.pure_rsps++; 1610 } while (e->GenerationBit == q->genbit && !e->DataValid); 1611 1612 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1613 sge->cmdQ[1].processed += cmdq_processed[1]; 1614 1615 return e->GenerationBit == q->genbit; 1616} 1617 1618/* 1619 * Handler for new data events when using NAPI. This does not need any locking 1620 * or protection from interrupts as data interrupts are off at this point and 1621 * other adapter interrupts do not interfere. 1622 */ 1623int t1_poll(struct net_device *dev, int *budget) 1624{ 1625 struct adapter *adapter = dev->priv; 1626 int work_done; 1627 1628 work_done = process_responses(adapter, min(*budget, dev->quota)); 1629 *budget -= work_done; 1630 dev->quota -= work_done; 1631 1632 if (unlikely(responses_pending(adapter))) 1633 return 1; 1634 1635 netif_rx_complete(dev); 1636 writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1637 1638 return 0; 1639 1640} 1641 1642/* 1643 * NAPI version of the main interrupt handler. 1644 */ 1645irqreturn_t t1_interrupt(int irq, void *data) 1646{ 1647 struct adapter *adapter = data; 1648 struct sge *sge = adapter->sge; 1649 int handled; 1650 1651 if (likely(responses_pending(adapter))) { 1652 struct net_device *dev = sge->netdev; 1653 1654 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); 1655 1656 if (__netif_rx_schedule_prep(dev)) { 1657 if (process_pure_responses(adapter)) 1658 __netif_rx_schedule(dev); 1659 else { 1660 /* no data, no NAPI needed */ 1661 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1662 netif_poll_enable(dev); /* undo schedule_prep */ 1663 } 1664 } 1665 return IRQ_HANDLED; 1666 } 1667 1668 spin_lock(&adapter->async_lock); 1669 handled = t1_slow_intr_handler(adapter); 1670 spin_unlock(&adapter->async_lock); 1671 1672 if (!handled) 1673 sge->stats.unhandled_irqs++; 1674 1675 return IRQ_RETVAL(handled != 0); 1676} 1677 1678#else 1679/* 1680 * Main interrupt handler, optimized assuming that we took a 'DATA' 1681 * interrupt. 1682 * 1683 * 1. Clear the interrupt 1684 * 2. Loop while we find valid descriptors and process them; accumulate 1685 * information that can be processed after the loop 1686 * 3. Tell the SGE at which index we stopped processing descriptors 1687 * 4. Bookkeeping; free TX buffers, ring doorbell if there are any 1688 * outstanding TX buffers waiting, replenish RX buffers, potentially 1689 * reenable upper layers if they were turned off due to lack of TX 1690 * resources which are available again. 1691 * 5. If we took an interrupt, but no valid respQ descriptors was found we 1692 * let the slow_intr_handler run and do error handling. 1693 */ 1694irqreturn_t t1_interrupt(int irq, void *cookie) 1695{ 1696 int work_done; 1697 struct adapter *adapter = cookie; 1698 struct respQ *Q = &adapter->sge->respQ; 1699 1700 spin_lock(&adapter->async_lock); 1701 1702 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); 1703 1704 if (likely(responses_pending(adapter))) 1705 work_done = process_responses(adapter, -1); 1706 else 1707 work_done = t1_slow_intr_handler(adapter); 1708 1709 /* 1710 * The unconditional clearing of the PL_CAUSE above may have raced 1711 * with DMA completion and the corresponding generation of a response 1712 * to cause us to miss the resulting data interrupt. The next write 1713 * is also unconditional to recover the missed interrupt and render 1714 * this race harmless. 1715 */ 1716 writel(Q->cidx, adapter->regs + A_SG_SLEEPING); 1717 1718 if (!work_done) 1719 adapter->sge->stats.unhandled_irqs++; 1720 spin_unlock(&adapter->async_lock); 1721 return IRQ_RETVAL(work_done != 0); 1722} 1723#endif 1724 1725/* 1726 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. 1727 * 1728 * The code figures out how many entries the sk_buff will require in the 1729 * cmdQ and updates the cmdQ data structure with the state once the enqueue 1730 * has complete. Then, it doesn't access the global structure anymore, but 1731 * uses the corresponding fields on the stack. In conjuction with a spinlock 1732 * around that code, we can make the function reentrant without holding the 1733 * lock when we actually enqueue (which might be expensive, especially on 1734 * architectures with IO MMUs). 1735 * 1736 * This runs with softirqs disabled. 1737 */ 1738static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, 1739 unsigned int qid, struct net_device *dev) 1740{ 1741 struct sge *sge = adapter->sge; 1742 struct cmdQ *q = &sge->cmdQ[qid]; 1743 unsigned int credits, pidx, genbit, count, use_sched_skb = 0; 1744 1745 if (!spin_trylock(&q->lock)) 1746 return NETDEV_TX_LOCKED; 1747 1748 reclaim_completed_tx(sge, q); 1749 1750 pidx = q->pidx; 1751 credits = q->size - q->in_use; 1752 count = 1 + skb_shinfo(skb)->nr_frags; 1753 count += compute_large_page_tx_descs(skb); 1754 1755 /* Ethernet packet */ 1756 if (unlikely(credits < count)) { 1757 if (!netif_queue_stopped(dev)) { 1758 netif_stop_queue(dev); 1759 set_bit(dev->if_port, &sge->stopped_tx_queues); 1760 sge->stats.cmdQ_full[2]++; 1761 CH_ERR("%s: Tx ring full while queue awake!\n", 1762 adapter->name); 1763 } 1764 spin_unlock(&q->lock); 1765 return NETDEV_TX_BUSY; 1766 } 1767 1768 if (unlikely(credits - count < q->stop_thres)) { 1769 netif_stop_queue(dev); 1770 set_bit(dev->if_port, &sge->stopped_tx_queues); 1771 sge->stats.cmdQ_full[2]++; 1772 } 1773 1774 /* T204 cmdQ0 skbs that are destined for a certain port have to go 1775 * through the scheduler. 1776 */ 1777 if (sge->tx_sched && !qid && skb->dev) { 1778use_sched: 1779 use_sched_skb = 1; 1780 /* Note that the scheduler might return a different skb than 1781 * the one passed in. 1782 */ 1783 skb = sched_skb(sge, skb, credits); 1784 if (!skb) { 1785 spin_unlock(&q->lock); 1786 return NETDEV_TX_OK; 1787 } 1788 pidx = q->pidx; 1789 count = 1 + skb_shinfo(skb)->nr_frags; 1790 count += compute_large_page_tx_descs(skb); 1791 } 1792 1793 q->in_use += count; 1794 genbit = q->genbit; 1795 pidx = q->pidx; 1796 q->pidx += count; 1797 if (q->pidx >= q->size) { 1798 q->pidx -= q->size; 1799 q->genbit ^= 1; 1800 } 1801 spin_unlock(&q->lock); 1802 1803 write_tx_descs(adapter, skb, pidx, genbit, q); 1804 1805 /* 1806 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring 1807 * the doorbell if the Q is asleep. There is a natural race, where 1808 * the hardware is going to sleep just after we checked, however, 1809 * then the interrupt handler will detect the outstanding TX packet 1810 * and ring the doorbell for us. 1811 */ 1812 if (qid) 1813 doorbell_pio(adapter, F_CMDQ1_ENABLE); 1814 else { 1815 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 1816 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { 1817 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 1818 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); 1819 } 1820 } 1821 1822 if (use_sched_skb) { 1823 if (spin_trylock(&q->lock)) { 1824 credits = q->size - q->in_use; 1825 skb = NULL; 1826 goto use_sched; 1827 } 1828 } 1829 return NETDEV_TX_OK; 1830} 1831 1832#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) 1833 1834/* 1835 * eth_hdr_len - return the length of an Ethernet header 1836 * @data: pointer to the start of the Ethernet header 1837 * 1838 * Returns the length of an Ethernet header, including optional VLAN tag. 1839 */ 1840static inline int eth_hdr_len(const void *data) 1841{ 1842 const struct ethhdr *e = data; 1843 1844 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN; 1845} 1846 1847/* 1848 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. 1849 */ 1850int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) 1851{ 1852 struct adapter *adapter = dev->priv; 1853 struct sge *sge = adapter->sge; 1854 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], smp_processor_id()); 1855 struct cpl_tx_pkt *cpl; 1856 struct sk_buff *orig_skb = skb; 1857 int ret; 1858 1859 if (skb->protocol == htons(ETH_P_CPL5)) 1860 goto send; 1861 1862 if (skb_shinfo(skb)->gso_size) { 1863 int eth_type; 1864 struct cpl_tx_pkt_lso *hdr; 1865 1866 ++st->tx_tso; 1867 1868 eth_type = skb_network_offset(skb) == ETH_HLEN ? 1869 CPL_ETH_II : CPL_ETH_II_VLAN; 1870 1871 hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); 1872 hdr->opcode = CPL_TX_PKT_LSO; 1873 hdr->ip_csum_dis = hdr->l4_csum_dis = 0; 1874 hdr->ip_hdr_words = ip_hdr(skb)->ihl; 1875 hdr->tcp_hdr_words = tcp_hdr(skb)->doff; 1876 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, 1877 skb_shinfo(skb)->gso_size)); 1878 hdr->len = htonl(skb->len - sizeof(*hdr)); 1879 cpl = (struct cpl_tx_pkt *)hdr; 1880 } else { 1881 /* 1882 * Packets shorter than ETH_HLEN can break the MAC, drop them 1883 * early. Also, we may get oversized packets because some 1884 * parts of the kernel don't handle our unusual hard_header_len 1885 * right, drop those too. 1886 */ 1887 if (unlikely(skb->len < ETH_HLEN || 1888 skb->len > dev->mtu + eth_hdr_len(skb->data))) { 1889 pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name, 1890 skb->len, eth_hdr_len(skb->data), dev->mtu); 1891 dev_kfree_skb_any(skb); 1892 return NETDEV_TX_OK; 1893 } 1894 1895 /* 1896 * We are using a non-standard hard_header_len and some kernel 1897 * components, such as pktgen, do not handle it right. 1898 * Complain when this happens but try to fix things up. 1899 */ 1900 if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { 1901 pr_debug("%s: headroom %d header_len %d\n", dev->name, 1902 skb_headroom(skb), dev->hard_header_len); 1903 1904 if (net_ratelimit()) 1905 printk(KERN_ERR "%s: inadequate headroom in " 1906 "Tx packet\n", dev->name); 1907 skb = skb_realloc_headroom(skb, sizeof(*cpl)); 1908 dev_kfree_skb_any(orig_skb); 1909 if (!skb) 1910 return NETDEV_TX_OK; 1911 } 1912 1913 if (!(adapter->flags & UDP_CSUM_CAPABLE) && 1914 skb->ip_summed == CHECKSUM_PARTIAL && 1915 ip_hdr(skb)->protocol == IPPROTO_UDP) { 1916 if (unlikely(skb_checksum_help(skb))) { 1917 pr_debug("%s: unable to do udp checksum\n", dev->name); 1918 dev_kfree_skb_any(skb); 1919 return NETDEV_TX_OK; 1920 } 1921 } 1922 1923 /* Hmmm, assuming to catch the gratious arp... and we'll use 1924 * it to flush out stuck espi packets... 1925 */ 1926 if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { 1927 if (skb->protocol == htons(ETH_P_ARP) && 1928 arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) { 1929 adapter->sge->espibug_skb[dev->if_port] = skb; 1930 /* We want to re-use this skb later. We 1931 * simply bump the reference count and it 1932 * will not be freed... 1933 */ 1934 skb = skb_get(skb); 1935 } 1936 } 1937 1938 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl)); 1939 cpl->opcode = CPL_TX_PKT; 1940 cpl->ip_csum_dis = 1; /* SW calculates IP csum */ 1941 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1; 1942 /* the length field isn't used so don't bother setting it */ 1943 1944 st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL); 1945 } 1946 cpl->iff = dev->if_port; 1947 1948#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 1949 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) { 1950 cpl->vlan_valid = 1; 1951 cpl->vlan = htons(vlan_tx_tag_get(skb)); 1952 st->vlan_insert++; 1953 } else 1954#endif 1955 cpl->vlan_valid = 0; 1956 1957send: 1958 st->tx_packets++; 1959 dev->trans_start = jiffies; 1960 ret = t1_sge_tx(skb, adapter, 0, dev); 1961 1962 /* If transmit busy, and we reallocated skb's due to headroom limit, 1963 * then silently discard to avoid leak. 1964 */ 1965 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) { 1966 dev_kfree_skb_any(skb); 1967 ret = NETDEV_TX_OK; 1968 } 1969 return ret; 1970} 1971 1972/* 1973 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled. 1974 */ 1975static void sge_tx_reclaim_cb(unsigned long data) 1976{ 1977 int i; 1978 struct sge *sge = (struct sge *)data; 1979 1980 for (i = 0; i < SGE_CMDQ_N; ++i) { 1981 struct cmdQ *q = &sge->cmdQ[i]; 1982 1983 if (!spin_trylock(&q->lock)) 1984 continue; 1985 1986 reclaim_completed_tx(sge, q); 1987 if (i == 0 && q->in_use) { /* flush pending credits */ 1988 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); 1989 } 1990 spin_unlock(&q->lock); 1991 } 1992 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); 1993} 1994 1995/* 1996 * Propagate changes of the SGE coalescing parameters to the HW. 1997 */ 1998int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) 1999{ 2000 sge->fixed_intrtimer = p->rx_coalesce_usecs * 2001 core_ticks_per_usec(sge->adapter); 2002 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER); 2003 return 0; 2004} 2005 2006/* 2007 * Allocates both RX and TX resources and configures the SGE. However, 2008 * the hardware is not enabled yet. 2009 */ 2010int t1_sge_configure(struct sge *sge, struct sge_params *p) 2011{ 2012 if (alloc_rx_resources(sge, p)) 2013 return -ENOMEM; 2014 if (alloc_tx_resources(sge, p)) { 2015 free_rx_resources(sge); 2016 return -ENOMEM; 2017 } 2018 configure_sge(sge, p); 2019 2020 /* 2021 * Now that we have sized the free lists calculate the payload 2022 * capacity of the large buffers. Other parts of the driver use 2023 * this to set the max offload coalescing size so that RX packets 2024 * do not overflow our large buffers. 2025 */ 2026 p->large_buf_capacity = jumbo_payload_capacity(sge); 2027 return 0; 2028} 2029 2030/* 2031 * Disables the DMA engine. 2032 */ 2033void t1_sge_stop(struct sge *sge) 2034{ 2035 int i; 2036 writel(0, sge->adapter->regs + A_SG_CONTROL); 2037 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ 2038 2039 if (is_T2(sge->adapter)) 2040 del_timer_sync(&sge->espibug_timer); 2041 2042 del_timer_sync(&sge->tx_reclaim_timer); 2043 if (sge->tx_sched) 2044 tx_sched_stop(sge); 2045 2046 for (i = 0; i < MAX_NPORTS; i++) 2047 if (sge->espibug_skb[i]) 2048 kfree_skb(sge->espibug_skb[i]); 2049} 2050 2051/* 2052 * Enables the DMA engine. 2053 */ 2054void t1_sge_start(struct sge *sge) 2055{ 2056 refill_free_list(sge, &sge->freelQ[0]); 2057 refill_free_list(sge, &sge->freelQ[1]); 2058 2059 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); 2060 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); 2061 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ 2062 2063 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); 2064 2065 if (is_T2(sge->adapter)) 2066 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); 2067} 2068 2069/* 2070 * Callback for the T2 ESPI 'stuck packet feature' workaorund 2071 */ 2072static void espibug_workaround_t204(unsigned long data) 2073{ 2074 struct adapter *adapter = (struct adapter *)data; 2075 struct sge *sge = adapter->sge; 2076 unsigned int nports = adapter->params.nports; 2077 u32 seop[MAX_NPORTS]; 2078 2079 if (adapter->open_device_map & PORT_MASK) { 2080 int i; 2081 2082 if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) 2083 return; 2084 2085 for (i = 0; i < nports; i++) { 2086 struct sk_buff *skb = sge->espibug_skb[i]; 2087 2088 if (!netif_running(adapter->port[i].dev) || 2089 netif_queue_stopped(adapter->port[i].dev) || 2090 !seop[i] || ((seop[i] & 0xfff) != 0) || !skb) 2091 continue; 2092 2093 if (!skb->cb[0]) { 2094 u8 ch_mac_addr[ETH_ALEN] = { 2095 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 2096 }; 2097 2098 skb_copy_to_linear_data_offset(skb, 2099 sizeof(struct cpl_tx_pkt), 2100 ch_mac_addr, 2101 ETH_ALEN); 2102 skb_copy_to_linear_data_offset(skb, 2103 skb->len - 10, 2104 ch_mac_addr, 2105 ETH_ALEN); 2106 skb->cb[0] = 0xff; 2107 } 2108 2109 /* bump the reference count to avoid freeing of 2110 * the skb once the DMA has completed. 2111 */ 2112 skb = skb_get(skb); 2113 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev); 2114 } 2115 } 2116 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); 2117} 2118 2119static void espibug_workaround(unsigned long data) 2120{ 2121 struct adapter *adapter = (struct adapter *)data; 2122 struct sge *sge = adapter->sge; 2123 2124 if (netif_running(adapter->port[0].dev)) { 2125 struct sk_buff *skb = sge->espibug_skb[0]; 2126 u32 seop = t1_espi_get_mon(adapter, 0x930, 0); 2127 2128 if ((seop & 0xfff0fff) == 0xfff && skb) { 2129 if (!skb->cb[0]) { 2130 u8 ch_mac_addr[ETH_ALEN] = 2131 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; 2132 skb_copy_to_linear_data_offset(skb, 2133 sizeof(struct cpl_tx_pkt), 2134 ch_mac_addr, 2135 ETH_ALEN); 2136 skb_copy_to_linear_data_offset(skb, 2137 skb->len - 10, 2138 ch_mac_addr, 2139 ETH_ALEN); 2140 skb->cb[0] = 0xff; 2141 } 2142 2143 /* bump the reference count to avoid freeing of the 2144 * skb once the DMA has completed. 2145 */ 2146 skb = skb_get(skb); 2147 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); 2148 } 2149 } 2150 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); 2151} 2152 2153/* 2154 * Creates a t1_sge structure and returns suggested resource parameters. 2155 */ 2156struct sge * __devinit t1_sge_create(struct adapter *adapter, 2157 struct sge_params *p) 2158{ 2159 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); 2160 int i; 2161 2162 if (!sge) 2163 return NULL; 2164 2165 sge->adapter = adapter; 2166 sge->netdev = adapter->port[0].dev; 2167 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; 2168 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; 2169 2170 for_each_port(adapter, i) { 2171 sge->port_stats[i] = alloc_percpu(struct sge_port_stats); 2172 if (!sge->port_stats[i]) 2173 goto nomem_port; 2174 } 2175 2176 init_timer(&sge->tx_reclaim_timer); 2177 sge->tx_reclaim_timer.data = (unsigned long)sge; 2178 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; 2179 2180 if (is_T2(sge->adapter)) { 2181 init_timer(&sge->espibug_timer); 2182 2183 if (adapter->params.nports > 1) { 2184 tx_sched_init(sge); 2185 sge->espibug_timer.function = espibug_workaround_t204; 2186 } else 2187 sge->espibug_timer.function = espibug_workaround; 2188 sge->espibug_timer.data = (unsigned long)sge->adapter; 2189 2190 sge->espibug_timeout = 1; 2191 /* for T204, every 10ms */ 2192 if (adapter->params.nports > 1) 2193 sge->espibug_timeout = HZ/100; 2194 } 2195 2196 2197 p->cmdQ_size[0] = SGE_CMDQ0_E_N; 2198 p->cmdQ_size[1] = SGE_CMDQ1_E_N; 2199 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; 2200 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; 2201 if (sge->tx_sched) { 2202 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) 2203 p->rx_coalesce_usecs = 15; 2204 else 2205 p->rx_coalesce_usecs = 50; 2206 } else 2207 p->rx_coalesce_usecs = 50; 2208 2209 p->coalesce_enable = 0; 2210 p->sample_interval_usecs = 0; 2211 2212 return sge; 2213nomem_port: 2214 while (i >= 0) { 2215 free_percpu(sge->port_stats[i]); 2216 --i; 2217 } 2218 kfree(sge); 2219 return NULL; 2220 2221} 2222