/*****************************************************************************
 *                                                                           *
 * File: sge.c                                                               *
 * $Revision: 1.26 $                                                         *
 * $Date: 2005/06/21 18:29:48 $                                              *
 * Description:                                                              *
 *  DMA engine.                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, write to the Free Software Foundation, Inc.,  *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.                  *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/slab.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"

/* This belongs in if_ether.h */
#define ETH_P_CPL5 0xf

#define SGE_CMDQ_N		2
#define SGE_FREELQ_N		2
#define SGE_CMDQ0_E_N		1024
#define SGE_CMDQ1_E_N		128
#define SGE_FREEL_SIZE		4096
#define SGE_JUMBO_FREEL_SIZE	512
#define SGE_FREEL_REFILL_THRESH	16
#define SGE_RESPQ_E_N		1024
#define SGE_INTRTIMER_NRES	1000
#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_TX_DESC_MAX_PLEN	16384

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)

/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

#define M_CMD_LEN       0x7fffffff
#define V_CMD_LEN(v)    (v)
#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)   ((v) << 31)
#define V_CMD_GEN2(v)   (v)
#define F_CMD_DATAVALID (1 << 1)
#define F_CMD_SOP       (1 << 2)
#define V_CMD_EOP(v)    ((v) << 3)

/*
 * Command queue, receive buffer list, and response queue descriptors.
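 *
 * Each descriptor carries the producer's current generation bit; a consumer
 * compares it against its own genbit, which flips on every ring wrap, to
 * decide whether an entry is valid.  cmdQ and freelQ entries store the bit
 * twice (in len_gen and in flags/gen2), written on either side of a wmb(),
 * so the hardware never sees a half-written descriptor.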
97 */ 98#if defined(__BIG_ENDIAN_BITFIELD) 99struct cmdQ_e { 100 u32 addr_lo; 101 u32 len_gen; 102 u32 flags; 103 u32 addr_hi; 104}; 105 106struct freelQ_e { 107 u32 addr_lo; 108 u32 len_gen; 109 u32 gen2; 110 u32 addr_hi; 111}; 112 113struct respQ_e { 114 u32 Qsleeping : 4; 115 u32 Cmdq1CreditReturn : 5; 116 u32 Cmdq1DmaComplete : 5; 117 u32 Cmdq0CreditReturn : 5; 118 u32 Cmdq0DmaComplete : 5; 119 u32 FreelistQid : 2; 120 u32 CreditValid : 1; 121 u32 DataValid : 1; 122 u32 Offload : 1; 123 u32 Eop : 1; 124 u32 Sop : 1; 125 u32 GenerationBit : 1; 126 u32 BufferLength; 127}; 128#elif defined(__LITTLE_ENDIAN_BITFIELD) 129struct cmdQ_e { 130 u32 len_gen; 131 u32 addr_lo; 132 u32 addr_hi; 133 u32 flags; 134}; 135 136struct freelQ_e { 137 u32 len_gen; 138 u32 addr_lo; 139 u32 addr_hi; 140 u32 gen2; 141}; 142 143struct respQ_e { 144 u32 BufferLength; 145 u32 GenerationBit : 1; 146 u32 Sop : 1; 147 u32 Eop : 1; 148 u32 Offload : 1; 149 u32 DataValid : 1; 150 u32 CreditValid : 1; 151 u32 FreelistQid : 2; 152 u32 Cmdq0DmaComplete : 5; 153 u32 Cmdq0CreditReturn : 5; 154 u32 Cmdq1DmaComplete : 5; 155 u32 Cmdq1CreditReturn : 5; 156 u32 Qsleeping : 4; 157} ; 158#endif 159 160/* 161 * SW Context Command and Freelist Queue Descriptors 162 */ 163struct cmdQ_ce { 164 struct sk_buff *skb; 165 DEFINE_DMA_UNMAP_ADDR(dma_addr); 166 DEFINE_DMA_UNMAP_LEN(dma_len); 167}; 168 169struct freelQ_ce { 170 struct sk_buff *skb; 171 DEFINE_DMA_UNMAP_ADDR(dma_addr); 172 DEFINE_DMA_UNMAP_LEN(dma_len); 173}; 174 175/* 176 * SW command, freelist and response rings 177 */ 178struct cmdQ { 179 unsigned long status; /* HW DMA fetch status */ 180 unsigned int in_use; /* # of in-use command descriptors */ 181 unsigned int size; /* # of descriptors */ 182 unsigned int processed; /* total # of descs HW has processed */ 183 unsigned int cleaned; /* total # of descs SW has reclaimed */ 184 unsigned int stop_thres; /* SW TX queue suspend threshold */ 185 u16 pidx; /* producer index (SW) */ 186 u16 cidx; /* consumer index (HW) */ 187 u8 genbit; /* current generation (=valid) bit */ 188 u8 sop; /* is next entry start of packet? 
	struct cmdQ_e  *entries;	/* HW command descriptor Q */
	struct cmdQ_ce *centries;	/* SW command context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW command descriptor Q */
	spinlock_t	lock;		/* Lock to protect cmdQ enqueuing */
};

struct freelQ {
	unsigned int	credits;	/* # of available RX buffers */
	unsigned int	size;		/* free list capacity */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u16		rx_buffer_size;	/* Buffer size on this free list */
	u16		dma_offset;	/* DMA offset to align IP headers */
	u16		recycleq_idx;	/* skb recycle q to use */
	u8		genbit;		/* current generation (=valid) bit */
	struct freelQ_e	*entries;	/* HW freelist descriptor Q */
	struct freelQ_ce *centries;	/* SW freelist context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW freelist descriptor Q */
};

struct respQ {
	unsigned int	credits;	/* credits to be returned to SGE */
	unsigned int	size;		/* # of response Q descriptors */
	u16		cidx;		/* consumer index (SW) */
	u8		genbit;		/* current generation (=valid) bit */
	struct respQ_e *entries;	/* HW response descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW response descriptor Q */
};

/* Bit flags for cmdQ.status */
enum {
	CMDQ_STAT_RUNNING = 1,		/* fetch engine is running */
	CMDQ_STAT_LAST_PKT_DB = 2	/* last packet rung the doorbell */
};

/* T204 TX SW scheduler */

/* Per T204 TX port */
struct sched_port {
	unsigned int	avail;		/* available bits - quota */
	unsigned int	drain_bits_per_1024ns; /* drain rate */
	unsigned int	speed;		/* drain rate, mbps */
	unsigned int	mtu;		/* mtu size */
	struct sk_buff_head skbq;	/* pending skbs */
};

/* Per T204 device */
struct sched {
	ktime_t		last_updated;	/* last time quotas were computed */
	unsigned int	max_avail;	/* max bits to be sent to any port */
	unsigned int	port;		/* port index (round robin ports) */
	unsigned int	num;		/* num skbs in per port queues */
	struct sched_port p[MAX_NPORTS];
	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
};
static void restart_sched(unsigned long);


/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on an MP
 * system the application is migrated to another CPU.  In that scenario, we
 * try to separate the RX (in irq context) and TX state in order to decrease
 * memory contention.
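 *
 * To that end the cmdQ array below is tagged ____cacheline_aligned_in_smp,
 * keeping the TX enqueue state out of the cachelines that the RX path
 * updates from irq context.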
254 */ 255struct sge { 256 struct adapter *adapter; /* adapter backpointer */ 257 struct net_device *netdev; /* netdevice backpointer */ 258 struct freelQ freelQ[SGE_FREELQ_N]; /* buffer free lists */ 259 struct respQ respQ; /* response Q */ 260 unsigned long stopped_tx_queues; /* bitmap of suspended Tx queues */ 261 unsigned int rx_pkt_pad; /* RX padding for L2 packets */ 262 unsigned int jumbo_fl; /* jumbo freelist Q index */ 263 unsigned int intrtimer_nres; /* no-resource interrupt timer */ 264 unsigned int fixed_intrtimer;/* non-adaptive interrupt timer */ 265 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ 266 struct timer_list espibug_timer; 267 unsigned long espibug_timeout; 268 struct sk_buff *espibug_skb[MAX_NPORTS]; 269 u32 sge_control; /* shadow value of sge control reg */ 270 struct sge_intr_counts stats; 271 struct sge_port_stats __percpu *port_stats[MAX_NPORTS]; 272 struct sched *tx_sched; 273 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; 274}; 275 276/* 277 * stop tasklet and free all pending skb's 278 */ 279static void tx_sched_stop(struct sge *sge) 280{ 281 struct sched *s = sge->tx_sched; 282 int i; 283 284 tasklet_kill(&s->sched_tsk); 285 286 for (i = 0; i < MAX_NPORTS; i++) 287 __skb_queue_purge(&s->p[s->port].skbq); 288} 289 290/* 291 * t1_sched_update_parms() is called when the MTU or link speed changes. It 292 * re-computes scheduler parameters to scope with the change. 293 */ 294unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port, 295 unsigned int mtu, unsigned int speed) 296{ 297 struct sched *s = sge->tx_sched; 298 struct sched_port *p = &s->p[port]; 299 unsigned int max_avail_segs; 300 301 pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed); 302 if (speed) 303 p->speed = speed; 304 if (mtu) 305 p->mtu = mtu; 306 307 if (speed || mtu) { 308 unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40); 309 do_div(drain, (p->mtu + 50) * 1000); 310 p->drain_bits_per_1024ns = (unsigned int) drain; 311 312 if (p->speed < 1000) 313 p->drain_bits_per_1024ns = 314 90 * p->drain_bits_per_1024ns / 100; 315 } 316 317 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) { 318 p->drain_bits_per_1024ns -= 16; 319 s->max_avail = max(4096U, p->mtu + 16 + 14 + 4); 320 max_avail_segs = max(1U, 4096 / (p->mtu - 40)); 321 } else { 322 s->max_avail = 16384; 323 max_avail_segs = max(1U, 9000 / (p->mtu - 40)); 324 } 325 326 pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u " 327 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu, 328 p->speed, s->max_avail, max_avail_segs, 329 p->drain_bits_per_1024ns); 330 331 return max_avail_segs * (p->mtu - 40); 332} 333 334 335 336/* 337 * get_clock() implements a ns clock (see ktime_get) 338 */ 339static inline ktime_t get_clock(void) 340{ 341 struct timespec ts; 342 343 ktime_get_ts(&ts); 344 return timespec_to_ktime(ts); 345} 346 347/* 348 * tx_sched_init() allocates resources and does basic initialization. 
349 */ 350static int tx_sched_init(struct sge *sge) 351{ 352 struct sched *s; 353 int i; 354 355 s = kzalloc(sizeof (struct sched), GFP_KERNEL); 356 if (!s) 357 return -ENOMEM; 358 359 pr_debug("tx_sched_init\n"); 360 tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge); 361 sge->tx_sched = s; 362 363 for (i = 0; i < MAX_NPORTS; i++) { 364 skb_queue_head_init(&s->p[i].skbq); 365 t1_sched_update_parms(sge, i, 1500, 1000); 366 } 367 368 return 0; 369} 370 371/* 372 * sched_update_avail() computes the delta since the last time it was called 373 * and updates the per port quota (number of bits that can be sent to the any 374 * port). 375 */ 376static inline int sched_update_avail(struct sge *sge) 377{ 378 struct sched *s = sge->tx_sched; 379 ktime_t now = get_clock(); 380 unsigned int i; 381 long long delta_time_ns; 382 383 delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated)); 384 385 pr_debug("sched_update_avail delta=%lld\n", delta_time_ns); 386 if (delta_time_ns < 15000) 387 return 0; 388 389 for (i = 0; i < MAX_NPORTS; i++) { 390 struct sched_port *p = &s->p[i]; 391 unsigned int delta_avail; 392 393 delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13; 394 p->avail = min(p->avail + delta_avail, s->max_avail); 395 } 396 397 s->last_updated = now; 398 399 return 1; 400} 401 402/* 403 * sched_skb() is called from two different places. In the tx path, any 404 * packet generating load on an output port will call sched_skb() 405 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq 406 * context (skb == NULL). 407 * The scheduler only returns a skb (which will then be sent) if the 408 * length of the skb is <= the current quota of the output port. 409 */ 410static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb, 411 unsigned int credits) 412{ 413 struct sched *s = sge->tx_sched; 414 struct sk_buff_head *skbq; 415 unsigned int i, len, update = 1; 416 417 pr_debug("sched_skb %p\n", skb); 418 if (!skb) { 419 if (!s->num) 420 return NULL; 421 } else { 422 skbq = &s->p[skb->dev->if_port].skbq; 423 __skb_queue_tail(skbq, skb); 424 s->num++; 425 skb = NULL; 426 } 427 428 if (credits < MAX_SKB_FRAGS + 1) 429 goto out; 430 431again: 432 for (i = 0; i < MAX_NPORTS; i++) { 433 s->port = (s->port + 1) & (MAX_NPORTS - 1); 434 skbq = &s->p[s->port].skbq; 435 436 skb = skb_peek(skbq); 437 438 if (!skb) 439 continue; 440 441 len = skb->len; 442 if (len <= s->p[s->port].avail) { 443 s->p[s->port].avail -= len; 444 s->num--; 445 __skb_unlink(skb, skbq); 446 goto out; 447 } 448 skb = NULL; 449 } 450 451 if (update-- && sched_update_avail(sge)) 452 goto again; 453 454out: 455 /* If there are more pending skbs, we use the hardware to schedule us 456 * again. 457 */ 458 if (s->num && !skb) { 459 struct cmdQ *q = &sge->cmdQ[0]; 460 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 461 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { 462 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 463 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); 464 } 465 } 466 pr_debug("sched_skb ret %p\n", skb); 467 468 return skb; 469} 470 471/* 472 * PIO to indicate that memory mapped Q contains valid descriptor(s). 473 */ 474static inline void doorbell_pio(struct adapter *adapter, u32 val) 475{ 476 wmb(); 477 writel(val, adapter->regs + A_SG_DOORBELL); 478} 479 480/* 481 * Frees all RX buffers on the freelist Q. The caller must make sure that 482 * the SGE is turned off before calling this function. 
483 */ 484static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) 485{ 486 unsigned int cidx = q->cidx; 487 488 while (q->credits--) { 489 struct freelQ_ce *ce = &q->centries[cidx]; 490 491 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), 492 dma_unmap_len(ce, dma_len), 493 PCI_DMA_FROMDEVICE); 494 dev_kfree_skb(ce->skb); 495 ce->skb = NULL; 496 if (++cidx == q->size) 497 cidx = 0; 498 } 499} 500 501/* 502 * Free RX free list and response queue resources. 503 */ 504static void free_rx_resources(struct sge *sge) 505{ 506 struct pci_dev *pdev = sge->adapter->pdev; 507 unsigned int size, i; 508 509 if (sge->respQ.entries) { 510 size = sizeof(struct respQ_e) * sge->respQ.size; 511 pci_free_consistent(pdev, size, sge->respQ.entries, 512 sge->respQ.dma_addr); 513 } 514 515 for (i = 0; i < SGE_FREELQ_N; i++) { 516 struct freelQ *q = &sge->freelQ[i]; 517 518 if (q->centries) { 519 free_freelQ_buffers(pdev, q); 520 kfree(q->centries); 521 } 522 if (q->entries) { 523 size = sizeof(struct freelQ_e) * q->size; 524 pci_free_consistent(pdev, size, q->entries, 525 q->dma_addr); 526 } 527 } 528} 529 530/* 531 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a 532 * response queue. 533 */ 534static int alloc_rx_resources(struct sge *sge, struct sge_params *p) 535{ 536 struct pci_dev *pdev = sge->adapter->pdev; 537 unsigned int size, i; 538 539 for (i = 0; i < SGE_FREELQ_N; i++) { 540 struct freelQ *q = &sge->freelQ[i]; 541 542 q->genbit = 1; 543 q->size = p->freelQ_size[i]; 544 q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN; 545 size = sizeof(struct freelQ_e) * q->size; 546 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); 547 if (!q->entries) 548 goto err_no_mem; 549 550 size = sizeof(struct freelQ_ce) * q->size; 551 q->centries = kzalloc(size, GFP_KERNEL); 552 if (!q->centries) 553 goto err_no_mem; 554 } 555 556 /* 557 * Calculate the buffer sizes for the two free lists. FL0 accommodates 558 * regular sized Ethernet frames, FL1 is sized not to exceed 16K, 559 * including all the sk_buff overhead. 560 * 561 * Note: For T2 FL0 and FL1 are reversed. 562 */ 563 sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE + 564 sizeof(struct cpl_rx_data) + 565 sge->freelQ[!sge->jumbo_fl].dma_offset; 566 567 size = (16 * 1024) - 568 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 569 570 sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; 571 572 /* 573 * Setup which skb recycle Q should be used when recycling buffers from 574 * each free list. 575 */ 576 sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0; 577 sge->freelQ[sge->jumbo_fl].recycleq_idx = 1; 578 579 sge->respQ.genbit = 1; 580 sge->respQ.size = SGE_RESPQ_E_N; 581 sge->respQ.credits = 0; 582 size = sizeof(struct respQ_e) * sge->respQ.size; 583 sge->respQ.entries = 584 pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr); 585 if (!sge->respQ.entries) 586 goto err_no_mem; 587 return 0; 588 589err_no_mem: 590 free_rx_resources(sge); 591 return -ENOMEM; 592} 593 594/* 595 * Reclaims n TX descriptors and frees the buffers associated with them. 
596 */ 597static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n) 598{ 599 struct cmdQ_ce *ce; 600 struct pci_dev *pdev = sge->adapter->pdev; 601 unsigned int cidx = q->cidx; 602 603 q->in_use -= n; 604 ce = &q->centries[cidx]; 605 while (n--) { 606 if (likely(dma_unmap_len(ce, dma_len))) { 607 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), 608 dma_unmap_len(ce, dma_len), 609 PCI_DMA_TODEVICE); 610 if (q->sop) 611 q->sop = 0; 612 } 613 if (ce->skb) { 614 dev_kfree_skb_any(ce->skb); 615 q->sop = 1; 616 } 617 ce++; 618 if (++cidx == q->size) { 619 cidx = 0; 620 ce = q->centries; 621 } 622 } 623 q->cidx = cidx; 624} 625 626/* 627 * Free TX resources. 628 * 629 * Assumes that SGE is stopped and all interrupts are disabled. 630 */ 631static void free_tx_resources(struct sge *sge) 632{ 633 struct pci_dev *pdev = sge->adapter->pdev; 634 unsigned int size, i; 635 636 for (i = 0; i < SGE_CMDQ_N; i++) { 637 struct cmdQ *q = &sge->cmdQ[i]; 638 639 if (q->centries) { 640 if (q->in_use) 641 free_cmdQ_buffers(sge, q, q->in_use); 642 kfree(q->centries); 643 } 644 if (q->entries) { 645 size = sizeof(struct cmdQ_e) * q->size; 646 pci_free_consistent(pdev, size, q->entries, 647 q->dma_addr); 648 } 649 } 650} 651 652/* 653 * Allocates basic TX resources, consisting of memory mapped command Qs. 654 */ 655static int alloc_tx_resources(struct sge *sge, struct sge_params *p) 656{ 657 struct pci_dev *pdev = sge->adapter->pdev; 658 unsigned int size, i; 659 660 for (i = 0; i < SGE_CMDQ_N; i++) { 661 struct cmdQ *q = &sge->cmdQ[i]; 662 663 q->genbit = 1; 664 q->sop = 1; 665 q->size = p->cmdQ_size[i]; 666 q->in_use = 0; 667 q->status = 0; 668 q->processed = q->cleaned = 0; 669 q->stop_thres = 0; 670 spin_lock_init(&q->lock); 671 size = sizeof(struct cmdQ_e) * q->size; 672 q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr); 673 if (!q->entries) 674 goto err_no_mem; 675 676 size = sizeof(struct cmdQ_ce) * q->size; 677 q->centries = kzalloc(size, GFP_KERNEL); 678 if (!q->centries) 679 goto err_no_mem; 680 } 681 682 /* 683 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE 684 * only. For queue 0 set the stop threshold so we can handle one more 685 * packet from each port, plus reserve an additional 24 entries for 686 * Ethernet packets only. Queue 1 never suspends nor do we reserve 687 * space for Ethernet packets. 688 */ 689 sge->cmdQ[0].stop_thres = sge->adapter->params.nports * 690 (MAX_SKB_FRAGS + 1); 691 return 0; 692 693err_no_mem: 694 free_tx_resources(sge); 695 return -ENOMEM; 696} 697 698static inline void setup_ring_params(struct adapter *adapter, u64 addr, 699 u32 size, int base_reg_lo, 700 int base_reg_hi, int size_reg) 701{ 702 writel((u32)addr, adapter->regs + base_reg_lo); 703 writel(addr >> 32, adapter->regs + base_reg_hi); 704 writel(size, adapter->regs + size_reg); 705} 706 707/* 708 * Enable/disable VLAN acceleration. 709 */ 710void t1_set_vlan_accel(struct adapter *adapter, int on_off) 711{ 712 struct sge *sge = adapter->sge; 713 714 sge->sge_control &= ~F_VLAN_XTRACT; 715 if (on_off) 716 sge->sge_control |= F_VLAN_XTRACT; 717 if (adapter->open_device_map) { 718 writel(sge->sge_control, adapter->regs + A_SG_CONTROL); 719 readl(adapter->regs + A_SG_CONTROL); /* flush */ 720 } 721} 722 723/* 724 * Programs the various SGE registers. However, the engine is not yet enabled, 725 * but sge->sge_control is setup and ready to go. 
726 */ 727static void configure_sge(struct sge *sge, struct sge_params *p) 728{ 729 struct adapter *ap = sge->adapter; 730 731 writel(0, ap->regs + A_SG_CONTROL); 732 setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size, 733 A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE); 734 setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size, 735 A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE); 736 setup_ring_params(ap, sge->freelQ[0].dma_addr, 737 sge->freelQ[0].size, A_SG_FL0BASELWR, 738 A_SG_FL0BASEUPR, A_SG_FL0SIZE); 739 setup_ring_params(ap, sge->freelQ[1].dma_addr, 740 sge->freelQ[1].size, A_SG_FL1BASELWR, 741 A_SG_FL1BASEUPR, A_SG_FL1SIZE); 742 743 /* The threshold comparison uses <. */ 744 writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD); 745 746 setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size, 747 A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE); 748 writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT); 749 750 sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE | 751 F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE | 752 V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE | 753 V_RX_PKT_OFFSET(sge->rx_pkt_pad); 754 755#if defined(__BIG_ENDIAN_BITFIELD) 756 sge->sge_control |= F_ENABLE_BIG_ENDIAN; 757#endif 758 759 /* Initialize no-resource timer */ 760 sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap); 761 762 t1_sge_set_coalesce_params(sge, p); 763} 764 765/* 766 * Return the payload capacity of the jumbo free-list buffers. 767 */ 768static inline unsigned int jumbo_payload_capacity(const struct sge *sge) 769{ 770 return sge->freelQ[sge->jumbo_fl].rx_buffer_size - 771 sge->freelQ[sge->jumbo_fl].dma_offset - 772 sizeof(struct cpl_rx_data); 773} 774 775/* 776 * Frees all SGE related resources and the sge structure itself 777 */ 778void t1_sge_destroy(struct sge *sge) 779{ 780 int i; 781 782 for_each_port(sge->adapter, i) 783 free_percpu(sge->port_stats[i]); 784 785 kfree(sge->tx_sched); 786 free_tx_resources(sge); 787 free_rx_resources(sge); 788 kfree(sge); 789} 790 791/* 792 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist 793 * context Q) until the Q is full or alloc_skb fails. 794 * 795 * It is possible that the generation bits already match, indicating that the 796 * buffer is already valid and nothing needs to be done. This happens when we 797 * copied a received buffer into a new sk_buff during the interrupt processing. 798 * 799 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad), 800 * we specify a RX_OFFSET in order to make sure that the IP header is 4B 801 * aligned. 
802 */ 803static void refill_free_list(struct sge *sge, struct freelQ *q) 804{ 805 struct pci_dev *pdev = sge->adapter->pdev; 806 struct freelQ_ce *ce = &q->centries[q->pidx]; 807 struct freelQ_e *e = &q->entries[q->pidx]; 808 unsigned int dma_len = q->rx_buffer_size - q->dma_offset; 809 810 while (q->credits < q->size) { 811 struct sk_buff *skb; 812 dma_addr_t mapping; 813 814 skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC); 815 if (!skb) 816 break; 817 818 skb_reserve(skb, q->dma_offset); 819 mapping = pci_map_single(pdev, skb->data, dma_len, 820 PCI_DMA_FROMDEVICE); 821 skb_reserve(skb, sge->rx_pkt_pad); 822 823 ce->skb = skb; 824 dma_unmap_addr_set(ce, dma_addr, mapping); 825 dma_unmap_len_set(ce, dma_len, dma_len); 826 e->addr_lo = (u32)mapping; 827 e->addr_hi = (u64)mapping >> 32; 828 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); 829 wmb(); 830 e->gen2 = V_CMD_GEN2(q->genbit); 831 832 e++; 833 ce++; 834 if (++q->pidx == q->size) { 835 q->pidx = 0; 836 q->genbit ^= 1; 837 ce = q->centries; 838 e = q->entries; 839 } 840 q->credits++; 841 } 842} 843 844/* 845 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4 846 * of both rings, we go into 'few interrupt mode' in order to give the system 847 * time to free up resources. 848 */ 849static void freelQs_empty(struct sge *sge) 850{ 851 struct adapter *adapter = sge->adapter; 852 u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE); 853 u32 irqholdoff_reg; 854 855 refill_free_list(sge, &sge->freelQ[0]); 856 refill_free_list(sge, &sge->freelQ[1]); 857 858 if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) && 859 sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) { 860 irq_reg |= F_FL_EXHAUSTED; 861 irqholdoff_reg = sge->fixed_intrtimer; 862 } else { 863 /* Clear the F_FL_EXHAUSTED interrupts for now */ 864 irq_reg &= ~F_FL_EXHAUSTED; 865 irqholdoff_reg = sge->intrtimer_nres; 866 } 867 writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER); 868 writel(irq_reg, adapter->regs + A_SG_INT_ENABLE); 869 870 /* We reenable the Qs to force a freelist GTS interrupt later */ 871 doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE); 872} 873 874#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA) 875#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) 876#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \ 877 F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH) 878 879/* 880 * Disable SGE Interrupts 881 */ 882void t1_sge_intr_disable(struct sge *sge) 883{ 884 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); 885 886 writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); 887 writel(0, sge->adapter->regs + A_SG_INT_ENABLE); 888} 889 890/* 891 * Enable SGE interrupts. 892 */ 893void t1_sge_intr_enable(struct sge *sge) 894{ 895 u32 en = SGE_INT_ENABLE; 896 u32 val = readl(sge->adapter->regs + A_PL_ENABLE); 897 898 if (sge->adapter->flags & TSO_CAPABLE) 899 en &= ~F_PACKET_TOO_BIG; 900 writel(en, sge->adapter->regs + A_SG_INT_ENABLE); 901 writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE); 902} 903 904/* 905 * Clear SGE interrupts. 
906 */ 907void t1_sge_intr_clear(struct sge *sge) 908{ 909 writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE); 910 writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE); 911} 912 913/* 914 * SGE 'Error' interrupt handler 915 */ 916int t1_sge_intr_error_handler(struct sge *sge) 917{ 918 struct adapter *adapter = sge->adapter; 919 u32 cause = readl(adapter->regs + A_SG_INT_CAUSE); 920 921 if (adapter->flags & TSO_CAPABLE) 922 cause &= ~F_PACKET_TOO_BIG; 923 if (cause & F_RESPQ_EXHAUSTED) 924 sge->stats.respQ_empty++; 925 if (cause & F_RESPQ_OVERFLOW) { 926 sge->stats.respQ_overflow++; 927 pr_alert("%s: SGE response queue overflow\n", 928 adapter->name); 929 } 930 if (cause & F_FL_EXHAUSTED) { 931 sge->stats.freelistQ_empty++; 932 freelQs_empty(sge); 933 } 934 if (cause & F_PACKET_TOO_BIG) { 935 sge->stats.pkt_too_big++; 936 pr_alert("%s: SGE max packet size exceeded\n", 937 adapter->name); 938 } 939 if (cause & F_PACKET_MISMATCH) { 940 sge->stats.pkt_mismatch++; 941 pr_alert("%s: SGE packet mismatch\n", adapter->name); 942 } 943 if (cause & SGE_INT_FATAL) 944 t1_fatal_err(adapter); 945 946 writel(cause, adapter->regs + A_SG_INT_CAUSE); 947 return 0; 948} 949 950const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge) 951{ 952 return &sge->stats; 953} 954 955void t1_sge_get_port_stats(const struct sge *sge, int port, 956 struct sge_port_stats *ss) 957{ 958 int cpu; 959 960 memset(ss, 0, sizeof(*ss)); 961 for_each_possible_cpu(cpu) { 962 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); 963 964 ss->rx_cso_good += st->rx_cso_good; 965 ss->tx_cso += st->tx_cso; 966 ss->tx_tso += st->tx_tso; 967 ss->tx_need_hdrroom += st->tx_need_hdrroom; 968 ss->vlan_xtract += st->vlan_xtract; 969 ss->vlan_insert += st->vlan_insert; 970 } 971} 972 973/** 974 * recycle_fl_buf - recycle a free list buffer 975 * @fl: the free list 976 * @idx: index of buffer to recycle 977 * 978 * Recycles the specified buffer on the given free list by adding it at 979 * the next available slot on the list. 980 */ 981static void recycle_fl_buf(struct freelQ *fl, int idx) 982{ 983 struct freelQ_e *from = &fl->entries[idx]; 984 struct freelQ_e *to = &fl->entries[fl->pidx]; 985 986 fl->centries[fl->pidx] = fl->centries[idx]; 987 to->addr_lo = from->addr_lo; 988 to->addr_hi = from->addr_hi; 989 to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit); 990 wmb(); 991 to->gen2 = V_CMD_GEN2(fl->genbit); 992 fl->credits++; 993 994 if (++fl->pidx == fl->size) { 995 fl->pidx = 0; 996 fl->genbit ^= 1; 997 } 998} 999 1000static int copybreak __read_mostly = 256; 1001module_param(copybreak, int, 0); 1002MODULE_PARM_DESC(copybreak, "Receive copy threshold"); 1003 1004/** 1005 * get_packet - return the next ingress packet buffer 1006 * @pdev: the PCI device that received the packet 1007 * @fl: the SGE free list holding the packet 1008 * @len: the actual packet length, excluding any SGE padding 1009 * 1010 * Get the next packet from a free list and complete setup of the 1011 * sk_buff. If the packet is small we make a copy and recycle the 1012 * original buffer, otherwise we use the original buffer itself. If a 1013 * positive drop threshold is supplied packets are dropped and their 1014 * buffers recycled if (a) the number of remaining buffers is under the 1015 * threshold and the packet is too big to copy, or (b) the packet should 1016 * be copied but there is no memory for the copy. 
1017 */ 1018static inline struct sk_buff *get_packet(struct pci_dev *pdev, 1019 struct freelQ *fl, unsigned int len) 1020{ 1021 struct sk_buff *skb; 1022 const struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1023 1024 if (len < copybreak) { 1025 skb = alloc_skb(len + 2, GFP_ATOMIC); 1026 if (!skb) 1027 goto use_orig_buf; 1028 1029 skb_reserve(skb, 2); /* align IP header */ 1030 skb_put(skb, len); 1031 pci_dma_sync_single_for_cpu(pdev, 1032 dma_unmap_addr(ce, dma_addr), 1033 dma_unmap_len(ce, dma_len), 1034 PCI_DMA_FROMDEVICE); 1035 skb_copy_from_linear_data(ce->skb, skb->data, len); 1036 pci_dma_sync_single_for_device(pdev, 1037 dma_unmap_addr(ce, dma_addr), 1038 dma_unmap_len(ce, dma_len), 1039 PCI_DMA_FROMDEVICE); 1040 recycle_fl_buf(fl, fl->cidx); 1041 return skb; 1042 } 1043 1044use_orig_buf: 1045 if (fl->credits < 2) { 1046 recycle_fl_buf(fl, fl->cidx); 1047 return NULL; 1048 } 1049 1050 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr), 1051 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1052 skb = ce->skb; 1053 prefetch(skb->data); 1054 1055 skb_put(skb, len); 1056 return skb; 1057} 1058 1059/** 1060 * unexpected_offload - handle an unexpected offload packet 1061 * @adapter: the adapter 1062 * @fl: the free list that received the packet 1063 * 1064 * Called when we receive an unexpected offload packet (e.g., the TOE 1065 * function is disabled or the card is a NIC). Prints a message and 1066 * recycles the buffer. 1067 */ 1068static void unexpected_offload(struct adapter *adapter, struct freelQ *fl) 1069{ 1070 struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1071 struct sk_buff *skb = ce->skb; 1072 1073 pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr), 1074 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1075 pr_err("%s: unexpected offload packet, cmd %u\n", 1076 adapter->name, *skb->data); 1077 recycle_fl_buf(fl, fl->cidx); 1078} 1079 1080/* 1081 * T1/T2 SGE limits the maximum DMA size per TX descriptor to 1082 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the 1083 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner. 1084 * Note that the *_large_page_tx_descs stuff will be optimized out when 1085 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN. 1086 * 1087 * compute_large_page_descs() computes how many additional descriptors are 1088 * required to break down the stack's request. 1089 */ 1090static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb) 1091{ 1092 unsigned int count = 0; 1093 1094 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { 1095 unsigned int nfrags = skb_shinfo(skb)->nr_frags; 1096 unsigned int i, len = skb_headlen(skb); 1097 while (len > SGE_TX_DESC_MAX_PLEN) { 1098 count++; 1099 len -= SGE_TX_DESC_MAX_PLEN; 1100 } 1101 for (i = 0; nfrags--; i++) { 1102 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1103 len = frag->size; 1104 while (len > SGE_TX_DESC_MAX_PLEN) { 1105 count++; 1106 len -= SGE_TX_DESC_MAX_PLEN; 1107 } 1108 } 1109 } 1110 return count; 1111} 1112 1113/* 1114 * Write a cmdQ entry. 1115 * 1116 * Since this function writes the 'flags' field, it must not be used to 1117 * write the first cmdQ entry. 
1118 */ 1119static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping, 1120 unsigned int len, unsigned int gen, 1121 unsigned int eop) 1122{ 1123 BUG_ON(len > SGE_TX_DESC_MAX_PLEN); 1124 1125 e->addr_lo = (u32)mapping; 1126 e->addr_hi = (u64)mapping >> 32; 1127 e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen); 1128 e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen); 1129} 1130 1131/* 1132 * See comment for previous function. 1133 * 1134 * write_tx_descs_large_page() writes additional SGE tx descriptors if 1135 * *desc_len exceeds HW's capability. 1136 */ 1137static inline unsigned int write_large_page_tx_descs(unsigned int pidx, 1138 struct cmdQ_e **e, 1139 struct cmdQ_ce **ce, 1140 unsigned int *gen, 1141 dma_addr_t *desc_mapping, 1142 unsigned int *desc_len, 1143 unsigned int nfrags, 1144 struct cmdQ *q) 1145{ 1146 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { 1147 struct cmdQ_e *e1 = *e; 1148 struct cmdQ_ce *ce1 = *ce; 1149 1150 while (*desc_len > SGE_TX_DESC_MAX_PLEN) { 1151 *desc_len -= SGE_TX_DESC_MAX_PLEN; 1152 write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN, 1153 *gen, nfrags == 0 && *desc_len == 0); 1154 ce1->skb = NULL; 1155 dma_unmap_len_set(ce1, dma_len, 0); 1156 *desc_mapping += SGE_TX_DESC_MAX_PLEN; 1157 if (*desc_len) { 1158 ce1++; 1159 e1++; 1160 if (++pidx == q->size) { 1161 pidx = 0; 1162 *gen ^= 1; 1163 ce1 = q->centries; 1164 e1 = q->entries; 1165 } 1166 } 1167 } 1168 *e = e1; 1169 *ce = ce1; 1170 } 1171 return pidx; 1172} 1173 1174/* 1175 * Write the command descriptors to transmit the given skb starting at 1176 * descriptor pidx with the given generation. 1177 */ 1178static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb, 1179 unsigned int pidx, unsigned int gen, 1180 struct cmdQ *q) 1181{ 1182 dma_addr_t mapping, desc_mapping; 1183 struct cmdQ_e *e, *e1; 1184 struct cmdQ_ce *ce; 1185 unsigned int i, flags, first_desc_len, desc_len, 1186 nfrags = skb_shinfo(skb)->nr_frags; 1187 1188 e = e1 = &q->entries[pidx]; 1189 ce = &q->centries[pidx]; 1190 1191 mapping = pci_map_single(adapter->pdev, skb->data, 1192 skb_headlen(skb), PCI_DMA_TODEVICE); 1193 1194 desc_mapping = mapping; 1195 desc_len = skb_headlen(skb); 1196 1197 flags = F_CMD_DATAVALID | F_CMD_SOP | 1198 V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) | 1199 V_CMD_GEN2(gen); 1200 first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ? 
	e->addr_lo = (u32)desc_mapping;
	e->addr_hi = (u64)desc_mapping >> 32;
	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
	ce->skb = NULL;
	dma_unmap_len_set(ce, dma_len, 0);

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
	    desc_len > SGE_TX_DESC_MAX_PLEN) {
		desc_mapping += first_desc_len;
		desc_len -= first_desc_len;
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);

		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
	}

	ce->skb = NULL;
	dma_unmap_addr_set(ce, dma_addr, mapping);
	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));

	for (i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}

		mapping = pci_map_page(adapter->pdev, frag->page,
				       frag->page_offset, frag->size,
				       PCI_DMA_TODEVICE);
		desc_mapping = mapping;
		desc_len = frag->size;

		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
		ce->skb = NULL;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, frag->size);
	}
	ce->skb = skb;
	wmb();
	e->flags = flags;
}

/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
			 q->processed, q->cleaned);
		free_cmdQ_buffers(sge, q, reclaim);
		q->cleaned += reclaim;
	}
}

/*
 * Called from tasklet.  Checks the scheduler for any
 * pending skbs that can be sent.
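 * Takes the cmdQ0 lock, reclaims completed buffers, drains the scheduler
 * queues as far as the port quotas allow, and rings the doorbell if the
 * queue was idle.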
1283 */ 1284static void restart_sched(unsigned long arg) 1285{ 1286 struct sge *sge = (struct sge *) arg; 1287 struct adapter *adapter = sge->adapter; 1288 struct cmdQ *q = &sge->cmdQ[0]; 1289 struct sk_buff *skb; 1290 unsigned int credits, queued_skb = 0; 1291 1292 spin_lock(&q->lock); 1293 reclaim_completed_tx(sge, q); 1294 1295 credits = q->size - q->in_use; 1296 pr_debug("restart_sched credits=%d\n", credits); 1297 while ((skb = sched_skb(sge, NULL, credits)) != NULL) { 1298 unsigned int genbit, pidx, count; 1299 count = 1 + skb_shinfo(skb)->nr_frags; 1300 count += compute_large_page_tx_descs(skb); 1301 q->in_use += count; 1302 genbit = q->genbit; 1303 pidx = q->pidx; 1304 q->pidx += count; 1305 if (q->pidx >= q->size) { 1306 q->pidx -= q->size; 1307 q->genbit ^= 1; 1308 } 1309 write_tx_descs(adapter, skb, pidx, genbit, q); 1310 credits = q->size - q->in_use; 1311 queued_skb = 1; 1312 } 1313 1314 if (queued_skb) { 1315 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 1316 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { 1317 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 1318 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); 1319 } 1320 } 1321 spin_unlock(&q->lock); 1322} 1323 1324/** 1325 * sge_rx - process an ingress ethernet packet 1326 * @sge: the sge structure 1327 * @fl: the free list that contains the packet buffer 1328 * @len: the packet length 1329 * 1330 * Process an ingress ethernet pakcet and deliver it to the stack. 1331 */ 1332static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len) 1333{ 1334 struct sk_buff *skb; 1335 const struct cpl_rx_pkt *p; 1336 struct adapter *adapter = sge->adapter; 1337 struct sge_port_stats *st; 1338 1339 skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad); 1340 if (unlikely(!skb)) { 1341 sge->stats.rx_drops++; 1342 return; 1343 } 1344 1345 p = (const struct cpl_rx_pkt *) skb->data; 1346 if (p->iff >= adapter->params.nports) { 1347 kfree_skb(skb); 1348 return; 1349 } 1350 __skb_pull(skb, sizeof(*p)); 1351 1352 st = this_cpu_ptr(sge->port_stats[p->iff]); 1353 1354 skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); 1355 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && 1356 skb->protocol == htons(ETH_P_IP) && 1357 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { 1358 ++st->rx_cso_good; 1359 skb->ip_summed = CHECKSUM_UNNECESSARY; 1360 } else 1361 skb->ip_summed = CHECKSUM_NONE; 1362 1363 if (unlikely(adapter->vlan_grp && p->vlan_valid)) { 1364 st->vlan_xtract++; 1365 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, 1366 ntohs(p->vlan)); 1367 } else 1368 netif_receive_skb(skb); 1369} 1370 1371/* 1372 * Returns true if a command queue has enough available descriptors that 1373 * we can resume Tx operation after temporarily disabling its packet queue. 1374 */ 1375static inline int enough_free_Tx_descs(const struct cmdQ *q) 1376{ 1377 unsigned int r = q->processed - q->cleaned; 1378 1379 return q->in_use - r < (q->size >> 1); 1380} 1381 1382/* 1383 * Called when sufficient space has become available in the SGE command queues 1384 * after the Tx packet schedulers have been suspended to restart the Tx path. 
1385 */ 1386static void restart_tx_queues(struct sge *sge) 1387{ 1388 struct adapter *adap = sge->adapter; 1389 int i; 1390 1391 if (!enough_free_Tx_descs(&sge->cmdQ[0])) 1392 return; 1393 1394 for_each_port(adap, i) { 1395 struct net_device *nd = adap->port[i].dev; 1396 1397 if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) && 1398 netif_running(nd)) { 1399 sge->stats.cmdQ_restarted[2]++; 1400 netif_wake_queue(nd); 1401 } 1402 } 1403} 1404 1405/* 1406 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0 1407 * information. 1408 */ 1409static unsigned int update_tx_info(struct adapter *adapter, 1410 unsigned int flags, 1411 unsigned int pr0) 1412{ 1413 struct sge *sge = adapter->sge; 1414 struct cmdQ *cmdq = &sge->cmdQ[0]; 1415 1416 cmdq->processed += pr0; 1417 if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) { 1418 freelQs_empty(sge); 1419 flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE); 1420 } 1421 if (flags & F_CMDQ0_ENABLE) { 1422 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status); 1423 1424 if (cmdq->cleaned + cmdq->in_use != cmdq->processed && 1425 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) { 1426 set_bit(CMDQ_STAT_RUNNING, &cmdq->status); 1427 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); 1428 } 1429 if (sge->tx_sched) 1430 tasklet_hi_schedule(&sge->tx_sched->sched_tsk); 1431 1432 flags &= ~F_CMDQ0_ENABLE; 1433 } 1434 1435 if (unlikely(sge->stopped_tx_queues != 0)) 1436 restart_tx_queues(sge); 1437 1438 return flags; 1439} 1440 1441/* 1442 * Process SGE responses, up to the supplied budget. Returns the number of 1443 * responses processed. A negative budget is effectively unlimited. 1444 */ 1445static int process_responses(struct adapter *adapter, int budget) 1446{ 1447 struct sge *sge = adapter->sge; 1448 struct respQ *q = &sge->respQ; 1449 struct respQ_e *e = &q->entries[q->cidx]; 1450 int done = 0; 1451 unsigned int flags = 0; 1452 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; 1453 1454 while (done < budget && e->GenerationBit == q->genbit) { 1455 flags |= e->Qsleeping; 1456 1457 cmdq_processed[0] += e->Cmdq0CreditReturn; 1458 cmdq_processed[1] += e->Cmdq1CreditReturn; 1459 1460 /* We batch updates to the TX side to avoid cacheline 1461 * ping-pong of TX state information on MP where the sender 1462 * might run on a different CPU than this function... 1463 */ 1464 if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) { 1465 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1466 cmdq_processed[0] = 0; 1467 } 1468 1469 if (unlikely(cmdq_processed[1] > 16)) { 1470 sge->cmdQ[1].processed += cmdq_processed[1]; 1471 cmdq_processed[1] = 0; 1472 } 1473 1474 if (likely(e->DataValid)) { 1475 struct freelQ *fl = &sge->freelQ[e->FreelistQid]; 1476 1477 BUG_ON(!e->Sop || !e->Eop); 1478 if (unlikely(e->Offload)) 1479 unexpected_offload(adapter, fl); 1480 else 1481 sge_rx(sge, fl, e->BufferLength); 1482 1483 ++done; 1484 1485 /* 1486 * Note: this depends on each packet consuming a 1487 * single free-list buffer; cf. the BUG above. 
1488 */ 1489 if (++fl->cidx == fl->size) 1490 fl->cidx = 0; 1491 prefetch(fl->centries[fl->cidx].skb); 1492 1493 if (unlikely(--fl->credits < 1494 fl->size - SGE_FREEL_REFILL_THRESH)) 1495 refill_free_list(sge, fl); 1496 } else 1497 sge->stats.pure_rsps++; 1498 1499 e++; 1500 if (unlikely(++q->cidx == q->size)) { 1501 q->cidx = 0; 1502 q->genbit ^= 1; 1503 e = q->entries; 1504 } 1505 prefetch(e); 1506 1507 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { 1508 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); 1509 q->credits = 0; 1510 } 1511 } 1512 1513 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1514 sge->cmdQ[1].processed += cmdq_processed[1]; 1515 1516 return done; 1517} 1518 1519static inline int responses_pending(const struct adapter *adapter) 1520{ 1521 const struct respQ *Q = &adapter->sge->respQ; 1522 const struct respQ_e *e = &Q->entries[Q->cidx]; 1523 1524 return (e->GenerationBit == Q->genbit); 1525} 1526 1527/* 1528 * A simpler version of process_responses() that handles only pure (i.e., 1529 * non data-carrying) responses. Such respones are too light-weight to justify 1530 * calling a softirq when using NAPI, so we handle them specially in hard 1531 * interrupt context. The function is called with a pointer to a response, 1532 * which the caller must ensure is a valid pure response. Returns 1 if it 1533 * encounters a valid data-carrying response, 0 otherwise. 1534 */ 1535static int process_pure_responses(struct adapter *adapter) 1536{ 1537 struct sge *sge = adapter->sge; 1538 struct respQ *q = &sge->respQ; 1539 struct respQ_e *e = &q->entries[q->cidx]; 1540 const struct freelQ *fl = &sge->freelQ[e->FreelistQid]; 1541 unsigned int flags = 0; 1542 unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0}; 1543 1544 prefetch(fl->centries[fl->cidx].skb); 1545 if (e->DataValid) 1546 return 1; 1547 1548 do { 1549 flags |= e->Qsleeping; 1550 1551 cmdq_processed[0] += e->Cmdq0CreditReturn; 1552 cmdq_processed[1] += e->Cmdq1CreditReturn; 1553 1554 e++; 1555 if (unlikely(++q->cidx == q->size)) { 1556 q->cidx = 0; 1557 q->genbit ^= 1; 1558 e = q->entries; 1559 } 1560 prefetch(e); 1561 1562 if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { 1563 writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); 1564 q->credits = 0; 1565 } 1566 sge->stats.pure_rsps++; 1567 } while (e->GenerationBit == q->genbit && !e->DataValid); 1568 1569 flags = update_tx_info(adapter, flags, cmdq_processed[0]); 1570 sge->cmdQ[1].processed += cmdq_processed[1]; 1571 1572 return e->GenerationBit == q->genbit; 1573} 1574 1575/* 1576 * Handler for new data events when using NAPI. This does not need any locking 1577 * or protection from interrupts as data interrupts are off at this point and 1578 * other adapter interrupts do not interfere. 
1579 */ 1580int t1_poll(struct napi_struct *napi, int budget) 1581{ 1582 struct adapter *adapter = container_of(napi, struct adapter, napi); 1583 int work_done = process_responses(adapter, budget); 1584 1585 if (likely(work_done < budget)) { 1586 napi_complete(napi); 1587 writel(adapter->sge->respQ.cidx, 1588 adapter->regs + A_SG_SLEEPING); 1589 } 1590 return work_done; 1591} 1592 1593irqreturn_t t1_interrupt(int irq, void *data) 1594{ 1595 struct adapter *adapter = data; 1596 struct sge *sge = adapter->sge; 1597 int handled; 1598 1599 if (likely(responses_pending(adapter))) { 1600 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); 1601 1602 if (napi_schedule_prep(&adapter->napi)) { 1603 if (process_pure_responses(adapter)) 1604 __napi_schedule(&adapter->napi); 1605 else { 1606 /* no data, no NAPI needed */ 1607 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1608 /* undo schedule_prep */ 1609 napi_enable(&adapter->napi); 1610 } 1611 } 1612 return IRQ_HANDLED; 1613 } 1614 1615 spin_lock(&adapter->async_lock); 1616 handled = t1_slow_intr_handler(adapter); 1617 spin_unlock(&adapter->async_lock); 1618 1619 if (!handled) 1620 sge->stats.unhandled_irqs++; 1621 1622 return IRQ_RETVAL(handled != 0); 1623} 1624 1625/* 1626 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it. 1627 * 1628 * The code figures out how many entries the sk_buff will require in the 1629 * cmdQ and updates the cmdQ data structure with the state once the enqueue 1630 * has complete. Then, it doesn't access the global structure anymore, but 1631 * uses the corresponding fields on the stack. In conjuction with a spinlock 1632 * around that code, we can make the function reentrant without holding the 1633 * lock when we actually enqueue (which might be expensive, especially on 1634 * architectures with IO MMUs). 1635 * 1636 * This runs with softirqs disabled. 1637 */ 1638static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, 1639 unsigned int qid, struct net_device *dev) 1640{ 1641 struct sge *sge = adapter->sge; 1642 struct cmdQ *q = &sge->cmdQ[qid]; 1643 unsigned int credits, pidx, genbit, count, use_sched_skb = 0; 1644 1645 if (!spin_trylock(&q->lock)) 1646 return NETDEV_TX_LOCKED; 1647 1648 reclaim_completed_tx(sge, q); 1649 1650 pidx = q->pidx; 1651 credits = q->size - q->in_use; 1652 count = 1 + skb_shinfo(skb)->nr_frags; 1653 count += compute_large_page_tx_descs(skb); 1654 1655 /* Ethernet packet */ 1656 if (unlikely(credits < count)) { 1657 if (!netif_queue_stopped(dev)) { 1658 netif_stop_queue(dev); 1659 set_bit(dev->if_port, &sge->stopped_tx_queues); 1660 sge->stats.cmdQ_full[2]++; 1661 pr_err("%s: Tx ring full while queue awake!\n", 1662 adapter->name); 1663 } 1664 spin_unlock(&q->lock); 1665 return NETDEV_TX_BUSY; 1666 } 1667 1668 if (unlikely(credits - count < q->stop_thres)) { 1669 netif_stop_queue(dev); 1670 set_bit(dev->if_port, &sge->stopped_tx_queues); 1671 sge->stats.cmdQ_full[2]++; 1672 } 1673 1674 /* T204 cmdQ0 skbs that are destined for a certain port have to go 1675 * through the scheduler. 1676 */ 1677 if (sge->tx_sched && !qid && skb->dev) { 1678use_sched: 1679 use_sched_skb = 1; 1680 /* Note that the scheduler might return a different skb than 1681 * the one passed in. 
1682 */ 1683 skb = sched_skb(sge, skb, credits); 1684 if (!skb) { 1685 spin_unlock(&q->lock); 1686 return NETDEV_TX_OK; 1687 } 1688 pidx = q->pidx; 1689 count = 1 + skb_shinfo(skb)->nr_frags; 1690 count += compute_large_page_tx_descs(skb); 1691 } 1692 1693 q->in_use += count; 1694 genbit = q->genbit; 1695 pidx = q->pidx; 1696 q->pidx += count; 1697 if (q->pidx >= q->size) { 1698 q->pidx -= q->size; 1699 q->genbit ^= 1; 1700 } 1701 spin_unlock(&q->lock); 1702 1703 write_tx_descs(adapter, skb, pidx, genbit, q); 1704 1705 /* 1706 * We always ring the doorbell for cmdQ1. For cmdQ0, we only ring 1707 * the doorbell if the Q is asleep. There is a natural race, where 1708 * the hardware is going to sleep just after we checked, however, 1709 * then the interrupt handler will detect the outstanding TX packet 1710 * and ring the doorbell for us. 1711 */ 1712 if (qid) 1713 doorbell_pio(adapter, F_CMDQ1_ENABLE); 1714 else { 1715 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 1716 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { 1717 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); 1718 writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL); 1719 } 1720 } 1721 1722 if (use_sched_skb) { 1723 if (spin_trylock(&q->lock)) { 1724 credits = q->size - q->in_use; 1725 skb = NULL; 1726 goto use_sched; 1727 } 1728 } 1729 return NETDEV_TX_OK; 1730} 1731 1732#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14)) 1733 1734/* 1735 * eth_hdr_len - return the length of an Ethernet header 1736 * @data: pointer to the start of the Ethernet header 1737 * 1738 * Returns the length of an Ethernet header, including optional VLAN tag. 1739 */ 1740static inline int eth_hdr_len(const void *data) 1741{ 1742 const struct ethhdr *e = data; 1743 1744 return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN; 1745} 1746 1747/* 1748 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. 1749 */ 1750netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) 1751{ 1752 struct adapter *adapter = dev->ml_priv; 1753 struct sge *sge = adapter->sge; 1754 struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]); 1755 struct cpl_tx_pkt *cpl; 1756 struct sk_buff *orig_skb = skb; 1757 int ret; 1758 1759 if (skb->protocol == htons(ETH_P_CPL5)) 1760 goto send; 1761 1762 /* 1763 * We are using a non-standard hard_header_len. 1764 * Allocate more header room in the rare cases it is not big enough. 1765 */ 1766 if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) { 1767 skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso)); 1768 ++st->tx_need_hdrroom; 1769 dev_kfree_skb_any(orig_skb); 1770 if (!skb) 1771 return NETDEV_TX_OK; 1772 } 1773 1774 if (skb_shinfo(skb)->gso_size) { 1775 int eth_type; 1776 struct cpl_tx_pkt_lso *hdr; 1777 1778 ++st->tx_tso; 1779 1780 eth_type = skb_network_offset(skb) == ETH_HLEN ? 1781 CPL_ETH_II : CPL_ETH_II_VLAN; 1782 1783 hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr)); 1784 hdr->opcode = CPL_TX_PKT_LSO; 1785 hdr->ip_csum_dis = hdr->l4_csum_dis = 0; 1786 hdr->ip_hdr_words = ip_hdr(skb)->ihl; 1787 hdr->tcp_hdr_words = tcp_hdr(skb)->doff; 1788 hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type, 1789 skb_shinfo(skb)->gso_size)); 1790 hdr->len = htonl(skb->len - sizeof(*hdr)); 1791 cpl = (struct cpl_tx_pkt *)hdr; 1792 } else { 1793 /* 1794 * Packets shorter than ETH_HLEN can break the MAC, drop them 1795 * early. 
Also, we may get oversized packets because some 1796 * parts of the kernel don't handle our unusual hard_header_len 1797 * right, drop those too. 1798 */ 1799 if (unlikely(skb->len < ETH_HLEN || 1800 skb->len > dev->mtu + eth_hdr_len(skb->data))) { 1801 pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name, 1802 skb->len, eth_hdr_len(skb->data), dev->mtu); 1803 dev_kfree_skb_any(skb); 1804 return NETDEV_TX_OK; 1805 } 1806 1807 if (!(adapter->flags & UDP_CSUM_CAPABLE) && 1808 skb->ip_summed == CHECKSUM_PARTIAL && 1809 ip_hdr(skb)->protocol == IPPROTO_UDP) { 1810 if (unlikely(skb_checksum_help(skb))) { 1811 pr_debug("%s: unable to do udp checksum\n", dev->name); 1812 dev_kfree_skb_any(skb); 1813 return NETDEV_TX_OK; 1814 } 1815 } 1816 1817 /* Hmmm, assuming to catch the gratious arp... and we'll use 1818 * it to flush out stuck espi packets... 1819 */ 1820 if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) { 1821 if (skb->protocol == htons(ETH_P_ARP) && 1822 arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) { 1823 adapter->sge->espibug_skb[dev->if_port] = skb; 1824 /* We want to re-use this skb later. We 1825 * simply bump the reference count and it 1826 * will not be freed... 1827 */ 1828 skb = skb_get(skb); 1829 } 1830 } 1831 1832 cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl)); 1833 cpl->opcode = CPL_TX_PKT; 1834 cpl->ip_csum_dis = 1; /* SW calculates IP csum */ 1835 cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1; 1836 /* the length field isn't used so don't bother setting it */ 1837 1838 st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL); 1839 } 1840 cpl->iff = dev->if_port; 1841 1842#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 1843 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) { 1844 cpl->vlan_valid = 1; 1845 cpl->vlan = htons(vlan_tx_tag_get(skb)); 1846 st->vlan_insert++; 1847 } else 1848#endif 1849 cpl->vlan_valid = 0; 1850 1851send: 1852 ret = t1_sge_tx(skb, adapter, 0, dev); 1853 1854 /* If transmit busy, and we reallocated skb's due to headroom limit, 1855 * then silently discard to avoid leak. 1856 */ 1857 if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) { 1858 dev_kfree_skb_any(skb); 1859 ret = NETDEV_TX_OK; 1860 } 1861 return ret; 1862} 1863 1864/* 1865 * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled. 1866 */ 1867static void sge_tx_reclaim_cb(unsigned long data) 1868{ 1869 int i; 1870 struct sge *sge = (struct sge *)data; 1871 1872 for (i = 0; i < SGE_CMDQ_N; ++i) { 1873 struct cmdQ *q = &sge->cmdQ[i]; 1874 1875 if (!spin_trylock(&q->lock)) 1876 continue; 1877 1878 reclaim_completed_tx(sge, q); 1879 if (i == 0 && q->in_use) { /* flush pending credits */ 1880 writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL); 1881 } 1882 spin_unlock(&q->lock); 1883 } 1884 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); 1885} 1886 1887/* 1888 * Propagate changes of the SGE coalescing parameters to the HW. 1889 */ 1890int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p) 1891{ 1892 sge->fixed_intrtimer = p->rx_coalesce_usecs * 1893 core_ticks_per_usec(sge->adapter); 1894 writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER); 1895 return 0; 1896} 1897 1898/* 1899 * Allocates both RX and TX resources and configures the SGE. However, 1900 * the hardware is not enabled yet. 
1901 */ 1902int t1_sge_configure(struct sge *sge, struct sge_params *p) 1903{ 1904 if (alloc_rx_resources(sge, p)) 1905 return -ENOMEM; 1906 if (alloc_tx_resources(sge, p)) { 1907 free_rx_resources(sge); 1908 return -ENOMEM; 1909 } 1910 configure_sge(sge, p); 1911 1912 /* 1913 * Now that we have sized the free lists calculate the payload 1914 * capacity of the large buffers. Other parts of the driver use 1915 * this to set the max offload coalescing size so that RX packets 1916 * do not overflow our large buffers. 1917 */ 1918 p->large_buf_capacity = jumbo_payload_capacity(sge); 1919 return 0; 1920} 1921 1922/* 1923 * Disables the DMA engine. 1924 */ 1925void t1_sge_stop(struct sge *sge) 1926{ 1927 int i; 1928 writel(0, sge->adapter->regs + A_SG_CONTROL); 1929 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ 1930 1931 if (is_T2(sge->adapter)) 1932 del_timer_sync(&sge->espibug_timer); 1933 1934 del_timer_sync(&sge->tx_reclaim_timer); 1935 if (sge->tx_sched) 1936 tx_sched_stop(sge); 1937 1938 for (i = 0; i < MAX_NPORTS; i++) 1939 kfree_skb(sge->espibug_skb[i]); 1940} 1941 1942/* 1943 * Enables the DMA engine. 1944 */ 1945void t1_sge_start(struct sge *sge) 1946{ 1947 refill_free_list(sge, &sge->freelQ[0]); 1948 refill_free_list(sge, &sge->freelQ[1]); 1949 1950 writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL); 1951 doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE); 1952 readl(sge->adapter->regs + A_SG_CONTROL); /* flush */ 1953 1954 mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); 1955 1956 if (is_T2(sge->adapter)) 1957 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); 1958} 1959 1960/* 1961 * Callback for the T2 ESPI 'stuck packet feature' workaorund 1962 */ 1963static void espibug_workaround_t204(unsigned long data) 1964{ 1965 struct adapter *adapter = (struct adapter *)data; 1966 struct sge *sge = adapter->sge; 1967 unsigned int nports = adapter->params.nports; 1968 u32 seop[MAX_NPORTS]; 1969 1970 if (adapter->open_device_map & PORT_MASK) { 1971 int i; 1972 1973 if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0) 1974 return; 1975 1976 for (i = 0; i < nports; i++) { 1977 struct sk_buff *skb = sge->espibug_skb[i]; 1978 1979 if (!netif_running(adapter->port[i].dev) || 1980 netif_queue_stopped(adapter->port[i].dev) || 1981 !seop[i] || ((seop[i] & 0xfff) != 0) || !skb) 1982 continue; 1983 1984 if (!skb->cb[0]) { 1985 u8 ch_mac_addr[ETH_ALEN] = { 1986 0x0, 0x7, 0x43, 0x0, 0x0, 0x0 1987 }; 1988 1989 skb_copy_to_linear_data_offset(skb, 1990 sizeof(struct cpl_tx_pkt), 1991 ch_mac_addr, 1992 ETH_ALEN); 1993 skb_copy_to_linear_data_offset(skb, 1994 skb->len - 10, 1995 ch_mac_addr, 1996 ETH_ALEN); 1997 skb->cb[0] = 0xff; 1998 } 1999 2000 /* bump the reference count to avoid freeing of 2001 * the skb once the DMA has completed. 
2002 */ 2003 skb = skb_get(skb); 2004 t1_sge_tx(skb, adapter, 0, adapter->port[i].dev); 2005 } 2006 } 2007 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); 2008} 2009 2010static void espibug_workaround(unsigned long data) 2011{ 2012 struct adapter *adapter = (struct adapter *)data; 2013 struct sge *sge = adapter->sge; 2014 2015 if (netif_running(adapter->port[0].dev)) { 2016 struct sk_buff *skb = sge->espibug_skb[0]; 2017 u32 seop = t1_espi_get_mon(adapter, 0x930, 0); 2018 2019 if ((seop & 0xfff0fff) == 0xfff && skb) { 2020 if (!skb->cb[0]) { 2021 u8 ch_mac_addr[ETH_ALEN] = 2022 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0}; 2023 skb_copy_to_linear_data_offset(skb, 2024 sizeof(struct cpl_tx_pkt), 2025 ch_mac_addr, 2026 ETH_ALEN); 2027 skb_copy_to_linear_data_offset(skb, 2028 skb->len - 10, 2029 ch_mac_addr, 2030 ETH_ALEN); 2031 skb->cb[0] = 0xff; 2032 } 2033 2034 /* bump the reference count to avoid freeing of the 2035 * skb once the DMA has completed. 2036 */ 2037 skb = skb_get(skb); 2038 t1_sge_tx(skb, adapter, 0, adapter->port[0].dev); 2039 } 2040 } 2041 mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); 2042} 2043 2044/* 2045 * Creates a t1_sge structure and returns suggested resource parameters. 2046 */ 2047struct sge * __devinit t1_sge_create(struct adapter *adapter, 2048 struct sge_params *p) 2049{ 2050 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); 2051 int i; 2052 2053 if (!sge) 2054 return NULL; 2055 2056 sge->adapter = adapter; 2057 sge->netdev = adapter->port[0].dev; 2058 sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2; 2059 sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; 2060 2061 for_each_port(adapter, i) { 2062 sge->port_stats[i] = alloc_percpu(struct sge_port_stats); 2063 if (!sge->port_stats[i]) 2064 goto nomem_port; 2065 } 2066 2067 init_timer(&sge->tx_reclaim_timer); 2068 sge->tx_reclaim_timer.data = (unsigned long)sge; 2069 sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; 2070 2071 if (is_T2(sge->adapter)) { 2072 init_timer(&sge->espibug_timer); 2073 2074 if (adapter->params.nports > 1) { 2075 tx_sched_init(sge); 2076 sge->espibug_timer.function = espibug_workaround_t204; 2077 } else 2078 sge->espibug_timer.function = espibug_workaround; 2079 sge->espibug_timer.data = (unsigned long)sge->adapter; 2080 2081 sge->espibug_timeout = 1; 2082 /* for T204, every 10ms */ 2083 if (adapter->params.nports > 1) 2084 sge->espibug_timeout = HZ/100; 2085 } 2086 2087 2088 p->cmdQ_size[0] = SGE_CMDQ0_E_N; 2089 p->cmdQ_size[1] = SGE_CMDQ1_E_N; 2090 p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE; 2091 p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE; 2092 if (sge->tx_sched) { 2093 if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) 2094 p->rx_coalesce_usecs = 15; 2095 else 2096 p->rx_coalesce_usecs = 50; 2097 } else 2098 p->rx_coalesce_usecs = 50; 2099 2100 p->coalesce_enable = 0; 2101 p->sample_interval_usecs = 0; 2102 2103 return sge; 2104nomem_port: 2105 while (i >= 0) { 2106 free_percpu(sge->port_stats[i]); 2107 --i; 2108 } 2109 kfree(sge); 2110 return NULL; 2111 2112} 2113