/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2010 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#ifdef CONFIG_IXGBE_DCB
#include "ixgbe_dcb_82599.h"
#endif /* CONFIG_IXGBE_DCB */
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is an FCoE packet
 */
static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
{
	u16 p;

	p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
	if (p & IXGBE_RXDADV_PKTTYPE_ETQF) {
		p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK;
		p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT;
		return p == IXGBE_ETQF_FILTER_FCOE;
	}
	return false;
}
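/*
 * Note on the decode above: when the ETQF bit is set in pkt_info, the
 * masked and shifted field holds the index of the EtherType filter that
 * matched the frame, so the packet is FCoE only if that index equals
 * IXGBE_ETQF_FILTER_FCOE, i.e. the filter programmed with ETH_P_FCOE in
 * ixgbe_configure_fcoe() below.
 */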
/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns : none
 *
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 0;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}

/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by the ULD, i.e., the FCP layer of
 * libfc, to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len = 0;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;

	if (!netdev)
		goto out_ddp_put;

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto out_ddp_put;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto out_ddp_put;

	len = ddp->len;
	/* if there was an error, force invalidation of the ddp context */
	if (ddp->err) {
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));
		spin_unlock_bh(&fcoe->lock);
	}
	if (ddp->sgl)
		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
			     PCI_DMA_FROMDEVICE);
	pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_ddp_put:
	return len;
}
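/*
 * Note on the invalidation sequence above: as the mirror-image programming
 * sequence in ixgbe_fcoe_ddp_get() below suggests, FCFLT/FCBUFF carry the
 * context contents while FCFLTRW/FCDMARW select the xid, with the WE bit
 * committing the write; writing zeroed contents with WE set therefore
 * invalidates both the filter and the DMA context for that xid.
 */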
/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from the ULD, e.g., the FCP layer of
 * libfc, to set up ddp for the corresponding xid of the given sglist
 * for the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = 4096;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw;
	dma_addr_t addr;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	fcoe = &adapter->fcoe;
	if (!fcoe->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		return 0;
	}

	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	/* set up dma from the scsi command sgl */
	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, PCI_DMA_FROMDEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		return 0;
	}

	/* alloc the udl from our ddp pool */
	ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
				      "not enough descriptors\n",
				      xid, i, j, dmacount, (u64)addr);
				goto out_noddp_free;
			}

			/* get the offset and length within the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have a non-full bufflen */
	lastsize = thisoff + thislen;

	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;
	spin_lock_bh(&fcoe->lock);
	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
	/* program filter context */
	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
	spin_unlock_bh(&fcoe->lock);

	return 1;

out_noddp_free:
	pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	pci_unmap_sg(adapter->pdev, sgl, sgc, PCI_DMA_FROMDEVICE);
	return 0;
}
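/*
 * Worked example of the buffer-list constraints above, with hypothetical
 * DMA addresses: a single SG entry at addr 0x1200 with len 0x2710 yields
 *	j=0: thisoff=0x200, thislen=0xe00,  udl[0]=0x1000, firstoff=0x200
 *	j=1: thisoff=0,     thislen=0x1000, udl[1]=0x2000
 *	j=2: thisoff=0,     thislen=0x910,  udl[2]=0x3000
 * Only the first buffer may start at a non-zero offset and only the last
 * may end short of bufflen, so here lastsize = 0 + 0x910 = 0x910.
 */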
/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks the ddp status of the incoming frame.
 *
 * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
 * the skb should not be passed to the ULD, > 0 indicates the length
 * of data already ddp-ed.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	u16 xid;
	u32 fctl;
	u32 sterr, fceofe, fcerr, fcstat;
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;

	if (!ixgbe_rx_is_fcoe(rx_desc))
		goto ddp_out;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
	sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
	fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
	if (fcerr == IXGBE_FCERR_BADCRC)
		skb->ip_summed = CHECKSUM_NONE;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));
	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto ddp_out;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto ddp_out;

	ddp->err = (fcerr | fceofe);
	if (ddp->err)
		goto ddp_out;

	fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
	if (fcstat) {
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		/* unmap the sg list when FCP_RSP is received */
		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
			pci_unmap_sg(adapter->pdev, ddp->sgl,
				     ddp->sgc, PCI_DMA_FROMDEVICE);
			ddp->sgl = NULL;
			ddp->sgc = 0;
		}
		/* return 0 to bypass going to ULD for DDPed data */
		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
			rc = 0;
		else if (ddp->len)
			rc = ddp->len;
	}

ddp_out:
	return rc;
}
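/*
 * Note on the xid lookup above: FC_FC_EX_CTX set in F_CTL means the frame
 * came from the exchange responder, so the local end of the exchange is
 * the originator and is identified by OX_ID; otherwise the local end is
 * the responder and RX_ID is used. The recovered xid then indexes the
 * same ddp[] table that ixgbe_fcoe_ddp_get() populated.
 */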
/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @adapter: ixgbe adapter
 * @tx_ring: tx desc ring
 * @skb: associated skb
 * @tx_flags: tx flags
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
 */
int ixgbe_fso(struct ixgbe_adapter *adapter,
	      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
	      u32 tx_flags, u8 *hdr_len)
{
	u8 sof, eof;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof;
	u32 type_tucmd;
	u32 mss_l4len_idx;
	int mss = 0;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	struct ixgbe_adv_tx_context_desc *context_desc;
	struct fc_frame_header *fh;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		e_err(drv, "Wrong gso type %d: expecting SKB_GSO_FCOE\n",
		      skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* reset the headers to point at the FCoE and FC headers */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* set up SOF and ORIS */
	fcoe_sof_eof = 0;
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		e_warn(drv, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}

	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* set up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb)) {
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
		} else {
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		}
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		e_warn(drv, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}

	/* set up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* hdr_len includes fc_hdr if FCoE lso is enabled */
	*hdr_len = sizeof(struct fcoe_crc_eof);
	if (skb_is_gso(skb))
		*hdr_len += (skb_transport_offset(skb) +
			     sizeof(struct fc_frame_header));
	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = (skb_transport_offset(skb) +
			   sizeof(struct fc_frame_header));
	vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
			    << IXGBE_ADVTXD_MACLEN_SHIFT);
	vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);

	/* type_tucmd and mss: set TUCMD.FCoE to enable offload */
	type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
		     IXGBE_ADVTXT_TUCMD_FCOE;
	if (skb_is_gso(skb))
		mss = skb_shinfo(skb)->gso_size;
	/* mss_l4len_idx: use context index 1 for FSO, as for TSO; no L4LEN */
	mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
			(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/* write context desc */
	i = tx_ring->next_to_use;
	context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

	tx_buffer_info = &tx_ring->tx_buffer_info[i];
	tx_buffer_info->time_stamp = jiffies;
	tx_buffer_info->next_to_watch = i;

	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	return skb_is_gso(skb);
}
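/*
 * Worked example of the context-descriptor lengths above for an untagged
 * frame, with sizes as defined in the scsi/fc headers: mac_len = 14, so
 * the transport offset is 14 + 14 (struct fcoe_hdr) = 28. HEADLEN is then
 * 28 + 24 (struct fc_frame_header) = 52, the MACLEN field is 28 - 4 = 24,
 * and for LSO *hdr_len = 8 (struct fcoe_crc_eof) + 52 = 60.
 */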
/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	int i, fcoe_q, fcoe_i;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
#ifdef CONFIG_IXGBE_DCB
	u8 tc;
	u32 up2tc;
#endif

	/* create the pool for ddp if not created yet */
	if (!fcoe->pool) {
		/* allocate ddp pool */
		fcoe->pool = pci_pool_create("ixgbe_fcoe_ddp",
					     adapter->pdev, IXGBE_FCPTR_MAX,
					     IXGBE_FCPTR_ALIGN, PAGE_SIZE);
		if (!fcoe->pool)
			e_err(drv, "failed to allocate FCoE DDP pool\n");

		spin_lock_init(&fcoe->lock);
	}

	/* Enable L2 eth type filter for FCoE */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
	/* Enable L2 eth type filter for FIP */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
	if (f->indices) {
		/* Use multiple rx queues for FCoE by redirection table */
		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
			fcoe_i = f->mask + i % f->indices;
			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
		}
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
	} else {
		/* Use single rx queue for FCoE */
		fcoe_i = f->mask;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
				IXGBE_ETQS_QUEUE_EN |
				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
	}
	/* send FIP frames to the first FCoE queue */
	fcoe_i = f->mask;
	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCOELLI |
			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
#ifdef CONFIG_IXGBE_DCB
	up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
	for (i = 0; i < MAX_USER_PRIORITY; i++) {
		tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
		tc &= (MAX_TRAFFIC_CLASS - 1);
		if (fcoe->tc == tc) {
			fcoe->up = i;
			break;
		}
	}
#endif
}
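/*
 * Worked example of the redirection table above, with a hypothetical ring
 * layout: if f->mask = 64 and f->indices = 8, then FCRETA entry i maps to
 * rx ring 64 + (i % 8), so incoming FCoE traffic is spread round-robin
 * across rx rings 64..71, each entry programmed with that ring's hardware
 * register index (reg_idx).
 */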
/**
 * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
{
	int i;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	/* release ddp resource */
	if (fcoe->pool) {
		for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
			ixgbe_fcoe_ddp_put(adapter->netdev, i);
		pci_pool_destroy(fcoe->pool);
		fcoe->pool = NULL;
	}
}

/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_enable;

	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		goto out_enable;

	e_info(drv, "Enabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
	netdev->features |= NETIF_F_FCOE_CRC;
	netdev->features |= NETIF_F_FSO;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;

	ixgbe_init_interrupt_scheme(adapter);
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_enable:
	return rc;
}

/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_disable;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		goto out_disable;

	e_info(drv, "Disabling FCoE offload features.\n");
	netdev->features &= ~NETIF_F_FCOE_CRC;
	netdev->features &= ~NETIF_F_FSO;
	netdev->features &= ~NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = 0;
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = 0;
	ixgbe_cleanup_fcoe(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_disable:
	return rc;
}

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE
 * @adapter: ixgbe adapter
 *
 * Finds out the corresponding user priority bitmap from the current
 * traffic class that FCoE belongs to.
 *
 * Returns : 802.1p user priority bitmap for FCoE
 */
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter)
{
	return 1 << adapter->fcoe.up;
}

/**
 * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
 * @adapter: ixgbe adapter
 * @up: 802.1p user priority bitmap
 *
 * Finds out the traffic class from the input user priority
 * bitmap for FCoE.
 *
 * Returns : 0 on success, 1 on error
 */
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
{
	int i;
	u32 up2tc;

	/* a valid user priority bitmap must not be 0 */
	if (up) {
		/* from user priority to the corresponding traffic class */
		up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
		for (i = 0; i < MAX_USER_PRIORITY; i++) {
			if (up & (1 << i)) {
				up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
				up2tc &= (MAX_TRAFFIC_CLASS - 1);
				adapter->fcoe.tc = (u8)up2tc;
				adapter->fcoe.up = i;
				return 0;
			}
		}
	}

	return 1;
}
#endif /* CONFIG_IXGBE_DCB */
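/*
 * Note on the RTTUP2TC decode in ixgbe_fcoe_setapp() above: the register
 * packs one traffic-class field per user priority, each field
 * IXGBE_RTTUP2TC_UP_SHIFT bits wide, so shifting by
 * (i * IXGBE_RTTUP2TC_UP_SHIFT) and masking with (MAX_TRAFFIC_CLASS - 1)
 * extracts the TC for the lowest set bit i in @up. ixgbe_configure_fcoe()
 * walks the same register in the opposite direction to recover fcoe->up
 * from fcoe->tc.
 */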
/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev: the corresponding net_device
 * @wwn: the world wide name
 * @type: the type of world wide name
 *
 * Reports the node or port world wide name if both the prefix and the
 * SAN MAC address are valid. The wwn is then formed based on the NAA-2
 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	int rc = -EINVAL;
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

	if ((prefix != 0xffff) &&
	    is_valid_ether_addr(mac->san_addr)) {
		*wwn = ((u64) prefix << 48) |
		       ((u64) mac->san_addr[0] << 40) |
		       ((u64) mac->san_addr[1] << 32) |
		       ((u64) mac->san_addr[2] << 24) |
		       ((u64) mac->san_addr[3] << 16) |
		       ((u64) mac->san_addr[4] << 8) |
		       ((u64) mac->san_addr[5]);
		rc = 0;
	}
	return rc;
}
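/*
 * Worked example of the WWN formation above, with a hypothetical prefix
 * and SAN MAC address: prefix 0x2000 and san_addr 00:1b:21:aa:bb:cc
 * produce *wwn = 0x2000001b21aabbcc, i.e. the 16-bit NAA prefix followed
 * by the 48-bit SAN MAC address.
 */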