/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Cavium Networks
 */

#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/spinlock.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 extra bytes beyond the Ethernet header for the VLAN tag and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
		/* Physical address of the buffer */
		u64 addr:40;
	} s;
};
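
/*
 * Descriptor layout implied by the bit-field widths above (one 64-bit
 * word per ring entry, big-endian bit-field allocation as on the
 * MIPS/Octeon kernels this driver targets):
 *
 *   63:62 reserved | 61:48 len | 47 tstamp | 46:40 code | 39:0 addr
 *
 * A minimal compile-time sanity check one could add (not in the
 * original driver) would be:
 *   BUILD_BUG_ON(sizeof(union mgmt_port_ring_entry) != 8);
 */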

struct octeon_mgmt {
	struct net_device *netdev;
	int port;
	int irq;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll.  No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct phy_device *phydev;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	int port = p->port;
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	int port = p->port;
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}
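
/*
 * RX refill protocol: for each free slot we allocate and DMA-map an
 * skb, write a descriptor into the ring, sync the ring for the
 * device, and then write 1 to the IRING2 "doorbell" to hand that one
 * descriptor to the hardware.  ring_max_fill() keeps eight slots
 * unused, presumably so the hardware's producer index can never
 * catch up with and alias the software consumer index.
 */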

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding.  */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring.  */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell.  */
		cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
	}
}
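
/*
 * TX completion: ORCNT counts descriptors the hardware has finished
 * with.  For each completed entry we pop the matching skb (tx_list is
 * kept in ring order), unmap it, and write 1 back to ORCNT to
 * acknowledge (decrement) that one entry.  The tx_list lock is
 * dropped before the unmap and free so they run outside the critical
 * section.
 */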

static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	int port = p->port;
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer.  */
		cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers.  */
	drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
	bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));

	if (drop || bad) {
		/* Do an atomic update.  */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	unsigned long flags;

	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers.  */
	s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
	s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update.  */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}
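
/*
 * Each completed RX descriptor carries a code: RING_ENTRY_CODE_DONE
 * means a complete packet, RING_ENTRY_CODE_MORE means the packet
 * continues in the next descriptor (it did not fit in one buffer),
 * and anything else is an error.  receive_one() handles all three
 * cases and acknowledges one packet to the hardware via IRCNT.
 */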

static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	int port = p->port;
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;

	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up.  */
		skb_put(skb, re.s.len);
good:
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/*
		 * Packet split across skbs.  This can happen if we
		 * increase the MTU.  Buffers that are already in the
		 * rx ring can then end up being too small.  As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
			    && re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it.  */
		dev_kfree_skb_any(skb);
		/*
		 * Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess.  */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet.  */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
	return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	int port = p->port;
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
	while (work_done < budget && mix_ircnt.s.ircnt) {

		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets.  */
		mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}
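
/*
 * NAPI poll handler: consume up to "budget" packets.  Returning less
 * than the budget tells the core we are idle, so we complete NAPI
 * and re-enable the RX interrupt; otherwise we will be polled again.
 */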

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available.  */
		napi_complete(napi);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to a clean state.  */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
	cvmx_read_csr(CVMX_MIXX_CTL(p->port));
	cvmx_wait(64);

	mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}
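
/*
 * The address-filter CAM is six 64-bit registers, one per MAC octet:
 * byte i of CAM entry n is kept in bits [8n+7:8n] of cam[i], so the
 * CAM holds up to eight addresses.  cam_mask enables the entries
 * that have been filled in.
 */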

struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/*
		 * One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast.  */
		else
			multicast_mode = 0; /* 0 - Use CAM.  */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O.  */
	agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1;     /* Allow broadcast */

	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);

	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);

	/* Restore packet I/O.  */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	struct sockaddr *sa = addr;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}
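
/*
 * OCTEON_MGMT_RX_HEADROOM is ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * VLAN_HLEN (4) = 22 bytes, so e.g. the default MTU of 1500 programs
 * a maximum frame size of 1522 bytes into AGL_GMX_RXX_FRM_MAX, and
 * the jabber threshold is that value rounded up to a multiple of 8
 * (1528).  Despite its name, size_without_fcs does include
 * ETH_FCS_LEN via the headroom macro.
 */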

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 16383 bytes.
	 */
	if (size_without_fcs < 64 || size_without_fcs > 16383) {
		dev_warn(p->dev, "MTU must be between %d and %d.\n",
			 64 - OCTEON_MGMT_RX_HEADROOM,
			 16383 - OCTEON_MGMT_RX_HEADROOM);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;

	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
		       (size_without_fcs + 7) & 0xfff8);

	return 0;
}

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
	cvmx_read_csr(CVMX_MIXX_ISR(port));

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!p->phydev)
		return -EINVAL;

	return phy_mii_ioctl(p->phydev, rq, cmd);
}

static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	unsigned long flags;
	int link_changed = 0;

	spin_lock_irqsave(&p->lock, flags);
	if (p->phydev->link) {
		if (!p->last_link)
			link_changed = 1;
		if (p->last_duplex != p->phydev->duplex) {
			p->last_duplex = p->phydev->duplex;
			prtx_cfg.u64 =
				cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
			prtx_cfg.s.duplex = p->phydev->duplex;
			cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
				       prtx_cfg.u64);
		}
	} else {
		if (p->last_link)
			link_changed = -1;
	}
	p->last_link = p->phydev->link;
	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			netif_carrier_on(netdev);
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				p->phydev->speed,
				DUPLEX_FULL == p->phydev->duplex ?
					"Full" : "Half");
		} else {
			netif_carrier_off(netdev);
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	char phy_id[20];

	if (octeon_is_simulation()) {
		/* No PHYs in the simulator.  */
		netif_carrier_on(netdev);
		return 0;
	}

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);

	p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);

	if (IS_ERR(p->phydev)) {
		p->phydev = NULL;
		return -1;
	}

	phy_start_aneg(p->phydev);

	return 0;
}
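
/*
 * Bring the port up: allocate and map both descriptor rings, reset
 * the MIX block, program the ring base/size registers, restore the
 * MAC address and MTU, prime the RX ring, request the IRQ, set the
 * interrupt watermarks, and finally enable the AGL/GMX port.
 */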

static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

	/* Bring it out of reset if needed.  */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/*
	 * The ring base is programmed as a 64-bit-word address (hence
	 * the >> 3); kzalloc'd memory is at least 8-byte aligned, so
	 * this is safe.
	 */
	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

	/* Disable packet I/O.  */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW.  Packet I/O remains disabled until the
	 * AGL/GMX port is enabled near the end of this function.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
	mix_ctl.s.en = 1;        /* Enable the port */
	mix_ctl.s.nbtarb = 0;    /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics.  The counters are clear-on-read.  */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

	cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 1;
	cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);

	/* Enable packet I/O.  */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

	/* Enable the AGL block */
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/* Configure the port duplex and enables */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	p->last_duplex = 1;
	prtx_cfg.s.duplex = p->last_duplex;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	p->last_link = 0;
	netif_carrier_off(netdev);

	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY.\n");
		/* The IRQ was already requested; release it before unwinding. */
		free_irq(p->irq, netdev);
		goto err_noirq;
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything.  */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}
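
/*
 * TX path: map the skb, then under the tx_list lock append it to
 * tx_list and write its descriptor at tx_next.  Writing 1 to the
 * ORING2 "doorbell" hands one new descriptor to the hardware.  If
 * the ring is nearly full the queue is stopped first; the completion
 * path (octeon_mgmt_clean_tx_buffers) wakes it again.
 */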

static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring.  */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell.  */
	cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);

	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);
	return rv;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strncpy(info->driver, DRV_NAME, sizeof(info->driver));
	strncpy(info->version, DRV_VERSION, sizeof(info->version));
	strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
	info->n_stats = 0;
	info->testinfo_len = 0;
	info->regdump_len = 0;
	info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (p->phydev)
		return phy_ethtool_gset(p->phydev, cmd);

	return -EINVAL;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_ethtool_sset(p->phydev, cmd);

	return -EINVAL;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_settings = octeon_mgmt_get_settings,
	.set_settings = octeon_mgmt_set_settings,
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open =			octeon_mgmt_open,
	.ndo_stop =			octeon_mgmt_stop,
	.ndo_start_xmit =		octeon_mgmt_xmit,
	.ndo_set_rx_mode = 		octeon_mgmt_set_rx_filtering,
	.ndo_set_multicast_list =	octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address =		octeon_mgmt_set_mac_address,
	.ndo_do_ioctl = 		octeon_mgmt_ioctl,
	.ndo_change_mtu =		octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		octeon_mgmt_poll_controller,
#endif
};
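
/*
 * One platform device exists per management port; pdev->id selects
 * the port, and the first IRQ resource is the MIX interrupt line.
 */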

static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
{
	struct resource *res_irq;
	struct net_device *netdev;
	struct octeon_mgmt *p;
	int i;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;

	p->port = pdev->id;
	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res_irq)
		goto err;

	p->irq = res_irq->start;
	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	/*
	 * The mgmt ports get the first N MACs: the bootloader's base
	 * address plus the port number in the last octet.
	 */
	for (i = 0; i < 6; i++)
		netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
	netdev->dev_addr[5] += p->port;

	if (p->port >= octeon_bootinfo->mac_addr_count)
		dev_err(&pdev->dev,
			"Error %s: Using MAC outside of the assigned range: %pM\n",
			netdev->name, netdev->dev_addr);

	if (register_netdev(netdev))
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;
err:
	free_netdev(netdev);
	return -ENOENT;
}

static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
{
	struct net_device *netdev = dev_get_drvdata(&pdev->dev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}

static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name		= "octeon_mgmt",
		.owner		= THIS_MODULE,
	},
	.probe		= octeon_mgmt_probe,
	.remove		= __devexit_p(octeon_mgmt_remove),
};

/* (The misspelling matches the symbol exported by the Octeon mdiobus driver.) */
extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);