1/* 2 * linux/drivers/net/ehea/ehea_main.c 3 * 4 * eHEA ethernet device driver for IBM eServer System p 5 * 6 * (C) Copyright IBM Corp. 2006 7 * 8 * Authors: 9 * Christoph Raisch <raisch@de.ibm.com> 10 * Jan-Bernd Themann <themann@de.ibm.com> 11 * Thomas Klein <tklein@de.ibm.com> 12 * 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2, or (at your option) 17 * any later version. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License 25 * along with this program; if not, write to the Free Software 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 */ 28 29#include <linux/in.h> 30#include <linux/ip.h> 31#include <linux/tcp.h> 32#include <linux/udp.h> 33#include <linux/if.h> 34#include <linux/list.h> 35#include <linux/slab.h> 36#include <linux/if_ether.h> 37#include <linux/notifier.h> 38#include <linux/reboot.h> 39#include <linux/memory.h> 40#include <asm/kexec.h> 41#include <linux/mutex.h> 42 43#include <net/ip.h> 44 45#include "ehea.h" 46#include "ehea_qmr.h" 47#include "ehea_phyp.h" 48 49 50MODULE_LICENSE("GPL"); 51MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 52MODULE_DESCRIPTION("IBM eServer HEA Driver"); 53MODULE_VERSION(DRV_VERSION); 54 55 56static int msg_level = -1; 57static int rq1_entries = EHEA_DEF_ENTRIES_RQ1; 58static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; 59static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; 60static int sq_entries = EHEA_DEF_ENTRIES_SQ; 61static int use_mcs; 62static int use_lro; 63static int lro_max_aggr = EHEA_LRO_MAX_AGGR; 64static int num_tx_qps = EHEA_NUM_TX_QP; 65static int prop_carrier_state; 66 67module_param(msg_level, int, 0); 68module_param(rq1_entries, int, 0); 69module_param(rq2_entries, int, 0); 70module_param(rq3_entries, int, 0); 71module_param(sq_entries, int, 0); 72module_param(prop_carrier_state, int, 0); 73module_param(use_mcs, int, 0); 74module_param(use_lro, int, 0); 75module_param(lro_max_aggr, int, 0); 76module_param(num_tx_qps, int, 0); 77 78MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS"); 79MODULE_PARM_DESC(msg_level, "msg_level"); 80MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical " 81 "port to stack. 1:yes, 0:no. Default = 0 "); 82MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 " 83 "[2^x - 1], x = [6..14]. Default = " 84 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")"); 85MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 " 86 "[2^x - 1], x = [6..14]. Default = " 87 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")"); 88MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 " 89 "[2^x - 1], x = [6..14]. Default = " 90 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")"); 91MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue " 92 "[2^x - 1], x = [6..14]. Default = " 93 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")"); 94MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 "); 95 96MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = " 97 __MODULE_STRING(EHEA_LRO_MAX_AGGR)); 98MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, " 99 "Default = 0"); 100 101static int port_name_cnt; 102static LIST_HEAD(adapter_list); 103static unsigned long ehea_driver_flags; 104struct work_struct ehea_rereg_mr_task; 105static DEFINE_MUTEX(dlpar_mem_lock); 106struct ehea_fw_handle_array ehea_fw_handles; 107struct ehea_bcmc_reg_array ehea_bcmc_regs; 108 109 110static int __devinit ehea_probe_adapter(struct platform_device *dev, 111 const struct of_device_id *id); 112 113static int __devexit ehea_remove(struct platform_device *dev); 114 115static struct of_device_id ehea_device_table[] = { 116 { 117 .name = "lhea", 118 .compatible = "IBM,lhea", 119 }, 120 {}, 121}; 122MODULE_DEVICE_TABLE(of, ehea_device_table); 123 124static struct of_platform_driver ehea_driver = { 125 .driver = { 126 .name = "ehea", 127 .owner = THIS_MODULE, 128 .of_match_table = ehea_device_table, 129 }, 130 .probe = ehea_probe_adapter, 131 .remove = ehea_remove, 132}; 133 134void ehea_dump(void *adr, int len, char *msg) 135{ 136 int x; 137 unsigned char *deb = adr; 138 for (x = 0; x < len; x += 16) { 139 printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg, 140 deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8])); 141 deb += 16; 142 } 143} 144 145void ehea_schedule_port_reset(struct ehea_port *port) 146{ 147 if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags)) 148 schedule_work(&port->reset_task); 149} 150 151static void ehea_update_firmware_handles(void) 152{ 153 struct ehea_fw_handle_entry *arr = NULL; 154 struct ehea_adapter *adapter; 155 int num_adapters = 0; 156 int num_ports = 0; 157 int num_portres = 0; 158 int i = 0; 159 int num_fw_handles, k, l; 160 161 /* Determine number of handles */ 162 mutex_lock(&ehea_fw_handles.lock); 163 164 list_for_each_entry(adapter, &adapter_list, list) { 165 num_adapters++; 166 167 for (k = 0; k < EHEA_MAX_PORTS; k++) { 168 struct ehea_port *port = adapter->port[k]; 169 170 if (!port || (port->state != EHEA_PORT_UP)) 171 continue; 172 173 num_ports++; 174 num_portres += port->num_def_qps + port->num_add_tx_qps; 175 } 176 } 177 178 num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES + 179 num_ports * EHEA_NUM_PORT_FW_HANDLES + 180 num_portres * EHEA_NUM_PORTRES_FW_HANDLES; 181 182 if (num_fw_handles) { 183 arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL); 184 if (!arr) 185 goto out; /* Keep the existing array */ 186 } else 187 goto out_update; 188 189 list_for_each_entry(adapter, &adapter_list, list) { 190 if (num_adapters == 0) 191 break; 192 193 for (k = 0; k < EHEA_MAX_PORTS; k++) { 194 struct ehea_port *port = adapter->port[k]; 195 196 if (!port || (port->state != EHEA_PORT_UP) || 197 (num_ports == 0)) 198 continue; 199 200 for (l = 0; 201 l < port->num_def_qps + port->num_add_tx_qps; 202 l++) { 203 struct ehea_port_res *pr = &port->port_res[l]; 204 205 arr[i].adh = adapter->handle; 206 arr[i++].fwh = pr->qp->fw_handle; 207 arr[i].adh = adapter->handle; 208 arr[i++].fwh = pr->send_cq->fw_handle; 209 arr[i].adh = adapter->handle; 210 arr[i++].fwh = pr->recv_cq->fw_handle; 211 arr[i].adh = adapter->handle; 212 arr[i++].fwh = pr->eq->fw_handle; 213 arr[i].adh = adapter->handle; 214 arr[i++].fwh = pr->send_mr.handle; 215 arr[i].adh = adapter->handle; 216 arr[i++].fwh = pr->recv_mr.handle; 217 } 218 arr[i].adh = adapter->handle; 219 arr[i++].fwh = port->qp_eq->fw_handle; 220 num_ports--; 221 } 222 223 arr[i].adh = adapter->handle; 224 arr[i++].fwh = adapter->neq->fw_handle; 225 226 if (adapter->mr.handle) { 227 arr[i].adh = adapter->handle; 228 arr[i++].fwh = adapter->mr.handle; 229 } 230 num_adapters--; 231 } 232 233out_update: 234 kfree(ehea_fw_handles.arr); 235 ehea_fw_handles.arr = arr; 236 ehea_fw_handles.num_entries = i; 237out: 238 mutex_unlock(&ehea_fw_handles.lock); 239} 240 241static void ehea_update_bcmc_registrations(void) 242{ 243 unsigned long flags; 244 struct ehea_bcmc_reg_entry *arr = NULL; 245 struct ehea_adapter *adapter; 246 struct ehea_mc_list *mc_entry; 247 int num_registrations = 0; 248 int i = 0; 249 int k; 250 251 spin_lock_irqsave(&ehea_bcmc_regs.lock, flags); 252 253 /* Determine number of registrations */ 254 list_for_each_entry(adapter, &adapter_list, list) 255 for (k = 0; k < EHEA_MAX_PORTS; k++) { 256 struct ehea_port *port = adapter->port[k]; 257 258 if (!port || (port->state != EHEA_PORT_UP)) 259 continue; 260 261 num_registrations += 2; /* Broadcast registrations */ 262 263 list_for_each_entry(mc_entry, &port->mc_list->list,list) 264 num_registrations += 2; 265 } 266 267 if (num_registrations) { 268 arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC); 269 if (!arr) 270 goto out; /* Keep the existing array */ 271 } else 272 goto out_update; 273 274 list_for_each_entry(adapter, &adapter_list, list) { 275 for (k = 0; k < EHEA_MAX_PORTS; k++) { 276 struct ehea_port *port = adapter->port[k]; 277 278 if (!port || (port->state != EHEA_PORT_UP)) 279 continue; 280 281 if (num_registrations == 0) 282 goto out_update; 283 284 arr[i].adh = adapter->handle; 285 arr[i].port_id = port->logical_port_id; 286 arr[i].reg_type = EHEA_BCMC_BROADCAST | 287 EHEA_BCMC_UNTAGGED; 288 arr[i++].macaddr = port->mac_addr; 289 290 arr[i].adh = adapter->handle; 291 arr[i].port_id = port->logical_port_id; 292 arr[i].reg_type = EHEA_BCMC_BROADCAST | 293 EHEA_BCMC_VLANID_ALL; 294 arr[i++].macaddr = port->mac_addr; 295 num_registrations -= 2; 296 297 list_for_each_entry(mc_entry, 298 &port->mc_list->list, list) { 299 if (num_registrations == 0) 300 goto out_update; 301 302 arr[i].adh = adapter->handle; 303 arr[i].port_id = port->logical_port_id; 304 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | 305 EHEA_BCMC_MULTICAST | 306 EHEA_BCMC_UNTAGGED; 307 arr[i++].macaddr = mc_entry->macaddr; 308 309 arr[i].adh = adapter->handle; 310 arr[i].port_id = port->logical_port_id; 311 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | 312 EHEA_BCMC_MULTICAST | 313 EHEA_BCMC_VLANID_ALL; 314 arr[i++].macaddr = mc_entry->macaddr; 315 num_registrations -= 2; 316 } 317 } 318 } 319 320out_update: 321 kfree(ehea_bcmc_regs.arr); 322 ehea_bcmc_regs.arr = arr; 323 ehea_bcmc_regs.num_entries = i; 324out: 325 spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags); 326} 327 328static struct net_device_stats *ehea_get_stats(struct net_device *dev) 329{ 330 struct ehea_port *port = netdev_priv(dev); 331 struct net_device_stats *stats = &port->stats; 332 struct hcp_ehea_port_cb2 *cb2; 333 u64 hret, rx_packets, tx_packets; 334 int i; 335 336 memset(stats, 0, sizeof(*stats)); 337 338 cb2 = (void *)get_zeroed_page(GFP_KERNEL); 339 if (!cb2) { 340 ehea_error("no mem for cb2"); 341 goto out; 342 } 343 344 hret = ehea_h_query_ehea_port(port->adapter->handle, 345 port->logical_port_id, 346 H_PORT_CB2, H_PORT_CB2_ALL, cb2); 347 if (hret != H_SUCCESS) { 348 ehea_error("query_ehea_port failed"); 349 goto out_herr; 350 } 351 352 if (netif_msg_hw(port)) 353 ehea_dump(cb2, sizeof(*cb2), "net_device_stats"); 354 355 rx_packets = 0; 356 for (i = 0; i < port->num_def_qps; i++) 357 rx_packets += port->port_res[i].rx_packets; 358 359 tx_packets = 0; 360 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 361 tx_packets += port->port_res[i].tx_packets; 362 363 stats->tx_packets = tx_packets; 364 stats->multicast = cb2->rxmcp; 365 stats->rx_errors = cb2->rxuerr; 366 stats->rx_bytes = cb2->rxo; 367 stats->tx_bytes = cb2->txo; 368 stats->rx_packets = rx_packets; 369 370out_herr: 371 free_page((unsigned long)cb2); 372out: 373 return stats; 374} 375 376static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) 377{ 378 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; 379 struct net_device *dev = pr->port->netdev; 380 int max_index_mask = pr->rq1_skba.len - 1; 381 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes; 382 int adder = 0; 383 int i; 384 385 pr->rq1_skba.os_skbs = 0; 386 387 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { 388 if (nr_of_wqes > 0) 389 pr->rq1_skba.index = index; 390 pr->rq1_skba.os_skbs = fill_wqes; 391 return; 392 } 393 394 for (i = 0; i < fill_wqes; i++) { 395 if (!skb_arr_rq1[index]) { 396 skb_arr_rq1[index] = netdev_alloc_skb(dev, 397 EHEA_L_PKT_SIZE); 398 if (!skb_arr_rq1[index]) { 399 pr->rq1_skba.os_skbs = fill_wqes - i; 400 break; 401 } 402 } 403 index--; 404 index &= max_index_mask; 405 adder++; 406 } 407 408 if (adder == 0) 409 return; 410 411 /* Ring doorbell */ 412 ehea_update_rq1a(pr->qp, adder); 413} 414 415static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) 416{ 417 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; 418 struct net_device *dev = pr->port->netdev; 419 int i; 420 421 for (i = 0; i < pr->rq1_skba.len; i++) { 422 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); 423 if (!skb_arr_rq1[i]) 424 break; 425 } 426 /* Ring doorbell */ 427 ehea_update_rq1a(pr->qp, nr_rq1a); 428} 429 430static int ehea_refill_rq_def(struct ehea_port_res *pr, 431 struct ehea_q_skb_arr *q_skba, int rq_nr, 432 int num_wqes, int wqe_type, int packet_size) 433{ 434 struct net_device *dev = pr->port->netdev; 435 struct ehea_qp *qp = pr->qp; 436 struct sk_buff **skb_arr = q_skba->arr; 437 struct ehea_rwqe *rwqe; 438 int i, index, max_index_mask, fill_wqes; 439 int adder = 0; 440 int ret = 0; 441 442 fill_wqes = q_skba->os_skbs + num_wqes; 443 q_skba->os_skbs = 0; 444 445 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { 446 q_skba->os_skbs = fill_wqes; 447 return ret; 448 } 449 450 index = q_skba->index; 451 max_index_mask = q_skba->len - 1; 452 for (i = 0; i < fill_wqes; i++) { 453 u64 tmp_addr; 454 struct sk_buff *skb; 455 456 skb = netdev_alloc_skb_ip_align(dev, packet_size); 457 if (!skb) { 458 q_skba->os_skbs = fill_wqes - i; 459 if (q_skba->os_skbs == q_skba->len - 2) { 460 ehea_info("%s: rq%i ran dry - no mem for skb", 461 pr->port->netdev->name, rq_nr); 462 ret = -ENOMEM; 463 } 464 break; 465 } 466 467 skb_arr[index] = skb; 468 tmp_addr = ehea_map_vaddr(skb->data); 469 if (tmp_addr == -1) { 470 dev_kfree_skb(skb); 471 q_skba->os_skbs = fill_wqes - i; 472 ret = 0; 473 break; 474 } 475 476 rwqe = ehea_get_next_rwqe(qp, rq_nr); 477 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) 478 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index); 479 rwqe->sg_list[0].l_key = pr->recv_mr.lkey; 480 rwqe->sg_list[0].vaddr = tmp_addr; 481 rwqe->sg_list[0].len = packet_size; 482 rwqe->data_segments = 1; 483 484 index++; 485 index &= max_index_mask; 486 adder++; 487 } 488 489 q_skba->index = index; 490 if (adder == 0) 491 goto out; 492 493 /* Ring doorbell */ 494 iosync(); 495 if (rq_nr == 2) 496 ehea_update_rq2a(pr->qp, adder); 497 else 498 ehea_update_rq3a(pr->qp, adder); 499out: 500 return ret; 501} 502 503 504static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes) 505{ 506 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, 507 nr_of_wqes, EHEA_RWQE2_TYPE, 508 EHEA_RQ2_PKT_SIZE); 509} 510 511 512static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes) 513{ 514 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, 515 nr_of_wqes, EHEA_RWQE3_TYPE, 516 EHEA_MAX_PACKET_SIZE); 517} 518 519static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) 520{ 521 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5; 522 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0) 523 return 0; 524 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) && 525 (cqe->header_length == 0)) 526 return 0; 527 return -EINVAL; 528} 529 530static inline void ehea_fill_skb(struct net_device *dev, 531 struct sk_buff *skb, struct ehea_cqe *cqe) 532{ 533 int length = cqe->num_bytes_transfered - 4; /*remove CRC */ 534 535 skb_put(skb, length); 536 skb->protocol = eth_type_trans(skb, dev); 537 538 /* The packet was not an IPV4 packet so a complemented checksum was 539 calculated. The value is found in the Internet Checksum field. */ 540 if (cqe->status & EHEA_CQE_BLIND_CKSUM) { 541 skb->ip_summed = CHECKSUM_COMPLETE; 542 skb->csum = csum_unfold(~cqe->inet_checksum_value); 543 } else 544 skb->ip_summed = CHECKSUM_UNNECESSARY; 545} 546 547static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, 548 int arr_len, 549 struct ehea_cqe *cqe) 550{ 551 int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); 552 struct sk_buff *skb; 553 void *pref; 554 int x; 555 556 x = skb_index + 1; 557 x &= (arr_len - 1); 558 559 pref = skb_array[x]; 560 if (pref) { 561 prefetchw(pref); 562 prefetchw(pref + EHEA_CACHE_LINE); 563 564 pref = (skb_array[x]->data); 565 prefetch(pref); 566 prefetch(pref + EHEA_CACHE_LINE); 567 prefetch(pref + EHEA_CACHE_LINE * 2); 568 prefetch(pref + EHEA_CACHE_LINE * 3); 569 } 570 571 skb = skb_array[skb_index]; 572 skb_array[skb_index] = NULL; 573 return skb; 574} 575 576static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array, 577 int arr_len, int wqe_index) 578{ 579 struct sk_buff *skb; 580 void *pref; 581 int x; 582 583 x = wqe_index + 1; 584 x &= (arr_len - 1); 585 586 pref = skb_array[x]; 587 if (pref) { 588 prefetchw(pref); 589 prefetchw(pref + EHEA_CACHE_LINE); 590 591 pref = (skb_array[x]->data); 592 prefetchw(pref); 593 prefetchw(pref + EHEA_CACHE_LINE); 594 } 595 596 skb = skb_array[wqe_index]; 597 skb_array[wqe_index] = NULL; 598 return skb; 599} 600 601static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, 602 struct ehea_cqe *cqe, int *processed_rq2, 603 int *processed_rq3) 604{ 605 struct sk_buff *skb; 606 607 if (cqe->status & EHEA_CQE_STAT_ERR_TCP) 608 pr->p_stats.err_tcp_cksum++; 609 if (cqe->status & EHEA_CQE_STAT_ERR_IP) 610 pr->p_stats.err_ip_cksum++; 611 if (cqe->status & EHEA_CQE_STAT_ERR_CRC) 612 pr->p_stats.err_frame_crc++; 613 614 if (rq == 2) { 615 *processed_rq2 += 1; 616 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe); 617 dev_kfree_skb(skb); 618 } else if (rq == 3) { 619 *processed_rq3 += 1; 620 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe); 621 dev_kfree_skb(skb); 622 } 623 624 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { 625 if (netif_msg_rx_err(pr->port)) { 626 ehea_error("Critical receive error for QP %d. " 627 "Resetting port.", pr->qp->init_attr.qp_nr); 628 ehea_dump(cqe, sizeof(*cqe), "CQE"); 629 } 630 ehea_schedule_port_reset(pr->port); 631 return 1; 632 } 633 634 return 0; 635} 636 637static int get_skb_hdr(struct sk_buff *skb, void **iphdr, 638 void **tcph, u64 *hdr_flags, void *priv) 639{ 640 struct ehea_cqe *cqe = priv; 641 unsigned int ip_len; 642 struct iphdr *iph; 643 644 /* non tcp/udp packets */ 645 if (!cqe->header_length) 646 return -1; 647 648 /* non tcp packet */ 649 skb_reset_network_header(skb); 650 iph = ip_hdr(skb); 651 if (iph->protocol != IPPROTO_TCP) 652 return -1; 653 654 ip_len = ip_hdrlen(skb); 655 skb_set_transport_header(skb, ip_len); 656 *tcph = tcp_hdr(skb); 657 658 /* check if ip header and tcp header are complete */ 659 if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) 660 return -1; 661 662 *hdr_flags = LRO_IPV4 | LRO_TCP; 663 *iphdr = iph; 664 665 return 0; 666} 667 668static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe, 669 struct sk_buff *skb) 670{ 671 int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) && 672 pr->port->vgrp); 673 674 if (use_lro) { 675 if (vlan_extracted) 676 lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb, 677 pr->port->vgrp, 678 cqe->vlan_tag, 679 cqe); 680 else 681 lro_receive_skb(&pr->lro_mgr, skb, cqe); 682 } else { 683 if (vlan_extracted) 684 vlan_hwaccel_receive_skb(skb, pr->port->vgrp, 685 cqe->vlan_tag); 686 else 687 netif_receive_skb(skb); 688 } 689} 690 691static int ehea_proc_rwqes(struct net_device *dev, 692 struct ehea_port_res *pr, 693 int budget) 694{ 695 struct ehea_port *port = pr->port; 696 struct ehea_qp *qp = pr->qp; 697 struct ehea_cqe *cqe; 698 struct sk_buff *skb; 699 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr; 700 struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr; 701 struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr; 702 int skb_arr_rq1_len = pr->rq1_skba.len; 703 int skb_arr_rq2_len = pr->rq2_skba.len; 704 int skb_arr_rq3_len = pr->rq3_skba.len; 705 int processed, processed_rq1, processed_rq2, processed_rq3; 706 int wqe_index, last_wqe_index, rq, port_reset; 707 708 processed = processed_rq1 = processed_rq2 = processed_rq3 = 0; 709 last_wqe_index = 0; 710 711 cqe = ehea_poll_rq1(qp, &wqe_index); 712 while ((processed < budget) && cqe) { 713 ehea_inc_rq1(qp); 714 processed_rq1++; 715 processed++; 716 if (netif_msg_rx_status(port)) 717 ehea_dump(cqe, sizeof(*cqe), "CQE"); 718 719 last_wqe_index = wqe_index; 720 rmb(); 721 if (!ehea_check_cqe(cqe, &rq)) { 722 if (rq == 1) { 723 /* LL RQ1 */ 724 skb = get_skb_by_index_ll(skb_arr_rq1, 725 skb_arr_rq1_len, 726 wqe_index); 727 if (unlikely(!skb)) { 728 if (netif_msg_rx_err(port)) 729 ehea_error("LL rq1: skb=NULL"); 730 731 skb = netdev_alloc_skb(dev, 732 EHEA_L_PKT_SIZE); 733 if (!skb) 734 break; 735 } 736 skb_copy_to_linear_data(skb, ((char *)cqe) + 64, 737 cqe->num_bytes_transfered - 4); 738 ehea_fill_skb(dev, skb, cqe); 739 } else if (rq == 2) { 740 /* RQ2 */ 741 skb = get_skb_by_index(skb_arr_rq2, 742 skb_arr_rq2_len, cqe); 743 if (unlikely(!skb)) { 744 if (netif_msg_rx_err(port)) 745 ehea_error("rq2: skb=NULL"); 746 break; 747 } 748 ehea_fill_skb(dev, skb, cqe); 749 processed_rq2++; 750 } else { 751 /* RQ3 */ 752 skb = get_skb_by_index(skb_arr_rq3, 753 skb_arr_rq3_len, cqe); 754 if (unlikely(!skb)) { 755 if (netif_msg_rx_err(port)) 756 ehea_error("rq3: skb=NULL"); 757 break; 758 } 759 ehea_fill_skb(dev, skb, cqe); 760 processed_rq3++; 761 } 762 763 ehea_proc_skb(pr, cqe, skb); 764 } else { 765 pr->p_stats.poll_receive_errors++; 766 port_reset = ehea_treat_poll_error(pr, rq, cqe, 767 &processed_rq2, 768 &processed_rq3); 769 if (port_reset) 770 break; 771 } 772 cqe = ehea_poll_rq1(qp, &wqe_index); 773 } 774 if (use_lro) 775 lro_flush_all(&pr->lro_mgr); 776 777 pr->rx_packets += processed; 778 779 ehea_refill_rq1(pr, last_wqe_index, processed_rq1); 780 ehea_refill_rq2(pr, processed_rq2); 781 ehea_refill_rq3(pr, processed_rq3); 782 783 return processed; 784} 785 786#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull 787 788static void reset_sq_restart_flag(struct ehea_port *port) 789{ 790 int i; 791 792 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 793 struct ehea_port_res *pr = &port->port_res[i]; 794 pr->sq_restart_flag = 0; 795 } 796} 797 798static void check_sqs(struct ehea_port *port) 799{ 800 struct ehea_swqe *swqe; 801 int swqe_index; 802 int i, k; 803 804 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 805 struct ehea_port_res *pr = &port->port_res[i]; 806 k = 0; 807 swqe = ehea_get_swqe(pr->qp, &swqe_index); 808 memset(swqe, 0, SWQE_HEADER_SIZE); 809 atomic_dec(&pr->swqe_avail); 810 811 swqe->tx_control |= EHEA_SWQE_PURGE; 812 swqe->wr_id = SWQE_RESTART_CHECK; 813 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; 814 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT; 815 swqe->immediate_data_length = 80; 816 817 ehea_post_swqe(pr->qp, swqe); 818 819 while (pr->sq_restart_flag == 0) { 820 msleep(5); 821 if (++k == 100) { 822 ehea_error("HW/SW queues out of sync"); 823 ehea_schedule_port_reset(pr->port); 824 return; 825 } 826 } 827 } 828 829 return; 830} 831 832 833static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) 834{ 835 struct sk_buff *skb; 836 struct ehea_cq *send_cq = pr->send_cq; 837 struct ehea_cqe *cqe; 838 int quota = my_quota; 839 int cqe_counter = 0; 840 int swqe_av = 0; 841 int index; 842 unsigned long flags; 843 844 cqe = ehea_poll_cq(send_cq); 845 while (cqe && (quota > 0)) { 846 ehea_inc_cq(send_cq); 847 848 cqe_counter++; 849 rmb(); 850 851 if (cqe->wr_id == SWQE_RESTART_CHECK) { 852 pr->sq_restart_flag = 1; 853 swqe_av++; 854 break; 855 } 856 857 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { 858 ehea_error("Bad send completion status=0x%04X", 859 cqe->status); 860 861 if (netif_msg_tx_err(pr->port)) 862 ehea_dump(cqe, sizeof(*cqe), "Send CQE"); 863 864 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) { 865 ehea_error("Resetting port"); 866 ehea_schedule_port_reset(pr->port); 867 break; 868 } 869 } 870 871 if (netif_msg_tx_done(pr->port)) 872 ehea_dump(cqe, sizeof(*cqe), "CQE"); 873 874 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id) 875 == EHEA_SWQE2_TYPE)) { 876 877 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id); 878 skb = pr->sq_skba.arr[index]; 879 dev_kfree_skb(skb); 880 pr->sq_skba.arr[index] = NULL; 881 } 882 883 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); 884 quota--; 885 886 cqe = ehea_poll_cq(send_cq); 887 } 888 889 ehea_update_feca(send_cq, cqe_counter); 890 atomic_add(swqe_av, &pr->swqe_avail); 891 892 spin_lock_irqsave(&pr->netif_queue, flags); 893 894 if (pr->queue_stopped && (atomic_read(&pr->swqe_avail) 895 >= pr->swqe_refill_th)) { 896 netif_wake_queue(pr->port->netdev); 897 pr->queue_stopped = 0; 898 } 899 spin_unlock_irqrestore(&pr->netif_queue, flags); 900 901 return cqe; 902} 903 904#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16 905#define EHEA_POLL_MAX_CQES 65535 906 907static int ehea_poll(struct napi_struct *napi, int budget) 908{ 909 struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, 910 napi); 911 struct net_device *dev = pr->port->netdev; 912 struct ehea_cqe *cqe; 913 struct ehea_cqe *cqe_skb = NULL; 914 int force_irq, wqe_index; 915 int rx = 0; 916 917 force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ); 918 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); 919 920 if (!force_irq) 921 rx += ehea_proc_rwqes(dev, pr, budget - rx); 922 923 while ((rx != budget) || force_irq) { 924 pr->poll_counter = 0; 925 force_irq = 0; 926 napi_complete(napi); 927 ehea_reset_cq_ep(pr->recv_cq); 928 ehea_reset_cq_ep(pr->send_cq); 929 ehea_reset_cq_n1(pr->recv_cq); 930 ehea_reset_cq_n1(pr->send_cq); 931 rmb(); 932 cqe = ehea_poll_rq1(pr->qp, &wqe_index); 933 cqe_skb = ehea_poll_cq(pr->send_cq); 934 935 if (!cqe && !cqe_skb) 936 return rx; 937 938 if (!napi_reschedule(napi)) 939 return rx; 940 941 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES); 942 rx += ehea_proc_rwqes(dev, pr, budget - rx); 943 } 944 945 pr->poll_counter++; 946 return rx; 947} 948 949#ifdef CONFIG_NET_POLL_CONTROLLER 950static void ehea_netpoll(struct net_device *dev) 951{ 952 struct ehea_port *port = netdev_priv(dev); 953 int i; 954 955 for (i = 0; i < port->num_def_qps; i++) 956 napi_schedule(&port->port_res[i].napi); 957} 958#endif 959 960static irqreturn_t ehea_recv_irq_handler(int irq, void *param) 961{ 962 struct ehea_port_res *pr = param; 963 964 napi_schedule(&pr->napi); 965 966 return IRQ_HANDLED; 967} 968 969static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param) 970{ 971 struct ehea_port *port = param; 972 struct ehea_eqe *eqe; 973 struct ehea_qp *qp; 974 u32 qp_token; 975 u64 resource_type, aer, aerr; 976 int reset_port = 0; 977 978 eqe = ehea_poll_eq(port->qp_eq); 979 980 while (eqe) { 981 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); 982 ehea_error("QP aff_err: entry=0x%llx, token=0x%x", 983 eqe->entry, qp_token); 984 985 qp = port->port_res[qp_token].qp; 986 987 resource_type = ehea_error_data(port->adapter, qp->fw_handle, 988 &aer, &aerr); 989 990 if (resource_type == EHEA_AER_RESTYPE_QP) { 991 if ((aer & EHEA_AER_RESET_MASK) || 992 (aerr & EHEA_AERR_RESET_MASK)) 993 reset_port = 1; 994 } else 995 reset_port = 1; /* Reset in case of CQ or EQ error */ 996 997 eqe = ehea_poll_eq(port->qp_eq); 998 } 999 1000 if (reset_port) { 1001 ehea_error("Resetting port"); 1002 ehea_schedule_port_reset(port); 1003 } 1004 1005 return IRQ_HANDLED; 1006} 1007 1008static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter, 1009 int logical_port) 1010{ 1011 int i; 1012 1013 for (i = 0; i < EHEA_MAX_PORTS; i++) 1014 if (adapter->port[i]) 1015 if (adapter->port[i]->logical_port_id == logical_port) 1016 return adapter->port[i]; 1017 return NULL; 1018} 1019 1020int ehea_sense_port_attr(struct ehea_port *port) 1021{ 1022 int ret; 1023 u64 hret; 1024 struct hcp_ehea_port_cb0 *cb0; 1025 1026 /* may be called via ehea_neq_tasklet() */ 1027 cb0 = (void *)get_zeroed_page(GFP_ATOMIC); 1028 if (!cb0) { 1029 ehea_error("no mem for cb0"); 1030 ret = -ENOMEM; 1031 goto out; 1032 } 1033 1034 hret = ehea_h_query_ehea_port(port->adapter->handle, 1035 port->logical_port_id, H_PORT_CB0, 1036 EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF), 1037 cb0); 1038 if (hret != H_SUCCESS) { 1039 ret = -EIO; 1040 goto out_free; 1041 } 1042 1043 /* MAC address */ 1044 port->mac_addr = cb0->port_mac_addr << 16; 1045 1046 if (!is_valid_ether_addr((u8 *)&port->mac_addr)) { 1047 ret = -EADDRNOTAVAIL; 1048 goto out_free; 1049 } 1050 1051 /* Port speed */ 1052 switch (cb0->port_speed) { 1053 case H_SPEED_10M_H: 1054 port->port_speed = EHEA_SPEED_10M; 1055 port->full_duplex = 0; 1056 break; 1057 case H_SPEED_10M_F: 1058 port->port_speed = EHEA_SPEED_10M; 1059 port->full_duplex = 1; 1060 break; 1061 case H_SPEED_100M_H: 1062 port->port_speed = EHEA_SPEED_100M; 1063 port->full_duplex = 0; 1064 break; 1065 case H_SPEED_100M_F: 1066 port->port_speed = EHEA_SPEED_100M; 1067 port->full_duplex = 1; 1068 break; 1069 case H_SPEED_1G_F: 1070 port->port_speed = EHEA_SPEED_1G; 1071 port->full_duplex = 1; 1072 break; 1073 case H_SPEED_10G_F: 1074 port->port_speed = EHEA_SPEED_10G; 1075 port->full_duplex = 1; 1076 break; 1077 default: 1078 port->port_speed = 0; 1079 port->full_duplex = 0; 1080 break; 1081 } 1082 1083 port->autoneg = 1; 1084 port->num_mcs = cb0->num_default_qps; 1085 1086 /* Number of default QPs */ 1087 if (use_mcs) 1088 port->num_def_qps = cb0->num_default_qps; 1089 else 1090 port->num_def_qps = 1; 1091 1092 if (!port->num_def_qps) { 1093 ret = -EINVAL; 1094 goto out_free; 1095 } 1096 1097 port->num_tx_qps = num_tx_qps; 1098 1099 if (port->num_def_qps >= port->num_tx_qps) 1100 port->num_add_tx_qps = 0; 1101 else 1102 port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps; 1103 1104 ret = 0; 1105out_free: 1106 if (ret || netif_msg_probe(port)) 1107 ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr"); 1108 free_page((unsigned long)cb0); 1109out: 1110 return ret; 1111} 1112 1113int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) 1114{ 1115 struct hcp_ehea_port_cb4 *cb4; 1116 u64 hret; 1117 int ret = 0; 1118 1119 cb4 = (void *)get_zeroed_page(GFP_KERNEL); 1120 if (!cb4) { 1121 ehea_error("no mem for cb4"); 1122 ret = -ENOMEM; 1123 goto out; 1124 } 1125 1126 cb4->port_speed = port_speed; 1127 1128 netif_carrier_off(port->netdev); 1129 1130 hret = ehea_h_modify_ehea_port(port->adapter->handle, 1131 port->logical_port_id, 1132 H_PORT_CB4, H_PORT_CB4_SPEED, cb4); 1133 if (hret == H_SUCCESS) { 1134 port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0; 1135 1136 hret = ehea_h_query_ehea_port(port->adapter->handle, 1137 port->logical_port_id, 1138 H_PORT_CB4, H_PORT_CB4_SPEED, 1139 cb4); 1140 if (hret == H_SUCCESS) { 1141 switch (cb4->port_speed) { 1142 case H_SPEED_10M_H: 1143 port->port_speed = EHEA_SPEED_10M; 1144 port->full_duplex = 0; 1145 break; 1146 case H_SPEED_10M_F: 1147 port->port_speed = EHEA_SPEED_10M; 1148 port->full_duplex = 1; 1149 break; 1150 case H_SPEED_100M_H: 1151 port->port_speed = EHEA_SPEED_100M; 1152 port->full_duplex = 0; 1153 break; 1154 case H_SPEED_100M_F: 1155 port->port_speed = EHEA_SPEED_100M; 1156 port->full_duplex = 1; 1157 break; 1158 case H_SPEED_1G_F: 1159 port->port_speed = EHEA_SPEED_1G; 1160 port->full_duplex = 1; 1161 break; 1162 case H_SPEED_10G_F: 1163 port->port_speed = EHEA_SPEED_10G; 1164 port->full_duplex = 1; 1165 break; 1166 default: 1167 port->port_speed = 0; 1168 port->full_duplex = 0; 1169 break; 1170 } 1171 } else { 1172 ehea_error("Failed sensing port speed"); 1173 ret = -EIO; 1174 } 1175 } else { 1176 if (hret == H_AUTHORITY) { 1177 ehea_info("Hypervisor denied setting port speed"); 1178 ret = -EPERM; 1179 } else { 1180 ret = -EIO; 1181 ehea_error("Failed setting port speed"); 1182 } 1183 } 1184 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) 1185 netif_carrier_on(port->netdev); 1186 1187 free_page((unsigned long)cb4); 1188out: 1189 return ret; 1190} 1191 1192static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) 1193{ 1194 int ret; 1195 u8 ec; 1196 u8 portnum; 1197 struct ehea_port *port; 1198 1199 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); 1200 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); 1201 port = ehea_get_port(adapter, portnum); 1202 1203 switch (ec) { 1204 case EHEA_EC_PORTSTATE_CHG: /* port state change */ 1205 1206 if (!port) { 1207 ehea_error("unknown portnum %x", portnum); 1208 break; 1209 } 1210 1211 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { 1212 if (!netif_carrier_ok(port->netdev)) { 1213 ret = ehea_sense_port_attr(port); 1214 if (ret) { 1215 ehea_error("failed resensing port " 1216 "attributes"); 1217 break; 1218 } 1219 1220 if (netif_msg_link(port)) 1221 ehea_info("%s: Logical port up: %dMbps " 1222 "%s Duplex", 1223 port->netdev->name, 1224 port->port_speed, 1225 port->full_duplex == 1226 1 ? "Full" : "Half"); 1227 1228 netif_carrier_on(port->netdev); 1229 netif_wake_queue(port->netdev); 1230 } 1231 } else 1232 if (netif_carrier_ok(port->netdev)) { 1233 if (netif_msg_link(port)) 1234 ehea_info("%s: Logical port down", 1235 port->netdev->name); 1236 netif_carrier_off(port->netdev); 1237 netif_stop_queue(port->netdev); 1238 } 1239 1240 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) { 1241 port->phy_link = EHEA_PHY_LINK_UP; 1242 if (netif_msg_link(port)) 1243 ehea_info("%s: Physical port up", 1244 port->netdev->name); 1245 if (prop_carrier_state) 1246 netif_carrier_on(port->netdev); 1247 } else { 1248 port->phy_link = EHEA_PHY_LINK_DOWN; 1249 if (netif_msg_link(port)) 1250 ehea_info("%s: Physical port down", 1251 port->netdev->name); 1252 if (prop_carrier_state) 1253 netif_carrier_off(port->netdev); 1254 } 1255 1256 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe)) 1257 ehea_info("External switch port is primary port"); 1258 else 1259 ehea_info("External switch port is backup port"); 1260 1261 break; 1262 case EHEA_EC_ADAPTER_MALFUNC: 1263 ehea_error("Adapter malfunction"); 1264 break; 1265 case EHEA_EC_PORT_MALFUNC: 1266 ehea_info("Port malfunction: Device: %s", port->netdev->name); 1267 netif_carrier_off(port->netdev); 1268 netif_stop_queue(port->netdev); 1269 break; 1270 default: 1271 ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe); 1272 break; 1273 } 1274} 1275 1276static void ehea_neq_tasklet(unsigned long data) 1277{ 1278 struct ehea_adapter *adapter = (struct ehea_adapter *)data; 1279 struct ehea_eqe *eqe; 1280 u64 event_mask; 1281 1282 eqe = ehea_poll_eq(adapter->neq); 1283 ehea_debug("eqe=%p", eqe); 1284 1285 while (eqe) { 1286 ehea_debug("*eqe=%lx", eqe->entry); 1287 ehea_parse_eqe(adapter, eqe->entry); 1288 eqe = ehea_poll_eq(adapter->neq); 1289 ehea_debug("next eqe=%p", eqe); 1290 } 1291 1292 event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1) 1293 | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1) 1294 | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1); 1295 1296 ehea_h_reset_events(adapter->handle, 1297 adapter->neq->fw_handle, event_mask); 1298} 1299 1300static irqreturn_t ehea_interrupt_neq(int irq, void *param) 1301{ 1302 struct ehea_adapter *adapter = param; 1303 tasklet_hi_schedule(&adapter->neq_tasklet); 1304 return IRQ_HANDLED; 1305} 1306 1307 1308static int ehea_fill_port_res(struct ehea_port_res *pr) 1309{ 1310 int ret; 1311 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; 1312 1313 ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 1314 - init_attr->act_nr_rwqes_rq2 1315 - init_attr->act_nr_rwqes_rq3 - 1); 1316 1317 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); 1318 1319 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1); 1320 1321 return ret; 1322} 1323 1324static int ehea_reg_interrupts(struct net_device *dev) 1325{ 1326 struct ehea_port *port = netdev_priv(dev); 1327 struct ehea_port_res *pr; 1328 int i, ret; 1329 1330 1331 snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff", 1332 dev->name); 1333 1334 ret = ibmebus_request_irq(port->qp_eq->attr.ist1, 1335 ehea_qp_aff_irq_handler, 1336 IRQF_DISABLED, port->int_aff_name, port); 1337 if (ret) { 1338 ehea_error("failed registering irq for qp_aff_irq_handler:" 1339 "ist=%X", port->qp_eq->attr.ist1); 1340 goto out_free_qpeq; 1341 } 1342 1343 if (netif_msg_ifup(port)) 1344 ehea_info("irq_handle 0x%X for function qp_aff_irq_handler " 1345 "registered", port->qp_eq->attr.ist1); 1346 1347 1348 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 1349 pr = &port->port_res[i]; 1350 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, 1351 "%s-queue%d", dev->name, i); 1352 ret = ibmebus_request_irq(pr->eq->attr.ist1, 1353 ehea_recv_irq_handler, 1354 IRQF_DISABLED, pr->int_send_name, 1355 pr); 1356 if (ret) { 1357 ehea_error("failed registering irq for ehea_queue " 1358 "port_res_nr:%d, ist=%X", i, 1359 pr->eq->attr.ist1); 1360 goto out_free_req; 1361 } 1362 if (netif_msg_ifup(port)) 1363 ehea_info("irq_handle 0x%X for function ehea_queue_int " 1364 "%d registered", pr->eq->attr.ist1, i); 1365 } 1366out: 1367 return ret; 1368 1369 1370out_free_req: 1371 while (--i >= 0) { 1372 u32 ist = port->port_res[i].eq->attr.ist1; 1373 ibmebus_free_irq(ist, &port->port_res[i]); 1374 } 1375 1376out_free_qpeq: 1377 ibmebus_free_irq(port->qp_eq->attr.ist1, port); 1378 i = port->num_def_qps; 1379 1380 goto out; 1381 1382} 1383 1384static void ehea_free_interrupts(struct net_device *dev) 1385{ 1386 struct ehea_port *port = netdev_priv(dev); 1387 struct ehea_port_res *pr; 1388 int i; 1389 1390 /* send */ 1391 1392 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 1393 pr = &port->port_res[i]; 1394 ibmebus_free_irq(pr->eq->attr.ist1, pr); 1395 if (netif_msg_intr(port)) 1396 ehea_info("free send irq for res %d with handle 0x%X", 1397 i, pr->eq->attr.ist1); 1398 } 1399 1400 /* associated events */ 1401 ibmebus_free_irq(port->qp_eq->attr.ist1, port); 1402 if (netif_msg_intr(port)) 1403 ehea_info("associated event interrupt for handle 0x%X freed", 1404 port->qp_eq->attr.ist1); 1405} 1406 1407static int ehea_configure_port(struct ehea_port *port) 1408{ 1409 int ret, i; 1410 u64 hret, mask; 1411 struct hcp_ehea_port_cb0 *cb0; 1412 1413 ret = -ENOMEM; 1414 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 1415 if (!cb0) 1416 goto out; 1417 1418 cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1) 1419 | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1) 1420 | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1) 1421 | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1) 1422 | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER, 1423 PXLY_RC_VLAN_FILTER) 1424 | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1); 1425 1426 for (i = 0; i < port->num_mcs; i++) 1427 if (use_mcs) 1428 cb0->default_qpn_arr[i] = 1429 port->port_res[i].qp->init_attr.qp_nr; 1430 else 1431 cb0->default_qpn_arr[i] = 1432 port->port_res[0].qp->init_attr.qp_nr; 1433 1434 if (netif_msg_ifup(port)) 1435 ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port"); 1436 1437 mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1) 1438 | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1); 1439 1440 hret = ehea_h_modify_ehea_port(port->adapter->handle, 1441 port->logical_port_id, 1442 H_PORT_CB0, mask, cb0); 1443 ret = -EIO; 1444 if (hret != H_SUCCESS) 1445 goto out_free; 1446 1447 ret = 0; 1448 1449out_free: 1450 free_page((unsigned long)cb0); 1451out: 1452 return ret; 1453} 1454 1455int ehea_gen_smrs(struct ehea_port_res *pr) 1456{ 1457 int ret; 1458 struct ehea_adapter *adapter = pr->port->adapter; 1459 1460 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr); 1461 if (ret) 1462 goto out; 1463 1464 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr); 1465 if (ret) 1466 goto out_free; 1467 1468 return 0; 1469 1470out_free: 1471 ehea_rem_mr(&pr->send_mr); 1472out: 1473 ehea_error("Generating SMRS failed\n"); 1474 return -EIO; 1475} 1476 1477int ehea_rem_smrs(struct ehea_port_res *pr) 1478{ 1479 if ((ehea_rem_mr(&pr->send_mr)) || 1480 (ehea_rem_mr(&pr->recv_mr))) 1481 return -EIO; 1482 else 1483 return 0; 1484} 1485 1486static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries) 1487{ 1488 int arr_size = sizeof(void *) * max_q_entries; 1489 1490 q_skba->arr = vmalloc(arr_size); 1491 if (!q_skba->arr) 1492 return -ENOMEM; 1493 1494 memset(q_skba->arr, 0, arr_size); 1495 1496 q_skba->len = max_q_entries; 1497 q_skba->index = 0; 1498 q_skba->os_skbs = 0; 1499 1500 return 0; 1501} 1502 1503static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, 1504 struct port_res_cfg *pr_cfg, int queue_token) 1505{ 1506 struct ehea_adapter *adapter = port->adapter; 1507 enum ehea_eq_type eq_type = EHEA_EQ; 1508 struct ehea_qp_init_attr *init_attr = NULL; 1509 int ret = -EIO; 1510 1511 memset(pr, 0, sizeof(struct ehea_port_res)); 1512 1513 pr->port = port; 1514 spin_lock_init(&pr->xmit_lock); 1515 spin_lock_init(&pr->netif_queue); 1516 1517 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); 1518 if (!pr->eq) { 1519 ehea_error("create_eq failed (eq)"); 1520 goto out_free; 1521 } 1522 1523 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq, 1524 pr->eq->fw_handle, 1525 port->logical_port_id); 1526 if (!pr->recv_cq) { 1527 ehea_error("create_cq failed (cq_recv)"); 1528 goto out_free; 1529 } 1530 1531 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq, 1532 pr->eq->fw_handle, 1533 port->logical_port_id); 1534 if (!pr->send_cq) { 1535 ehea_error("create_cq failed (cq_send)"); 1536 goto out_free; 1537 } 1538 1539 if (netif_msg_ifup(port)) 1540 ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d", 1541 pr->send_cq->attr.act_nr_of_cqes, 1542 pr->recv_cq->attr.act_nr_of_cqes); 1543 1544 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 1545 if (!init_attr) { 1546 ret = -ENOMEM; 1547 ehea_error("no mem for ehea_qp_init_attr"); 1548 goto out_free; 1549 } 1550 1551 init_attr->low_lat_rq1 = 1; 1552 init_attr->signalingtype = 1; /* generate CQE if specified in WQE */ 1553 init_attr->rq_count = 3; 1554 init_attr->qp_token = queue_token; 1555 init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq; 1556 init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1; 1557 init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2; 1558 init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3; 1559 init_attr->wqe_size_enc_sq = EHEA_SG_SQ; 1560 init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1; 1561 init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2; 1562 init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3; 1563 init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD; 1564 init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD; 1565 init_attr->port_nr = port->logical_port_id; 1566 init_attr->send_cq_handle = pr->send_cq->fw_handle; 1567 init_attr->recv_cq_handle = pr->recv_cq->fw_handle; 1568 init_attr->aff_eq_handle = port->qp_eq->fw_handle; 1569 1570 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); 1571 if (!pr->qp) { 1572 ehea_error("create_qp failed"); 1573 ret = -EIO; 1574 goto out_free; 1575 } 1576 1577 if (netif_msg_ifup(port)) 1578 ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n " 1579 "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr, 1580 init_attr->act_nr_send_wqes, 1581 init_attr->act_nr_rwqes_rq1, 1582 init_attr->act_nr_rwqes_rq2, 1583 init_attr->act_nr_rwqes_rq3); 1584 1585 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; 1586 1587 ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size); 1588 ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1); 1589 ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1); 1590 ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1); 1591 if (ret) 1592 goto out_free; 1593 1594 pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10; 1595 if (ehea_gen_smrs(pr) != 0) { 1596 ret = -EIO; 1597 goto out_free; 1598 } 1599 1600 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1); 1601 1602 kfree(init_attr); 1603 1604 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64); 1605 1606 pr->lro_mgr.max_aggr = pr->port->lro_max_aggr; 1607 pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; 1608 pr->lro_mgr.lro_arr = pr->lro_desc; 1609 pr->lro_mgr.get_skb_header = get_skb_hdr; 1610 pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; 1611 pr->lro_mgr.dev = port->netdev; 1612 pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; 1613 pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; 1614 1615 ret = 0; 1616 goto out; 1617 1618out_free: 1619 kfree(init_attr); 1620 vfree(pr->sq_skba.arr); 1621 vfree(pr->rq1_skba.arr); 1622 vfree(pr->rq2_skba.arr); 1623 vfree(pr->rq3_skba.arr); 1624 ehea_destroy_qp(pr->qp); 1625 ehea_destroy_cq(pr->send_cq); 1626 ehea_destroy_cq(pr->recv_cq); 1627 ehea_destroy_eq(pr->eq); 1628out: 1629 return ret; 1630} 1631 1632static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr) 1633{ 1634 int ret, i; 1635 1636 if (pr->qp) 1637 netif_napi_del(&pr->napi); 1638 1639 ret = ehea_destroy_qp(pr->qp); 1640 1641 if (!ret) { 1642 ehea_destroy_cq(pr->send_cq); 1643 ehea_destroy_cq(pr->recv_cq); 1644 ehea_destroy_eq(pr->eq); 1645 1646 for (i = 0; i < pr->rq1_skba.len; i++) 1647 if (pr->rq1_skba.arr[i]) 1648 dev_kfree_skb(pr->rq1_skba.arr[i]); 1649 1650 for (i = 0; i < pr->rq2_skba.len; i++) 1651 if (pr->rq2_skba.arr[i]) 1652 dev_kfree_skb(pr->rq2_skba.arr[i]); 1653 1654 for (i = 0; i < pr->rq3_skba.len; i++) 1655 if (pr->rq3_skba.arr[i]) 1656 dev_kfree_skb(pr->rq3_skba.arr[i]); 1657 1658 for (i = 0; i < pr->sq_skba.len; i++) 1659 if (pr->sq_skba.arr[i]) 1660 dev_kfree_skb(pr->sq_skba.arr[i]); 1661 1662 vfree(pr->rq1_skba.arr); 1663 vfree(pr->rq2_skba.arr); 1664 vfree(pr->rq3_skba.arr); 1665 vfree(pr->sq_skba.arr); 1666 ret = ehea_rem_smrs(pr); 1667 } 1668 return ret; 1669} 1670 1671/* 1672 * The write_* functions store information in swqe which is used by 1673 * the hardware to calculate the ip/tcp/udp checksum 1674 */ 1675 1676static inline void write_ip_start_end(struct ehea_swqe *swqe, 1677 const struct sk_buff *skb) 1678{ 1679 swqe->ip_start = skb_network_offset(skb); 1680 swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1); 1681} 1682 1683static inline void write_tcp_offset_end(struct ehea_swqe *swqe, 1684 const struct sk_buff *skb) 1685{ 1686 swqe->tcp_offset = 1687 (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check)); 1688 1689 swqe->tcp_end = (u16)skb->len - 1; 1690} 1691 1692static inline void write_udp_offset_end(struct ehea_swqe *swqe, 1693 const struct sk_buff *skb) 1694{ 1695 swqe->tcp_offset = 1696 (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check)); 1697 1698 swqe->tcp_end = (u16)skb->len - 1; 1699} 1700 1701 1702static void write_swqe2_TSO(struct sk_buff *skb, 1703 struct ehea_swqe *swqe, u32 lkey) 1704{ 1705 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; 1706 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; 1707 int skb_data_size = skb_headlen(skb); 1708 int headersize; 1709 1710 /* Packet is TCP with TSO enabled */ 1711 swqe->tx_control |= EHEA_SWQE_TSO; 1712 swqe->mss = skb_shinfo(skb)->gso_size; 1713 /* copy only eth/ip/tcp headers to immediate data and 1714 * the rest of skb->data to sg1entry 1715 */ 1716 headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); 1717 1718 skb_data_size = skb_headlen(skb); 1719 1720 if (skb_data_size >= headersize) { 1721 /* copy immediate data */ 1722 skb_copy_from_linear_data(skb, imm_data, headersize); 1723 swqe->immediate_data_length = headersize; 1724 1725 if (skb_data_size > headersize) { 1726 /* set sg1entry data */ 1727 sg1entry->l_key = lkey; 1728 sg1entry->len = skb_data_size - headersize; 1729 sg1entry->vaddr = 1730 ehea_map_vaddr(skb->data + headersize); 1731 swqe->descriptors++; 1732 } 1733 } else 1734 ehea_error("cannot handle fragmented headers"); 1735} 1736 1737static void write_swqe2_nonTSO(struct sk_buff *skb, 1738 struct ehea_swqe *swqe, u32 lkey) 1739{ 1740 int skb_data_size = skb_headlen(skb); 1741 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; 1742 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; 1743 1744 /* Packet is any nonTSO type 1745 * 1746 * Copy as much as possible skb->data to immediate data and 1747 * the rest to sg1entry 1748 */ 1749 if (skb_data_size >= SWQE2_MAX_IMM) { 1750 /* copy immediate data */ 1751 skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM); 1752 1753 swqe->immediate_data_length = SWQE2_MAX_IMM; 1754 1755 if (skb_data_size > SWQE2_MAX_IMM) { 1756 /* copy sg1entry data */ 1757 sg1entry->l_key = lkey; 1758 sg1entry->len = skb_data_size - SWQE2_MAX_IMM; 1759 sg1entry->vaddr = 1760 ehea_map_vaddr(skb->data + SWQE2_MAX_IMM); 1761 swqe->descriptors++; 1762 } 1763 } else { 1764 skb_copy_from_linear_data(skb, imm_data, skb_data_size); 1765 swqe->immediate_data_length = skb_data_size; 1766 } 1767} 1768 1769static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, 1770 struct ehea_swqe *swqe, u32 lkey) 1771{ 1772 struct ehea_vsgentry *sg_list, *sg1entry, *sgentry; 1773 skb_frag_t *frag; 1774 int nfrags, sg1entry_contains_frag_data, i; 1775 1776 nfrags = skb_shinfo(skb)->nr_frags; 1777 sg1entry = &swqe->u.immdata_desc.sg_entry; 1778 sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list; 1779 swqe->descriptors = 0; 1780 sg1entry_contains_frag_data = 0; 1781 1782 if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size) 1783 write_swqe2_TSO(skb, swqe, lkey); 1784 else 1785 write_swqe2_nonTSO(skb, swqe, lkey); 1786 1787 /* write descriptors */ 1788 if (nfrags > 0) { 1789 if (swqe->descriptors == 0) { 1790 /* sg1entry not yet used */ 1791 frag = &skb_shinfo(skb)->frags[0]; 1792 1793 /* copy sg1entry data */ 1794 sg1entry->l_key = lkey; 1795 sg1entry->len = frag->size; 1796 sg1entry->vaddr = 1797 ehea_map_vaddr(page_address(frag->page) 1798 + frag->page_offset); 1799 swqe->descriptors++; 1800 sg1entry_contains_frag_data = 1; 1801 } 1802 1803 for (i = sg1entry_contains_frag_data; i < nfrags; i++) { 1804 1805 frag = &skb_shinfo(skb)->frags[i]; 1806 sgentry = &sg_list[i - sg1entry_contains_frag_data]; 1807 1808 sgentry->l_key = lkey; 1809 sgentry->len = frag->size; 1810 sgentry->vaddr = 1811 ehea_map_vaddr(page_address(frag->page) 1812 + frag->page_offset); 1813 swqe->descriptors++; 1814 } 1815 } 1816} 1817 1818static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) 1819{ 1820 int ret = 0; 1821 u64 hret; 1822 u8 reg_type; 1823 1824 /* De/Register untagged packets */ 1825 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED; 1826 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1827 port->logical_port_id, 1828 reg_type, port->mac_addr, 0, hcallid); 1829 if (hret != H_SUCCESS) { 1830 ehea_error("%sregistering bc address failed (tagged)", 1831 hcallid == H_REG_BCMC ? "" : "de"); 1832 ret = -EIO; 1833 goto out_herr; 1834 } 1835 1836 /* De/Register VLAN packets */ 1837 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL; 1838 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1839 port->logical_port_id, 1840 reg_type, port->mac_addr, 0, hcallid); 1841 if (hret != H_SUCCESS) { 1842 ehea_error("%sregistering bc address failed (vlan)", 1843 hcallid == H_REG_BCMC ? "" : "de"); 1844 ret = -EIO; 1845 } 1846out_herr: 1847 return ret; 1848} 1849 1850static int ehea_set_mac_addr(struct net_device *dev, void *sa) 1851{ 1852 struct ehea_port *port = netdev_priv(dev); 1853 struct sockaddr *mac_addr = sa; 1854 struct hcp_ehea_port_cb0 *cb0; 1855 int ret; 1856 u64 hret; 1857 1858 if (!is_valid_ether_addr(mac_addr->sa_data)) { 1859 ret = -EADDRNOTAVAIL; 1860 goto out; 1861 } 1862 1863 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 1864 if (!cb0) { 1865 ehea_error("no mem for cb0"); 1866 ret = -ENOMEM; 1867 goto out; 1868 } 1869 1870 memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN); 1871 1872 cb0->port_mac_addr = cb0->port_mac_addr >> 16; 1873 1874 hret = ehea_h_modify_ehea_port(port->adapter->handle, 1875 port->logical_port_id, H_PORT_CB0, 1876 EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0); 1877 if (hret != H_SUCCESS) { 1878 ret = -EIO; 1879 goto out_free; 1880 } 1881 1882 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); 1883 1884 /* Deregister old MAC in pHYP */ 1885 if (port->state == EHEA_PORT_UP) { 1886 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 1887 if (ret) 1888 goto out_upregs; 1889 } 1890 1891 port->mac_addr = cb0->port_mac_addr << 16; 1892 1893 /* Register new MAC in pHYP */ 1894 if (port->state == EHEA_PORT_UP) { 1895 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); 1896 if (ret) 1897 goto out_upregs; 1898 } 1899 1900 ret = 0; 1901 1902out_upregs: 1903 ehea_update_bcmc_registrations(); 1904out_free: 1905 free_page((unsigned long)cb0); 1906out: 1907 return ret; 1908} 1909 1910static void ehea_promiscuous_error(u64 hret, int enable) 1911{ 1912 if (hret == H_AUTHORITY) 1913 ehea_info("Hypervisor denied %sabling promiscuous mode", 1914 enable == 1 ? "en" : "dis"); 1915 else 1916 ehea_error("failed %sabling promiscuous mode", 1917 enable == 1 ? "en" : "dis"); 1918} 1919 1920static void ehea_promiscuous(struct net_device *dev, int enable) 1921{ 1922 struct ehea_port *port = netdev_priv(dev); 1923 struct hcp_ehea_port_cb7 *cb7; 1924 u64 hret; 1925 1926 if ((enable && port->promisc) || (!enable && !port->promisc)) 1927 return; 1928 1929 cb7 = (void *)get_zeroed_page(GFP_ATOMIC); 1930 if (!cb7) { 1931 ehea_error("no mem for cb7"); 1932 goto out; 1933 } 1934 1935 /* Modify Pxs_DUCQPN in CB7 */ 1936 cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0; 1937 1938 hret = ehea_h_modify_ehea_port(port->adapter->handle, 1939 port->logical_port_id, 1940 H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7); 1941 if (hret) { 1942 ehea_promiscuous_error(hret, enable); 1943 goto out; 1944 } 1945 1946 port->promisc = enable; 1947out: 1948 free_page((unsigned long)cb7); 1949} 1950 1951static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr, 1952 u32 hcallid) 1953{ 1954 u64 hret; 1955 u8 reg_type; 1956 1957 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST 1958 | EHEA_BCMC_UNTAGGED; 1959 1960 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1961 port->logical_port_id, 1962 reg_type, mc_mac_addr, 0, hcallid); 1963 if (hret) 1964 goto out; 1965 1966 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST 1967 | EHEA_BCMC_VLANID_ALL; 1968 1969 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, 1970 port->logical_port_id, 1971 reg_type, mc_mac_addr, 0, hcallid); 1972out: 1973 return hret; 1974} 1975 1976static int ehea_drop_multicast_list(struct net_device *dev) 1977{ 1978 struct ehea_port *port = netdev_priv(dev); 1979 struct ehea_mc_list *mc_entry = port->mc_list; 1980 struct list_head *pos; 1981 struct list_head *temp; 1982 int ret = 0; 1983 u64 hret; 1984 1985 list_for_each_safe(pos, temp, &(port->mc_list->list)) { 1986 mc_entry = list_entry(pos, struct ehea_mc_list, list); 1987 1988 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, 1989 H_DEREG_BCMC); 1990 if (hret) { 1991 ehea_error("failed deregistering mcast MAC"); 1992 ret = -EIO; 1993 } 1994 1995 list_del(pos); 1996 kfree(mc_entry); 1997 } 1998 return ret; 1999} 2000 2001static void ehea_allmulti(struct net_device *dev, int enable) 2002{ 2003 struct ehea_port *port = netdev_priv(dev); 2004 u64 hret; 2005 2006 if (!port->allmulti) { 2007 if (enable) { 2008 /* Enable ALLMULTI */ 2009 ehea_drop_multicast_list(dev); 2010 hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC); 2011 if (!hret) 2012 port->allmulti = 1; 2013 else 2014 ehea_error("failed enabling IFF_ALLMULTI"); 2015 } 2016 } else 2017 if (!enable) { 2018 /* Disable ALLMULTI */ 2019 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC); 2020 if (!hret) 2021 port->allmulti = 0; 2022 else 2023 ehea_error("failed disabling IFF_ALLMULTI"); 2024 } 2025} 2026 2027static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) 2028{ 2029 struct ehea_mc_list *ehea_mcl_entry; 2030 u64 hret; 2031 2032 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC); 2033 if (!ehea_mcl_entry) { 2034 ehea_error("no mem for mcl_entry"); 2035 return; 2036 } 2037 2038 INIT_LIST_HEAD(&ehea_mcl_entry->list); 2039 2040 memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN); 2041 2042 hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr, 2043 H_REG_BCMC); 2044 if (!hret) 2045 list_add(&ehea_mcl_entry->list, &port->mc_list->list); 2046 else { 2047 ehea_error("failed registering mcast MAC"); 2048 kfree(ehea_mcl_entry); 2049 } 2050} 2051 2052static void ehea_set_multicast_list(struct net_device *dev) 2053{ 2054 struct ehea_port *port = netdev_priv(dev); 2055 struct netdev_hw_addr *ha; 2056 int ret; 2057 2058 if (dev->flags & IFF_PROMISC) { 2059 ehea_promiscuous(dev, 1); 2060 return; 2061 } 2062 ehea_promiscuous(dev, 0); 2063 2064 if (dev->flags & IFF_ALLMULTI) { 2065 ehea_allmulti(dev, 1); 2066 goto out; 2067 } 2068 ehea_allmulti(dev, 0); 2069 2070 if (!netdev_mc_empty(dev)) { 2071 ret = ehea_drop_multicast_list(dev); 2072 if (ret) { 2073 /* Dropping the current multicast list failed. 2074 * Enabling ALL_MULTI is the best we can do. 2075 */ 2076 ehea_allmulti(dev, 1); 2077 } 2078 2079 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) { 2080 ehea_info("Mcast registration limit reached (0x%llx). " 2081 "Use ALLMULTI!", 2082 port->adapter->max_mc_mac); 2083 goto out; 2084 } 2085 2086 netdev_for_each_mc_addr(ha, dev) 2087 ehea_add_multicast_entry(port, ha->addr); 2088 2089 } 2090out: 2091 ehea_update_bcmc_registrations(); 2092} 2093 2094static int ehea_change_mtu(struct net_device *dev, int new_mtu) 2095{ 2096 if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE)) 2097 return -EINVAL; 2098 dev->mtu = new_mtu; 2099 return 0; 2100} 2101 2102static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev, 2103 struct ehea_swqe *swqe, u32 lkey) 2104{ 2105 if (skb->protocol == htons(ETH_P_IP)) { 2106 const struct iphdr *iph = ip_hdr(skb); 2107 2108 /* IPv4 */ 2109 swqe->tx_control |= EHEA_SWQE_CRC 2110 | EHEA_SWQE_IP_CHECKSUM 2111 | EHEA_SWQE_TCP_CHECKSUM 2112 | EHEA_SWQE_IMM_DATA_PRESENT 2113 | EHEA_SWQE_DESCRIPTORS_PRESENT; 2114 2115 write_ip_start_end(swqe, skb); 2116 2117 if (iph->protocol == IPPROTO_UDP) { 2118 if ((iph->frag_off & IP_MF) || 2119 (iph->frag_off & IP_OFFSET)) 2120 /* IP fragment, so don't change cs */ 2121 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; 2122 else 2123 write_udp_offset_end(swqe, skb); 2124 } else if (iph->protocol == IPPROTO_TCP) { 2125 write_tcp_offset_end(swqe, skb); 2126 } 2127 2128 /* icmp (big data) and ip segmentation packets (all other ip 2129 packets) do not require any special handling */ 2130 2131 } else { 2132 /* Other Ethernet Protocol */ 2133 swqe->tx_control |= EHEA_SWQE_CRC 2134 | EHEA_SWQE_IMM_DATA_PRESENT 2135 | EHEA_SWQE_DESCRIPTORS_PRESENT; 2136 } 2137 2138 write_swqe2_data(skb, dev, swqe, lkey); 2139} 2140 2141static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev, 2142 struct ehea_swqe *swqe) 2143{ 2144 int nfrags = skb_shinfo(skb)->nr_frags; 2145 u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0]; 2146 skb_frag_t *frag; 2147 int i; 2148 2149 if (skb->protocol == htons(ETH_P_IP)) { 2150 const struct iphdr *iph = ip_hdr(skb); 2151 2152 /* IPv4 */ 2153 write_ip_start_end(swqe, skb); 2154 2155 if (iph->protocol == IPPROTO_TCP) { 2156 swqe->tx_control |= EHEA_SWQE_CRC 2157 | EHEA_SWQE_IP_CHECKSUM 2158 | EHEA_SWQE_TCP_CHECKSUM 2159 | EHEA_SWQE_IMM_DATA_PRESENT; 2160 2161 write_tcp_offset_end(swqe, skb); 2162 2163 } else if (iph->protocol == IPPROTO_UDP) { 2164 if ((iph->frag_off & IP_MF) || 2165 (iph->frag_off & IP_OFFSET)) 2166 /* IP fragment, so don't change cs */ 2167 swqe->tx_control |= EHEA_SWQE_CRC 2168 | EHEA_SWQE_IMM_DATA_PRESENT; 2169 else { 2170 swqe->tx_control |= EHEA_SWQE_CRC 2171 | EHEA_SWQE_IP_CHECKSUM 2172 | EHEA_SWQE_TCP_CHECKSUM 2173 | EHEA_SWQE_IMM_DATA_PRESENT; 2174 2175 write_udp_offset_end(swqe, skb); 2176 } 2177 } else { 2178 /* icmp (big data) and 2179 ip segmentation packets (all other ip packets) */ 2180 swqe->tx_control |= EHEA_SWQE_CRC 2181 | EHEA_SWQE_IP_CHECKSUM 2182 | EHEA_SWQE_IMM_DATA_PRESENT; 2183 } 2184 } else { 2185 /* Other Ethernet Protocol */ 2186 swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT; 2187 } 2188 /* copy (immediate) data */ 2189 if (nfrags == 0) { 2190 /* data is in a single piece */ 2191 skb_copy_from_linear_data(skb, imm_data, skb->len); 2192 } else { 2193 /* first copy data from the skb->data buffer ... */ 2194 skb_copy_from_linear_data(skb, imm_data, 2195 skb_headlen(skb)); 2196 imm_data += skb_headlen(skb); 2197 2198 /* ... then copy data from the fragments */ 2199 for (i = 0; i < nfrags; i++) { 2200 frag = &skb_shinfo(skb)->frags[i]; 2201 memcpy(imm_data, 2202 page_address(frag->page) + frag->page_offset, 2203 frag->size); 2204 imm_data += frag->size; 2205 } 2206 } 2207 swqe->immediate_data_length = skb->len; 2208 dev_kfree_skb(skb); 2209} 2210 2211static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps) 2212{ 2213 struct tcphdr *tcp; 2214 u32 tmp; 2215 2216 if ((skb->protocol == htons(ETH_P_IP)) && 2217 (ip_hdr(skb)->protocol == IPPROTO_TCP)) { 2218 tcp = (struct tcphdr *)(skb_network_header(skb) + 2219 (ip_hdr(skb)->ihl * 4)); 2220 tmp = (tcp->source + (tcp->dest << 16)) % 31; 2221 tmp += ip_hdr(skb)->daddr % 31; 2222 return tmp % num_qps; 2223 } else 2224 return 0; 2225} 2226 2227static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) 2228{ 2229 struct ehea_port *port = netdev_priv(dev); 2230 struct ehea_swqe *swqe; 2231 unsigned long flags; 2232 u32 lkey; 2233 int swqe_index; 2234 struct ehea_port_res *pr; 2235 2236 pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)]; 2237 2238 if (!spin_trylock(&pr->xmit_lock)) 2239 return NETDEV_TX_BUSY; 2240 2241 if (pr->queue_stopped) { 2242 spin_unlock(&pr->xmit_lock); 2243 return NETDEV_TX_BUSY; 2244 } 2245 2246 swqe = ehea_get_swqe(pr->qp, &swqe_index); 2247 memset(swqe, 0, SWQE_HEADER_SIZE); 2248 atomic_dec(&pr->swqe_avail); 2249 2250 if (skb->len <= SWQE3_MAX_IMM) { 2251 u32 sig_iv = port->sig_comp_iv; 2252 u32 swqe_num = pr->swqe_id_counter; 2253 ehea_xmit3(skb, dev, swqe); 2254 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE) 2255 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num); 2256 if (pr->swqe_ll_count >= (sig_iv - 1)) { 2257 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 2258 sig_iv); 2259 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; 2260 pr->swqe_ll_count = 0; 2261 } else 2262 pr->swqe_ll_count += 1; 2263 } else { 2264 swqe->wr_id = 2265 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE) 2266 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) 2267 | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1) 2268 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); 2269 pr->sq_skba.arr[pr->sq_skba.index] = skb; 2270 2271 pr->sq_skba.index++; 2272 pr->sq_skba.index &= (pr->sq_skba.len - 1); 2273 2274 lkey = pr->send_mr.lkey; 2275 ehea_xmit2(skb, dev, swqe, lkey); 2276 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION; 2277 } 2278 pr->swqe_id_counter += 1; 2279 2280 if (port->vgrp && vlan_tx_tag_present(skb)) { 2281 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT; 2282 swqe->vlan_tag = vlan_tx_tag_get(skb); 2283 } 2284 2285 if (netif_msg_tx_queued(port)) { 2286 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr); 2287 ehea_dump(swqe, 512, "swqe"); 2288 } 2289 2290 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { 2291 netif_stop_queue(dev); 2292 swqe->tx_control |= EHEA_SWQE_PURGE; 2293 } 2294 2295 ehea_post_swqe(pr->qp, swqe); 2296 pr->tx_packets++; 2297 2298 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { 2299 spin_lock_irqsave(&pr->netif_queue, flags); 2300 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { 2301 pr->p_stats.queue_stopped++; 2302 netif_stop_queue(dev); 2303 pr->queue_stopped = 1; 2304 } 2305 spin_unlock_irqrestore(&pr->netif_queue, flags); 2306 } 2307 dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ 2308 spin_unlock(&pr->xmit_lock); 2309 2310 return NETDEV_TX_OK; 2311} 2312 2313static void ehea_vlan_rx_register(struct net_device *dev, 2314 struct vlan_group *grp) 2315{ 2316 struct ehea_port *port = netdev_priv(dev); 2317 struct ehea_adapter *adapter = port->adapter; 2318 struct hcp_ehea_port_cb1 *cb1; 2319 u64 hret; 2320 2321 port->vgrp = grp; 2322 2323 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2324 if (!cb1) { 2325 ehea_error("no mem for cb1"); 2326 goto out; 2327 } 2328 2329 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2330 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2331 if (hret != H_SUCCESS) 2332 ehea_error("modify_ehea_port failed"); 2333 2334 free_page((unsigned long)cb1); 2335out: 2336 return; 2337} 2338 2339static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 2340{ 2341 struct ehea_port *port = netdev_priv(dev); 2342 struct ehea_adapter *adapter = port->adapter; 2343 struct hcp_ehea_port_cb1 *cb1; 2344 int index; 2345 u64 hret; 2346 2347 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2348 if (!cb1) { 2349 ehea_error("no mem for cb1"); 2350 goto out; 2351 } 2352 2353 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, 2354 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2355 if (hret != H_SUCCESS) { 2356 ehea_error("query_ehea_port failed"); 2357 goto out; 2358 } 2359 2360 index = (vid / 64); 2361 cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F))); 2362 2363 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2364 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2365 if (hret != H_SUCCESS) 2366 ehea_error("modify_ehea_port failed"); 2367out: 2368 free_page((unsigned long)cb1); 2369 return; 2370} 2371 2372static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 2373{ 2374 struct ehea_port *port = netdev_priv(dev); 2375 struct ehea_adapter *adapter = port->adapter; 2376 struct hcp_ehea_port_cb1 *cb1; 2377 int index; 2378 u64 hret; 2379 2380 vlan_group_set_device(port->vgrp, vid, NULL); 2381 2382 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2383 if (!cb1) { 2384 ehea_error("no mem for cb1"); 2385 goto out; 2386 } 2387 2388 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, 2389 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2390 if (hret != H_SUCCESS) { 2391 ehea_error("query_ehea_port failed"); 2392 goto out; 2393 } 2394 2395 index = (vid / 64); 2396 cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F))); 2397 2398 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2399 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2400 if (hret != H_SUCCESS) 2401 ehea_error("modify_ehea_port failed"); 2402out: 2403 free_page((unsigned long)cb1); 2404} 2405 2406int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) 2407{ 2408 int ret = -EIO; 2409 u64 hret; 2410 u16 dummy16 = 0; 2411 u64 dummy64 = 0; 2412 struct hcp_modify_qp_cb0 *cb0; 2413 2414 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 2415 if (!cb0) { 2416 ret = -ENOMEM; 2417 goto out; 2418 } 2419 2420 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2421 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2422 if (hret != H_SUCCESS) { 2423 ehea_error("query_ehea_qp failed (1)"); 2424 goto out; 2425 } 2426 2427 cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED; 2428 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, 2429 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2430 &dummy64, &dummy64, &dummy16, &dummy16); 2431 if (hret != H_SUCCESS) { 2432 ehea_error("modify_ehea_qp failed (1)"); 2433 goto out; 2434 } 2435 2436 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2437 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2438 if (hret != H_SUCCESS) { 2439 ehea_error("query_ehea_qp failed (2)"); 2440 goto out; 2441 } 2442 2443 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED; 2444 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, 2445 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2446 &dummy64, &dummy64, &dummy16, &dummy16); 2447 if (hret != H_SUCCESS) { 2448 ehea_error("modify_ehea_qp failed (2)"); 2449 goto out; 2450 } 2451 2452 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2453 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2454 if (hret != H_SUCCESS) { 2455 ehea_error("query_ehea_qp failed (3)"); 2456 goto out; 2457 } 2458 2459 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND; 2460 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, 2461 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2462 &dummy64, &dummy64, &dummy16, &dummy16); 2463 if (hret != H_SUCCESS) { 2464 ehea_error("modify_ehea_qp failed (3)"); 2465 goto out; 2466 } 2467 2468 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2469 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2470 if (hret != H_SUCCESS) { 2471 ehea_error("query_ehea_qp failed (4)"); 2472 goto out; 2473 } 2474 2475 ret = 0; 2476out: 2477 free_page((unsigned long)cb0); 2478 return ret; 2479} 2480 2481static int ehea_port_res_setup(struct ehea_port *port, int def_qps, 2482 int add_tx_qps) 2483{ 2484 int ret, i; 2485 struct port_res_cfg pr_cfg, pr_cfg_small_rx; 2486 enum ehea_eq_type eq_type = EHEA_EQ; 2487 2488 port->qp_eq = ehea_create_eq(port->adapter, eq_type, 2489 EHEA_MAX_ENTRIES_EQ, 1); 2490 if (!port->qp_eq) { 2491 ret = -EINVAL; 2492 ehea_error("ehea_create_eq failed (qp_eq)"); 2493 goto out_kill_eq; 2494 } 2495 2496 pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries; 2497 pr_cfg.max_entries_scq = sq_entries * 2; 2498 pr_cfg.max_entries_sq = sq_entries; 2499 pr_cfg.max_entries_rq1 = rq1_entries; 2500 pr_cfg.max_entries_rq2 = rq2_entries; 2501 pr_cfg.max_entries_rq3 = rq3_entries; 2502 2503 pr_cfg_small_rx.max_entries_rcq = 1; 2504 pr_cfg_small_rx.max_entries_scq = sq_entries; 2505 pr_cfg_small_rx.max_entries_sq = sq_entries; 2506 pr_cfg_small_rx.max_entries_rq1 = 1; 2507 pr_cfg_small_rx.max_entries_rq2 = 1; 2508 pr_cfg_small_rx.max_entries_rq3 = 1; 2509 2510 for (i = 0; i < def_qps; i++) { 2511 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i); 2512 if (ret) 2513 goto out_clean_pr; 2514 } 2515 for (i = def_qps; i < def_qps + add_tx_qps; i++) { 2516 ret = ehea_init_port_res(port, &port->port_res[i], 2517 &pr_cfg_small_rx, i); 2518 if (ret) 2519 goto out_clean_pr; 2520 } 2521 2522 return 0; 2523 2524out_clean_pr: 2525 while (--i >= 0) 2526 ehea_clean_portres(port, &port->port_res[i]); 2527 2528out_kill_eq: 2529 ehea_destroy_eq(port->qp_eq); 2530 return ret; 2531} 2532 2533static int ehea_clean_all_portres(struct ehea_port *port) 2534{ 2535 int ret = 0; 2536 int i; 2537 2538 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 2539 ret |= ehea_clean_portres(port, &port->port_res[i]); 2540 2541 ret |= ehea_destroy_eq(port->qp_eq); 2542 2543 return ret; 2544} 2545 2546static void ehea_remove_adapter_mr(struct ehea_adapter *adapter) 2547{ 2548 if (adapter->active_ports) 2549 return; 2550 2551 ehea_rem_mr(&adapter->mr); 2552} 2553 2554static int ehea_add_adapter_mr(struct ehea_adapter *adapter) 2555{ 2556 if (adapter->active_ports) 2557 return 0; 2558 2559 return ehea_reg_kernel_mr(adapter, &adapter->mr); 2560} 2561 2562static int ehea_up(struct net_device *dev) 2563{ 2564 int ret, i; 2565 struct ehea_port *port = netdev_priv(dev); 2566 2567 if (port->state == EHEA_PORT_UP) 2568 return 0; 2569 2570 ret = ehea_port_res_setup(port, port->num_def_qps, 2571 port->num_add_tx_qps); 2572 if (ret) { 2573 ehea_error("port_res_failed"); 2574 goto out; 2575 } 2576 2577 /* Set default QP for this port */ 2578 ret = ehea_configure_port(port); 2579 if (ret) { 2580 ehea_error("ehea_configure_port failed. ret:%d", ret); 2581 goto out_clean_pr; 2582 } 2583 2584 ret = ehea_reg_interrupts(dev); 2585 if (ret) { 2586 ehea_error("reg_interrupts failed. ret:%d", ret); 2587 goto out_clean_pr; 2588 } 2589 2590 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 2591 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); 2592 if (ret) { 2593 ehea_error("activate_qp failed"); 2594 goto out_free_irqs; 2595 } 2596 } 2597 2598 for (i = 0; i < port->num_def_qps; i++) { 2599 ret = ehea_fill_port_res(&port->port_res[i]); 2600 if (ret) { 2601 ehea_error("out_free_irqs"); 2602 goto out_free_irqs; 2603 } 2604 } 2605 2606 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); 2607 if (ret) { 2608 ret = -EIO; 2609 goto out_free_irqs; 2610 } 2611 2612 port->state = EHEA_PORT_UP; 2613 2614 ret = 0; 2615 goto out; 2616 2617out_free_irqs: 2618 ehea_free_interrupts(dev); 2619 2620out_clean_pr: 2621 ehea_clean_all_portres(port); 2622out: 2623 if (ret) 2624 ehea_info("Failed starting %s. ret=%i", dev->name, ret); 2625 2626 ehea_update_bcmc_registrations(); 2627 ehea_update_firmware_handles(); 2628 2629 return ret; 2630} 2631 2632static void port_napi_disable(struct ehea_port *port) 2633{ 2634 int i; 2635 2636 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 2637 napi_disable(&port->port_res[i].napi); 2638} 2639 2640static void port_napi_enable(struct ehea_port *port) 2641{ 2642 int i; 2643 2644 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 2645 napi_enable(&port->port_res[i].napi); 2646} 2647 2648static int ehea_open(struct net_device *dev) 2649{ 2650 int ret; 2651 struct ehea_port *port = netdev_priv(dev); 2652 2653 mutex_lock(&port->port_lock); 2654 2655 if (netif_msg_ifup(port)) 2656 ehea_info("enabling port %s", dev->name); 2657 2658 ret = ehea_up(dev); 2659 if (!ret) { 2660 port_napi_enable(port); 2661 netif_start_queue(dev); 2662 } 2663 2664 mutex_unlock(&port->port_lock); 2665 2666 return ret; 2667} 2668 2669static int ehea_down(struct net_device *dev) 2670{ 2671 int ret; 2672 struct ehea_port *port = netdev_priv(dev); 2673 2674 if (port->state == EHEA_PORT_DOWN) 2675 return 0; 2676 2677 ehea_drop_multicast_list(dev); 2678 ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 2679 2680 ehea_free_interrupts(dev); 2681 2682 port->state = EHEA_PORT_DOWN; 2683 2684 ehea_update_bcmc_registrations(); 2685 2686 ret = ehea_clean_all_portres(port); 2687 if (ret) 2688 ehea_info("Failed freeing resources for %s. ret=%i", 2689 dev->name, ret); 2690 2691 ehea_update_firmware_handles(); 2692 2693 return ret; 2694} 2695 2696static int ehea_stop(struct net_device *dev) 2697{ 2698 int ret; 2699 struct ehea_port *port = netdev_priv(dev); 2700 2701 if (netif_msg_ifdown(port)) 2702 ehea_info("disabling port %s", dev->name); 2703 2704 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); 2705 cancel_work_sync(&port->reset_task); 2706 mutex_lock(&port->port_lock); 2707 netif_stop_queue(dev); 2708 port_napi_disable(port); 2709 ret = ehea_down(dev); 2710 mutex_unlock(&port->port_lock); 2711 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); 2712 return ret; 2713} 2714 2715static void ehea_purge_sq(struct ehea_qp *orig_qp) 2716{ 2717 struct ehea_qp qp = *orig_qp; 2718 struct ehea_qp_init_attr *init_attr = &qp.init_attr; 2719 struct ehea_swqe *swqe; 2720 int wqe_index; 2721 int i; 2722 2723 for (i = 0; i < init_attr->act_nr_send_wqes; i++) { 2724 swqe = ehea_get_swqe(&qp, &wqe_index); 2725 swqe->tx_control |= EHEA_SWQE_PURGE; 2726 } 2727} 2728 2729static void ehea_flush_sq(struct ehea_port *port) 2730{ 2731 int i; 2732 2733 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 2734 struct ehea_port_res *pr = &port->port_res[i]; 2735 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count; 2736 int k = 0; 2737 while (atomic_read(&pr->swqe_avail) < swqe_max) { 2738 msleep(5); 2739 if (++k == 20) { 2740 ehea_error("WARNING: sq not flushed completely"); 2741 break; 2742 } 2743 } 2744 } 2745} 2746 2747int ehea_stop_qps(struct net_device *dev) 2748{ 2749 struct ehea_port *port = netdev_priv(dev); 2750 struct ehea_adapter *adapter = port->adapter; 2751 struct hcp_modify_qp_cb0 *cb0; 2752 int ret = -EIO; 2753 int dret; 2754 int i; 2755 u64 hret; 2756 u64 dummy64 = 0; 2757 u16 dummy16 = 0; 2758 2759 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 2760 if (!cb0) { 2761 ret = -ENOMEM; 2762 goto out; 2763 } 2764 2765 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) { 2766 struct ehea_port_res *pr = &port->port_res[i]; 2767 struct ehea_qp *qp = pr->qp; 2768 2769 /* Purge send queue */ 2770 ehea_purge_sq(qp); 2771 2772 /* Disable queue pair */ 2773 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2774 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2775 cb0); 2776 if (hret != H_SUCCESS) { 2777 ehea_error("query_ehea_qp failed (1)"); 2778 goto out; 2779 } 2780 2781 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; 2782 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED; 2783 2784 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, 2785 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 2786 1), cb0, &dummy64, 2787 &dummy64, &dummy16, &dummy16); 2788 if (hret != H_SUCCESS) { 2789 ehea_error("modify_ehea_qp failed (1)"); 2790 goto out; 2791 } 2792 2793 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2794 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2795 cb0); 2796 if (hret != H_SUCCESS) { 2797 ehea_error("query_ehea_qp failed (2)"); 2798 goto out; 2799 } 2800 2801 /* deregister shared memory regions */ 2802 dret = ehea_rem_smrs(pr); 2803 if (dret) { 2804 ehea_error("unreg shared memory region failed"); 2805 goto out; 2806 } 2807 } 2808 2809 ret = 0; 2810out: 2811 free_page((unsigned long)cb0); 2812 2813 return ret; 2814} 2815 2816void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) 2817{ 2818 struct ehea_qp qp = *orig_qp; 2819 struct ehea_qp_init_attr *init_attr = &qp.init_attr; 2820 struct ehea_rwqe *rwqe; 2821 struct sk_buff **skba_rq2 = pr->rq2_skba.arr; 2822 struct sk_buff **skba_rq3 = pr->rq3_skba.arr; 2823 struct sk_buff *skb; 2824 u32 lkey = pr->recv_mr.lkey; 2825 2826 2827 int i; 2828 int index; 2829 2830 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) { 2831 rwqe = ehea_get_next_rwqe(&qp, 2); 2832 rwqe->sg_list[0].l_key = lkey; 2833 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); 2834 skb = skba_rq2[index]; 2835 if (skb) 2836 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); 2837 } 2838 2839 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) { 2840 rwqe = ehea_get_next_rwqe(&qp, 3); 2841 rwqe->sg_list[0].l_key = lkey; 2842 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id); 2843 skb = skba_rq3[index]; 2844 if (skb) 2845 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); 2846 } 2847} 2848 2849int ehea_restart_qps(struct net_device *dev) 2850{ 2851 struct ehea_port *port = netdev_priv(dev); 2852 struct ehea_adapter *adapter = port->adapter; 2853 int ret = 0; 2854 int i; 2855 2856 struct hcp_modify_qp_cb0 *cb0; 2857 u64 hret; 2858 u64 dummy64 = 0; 2859 u16 dummy16 = 0; 2860 2861 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 2862 if (!cb0) { 2863 ret = -ENOMEM; 2864 goto out; 2865 } 2866 2867 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) { 2868 struct ehea_port_res *pr = &port->port_res[i]; 2869 struct ehea_qp *qp = pr->qp; 2870 2871 ret = ehea_gen_smrs(pr); 2872 if (ret) { 2873 ehea_error("creation of shared memory regions failed"); 2874 goto out; 2875 } 2876 2877 ehea_update_rqs(qp, pr); 2878 2879 /* Enable queue pair */ 2880 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2881 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2882 cb0); 2883 if (hret != H_SUCCESS) { 2884 ehea_error("query_ehea_qp failed (1)"); 2885 goto out; 2886 } 2887 2888 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8; 2889 cb0->qp_ctl_reg |= H_QP_CR_ENABLED; 2890 2891 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, 2892 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 2893 1), cb0, &dummy64, 2894 &dummy64, &dummy16, &dummy16); 2895 if (hret != H_SUCCESS) { 2896 ehea_error("modify_ehea_qp failed (1)"); 2897 goto out; 2898 } 2899 2900 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2901 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2902 cb0); 2903 if (hret != H_SUCCESS) { 2904 ehea_error("query_ehea_qp failed (2)"); 2905 goto out; 2906 } 2907 2908 /* refill entire queue */ 2909 ehea_refill_rq1(pr, pr->rq1_skba.index, 0); 2910 ehea_refill_rq2(pr, 0); 2911 ehea_refill_rq3(pr, 0); 2912 } 2913out: 2914 free_page((unsigned long)cb0); 2915 2916 return ret; 2917} 2918 2919static void ehea_reset_port(struct work_struct *work) 2920{ 2921 int ret; 2922 struct ehea_port *port = 2923 container_of(work, struct ehea_port, reset_task); 2924 struct net_device *dev = port->netdev; 2925 2926 mutex_lock(&dlpar_mem_lock); 2927 port->resets++; 2928 mutex_lock(&port->port_lock); 2929 netif_stop_queue(dev); 2930 2931 port_napi_disable(port); 2932 2933 ehea_down(dev); 2934 2935 ret = ehea_up(dev); 2936 if (ret) 2937 goto out; 2938 2939 ehea_set_multicast_list(dev); 2940 2941 if (netif_msg_timer(port)) 2942 ehea_info("Device %s resetted successfully", dev->name); 2943 2944 port_napi_enable(port); 2945 2946 netif_wake_queue(dev); 2947out: 2948 mutex_unlock(&port->port_lock); 2949 mutex_unlock(&dlpar_mem_lock); 2950} 2951 2952static void ehea_rereg_mrs(struct work_struct *work) 2953{ 2954 int ret, i; 2955 struct ehea_adapter *adapter; 2956 2957 ehea_info("LPAR memory changed - re-initializing driver"); 2958 2959 list_for_each_entry(adapter, &adapter_list, list) 2960 if (adapter->active_ports) { 2961 /* Shutdown all ports */ 2962 for (i = 0; i < EHEA_MAX_PORTS; i++) { 2963 struct ehea_port *port = adapter->port[i]; 2964 struct net_device *dev; 2965 2966 if (!port) 2967 continue; 2968 2969 dev = port->netdev; 2970 2971 if (dev->flags & IFF_UP) { 2972 mutex_lock(&port->port_lock); 2973 netif_stop_queue(dev); 2974 ehea_flush_sq(port); 2975 ret = ehea_stop_qps(dev); 2976 if (ret) { 2977 mutex_unlock(&port->port_lock); 2978 goto out; 2979 } 2980 port_napi_disable(port); 2981 mutex_unlock(&port->port_lock); 2982 } 2983 reset_sq_restart_flag(port); 2984 } 2985 2986 /* Unregister old memory region */ 2987 ret = ehea_rem_mr(&adapter->mr); 2988 if (ret) { 2989 ehea_error("unregister MR failed - driver" 2990 " inoperable!"); 2991 goto out; 2992 } 2993 } 2994 2995 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 2996 2997 list_for_each_entry(adapter, &adapter_list, list) 2998 if (adapter->active_ports) { 2999 /* Register new memory region */ 3000 ret = ehea_reg_kernel_mr(adapter, &adapter->mr); 3001 if (ret) { 3002 ehea_error("register MR failed - driver" 3003 " inoperable!"); 3004 goto out; 3005 } 3006 3007 /* Restart all ports */ 3008 for (i = 0; i < EHEA_MAX_PORTS; i++) { 3009 struct ehea_port *port = adapter->port[i]; 3010 3011 if (port) { 3012 struct net_device *dev = port->netdev; 3013 3014 if (dev->flags & IFF_UP) { 3015 mutex_lock(&port->port_lock); 3016 port_napi_enable(port); 3017 ret = ehea_restart_qps(dev); 3018 check_sqs(port); 3019 if (!ret) 3020 netif_wake_queue(dev); 3021 mutex_unlock(&port->port_lock); 3022 } 3023 } 3024 } 3025 } 3026 ehea_info("re-initializing driver complete"); 3027out: 3028 return; 3029} 3030 3031static void ehea_tx_watchdog(struct net_device *dev) 3032{ 3033 struct ehea_port *port = netdev_priv(dev); 3034 3035 if (netif_carrier_ok(dev) && 3036 !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) 3037 ehea_schedule_port_reset(port); 3038} 3039 3040int ehea_sense_adapter_attr(struct ehea_adapter *adapter) 3041{ 3042 struct hcp_query_ehea *cb; 3043 u64 hret; 3044 int ret; 3045 3046 cb = (void *)get_zeroed_page(GFP_KERNEL); 3047 if (!cb) { 3048 ret = -ENOMEM; 3049 goto out; 3050 } 3051 3052 hret = ehea_h_query_ehea(adapter->handle, cb); 3053 3054 if (hret != H_SUCCESS) { 3055 ret = -EIO; 3056 goto out_herr; 3057 } 3058 3059 adapter->max_mc_mac = cb->max_mc_mac - 1; 3060 ret = 0; 3061 3062out_herr: 3063 free_page((unsigned long)cb); 3064out: 3065 return ret; 3066} 3067 3068int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo) 3069{ 3070 struct hcp_ehea_port_cb4 *cb4; 3071 u64 hret; 3072 int ret = 0; 3073 3074 *jumbo = 0; 3075 3076 /* (Try to) enable *jumbo frames */ 3077 cb4 = (void *)get_zeroed_page(GFP_KERNEL); 3078 if (!cb4) { 3079 ehea_error("no mem for cb4"); 3080 ret = -ENOMEM; 3081 goto out; 3082 } else { 3083 hret = ehea_h_query_ehea_port(port->adapter->handle, 3084 port->logical_port_id, 3085 H_PORT_CB4, 3086 H_PORT_CB4_JUMBO, cb4); 3087 if (hret == H_SUCCESS) { 3088 if (cb4->jumbo_frame) 3089 *jumbo = 1; 3090 else { 3091 cb4->jumbo_frame = 1; 3092 hret = ehea_h_modify_ehea_port(port->adapter-> 3093 handle, 3094 port-> 3095 logical_port_id, 3096 H_PORT_CB4, 3097 H_PORT_CB4_JUMBO, 3098 cb4); 3099 if (hret == H_SUCCESS) 3100 *jumbo = 1; 3101 } 3102 } else 3103 ret = -EINVAL; 3104 3105 free_page((unsigned long)cb4); 3106 } 3107out: 3108 return ret; 3109} 3110 3111static ssize_t ehea_show_port_id(struct device *dev, 3112 struct device_attribute *attr, char *buf) 3113{ 3114 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); 3115 return sprintf(buf, "%d", port->logical_port_id); 3116} 3117 3118static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id, 3119 NULL); 3120 3121static void __devinit logical_port_release(struct device *dev) 3122{ 3123 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); 3124 of_node_put(port->ofdev.dev.of_node); 3125} 3126 3127static struct device *ehea_register_port(struct ehea_port *port, 3128 struct device_node *dn) 3129{ 3130 int ret; 3131 3132 port->ofdev.dev.of_node = of_node_get(dn); 3133 port->ofdev.dev.parent = &port->adapter->ofdev->dev; 3134 port->ofdev.dev.bus = &ibmebus_bus_type; 3135 3136 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++); 3137 port->ofdev.dev.release = logical_port_release; 3138 3139 ret = of_device_register(&port->ofdev); 3140 if (ret) { 3141 ehea_error("failed to register device. ret=%d", ret); 3142 goto out; 3143 } 3144 3145 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); 3146 if (ret) { 3147 ehea_error("failed to register attributes, ret=%d", ret); 3148 goto out_unreg_of_dev; 3149 } 3150 3151 return &port->ofdev.dev; 3152 3153out_unreg_of_dev: 3154 of_device_unregister(&port->ofdev); 3155out: 3156 return NULL; 3157} 3158 3159static void ehea_unregister_port(struct ehea_port *port) 3160{ 3161 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); 3162 of_device_unregister(&port->ofdev); 3163} 3164 3165static const struct net_device_ops ehea_netdev_ops = { 3166 .ndo_open = ehea_open, 3167 .ndo_stop = ehea_stop, 3168 .ndo_start_xmit = ehea_start_xmit, 3169#ifdef CONFIG_NET_POLL_CONTROLLER 3170 .ndo_poll_controller = ehea_netpoll, 3171#endif 3172 .ndo_get_stats = ehea_get_stats, 3173 .ndo_set_mac_address = ehea_set_mac_addr, 3174 .ndo_validate_addr = eth_validate_addr, 3175 .ndo_set_multicast_list = ehea_set_multicast_list, 3176 .ndo_change_mtu = ehea_change_mtu, 3177 .ndo_vlan_rx_register = ehea_vlan_rx_register, 3178 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid, 3179 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid, 3180 .ndo_tx_timeout = ehea_tx_watchdog, 3181}; 3182 3183struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, 3184 u32 logical_port_id, 3185 struct device_node *dn) 3186{ 3187 int ret; 3188 struct net_device *dev; 3189 struct ehea_port *port; 3190 struct device *port_dev; 3191 int jumbo; 3192 3193 /* allocate memory for the port structures */ 3194 dev = alloc_etherdev(sizeof(struct ehea_port)); 3195 3196 if (!dev) { 3197 ehea_error("no mem for net_device"); 3198 ret = -ENOMEM; 3199 goto out_err; 3200 } 3201 3202 port = netdev_priv(dev); 3203 3204 mutex_init(&port->port_lock); 3205 port->state = EHEA_PORT_DOWN; 3206 port->sig_comp_iv = sq_entries / 10; 3207 3208 port->adapter = adapter; 3209 port->netdev = dev; 3210 port->logical_port_id = logical_port_id; 3211 3212 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT); 3213 3214 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL); 3215 if (!port->mc_list) { 3216 ret = -ENOMEM; 3217 goto out_free_ethdev; 3218 } 3219 3220 INIT_LIST_HEAD(&port->mc_list->list); 3221 3222 ret = ehea_sense_port_attr(port); 3223 if (ret) 3224 goto out_free_mc_list; 3225 3226 port_dev = ehea_register_port(port, dn); 3227 if (!port_dev) 3228 goto out_free_mc_list; 3229 3230 SET_NETDEV_DEV(dev, port_dev); 3231 3232 /* initialize net_device structure */ 3233 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN); 3234 3235 dev->netdev_ops = &ehea_netdev_ops; 3236 ehea_set_ethtool_ops(dev); 3237 3238 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO 3239 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX 3240 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER 3241 | NETIF_F_LLTX; 3242 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; 3243 3244 INIT_WORK(&port->reset_task, ehea_reset_port); 3245 3246 ret = register_netdev(dev); 3247 if (ret) { 3248 ehea_error("register_netdev failed. ret=%d", ret); 3249 goto out_unreg_port; 3250 } 3251 3252 port->lro_max_aggr = lro_max_aggr; 3253 3254 ret = ehea_get_jumboframe_status(port, &jumbo); 3255 if (ret) 3256 ehea_error("failed determining jumbo frame status for %s", 3257 port->netdev->name); 3258 3259 ehea_info("%s: Jumbo frames are %sabled", dev->name, 3260 jumbo == 1 ? "en" : "dis"); 3261 3262 adapter->active_ports++; 3263 3264 return port; 3265 3266out_unreg_port: 3267 ehea_unregister_port(port); 3268 3269out_free_mc_list: 3270 kfree(port->mc_list); 3271 3272out_free_ethdev: 3273 free_netdev(dev); 3274 3275out_err: 3276 ehea_error("setting up logical port with id=%d failed, ret=%d", 3277 logical_port_id, ret); 3278 return NULL; 3279} 3280 3281static void ehea_shutdown_single_port(struct ehea_port *port) 3282{ 3283 struct ehea_adapter *adapter = port->adapter; 3284 unregister_netdev(port->netdev); 3285 ehea_unregister_port(port); 3286 kfree(port->mc_list); 3287 free_netdev(port->netdev); 3288 adapter->active_ports--; 3289} 3290 3291static int ehea_setup_ports(struct ehea_adapter *adapter) 3292{ 3293 struct device_node *lhea_dn; 3294 struct device_node *eth_dn = NULL; 3295 3296 const u32 *dn_log_port_id; 3297 int i = 0; 3298 3299 lhea_dn = adapter->ofdev->dev.of_node; 3300 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { 3301 3302 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", 3303 NULL); 3304 if (!dn_log_port_id) { 3305 ehea_error("bad device node: eth_dn name=%s", 3306 eth_dn->full_name); 3307 continue; 3308 } 3309 3310 if (ehea_add_adapter_mr(adapter)) { 3311 ehea_error("creating MR failed"); 3312 of_node_put(eth_dn); 3313 return -EIO; 3314 } 3315 3316 adapter->port[i] = ehea_setup_single_port(adapter, 3317 *dn_log_port_id, 3318 eth_dn); 3319 if (adapter->port[i]) 3320 ehea_info("%s -> logical port id #%d", 3321 adapter->port[i]->netdev->name, 3322 *dn_log_port_id); 3323 else 3324 ehea_remove_adapter_mr(adapter); 3325 3326 i++; 3327 } 3328 return 0; 3329} 3330 3331static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter, 3332 u32 logical_port_id) 3333{ 3334 struct device_node *lhea_dn; 3335 struct device_node *eth_dn = NULL; 3336 const u32 *dn_log_port_id; 3337 3338 lhea_dn = adapter->ofdev->dev.of_node; 3339 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { 3340 3341 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", 3342 NULL); 3343 if (dn_log_port_id) 3344 if (*dn_log_port_id == logical_port_id) 3345 return eth_dn; 3346 } 3347 3348 return NULL; 3349} 3350 3351static ssize_t ehea_probe_port(struct device *dev, 3352 struct device_attribute *attr, 3353 const char *buf, size_t count) 3354{ 3355 struct ehea_adapter *adapter = dev_get_drvdata(dev); 3356 struct ehea_port *port; 3357 struct device_node *eth_dn = NULL; 3358 int i; 3359 3360 u32 logical_port_id; 3361 3362 sscanf(buf, "%d", &logical_port_id); 3363 3364 port = ehea_get_port(adapter, logical_port_id); 3365 3366 if (port) { 3367 ehea_info("adding port with logical port id=%d failed. port " 3368 "already configured as %s.", logical_port_id, 3369 port->netdev->name); 3370 return -EINVAL; 3371 } 3372 3373 eth_dn = ehea_get_eth_dn(adapter, logical_port_id); 3374 3375 if (!eth_dn) { 3376 ehea_info("no logical port with id %d found", logical_port_id); 3377 return -EINVAL; 3378 } 3379 3380 if (ehea_add_adapter_mr(adapter)) { 3381 ehea_error("creating MR failed"); 3382 return -EIO; 3383 } 3384 3385 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn); 3386 3387 of_node_put(eth_dn); 3388 3389 if (port) { 3390 for (i = 0; i < EHEA_MAX_PORTS; i++) 3391 if (!adapter->port[i]) { 3392 adapter->port[i] = port; 3393 break; 3394 } 3395 3396 ehea_info("added %s (logical port id=%d)", port->netdev->name, 3397 logical_port_id); 3398 } else { 3399 ehea_remove_adapter_mr(adapter); 3400 return -EIO; 3401 } 3402 3403 return (ssize_t) count; 3404} 3405 3406static ssize_t ehea_remove_port(struct device *dev, 3407 struct device_attribute *attr, 3408 const char *buf, size_t count) 3409{ 3410 struct ehea_adapter *adapter = dev_get_drvdata(dev); 3411 struct ehea_port *port; 3412 int i; 3413 u32 logical_port_id; 3414 3415 sscanf(buf, "%d", &logical_port_id); 3416 3417 port = ehea_get_port(adapter, logical_port_id); 3418 3419 if (port) { 3420 ehea_info("removed %s (logical port id=%d)", port->netdev->name, 3421 logical_port_id); 3422 3423 ehea_shutdown_single_port(port); 3424 3425 for (i = 0; i < EHEA_MAX_PORTS; i++) 3426 if (adapter->port[i] == port) { 3427 adapter->port[i] = NULL; 3428 break; 3429 } 3430 } else { 3431 ehea_error("removing port with logical port id=%d failed. port " 3432 "not configured.", logical_port_id); 3433 return -EINVAL; 3434 } 3435 3436 ehea_remove_adapter_mr(adapter); 3437 3438 return (ssize_t) count; 3439} 3440 3441static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port); 3442static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port); 3443 3444int ehea_create_device_sysfs(struct platform_device *dev) 3445{ 3446 int ret = device_create_file(&dev->dev, &dev_attr_probe_port); 3447 if (ret) 3448 goto out; 3449 3450 ret = device_create_file(&dev->dev, &dev_attr_remove_port); 3451out: 3452 return ret; 3453} 3454 3455void ehea_remove_device_sysfs(struct platform_device *dev) 3456{ 3457 device_remove_file(&dev->dev, &dev_attr_probe_port); 3458 device_remove_file(&dev->dev, &dev_attr_remove_port); 3459} 3460 3461static int __devinit ehea_probe_adapter(struct platform_device *dev, 3462 const struct of_device_id *id) 3463{ 3464 struct ehea_adapter *adapter; 3465 const u64 *adapter_handle; 3466 int ret; 3467 3468 if (!dev || !dev->dev.of_node) { 3469 ehea_error("Invalid ibmebus device probed"); 3470 return -EINVAL; 3471 } 3472 3473 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 3474 if (!adapter) { 3475 ret = -ENOMEM; 3476 dev_err(&dev->dev, "no mem for ehea_adapter\n"); 3477 goto out; 3478 } 3479 3480 list_add(&adapter->list, &adapter_list); 3481 3482 adapter->ofdev = dev; 3483 3484 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle", 3485 NULL); 3486 if (adapter_handle) 3487 adapter->handle = *adapter_handle; 3488 3489 if (!adapter->handle) { 3490 dev_err(&dev->dev, "failed getting handle for adapter" 3491 " '%s'\n", dev->dev.of_node->full_name); 3492 ret = -ENODEV; 3493 goto out_free_ad; 3494 } 3495 3496 adapter->pd = EHEA_PD_ID; 3497 3498 dev_set_drvdata(&dev->dev, adapter); 3499 3500 3501 /* initialize adapter and ports */ 3502 /* get adapter properties */ 3503 ret = ehea_sense_adapter_attr(adapter); 3504 if (ret) { 3505 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret); 3506 goto out_free_ad; 3507 } 3508 3509 adapter->neq = ehea_create_eq(adapter, 3510 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1); 3511 if (!adapter->neq) { 3512 ret = -EIO; 3513 dev_err(&dev->dev, "NEQ creation failed\n"); 3514 goto out_free_ad; 3515 } 3516 3517 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, 3518 (unsigned long)adapter); 3519 3520 ret = ibmebus_request_irq(adapter->neq->attr.ist1, 3521 ehea_interrupt_neq, IRQF_DISABLED, 3522 "ehea_neq", adapter); 3523 if (ret) { 3524 dev_err(&dev->dev, "requesting NEQ IRQ failed\n"); 3525 goto out_kill_eq; 3526 } 3527 3528 ret = ehea_create_device_sysfs(dev); 3529 if (ret) 3530 goto out_free_irq; 3531 3532 ret = ehea_setup_ports(adapter); 3533 if (ret) { 3534 dev_err(&dev->dev, "setup_ports failed\n"); 3535 goto out_rem_dev_sysfs; 3536 } 3537 3538 ret = 0; 3539 goto out; 3540 3541out_rem_dev_sysfs: 3542 ehea_remove_device_sysfs(dev); 3543 3544out_free_irq: 3545 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); 3546 3547out_kill_eq: 3548 ehea_destroy_eq(adapter->neq); 3549 3550out_free_ad: 3551 list_del(&adapter->list); 3552 kfree(adapter); 3553 3554out: 3555 ehea_update_firmware_handles(); 3556 3557 return ret; 3558} 3559 3560static int __devexit ehea_remove(struct platform_device *dev) 3561{ 3562 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev); 3563 int i; 3564 3565 for (i = 0; i < EHEA_MAX_PORTS; i++) 3566 if (adapter->port[i]) { 3567 ehea_shutdown_single_port(adapter->port[i]); 3568 adapter->port[i] = NULL; 3569 } 3570 3571 ehea_remove_device_sysfs(dev); 3572 3573 flush_scheduled_work(); 3574 3575 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); 3576 tasklet_kill(&adapter->neq_tasklet); 3577 3578 ehea_destroy_eq(adapter->neq); 3579 ehea_remove_adapter_mr(adapter); 3580 list_del(&adapter->list); 3581 kfree(adapter); 3582 3583 ehea_update_firmware_handles(); 3584 3585 return 0; 3586} 3587 3588void ehea_crash_handler(void) 3589{ 3590 int i; 3591 3592 if (ehea_fw_handles.arr) 3593 for (i = 0; i < ehea_fw_handles.num_entries; i++) 3594 ehea_h_free_resource(ehea_fw_handles.arr[i].adh, 3595 ehea_fw_handles.arr[i].fwh, 3596 FORCE_FREE); 3597 3598 if (ehea_bcmc_regs.arr) 3599 for (i = 0; i < ehea_bcmc_regs.num_entries; i++) 3600 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, 3601 ehea_bcmc_regs.arr[i].port_id, 3602 ehea_bcmc_regs.arr[i].reg_type, 3603 ehea_bcmc_regs.arr[i].macaddr, 3604 0, H_DEREG_BCMC); 3605} 3606 3607static int ehea_mem_notifier(struct notifier_block *nb, 3608 unsigned long action, void *data) 3609{ 3610 int ret = NOTIFY_BAD; 3611 struct memory_notify *arg = data; 3612 3613 mutex_lock(&dlpar_mem_lock); 3614 3615 switch (action) { 3616 case MEM_CANCEL_OFFLINE: 3617 ehea_info("memory offlining canceled"); 3618 /* Readd canceled memory block */ 3619 case MEM_ONLINE: 3620 ehea_info("memory is going online"); 3621 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3622 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) 3623 goto out_unlock; 3624 ehea_rereg_mrs(NULL); 3625 break; 3626 case MEM_GOING_OFFLINE: 3627 ehea_info("memory is going offline"); 3628 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3629 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) 3630 goto out_unlock; 3631 ehea_rereg_mrs(NULL); 3632 break; 3633 default: 3634 break; 3635 } 3636 3637 ehea_update_firmware_handles(); 3638 ret = NOTIFY_OK; 3639 3640out_unlock: 3641 mutex_unlock(&dlpar_mem_lock); 3642 return ret; 3643} 3644 3645static struct notifier_block ehea_mem_nb = { 3646 .notifier_call = ehea_mem_notifier, 3647}; 3648 3649static int ehea_reboot_notifier(struct notifier_block *nb, 3650 unsigned long action, void *unused) 3651{ 3652 if (action == SYS_RESTART) { 3653 ehea_info("Reboot: freeing all eHEA resources"); 3654 ibmebus_unregister_driver(&ehea_driver); 3655 } 3656 return NOTIFY_DONE; 3657} 3658 3659static struct notifier_block ehea_reboot_nb = { 3660 .notifier_call = ehea_reboot_notifier, 3661}; 3662 3663static int check_module_parm(void) 3664{ 3665 int ret = 0; 3666 3667 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || 3668 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { 3669 ehea_info("Bad parameter: rq1_entries"); 3670 ret = -EINVAL; 3671 } 3672 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || 3673 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { 3674 ehea_info("Bad parameter: rq2_entries"); 3675 ret = -EINVAL; 3676 } 3677 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || 3678 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { 3679 ehea_info("Bad parameter: rq3_entries"); 3680 ret = -EINVAL; 3681 } 3682 if ((sq_entries < EHEA_MIN_ENTRIES_QP) || 3683 (sq_entries > EHEA_MAX_ENTRIES_SQ)) { 3684 ehea_info("Bad parameter: sq_entries"); 3685 ret = -EINVAL; 3686 } 3687 3688 return ret; 3689} 3690 3691static ssize_t ehea_show_capabilities(struct device_driver *drv, 3692 char *buf) 3693{ 3694 return sprintf(buf, "%d", EHEA_CAPABILITIES); 3695} 3696 3697static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH, 3698 ehea_show_capabilities, NULL); 3699 3700int __init ehea_module_init(void) 3701{ 3702 int ret; 3703 3704 printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", 3705 DRV_VERSION); 3706 3707 3708 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs); 3709 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles)); 3710 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); 3711 3712 mutex_init(&ehea_fw_handles.lock); 3713 spin_lock_init(&ehea_bcmc_regs.lock); 3714 3715 ret = check_module_parm(); 3716 if (ret) 3717 goto out; 3718 3719 ret = ehea_create_busmap(); 3720 if (ret) 3721 goto out; 3722 3723 ret = register_reboot_notifier(&ehea_reboot_nb); 3724 if (ret) 3725 ehea_info("failed registering reboot notifier"); 3726 3727 ret = register_memory_notifier(&ehea_mem_nb); 3728 if (ret) 3729 ehea_info("failed registering memory remove notifier"); 3730 3731 ret = crash_shutdown_register(&ehea_crash_handler); 3732 if (ret) 3733 ehea_info("failed registering crash handler"); 3734 3735 ret = ibmebus_register_driver(&ehea_driver); 3736 if (ret) { 3737 ehea_error("failed registering eHEA device driver on ebus"); 3738 goto out2; 3739 } 3740 3741 ret = driver_create_file(&ehea_driver.driver, 3742 &driver_attr_capabilities); 3743 if (ret) { 3744 ehea_error("failed to register capabilities attribute, ret=%d", 3745 ret); 3746 goto out3; 3747 } 3748 3749 return ret; 3750 3751out3: 3752 ibmebus_unregister_driver(&ehea_driver); 3753out2: 3754 unregister_memory_notifier(&ehea_mem_nb); 3755 unregister_reboot_notifier(&ehea_reboot_nb); 3756 crash_shutdown_unregister(&ehea_crash_handler); 3757out: 3758 return ret; 3759} 3760 3761static void __exit ehea_module_exit(void) 3762{ 3763 int ret; 3764 3765 flush_scheduled_work(); 3766 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3767 ibmebus_unregister_driver(&ehea_driver); 3768 unregister_reboot_notifier(&ehea_reboot_nb); 3769 ret = crash_shutdown_unregister(&ehea_crash_handler); 3770 if (ret) 3771 ehea_info("failed unregistering crash handler"); 3772 unregister_memory_notifier(&ehea_mem_nb); 3773 kfree(ehea_fw_handles.arr); 3774 kfree(ehea_bcmc_regs.arr); 3775 ehea_destroy_busmap(); 3776} 3777 3778module_init(ehea_module_init); 3779module_exit(ehea_module_exit); 3780