/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 0;
static int num_tx_qps = EHEA_NUM_TX_QP;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(use_mcs, int, 0);
module_param(num_tx_qps, int, 0);

MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "Message level bitmask");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "0:NAPI, 1:Multiple receive queues, Default = 0");

static int port_name_cnt = 0;

83static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
84                                        const struct of_device_id *id);
85
86static int __devexit ehea_remove(struct ibmebus_dev *dev);
87
88static struct of_device_id ehea_device_table[] = {
89	{
90		.name = "lhea",
91		.compatible = "IBM,lhea",
92	},
93	{},
94};
95
96static struct ibmebus_driver ehea_driver = {
97	.name = "ehea",
98	.id_table = ehea_device_table,
99	.probe = ehea_probe_adapter,
100	.remove = ehea_remove,
101};
102
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
			  deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
112
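/*
 * Collect interface statistics: byte, error and TX packet counts are read
 * from the port's CB2 query block, while RX packet counts are summed from
 * the per-queue software counters.
 */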
113static struct net_device_stats *ehea_get_stats(struct net_device *dev)
114{
115	struct ehea_port *port = netdev_priv(dev);
116	struct net_device_stats *stats = &port->stats;
117	struct hcp_ehea_port_cb2 *cb2;
118	u64 hret, rx_packets;
119	int i;
120
121	memset(stats, 0, sizeof(*stats));
122
123	cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
124	if (!cb2) {
125		ehea_error("no mem for cb2");
126		goto out;
127	}
128
129	hret = ehea_h_query_ehea_port(port->adapter->handle,
130				      port->logical_port_id,
131				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
132	if (hret != H_SUCCESS) {
133		ehea_error("query_ehea_port failed");
134		goto out_herr;
135	}
136
137	if (netif_msg_hw(port))
138		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
139
140	rx_packets = 0;
141	for (i = 0; i < port->num_def_qps; i++)
142		rx_packets += port->port_res[i].rx_packets;
143
144	stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp;
145	stats->multicast = cb2->rxmcp;
146	stats->rx_errors = cb2->rxuerr;
147	stats->rx_bytes = cb2->rxo;
148	stats->tx_bytes = cb2->txo;
149	stats->rx_packets = rx_packets;
150
151out_herr:
152	kfree(cb2);
153out:
154	return stats;
155}
156
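/*
 * Re-post up to nr_of_wqes low-latency receive WQEs on RQ1, walking
 * backwards from index. Array slots whose skb was handed to the stack get
 * a fresh skb; the doorbell is then rung with the number of WQEs actually
 * made available again.
 */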
157static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
158{
159	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
160	struct net_device *dev = pr->port->netdev;
161	int max_index_mask = pr->rq1_skba.len - 1;
162	int i;
163
164	if (!nr_of_wqes)
165		return;
166
167	for (i = 0; i < nr_of_wqes; i++) {
168		if (!skb_arr_rq1[index]) {
169			skb_arr_rq1[index] = netdev_alloc_skb(dev,
170							      EHEA_L_PKT_SIZE);
171			if (!skb_arr_rq1[index]) {
172				ehea_error("%s: no mem for skb/%d wqes filled",
173					   dev->name, i);
174				break;
175			}
176		}
177		index--;
178		index &= max_index_mask;
179	}
180	/* Ring doorbell */
181	ehea_update_rq1a(pr->qp, i);
182}
183
184static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
185{
186	int ret = 0;
187	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
188	struct net_device *dev = pr->port->netdev;
189	int i;
190
191	for (i = 0; i < pr->rq1_skba.len; i++) {
192		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
193		if (!skb_arr_rq1[i]) {
194			ehea_error("%s: no mem for skb/%d wqes filled",
195				   dev->name, i);
196			ret = -ENOMEM;
197			goto out;
198		}
199	}
200	/* Ring doorbell */
201	ehea_update_rq1a(pr->qp, nr_rq1a);
202out:
203	return ret;
204}
205
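/*
 * Generic refill for RQ2/RQ3: allocate packet_size skbs, build one receive
 * WQE per skb referencing the receive memory region and ring the matching
 * doorbell. os_skbs remembers WQEs that could not be filled because an
 * allocation failed, so they are retried on the next refill.
 */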
206static int ehea_refill_rq_def(struct ehea_port_res *pr,
207			      struct ehea_q_skb_arr *q_skba, int rq_nr,
208			      int num_wqes, int wqe_type, int packet_size)
209{
210	struct net_device *dev = pr->port->netdev;
211	struct ehea_qp *qp = pr->qp;
212	struct sk_buff **skb_arr = q_skba->arr;
213	struct ehea_rwqe *rwqe;
214	int i, index, max_index_mask, fill_wqes;
215	int ret = 0;
216
217	fill_wqes = q_skba->os_skbs + num_wqes;
218
219	if (!fill_wqes)
220		return ret;
221
222	index = q_skba->index;
223	max_index_mask = q_skba->len - 1;
224	for (i = 0; i < fill_wqes; i++) {
225		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
226		if (!skb) {
227			ehea_error("%s: no mem for skb/%d wqes filled",
228				   pr->port->netdev->name, i);
229			q_skba->os_skbs = fill_wqes - i;
230			ret = -ENOMEM;
231			break;
232		}
233		skb_reserve(skb, NET_IP_ALIGN);
234
235		skb_arr[index] = skb;
236
237		rwqe = ehea_get_next_rwqe(qp, rq_nr);
238		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
239		            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
240		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
241		rwqe->sg_list[0].vaddr = (u64)skb->data;
242		rwqe->sg_list[0].len = packet_size;
243		rwqe->data_segments = 1;
244
245		index++;
246		index &= max_index_mask;
247	}
248	q_skba->index = index;
249
250	/* Ring doorbell */
251	iosync();
252	if (rq_nr == 2)
253		ehea_update_rq2a(pr->qp, i);
254	else
255		ehea_update_rq3a(pr->qp, i);
256
257	return ret;
258}
259
260
261static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
262{
263	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
264				  nr_of_wqes, EHEA_RWQE2_TYPE,
265				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
266}
267
268
269static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
270{
271	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
272				  nr_of_wqes, EHEA_RWQE3_TYPE,
273				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
274}
275
276static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
277{
278	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
279	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
280		return 0;
281	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
282	    (cqe->header_length == 0))
283		return 0;
284	return -EINVAL;
285}
286
287static inline void ehea_fill_skb(struct net_device *dev,
288				 struct sk_buff *skb, struct ehea_cqe *cqe)
289{
290	int length = cqe->num_bytes_transfered - 4;	/*remove CRC */
291
292	skb_put(skb, length);
293	skb->ip_summed = CHECKSUM_UNNECESSARY;
294	skb->protocol = eth_type_trans(skb, dev);
295}
296
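/*
 * Pull the skb belonging to this CQE out of the receive array; the
 * following array slot is prefetched since it is likely to be needed next.
 */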
297static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
298					       int arr_len,
299					       struct ehea_cqe *cqe)
300{
301	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
302	struct sk_buff *skb;
303	void *pref;
304	int x;
305
306	x = skb_index + 1;
307	x &= (arr_len - 1);
308
309	pref = skb_array[x];
310	prefetchw(pref);
311	prefetchw(pref + EHEA_CACHE_LINE);
312
313	pref = (skb_array[x]->data);
314	prefetch(pref);
315	prefetch(pref + EHEA_CACHE_LINE);
316	prefetch(pref + EHEA_CACHE_LINE * 2);
317	prefetch(pref + EHEA_CACHE_LINE * 3);
318	skb = skb_array[skb_index];
319	skb_array[skb_index] = NULL;
320	return skb;
321}
322
323static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
324						  int arr_len, int wqe_index)
325{
326	struct sk_buff *skb;
327	void *pref;
328	int x;
329
330	x = wqe_index + 1;
331	x &= (arr_len - 1);
332
333	pref = skb_array[x];
334	prefetchw(pref);
335	prefetchw(pref + EHEA_CACHE_LINE);
336
337	pref = (skb_array[x]->data);
338	prefetchw(pref);
339	prefetchw(pref + EHEA_CACHE_LINE);
340
341	skb = skb_array[wqe_index];
342	skb_array[wqe_index] = NULL;
343	return skb;
344}
345
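/*
 * Account TCP/IP checksum and frame CRC errors reported in the CQE, drop
 * the skb that belongs to the failed RQ2/RQ3 WQE and, if the error is
 * fatal, schedule a port reset (returning 1).
 */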
346static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
347				 struct ehea_cqe *cqe, int *processed_rq2,
348				 int *processed_rq3)
349{
350	struct sk_buff *skb;
351
352	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
353		pr->p_stats.err_tcp_cksum++;
354	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
355		pr->p_stats.err_ip_cksum++;
356	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
357		pr->p_stats.err_frame_crc++;
358
359	if (netif_msg_rx_err(pr->port)) {
360		ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
361		ehea_dump(cqe, sizeof(*cqe), "CQE");
362	}
363
364	if (rq == 2) {
365		*processed_rq2 += 1;
366		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
367		dev_kfree_skb(skb);
368	} else if (rq == 3) {
369		*processed_rq3 += 1;
370		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
371		dev_kfree_skb(skb);
372	}
373
374	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
375		ehea_error("Critical receive error. Resetting port.");
376		queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task);
377		return 1;
378	}
379
380	return 0;
381}
382
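/*
 * Receive processing: drain receive completions up to the quota.
 * Low-latency RQ1 data is copied out of the CQE itself, RQ2/RQ3 data
 * arrives in pre-posted skbs; frames are passed to the stack (via VLAN
 * acceleration if a tag was extracted) and all three RQs are refilled
 * with the amount consumed.
 */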
383static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
384					struct ehea_port_res *pr,
385					int *budget)
386{
387	struct ehea_port *port = pr->port;
388	struct ehea_qp *qp = pr->qp;
389	struct ehea_cqe *cqe;
390	struct sk_buff *skb;
391	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
392	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
393	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
394	int skb_arr_rq1_len = pr->rq1_skba.len;
395	int skb_arr_rq2_len = pr->rq2_skba.len;
396	int skb_arr_rq3_len = pr->rq3_skba.len;
397	int processed, processed_rq1, processed_rq2, processed_rq3;
398	int wqe_index, last_wqe_index, rq, my_quota, port_reset;
399
400	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
401	last_wqe_index = 0;
402	my_quota = min(*budget, dev->quota);
403
404	cqe = ehea_poll_rq1(qp, &wqe_index);
405	while ((my_quota > 0) && cqe) {
406		ehea_inc_rq1(qp);
407		processed_rq1++;
408		processed++;
409		my_quota--;
410		if (netif_msg_rx_status(port))
411			ehea_dump(cqe, sizeof(*cqe), "CQE");
412
413		last_wqe_index = wqe_index;
414		rmb();
415		if (!ehea_check_cqe(cqe, &rq)) {
416			if (rq == 1) {	/* LL RQ1 */
417				skb = get_skb_by_index_ll(skb_arr_rq1,
418							  skb_arr_rq1_len,
419							  wqe_index);
420				if (unlikely(!skb)) {
421					if (netif_msg_rx_err(port))
422						ehea_error("LL rq1: skb=NULL");
423
424					skb = netdev_alloc_skb(port->netdev,
425							       EHEA_L_PKT_SIZE);
426					if (!skb)
427						break;
428				}
429				skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
430					       cqe->num_bytes_transfered - 4);
431				ehea_fill_skb(port->netdev, skb, cqe);
432			} else if (rq == 2) {  /* RQ2 */
433				skb = get_skb_by_index(skb_arr_rq2,
434						       skb_arr_rq2_len, cqe);
435				if (unlikely(!skb)) {
436					if (netif_msg_rx_err(port))
437						ehea_error("rq2: skb=NULL");
438					break;
439				}
440				ehea_fill_skb(port->netdev, skb, cqe);
441				processed_rq2++;
442			} else {  /* RQ3 */
443				skb = get_skb_by_index(skb_arr_rq3,
444						       skb_arr_rq3_len, cqe);
445				if (unlikely(!skb)) {
446					if (netif_msg_rx_err(port))
447						ehea_error("rq3: skb=NULL");
448					break;
449				}
450				ehea_fill_skb(port->netdev, skb, cqe);
451				processed_rq3++;
452			}
453
454			if ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
455			    && port->vgrp)
456				vlan_hwaccel_receive_skb(skb, port->vgrp,
457							 cqe->vlan_tag);
458			else
459				netif_receive_skb(skb);
460		} else {
461			pr->p_stats.poll_receive_errors++;
462			port_reset = ehea_treat_poll_error(pr, rq, cqe,
463							   &processed_rq2,
464							   &processed_rq3);
465			if (port_reset)
466				break;
467		}
468		cqe = ehea_poll_rq1(qp, &wqe_index);
469	}
470
471	pr->rx_packets += processed;
472	*budget -= processed;
473
474	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
475	ehea_refill_rq2(pr, processed_rq2);
476	ehea_refill_rq3(pr, processed_rq3);
477
478	cqe = ehea_poll_rq1(qp, &wqe_index);
479	return cqe;
480}
481
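/*
 * Send completion processing: free the skbs of completed SWQE2s, return
 * the completed WQEs to swqe_avail and wake the transmit queue once enough
 * send WQEs are free again. A completion error schedules a port reset.
 */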
482static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
483{
484	struct sk_buff *skb;
485	struct ehea_cq *send_cq = pr->send_cq;
486	struct ehea_cqe *cqe;
487	int quota = my_quota;
488	int cqe_counter = 0;
489	int swqe_av = 0;
490	int index;
491	unsigned long flags;
492
493	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
495		ehea_inc_cq(send_cq);
496
497		cqe_counter++;
498		rmb();
499		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
500			ehea_error("Send Completion Error: Resetting port");
501			if (netif_msg_tx_err(pr->port))
502				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
503			queue_work(pr->port->adapter->ehea_wq,
504				   &pr->port->reset_task);
505			break;
506		}
507
508		if (netif_msg_tx_done(pr->port))
509			ehea_dump(cqe, sizeof(*cqe), "CQE");
510
511		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
512			   == EHEA_SWQE2_TYPE)) {
513
514			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
515			skb = pr->sq_skba.arr[index];
516			dev_kfree_skb(skb);
517			pr->sq_skba.arr[index] = NULL;
518		}
519
520		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
521		quota--;
522
523		cqe = ehea_poll_cq(send_cq);
	}
525
526	ehea_update_feca(send_cq, cqe_counter);
527	atomic_add(swqe_av, &pr->swqe_avail);
528
529	spin_lock_irqsave(&pr->netif_queue, flags);
530
531	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
532				  >= pr->swqe_refill_th)) {
533		netif_wake_queue(pr->port->netdev);
534		pr->queue_stopped = 0;
535	}
536	spin_unlock_irqrestore(&pr->netif_queue, flags);
537
538	return cqe;
539}
540
541#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
542
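/*
 * NAPI poll handler: process receive and send completions for this port
 * resource. When no work is pending, or after EHEA_NAPI_POLL_NUM_BEFORE_IRQ
 * consecutive busy polls, completion interrupts are re-armed; polling only
 * stops if no new completions arrived in the meantime.
 */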
543static int ehea_poll(struct net_device *dev, int *budget)
544{
545	struct ehea_port_res *pr = dev->priv;
546	struct ehea_cqe *cqe;
547	struct ehea_cqe *cqe_skb = NULL;
548	int force_irq, wqe_index;
549
550	cqe = ehea_poll_rq1(pr->qp, &wqe_index);
551	cqe_skb = ehea_poll_cq(pr->send_cq);
552
553	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
554
555	if ((!cqe && !cqe_skb) || force_irq) {
556		pr->poll_counter = 0;
557		netif_rx_complete(dev);
558		ehea_reset_cq_ep(pr->recv_cq);
559		ehea_reset_cq_ep(pr->send_cq);
560		ehea_reset_cq_n1(pr->recv_cq);
561		ehea_reset_cq_n1(pr->send_cq);
562		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
563		cqe_skb = ehea_poll_cq(pr->send_cq);
564
565		if (!cqe && !cqe_skb)
566			return 0;
567
568		if (!netif_rx_reschedule(dev, dev->quota))
569			return 0;
570	}
571
572	cqe = ehea_proc_rwqes(dev, pr, budget);
573	cqe_skb = ehea_proc_cqes(pr, 300);
574
575	if (cqe || cqe_skb)
576		pr->poll_counter++;
577
578	return 1;
579}
580
581static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
582{
583	struct ehea_port_res *pr = param;
584
585	netif_rx_schedule(pr->d_netdev);
586
587	return IRQ_HANDLED;
588}
589
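/*
 * QP affiliated error interrupt: log error data for every QP reported on
 * the event queue and schedule a port reset.
 */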
590static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
591{
592	struct ehea_port *port = param;
593	struct ehea_eqe *eqe;
594	struct ehea_qp *qp;
595	u32 qp_token;
596
597	eqe = ehea_poll_eq(port->qp_eq);
598
599	while (eqe) {
600		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
601		ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
602			   eqe->entry, qp_token);
603
604		qp = port->port_res[qp_token].qp;
605		ehea_error_data(port->adapter, qp->fw_handle);
606		eqe = ehea_poll_eq(port->qp_eq);
607	}
608
609	queue_work(port->adapter->ehea_wq, &port->reset_task);
610
611	return IRQ_HANDLED;
612}
613
614static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
615				       int logical_port)
616{
617	int i;
618
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
624}
625
626int ehea_sense_port_attr(struct ehea_port *port)
627{
628	int ret;
629	u64 hret;
630	struct hcp_ehea_port_cb0 *cb0;
631
632	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);   /* May be called via */
633	if (!cb0) {                             /* ehea_neq_tasklet() */
634		ehea_error("no mem for cb0");
635		ret = -ENOMEM;
636		goto out;
637	}
638
639	hret = ehea_h_query_ehea_port(port->adapter->handle,
640				      port->logical_port_id, H_PORT_CB0,
641				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
642				      cb0);
643	if (hret != H_SUCCESS) {
644		ret = -EIO;
645		goto out_free;
646	}
647
648	/* MAC address */
649	port->mac_addr = cb0->port_mac_addr << 16;
650
651	if (!is_valid_ether_addr((u8*)&port->mac_addr)) {
652		ret = -EADDRNOTAVAIL;
653		goto out_free;
654	}
655
656	/* Port speed */
657	switch (cb0->port_speed) {
658	case H_SPEED_10M_H:
659		port->port_speed = EHEA_SPEED_10M;
660		port->full_duplex = 0;
661		break;
662	case H_SPEED_10M_F:
663		port->port_speed = EHEA_SPEED_10M;
664		port->full_duplex = 1;
665		break;
666	case H_SPEED_100M_H:
667		port->port_speed = EHEA_SPEED_100M;
668		port->full_duplex = 0;
669		break;
670	case H_SPEED_100M_F:
671		port->port_speed = EHEA_SPEED_100M;
672		port->full_duplex = 1;
673		break;
674	case H_SPEED_1G_F:
675		port->port_speed = EHEA_SPEED_1G;
676		port->full_duplex = 1;
677		break;
678	case H_SPEED_10G_F:
679		port->port_speed = EHEA_SPEED_10G;
680		port->full_duplex = 1;
681		break;
682	default:
683		port->port_speed = 0;
684		port->full_duplex = 0;
685		break;
686	}
687
688	port->autoneg = 1;
689	port->num_mcs = cb0->num_default_qps;
690
691	/* Number of default QPs */
692	if (use_mcs)
693		port->num_def_qps = cb0->num_default_qps;
694	else
695		port->num_def_qps = 1;
696
697	if (!port->num_def_qps) {
698		ret = -EINVAL;
699		goto out_free;
700	}
701
702	port->num_tx_qps = num_tx_qps;
703
704	if (port->num_def_qps >= port->num_tx_qps)
705		port->num_add_tx_qps = 0;
706	else
707		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
708
709	ret = 0;
710out_free:
711	if (ret || netif_msg_probe(port))
712		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
713	kfree(cb0);
714out:
715	return ret;
716}
717
718int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
719{
720	struct hcp_ehea_port_cb4 *cb4;
721	u64 hret;
722	int ret = 0;
723
724	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
725	if (!cb4) {
726		ehea_error("no mem for cb4");
727		ret = -ENOMEM;
728		goto out;
729	}
730
731	cb4->port_speed = port_speed;
732
733	netif_carrier_off(port->netdev);
734
735	hret = ehea_h_modify_ehea_port(port->adapter->handle,
736				       port->logical_port_id,
737				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
738	if (hret == H_SUCCESS) {
739		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;
740
741		hret = ehea_h_query_ehea_port(port->adapter->handle,
742					      port->logical_port_id,
743					      H_PORT_CB4, H_PORT_CB4_SPEED,
744					      cb4);
745		if (hret == H_SUCCESS) {
746			switch (cb4->port_speed) {
747			case H_SPEED_10M_H:
748				port->port_speed = EHEA_SPEED_10M;
749				port->full_duplex = 0;
750				break;
751			case H_SPEED_10M_F:
752				port->port_speed = EHEA_SPEED_10M;
753				port->full_duplex = 1;
754				break;
755			case H_SPEED_100M_H:
756				port->port_speed = EHEA_SPEED_100M;
757				port->full_duplex = 0;
758				break;
759			case H_SPEED_100M_F:
760				port->port_speed = EHEA_SPEED_100M;
761				port->full_duplex = 1;
762				break;
763			case H_SPEED_1G_F:
764				port->port_speed = EHEA_SPEED_1G;
765				port->full_duplex = 1;
766				break;
767			case H_SPEED_10G_F:
768				port->port_speed = EHEA_SPEED_10G;
769				port->full_duplex = 1;
770				break;
771			default:
772				port->port_speed = 0;
773				port->full_duplex = 0;
774				break;
775			}
776		} else {
777			ehea_error("Failed sensing port speed");
778			ret = -EIO;
779		}
780	} else {
781		if (hret == H_AUTHORITY) {
782			ehea_info("Hypervisor denied setting port speed");
783			ret = -EPERM;
784		} else {
785			ret = -EIO;
786			ehea_error("Failed setting port speed");
787		}
788	}
789	netif_carrier_on(port->netdev);
790	kfree(cb4);
791out:
792	return ret;
793}
794
795static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
796{
797	int ret;
798	u8 ec;
799	u8 portnum;
800	struct ehea_port *port;
801
802	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
803	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
804	port = ehea_get_port(adapter, portnum);
805
806	switch (ec) {
807	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
808
809		if (!port) {
810			ehea_error("unknown portnum %x", portnum);
811			break;
812		}
813
814		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
815			if (!netif_carrier_ok(port->netdev)) {
816				ret = ehea_sense_port_attr(port);
817				if (ret) {
818					ehea_error("failed resensing port "
819						   "attributes");
820					break;
821				}
822
823				if (netif_msg_link(port))
824					ehea_info("%s: Logical port up: %dMbps "
825						  "%s Duplex",
826						  port->netdev->name,
827						  port->port_speed,
828						  port->full_duplex ==
829						  1 ? "Full" : "Half");
830
831				netif_carrier_on(port->netdev);
832				netif_wake_queue(port->netdev);
833			}
834		} else
835			if (netif_carrier_ok(port->netdev)) {
836				if (netif_msg_link(port))
837					ehea_info("%s: Logical port down",
838						  port->netdev->name);
839				netif_carrier_off(port->netdev);
840				netif_stop_queue(port->netdev);
841			}
842
843		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
844			if (netif_msg_link(port))
845				ehea_info("%s: Physical port up",
846					  port->netdev->name);
847		} else {
848			if (netif_msg_link(port))
849				ehea_info("%s: Physical port down",
850					  port->netdev->name);
851		}
852
853		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
854			ehea_info("External switch port is primary port");
855		else
856			ehea_info("External switch port is backup port");
857
858		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
867	default:
868		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
869		break;
870	}
871}
872
873static void ehea_neq_tasklet(unsigned long data)
874{
875	struct ehea_adapter *adapter = (struct ehea_adapter*)data;
876	struct ehea_eqe *eqe;
877	u64 event_mask;
878
879	eqe = ehea_poll_eq(adapter->neq);
880	ehea_debug("eqe=%p", eqe);
881
882	while (eqe) {
883		ehea_debug("*eqe=%lx", eqe->entry);
884		ehea_parse_eqe(adapter, eqe->entry);
885		eqe = ehea_poll_eq(adapter->neq);
886		ehea_debug("next eqe=%p", eqe);
887	}
888
889	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
890		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
891		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);
892
893	ehea_h_reset_events(adapter->handle,
894			    adapter->neq->fw_handle, event_mask);
895}
896
897static irqreturn_t ehea_interrupt_neq(int irq, void *param)
898{
899	struct ehea_adapter *adapter = param;
900	tasklet_hi_schedule(&adapter->neq_tasklet);
901	return IRQ_HANDLED;
902}
903
904
905static int ehea_fill_port_res(struct ehea_port_res *pr)
906{
907	int ret;
908	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
909
910	ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
911				     - init_attr->act_nr_rwqes_rq2
912				     - init_attr->act_nr_rwqes_rq3 - 1);
913
914	ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
915
916	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
917
918	return ret;
919}
920
921static int ehea_reg_interrupts(struct net_device *dev)
922{
923	struct ehea_port *port = netdev_priv(dev);
924	struct ehea_port_res *pr;
925	int i, ret;
926
927
928	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
929		 dev->name);
930
931	ret = ibmebus_request_irq(NULL, port->qp_eq->attr.ist1,
932				  ehea_qp_aff_irq_handler,
933				  IRQF_DISABLED, port->int_aff_name, port);
934	if (ret) {
935		ehea_error("failed registering irq for qp_aff_irq_handler:"
936			   "ist=%X", port->qp_eq->attr.ist1);
937		goto out_free_qpeq;
938	}
939
940	if (netif_msg_ifup(port))
941		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
942			  "registered", port->qp_eq->attr.ist1);
943
944
945	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
946		pr = &port->port_res[i];
947		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
948			 "%s-queue%d", dev->name, i);
949		ret = ibmebus_request_irq(NULL, pr->eq->attr.ist1,
950					  ehea_recv_irq_handler,
951					  IRQF_DISABLED, pr->int_send_name,
952					  pr);
953		if (ret) {
954			ehea_error("failed registering irq for ehea_queue "
955				   "port_res_nr:%d, ist=%X", i,
956				   pr->eq->attr.ist1);
957			goto out_free_req;
958		}
959		if (netif_msg_ifup(port))
960			ehea_info("irq_handle 0x%X for function ehea_queue_int "
961				  "%d registered", pr->eq->attr.ist1, i);
962	}
963out:
964	return ret;
965
966
967out_free_req:
968	while (--i >= 0) {
969		u32 ist = port->port_res[i].eq->attr.ist1;
970		ibmebus_free_irq(NULL, ist, &port->port_res[i]);
971	}
972
973out_free_qpeq:
974	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
975	i = port->num_def_qps;
976
977	goto out;
978
979}
980
981static void ehea_free_interrupts(struct net_device *dev)
982{
983	struct ehea_port *port = netdev_priv(dev);
984	struct ehea_port_res *pr;
985	int i;
986
987	/* send */
988
989	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
990		pr = &port->port_res[i];
991		ibmebus_free_irq(NULL, pr->eq->attr.ist1, pr);
992		if (netif_msg_intr(port))
993			ehea_info("free send irq for res %d with handle 0x%X",
994				  i, pr->eq->attr.ist1);
995	}
996
997	/* associated events */
998	ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
999	if (netif_msg_intr(port))
1000		ehea_info("associated event interrupt for handle 0x%X freed",
1001			  port->qp_eq->attr.ist1);
1002}
1003
1004static int ehea_configure_port(struct ehea_port *port)
1005{
1006	int ret, i;
1007	u64 hret, mask;
1008	struct hcp_ehea_port_cb0 *cb0;
1009
1010	ret = -ENOMEM;
1011	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1012	if (!cb0)
1013		goto out;
1014
1015	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
1016		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
1017		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
1018		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
1019		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
1020				      PXLY_RC_VLAN_FILTER)
1021		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);
1022
1023	for (i = 0; i < port->num_mcs; i++)
1024		if (use_mcs)
1025			cb0->default_qpn_arr[i] =
1026				port->port_res[i].qp->init_attr.qp_nr;
1027		else
1028			cb0->default_qpn_arr[i] =
1029				port->port_res[0].qp->init_attr.qp_nr;
1030
1031	if (netif_msg_ifup(port))
1032		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");
1033
1034	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
1035	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);
1036
1037	hret = ehea_h_modify_ehea_port(port->adapter->handle,
1038				       port->logical_port_id,
1039				       H_PORT_CB0, mask, cb0);
1040	ret = -EIO;
1041	if (hret != H_SUCCESS)
1042		goto out_free;
1043
1044	ret = 0;
1045
1046out_free:
1047	kfree(cb0);
1048out:
1049	return ret;
1050}
1051
1052int ehea_gen_smrs(struct ehea_port_res *pr)
1053{
1054	int ret;
1055	struct ehea_adapter *adapter = pr->port->adapter;
1056
1057	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
1058	if (ret)
1059		goto out;
1060
1061	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
1062	if (ret)
1063		goto out_free;
1064
1065	return 0;
1066
1067out_free:
1068	ehea_rem_mr(&pr->send_mr);
1069out:
	ehea_error("Generating SMRS failed");
1071	return -EIO;
1072}
1073
1074int ehea_rem_smrs(struct ehea_port_res *pr)
1075{
1076	if ((ehea_rem_mr(&pr->send_mr))
1077	    || (ehea_rem_mr(&pr->recv_mr)))
1078		return -EIO;
1079	else
1080		return 0;
1081}
1082
1083static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
1084{
1085	int arr_size = sizeof(void*) * max_q_entries;
1086
1087	q_skba->arr = vmalloc(arr_size);
1088	if (!q_skba->arr)
1089		return -ENOMEM;
1090
1091	memset(q_skba->arr, 0, arr_size);
1092
1093	q_skba->len = max_q_entries;
1094	q_skba->index = 0;
1095	q_skba->os_skbs = 0;
1096
1097	return 0;
1098}
1099
1100static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1101			      struct port_res_cfg *pr_cfg, int queue_token)
1102{
1103	struct ehea_adapter *adapter = port->adapter;
1104	enum ehea_eq_type eq_type = EHEA_EQ;
1105	struct ehea_qp_init_attr *init_attr = NULL;
1106	int ret = -EIO;
1107
1108	memset(pr, 0, sizeof(struct ehea_port_res));
1109
1110	pr->port = port;
1111	spin_lock_init(&pr->xmit_lock);
1112	spin_lock_init(&pr->netif_queue);
1113
1114	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1115	if (!pr->eq) {
1116		ehea_error("create_eq failed (eq)");
1117		goto out_free;
1118	}
1119
1120	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
1121				     pr->eq->fw_handle,
1122				     port->logical_port_id);
1123	if (!pr->recv_cq) {
1124		ehea_error("create_cq failed (cq_recv)");
1125		goto out_free;
1126	}
1127
1128	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
1129				     pr->eq->fw_handle,
1130				     port->logical_port_id);
1131	if (!pr->send_cq) {
1132		ehea_error("create_cq failed (cq_send)");
1133		goto out_free;
1134	}
1135
1136	if (netif_msg_ifup(port))
1137		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
1138			  pr->send_cq->attr.act_nr_of_cqes,
1139			  pr->recv_cq->attr.act_nr_of_cqes);
1140
1141	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1142	if (!init_attr) {
1143		ret = -ENOMEM;
1144		ehea_error("no mem for ehea_qp_init_attr");
1145		goto out_free;
1146	}
1147
1148	init_attr->low_lat_rq1 = 1;
1149	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
1150	init_attr->rq_count = 3;
1151	init_attr->qp_token = queue_token;
1152	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
1153	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
1154	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
1155	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
1156	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
1157	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
1158	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
1159	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
1160	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
1161	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
1162	init_attr->port_nr = port->logical_port_id;
1163	init_attr->send_cq_handle = pr->send_cq->fw_handle;
1164	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
1165	init_attr->aff_eq_handle = port->qp_eq->fw_handle;
1166
1167	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1168	if (!pr->qp) {
1169		ehea_error("create_qp failed");
1170		ret = -EIO;
1171		goto out_free;
1172	}
1173
1174	if (netif_msg_ifup(port))
1175		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
1176			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
1177			  init_attr->act_nr_send_wqes,
1178			  init_attr->act_nr_rwqes_rq1,
1179			  init_attr->act_nr_rwqes_rq2,
1180			  init_attr->act_nr_rwqes_rq3);
1181
1182	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
1183	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
1184	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
1185	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
1186	if (ret)
1187		goto out_free;
1188
1189	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
1190	if (ehea_gen_smrs(pr) != 0) {
1191		ret = -EIO;
1192		goto out_free;
1193	}
1194
1195	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
1196
	kfree(init_attr);
	init_attr = NULL;	/* out_free below would kfree it a second time */

	pr->d_netdev = alloc_netdev(0, "", ether_setup);
	if (!pr->d_netdev) {
		ret = -ENOMEM;
		goto out_free;
	}
1202	pr->d_netdev->priv = pr;
1203	pr->d_netdev->weight = 64;
1204	pr->d_netdev->poll = ehea_poll;
1205	set_bit(__LINK_STATE_START, &pr->d_netdev->state);
1206	strcpy(pr->d_netdev->name, port->netdev->name);
1207
1208	ret = 0;
1209	goto out;
1210
1211out_free:
1212	kfree(init_attr);
1213	vfree(pr->sq_skba.arr);
1214	vfree(pr->rq1_skba.arr);
1215	vfree(pr->rq2_skba.arr);
1216	vfree(pr->rq3_skba.arr);
1217	ehea_destroy_qp(pr->qp);
1218	ehea_destroy_cq(pr->send_cq);
1219	ehea_destroy_cq(pr->recv_cq);
1220	ehea_destroy_eq(pr->eq);
1221out:
1222	return ret;
1223}
1224
1225static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1226{
1227	int ret, i;
1228
1229	free_netdev(pr->d_netdev);
1230
1231	ret = ehea_destroy_qp(pr->qp);
1232
1233	if (!ret) {
1234		ehea_destroy_cq(pr->send_cq);
1235		ehea_destroy_cq(pr->recv_cq);
1236		ehea_destroy_eq(pr->eq);
1237
1238		for (i = 0; i < pr->rq1_skba.len; i++)
1239			if (pr->rq1_skba.arr[i])
1240				dev_kfree_skb(pr->rq1_skba.arr[i]);
1241
1242		for (i = 0; i < pr->rq2_skba.len; i++)
1243			if (pr->rq2_skba.arr[i])
1244				dev_kfree_skb(pr->rq2_skba.arr[i]);
1245
1246		for (i = 0; i < pr->rq3_skba.len; i++)
1247			if (pr->rq3_skba.arr[i])
1248				dev_kfree_skb(pr->rq3_skba.arr[i]);
1249
1250		for (i = 0; i < pr->sq_skba.len; i++)
1251			if (pr->sq_skba.arr[i])
1252				dev_kfree_skb(pr->sq_skba.arr[i]);
1253
1254		vfree(pr->rq1_skba.arr);
1255		vfree(pr->rq2_skba.arr);
1256		vfree(pr->rq3_skba.arr);
1257		vfree(pr->sq_skba.arr);
1258		ret = ehea_rem_smrs(pr);
1259	}
1260	return ret;
1261}
1262
1263/*
1264 * The write_* functions store information in swqe which is used by
1265 * the hardware to calculate the ip/tcp/udp checksum
1266 */
1267
1268static inline void write_ip_start_end(struct ehea_swqe *swqe,
1269				      const struct sk_buff *skb)
1270{
1271	swqe->ip_start = skb_network_offset(skb);
1272	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
1273}
1274
1275static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
1276					const struct sk_buff *skb)
1277{
1278	swqe->tcp_offset =
1279		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
1280
1281	swqe->tcp_end = (u16)skb->len - 1;
1282}
1283
1284static inline void write_udp_offset_end(struct ehea_swqe *swqe,
1285					const struct sk_buff *skb)
1286{
1287	swqe->tcp_offset =
1288		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
1289
1290	swqe->tcp_end = (u16)skb->len - 1;
1291}
1292
1293
1294static void write_swqe2_TSO(struct sk_buff *skb,
1295			    struct ehea_swqe *swqe, u32 lkey)
1296{
1297	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1298	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1299	int skb_data_size = skb->len - skb->data_len;
1300	int headersize;
1301	u64 tmp_addr;
1302
1303	/* Packet is TCP with TSO enabled */
1304	swqe->tx_control |= EHEA_SWQE_TSO;
1305	swqe->mss = skb_shinfo(skb)->gso_size;
1306	/* copy only eth/ip/tcp headers to immediate data and
1307	 * the rest of skb->data to sg1entry
1308	 */
1309	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1310
1311	skb_data_size = skb->len - skb->data_len;
1312
1313	if (skb_data_size >= headersize) {
1314		/* copy immediate data */
1315		skb_copy_from_linear_data(skb, imm_data, headersize);
1316		swqe->immediate_data_length = headersize;
1317
1318		if (skb_data_size > headersize) {
1319			/* set sg1entry data */
1320			sg1entry->l_key = lkey;
1321			sg1entry->len = skb_data_size - headersize;
1322
1323			tmp_addr = (u64)(skb->data + headersize);
1324			sg1entry->vaddr = tmp_addr;
1325			swqe->descriptors++;
1326		}
1327	} else
1328		ehea_error("cannot handle fragmented headers");
1329}
1330
1331static void write_swqe2_nonTSO(struct sk_buff *skb,
1332			       struct ehea_swqe *swqe, u32 lkey)
1333{
1334	int skb_data_size = skb->len - skb->data_len;
1335	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1336	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1337	u64 tmp_addr;
1338
1339	/* Packet is any nonTSO type
1340	 *
1341	 * Copy as much as possible skb->data to immediate data and
1342	 * the rest to sg1entry
1343	 */
1344	if (skb_data_size >= SWQE2_MAX_IMM) {
1345		/* copy immediate data */
1346		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
1347
1348		swqe->immediate_data_length = SWQE2_MAX_IMM;
1349
1350		if (skb_data_size > SWQE2_MAX_IMM) {
1351			/* copy sg1entry data */
1352			sg1entry->l_key = lkey;
1353			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
1354			tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
1355			sg1entry->vaddr = tmp_addr;
1356			swqe->descriptors++;
1357		}
1358	} else {
1359		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
1360		swqe->immediate_data_length = skb_data_size;
1361	}
1362}
1363
1364static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
1365				    struct ehea_swqe *swqe, u32 lkey)
1366{
1367	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
1368	skb_frag_t *frag;
1369	int nfrags, sg1entry_contains_frag_data, i;
1370	u64 tmp_addr;
1371
1372	nfrags = skb_shinfo(skb)->nr_frags;
1373	sg1entry = &swqe->u.immdata_desc.sg_entry;
1374	sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list;
1375	swqe->descriptors = 0;
1376	sg1entry_contains_frag_data = 0;
1377
1378	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
1379		write_swqe2_TSO(skb, swqe, lkey);
1380	else
1381		write_swqe2_nonTSO(skb, swqe, lkey);
1382
1383	/* write descriptors */
1384	if (nfrags > 0) {
1385		if (swqe->descriptors == 0) {
1386			/* sg1entry not yet used */
1387			frag = &skb_shinfo(skb)->frags[0];
1388
1389			/* copy sg1entry data */
1390			sg1entry->l_key = lkey;
1391			sg1entry->len = frag->size;
1392			tmp_addr =  (u64)(page_address(frag->page)
1393					  + frag->page_offset);
1394			sg1entry->vaddr = tmp_addr;
1395			swqe->descriptors++;
1396			sg1entry_contains_frag_data = 1;
1397		}
1398
1399		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
1400
1401			frag = &skb_shinfo(skb)->frags[i];
1402			sgentry = &sg_list[i - sg1entry_contains_frag_data];
1403
1404			sgentry->l_key = lkey;
1405			sgentry->len = frag->size;
1406
1407			tmp_addr = (u64)(page_address(frag->page)
1408					 + frag->page_offset);
1409			sgentry->vaddr = tmp_addr;
1410			swqe->descriptors++;
1411		}
1412	}
1413}
1414
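/*
 * Register or deregister (depending on hcallid) the port for broadcast
 * traffic, once for untagged frames and once for all VLAN IDs.
 */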
1415static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1416{
1417	int ret = 0;
1418	u64 hret;
1419	u8 reg_type;
1420
1421	/* De/Register untagged packets */
1422	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1423	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1424				     port->logical_port_id,
1425				     reg_type, port->mac_addr, 0, hcallid);
1426	if (hret != H_SUCCESS) {
		ehea_error("reg_dereg_bcmc failed (untagged)");
1428		ret = -EIO;
1429		goto out_herr;
1430	}
1431
1432	/* De/Register VLAN packets */
1433	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1434	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1435				     port->logical_port_id,
1436				     reg_type, port->mac_addr, 0, hcallid);
1437	if (hret != H_SUCCESS) {
1438		ehea_error("reg_dereg_bcmc failed (vlan)");
1439		ret = -EIO;
1440	}
1441out_herr:
1442	return ret;
1443}
1444
1445static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1446{
1447	struct ehea_port *port = netdev_priv(dev);
1448	struct sockaddr *mac_addr = sa;
1449	struct hcp_ehea_port_cb0 *cb0;
1450	int ret;
1451	u64 hret;
1452
1453	if (!is_valid_ether_addr(mac_addr->sa_data)) {
1454		ret = -EADDRNOTAVAIL;
1455		goto out;
1456	}
1457
1458	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1459	if (!cb0) {
1460		ehea_error("no mem for cb0");
1461		ret = -ENOMEM;
1462		goto out;
1463	}
1464
1465	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1466
1467	cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1468
1469	hret = ehea_h_modify_ehea_port(port->adapter->handle,
1470				       port->logical_port_id, H_PORT_CB0,
1471				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1472	if (hret != H_SUCCESS) {
1473		ret = -EIO;
1474		goto out_free;
1475	}
1476
1477	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1478
1479	/* Deregister old MAC in pHYP */
1480	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1481	if (ret)
1482		goto out_free;
1483
1484	port->mac_addr = cb0->port_mac_addr << 16;
1485
1486	/* Register new MAC in pHYP */
1487	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1488	if (ret)
1489		goto out_free;
1490
1491	ret = 0;
1492out_free:
1493	kfree(cb0);
1494out:
1495	return ret;
1496}
1497
1498static void ehea_promiscuous_error(u64 hret, int enable)
1499{
1500	if (hret == H_AUTHORITY)
1501		ehea_info("Hypervisor denied %sabling promiscuous mode",
1502			  enable == 1 ? "en" : "dis");
1503	else
1504		ehea_error("failed %sabling promiscuous mode",
1505			   enable == 1 ? "en" : "dis");
1506}
1507
1508static void ehea_promiscuous(struct net_device *dev, int enable)
1509{
1510	struct ehea_port *port = netdev_priv(dev);
1511	struct hcp_ehea_port_cb7 *cb7;
1512	u64 hret;
1513
1514	if ((enable && port->promisc) || (!enable && !port->promisc))
1515		return;
1516
1517	cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
1518	if (!cb7) {
1519		ehea_error("no mem for cb7");
1520		goto out;
1521	}
1522
1523	/* Modify Pxs_DUCQPN in CB7 */
1524	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1525
1526	hret = ehea_h_modify_ehea_port(port->adapter->handle,
1527				       port->logical_port_id,
1528				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1529	if (hret) {
1530		ehea_promiscuous_error(hret, enable);
1531		goto out;
1532	}
1533
1534	port->promisc = enable;
1535out:
1536	kfree(cb7);
1537	return;
1538}
1539
1540static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1541				     u32 hcallid)
1542{
1543	u64 hret;
1544	u8 reg_type;
1545
1546	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1547		 | EHEA_BCMC_UNTAGGED;
1548
1549	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1550				     port->logical_port_id,
1551				     reg_type, mc_mac_addr, 0, hcallid);
1552	if (hret)
1553		goto out;
1554
1555	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1556		 | EHEA_BCMC_VLANID_ALL;
1557
1558	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1559				     port->logical_port_id,
1560				     reg_type, mc_mac_addr, 0, hcallid);
1561out:
1562	return hret;
1563}
1564
1565static int ehea_drop_multicast_list(struct net_device *dev)
1566{
1567	struct ehea_port *port = netdev_priv(dev);
1568	struct ehea_mc_list *mc_entry = port->mc_list;
1569	struct list_head *pos;
1570	struct list_head *temp;
1571	int ret = 0;
1572	u64 hret;
1573
1574	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1575		mc_entry = list_entry(pos, struct ehea_mc_list, list);
1576
1577		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1578						 H_DEREG_BCMC);
1579		if (hret) {
1580			ehea_error("failed deregistering mcast MAC");
1581			ret = -EIO;
1582		}
1583
1584		list_del(pos);
1585		kfree(mc_entry);
1586	}
1587	return ret;
1588}
1589
1590static void ehea_allmulti(struct net_device *dev, int enable)
1591{
1592	struct ehea_port *port = netdev_priv(dev);
1593	u64 hret;
1594
1595	if (!port->allmulti) {
1596		if (enable) {
1597			/* Enable ALLMULTI */
1598			ehea_drop_multicast_list(dev);
1599			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1600			if (!hret)
1601				port->allmulti = 1;
1602			else
1603				ehea_error("failed enabling IFF_ALLMULTI");
1604		}
1605	} else
1606		if (!enable) {
1607			/* Disable ALLMULTI */
1608			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
1609			if (!hret)
1610				port->allmulti = 0;
1611			else
1612				ehea_error("failed disabling IFF_ALLMULTI");
1613		}
1614}
1615
1616static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr)
1617{
1618	struct ehea_mc_list *ehea_mcl_entry;
1619	u64 hret;
1620
1621	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
1622	if (!ehea_mcl_entry) {
1623		ehea_error("no mem for mcl_entry");
1624		return;
1625	}
1626
1627	INIT_LIST_HEAD(&ehea_mcl_entry->list);
1628
1629	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1630
1631	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1632					 H_REG_BCMC);
1633	if (!hret)
1634		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1635	else {
1636		ehea_error("failed registering mcast MAC");
1637		kfree(ehea_mcl_entry);
1638	}
1639}
1640
1641static void ehea_set_multicast_list(struct net_device *dev)
1642{
1643	struct ehea_port *port = netdev_priv(dev);
1644	struct dev_mc_list *k_mcl_entry;
1645	int ret, i;
1646
1647	if (dev->flags & IFF_PROMISC) {
1648		ehea_promiscuous(dev, 1);
1649		return;
1650	}
1651	ehea_promiscuous(dev, 0);
1652
1653	if (dev->flags & IFF_ALLMULTI) {
1654		ehea_allmulti(dev, 1);
1655		return;
1656	}
1657	ehea_allmulti(dev, 0);
1658
1659	if (dev->mc_count) {
1660		ret = ehea_drop_multicast_list(dev);
1661		if (ret) {
1662			/* Dropping the current multicast list failed.
1663			 * Enabling ALL_MULTI is the best we can do.
1664			 */
1665			ehea_allmulti(dev, 1);
1666		}
1667
1668		if (dev->mc_count > port->adapter->max_mc_mac) {
1669			ehea_info("Mcast registration limit reached (0x%lx). "
1670				  "Use ALLMULTI!",
1671				  port->adapter->max_mc_mac);
1672			goto out;
1673		}
1674
1675		for (i = 0, k_mcl_entry = dev->mc_list;
1676		     i < dev->mc_count;
1677		     i++, k_mcl_entry = k_mcl_entry->next) {
1678			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
1679		}
1680	}
1681out:
1682	return;
1683}
1684
1685static int ehea_change_mtu(struct net_device *dev, int new_mtu)
1686{
1687	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
1688		return -EINVAL;
1689	dev->mtu = new_mtu;
1690	return 0;
1691}
1692
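/*
 * Fill a SWQE2 (descriptor based) send WQE: enable CRC and IP/TCP checksum
 * offload for IPv4 (UDP fragments keep their checksum untouched), record
 * the checksum offsets and attach the packet data via immediate data plus
 * scatter/gather descriptors.
 */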
1693static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
1694		       struct ehea_swqe *swqe, u32 lkey)
1695{
1696	if (skb->protocol == htons(ETH_P_IP)) {
1697		const struct iphdr *iph = ip_hdr(skb);
1698		/* IPv4 */
1699		swqe->tx_control |= EHEA_SWQE_CRC
1700				 | EHEA_SWQE_IP_CHECKSUM
1701				 | EHEA_SWQE_TCP_CHECKSUM
1702				 | EHEA_SWQE_IMM_DATA_PRESENT
1703				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
1704
1705		write_ip_start_end(swqe, skb);
1706
1707		if (iph->protocol == IPPROTO_UDP) {
1708			if ((iph->frag_off & IP_MF) ||
1709			    (iph->frag_off & IP_OFFSET))
1710				/* IP fragment, so don't change cs */
1711				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
1712			else
1713				write_udp_offset_end(swqe, skb);
1714
1715		} else if (iph->protocol == IPPROTO_TCP) {
1716			write_tcp_offset_end(swqe, skb);
1717		}
1718
1719		/* icmp (big data) and ip segmentation packets (all other ip
1720		   packets) do not require any special handling */
1721
1722	} else {
1723		/* Other Ethernet Protocol */
1724		swqe->tx_control |= EHEA_SWQE_CRC
1725				 | EHEA_SWQE_IMM_DATA_PRESENT
1726				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
1727	}
1728
1729	write_swqe2_data(skb, dev, swqe, lkey);
1730}
1731
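/*
 * Fill a SWQE3 send WQE for small frames: checksum offload bits are set as
 * in ehea_xmit2, the complete frame (linear part and fragments) is copied
 * into the WQE's immediate data area and the skb is freed right away.
 */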
1732static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
1733		       struct ehea_swqe *swqe)
1734{
1735	int nfrags = skb_shinfo(skb)->nr_frags;
1736	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
1737	skb_frag_t *frag;
1738	int i;
1739
1740	if (skb->protocol == htons(ETH_P_IP)) {
1741		const struct iphdr *iph = ip_hdr(skb);
1742		/* IPv4 */
1743		write_ip_start_end(swqe, skb);
1744
1745		if (iph->protocol == IPPROTO_TCP) {
1746			swqe->tx_control |= EHEA_SWQE_CRC
1747					 | EHEA_SWQE_IP_CHECKSUM
1748					 | EHEA_SWQE_TCP_CHECKSUM
1749					 | EHEA_SWQE_IMM_DATA_PRESENT;
1750
1751			write_tcp_offset_end(swqe, skb);
1752
1753		} else if (iph->protocol == IPPROTO_UDP) {
1754			if ((iph->frag_off & IP_MF) ||
1755			    (iph->frag_off & IP_OFFSET))
1756				/* IP fragment, so don't change cs */
1757				swqe->tx_control |= EHEA_SWQE_CRC
1758						 | EHEA_SWQE_IMM_DATA_PRESENT;
1759			else {
1760				swqe->tx_control |= EHEA_SWQE_CRC
1761						 | EHEA_SWQE_IP_CHECKSUM
1762						 | EHEA_SWQE_TCP_CHECKSUM
1763						 | EHEA_SWQE_IMM_DATA_PRESENT;
1764
1765				write_udp_offset_end(swqe, skb);
1766			}
1767		} else {
1768			/* icmp (big data) and
1769			   ip segmentation packets (all other ip packets) */
1770			swqe->tx_control |= EHEA_SWQE_CRC
1771					 | EHEA_SWQE_IP_CHECKSUM
1772					 | EHEA_SWQE_IMM_DATA_PRESENT;
1773		}
1774	} else {
1775		/* Other Ethernet Protocol */
1776		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
1777	}
1778	/* copy (immediate) data */
1779	if (nfrags == 0) {
1780		/* data is in a single piece */
1781		skb_copy_from_linear_data(skb, imm_data, skb->len);
1782	} else {
1783		/* first copy data from the skb->data buffer ... */
1784		skb_copy_from_linear_data(skb, imm_data,
1785					  skb->len - skb->data_len);
1786		imm_data += skb->len - skb->data_len;
1787
1788		/* ... then copy data from the fragments */
1789		for (i = 0; i < nfrags; i++) {
1790			frag = &skb_shinfo(skb)->frags[i];
1791			memcpy(imm_data,
1792			       page_address(frag->page) + frag->page_offset,
1793			       frag->size);
1794			imm_data += frag->size;
1795		}
1796	}
1797	swqe->immediate_data_length = skb->len;
1798	dev_kfree_skb(skb);
1799}
1800
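/*
 * Pick the send queue for a frame: TCP/IPv4 flows are spread across
 * num_qps queues by a simple hash over the TCP ports and the destination
 * address; all other traffic uses queue 0.
 */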
1801static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
1802{
1803	struct tcphdr *tcp;
1804	u32 tmp;
1805
1806	if ((skb->protocol == htons(ETH_P_IP)) &&
1807	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
1808		tcp = (struct tcphdr*)(skb_network_header(skb) + (ip_hdr(skb)->ihl * 4));
1809		tmp = (tcp->source + (tcp->dest << 16)) % 31;
1810		tmp += ip_hdr(skb)->daddr % 31;
1811		return tmp % num_qps;
1812	}
1813	else
1814		return 0;
1815}
1816
1817static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1818{
1819	struct ehea_port *port = netdev_priv(dev);
1820	struct ehea_swqe *swqe;
1821	unsigned long flags;
1822	u32 lkey;
1823	int swqe_index;
1824	struct ehea_port_res *pr;
1825
1826	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
1827
1828	if (!spin_trylock(&pr->xmit_lock))
1829		return NETDEV_TX_BUSY;
1830
1831	if (pr->queue_stopped) {
1832		spin_unlock(&pr->xmit_lock);
1833		return NETDEV_TX_BUSY;
1834	}
1835
1836	swqe = ehea_get_swqe(pr->qp, &swqe_index);
1837	memset(swqe, 0, SWQE_HEADER_SIZE);
1838	atomic_dec(&pr->swqe_avail);
1839
1840	if (skb->len <= SWQE3_MAX_IMM) {
1841		u32 sig_iv = port->sig_comp_iv;
1842		u32 swqe_num = pr->swqe_id_counter;
1843		ehea_xmit3(skb, dev, swqe);
1844		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
1845			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
1846		if (pr->swqe_ll_count >= (sig_iv - 1)) {
1847			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
1848						      sig_iv);
1849			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
1850			pr->swqe_ll_count = 0;
1851		} else
1852			pr->swqe_ll_count += 1;
1853	} else {
1854		swqe->wr_id =
1855			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
1856		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
1857		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
1858		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
1859		pr->sq_skba.arr[pr->sq_skba.index] = skb;
1860
1861		pr->sq_skba.index++;
1862		pr->sq_skba.index &= (pr->sq_skba.len - 1);
1863
1864		lkey = pr->send_mr.lkey;
1865		ehea_xmit2(skb, dev, swqe, lkey);
1866		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
1867	}
1868	pr->swqe_id_counter += 1;
1869
1870	if (port->vgrp && vlan_tx_tag_present(skb)) {
1871		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
1872		swqe->vlan_tag = vlan_tx_tag_get(skb);
1873	}
1874
1875	if (netif_msg_tx_queued(port)) {
1876		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
1877		ehea_dump(swqe, 512, "swqe");
1878	}
1879
1880	ehea_post_swqe(pr->qp, swqe);
1881	pr->tx_packets++;
1882
1883	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
1884		spin_lock_irqsave(&pr->netif_queue, flags);
1885		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
1886			pr->p_stats.queue_stopped++;
1887			netif_stop_queue(dev);
1888			pr->queue_stopped = 1;
1889		}
1890		spin_unlock_irqrestore(&pr->netif_queue, flags);
1891	}
1892	dev->trans_start = jiffies;
1893	spin_unlock(&pr->xmit_lock);
1894
1895	return NETDEV_TX_OK;
1896}
1897
1898static void ehea_vlan_rx_register(struct net_device *dev,
1899				  struct vlan_group *grp)
1900{
1901	struct ehea_port *port = netdev_priv(dev);
1902	struct ehea_adapter *adapter = port->adapter;
1903	struct hcp_ehea_port_cb1 *cb1;
1904	u64 hret;
1905
1906	port->vgrp = grp;
1907
1908	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1909	if (!cb1) {
1910		ehea_error("no mem for cb1");
1911		goto out;
1912	}
1913
1914	memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
1915
1916	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
1917				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
1918	if (hret != H_SUCCESS)
1919		ehea_error("modify_ehea_port failed");
1920
1921	kfree(cb1);
1922out:
1923	return;
1924}
1925
1926static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1927{
1928	struct ehea_port *port = netdev_priv(dev);
1929	struct ehea_adapter *adapter = port->adapter;
1930	struct hcp_ehea_port_cb1 *cb1;
1931	int index;
1932	u64 hret;
1933
1934	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1935	if (!cb1) {
1936		ehea_error("no mem for cb1");
1937		goto out;
1938	}
1939
1940	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
1941				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
1942	if (hret != H_SUCCESS) {
1943		ehea_error("query_ehea_port failed");
1944		goto out;
1945	}
1946
1947	index = (vid / 64);
1948	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
1949
1950	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
1951				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
1952	if (hret != H_SUCCESS)
1953		ehea_error("modify_ehea_port failed");
1954out:
1955	kfree(cb1);
1956	return;
1957}
1958
1959static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1960{
1961	struct ehea_port *port = netdev_priv(dev);
1962	struct ehea_adapter *adapter = port->adapter;
1963	struct hcp_ehea_port_cb1 *cb1;
1964	int index;
1965	u64 hret;
1966
1967	vlan_group_set_device(port->vgrp, vid, NULL);
1968
1969	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1970	if (!cb1) {
1971		ehea_error("no mem for cb1");
1972		goto out;
1973	}
1974
1975	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
1976				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
1977	if (hret != H_SUCCESS) {
1978		ehea_error("query_ehea_port failed");
1979		goto out;
1980	}
1981
1982	index = (vid / 64);
1983	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
1984
1985	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
1986				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
1987	if (hret != H_SUCCESS)
1988		ehea_error("modify_ehea_port failed");
1989out:
1990	kfree(cb1);
1991	return;
1992}
1993
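/*
 * Move a QP into send-ready state: step it through INITIALIZED, ENABLED
 * and RDY2SND with modify_qp hcalls, querying the QP around each
 * transition.
 */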
1994int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
1995{
1996	int ret = -EIO;
1997	u64 hret;
1998	u16 dummy16 = 0;
1999	u64 dummy64 = 0;
2000	struct hcp_modify_qp_cb0 *cb0;
2001
2002	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2003	if (!cb0) {
2004		ret = -ENOMEM;
2005		goto out;
2006	}
2007
2008	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2009				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2010	if (hret != H_SUCCESS) {
2011		ehea_error("query_ehea_qp failed (1)");
2012		goto out;
2013	}
2014
2015	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2016	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2017				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2018				     &dummy64, &dummy64, &dummy16, &dummy16);
2019	if (hret != H_SUCCESS) {
2020		ehea_error("modify_ehea_qp failed (1)");
2021		goto out;
2022	}
2023
2024	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2025				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2026	if (hret != H_SUCCESS) {
2027		ehea_error("query_ehea_qp failed (2)");
2028		goto out;
2029	}
2030
2031	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2032	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2033				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2034				     &dummy64, &dummy64, &dummy16, &dummy16);
2035	if (hret != H_SUCCESS) {
2036		ehea_error("modify_ehea_qp failed (2)");
2037		goto out;
2038	}
2039
2040	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2041				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2042	if (hret != H_SUCCESS) {
2043		ehea_error("query_ehea_qp failed (3)");
2044		goto out;
2045	}
2046
2047	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2048	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2049				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2050				     &dummy64, &dummy64, &dummy16, &dummy16);
2051	if (hret != H_SUCCESS) {
2052		ehea_error("modify_ehea_qp failed (3)");
2053		goto out;
2054	}
2055
2056	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2057				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2058	if (hret != H_SUCCESS) {
2059		ehea_error("query_ehea_qp failed (4)");
2060		goto out;
2061	}
2062
2063	ret = 0;
2064out:
2065	kfree(cb0);
2066	return ret;
2067}
2068
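/*
 * Create the port's event queue and queue resources: @def_qps fully sized
 * RX/TX sets (using the module parameters) plus @add_tx_qps TX-only sets
 * with minimal receive queues.
 */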
2069static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2070			       int add_tx_qps)
2071{
2072	int ret, i;
2073	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
2074	enum ehea_eq_type eq_type = EHEA_EQ;
2075
2076	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2077				   EHEA_MAX_ENTRIES_EQ, 1);
2078	if (!port->qp_eq) {
2079		ret = -EINVAL;
2080		ehea_error("ehea_create_eq failed (qp_eq)");
2081		goto out_kill_eq;
2082	}
2083
2084	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2085	pr_cfg.max_entries_scq = sq_entries * 2;
2086	pr_cfg.max_entries_sq = sq_entries;
2087	pr_cfg.max_entries_rq1 = rq1_entries;
2088	pr_cfg.max_entries_rq2 = rq2_entries;
2089	pr_cfg.max_entries_rq3 = rq3_entries;
2090
2091	pr_cfg_small_rx.max_entries_rcq = 1;
2092	pr_cfg_small_rx.max_entries_scq = sq_entries;
2093	pr_cfg_small_rx.max_entries_sq = sq_entries;
2094	pr_cfg_small_rx.max_entries_rq1 = 1;
2095	pr_cfg_small_rx.max_entries_rq2 = 1;
2096	pr_cfg_small_rx.max_entries_rq3 = 1;
2097
2098	for (i = 0; i < def_qps; i++) {
2099		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2100		if (ret)
2101			goto out_clean_pr;
2102	}
2103	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
2104		ret = ehea_init_port_res(port, &port->port_res[i],
2105					 &pr_cfg_small_rx, i);
2106		if (ret)
2107			goto out_clean_pr;
2108	}
2109
2110	return 0;
2111
2112out_clean_pr:
2113	while (--i >= 0)
2114		ehea_clean_portres(port, &port->port_res[i]);
2115
2116out_kill_eq:
2117	ehea_destroy_eq(port->qp_eq);
2118	return ret;
2119}
2120
2121static int ehea_clean_all_portres(struct ehea_port *port)
2122{
2123	int ret = 0;
2124	int i;
2125
2126	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2127		ret |= ehea_clean_portres(port, &port->port_res[i]);
2128
2129	ret |= ehea_destroy_eq(port->qp_eq);
2130
2131	return ret;
2132}
2133
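/* Free the adapter-wide memory region once the last port is gone. */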
2134static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2135{
2136	int i;
2137
2138	for (i = 0; i < EHEA_MAX_PORTS; i++)
2139		if (adapter->port[i])
2140			return;
2141
2142	ehea_rem_mr(&adapter->mr);
2143}
2144
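/*
 * Register the adapter-wide kernel memory region when the first port is
 * set up; later ports reuse the existing MR.
 */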
2145static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2146{
2147	int i;
2148
2149	for (i = 0; i < EHEA_MAX_PORTS; i++)
2150		if (adapter->port[i])
2151			return 0;
2152
2153	return ehea_reg_kernel_mr(adapter, &adapter->mr);
2154}
2155
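/*
 * Bring the port up: set up the port resources, configure the default QPs,
 * register the broadcast address (H_REG_BCMC), request the interrupts,
 * activate all QPs and pre-fill the receive queues.
 */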
2156static int ehea_up(struct net_device *dev)
2157{
2158	int ret, i;
2159	struct ehea_port *port = netdev_priv(dev);
2161
2162	if (port->state == EHEA_PORT_UP)
2163		return 0;
2164
2165	ret = ehea_port_res_setup(port, port->num_def_qps,
2166				  port->num_add_tx_qps);
2167	if (ret) {
2168		ehea_error("ehea_port_res_setup failed");
2169		goto out;
2170	}
2171
2172	/* Set default QP for this port */
2173	ret = ehea_configure_port(port);
2174	if (ret) {
2175		ehea_error("ehea_configure_port failed. ret:%d", ret);
2176		goto out_clean_pr;
2177	}
2178
2179	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2180	if (ret) {
2181		ret = -EIO;
2182		ehea_error("ehea_broadcast_reg_helper failed");
2183		goto out_clean_pr;
2184	}
2186
2187	ret = ehea_reg_interrupts(dev);
2188	if (ret) {
2189		ehea_error("ehea_reg_interrupts failed. ret:%d", ret);
2190		goto out_dereg_bc;
2191	}
2192
2193	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2194		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2195		if (ret) {
2196			ehea_error("activate_qp failed");
2197			goto out_free_irqs;
2198		}
2199	}
2200
2201	for (i = 0; i < port->num_def_qps; i++) {
2202		ret = ehea_fill_port_res(&port->port_res[i]);
2203		if (ret) {
2204			ehea_error("ehea_fill_port_res failed");
2205			goto out_free_irqs;
2206		}
2207	}
2208
2209	ret = 0;
2210	port->state = EHEA_PORT_UP;
2211	goto out;
2212
2213out_free_irqs:
2214	ehea_free_interrupts(dev);
2215
2216out_dereg_bc:
2217	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2218
2219out_clean_pr:
2220	ehea_clean_all_portres(port);
2221out:
2222	return ret;
2223}
2224
2225static int ehea_open(struct net_device *dev)
2226{
2227	int ret;
2228	struct ehea_port *port = netdev_priv(dev);
2229
2230	down(&port->port_lock);
2231
2232	if (netif_msg_ifup(port))
2233		ehea_info("enabling port %s", dev->name);
2234
2235	ret = ehea_up(dev);
2236	if (!ret)
2237		netif_start_queue(dev);
2238
2239	up(&port->port_lock);
2240
2241	return ret;
2242}
2243
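/*
 * Counterpart of ehea_up(): drop multicast registrations, free the
 * interrupts, wait for receive polling to finish, deregister the broadcast
 * address (H_DEREG_BCMC) and release all port resources.
 */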
2244static int ehea_down(struct net_device *dev)
2245{
2246	int ret, i;
2247	struct ehea_port *port = netdev_priv(dev);
2248
2249	if (port->state == EHEA_PORT_DOWN)
2250		return 0;
2251
2252	ehea_drop_multicast_list(dev);
2253	ehea_free_interrupts(dev);
2254
2255	for (i = 0; i < port->num_def_qps; i++)
2256		while (test_bit(__LINK_STATE_RX_SCHED,
2257				&port->port_res[i].d_netdev->state))
2258			msleep(1);
2259
2260	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2261	ret = ehea_clean_all_portres(port);
2262	port->state = EHEA_PORT_DOWN;
2263	return ret;
2264}
2265
2266static int ehea_stop(struct net_device *dev)
2267{
2268	int ret;
2269	struct ehea_port *port = netdev_priv(dev);
2270
2271	if (netif_msg_ifdown(port))
2272		ehea_info("disabling port %s", dev->name);
2273
2274	flush_workqueue(port->adapter->ehea_wq);
2275	down(&port->port_lock);
2276	netif_stop_queue(dev);
2277	ret = ehea_down(dev);
2278	up(&port->port_lock);
2279	return ret;
2280}
2281
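/*
 * Worker for the port reset task: stop the queue and polling, then cycle
 * the port through ehea_down()/ehea_up().
 */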
2282static void ehea_reset_port(struct work_struct *work)
2283{
2284	int ret;
2285	struct ehea_port *port =
2286		container_of(work, struct ehea_port, reset_task);
2287	struct net_device *dev = port->netdev;
2288
2289	port->resets++;
2290	down(&port->port_lock);
2291	netif_stop_queue(dev);
2292	netif_poll_disable(dev);
2293
2294	ret = ehea_down(dev);
2295	if (ret)
2296		ehea_error("ehea_down failed. not all resources are freed");
2297
2298	ret = ehea_up(dev);
2299	if (ret) {
2300		ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
2301		goto out;
2302	}
2303
2304	if (netif_msg_timer(port))
2305		ehea_info("Device %s reset successfully", dev->name);
2306
2307	netif_poll_enable(dev);
2308	netif_wake_queue(dev);
2309out:
2310	up(&port->port_lock);
2311	return;
2312}
2313
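/* Transmit timeout handler: trigger a port reset if the link is still up. */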
2314static void ehea_tx_watchdog(struct net_device *dev)
2315{
2316	struct ehea_port *port = netdev_priv(dev);
2317
2318	if (netif_carrier_ok(dev))
2319		queue_work(port->adapter->ehea_wq, &port->reset_task);
2320}
2321
2322int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2323{
2324	struct hcp_query_ehea *cb;
2325	u64 hret;
2326	int ret;
2327
2328	cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
2329	if (!cb) {
2330		ret = -ENOMEM;
2331		goto out;
2332	}
2333
2334	hret = ehea_h_query_ehea(adapter->handle, cb);
2335
2336	if (hret != H_SUCCESS) {
2337		ret = -EIO;
2338		goto out_herr;
2339	}
2340
2341	adapter->max_mc_mac = cb->max_mc_mac - 1;
2342	ret = 0;
2343
2344out_herr:
2345	kfree(cb);
2346out:
2347	return ret;
2348}
2349
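/*
 * Report whether jumbo frames are enabled on the port; if not, try to
 * enable them.  *jumbo is set to 1 when jumbo frames end up enabled.
 */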
2350int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2351{
2352	struct hcp_ehea_port_cb4 *cb4;
2353	u64 hret;
2354	int ret = 0;
2355
2356	*jumbo = 0;
2357
2358	/* (Try to) enable jumbo frames */
2359	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
2360	if (!cb4) {
2361		ehea_error("no mem for cb4");
2362		ret = -ENOMEM;
2363		goto out;
2364	} else {
2365		hret = ehea_h_query_ehea_port(port->adapter->handle,
2366					      port->logical_port_id,
2367					      H_PORT_CB4,
2368					      H_PORT_CB4_JUMBO, cb4);
2369		if (hret == H_SUCCESS) {
2370			if (cb4->jumbo_frame)
2371				*jumbo = 1;
2372			else {
2373				cb4->jumbo_frame = 1;
2374				hret = ehea_h_modify_ehea_port(port->adapter->
2375							       handle,
2376							       port->
2377							       logical_port_id,
2378							       H_PORT_CB4,
2379							       H_PORT_CB4_JUMBO,
2380							       cb4);
2381				if (hret == H_SUCCESS)
2382					*jumbo = 1;
2383			}
2384		} else
2385			ret = -EINVAL;
2386
2387		kfree(cb4);
2388	}
2389out:
2390	return ret;
2391}
2392
2393static ssize_t ehea_show_port_id(struct device *dev,
2394				 struct device_attribute *attr, char *buf)
2395{
2396	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2397	return sprintf(buf, "0x%X", port->logical_port_id);
2398}
2399
2400static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
2401		   NULL);
2402
2403static void __devinit logical_port_release(struct device *dev)
2404{
2405	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2406	of_node_put(port->ofdev.node);
2407}
2408
2409static int ehea_driver_sysfs_add(struct device *dev,
2410                                 struct device_driver *driver)
2411{
2412	int ret;
2413
2414	ret = sysfs_create_link(&driver->kobj, &dev->kobj,
2415				kobject_name(&dev->kobj));
2416	if (ret == 0) {
2417		ret = sysfs_create_link(&dev->kobj, &driver->kobj,
2418					"driver");
2419		if (ret)
2420			sysfs_remove_link(&driver->kobj,
2421					  kobject_name(&dev->kobj));
2422	}
2423	return ret;
2424}
2425
2426static void ehea_driver_sysfs_remove(struct device *dev,
2427                                     struct device_driver *driver)
2428{
2429	struct device_driver *drv = driver;
2430
2431	if (drv) {
2432		sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
2433		sysfs_remove_link(&dev->kobj, "driver");
2434	}
2435}
2436
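/*
 * Register the logical port as a device on the ibmebus, create its
 * log_port_id attribute and add the sysfs driver link.  Returns the
 * struct device on success, NULL on failure.
 */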
2437static struct device *ehea_register_port(struct ehea_port *port,
2438					 struct device_node *dn)
2439{
2440	int ret;
2441
2442	port->ofdev.node = of_node_get(dn);
2443	port->ofdev.dev.parent = &port->adapter->ebus_dev->ofdev.dev;
2444	port->ofdev.dev.bus = &ibmebus_bus_type;
2445
2446	sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++);
2447	port->ofdev.dev.release = logical_port_release;
2448
2449	ret = of_device_register(&port->ofdev);
2450	if (ret) {
2451		ehea_error("failed to register device. ret=%d", ret);
2452		goto out;
2453	}
2454
2455	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2456	if (ret) {
2457		ehea_error("failed to register attributes, ret=%d", ret);
2458		goto out_unreg_of_dev;
2459	}
2460
2461	ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver);
2462	if (ret) {
2463		ehea_error("failed to register sysfs driver link");
2464		goto out_rem_dev_file;
2465	}
2466
2467	return &port->ofdev.dev;
2468
2469out_rem_dev_file:
2470	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2471out_unreg_of_dev:
2472	of_device_unregister(&port->ofdev);
2473out:
2474	return NULL;
2475}
2476
2477static void ehea_unregister_port(struct ehea_port *port)
2478{
2479	ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver);
2480	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2481	of_device_unregister(&port->ofdev);
2482}
2483
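/*
 * Allocate and initialize the net_device for one logical port, sense its
 * attributes, register it with the network stack and report its
 * jumbo-frame status.
 */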
2484struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2485					 u32 logical_port_id,
2486					 struct device_node *dn)
2487{
2488	int ret;
2489	struct net_device *dev;
2490	struct ehea_port *port;
2491	struct device *port_dev;
2492	int jumbo;
2493
2494	/* allocate memory for the port structures */
2495	dev = alloc_etherdev(sizeof(struct ehea_port));
2496
2497	if (!dev) {
2498		ehea_error("no mem for net_device");
2499		ret = -ENOMEM;
2500		goto out_err;
2501	}
2502
2503	port = netdev_priv(dev);
2504
2505	sema_init(&port->port_lock, 1);
2506	port->state = EHEA_PORT_DOWN;
2507	port->sig_comp_iv = sq_entries / 10;
2508
2509	port->adapter = adapter;
2510	port->netdev = dev;
2511	port->logical_port_id = logical_port_id;
2512
2513	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
2514
2515	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
2516	if (!port->mc_list) {
2517		ret = -ENOMEM;
2518		goto out_free_ethdev;
2519	}
2520
2521	INIT_LIST_HEAD(&port->mc_list->list);
2522
2523	ret = ehea_sense_port_attr(port);
2524	if (ret)
2525		goto out_free_mc_list;
2526
2527	port_dev = ehea_register_port(port, dn);
2528	if (!port_dev)
2529		goto out_free_mc_list;
2530
2531	SET_NETDEV_DEV(dev, port_dev);
2532
2533	/* initialize net_device structure */
2534	SET_MODULE_OWNER(dev);
2535
2536	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
2537
2538	dev->open = ehea_open;
2539	dev->poll = ehea_poll;
2540	dev->weight = 64;
2541	dev->stop = ehea_stop;
2542	dev->hard_start_xmit = ehea_start_xmit;
2543	dev->get_stats = ehea_get_stats;
2544	dev->set_multicast_list = ehea_set_multicast_list;
2545	dev->set_mac_address = ehea_set_mac_addr;
2546	dev->change_mtu = ehea_change_mtu;
2547	dev->vlan_rx_register = ehea_vlan_rx_register;
2548	dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
2549	dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
2550	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
2551		      | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX
2552		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
2553		      | NETIF_F_LLTX;
2554	dev->tx_timeout = &ehea_tx_watchdog;
2555	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
2556
2557	INIT_WORK(&port->reset_task, ehea_reset_port);
2558
2559	ehea_set_ethtool_ops(dev);
2560
2561	ret = register_netdev(dev);
2562	if (ret) {
2563		ehea_error("register_netdev failed. ret=%d", ret);
2564		goto out_unreg_port;
2565	}
2566
2567	ret = ehea_get_jumboframe_status(port, &jumbo);
2568	if (ret)
2569		ehea_error("failed determining jumbo frame status for %s",
2570			   port->netdev->name);
2571
2572	ehea_info("%s: Jumbo frames are %sabled", dev->name,
2573		  jumbo == 1 ? "en" : "dis");
2574
2575	return port;
2576
2577out_unreg_port:
2578	ehea_unregister_port(port);
2579
2580out_free_mc_list:
2581	kfree(port->mc_list);
2582
2583out_free_ethdev:
2584	free_netdev(dev);
2585
2586out_err:
2587	ehea_error("setting up logical port with id=%d failed, ret=%d",
2588		   logical_port_id, ret);
2589	return NULL;
2590}
2591
2592static void ehea_shutdown_single_port(struct ehea_port *port)
2593{
2594	unregister_netdev(port->netdev);
2595	ehea_unregister_port(port);
2596	kfree(port->mc_list);
2597	free_netdev(port->netdev);
2598}
2599
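/*
 * Walk the children of the lhea device tree node and set up one port for
 * every node that carries an "ibm,hea-port-no" property.
 */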
2600static int ehea_setup_ports(struct ehea_adapter *adapter)
2601{
2602	struct device_node *lhea_dn;
2603	struct device_node *eth_dn = NULL;
2604	const u32 *dn_log_port_id;
2605	int i = 0;
2606
2607	lhea_dn = adapter->ebus_dev->ofdev.node;
2608	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
2609
2610		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
2611						    NULL);
2612		if (!dn_log_port_id) {
2613			ehea_error("bad device node: eth_dn name=%s",
2614				   eth_dn->full_name);
2615			continue;
2616		}
2617
2618		if (ehea_add_adapter_mr(adapter)) {
2619			ehea_error("creating MR failed");
2620			of_node_put(eth_dn);
2621			return -EIO;
2622		}
2623
2624		adapter->port[i] = ehea_setup_single_port(adapter,
2625							  *dn_log_port_id,
2626							  eth_dn);
2627		if (adapter->port[i])
2628			ehea_info("%s -> logical port id #%d",
2629				  adapter->port[i]->netdev->name,
2630				  *dn_log_port_id);
2631		else
2632			ehea_remove_adapter_mr(adapter);
2633
2634		i++;
2635	}
2636
2637	return 0;
2638}
2639
2640static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
2641					   u32 logical_port_id)
2642{
2643	struct device_node *lhea_dn;
2644	struct device_node *eth_dn = NULL;
2645	const u32 *dn_log_port_id;
2646
2647	lhea_dn = adapter->ebus_dev->ofdev.node;
2648	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
2649
2650		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
2651						    NULL);
2652		if (dn_log_port_id)
2653			if (*dn_log_port_id == logical_port_id)
2654				return eth_dn;
2655	}
2656
2657	return NULL;
2658}
2659
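/*
 * sysfs store handler for probe_port: parse a logical port id from @buf
 * and add the corresponding port at runtime.
 */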
2660static ssize_t ehea_probe_port(struct device *dev,
2661			       struct device_attribute *attr,
2662			       const char *buf, size_t count)
2663{
2664	struct ehea_adapter *adapter = dev->driver_data;
2665	struct ehea_port *port;
2666	struct device_node *eth_dn = NULL;
2667	int i;
2668
2669	u32 logical_port_id;
2670
2671	sscanf(buf, "%X", &logical_port_id);
2672
2673	port = ehea_get_port(adapter, logical_port_id);
2674
2675	if (port) {
2676		ehea_info("adding port with logical port id=%d failed. port "
2677			  "already configured as %s.", logical_port_id,
2678			  port->netdev->name);
2679		return -EINVAL;
2680	}
2681
2682	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
2683
2684	if (!eth_dn) {
2685		ehea_info("no logical port with id %d found", logical_port_id);
2686		return -EINVAL;
2687	}
2688
2689	if (ehea_add_adapter_mr(adapter)) {
2690		ehea_error("creating MR failed");
2691		return -EIO;
2692	}
2693
2694	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
2695
2696	of_node_put(eth_dn);
2697
2698	if (port) {
2699		for (i = 0; i < EHEA_MAX_PORTS; i++)
2700			if (!adapter->port[i]) {
2701				adapter->port[i] = port;
2702				break;
2703			}
2704
2705		ehea_info("added %s (logical port id=%d)", port->netdev->name,
2706			  logical_port_id);
2707	} else {
2708		ehea_remove_adapter_mr(adapter);
2709		return -EIO;
2710	}
2711
2712	return (ssize_t) count;
2713}
2714
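/*
 * sysfs store handler for remove_port: parse a logical port id from @buf
 * and shut down the matching port.
 */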
2715static ssize_t ehea_remove_port(struct device *dev,
2716				struct device_attribute *attr,
2717				const char *buf, size_t count)
2718{
2719	struct ehea_adapter *adapter = dev->driver_data;
2720	struct ehea_port *port;
2721	int i;
2722	u32 logical_port_id;
2723
2724	sscanf(buf, "%X", &logical_port_id);
2725
2726	port = ehea_get_port(adapter, logical_port_id);
2727
2728	if (port) {
2729		ehea_info("removed %s (logical port id=%d)", port->netdev->name,
2730			  logical_port_id);
2731
2732		ehea_shutdown_single_port(port);
2733
2734		for (i = 0; i < EHEA_MAX_PORTS; i++)
2735			if (adapter->port[i] == port) {
2736				adapter->port[i] = NULL;
2737				break;
2738			}
2739	} else {
2740		ehea_error("removing port with logical port id=%d failed. port "
2741			   "not configured.", logical_port_id);
2742		return -EINVAL;
2743	}
2744
2745	ehea_remove_adapter_mr(adapter);
2746
2747	return (ssize_t) count;
2748}
2749
2750static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
2751static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
2752
2753int ehea_create_device_sysfs(struct ibmebus_dev *dev)
2754{
2755	int ret = device_create_file(&dev->ofdev.dev, &dev_attr_probe_port);
2756	if (ret)
2757		goto out;
2758
2759	ret = device_create_file(&dev->ofdev.dev, &dev_attr_remove_port);
2760out:
2761	return ret;
2762}
2763
2764void ehea_remove_device_sysfs(struct ibmebus_dev *dev)
2765{
2766	device_remove_file(&dev->ofdev.dev, &dev_attr_probe_port);
2767	device_remove_file(&dev->ofdev.dev, &dev_attr_remove_port);
2768}
2769
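/*
 * Probe one lhea adapter: read its handle from the "ibm,hea-handle"
 * property, create the notification event queue, its interrupt, the
 * workqueue and the sysfs files, then set up all ports.
 */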
2770static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
2771					const struct of_device_id *id)
2772{
2773	struct ehea_adapter *adapter;
2774	const u64 *adapter_handle;
2775	int ret;
2776
2777	if (!dev || !dev->ofdev.node) {
2778		ehea_error("Invalid ibmebus device probed");
2779		return -EINVAL;
2780	}
2781
2782	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2783	if (!adapter) {
2784		ret = -ENOMEM;
2785		dev_err(&dev->ofdev.dev, "no mem for ehea_adapter\n");
2786		goto out;
2787	}
2788
2789	adapter->ebus_dev = dev;
2790
2791	adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle",
2792					    NULL);
2793	if (adapter_handle)
2794		adapter->handle = *adapter_handle;
2795
2796	if (!adapter->handle) {
2797		dev_err(&dev->ofdev.dev, "failed getting handle for adapter"
2798			" '%s'\n", dev->ofdev.node->full_name);
2799		ret = -ENODEV;
2800		goto out_free_ad;
2801	}
2802
2803	adapter->pd = EHEA_PD_ID;
2804
2805	dev->ofdev.dev.driver_data = adapter;
2806
2807
2808	/* initialize adapter and ports */
2809	/* get adapter properties */
2810	ret = ehea_sense_adapter_attr(adapter);
2811	if (ret) {
2812		dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d\n", ret);
2813		goto out_free_ad;
2814	}
2815
2816	adapter->neq = ehea_create_eq(adapter,
2817				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
2818	if (!adapter->neq) {
2819		ret = -EIO;
2820		dev_err(&dev->ofdev.dev, "NEQ creation failed\n");
2821		goto out_free_ad;
2822	}
2823
2824	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
2825		     (unsigned long)adapter);
2826
2827	ret = ibmebus_request_irq(NULL, adapter->neq->attr.ist1,
2828				  ehea_interrupt_neq, IRQF_DISABLED,
2829				  "ehea_neq", adapter);
2830	if (ret) {
2831		dev_err(&dev->ofdev.dev, "requesting NEQ IRQ failed\n");
2832		goto out_kill_eq;
2833	}
2834
2835	adapter->ehea_wq = create_workqueue("ehea_wq");
2836	if (!adapter->ehea_wq) {
2837		ret = -EIO;
2838		goto out_free_irq;
2839	}
2840
2841	ret = ehea_create_device_sysfs(dev);
2842	if (ret)
2843		goto out_kill_wq;
2844
2845	ret = ehea_setup_ports(adapter);
2846	if (ret) {
2847		dev_err(&dev->ofdev.dev, "setup_ports failed\n");
2848		goto out_rem_dev_sysfs;
2849	}
2850
2851	ret = 0;
2852	goto out;
2853
2854out_rem_dev_sysfs:
2855	ehea_remove_device_sysfs(dev);
2856
2857out_kill_wq:
2858	destroy_workqueue(adapter->ehea_wq);
2859
2860out_free_irq:
2861	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
2862
2863out_kill_eq:
2864	ehea_destroy_eq(adapter->neq);
2865
2866out_free_ad:
2867	kfree(adapter);
2868out:
2869	return ret;
2870}
2871
2872static int __devexit ehea_remove(struct ibmebus_dev *dev)
2873{
2874	struct ehea_adapter *adapter = dev->ofdev.dev.driver_data;
2875	int i;
2876
2877	for (i = 0; i < EHEA_MAX_PORTS; i++)
2878		if (adapter->port[i]) {
2879			ehea_shutdown_single_port(adapter->port[i]);
2880			adapter->port[i] = NULL;
2881		}
2882
2883	ehea_remove_device_sysfs(dev);
2884
2885	destroy_workqueue(adapter->ehea_wq);
2886
2887	ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
2888	tasklet_kill(&adapter->neq_tasklet);
2889
2890	ehea_destroy_eq(adapter->neq);
2891	ehea_remove_adapter_mr(adapter);
2892	kfree(adapter);
2893	return 0;
2894}
2895
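/* Validate the queue size module parameters against the supported limits. */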
2896static int check_module_parm(void)
2897{
2898	int ret = 0;
2899
2900	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
2901	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
2902		ehea_info("Bad parameter: rq1_entries");
2903		ret = -EINVAL;
2904	}
2905	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
2906	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
2907		ehea_info("Bad parameter: rq2_entries");
2908		ret = -EINVAL;
2909	}
2910	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
2911	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
2912		ehea_info("Bad parameter: rq3_entries");
2913		ret = -EINVAL;
2914	}
2915	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
2916	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
2917		ehea_info("Bad parameter: sq_entries");
2918		ret = -EINVAL;
2919	}
2920
2921	return ret;
2922}
2923
2924int __init ehea_module_init(void)
2925{
2926	int ret;
2927
2928	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
2929	       DRV_VERSION);
2930
2931	ret = check_module_parm();
2932	if (ret)
2933		goto out;
2934	ret = ibmebus_register_driver(&ehea_driver);
2935	if (ret)
2936		ehea_error("failed registering eHEA device driver on ebus");
2937
2938out:
2939	return ret;
2940}
2941
2942static void __exit ehea_module_exit(void)
2943{
2944	ibmebus_unregister_driver(&ehea_driver);
2945}
2946
2947module_init(ehea_module_init);
2948module_exit(ehea_module_exit);
2949