// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2009
 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
 *		 Frank Pavlic <fpavlic@de.ibm.com>,
 *		 Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/rcutree.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
#include <net/sock.h>

#include <asm/ebcdic.h>
#include <asm/chpid.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/cpcmd.h>

#include "qeth_core.h"

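/* s390 debug feature (dbf) areas used by qeth: "qeth_setup" traces the
 * device setup flow, "qeth_msg" stores sprintf-formatted driver messages,
 * and "qeth_control" captures raw control-path data as hex/ascii dumps.
 */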
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);

static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct kmem_cache *qeth_qaob_cache;

static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
static struct lock_class_key qdio_out_skb_queue_key;

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);

static const char *qeth_get_cardname(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Virtual NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Virtual NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Virtual NIC QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Virtual NIC QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";
}

/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (IS_VM_NIC(card)) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "Virt.NIC QDIO";
		case QETH_CARD_TYPE_IQD:
			return "Virt.NIC Hiper";
		case QETH_CARD_TYPE_OSM:
			return "Virt.NIC OSM";
		case QETH_CARD_TYPE_OSX:
			return "Virt.NIC OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_25GBIT_ETH:
				return "OSD_25GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";
}

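/* Thread mask bookkeeping: thread_allowed_mask gates which async threads
 * (e.g. the recovery thread) may be started, thread_start_mask marks
 * threads that have been requested, and thread_running_mask tracks those
 * currently active. All three are protected by thread_mask_lock.
 */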
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			 int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);

int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	rc = (card->thread_running_mask & threads);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);

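/* RX buffer pool handling: init_pool owns the page-backed pool entries,
 * while in_buf_pool.entry_list holds the subset currently available to the
 * inbound queue. Clearing the working list detaches entries from the queue
 * buffers without freeing the backing pages.
 */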
static void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;
	struct qeth_qdio_q *queue = card->qdio.in_q;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
				 &card->qdio.in_buf_pool.entry_list, list)
		list_del(&pool_entry->list);

	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
		queue->bufs[i].pool_entry = NULL;
}

static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
		if (entry->elements[i])
			__free_page(entry->elements[i]);
	}

	kfree(entry);
}

static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
				 init_list) {
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);
	}
}

static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
{
	struct qeth_buffer_pool_entry *entry;
	unsigned int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	for (i = 0; i < pages; i++) {
		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

		if (!entry->elements[i]) {
			qeth_free_pool_entry(entry);
			return NULL;
		}
	}

	return entry;
}

static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		struct qeth_buffer_pool_entry *entry;

		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}

		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
	}
	return 0;
}

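/* Grow or shrink the RX buffer pool to @count entries. Shrinking frees
 * entries immediately; growing stages new entries on a local list first,
 * so that an allocation failure leaves the existing pool untouched.
 */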
int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
{
	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
	struct qeth_buffer_pool_entry *entry, *tmp;
	int delta = count - pool->buf_count;
	LIST_HEAD(entries);

	QETH_CARD_TEXT(card, 2, "realcbp");

	/* Defer until pool is allocated: */
	if (list_empty(&pool->entry_list))
		goto out;

	/* Remove entries from the pool: */
	while (delta < 0) {
		entry = list_first_entry(&pool->entry_list,
					 struct qeth_buffer_pool_entry,
					 init_list);
		list_del(&entry->init_list);
		qeth_free_pool_entry(entry);

		delta++;
	}

	/* Allocate additional entries: */
	while (delta > 0) {
		entry = qeth_alloc_pool_entry(buf_elements);
		if (!entry) {
			list_for_each_entry_safe(entry, tmp, &entries,
						 init_list) {
				list_del(&entry->init_list);
				qeth_free_pool_entry(entry);
			}

			return -ENOMEM;
		}

		list_add(&entry->init_list, &entries);

		delta--;
	}

	list_splice(&entries, &pool->entry_list);

out:
	card->qdio.in_buf_pool.buf_count = count;
	pool->buf_count = count;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);

static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}

static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}

static int qeth_cq_init(struct qeth_card *card)
{
	int rc;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqinit");
		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
				   QDIO_MAX_BUFFERS_PER_Q);
		card->qdio.c_q->next_buf_to_init = 127;

		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 1, 0, 127);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
			goto out;
		}
	}
	rc = 0;
out:
	return rc;
}

static int qeth_alloc_cq(struct qeth_card *card)
{
	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_CARD_TEXT(card, 2, "cqon");
		card->qdio.c_q = qeth_alloc_qdio_queue();
		if (!card->qdio.c_q) {
			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
			return -ENOMEM;
		}
	} else {
		QETH_CARD_TEXT(card, 2, "nocq");
		card->qdio.c_q = NULL;
	}
	return 0;
}

static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
}

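/* Map the error code from SBAL flag 15 of a TX completion to an AF_IUCV
 * notification: 0 means success, the unreachable-peer codes map to
 * TX_NOTIFY_(DELAYED_)UNREACHABLE, and anything else is reported as a
 * general error.
 */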
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
							int delayed)
{
	enum iucv_tx_notify n;

	switch (sbalf15) {
	case 0:
		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
		break;
	case 4:
	case 16:
	case 17:
	case 18:
		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
			TX_NOTIFY_UNREACHABLE;
		break;
	default:
		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			TX_NOTIFY_GENERALERROR;
		break;
	}

	return n;
}

static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}

static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = virt_to_dma32(data);
}

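/* Re-arm the READ channel with the long-lived read_cmd iob. The extra
 * reference taken before ccw_device_start() keeps the iob alive across the
 * completion callback; on error, the channel is unlocked and a recovery is
 * scheduled. Must be called with the ccw device lock held.
 */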
static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		qeth_unlock_channel(card, channel);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
	}
	return rc;
}

static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}

static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}

static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list_entry);
	spin_unlock_irq(&card->lock);
}

static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}

static void qeth_flush_local_addrs4(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs4_lock);
	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs4_lock);
}

static void qeth_flush_local_addrs6(struct qeth_card *card)
{
	struct qeth_local_addr *addr;
	struct hlist_node *tmp;
	unsigned int i;

	spin_lock_irq(&card->local_addrs6_lock);
	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
		hash_del_rcu(&addr->hnode);
		kfree_rcu(addr, rcu);
	}
	spin_unlock_irq(&card->local_addrs6_lock);
}

static void qeth_flush_local_addrs(struct qeth_card *card)
{
	qeth_flush_local_addrs4(card);
	qeth_flush_local_addrs6(card);
}

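/* The device reports addresses that are local to this interface via
 * (UN)REGISTER_LOCAL_ADDR events. They are cached in RCU-protected hash
 * tables so that the TX path can cheaply test whether a next-hop address
 * is local (see qeth_next_hop_is_local_v4/v6 below).
 */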
static void qeth_add_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_add_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
		struct qeth_local_addr *addr;
		bool duplicate = false;

		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
				duplicate = true;
				break;
			}
		}

		if (duplicate)
			continue;

		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
		if (!addr) {
			dev_err(&card->gdev->dev,
				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
				&cmd->addrs[i].addr);
			continue;
		}

		addr->addr = cmd->addrs[i].addr;
		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
	}
	spin_unlock(&card->local_addrs6_lock);
}

static void qeth_del_local_addrs4(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs4 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs4_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
		unsigned int key = ipv4_addr_hash(addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
			if (tmp->addr.s6_addr32[3] == addr->addr) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs4_lock);
}

static void qeth_del_local_addrs6(struct qeth_card *card,
				  struct qeth_ipacmd_local_addrs6 *cmd)
{
	unsigned int i;

	if (cmd->addr_length !=
	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
		dev_err_ratelimited(&card->gdev->dev,
				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
				    cmd->addr_length);
		return;
	}

	spin_lock(&card->local_addrs6_lock);
	for (i = 0; i < cmd->count; i++) {
		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
		u32 key = ipv6_addr_hash(&addr->addr);
		struct qeth_local_addr *tmp;

		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
				hash_del_rcu(&tmp->hnode);
				kfree_rcu(tmp, rcu);
				break;
			}
		}
	}
	spin_unlock(&card->local_addrs6_lock);
}

static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	bool is_local = false;
	unsigned int key;
	__be32 next_hop;

	if (hash_empty(card->local_addrs4))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v4_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
	key = ipv4_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
		if (tmp->addr.s6_addr32[3] == next_hop) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
				      struct sk_buff *skb)
{
	struct qeth_local_addr *tmp;
	struct in6_addr *next_hop;
	bool is_local = false;
	u32 key;

	if (hash_empty(card->local_addrs6))
		return false;

	rcu_read_lock();
	next_hop = qeth_next_hop_v6_rcu(skb,
					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
	key = ipv6_addr_hash(next_hop);

	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
			is_local = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_local;
}

static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
{
	struct qeth_card *card = m->private;
	struct qeth_local_addr *tmp;
	unsigned int i;

	rcu_read_lock();
	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
		seq_printf(m, "%pI6c\n", &tmp->addr);
	rcu_read_unlock();

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);

static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
		struct qeth_card *card)
{
	const char *ipa_name;
	int com = cmd->hdr.command;

	ipa_name = qeth_get_ipa_cmd_name(com);

	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
				 ipa_name, com, CARD_DEVID(card), rc,
				 qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
				 ipa_name, com, CARD_DEVID(card));
}

static void qeth_default_link_info(struct qeth_card *card)
{
	struct qeth_link_info *link_info = &card->info.link_info;

	QETH_CARD_TEXT(card, 2, "dftlinfo");
	link_info->duplex = DUPLEX_FULL;

	if (IS_IQD(card) || IS_VM_NIC(card)) {
		link_info->speed = SPEED_10000;
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
	} else {
		switch (card->info.link_type) {
		case QETH_LINK_TYPE_FAST_ETH:
		case QETH_LINK_TYPE_LANE_ETH100:
			link_info->speed = SPEED_100;
			link_info->port = PORT_TP;
			break;
		case QETH_LINK_TYPE_GBIT_ETH:
		case QETH_LINK_TYPE_LANE_ETH1000:
			link_info->speed = SPEED_1000;
			link_info->port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_10GBIT_ETH:
			link_info->speed = SPEED_10000;
			link_info->port = PORT_FIBRE;
			break;
		case QETH_LINK_TYPE_25GBIT_ETH:
			link_info->speed = SPEED_25000;
			link_info->port = PORT_FIBRE;
			break;
		default:
			dev_info(&card->gdev->dev,
				 "Unknown link type %x\n",
				 card->info.link_type);
			link_info->speed = SPEED_UNKNOWN;
			link_info->port = PORT_OTHER;
		}

		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
	}
}

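/* Distinguish solicited IPA replies from unsolicited events. Replies are
 * handed back to the caller for matching against cmd_waiter_list, while
 * events (link changes, bridgeport notifications, local-address updates)
 * are largely consumed here and make the function return NULL.
 */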
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			dev_err(&card->gdev->dev,
				"Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
				netdev_name(card->dev));
			/* Set offline, then probably fail to set online: */
			qeth_schedule_recovery(card);
		} else {
			/* stay online for subsequent STARTLAN */
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 netdev_name(card->dev), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
			qeth_default_link_info(card);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 netdev_name(card->dev), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);

		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}

static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
		qeth_notify_cmd(iob, -ECANCELED);
	spin_unlock_irqrestore(&card->lock, flags);
}

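/* Inspect a control-channel response for an IDX TERMINATE indication and
 * translate its cause code into an errno; a bad-transport cause means that
 * the device does not support the configured transport mode.
 */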
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}

static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}

static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}

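/* Allocate a command buffer with a DMA-capable data area, plus trailing
 * space for @ccws channel command words. Note: the CCW area appears to sit
 * behind the 8-byte-aligned data portion (see __ccw_from_cmd()).
 */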
static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
					      unsigned int length,
					      unsigned int ccws, long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}

static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		fallthrough;
	default:
		qeth_clear_ipacmd_list(card);
		goto err_idx;
	}

	cmd = __ipa_reply(iob);
	if (cmd) {
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
		if (tmp->match && tmp->match(tmp, iob)) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	__qeth_issue_next_read(card);
err_idx:
	qeth_put_cmd(iob);
}

static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread))
		rc = -EPERM;
	else if (card->thread_start_mask & thread)
		rc = -EBUSY;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);

	return rc;
}

static void qeth_clear_thread_start_bit(struct qeth_card *card,
					unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}

static void qeth_clear_thread_running_bit(struct qeth_card *card,
					  unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up_all(&card->wait_q);
}

static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}

int qeth_schedule_recovery(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "startrec");

	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
	if (!rc)
		schedule_work(&card->kernel_thread_starter);

	return rc;
}

static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
			    struct irb *irb)
{
	int dstat, cstat;
	char *sense;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
				 CCW_DEVID(cdev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
				16, 1, irb, 64, 1);
		return -EIO;
	}

	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return -EIO;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return -EIO;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return -EIO;
		}
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return -EIO;
	}
	return 0;
}

static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
				struct irb *irb)
{
	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
				 CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
		return -EIO;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
		return -ETIMEDOUT;
	default:
		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
				 PTR_ERR(irb), CCW_DEVID(cdev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, "  rc???");
		return PTR_ERR(irb);
	}
}

/**
 * qeth_irq() - qeth interrupt handler
 * @cdev: ccw device
 * @intparm: expected to point to the active iob
 * @irb: Interruption Response Block
 *
 * On the good path, the corresponding qeth channel is locked, with the last
 * used iob as its active_cmd. But this function is also called for error
 * interrupts.
 *
 * Caller ensures that interrupts are disabled and the ccw device lock is
 * held.
 */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *iob = NULL;
	struct ccwgroup_device *gdev;
	struct qeth_channel *channel;
	struct qeth_card *card;

	/* while we hold the ccwdev lock, this stays valid: */
	gdev = dev_get_drvdata(&cdev->dev);
	card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 5, "irq");

	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}

	if (intparm == 0) {
		QETH_CARD_TEXT(card, 5, "irqunsol");
	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
		QETH_CARD_TEXT(card, 5, "irqunexp");

		dev_err(&cdev->dev,
			"Received IRQ with intparm %lx, expected %px\n",
			intparm, channel->active_cmd);
		if (channel->active_cmd)
			qeth_cancel_cmd(channel->active_cmd, -EIO);
	} else {
		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
	}

	rc = qeth_check_irb_error(card, cdev, irb);
	if (rc) {
		/* IO was terminated, free its resources. */
		qeth_unlock_channel(card, channel);
		if (iob)
			qeth_cancel_cmd(iob, rc);
		return;
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		channel->state = CH_STATE_STOPPED;
		wake_up(&card->wait_q);
	}

	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
		channel->state = CH_STATE_HALTED;
		wake_up(&card->wait_q);
	}

	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
					  SCSW_FCTL_HALT_FUNC))) {
		qeth_cancel_cmd(iob, -ECANCELED);
		iob = NULL;
	}

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
					 CCW_DEVID(channel->ccwdev), cstat,
					 dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}

		rc = qeth_get_problem(card, cdev, irb);
		if (rc) {
			card->read_or_write_problem = 1;
			qeth_unlock_channel(card, channel);
			if (iob)
				qeth_cancel_cmd(iob, rc);
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			return;
		}
	}

	if (scsw_cmd_is_valid_cc(&irb->scsw) && irb->scsw.cmd.cc == 1 && iob) {
		/* channel command hasn't started: retry.
		 * active_cmd is still set to last iob
		 */
		QETH_CARD_TEXT(card, 2, "irqcc1");
		rc = ccw_device_start_timeout(cdev, __ccw_from_cmd(iob),
					      (addr_t)iob, 0, 0, iob->timeout);
		if (rc) {
			QETH_DBF_MESSAGE(2,
					 "ccw retry on %x failed, rc = %i\n",
					 CARD_DEVID(card), rc);
			QETH_CARD_TEXT_(card, 2, " err%d", rc);
			qeth_unlock_channel(card, channel);
			qeth_cancel_cmd(iob, rc);
		}
		return;
	}

	qeth_unlock_channel(card, channel);

	if (iob) {
		/* sanity check: */
		if (irb->scsw.cmd.count > iob->length) {
			qeth_cancel_cmd(iob, -EIO);
			return;
		}
		if (iob->callback)
			iob->callback(card, iob,
				      iob->length - irb->scsw.cmd.count);
	}
}

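/* Propagate a TX notification to all AF_IUCV sockets that have skbs queued
 * on this buffer.
 */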
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	skb_queue_walk(&buf->skb_list, skb) {
		struct sock *sk = skb->sk;

		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (sk && sk->sk_family == PF_IUCV)
			iucv_sk(sk)->sk_txnotify(sk, notification);
	}
}

static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
				 struct qeth_qdio_out_buffer *buf, bool error,
				 int budget)
{
	struct sk_buff *skb;

	/* Empty buffer? */
	if (buf->next_element_to_fill == 0)
		return;

	QETH_TXQ_STAT_INC(queue, bufs);
	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
	if (error) {
		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
	} else {
		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
	}

	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
		unsigned int bytes = qdisc_pkt_len(skb);
		bool is_tso = skb_is_gso(skb);
		unsigned int packets;

		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
		if (!error) {
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
			if (skb_is_nonlinear(skb))
				QETH_TXQ_STAT_INC(queue, skbs_sg);
			if (is_tso) {
				QETH_TXQ_STAT_INC(queue, skbs_tso);
				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
			}
		}

		napi_consume_skb(skb, budget);
	}
}

static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
				     struct qeth_qdio_out_buffer *buf,
				     bool error, int budget)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
		atomic_dec(&queue->set_pci_flags_count);
		QETH_TXQ_STAT_INC(queue, completion_irq);
	}

	qeth_tx_complete_buf(queue, buf, error, budget);

	for (i = 0; i < queue->max_elements; ++i) {
		void *data = dma64_to_virt(buf->buffer->element[i].addr);

		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
			kmem_cache_free(qeth_core_header_cache, data);
	}

	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
	buf->next_element_to_fill = 0;
	buf->frames = 0;
	buf->bytes = 0;
	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}

static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
	if (buf->aob)
		kmem_cache_free(qeth_qaob_cache, buf->aob);
	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}

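/* For buffers that complete asynchronously via the CQ, walk the queue's
 * pending list and release those whose QAOB has reached QETH_QAOB_DONE (or
 * all of them when draining), notifying the AF_IUCV sockets first.
 */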
static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
					  struct qeth_qdio_out_q *queue,
					  bool drain, int budget)
{
	struct qeth_qdio_out_buffer *buf, *tmp;

	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
		struct qeth_qaob_priv1 *priv;
		struct qaob *aob = buf->aob;
		enum iucv_tx_notify notify;
		unsigned int i;

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
			QETH_CARD_TEXT(card, 5, "fp");
			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);

			notify = drain ? TX_NOTIFY_GENERALERROR :
					 qeth_compute_cq_notification(aob->aorc, 1);
			qeth_notify_skbs(queue, buf, notify);
			qeth_tx_complete_buf(queue, buf, drain, budget);

			for (i = 0;
			     i < aob->sb_count && i < queue->max_elements;
			     i++) {
				void *data = dma64_to_virt(aob->sba[i]);

				if (test_bit(i, buf->from_kmem_cache) && data)
					kmem_cache_free(qeth_core_header_cache,
							data);
			}

			list_del(&buf->list_entry);
			qeth_free_out_buf(buf);
		}
	}
}

static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
	int j;

	qeth_tx_complete_pending_bufs(q->card, q, true, 0);

	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (!q->bufs[j])
			continue;

		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
		if (free) {
			qeth_free_out_buf(q->bufs[j]);
			q->bufs[j] = NULL;
		}
	}
}

static void qeth_drain_output_queues(struct qeth_card *card)
{
	int i;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		if (card->qdio.out_qs[i])
			qeth_drain_output_queue(card->qdio.out_qs[i], false);
	}
}

static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;

	if (card->qdio.no_out_queues == max)
		return;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
}

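/* Derive the function level and, for OSD/OSX devices, the number of TX
 * queues from the channel-path descriptor: bit 6 of the CHPP field set
 * means single-queue operation.
 */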
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channel_path_desc_fmt0 *chp_dsc;

	QETH_CARD_TEXT(card, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
	if (!chp_dsc)
		return -ENOMEM;

	card->info.func_level = 0x4100 + chp_dsc->desc;

	if (IS_OSD(card) || IS_OSX(card))
		/* CHPP field bit 6 == 1 -> single queue */
		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

	kfree(chp_dsc);
	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
	return 0;
}

static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;

	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	if (IS_IQD(card))
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
	else
		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}

static void qeth_set_initial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}

static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	rc = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}

static int qeth_do_reset(void *data);
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);
	QETH_CARD_TEXT(card, 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}

static void qeth_buffer_reclaim_work(struct work_struct *);
static void qeth_setup_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "setupcrd");

	card->info.type = CARD_RDEV(card)->id.driver_info;
	card->state = CARD_STATE_DOWN;
	spin_lock_init(&card->lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	qeth_set_initial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	hash_init(card->rx_mode_addrs);
	hash_init(card->local_addrs4);
	hash_init(card->local_addrs6);
	spin_lock_init(&card->local_addrs4_lock);
	spin_lock_init(&card->local_addrs6_lock);
}

static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					qeth_service_level);
	if (card->info.mcl_level[0])
		seq_printf(m, "qeth: %s firmware level %s\n",
			CARD_BUS_ID(card), card->info.mcl_level);
}

static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->gdev = gdev;
	dev_set_drvdata(&gdev->dev, card);
	CARD_RDEV(card) = gdev->cdev[0];
	CARD_WDEV(card) = gdev->cdev[1];
	CARD_DDEV(card) = gdev->cdev[2];

	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
						 dev_name(&gdev->dev));
	if (!card->event_wq)
		goto out_wq;

	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
	if (!card->read_cmd)
		goto out_read_cmd;

	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
					   qeth_debugfs_root);
	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
			    &qeth_debugfs_local_addr_fops);

	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_read_cmd:
	destroy_workqueue(card->event_wq);
out_wq:
	dev_set_drvdata(&gdev->dev, NULL);
	kfree(card);
out:
	return NULL;
}

static int qeth_clear_channel(struct qeth_card *card,
			      struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}

static int qeth_halt_channel(struct qeth_card *card,
			     struct qeth_channel *channel)
{
	int rc;

	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}

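/* Channel start/stop: the IRQ handler is (un)registered under the ccw
 * device lock, presumably so that qeth_irq() cannot race with the
 * online/offline transition.
 */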
static int qeth_stop_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	rc = ccw_device_set_offline(cdev);

	spin_lock_irq(get_ccwdev_lock(cdev));
	if (channel->active_cmd)
		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
			channel->active_cmd);

	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	return rc;
}

static int qeth_start_channel(struct qeth_channel *channel)
{
	struct ccw_device *cdev = channel->ccwdev;
	int rc;

	channel->state = CH_STATE_DOWN;
	xchg(&channel->active_cmd, NULL);

	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = qeth_irq;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	rc = ccw_device_set_online(cdev);
	if (rc)
		goto err;

	return 0;

err:
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->handler = NULL;
	spin_unlock_irq(get_ccwdev_lock(cdev));
	return rc;
}

static int qeth_halt_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc1 = qeth_halt_channel(card, &card->read);
	rc2 = qeth_halt_channel(card, &card->write);
	rc3 = qeth_halt_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_channels(struct qeth_card *card)
{
	int rc1 = 0, rc2 = 0, rc3 = 0;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc1 = qeth_clear_channel(card, &card->read);
	rc2 = qeth_clear_channel(card, &card->write);
	rc3 = qeth_clear_channel(card, &card->data);
	if (rc1)
		return rc1;
	if (rc2)
		return rc2;
	return rc3;
}

static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt)
		rc = qeth_halt_channels(card);
	if (rc)
		return rc;
	return qeth_clear_channels(card);
}

static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (IS_IQD(card))
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	return rc;
}

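/* Under z/VM, query the hypervisor via DIAG 0x26C for the VNIC's transport
 * protocol, to pick the matching discipline (layer 2 vs layer 3).
 */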
1823static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
1824{
1825	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1826	struct diag26c_vnic_resp *response = NULL;
1827	struct diag26c_vnic_req *request = NULL;
1828	struct ccw_dev_id id;
1829	char userid[80];
1830	int rc = 0;
1831
1832	QETH_CARD_TEXT(card, 2, "vmlayer");
1833
1834	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
1835	if (rc)
1836		goto out;
1837
1838	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
1839	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
1840	if (!request || !response) {
1841		rc = -ENOMEM;
1842		goto out;
1843	}
1844
1845	ccw_device_get_id(CARD_RDEV(card), &id);
1846	request->resp_buf_len = sizeof(*response);
1847	request->resp_version = DIAG26C_VERSION6_VM65918;
1848	request->req_format = DIAG26C_VNIC_INFO;
1849	ASCEBC(userid, 8);
1850	memcpy(&request->sys_name, userid, 8);
1851	request->devno = id.devno;
1852
1853	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1854	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
1855	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1856	if (rc)
1857		goto out;
1858	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
1859
1860	if (request->resp_buf_len < sizeof(*response) ||
1861	    response->version != request->resp_version) {
1862		rc = -EIO;
1863		goto out;
1864	}
1865
1866	if (response->protocol == VNIC_INFO_PROT_L2)
1867		disc = QETH_DISCIPLINE_LAYER2;
1868	else if (response->protocol == VNIC_INFO_PROT_L3)
1869		disc = QETH_DISCIPLINE_LAYER3;
1870
1871out:
1872	kfree(response);
1873	kfree(request);
1874	if (rc)
1875		QETH_CARD_TEXT_(card, 2, "err%x", rc);
1876	return disc;
1877}
1878
1879/* Determine whether the device requires a specific layer discipline */
1880static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1881{
1882	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1883
1884	if (IS_OSM(card))
1885		disc = QETH_DISCIPLINE_LAYER2;
1886	else if (IS_VM_NIC(card))
1887		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
1888				      qeth_vm_detect_layer(card);
1889
1890	switch (disc) {
1891	case QETH_DISCIPLINE_LAYER2:
1892		QETH_CARD_TEXT(card, 3, "force l2");
1893		break;
1894	case QETH_DISCIPLINE_LAYER3:
1895		QETH_CARD_TEXT(card, 3, "force l3");
1896		break;
1897	default:
1898		QETH_CARD_TEXT(card, 3, "force no");
1899	}
1900
1901	return disc;
1902}
1903
1904static void qeth_set_blkt_defaults(struct qeth_card *card)
1905{
1906	QETH_CARD_TEXT(card, 2, "cfgblkt");
1907
1908	if (card->info.use_v1_blkt) {
1909		card->info.blkt.time_total = 0;
1910		card->info.blkt.inter_packet = 0;
1911		card->info.blkt.inter_packet_jumbo = 0;
1912	} else {
1913		card->info.blkt.time_total = 250;
1914		card->info.blkt.inter_packet = 5;
1915		card->info.blkt.inter_packet_jumbo = 15;
1916	}
1917}
1918
1919static void qeth_idx_init(struct qeth_card *card)
1920{
1921	memset(&card->seqno, 0, sizeof(card->seqno));
1922
1923	card->token.issuer_rm_w = 0x00010103UL;
1924	card->token.cm_filter_w = 0x00010108UL;
1925	card->token.cm_connection_w = 0x0001010aUL;
1926	card->token.ulp_filter_w = 0x0001010bUL;
1927	card->token.ulp_connection_w = 0x0001010dUL;
1928
1929	switch (card->info.type) {
1930	case QETH_CARD_TYPE_IQD:
1931		card->info.func_level =	QETH_IDX_FUNC_LEVEL_IQD;
1932		break;
1933	case QETH_CARD_TYPE_OSD:
1934		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1935		break;
1936	default:
1937		break;
1938	}
1939}
1940
1941static void qeth_idx_finalize_cmd(struct qeth_card *card,
1942				  struct qeth_cmd_buffer *iob)
1943{
1944	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
1945	       QETH_SEQ_NO_LENGTH);
1946	if (iob->channel == &card->write)
1947		card->seqno.trans_hdr++;
1948}
1949
1950static int qeth_peer_func_level(int level)
1951{
1952	if ((level & 0xff) == 8)
1953		return (level & 0xff) + 0x400;
1954	if (((level >> 8) & 3) == 1)
1955		return (level & 0xff) + 0x200;
1956	return level;
1957}
1958
1959static void qeth_mpc_finalize_cmd(struct qeth_card *card,
1960				  struct qeth_cmd_buffer *iob)
1961{
1962	qeth_idx_finalize_cmd(card, iob);
1963
1964	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1965	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1966	card->seqno.pdu_hdr++;
1967	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1968	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1969
1970	iob->callback = qeth_release_buffer_cb;
1971}
1972
1973static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
1974				 struct qeth_cmd_buffer *reply)
1975{
1976	/* MPC cmds are issued strictly in sequence. */
1977	return !IS_IPA(reply->data);
1978}
1979
1980static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
1981						  const void *data,
1982						  unsigned int data_length)
1983{
1984	struct qeth_cmd_buffer *iob;
1985
1986	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
1987	if (!iob)
1988		return NULL;
1989
1990	memcpy(iob->data, data, data_length);
1991	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
1992		       iob->data);
1993	iob->finalize = qeth_mpc_finalize_cmd;
1994	iob->match = qeth_mpc_match_reply;
1995	return iob;
1996}
1997
1998/**
1999 * qeth_send_control_data() -	send control command to the card
2000 * @card:			qeth_card structure pointer
2001 * @iob:			qeth_cmd_buffer pointer
2002 * @reply_cb:			callback function pointer
2003 *  cb_card:			pointer to the qeth_card structure
2004 *  cb_reply:			pointer to the qeth_reply structure
2005 *  cb_cmd:			pointer to the original iob for non-IPA
2006 *				commands, or to the qeth_ipa_cmd structure
2007 *				for the IPA commands.
2008 * @reply_param:		private pointer passed to the callback
2009 *
2010 * Callback function gets called one or more times, with cb_cmd
2011 * pointing to the response returned by the hardware. Callback
2012 * function must return
2013 *   > 0 if more reply blocks are expected,
2014 *     0 if the last or only reply block is received, and
2015 *   < 0 on error.
2016 * Callback function can get the value of the reply_param pointer from the
2017 * field 'param' of the structure qeth_reply.
2018 */
2020static int qeth_send_control_data(struct qeth_card *card,
2021				  struct qeth_cmd_buffer *iob,
2022				  int (*reply_cb)(struct qeth_card *cb_card,
2023						  struct qeth_reply *cb_reply,
2024						  unsigned long cb_cmd),
2025				  void *reply_param)
2026{
2027	struct qeth_channel *channel = iob->channel;
2028	struct qeth_reply *reply = &iob->reply;
2029	long timeout = iob->timeout;
2030	int rc;
2031
2032	QETH_CARD_TEXT(card, 2, "sendctl");
2033
2034	reply->callback = reply_cb;
2035	reply->param = reply_param;
2036
2037	timeout = wait_event_interruptible_timeout(card->wait_q,
2038						   qeth_trylock_channel(channel, iob),
2039						   timeout);
2040	if (timeout <= 0) {
2041		qeth_put_cmd(iob);
2042		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2043	}
2044
2045	if (iob->finalize)
2046		iob->finalize(card, iob);
2047	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
2048
2049	qeth_enqueue_cmd(card, iob);
2050
2051	/* This pairs with iob->callback, and keeps the iob alive after IO: */
2052	qeth_get_cmd(iob);
2053
2054	QETH_CARD_TEXT(card, 6, "noirqpnd");
2055	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2056	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
2057				      (addr_t) iob, 0, 0, timeout);
2058	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2059	if (rc) {
2060		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2061				 CARD_DEVID(card), rc);
2062		QETH_CARD_TEXT_(card, 2, " err%d", rc);
2063		qeth_dequeue_cmd(card, iob);
2064		qeth_put_cmd(iob);
2065		qeth_unlock_channel(card, channel);
2066		goto out;
2067	}
2068
2069	timeout = wait_for_completion_interruptible_timeout(&iob->done,
2070							    timeout);
2071	if (timeout <= 0)
2072		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2073
2074	qeth_dequeue_cmd(card, iob);
2075
2076	if (reply_cb) {
2077		/* Wait until the callback for a late reply has completed: */
2078		spin_lock_irq(&iob->lock);
2079		if (rc)
2080			/* Zap any callback that's still pending: */
2081			iob->rc = rc;
2082		spin_unlock_irq(&iob->lock);
2083	}
2084
2085	if (!rc)
2086		rc = iob->rc;
2087
2088out:
2089	qeth_put_cmd(iob);
2090	return rc;
2091}
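
/*
 * A minimal sketch of a reply callback honouring the contract documented
 * above (the names are hypothetical; real callers look like
 * qeth_cm_enable_cb() further down):
 *
 *	static int example_reply_cb(struct qeth_card *cb_card,
 *				    struct qeth_reply *cb_reply,
 *				    unsigned long cb_cmd)
 *	{
 *		struct qeth_cmd_buffer *iob = (struct qeth_cmd_buffer *)cb_cmd;
 *		u16 *result = cb_reply->param;	(passed in as reply_param)
 *
 *		memcpy(result, iob->data, sizeof(*result));
 *		return 0;	(0 means: last or only reply block)
 *	}
 */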
2092
2093struct qeth_node_desc {
2094	struct node_descriptor nd1;
2095	struct node_descriptor nd2;
2096	struct node_descriptor nd3;
2097};
2098
2099static void qeth_read_conf_data_cb(struct qeth_card *card,
2100				   struct qeth_cmd_buffer *iob,
2101				   unsigned int data_length)
2102{
2103	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2104	int rc = 0;
2105	u8 *tag;
2106
2107	QETH_CARD_TEXT(card, 2, "cfgunit");
2108
2109	if (data_length < sizeof(*nd)) {
2110		rc = -EINVAL;
2111		goto out;
2112	}
2113
2114	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
2115			       nd->nd1.plant[1] == _ascebc['M'];
2116	tag = (u8 *)&nd->nd1.tag;
2117	card->info.chpid = tag[0];
2118	card->info.unit_addr2 = tag[1];
2119
2120	tag = (u8 *)&nd->nd2.tag;
2121	card->info.cula = tag[1];
2122
2123	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
2124				 nd->nd3.model[1] == 0xF0 &&
2125				 nd->nd3.model[2] >= 0xF1 &&
2126				 nd->nd3.model[2] <= 0xF4;
2127
2128out:
2129	qeth_notify_cmd(iob, rc);
2130	qeth_put_cmd(iob);
2131}
2132
2133static int qeth_read_conf_data(struct qeth_card *card)
2134{
2135	struct qeth_channel *channel = &card->data;
2136	struct qeth_cmd_buffer *iob;
2137	struct ciw *ciw;
2138
2139	/* scan for RCD command in extended SenseID data */
2140	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
2141	if (!ciw || ciw->cmd == 0)
2142		return -EOPNOTSUPP;
2143	if (ciw->count < sizeof(struct qeth_node_desc))
2144		return -EINVAL;
2145
2146	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
2147	if (!iob)
2148		return -ENOMEM;
2149
2150	iob->callback = qeth_read_conf_data_cb;
2151	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
2152		       iob->data);
2153
2154	return qeth_send_control_data(card, iob, NULL, NULL);
2155}
2156
2157static int qeth_idx_check_activate_response(struct qeth_card *card,
2158					    struct qeth_channel *channel,
2159					    struct qeth_cmd_buffer *iob)
2160{
2161	int rc;
2162
2163	rc = qeth_check_idx_response(card, iob->data);
2164	if (rc)
2165		return rc;
2166
2167	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
2168		return 0;
2169
2170	/* negative reply: */
2171	QETH_CARD_TEXT_(card, 2, "idxneg%c",
2172			QETH_IDX_ACT_CAUSE_CODE(iob->data));
2173
2174	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
2175	case QETH_IDX_ACT_ERR_EXCL:
2176		dev_err(&channel->ccwdev->dev,
2177			"The adapter is used exclusively by another host\n");
2178		return -EBUSY;
2179	case QETH_IDX_ACT_ERR_AUTH:
2180	case QETH_IDX_ACT_ERR_AUTH_USER:
2181		dev_err(&channel->ccwdev->dev,
2182			"Setting the device online failed because of insufficient authorization\n");
2183		return -EPERM;
2184	default:
2185		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
2186				 CCW_DEVID(channel->ccwdev));
2187		return -EIO;
2188	}
2189}
2190
2191static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
2192					      struct qeth_cmd_buffer *iob,
2193					      unsigned int data_length)
2194{
2195	struct qeth_channel *channel = iob->channel;
2196	u16 peer_level;
2197	int rc;
2198
2199	QETH_CARD_TEXT(card, 2, "idxrdcb");
2200
2201	rc = qeth_idx_check_activate_response(card, channel, iob);
2202	if (rc)
2203		goto out;
2204
2205	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2206	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
2207		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2208				 CCW_DEVID(channel->ccwdev),
2209				 card->info.func_level, peer_level);
2210		rc = -EINVAL;
2211		goto out;
2212	}
2213
2214	memcpy(&card->token.issuer_rm_r,
2215	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2216	       QETH_MPC_TOKEN_LENGTH);
2217	memcpy(&card->info.mcl_level[0],
2218	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
2219
2220out:
2221	qeth_notify_cmd(iob, rc);
2222	qeth_put_cmd(iob);
2223}
2224
2225static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2226					       struct qeth_cmd_buffer *iob,
2227					       unsigned int data_length)
2228{
2229	struct qeth_channel *channel = iob->channel;
2230	u16 peer_level;
2231	int rc;
2232
2233	QETH_CARD_TEXT(card, 2, "idxwrcb");
2234
2235	rc = qeth_idx_check_activate_response(card, channel, iob);
2236	if (rc)
2237		goto out;
2238
2239	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2240	if ((peer_level & ~0x0100) !=
2241	    qeth_peer_func_level(card->info.func_level)) {
2242		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2243				 CCW_DEVID(channel->ccwdev),
2244				 card->info.func_level, peer_level);
2245		rc = -EINVAL;
2246	}
2247
2248out:
2249	qeth_notify_cmd(iob, rc);
2250	qeth_put_cmd(iob);
2251}
2252
2253static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
2254					struct qeth_cmd_buffer *iob)
2255{
2256	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
2257	u8 port = ((u8)card->dev->dev_port) | 0x80;
2258	struct ccw1 *ccw = __ccw_from_cmd(iob);
2259
2260	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
2261		       iob->data);
2262	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2263	iob->finalize = qeth_idx_finalize_cmd;
2264
2265	port |= QETH_IDX_ACT_INVAL_FRAME;
2266	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
2267	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2268	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
2269	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
2270	       &card->info.func_level, 2);
2271	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
2272	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
2273}
2274
2275static int qeth_idx_activate_read_channel(struct qeth_card *card)
2276{
2277	struct qeth_channel *channel = &card->read;
2278	struct qeth_cmd_buffer *iob;
2279	int rc;
2280
2281	QETH_CARD_TEXT(card, 2, "idxread");
2282
2283	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2284	if (!iob)
2285		return -ENOMEM;
2286
2287	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
2288	qeth_idx_setup_activate_cmd(card, iob);
2289	iob->callback = qeth_idx_activate_read_channel_cb;
2290
2291	rc = qeth_send_control_data(card, iob, NULL, NULL);
2292	if (rc)
2293		return rc;
2294
2295	channel->state = CH_STATE_UP;
2296	return 0;
2297}
2298
2299static int qeth_idx_activate_write_channel(struct qeth_card *card)
2300{
2301	struct qeth_channel *channel = &card->write;
2302	struct qeth_cmd_buffer *iob;
2303	int rc;
2304
2305	QETH_CARD_TEXT(card, 2, "idxwrite");
2306
2307	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2308	if (!iob)
2309		return -ENOMEM;
2310
2311	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
2312	qeth_idx_setup_activate_cmd(card, iob);
2313	iob->callback = qeth_idx_activate_write_channel_cb;
2314
2315	rc = qeth_send_control_data(card, iob, NULL, NULL);
2316	if (rc)
2317		return rc;
2318
2319	channel->state = CH_STATE_UP;
2320	return 0;
2321}
2322
2323static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2324		unsigned long data)
2325{
2326	struct qeth_cmd_buffer *iob;
2327
2328	QETH_CARD_TEXT(card, 2, "cmenblcb");
2329
2330	iob = (struct qeth_cmd_buffer *) data;
2331	memcpy(&card->token.cm_filter_r,
2332	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2333	       QETH_MPC_TOKEN_LENGTH);
2334	return 0;
2335}
2336
2337static int qeth_cm_enable(struct qeth_card *card)
2338{
2339	struct qeth_cmd_buffer *iob;
2340
2341	QETH_CARD_TEXT(card, 2, "cmenable");
2342
2343	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2344	if (!iob)
2345		return -ENOMEM;
2346
2347	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2348	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2349	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2350	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2351
2352	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
2353}
2354
2355static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2356		unsigned long data)
2357{
2358	struct qeth_cmd_buffer *iob;
2359
2360	QETH_CARD_TEXT(card, 2, "cmsetpcb");
2361
2362	iob = (struct qeth_cmd_buffer *) data;
2363	memcpy(&card->token.cm_connection_r,
2364	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2365	       QETH_MPC_TOKEN_LENGTH);
2366	return 0;
2367}
2368
2369static int qeth_cm_setup(struct qeth_card *card)
2370{
2371	struct qeth_cmd_buffer *iob;
2372
2373	QETH_CARD_TEXT(card, 2, "cmsetup");
2374
2375	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2376	if (!iob)
2377		return -ENOMEM;
2378
2379	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2380	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2381	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2382	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2383	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2384	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2385	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2386}
2387
2388static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
2389{
2390	if (link_type == QETH_LINK_TYPE_LANE_TR ||
2391	    link_type == QETH_LINK_TYPE_HSTR) {
2392		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
2393		return false;
2394	}
2395
2396	return true;
2397}
2398
2399static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2400{
2401	struct net_device *dev = card->dev;
2402	unsigned int new_mtu;
2403
2404	if (!max_mtu) {
2405		/* IQD needs accurate max MTU to set up its RX buffers: */
2406		if (IS_IQD(card))
2407			return -EINVAL;
2408		/* tolerate quirky HW: */
2409		max_mtu = ETH_MAX_MTU;
2410	}
2411
2412	rtnl_lock();
2413	if (IS_IQD(card)) {
2414		/* move any device with default MTU to new max MTU: */
2415		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2416
2417		/* adjust RX buffer size to new max MTU: */
2418		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2419		if (dev->max_mtu && dev->max_mtu != max_mtu)
2420			qeth_free_qdio_queues(card);
2421	} else {
2422		if (dev->mtu)
2423			new_mtu = dev->mtu;
2424		/* default MTUs for first setup: */
2425		else if (IS_LAYER2(card))
2426			new_mtu = ETH_DATA_LEN;
2427		else
2428			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2429	}
2430
2431	dev->max_mtu = max_mtu;
2432	dev->mtu = min(new_mtu, max_mtu);
2433	rtnl_unlock();
2434	return 0;
2435}
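
/*
 * As a worked example for the IQD sizing above (assuming a 4 KiB
 * PAGE_SIZE): max_mtu = 57344 yields an RX buffer size of
 * 57344 + 2 * 4096 = 65536 bytes, i.e. 16 pages per inbound buffer.
 */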
2436
2437static int qeth_get_mtu_outof_framesize(int framesize)
2438{
2439	switch (framesize) {
2440	case 0x4000:
2441		return 8192;
2442	case 0x6000:
2443		return 16384;
2444	case 0xa000:
2445		return 32768;
2446	case 0xffff:
2447		return 57344;
2448	default:
2449		return 0;
2450	}
2451}
2452
2453static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2454		unsigned long data)
2455{
2456	__u16 mtu, framesize;
2457	__u16 len;
2458	struct qeth_cmd_buffer *iob;
2459	u8 link_type = 0;
2460
2461	QETH_CARD_TEXT(card, 2, "ulpenacb");
2462
2463	iob = (struct qeth_cmd_buffer *) data;
2464	memcpy(&card->token.ulp_filter_r,
2465	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2466	       QETH_MPC_TOKEN_LENGTH);
2467	if (IS_IQD(card)) {
2468		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2469		mtu = qeth_get_mtu_outof_framesize(framesize);
2470	} else {
2471		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2472	}
2473	*(u16 *)reply->param = mtu;
2474
2475	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2476	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2477		memcpy(&link_type,
2478		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2479		if (!qeth_is_supported_link_type(card, link_type))
2480			return -EPROTONOSUPPORT;
2481	}
2482
2483	card->info.link_type = link_type;
2484	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
2485	return 0;
2486}
2487
2488static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2489{
2490	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
2491}
2492
2493static int qeth_ulp_enable(struct qeth_card *card)
2494{
2495	u8 prot_type = qeth_mpc_select_prot_type(card);
2496	struct qeth_cmd_buffer *iob;
2497	u16 max_mtu;
2498	int rc;
2499
2500	QETH_CARD_TEXT(card, 2, "ulpenabl");
2501
2502	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2503	if (!iob)
2504		return -ENOMEM;
2505
2506	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2507	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2508	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2509	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2510	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2511	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2512	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2513	if (rc)
2514		return rc;
2515	return qeth_update_max_mtu(card, max_mtu);
2516}
2517
2518static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2519		unsigned long data)
2520{
2521	struct qeth_cmd_buffer *iob;
2522
2523	QETH_CARD_TEXT(card, 2, "ulpstpcb");
2524
2525	iob = (struct qeth_cmd_buffer *) data;
2526	memcpy(&card->token.ulp_connection_r,
2527	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2528	       QETH_MPC_TOKEN_LENGTH);
2529	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2530		     3)) {
2531		QETH_CARD_TEXT(card, 2, "olmlimit");
2532		dev_err(&card->gdev->dev,
2533			"A connection could not be established because of an OLM limit\n");
2534		return -EMLINK;
2535	}
2536	return 0;
2537}
2538
2539static int qeth_ulp_setup(struct qeth_card *card)
2540{
2541	__u16 temp;
2542	struct qeth_cmd_buffer *iob;
2543
2544	QETH_CARD_TEXT(card, 2, "ulpsetup");
2545
2546	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2547	if (!iob)
2548		return -ENOMEM;
2549
2550	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2551	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2552	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2553	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2554	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2555	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2556
2557	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
2558	temp = (card->info.cula << 8) + card->info.unit_addr2;
2559	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2560	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
2561}
2562
2563static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
2564			      gfp_t gfp)
2565{
2566	struct qeth_qdio_out_buffer *newbuf;
2567
2568	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
2569	if (!newbuf)
2570		return -ENOMEM;
2571
2572	newbuf->buffer = q->qdio_bufs[bidx];
2573	skb_queue_head_init(&newbuf->skb_list);
2574	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2575	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2576	q->bufs[bidx] = newbuf;
2577	return 0;
2578}
2579
2580static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2581{
2582	if (!q)
2583		return;
2584
2585	qeth_drain_output_queue(q, true);
2586	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2587	kfree(q);
2588}
2589
2590static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2591{
2592	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2593	unsigned int i;
2594
2595	if (!q)
2596		return NULL;
2597
2598	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
2599		goto err_qdio_bufs;
2600
2601	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2602		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
2603			goto err_out_bufs;
2604	}
2605
2606	return q;
2607
2608err_out_bufs:
2609	while (i > 0)
2610		qeth_free_out_buf(q->bufs[--i]);
2611	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2612err_qdio_bufs:
2613	kfree(q);
2614	return NULL;
2615}
2616
2617static void qeth_tx_completion_timer(struct timer_list *timer)
2618{
2619	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2620
2621	napi_schedule(&queue->napi);
2622	QETH_TXQ_STAT_INC(queue, completion_timer);
2623}
2624
2625static int qeth_alloc_qdio_queues(struct qeth_card *card)
2626{
2627	unsigned int i;
2628
2629	QETH_CARD_TEXT(card, 2, "allcqdbf");
2630
2631	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2632		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2633		return 0;
2634
2635	/* inbound buffer pool */
2636	if (qeth_alloc_buffer_pool(card))
2637		goto out_buffer_pool;
2638
2639	/* outbound */
2640	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2641		struct qeth_qdio_out_q *queue;
2642
2643		queue = qeth_alloc_output_queue();
2644		if (!queue)
2645			goto out_freeoutq;
2646		QETH_CARD_TEXT_(card, 2, "outq %i", i);
2647		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2648		card->qdio.out_qs[i] = queue;
2649		queue->card = card;
2650		queue->queue_no = i;
2651		INIT_LIST_HEAD(&queue->pending_bufs);
2652		spin_lock_init(&queue->lock);
2653		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2654		if (IS_IQD(card)) {
2655			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2656			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2657			queue->rescan_usecs = QETH_TX_TIMER_USECS;
2658		} else {
2659			queue->coalesce_usecs = USEC_PER_SEC;
2660			queue->max_coalesced_frames = 0;
2661			queue->rescan_usecs = 10 * USEC_PER_SEC;
2662		}
2663		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
2664	}
2665
2666	/* completion */
2667	if (qeth_alloc_cq(card))
2668		goto out_freeoutq;
2669
2670	return 0;
2671
2672out_freeoutq:
2673	while (i > 0) {
2674		qeth_free_output_queue(card->qdio.out_qs[--i]);
2675		card->qdio.out_qs[i] = NULL;
2676	}
2677	qeth_free_buffer_pool(card);
2678out_buffer_pool:
2679	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2680	return -ENOMEM;
2681}
2682
2683static void qeth_free_qdio_queues(struct qeth_card *card)
2684{
2685	int i, j;
2686
2687	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2688		QETH_QDIO_UNINITIALIZED)
2689		return;
2690
2691	qeth_free_cq(card);
2692	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2693		if (card->qdio.in_q->bufs[j].rx_skb) {
2694			consume_skb(card->qdio.in_q->bufs[j].rx_skb);
2695			card->qdio.in_q->bufs[j].rx_skb = NULL;
2696		}
2697	}
2698
2699	/* inbound buffer pool */
2700	qeth_free_buffer_pool(card);
2701	/* free outbound qdio_qs */
2702	for (i = 0; i < card->qdio.no_out_queues; i++) {
2703		qeth_free_output_queue(card->qdio.out_qs[i]);
2704		card->qdio.out_qs[i] = NULL;
2705	}
2706}
2707
2708static void qeth_fill_qib_parms(struct qeth_card *card,
2709				struct qeth_qib_parms *parms)
2710{
2711	struct qeth_qdio_out_q *queue;
2712	unsigned int i;
2713
2714	parms->pcit_magic[0] = 'P';
2715	parms->pcit_magic[1] = 'C';
2716	parms->pcit_magic[2] = 'I';
2717	parms->pcit_magic[3] = 'T';
2718	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
2719	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
2720	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
2721	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
2722
2723	parms->blkt_magic[0] = 'B';
2724	parms->blkt_magic[1] = 'L';
2725	parms->blkt_magic[2] = 'K';
2726	parms->blkt_magic[3] = 'T';
2727	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
2728	parms->blkt_total = card->info.blkt.time_total;
2729	parms->blkt_inter_packet = card->info.blkt.inter_packet;
2730	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2731
2732	/* Prio-queueing implicitly uses the default priorities: */
2733	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
2734		return;
2735
2736	parms->pque_magic[0] = 'P';
2737	parms->pque_magic[1] = 'Q';
2738	parms->pque_magic[2] = 'U';
2739	parms->pque_magic[3] = 'E';
2740	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
2741	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
2742	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
2743
2744	qeth_for_each_output_queue(card, queue, i)
2745		parms->pque_priority[i] = queue->priority;
2746}
2747
2748static int qeth_qdio_activate(struct qeth_card *card)
2749{
2750	QETH_CARD_TEXT(card, 3, "qdioact");
2751	return qdio_activate(CARD_DDEV(card));
2752}
2753
2754static int qeth_dm_act(struct qeth_card *card)
2755{
2756	struct qeth_cmd_buffer *iob;
2757
2758	QETH_CARD_TEXT(card, 2, "dmact");
2759
2760	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2761	if (!iob)
2762		return -ENOMEM;
2763
2764	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2765	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2766	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2767	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2768	return qeth_send_control_data(card, iob, NULL, NULL);
2769}
2770
2771static int qeth_mpc_initialize(struct qeth_card *card)
2772{
2773	int rc;
2774
2775	QETH_CARD_TEXT(card, 2, "mpcinit");
2776
2777	rc = qeth_issue_next_read(card);
2778	if (rc) {
2779		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2780		return rc;
2781	}
2782	rc = qeth_cm_enable(card);
2783	if (rc) {
2784		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2785		return rc;
2786	}
2787	rc = qeth_cm_setup(card);
2788	if (rc) {
2789		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2790		return rc;
2791	}
2792	rc = qeth_ulp_enable(card);
2793	if (rc) {
2794		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2795		return rc;
2796	}
2797	rc = qeth_ulp_setup(card);
2798	if (rc) {
2799		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2800		return rc;
2801	}
2802	rc = qeth_alloc_qdio_queues(card);
2803	if (rc) {
2804		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2805		return rc;
2806	}
2807	rc = qeth_qdio_establish(card);
2808	if (rc) {
2809		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2810		qeth_free_qdio_queues(card);
2811		return rc;
2812	}
2813	rc = qeth_qdio_activate(card);
2814	if (rc) {
2815		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2816		return rc;
2817	}
2818	rc = qeth_dm_act(card);
2819	if (rc) {
2820		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2821		return rc;
2822	}
2823
2824	return 0;
2825}
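
/*
 * To summarize the bring-up above: start the read channel, CM enable/setup,
 * ULP enable/setup, then allocate, establish and activate the QDIO queues,
 * and finally DM activate. Later steps reuse tokens that earlier replies
 * stored in card->token, which is why the sequence cannot be reordered.
 */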
2826
2827static void qeth_print_status_message(struct qeth_card *card)
2828{
2829	switch (card->info.type) {
2830	case QETH_CARD_TYPE_OSD:
2831	case QETH_CARD_TYPE_OSM:
2832	case QETH_CARD_TYPE_OSX:
2833		/* VM uses a non-zero first character to indicate a
2834		 * HiperSockets-like reporting of the level; OSA sets
2835		 * the first character to zero.
2836		 */
2837		if (!card->info.mcl_level[0]) {
2838			scnprintf(card->info.mcl_level,
2839				  sizeof(card->info.mcl_level),
2840				  "%02x%02x",
2841				  card->info.mcl_level[2],
2842				  card->info.mcl_level[3]);
2843			break;
2844		}
2845		fallthrough;
2846	case QETH_CARD_TYPE_IQD:
2847		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2848			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2849				card->info.mcl_level[0]];
2850			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2851				card->info.mcl_level[1]];
2852			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2853				card->info.mcl_level[2]];
2854			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2855				card->info.mcl_level[3]];
2856			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2857		}
2858		break;
2859	default:
2860		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2861	}
2862	dev_info(&card->gdev->dev,
2863		 "Device is a%s card%s%s%s\nwith link type %s.\n",
2864		 qeth_get_cardname(card),
2865		 (card->info.mcl_level[0]) ? " (level: " : "",
2866		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2867		 (card->info.mcl_level[0]) ? ")" : "",
2868		 qeth_get_cardname_short(card));
2869}
2870
2871static void qeth_initialize_working_pool_list(struct qeth_card *card)
2872{
2873	struct qeth_buffer_pool_entry *entry;
2874
2875	QETH_CARD_TEXT(card, 5, "inwrklst");
2876
2877	list_for_each_entry(entry,
2878			    &card->qdio.init_pool.entry_list, init_list) {
2879		qeth_put_buffer_pool_entry(card, entry);
2880	}
2881}
2882
2883static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2884					struct qeth_card *card)
2885{
2886	struct qeth_buffer_pool_entry *entry;
2887	int i, free;
2888
2889	if (list_empty(&card->qdio.in_buf_pool.entry_list))
2890		return NULL;
2891
2892	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2893		free = 1;
2894		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2895			if (page_count(entry->elements[i]) > 1) {
2896				free = 0;
2897				break;
2898			}
2899		}
2900		if (free) {
2901			list_del_init(&entry->list);
2902			return entry;
2903		}
2904	}
2905
2906	/* no free buffer in pool so take first one and swap pages */
2907	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2908				 struct qeth_buffer_pool_entry, list);
2909	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2910		if (page_count(entry->elements[i]) > 1) {
2911			struct page *page = dev_alloc_page();
2912
2913			if (!page)
2914				return NULL;
2915
2916			__free_page(entry->elements[i]);
2917			entry->elements[i] = page;
2918			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2919		}
2920	}
2921	list_del_init(&entry->list);
2922	return entry;
2923}
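
/*
 * Note on the page_count() tests above: a pool page with a reference count
 * above 1 is presumably still attached to an skb that was handed up the
 * stack, so it must not be recycled yet; a count of 1 means the pool holds
 * the only reference and the page may safely be given back to the HW.
 */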
2924
2925static int qeth_init_input_buffer(struct qeth_card *card,
2926		struct qeth_qdio_buffer *buf)
2927{
2928	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
2929	int i;
2930
2931	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2932		buf->rx_skb = netdev_alloc_skb(card->dev,
2933					       ETH_HLEN +
2934					       sizeof(struct ipv6hdr));
2935		if (!buf->rx_skb)
2936			return -ENOMEM;
2937	}
2938
2939	if (!pool_entry) {
2940		pool_entry = qeth_find_free_buffer_pool_entry(card);
2941		if (!pool_entry)
2942			return -ENOBUFS;
2943
2944		buf->pool_entry = pool_entry;
2945	}
2946
2947	/*
2948	 * Since the buffer is accessed only from the input tasklet,
2949	 * there shouldn't be a need to synchronize; also, since we use
2950	 * QETH_IN_BUF_REQUEUE_THRESHOLD, we should never run out of
2951	 * buffers.
2952	 */
2953	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2954		buf->buffer->element[i].length = PAGE_SIZE;
2955		buf->buffer->element[i].addr = u64_to_dma64(
2956			page_to_phys(pool_entry->elements[i]));
2957		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2958			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2959		else
2960			buf->buffer->element[i].eflags = 0;
2961		buf->buffer->element[i].sflags = 0;
2962	}
2963	return 0;
2964}
2965
2966static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2967					    struct qeth_qdio_out_q *queue)
2968{
2969	if (!IS_IQD(card) ||
2970	    qeth_iqd_is_mcast_queue(card, queue) ||
2971	    card->options.cq == QETH_CQ_ENABLED ||
2972	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
2973		return 1;
2974
2975	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
2976}
2977
2978static int qeth_init_qdio_queues(struct qeth_card *card)
2979{
2980	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
2981	unsigned int i;
2982	int rc;
2983
2984	QETH_CARD_TEXT(card, 2, "initqdqs");
2985
2986	/* inbound queue */
2987	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2988	memset(&card->rx, 0, sizeof(struct qeth_rx));
2989
2990	qeth_initialize_working_pool_list(card);
2991	/* give only as many buffers to hardware as we have buffer pool entries */
2992	for (i = 0; i < rx_bufs; i++) {
2993		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2994		if (rc)
2995			return rc;
2996	}
2997
2998	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
2999	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0, 0, rx_bufs);
3000	if (rc) {
3001		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
3002		return rc;
3003	}
3004
3005	/* completion */
3006	rc = qeth_cq_init(card);
3007	if (rc)
3008		return rc;
3010
3011	/* outbound queue */
3012	for (i = 0; i < card->qdio.no_out_queues; ++i) {
3013		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
3014
3015		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3016		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3017		queue->next_buf_to_fill = 0;
3018		queue->do_pack = 0;
3019		queue->prev_hdr = NULL;
3020		queue->coalesced_frames = 0;
3021		queue->bulk_start = 0;
3022		queue->bulk_count = 0;
3023		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
3024		atomic_set(&queue->used_buffers, 0);
3025		atomic_set(&queue->set_pci_flags_count, 0);
3026		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
3027	}
3028	return 0;
3029}
3030
3031static void qeth_ipa_finalize_cmd(struct qeth_card *card,
3032				  struct qeth_cmd_buffer *iob)
3033{
3034	qeth_mpc_finalize_cmd(card, iob);
3035
3036	/* override with IPA-specific values: */
3037	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
3038}
3039
3040static void qeth_prepare_ipa_cmd(struct qeth_card *card,
3041				 struct qeth_cmd_buffer *iob, u16 cmd_length)
3042{
3043	u8 prot_type = qeth_mpc_select_prot_type(card);
3044	u16 total_length = iob->length;
3045
3046	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
3047		       iob->data);
3048	iob->finalize = qeth_ipa_finalize_cmd;
3049
3050	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3051	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
3052	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
3053	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
3054	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
3055	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3056	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3057	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
3058}
3059
3060static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
3061				 struct qeth_cmd_buffer *reply)
3062{
3063	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3064
3065	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
3066}
3067
3068struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3069					   enum qeth_ipa_cmds cmd_code,
3070					   enum qeth_prot_versions prot,
3071					   unsigned int data_length)
3072{
3073	struct qeth_cmd_buffer *iob;
3074	struct qeth_ipacmd_hdr *hdr;
3075
3076	data_length += offsetof(struct qeth_ipa_cmd, data);
3077	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3078			     QETH_IPA_TIMEOUT);
3079	if (!iob)
3080		return NULL;
3081
3082	qeth_prepare_ipa_cmd(card, iob, data_length);
3083	iob->match = qeth_ipa_match_reply;
3084
3085	hdr = &__ipa_cmd(iob)->hdr;
3086	hdr->command = cmd_code;
3087	hdr->initiator = IPA_CMD_INITIATOR_HOST;
3088	/* hdr->seqno is set by qeth_send_control_data() */
3089	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3090	hdr->rel_adapter_no = (u8) card->dev->dev_port;
3091	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3092	hdr->param_count = 1;
3093	hdr->prot_version = prot;
3094	return iob;
3095}
3096EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3097
3098static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3099				struct qeth_reply *reply, unsigned long data)
3100{
3101	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3102
3103	return (cmd->hdr.return_code) ? -EIO : 0;
3104}
3105
3106/*
3107 * qeth_send_ipa_cmd() - send an IPA command
3108 *
3109 * See qeth_send_control_data() for explanation of the arguments.
3110 */
3112int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3113		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3114			unsigned long),
3115		void *reply_param)
3116{
3117	int rc;
3118
3119	QETH_CARD_TEXT(card, 4, "sendipa");
3120
3121	if (card->read_or_write_problem) {
3122		qeth_put_cmd(iob);
3123		return -EIO;
3124	}
3125
3126	if (reply_cb == NULL)
3127		reply_cb = qeth_send_ipa_cmd_cb;
3128	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3129	if (rc == -ETIME) {
3130		qeth_clear_ipacmd_list(card);
3131		qeth_schedule_recovery(card);
3132	}
3133	return rc;
3134}
3135EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
3136
3137static int qeth_send_startlan_cb(struct qeth_card *card,
3138				 struct qeth_reply *reply, unsigned long data)
3139{
3140	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3141
3142	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3143		return -ENETDOWN;
3144
3145	return (cmd->hdr.return_code) ? -EIO : 0;
3146}
3147
3148static int qeth_send_startlan(struct qeth_card *card)
3149{
3150	struct qeth_cmd_buffer *iob;
3151
3152	QETH_CARD_TEXT(card, 2, "strtlan");
3153
3154	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3155	if (!iob)
3156		return -ENOMEM;
3157	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
3158}
3159
3160static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3161{
3162	if (!cmd->hdr.return_code)
3163		cmd->hdr.return_code =
3164			cmd->data.setadapterparms.hdr.return_code;
3165	return cmd->hdr.return_code;
3166}
3167
3168static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3169		struct qeth_reply *reply, unsigned long data)
3170{
3171	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3172	struct qeth_query_cmds_supp *query_cmd;
3173
3174	QETH_CARD_TEXT(card, 3, "quyadpcb");
3175	if (qeth_setadpparms_inspect_rc(cmd))
3176		return -EIO;
3177
3178	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3179	if (query_cmd->lan_type & 0x7f) {
3180		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3181			return -EPROTONOSUPPORT;
3182
3183		card->info.link_type = query_cmd->lan_type;
3184		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3185	}
3186
3187	card->options.adp.supported = query_cmd->supported_cmds;
3188	return 0;
3189}
3190
3191static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3192						    enum qeth_ipa_setadp_cmd adp_cmd,
3193						    unsigned int data_length)
3194{
3195	struct qeth_ipacmd_setadpparms_hdr *hdr;
3196	struct qeth_cmd_buffer *iob;
3197
3198	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3199				 data_length +
3200				 offsetof(struct qeth_ipacmd_setadpparms,
3201					  data));
3202	if (!iob)
3203		return NULL;
3204
3205	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3206	hdr->cmdlength = sizeof(*hdr) + data_length;
3207	hdr->command_code = adp_cmd;
3208	hdr->used_total = 1;
3209	hdr->seq_no = 1;
3210	return iob;
3211}
3212
3213static int qeth_query_setadapterparms(struct qeth_card *card)
3214{
3215	int rc;
3216	struct qeth_cmd_buffer *iob;
3217
3218	QETH_CARD_TEXT(card, 3, "queryadp");
3219	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3220				   SETADP_DATA_SIZEOF(query_cmds_supp));
3221	if (!iob)
3222		return -ENOMEM;
3223	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3224	return rc;
3225}
3226
3227static int qeth_query_ipassists_cb(struct qeth_card *card,
3228		struct qeth_reply *reply, unsigned long data)
3229{
3230	struct qeth_ipa_cmd *cmd;
3231
3232	QETH_CARD_TEXT(card, 2, "qipasscb");
3233
3234	cmd = (struct qeth_ipa_cmd *) data;
3235
3236	switch (cmd->hdr.return_code) {
3237	case IPA_RC_SUCCESS:
3238		break;
3239	case IPA_RC_NOTSUPP:
3240	case IPA_RC_L2_UNSUPPORTED_CMD:
3241		QETH_CARD_TEXT(card, 2, "ipaunsup");
3242		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3243		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3244		return -EOPNOTSUPP;
3245	default:
3246		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3247				 CARD_DEVID(card), cmd->hdr.return_code);
3248		return -EIO;
3249	}
3250
3251	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3252		card->options.ipa4 = cmd->hdr.assists;
3253	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3254		card->options.ipa6 = cmd->hdr.assists;
3255	else
3256		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3257				 CARD_DEVID(card));
3258	return 0;
3259}
3260
3261static int qeth_query_ipassists(struct qeth_card *card,
3262				enum qeth_prot_versions prot)
3263{
3264	int rc;
3265	struct qeth_cmd_buffer *iob;
3266
3267	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3268	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3269	if (!iob)
3270		return -ENOMEM;
3271	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3272	return rc;
3273}
3274
3275static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3276				struct qeth_reply *reply, unsigned long data)
3277{
3278	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3279	struct qeth_query_switch_attributes *attrs;
3280	struct qeth_switch_info *sw_info;
3281
3282	QETH_CARD_TEXT(card, 2, "qswiatcb");
3283	if (qeth_setadpparms_inspect_rc(cmd))
3284		return -EIO;
3285
3286	sw_info = (struct qeth_switch_info *)reply->param;
3287	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3288	sw_info->capabilities = attrs->capabilities;
3289	sw_info->settings = attrs->settings;
3290	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3291			sw_info->settings);
3292	return 0;
3293}
3294
3295int qeth_query_switch_attributes(struct qeth_card *card,
3296				 struct qeth_switch_info *sw_info)
3297{
3298	struct qeth_cmd_buffer *iob;
3299
3300	QETH_CARD_TEXT(card, 2, "qswiattr");
3301	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3302		return -EOPNOTSUPP;
3303	if (!netif_carrier_ok(card->dev))
3304		return -ENOMEDIUM;
3305	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3306	if (!iob)
3307		return -ENOMEM;
3308	return qeth_send_ipa_cmd(card, iob,
3309				qeth_query_switch_attributes_cb, sw_info);
3310}
3311
3312struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3313					  enum qeth_diags_cmds sub_cmd,
3314					  unsigned int data_length)
3315{
3316	struct qeth_ipacmd_diagass *cmd;
3317	struct qeth_cmd_buffer *iob;
3318
3319	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3320				 DIAG_HDR_LEN + data_length);
3321	if (!iob)
3322		return NULL;
3323
3324	cmd = &__ipa_cmd(iob)->data.diagass;
3325	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3326	cmd->subcmd = sub_cmd;
3327	return iob;
3328}
3329EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3330
3331static int qeth_query_setdiagass_cb(struct qeth_card *card,
3332		struct qeth_reply *reply, unsigned long data)
3333{
3334	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3335	u16 rc = cmd->hdr.return_code;
3336
3337	if (rc) {
3338		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3339		return -EIO;
3340	}
3341
3342	card->info.diagass_support = cmd->data.diagass.ext;
3343	return 0;
3344}
3345
3346static int qeth_query_setdiagass(struct qeth_card *card)
3347{
3348	struct qeth_cmd_buffer *iob;
3349
3350	QETH_CARD_TEXT(card, 2, "qdiagass");
3351	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3352	if (!iob)
3353		return -ENOMEM;
3354	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3355}
3356
3357static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3358{
3359	unsigned long info = get_zeroed_page(GFP_KERNEL);
3360	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3361	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3362	struct ccw_dev_id ccwid;
3363	int level;
3364
3365	tid->chpid = card->info.chpid;
3366	ccw_device_get_id(CARD_RDEV(card), &ccwid);
3367	tid->ssid = ccwid.ssid;
3368	tid->devno = ccwid.devno;
3369	if (!info)
3370		return;
3371	level = stsi(NULL, 0, 0, 0);
3372	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3373		tid->lparnr = info222->lpar_number;
3374	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3375		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3376		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3377	}
3378	free_page(info);
3379}
3380
3381static int qeth_hw_trap_cb(struct qeth_card *card,
3382		struct qeth_reply *reply, unsigned long data)
3383{
3384	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3385	u16 rc = cmd->hdr.return_code;
3386
3387	if (rc) {
3388		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3389		return -EIO;
3390	}
3391	return 0;
3392}
3393
3394int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3395{
3396	struct qeth_cmd_buffer *iob;
3397	struct qeth_ipa_cmd *cmd;
3398
3399	QETH_CARD_TEXT(card, 2, "diagtrap");
3400	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3401	if (!iob)
3402		return -ENOMEM;
3403	cmd = __ipa_cmd(iob);
3404	cmd->data.diagass.type = 1;
3405	cmd->data.diagass.action = action;
3406	switch (action) {
3407	case QETH_DIAGS_TRAP_ARM:
3408		cmd->data.diagass.options = 0x0003;
3409		cmd->data.diagass.ext = 0x00010000 +
3410			sizeof(struct qeth_trap_id);
3411		qeth_get_trap_id(card,
3412			(struct qeth_trap_id *)cmd->data.diagass.cdata);
3413		break;
3414	case QETH_DIAGS_TRAP_DISARM:
3415		cmd->data.diagass.options = 0x0001;
3416		break;
3417	case QETH_DIAGS_TRAP_CAPTURE:
3418		break;
3419	}
3420	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3421}
3422
3423static int qeth_check_qdio_errors(struct qeth_card *card,
3424				  struct qdio_buffer *buf,
3425				  unsigned int qdio_error,
3426				  const char *dbftext)
3427{
3428	if (qdio_error) {
3429		QETH_CARD_TEXT(card, 2, dbftext);
3430		QETH_CARD_TEXT_(card, 2, " F15=%02X",
3431			       buf->element[15].sflags);
3432		QETH_CARD_TEXT_(card, 2, " F14=%02X",
3433			       buf->element[14].sflags);
3434		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3435		if ((buf->element[15].sflags) == 0x12) {
3436			QETH_CARD_STAT_INC(card, rx_fifo_errors);
3437			return 0;
3438		}
3439		return 1;
3440	}
3441	return 0;
3442}
3443
3444static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3445					 unsigned int count)
3446{
3447	struct qeth_qdio_q *queue = card->qdio.in_q;
3448	struct list_head *lh;
3449	int i;
3450	int rc;
3451	int newcount = 0;
3452
3453	/* only requeue at a certain threshold to avoid SIGAs */
3454	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3455		for (i = queue->next_buf_to_init;
3456		     i < queue->next_buf_to_init + count; ++i) {
3457			if (qeth_init_input_buffer(card,
3458				&queue->bufs[QDIO_BUFNR(i)])) {
3459				break;
3460			} else {
3461				newcount++;
3462			}
3463		}
3464
3465		if (newcount < count) {
3466			/* We are in a memory shortage, so we switch back to
3467			 * traditional skb allocation and drop packets. */
3468			atomic_set(&card->force_alloc_skb, 3);
3469			count = newcount;
3470		} else {
3471			atomic_add_unless(&card->force_alloc_skb, -1, 0);
3472		}
3473
3474		if (!count) {
3475			i = 0;
3476			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3477				i++;
3478			if (i == card->qdio.in_buf_pool.buf_count) {
3479				QETH_CARD_TEXT(card, 2, "qsarbw");
3480				schedule_delayed_work(
3481					&card->buffer_reclaim_work,
3482					QETH_RECLAIM_WORK_TIME);
3483			}
3484			return 0;
3485		}
3486
3487		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0,
3488						  queue->next_buf_to_init,
3489						  count);
3490		if (rc)
3491			QETH_CARD_TEXT(card, 2, "qinberr");
3493		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3494						     count);
3495		return count;
3496	}
3497
3498	return 0;
3499}
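
/*
 * In short: RX buffers are returned to the hardware only once at least
 * QETH_IN_BUF_REQUEUE_THRESHOLD of them have accumulated, so a single SIGA
 * covers many buffers. If nothing could be refilled although the complete
 * pool sits on the free list, the delayed reclaim work below kicks NAPI
 * to retry later.
 */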
3500
3501static void qeth_buffer_reclaim_work(struct work_struct *work)
3502{
3503	struct qeth_card *card = container_of(to_delayed_work(work),
3504					      struct qeth_card,
3505					      buffer_reclaim_work);
3506
3507	local_bh_disable();
3508	napi_schedule(&card->napi);
3509	/* kick-start the NAPI softirq: */
3510	local_bh_enable();
3511}
3512
3513static void qeth_handle_send_error(struct qeth_card *card,
3514		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3515{
3516	int sbalf15 = buffer->buffer->element[15].sflags;
3517
3518	QETH_CARD_TEXT(card, 6, "hdsnderr");
3519	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3520
3521	if (!qdio_err)
3522		return;
3523
3524	if ((sbalf15 >= 15) && (sbalf15 <= 31))
3525		return;
3526
3527	QETH_CARD_TEXT(card, 1, "lnkfail");
3528	QETH_CARD_TEXT_(card, 1, "%04x %02x",
3529		       (u16)qdio_err, (u8)sbalf15);
3530}
3531
3532/**
3533 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3534 * @queue: queue to check for packing buffer
3535 *
3536 * Returns number of buffers that were prepared for flush.
3537 */
3538static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3539{
3540	struct qeth_qdio_out_buffer *buffer;
3541
3542	buffer = queue->bufs[queue->next_buf_to_fill];
3543	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3544	    (buffer->next_element_to_fill > 0)) {
3545		/* it's a packing buffer */
3546		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3547		queue->next_buf_to_fill =
3548			QDIO_BUFNR(queue->next_buf_to_fill + 1);
3549		return 1;
3550	}
3551	return 0;
3552}
3553
3554/*
3555 * Switch to packing state if the number of used buffers on a queue
3556 * reaches a certain limit.
3557 */
3558static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3559{
3560	if (!queue->do_pack) {
3561		if (atomic_read(&queue->used_buffers)
3562		    >= QETH_HIGH_WATERMARK_PACK) {
3563			/* switch non-PACKING -> PACKING */
3564			QETH_CARD_TEXT(queue->card, 6, "np->pack");
3565			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3566			queue->do_pack = 1;
3567		}
3568	}
3569}
3570
3571/*
3572 * Switches from packing to non-packing mode. If there is a packing
3573 * buffer on the queue this buffer will be prepared to be flushed.
3574 * In that case 1 is returned to inform the caller. If no buffer
3575 * has to be flushed, zero is returned.
3576 */
3577static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3578{
3579	if (queue->do_pack) {
3580		if (atomic_read(&queue->used_buffers)
3581		    <= QETH_LOW_WATERMARK_PACK) {
3582			/* switch PACKING -> non-PACKING */
3583			QETH_CARD_TEXT(queue->card, 6, "pack->np");
3584			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3585			queue->do_pack = 0;
3586			return qeth_prep_flush_pack_buffer(queue);
3587		}
3588	}
3589	return 0;
3590}
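
/*
 * Taken together, the two helpers above form a hysteresis: packing mode is
 * entered once used_buffers climbs to QETH_HIGH_WATERMARK_PACK and is left
 * again only after it has dropped to QETH_LOW_WATERMARK_PACK, so a queue
 * hovering around a single threshold does not flip modes on every packet.
 */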
3591
3592static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3593			       int count)
3594{
3595	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3596	struct qeth_card *card = queue->card;
3597	unsigned int frames, usecs;
3598	struct qaob *aob = NULL;
3599	int rc;
3600	int i;
3601
3602	for (i = index; i < index + count; ++i) {
3603		unsigned int bidx = QDIO_BUFNR(i);
3604		struct sk_buff *skb;
3605
3606		buf = queue->bufs[bidx];
3607		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3608				SBAL_EFLAGS_LAST_ENTRY;
3609		queue->coalesced_frames += buf->frames;
3610
3611		if (IS_IQD(card)) {
3612			skb_queue_walk(&buf->skb_list, skb)
3613				skb_tx_timestamp(skb);
3614		}
3615	}
3616
3617	if (IS_IQD(card)) {
3618		if (card->options.cq == QETH_CQ_ENABLED &&
3619		    !qeth_iqd_is_mcast_queue(card, queue) &&
3620		    count == 1) {
3621			if (!buf->aob)
3622				buf->aob = kmem_cache_zalloc(qeth_qaob_cache,
3623							     GFP_ATOMIC);
3624			if (buf->aob) {
3625				struct qeth_qaob_priv1 *priv;
3626
3627				aob = buf->aob;
3628				priv = (struct qeth_qaob_priv1 *)&aob->user1;
3629				priv->state = QETH_QAOB_ISSUED;
3630				priv->queue_no = queue->queue_no;
3631			}
3632		}
3633	} else {
3634		if (!queue->do_pack) {
3635			if ((atomic_read(&queue->used_buffers) >=
3636				(QETH_HIGH_WATERMARK_PACK -
3637				 QETH_WATERMARK_PACK_FUZZ)) &&
3638			    !atomic_read(&queue->set_pci_flags_count)) {
3639				/* it's likely that we'll go to packing
3640				 * mode soon */
3641				atomic_inc(&queue->set_pci_flags_count);
3642				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3643			}
3644		} else {
3645			if (!atomic_read(&queue->set_pci_flags_count)) {
3646				/*
3647				 * There's no outstanding PCI any more, so we
3648				 * request a PCI to make sure that it wakes us
3649				 * at some time in the future. Then we can
3650				 * flush packed buffers that might still be
3651				 * hanging around, which can happen if no
3652				 * further send was requested by the stack.
3653				 */
3654				atomic_inc(&queue->set_pci_flags_count);
3655				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3656			}
3657		}
3658	}
3659
3660	QETH_TXQ_STAT_INC(queue, doorbell);
3661	rc = qdio_add_bufs_to_output_queue(CARD_DDEV(card), queue->queue_no,
3662					   index, count, aob);
3663
3664	switch (rc) {
3665	case 0:
3666	case -ENOBUFS:
3667		/* ignore temporary SIGA errors without busy condition */
3668
3669		/* Fake the TX completion interrupt: */
3670		frames = READ_ONCE(queue->max_coalesced_frames);
3671		usecs = READ_ONCE(queue->coalesce_usecs);
3672
3673		if (frames && queue->coalesced_frames >= frames) {
3674			napi_schedule(&queue->napi);
3675			queue->coalesced_frames = 0;
3676			QETH_TXQ_STAT_INC(queue, coal_frames);
3677		} else if (qeth_use_tx_irqs(card) &&
3678			   atomic_read(&queue->used_buffers) >= 32) {
3679			/* Old behaviour carried over from the qdio layer: */
3680			napi_schedule(&queue->napi);
3681			QETH_TXQ_STAT_INC(queue, coal_frames);
3682		} else if (usecs) {
3683			qeth_tx_arm_timer(queue, usecs);
3684		}
3685
3686		break;
3687	default:
3688		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3689		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3690		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3691		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3692		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3693
3694		/* This must not happen under normal circumstances. If it
3695		 * happens, something is really wrong -> recover. */
3696		qeth_schedule_recovery(queue->card);
3697	}
3698}
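
/*
 * The completion handling above resembles classic interrupt coalescing:
 * after the doorbell, the TX NAPI instance is either polled right away
 * (enough frames coalesced, or the old used_buffers >= 32 heuristic) or a
 * timer is armed so that stragglers still complete within coalesce_usecs.
 */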
3699
3700static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3701{
3702	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3703
3704	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3705	queue->prev_hdr = NULL;
3706	queue->bulk_count = 0;
3707}
3708
3709static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3710{
3711	/*
3712	 * check if we have to switch to non-packing mode or if
3713	 * we have to get a pci flag out on the queue
3714	 */
3715	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3716	    !atomic_read(&queue->set_pci_flags_count)) {
3717		unsigned int index, flush_cnt;
3718
3719		spin_lock(&queue->lock);
3720
3721		index = queue->next_buf_to_fill;
3722
3723		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
3724		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
3725			flush_cnt = qeth_prep_flush_pack_buffer(queue);
3726
3727		if (flush_cnt) {
3728			qeth_flush_buffers(queue, index, flush_cnt);
3729			QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3730		}
3731
3732		spin_unlock(&queue->lock);
3733	}
3734}
3735
3736static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3737{
3738	struct qeth_card *card = (struct qeth_card *)card_ptr;
3739
3740	napi_schedule_irqoff(&card->napi);
3741}
3742
3743int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3744{
3745	if (card->options.cq == QETH_CQ_NOTAVAILABLE)
3746		return -1;
3747	if (card->options.cq == cq)
3748		return 0;
3749
3750	qeth_free_qdio_queues(card);
3751	card->options.cq = cq;
3752	return 0;
3753}
3764EXPORT_SYMBOL_GPL(qeth_configure_cq);
3765
3766static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
3767{
3768	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
3769	unsigned int queue_no = priv->queue_no;
3770
3771	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));
3772
3773	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
3774	    queue_no < card->qdio.no_out_queues)
3775		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
3776}
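
/*
 * The xchg() above appears to close a race with the TX completion path:
 * whichever side sees the other's state last is responsible for kicking
 * the queue's NAPI instance, so a QAOB that completes only after the TX
 * side already scanned the buffer (state QETH_QAOB_PENDING) still gets
 * finalized.
 */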
3777
3778static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3779				 unsigned int queue, int first_element,
3780				 int count)
3781{
3782	struct qeth_qdio_q *cq = card->qdio.c_q;
3783	int i;
3784	int rc;
3785
3786	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3787	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3788	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3789
3790	if (qdio_err) {
3791		netif_tx_stop_all_queues(card->dev);
3792		qeth_schedule_recovery(card);
3793		return;
3794	}
3795
	for (i = first_element; i < first_element + count; ++i) {
		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
		int e = 0;

		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
		       buffer->element[e].addr) {
			dma64_t phys_aob_addr = buffer->element[e].addr;

			qeth_qdio_handle_aob(card, dma64_to_virt(phys_aob_addr));
			++e;
		}
		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
	}
	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), queue,
					  cq->next_buf_to_init, count);
	if (rc) {
		dev_warn(&card->gdev->dev,
			 "QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}

	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
}

static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
				    unsigned int qdio_err, int queue,
				    int first_elem, int count,
				    unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);

	if (qdio_err)
		qeth_schedule_recovery(card);
}

static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
				     unsigned int qdio_error, int __queue,
				     int first_element, int count,
				     unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT(card, 2, "achkcond");
	netif_tx_stop_all_queues(card->dev);
	qeth_schedule_recovery(card);
}

/*
 * Note: Function assumes that we have 4 outbound queues.
 */
static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
	u8 tos;

	switch (card->qdio.do_prio_queueing) {
	case QETH_PRIO_Q_ING_TOS:
	case QETH_PRIO_Q_ING_PREC:
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			tos = ipv4_get_dsfield(ip_hdr(skb));
			break;
		case htons(ETH_P_IPV6):
			tos = ipv6_get_dsfield(ipv6_hdr(skb));
			break;
		default:
			return card->qdio.default_out_queue;
		}
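		/* ~tos >> 6 extracts the inverted two most-significant
		 * precedence bits, so higher precedence maps to a lower
		 * (= higher-priority) queue number:
		 */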
		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
			return ~tos >> 6 & 3;
		if (tos & IPTOS_MINCOST)
			return 3;
		if (tos & IPTOS_RELIABILITY)
			return 2;
		if (tos & IPTOS_THROUGHPUT)
			return 1;
		if (tos & IPTOS_LOWDELAY)
			return 0;
		break;
	case QETH_PRIO_Q_ING_SKB:
		if (skb->priority > 5)
			return 0;
		return ~skb->priority >> 1 & 3;
	case QETH_PRIO_Q_ING_VLAN:
		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
			return ~ntohs(veth->h_vlan_TCI) >>
			       (VLAN_PRIO_SHIFT + 1) & 3;
		break;
	case QETH_PRIO_Q_ING_FIXED:
		return card->qdio.default_out_queue;
	default:
		break;
	}
	return card->qdio.default_out_queue;
}

/**
 * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
 * @skb:				SKB address
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 * the fragmented part of the SKB. Returns zero for a linear SKB.
 */
static int qeth_get_elements_for_frags(struct sk_buff *skb)
{
	int cnt, elements = 0;

	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		elements += qeth_get_elements_for_range(
			(addr_t)skb_frag_address(frag),
			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
	}
	return elements;
}

/**
 * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
 *				to transmit an skb.
 * @skb:			the skb to operate on.
 * @data_offset:		skip this part of the skb's linear data
 *
 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
 * skb's data (both its linear part and paged fragments).
 */
static unsigned int qeth_count_elements(struct sk_buff *skb,
					unsigned int data_offset)
{
	unsigned int elements = qeth_get_elements_for_frags(skb);
	addr_t end = (addr_t)skb->data + skb_headlen(skb);
	addr_t start = (addr_t)skb->data + data_offset;

	if (start != end)
		elements += qeth_get_elements_for_range(start, end);
	return elements;
}

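/* Cache objects for qeth_add_hw_header() must be able to hold a TSO header
 * plus the largest run of protocol headers that may get copied behind it:
 */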
#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
					 MAX_TCP_HEADER)

/**
 * qeth_add_hw_header() - add a HW header to an skb.
 * @queue: TX queue that the skb will be placed on.
 * @skb: skb that the HW header should be added to.
 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
 *	 it contains a valid pointer to a qeth_hdr.
 * @hdr_len: length of the HW header.
 * @proto_len: length of protocol headers that need to be in same page as the
 *	       HW header.
 * @elements: returns the required number of buffer elements for this skb.
 *
 * Returns the pushed length. If the header can't be pushed on
 * (e.g. because it would cross a page boundary), it is allocated from
 * the cache instead and 0 is returned.
 * The number of needed buffer elements is returned in @elements.
 * A negative return value indicates an error during header creation.
 */
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
			      struct sk_buff *skb, struct qeth_hdr **hdr,
			      unsigned int hdr_len, unsigned int proto_len,
			      unsigned int *elements)
{
	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
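	/* Number of payload bytes that must share a page with the pushed HW
	 * header; with no protocol headers, one byte is enough to keep the
	 * header adjacent to skb->data:
	 */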
	const unsigned int contiguous = proto_len ? proto_len : 1;
	const unsigned int max_elements = queue->max_elements;
	unsigned int __elements;
	addr_t start, end;
	bool push_ok;
	int rc;

check_layout:
	start = (addr_t)skb->data - hdr_len;
	end = (addr_t)skb->data;

	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
		/* Push HW header into same page as first protocol header. */
		push_ok = true;
		/* ... but TSO always needs a separate element for headers: */
		if (skb_is_gso(skb))
			__elements = 1 + qeth_count_elements(skb, proto_len);
		else
			__elements = qeth_count_elements(skb, 0);
	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
		/* Push HW header into preceding page, flush with skb->data. */
		push_ok = true;
		__elements = 1 + qeth_count_elements(skb, 0);
	} else {
		/* Use header cache, copy protocol headers up. */
		push_ok = false;
		__elements = 1 + qeth_count_elements(skb, proto_len);
	}

	/* Compress skb to fit into one IO buffer: */
	if (__elements > max_elements) {
		if (!skb_is_nonlinear(skb)) {
			/* Drop it, no easy way of shrinking it further. */
			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
					 max_elements, __elements, skb->len);
			return -E2BIG;
		}

		rc = skb_linearize(skb);
		if (rc) {
			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
			return rc;
		}

		QETH_TXQ_STAT_INC(queue, skbs_linearized);
		/* Linearization changed the layout, re-evaluate: */
		goto check_layout;
	}

	*elements = __elements;
	/* Add the header: */
	if (push_ok) {
		*hdr = skb_push(skb, hdr_len);
		return hdr_len;
	}

	/* Fall back to cache element with known-good alignment: */
	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
		return -E2BIG;
	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
	if (!*hdr)
		return -ENOMEM;
	/* Copy protocol headers behind HW header: */
	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
	return 0;
}

static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
			      struct sk_buff *curr_skb,
			      struct qeth_hdr *curr_hdr)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
	struct qeth_hdr *prev_hdr = queue->prev_hdr;

	if (!prev_hdr)
		return true;

	/* All packets must have the same target: */
	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);

		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
					eth_hdr(curr_skb)->h_dest) &&
		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
	}

	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
}

/**
 * qeth_fill_buffer() - map skb into an output buffer
 * @buf:	buffer to transport the skb
 * @skb:	skb to map into the buffer
 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
 *		from qeth_core_header_cache.
 * @offset:	when mapping the skb, start at skb->data + offset
 * @hd_len:	if > 0, build a dedicated header element of this size
 */
static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
				     struct sk_buff *skb, struct qeth_hdr *hdr,
				     unsigned int offset, unsigned int hd_len)
{
	struct qdio_buffer *buffer = buf->buffer;
	int element = buf->next_element_to_fill;
	int length = skb_headlen(skb) - offset;
	char *data = skb->data + offset;
	unsigned int elem_length, cnt;
	bool is_first_elem = true;

	__skb_queue_tail(&buf->skb_list, skb);

	/* build dedicated element for HW Header */
	if (hd_len) {
		is_first_elem = false;

		buffer->element[element].addr = virt_to_dma64(hdr);
		buffer->element[element].length = hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;

		/* HW header is allocated from cache: */
		if ((void *)hdr != skb->data)
			__set_bit(element, buf->from_kmem_cache);
		/* HW header was pushed and is contiguous with linear part: */
		else if (length > 0 && !PAGE_ALIGNED(data) &&
			 (data == (char *)hdr + hd_len))
			buffer->element[element].eflags |=
				SBAL_EFLAGS_CONTIGUOUS;

		element++;
	}

	/* map linear part into buffer element(s) */
	while (length > 0) {
		elem_length = min_t(unsigned int, length,
				    PAGE_SIZE - offset_in_page(data));

		buffer->element[element].addr = virt_to_dma64(data);
		buffer->element[element].length = elem_length;
		length -= elem_length;
		if (is_first_elem) {
			is_first_elem = false;
			if (length || skb_is_nonlinear(skb))
				/* skb needs additional elements */
				buffer->element[element].eflags =
					SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags = 0;
		} else {
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;
		}

		data += elem_length;
		element++;
	}

	/* map page frags into buffer element(s) */
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];

		data = skb_frag_address(frag);
		length = skb_frag_size(frag);
		while (length > 0) {
			elem_length = min_t(unsigned int, length,
					    PAGE_SIZE - offset_in_page(data));

			buffer->element[element].addr = virt_to_dma64(data);
			buffer->element[element].length = elem_length;
			buffer->element[element].eflags =
				SBAL_EFLAGS_MIDDLE_FRAG;

			length -= elem_length;
			data += elem_length;
			element++;
		}
	}

	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	buf->next_element_to_fill = element;
	return element;
}

static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		       struct sk_buff *skb, unsigned int elements,
		       struct qeth_hdr *hdr, unsigned int offset,
		       unsigned int hd_len)
{
	unsigned int bytes = qdisc_pkt_len(skb);
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	bool flush;

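	/* For IQD, skbs are bulked into a window of buffers that is flushed
	 * as one unit; fill behind the current [bulk_start, +bulk_count)
	 * window:
	 */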
	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	flush = !qeth_iqd_may_bulk(queue, skb, hdr);

	if (flush ||
	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
		if (buffer->next_element_to_fill > 0) {
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			queue->bulk_count++;
		}

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);

		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
						queue->bulk_count)];

		/* Sanity-check again: */
		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and fails to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += bytes;
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	queue->prev_hdr = hdr;

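	/* Update BQL accounting; a true result means the doorbell must ring
	 * now instead of waiting for further packets:
	 */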
	flush = __netdev_tx_sent_queue(txq, bytes,
				       !stopped && netdev_xmit_more());

	if (flush || next_element >= queue->max_elements) {
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->bulk_count++;

		if (queue->bulk_count >= queue->bulk_max)
			flush = true;

		if (flush)
			qeth_flush_queue(queue);
	}

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return 0;
}

static int qeth_do_send_packet(struct qeth_card *card,
			       struct qeth_qdio_out_q *queue,
			       struct sk_buff *skb, struct qeth_hdr *hdr,
			       unsigned int offset, unsigned int hd_len,
			       unsigned int elements_needed)
{
	unsigned int start_index = queue->next_buf_to_fill;
	struct qeth_qdio_out_buffer *buffer;
	unsigned int next_element;
	struct netdev_queue *txq;
	bool stopped = false;
	int flush_count = 0;
	int do_pack = 0;
	int rc = 0;

	buffer = queue->bufs[queue->next_buf_to_fill];

	/* Just a sanity check, the wake/stop logic should ensure that we always
	 * get a free buffer.
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		return -EBUSY;

	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));

	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if (buffer->next_element_to_fill + elements_needed >
		    queue->max_elements) {
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
			buffer = queue->bufs[queue->next_buf_to_fill];

			/* We stepped forward, so sanity-check again: */
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
						   flush_count);
				rc = -EBUSY;
				goto out;
			}
		}
	}

	if (buffer->next_element_to_fill == 0 &&
	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
		/* If a TX completion happens right _here_ and fails to wake
		 * the txq, then our re-check below will catch the race.
		 */
		QETH_TXQ_STAT_INC(queue, stopped);
		netif_tx_stop_queue(txq);
		stopped = true;
	}

	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
	buffer->bytes += qdisc_pkt_len(skb);
	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;

	if (queue->do_pack)
		QETH_TXQ_STAT_INC(queue, skbs_pack);
	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
		flush_count++;
		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
		queue->next_buf_to_fill =
				QDIO_BUFNR(queue->next_buf_to_fill + 1);
	}

	if (flush_count)
		qeth_flush_buffers(queue, start_index, flush_count);

out:
	if (do_pack)
		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);

	if (stopped && !qeth_out_queue_is_full(queue))
		netif_tx_start_queue(txq);
	return rc;
}

static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
			      unsigned int payload_len, struct sk_buff *skb,
			      unsigned int proto_len)
{
	struct qeth_hdr_ext_tso *ext = &hdr->ext;

	ext->hdr_tot_len = sizeof(*ext);
	ext->imb_hdr_no = 1;
	ext->hdr_type = 1;
	ext->hdr_version = 1;
	ext->hdr_len = 28;
	ext->payload_len = payload_len;
	ext->mss = skb_shinfo(skb)->gso_size;
	ext->dg_hdr_len = proto_len;
}

int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
	      struct qeth_qdio_out_q *queue, __be16 proto,
	      void (*fill_header)(struct qeth_qdio_out_q *queue,
				  struct qeth_hdr *hdr, struct sk_buff *skb,
				  __be16 proto, unsigned int data_len))
{
	unsigned int proto_len, hw_hdr_len;
	unsigned int frame_len = skb->len;
	bool is_tso = skb_is_gso(skb);
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
	}

	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
				      &elements);
	if (push_len < 0)
		return push_len;
	if (is_tso || !push_len) {
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
		data_offset = push_len + proto_len;
	}
	memset(hdr, 0, hw_hdr_len);
	fill_header(queue, hdr, skb, proto, frame_len);
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);

	if (IS_IQD(card)) {
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
		spin_lock(&queue->lock);
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
		spin_unlock(&queue->lock);
	}

	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);

static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *setparms;

	QETH_CARD_TEXT(card, 4, "prmadpcb");

	setparms = &(cmd->data.setadapterparms);
	if (qeth_setadpparms_inspect_rc(cmd)) {
		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
	return (cmd->hdr.return_code) ? -EIO : 0;
}

void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
{
	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
						    SET_PROMISC_MODE_OFF;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "setprom");
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
				   SETADP_DATA_SIZEOF(mode));
	if (!iob)
		return;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);

static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *adp_cmd;

	QETH_CARD_TEXT(card, 4, "chgmaccb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	adp_cmd = &cmd->data.setadapterparms;
	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
		return -EADDRNOTAVAIL;

	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr);
	return 0;
}

int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "chgmac");

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
				   SETADP_DATA_SIZEOF(change_addr));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
			card->dev->dev_addr);
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
			       NULL);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);

static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setaccb");

	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	QETH_CARD_TEXT_(card, 2, "rc=%d",
			cmd->data.setadapterparms.hdr.return_code);
	if (cmd->data.setadapterparms.hdr.return_code !=
						SET_ACCESS_CTRL_RC_SUCCESS)
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
				 cmd->data.setadapterparms.hdr.return_code);
	switch (qeth_setadpparms_inspect_rc(cmd)) {
	case SET_ACCESS_CTRL_RC_SUCCESS:
		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
			dev_info(&card->gdev->dev,
				 "QDIO data connection isolation is deactivated\n");
		else
			dev_info(&card->gdev->dev,
				 "QDIO data connection isolation is activated\n");
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
				 CARD_DEVID(card));
		return 0;
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. QDIO data connection isolation not supported\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
		return -EPERM;
	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
		dev_err(&card->gdev->dev,
			"The adjacent switch port does not support reflective relay mode\n");
		return -EOPNOTSUPP;
	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
		dev_err(&card->gdev->dev,
			"The reflective relay mode cannot be enabled at the adjacent switch port\n");
		return -EREMOTEIO;
	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
		dev_warn(&card->gdev->dev,
			 "Turning off reflective relay mode at the adjacent switch failed\n");
		/* benign error while disabling ISOLATION_MODE_FWD */
		return 0;
	default:
		return -EIO;
	}
}

int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
				     enum qeth_ipa_isolation_modes mode)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setacctl");

	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		dev_err(&card->gdev->dev,
			"Adapter does not support QDIO data connection isolation\n");
		return -EOPNOTSUPP;
	}

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
				   SETADP_DATA_SIZEOF(set_access_ctrl));
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	access_ctrl_req->subcmd_code = mode;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
			       NULL);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
		QETH_DBF_MESSAGE(3, "IPA SET_ACCESS_CTRL failed with rc=%d on device %x\n",
				 rc, CARD_DEVID(card));
	}

	return rc;
}

void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qeth_card *card;

	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 4, "txtimeo");
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);

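/* qeth devices have no real PHY; synthesize plausible MII register values
 * from the card's state for ioctl/ethtool users.
 */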
static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		rc = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
			rc |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
		     BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		     dev->dev_addr[2];
		rc = (rc >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		rc = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		     LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_EXPANSION: /* Expansion register */
		break;
	case MII_DCOUNTER: /* disconnect counter */
		break;
	case MII_FCSCOUNTER: /* false carrier counter */
		break;
	case MII_NWAYTEST: /* N-way auto-neg test register */
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		rc = card->stats.rx_length_errors +
		     card->stats.rx_frame_errors +
		     card->stats.rx_fifo_errors;
		break;
	case MII_SREVISION: /* silicon revision */
		break;
	case MII_RESV1: /* reserved 1 */
		break;
	case MII_LBRERROR: /* loopback, rx, bypass error */
		break;
	case MII_PHYADDR: /* physical address */
		break;
	case MII_RESV2: /* reserved 2 */
		break;
	case MII_TPISTATUS: /* TPI status for 10mbps */
		break;
	case MII_NCONFIG: /* network interface config */
		break;
	default:
		break;
	}
	return rc;
}

static int qeth_snmp_command_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_arp_query_info *qinfo = reply->param;
	struct qeth_ipacmd_setadpparms *adp_cmd;
	unsigned int data_len;
	void *snmp_data;

	QETH_CARD_TEXT(card, 3, "snpcmdcb");

	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
		return -EIO;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
		return -EIO;
	}

	adp_cmd = &cmd->data.setadapterparms;
	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
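	/* The first reply chunk is copied including the SNMP command header;
	 * subsequent chunks skip the leading header fields:
	 */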
	if (adp_cmd->hdr.seq_no == 1) {
		snmp_data = &adp_cmd->data.snmp;
	} else {
		snmp_data = &adp_cmd->data.snmp.request;
		data_len -= offsetof(struct qeth_snmp_cmd, request);
	}

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
		return -ENOSPC;
	}
	QETH_CARD_TEXT_(card, 4, "snore%i",
			cmd->data.setadapterparms.hdr.used_total);
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
			cmd->data.setadapterparms.hdr.seq_no);
	/* copy entries to user buffer */
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
	qinfo->udata_offset += data_len;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
	struct qeth_snmp_ureq __user *ureq;
	struct qeth_cmd_buffer *iob;
	unsigned int req_len;
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "snmpcmd");

	if (IS_VM_NIC(card))
		return -EOPNOTSUPP;

	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
	    IS_LAYER3(card))
		return -EOPNOTSUPP;

	ureq = (struct qeth_snmp_ureq __user *) udata;
	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
	    get_user(req_len, &ureq->hdr.req_len))
		return -EFAULT;

	/* Sanitize user input, to avoid overflows in iob size calculation: */
	if (req_len > QETH_BUFSIZE)
		return -EINVAL;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
	if (!iob)
		return -ENOMEM;

	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
			   &ureq->cmd, req_len)) {
		qeth_put_cmd(iob);
		return -EFAULT;
	}

	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
		qeth_put_cmd(iob);
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
	if (rc)
		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
				 CARD_DEVID(card), rc);
	else if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
		rc = -EFAULT;

	kfree(qinfo.udata);
	return rc;
}

static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
					 struct qeth_reply *reply,
					 unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_qoat_priv *priv = reply->param;
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;

	if (resdatalen > (priv->buffer_len - priv->response_len))
		return -ENOSPC;

	memcpy(priv->buffer + priv->response_len,
	       &cmd->data.setadapterparms.hdr, resdatalen);
	priv->response_len += resdatalen;

	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}

static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");

	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
		return -EOPNOTSUPP;

	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
		return -EFAULT;

	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
	priv.buffer = vzalloc(oat_data.buffer_len);
	if (!priv.buffer)
		return -ENOMEM;

	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
				   SETADP_DATA_SIZEOF(query_oat));
	if (!iob) {
		rc = -ENOMEM;
		goto out_free;
	}
	cmd = __ipa_cmd(iob);
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;

	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
	if (!rc) {
		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
					 u64_to_user_ptr(oat_data.ptr);
		oat_data.response_len = priv.response_len;

		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
			rc = -EFAULT;
	}

out_free:
	vfree(priv.buffer);
	return rc;
}

static int qeth_init_link_info_oat_cb(struct qeth_card *card,
				      struct qeth_reply *reply_priv,
				      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	struct qeth_link_info *link_info = reply_priv->param;
	struct qeth_query_oat_physical_if *phys_if;
	struct qeth_query_oat_reply *reply;

	QETH_CARD_TEXT(card, 2, "qoatincb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	/* Multi-part reply is unexpected, don't bother: */
	if (cmd->data.setadapterparms.hdr.used_total > 1)
		return -EINVAL;

	/* Expect the reply to start with phys_if data: */
	reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
	if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
	    reply->length < sizeof(*reply))
		return -EINVAL;

	phys_if = &reply->phys_if;

	switch (phys_if->speed_duplex) {
	case QETH_QOAT_PHYS_SPEED_10M_HALF:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_10M_FULL:
		link_info->speed = SPEED_10;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_HALF:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_100M_FULL:
		link_info->speed = SPEED_100;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_HALF:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_HALF;
		break;
	case QETH_QOAT_PHYS_SPEED_1000M_FULL:
		link_info->speed = SPEED_1000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_10G_FULL:
		link_info->speed = SPEED_10000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_25G_FULL:
		link_info->speed = SPEED_25000;
		link_info->duplex = DUPLEX_FULL;
		break;
	case QETH_QOAT_PHYS_SPEED_UNKNOWN:
	default:
		link_info->speed = SPEED_UNKNOWN;
		link_info->duplex = DUPLEX_UNKNOWN;
		break;
	}

	switch (phys_if->media_type) {
	case QETH_QOAT_PHYS_MEDIA_COPPER:
		link_info->port = PORT_TP;
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
		break;
	case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
		link_info->port = PORT_FIBRE;
		link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
		break;
	default:
		link_info->port = PORT_OTHER;
		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
		break;
	}

	return 0;
}

static void qeth_init_link_info(struct qeth_card *card)
{
	qeth_default_link_info(card);

	/* Get more accurate data via QUERY OAT: */
	if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
		struct qeth_link_info link_info;
		struct qeth_cmd_buffer *iob;

		iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
					   SETADP_DATA_SIZEOF(query_oat));
		if (iob) {
			struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
			struct qeth_query_oat *oat_req;

			oat_req = &cmd->data.setadapterparms.data.query_oat;
			oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;

			if (!qeth_send_ipa_cmd(card, iob,
					       qeth_init_link_info_oat_cb,
					       &link_info)) {
				if (link_info.speed != SPEED_UNKNOWN)
					card->info.link_info.speed = link_info.speed;
				if (link_info.duplex != DUPLEX_UNKNOWN)
					card->info.link_info.duplex = link_info.duplex;
				if (link_info.port != PORT_OTHER)
					card->info.link_info.port = link_info.port;
				if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
					card->info.link_info.link_mode = link_info.link_mode;
			}
		}
	}
}

/**
 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
 * @card: pointer to a qeth_card
 *
 * Return:	0 if a MAC address has been set for the card's netdevice,
 *		an error code for the various error conditions otherwise.
 */
int qeth_vm_request_mac(struct qeth_card *card)
{
	struct diag26c_mac_resp *response;
	struct diag26c_mac_req *request;
	int rc;

	QETH_CARD_TEXT(card, 2, "vmreqmac");

	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
	if (!request || !response) {
		rc = -ENOMEM;
		goto out;
	}

	request->resp_buf_len = sizeof(*response);
	request->resp_version = DIAG26C_VERSION2;
	request->op_code = DIAG26C_GET_MAC;
	request->devno = card->info.ddev_devno;

	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
	if (rc)
		goto out;
	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

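	/* Validate the response as filled in by z/VM: it must be of the
	 * requested size and format, and carry a usable MAC address:
	 */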
	if (request->resp_buf_len < sizeof(*response) ||
	    response->version != request->resp_version) {
		rc = -EIO;
		QETH_CARD_TEXT(card, 2, "badresp");
		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
			      sizeof(request->resp_buf_len));
	} else if (!is_valid_ether_addr(response->mac)) {
		rc = -EINVAL;
		QETH_CARD_TEXT(card, 2, "badmac");
		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
	} else {
		eth_hw_addr_set(card->dev, response->mac);
	}

out:
	kfree(response);
	kfree(request);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);

static void qeth_determine_capabilities(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct ccw_device *ddev = channel->ccwdev;
	int rc;
	int ddev_offline = 0;

	QETH_CARD_TEXT(card, 2, "detcapab");
	if (!ddev->online) {
		ddev_offline = 1;
		rc = qeth_start_channel(channel);
		if (rc) {
			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
			goto out;
		}
	}

	rc = qeth_read_conf_data(card);
	if (rc) {
		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
				 CARD_DEVID(card), rc);
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out_offline;
	}

	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);

	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
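	/* Completion Queueing requires a HiperSockets device whose input
	 * queues can be initiated and that offers format-2 CQs:
	 */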
	if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
	    (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
	    (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
		dev_info(&card->gdev->dev,
			 "Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}

out_offline:
	if (ddev_offline == 1)
		qeth_stop_channel(channel);
out:
	return;
}

static void qeth_read_ccw_conf_data(struct qeth_card *card)
{
	struct qeth_card_info *info = &card->info;
	struct ccw_device *cdev = CARD_DDEV(card);
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ccwconfd");
	ccw_device_get_id(cdev, &dev_id);

	info->ddev_devno = dev_id.devno;
	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
			  !ccw_device_get_iid(cdev, &info->iid) &&
			  !ccw_device_get_chid(cdev, 0, &info->chid);
	info->ssid = dev_id.ssid;

	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
		 info->chid, info->chpid);

	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
}

static int qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
	struct qeth_qib_parms *qib_parms = NULL;
	struct qdio_initialize init_data;
	unsigned int no_input_qs = 1;
	unsigned int i;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "qdioest");

	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
		if (!qib_parms)
			return -ENOMEM;

		qeth_fill_qib_parms(card, qib_parms);
	}

	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
	if (card->options.cq == QETH_CQ_ENABLED) {
		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
		no_input_qs++;
	}

	for (i = 0; i < card->qdio.no_out_queues; i++)
		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
							  QDIO_QETH_QFMT;
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field	 = (void *)qib_parms;
	init_data.no_input_qs		 = no_input_qs;
	init_data.no_output_qs		 = card->qdio.no_out_queues;
	init_data.input_handler		 = qeth_qdio_input_handler;
	init_data.output_handler	 = qeth_qdio_output_handler;
	init_data.irq_poll		 = qeth_qdio_poll;
	init_data.int_parm		 = (unsigned long) card;
	init_data.input_sbal_addr_array	 = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
			   QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
				   init_data.no_output_qs);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(CARD_DDEV(card), &init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
		}
	}

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
		break;
	default:
		break;
	}

out:
	kfree(qib_parms);
	return rc;
}

static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "freecrd");

	unregister_service_level(&card->qeth_service_level);
	debugfs_remove_recursive(card->debugfs);
	qeth_put_cmd(card->read_cmd);
	destroy_workqueue(card->event_wq);
	dev_set_drvdata(&card->gdev->dev, NULL);
	kfree(card);
}

static void qeth_trace_features(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "features");
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
}

static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
#ifdef CONFIG_QETH_OSX
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
#endif
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);

static struct ccw_driver qeth_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};

static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
	int retries = 3;
	int rc;

	QETH_CARD_TEXT(card, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
retry:
	if (retries < 3)
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	rc = qeth_start_channel(&card->read);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->write);
	if (rc)
		goto retriable;
	rc = qeth_start_channel(&card->data);
	if (rc)
		goto retriable;
retriable:
	if (rc == -ERESTARTSYS) {
		QETH_CARD_TEXT(card, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	qeth_determine_capabilities(card);
	qeth_read_ccw_conf_data(card);
	qeth_idx_init(card);

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	card->read_or_write_problem = 0;
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out;
	}

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		if (rc == -ENETDOWN) {
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
			*carrier_ok = false;
		} else {
			goto out;
		}
	} else {
		*carrier_ok = true;
	}

	card->options.ipa4.supported = 0;
	card->options.ipa6.supported = 0;
	card->options.adp.supported = 0;
	card->options.sbp.supported_funcs = 0;
	card->info.diagass_support = 0;
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
		if (rc)
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
	}

	qeth_trace_features(card);

	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
		card->info.hwtrap = 0;

	if (card->options.isolation != ISOLATION_MODE_NONE) {
		rc = qeth_setadpparms_set_access_ctrl(card,
						      card->options.isolation);
		if (rc)
			goto out;
	}

	qeth_init_link_info(card);

	rc = qeth_init_qdio_queues(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		goto out;
	}

	return 0;
out:
	dev_warn(&card->gdev->dev,
		 "The qeth device driver failed to recover an error on the device\n");
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
	return rc;
}

static int qeth_set_online(struct qeth_card *card,
			   const struct qeth_discipline *disc)
{
	bool carrier_ok;
	int rc;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 2, "setonlin");

	rc = qeth_hardsetup_card(card, &carrier_ok);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
		rc = -ENODEV;
		goto err_hardsetup;
	}

	qeth_print_status_message(card);

	if (card->dev->reg_state != NETREG_REGISTERED)
		/* no need for locking / error handling at this early stage: */
		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));

	rc = disc->set_online(card, carrier_ok);
	if (rc)
		goto err_online;

	/* let user_space know that device is online */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;

err_online:
err_hardsetup:
	qeth_qdio_clear_card(card, 0);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);

	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));

	mutex_unlock(&card->conf_mutex);
	return rc;
}

int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
		     bool resetting)
{
	int rc, rc2, rc3;

	mutex_lock(&card->conf_mutex);
	QETH_CARD_TEXT(card, 3, "setoffl");

	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
		card->info.hwtrap = 1;
	}

	/* cancel any stalled cmd that might block the rtnl: */
	qeth_clear_ipacmd_list(card);

	rtnl_lock();
	netif_device_detach(card->dev);
	netif_carrier_off(card->dev);
	rtnl_unlock();

	cancel_work_sync(&card->rx_mode_work);

	disc->set_offline(card);

	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qeth_clear_working_pool_list(card);
	qeth_flush_local_addrs(card);
	card->info.promisc_mode = 0;
	qeth_default_link_info(card);

	rc  = qeth_stop_channel(&card->data);
	rc2 = qeth_stop_channel(&card->write);
	rc3 = qeth_stop_channel(&card->read);
	if (!rc)
		rc = (rc2) ? rc2 : rc3;
	if (rc)
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
	qdio_free(CARD_DDEV(card));

	/* let user_space know that device is offline */
	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);

	mutex_unlock(&card->conf_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);

static int qeth_do_reset(void *data)
{
	const struct qeth_discipline *disc;
	struct qeth_card *card = data;
	int rc;

	/* Lock-free, other users will block until we are done. */
	disc = card->discipline;

	QETH_CARD_TEXT(card, 2, "recover1");
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;
	QETH_CARD_TEXT(card, 2, "recover2");
	dev_warn(&card->gdev->dev,
		 "A recovery process has been started for the device\n");

	qeth_set_offline(card, disc, true);
	rc = qeth_set_online(card, disc);
	if (!rc) {
		dev_info(&card->gdev->dev,
			 "Device successfully recovered!\n");
	} else {
		qeth_set_offline(card, disc, true);
		ccwgroup_set_offline(card->gdev, false);
		dev_warn(&card->gdev->dev,
			 "The qeth device driver failed to recover an error on the device\n");
	}
	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}

#if IS_ENABLED(CONFIG_QETH_L3)
static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
				struct qeth_hdr *hdr)
{
	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
	struct net_device *dev = skb->dev;

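	/* L3 frames arrive without an Ethernet header; rebuild one from the
	 * qeth header so that the stack sees a regular frame. "FAKELL" serves
	 * as a dummy source address where none is known.
	 */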
5482	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5483		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5484				"FAKELL", skb->len);
5485		return;
5486	}
5487
5488	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5489		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5490							     ETH_P_IP;
5491		unsigned char tg_addr[ETH_ALEN];
5492
5493		skb_reset_network_header(skb);
5494		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5495		case QETH_CAST_MULTICAST:
5496			if (prot == ETH_P_IP)
5497				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5498			else
5499				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5500			QETH_CARD_STAT_INC(card, rx_multicast);
5501			break;
5502		case QETH_CAST_BROADCAST:
5503			ether_addr_copy(tg_addr, dev->broadcast);
5504			QETH_CARD_STAT_INC(card, rx_multicast);
5505			break;
5506		default:
5507			if (card->options.sniffer)
5508				skb->pkt_type = PACKET_OTHERHOST;
5509			ether_addr_copy(tg_addr, dev->dev_addr);
5510		}
5511
5512		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5513			dev_hard_header(skb, dev, prot, tg_addr,
5514					&l3_hdr->next_hop.rx.src_mac, skb->len);
5515		else
5516			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5517					skb->len);
5518	}
5519
5520	/* copy VLAN tag from hdr into skb */
5521	if (!card->options.sniffer &&
5522	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5523				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5524		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5525				l3_hdr->vlan_id :
5526				l3_hdr->next_hop.rx.vlan_id;
5527
5528		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5529	}
5530}
5531#endif
5532
5533static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5534			     bool uses_frags, bool is_cso)
5535{
5536	struct napi_struct *napi = &card->napi;
5537
5538	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5539		skb->ip_summed = CHECKSUM_UNNECESSARY;
5540		QETH_CARD_STAT_INC(card, rx_skb_csum);
5541	} else {
5542		skb->ip_summed = CHECKSUM_NONE;
5543	}
5544
5545	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5546	QETH_CARD_STAT_INC(card, rx_packets);
5547	if (skb_is_nonlinear(skb)) {
5548		QETH_CARD_STAT_INC(card, rx_sg_skbs);
5549		QETH_CARD_STAT_ADD(card, rx_sg_frags,
5550				   skb_shinfo(skb)->nr_frags);
5551	}
5552
5553	if (uses_frags) {
5554		napi_gro_frags(napi);
5555	} else {
5556		skb->protocol = eth_type_trans(skb, skb->dev);
5557		napi_gro_receive(napi, skb);
5558	}
5559}
5560
5561static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5562{
5563	struct page *page = virt_to_page(data);
5564	unsigned int next_frag;
5565
5566	next_frag = skb_shinfo(skb)->nr_frags;
5567	get_page(page);
5568	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5569			data_len);
5570}
5571
5572static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5573{
5574	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5575}
5576
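/* Parse the next packet out of a QDIO input buffer. Returns 0 on success,
 * -ENODATA once the buffer holds no further packets, and another negative
 * errno when the whole buffer must be dropped. *element_no and *__offset
 * preserve the parse position across calls.
 */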
static int qeth_extract_skb(struct qeth_card *card,
			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
			    int *__offset)
{
	struct qeth_priv *priv = netdev_priv(card->dev);
	struct qdio_buffer *buffer = qethbuffer->buffer;
	struct napi_struct *napi = &card->napi;
	struct qdio_buffer_element *element;
	unsigned int linear_len = 0;
	bool uses_frags = false;
	int offset = *__offset;
	bool use_rx_sg = false;
	unsigned int headroom;
	struct qeth_hdr *hdr;
	struct sk_buff *skb;
	int skb_len = 0;
	bool is_cso;

	element = &buffer->element[*element_no];

next_packet:
	/* qeth_hdr must not cross element boundaries */
	while (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return -ENODATA;
		element++;
		offset = 0;
	}

	hdr = dma64_to_virt(element->addr) + offset;
	offset += sizeof(*hdr);
	skb = NULL;

	switch (hdr->hdr.l2.id) {
	case QETH_HEADER_TYPE_LAYER2:
		skb_len = hdr->hdr.l2.pkt_length;
		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;

		linear_len = ETH_HLEN;
		headroom = 0;
		break;
	case QETH_HEADER_TYPE_LAYER3:
		skb_len = hdr->hdr.l3.length;
		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;

		if (!IS_LAYER3(card)) {
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
			goto walk_packet;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
			linear_len = ETH_HLEN;
			headroom = 0;
			break;
		}

		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
			linear_len = sizeof(struct ipv6hdr);
		else
			linear_len = sizeof(struct iphdr);
		headroom = ETH_HLEN;
		break;
	default:
		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
			QETH_CARD_STAT_INC(card, rx_frame_errors);
		else
			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);

		/* Can't determine packet length, drop the whole buffer. */
		return -EPROTONOSUPPORT;
	}

	if (skb_len < linear_len) {
		QETH_CARD_STAT_INC(card, rx_dropped_runt);
		goto walk_packet;
	}

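	/* Receive into page frags when the CQ is active, or when the frame
	 * is large enough that copying it into a linear skb (rx_copybreak)
	 * would cost more than attaching its pages as frags - unless a
	 * caller has temporarily forced linear allocations.
	 */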
	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
		     !atomic_read(&card->force_alloc_skb));

	if (use_rx_sg) {
		/* QETH_CQ_ENABLED only: */
		if (qethbuffer->rx_skb &&
		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
			skb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
			goto use_skb;
		}

		skb = napi_get_frags(napi);
		if (!skb) {
			/* -ENOMEM, no point in falling back further. */
			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
			goto walk_packet;
		}

		if (skb_tailroom(skb) >= linear_len + headroom) {
			uses_frags = true;
			goto use_skb;
		}

		netdev_info_once(card->dev,
				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
				 linear_len + headroom, skb_tailroom(skb));
		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
	}

	linear_len = skb_len;
	skb = napi_alloc_skb(napi, linear_len + headroom);
	if (!skb) {
		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
		goto walk_packet;
	}

use_skb:
	if (headroom)
		skb_reserve(skb, headroom);
walk_packet:
	while (skb_len) {
		int data_len = min(skb_len, (int)(element->length - offset));
		char *data = dma64_to_virt(element->addr) + offset;

		skb_len -= data_len;
		offset += data_len;

		/* Extract data from current element: */
		if (skb && data_len) {
			if (linear_len) {
				unsigned int copy_len;

				copy_len = min_t(unsigned int, linear_len,
						 data_len);

				skb_put_data(skb, data, copy_len);
				linear_len -= copy_len;
				data_len -= copy_len;
				data += copy_len;
			}

			if (data_len)
				qeth_create_skb_frag(skb, data, data_len);
		}

		/* Step forward to next element: */
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
				QETH_CARD_TEXT(card, 4, "unexeob");
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
				if (skb) {
					if (uses_frags)
						napi_free_frags(napi);
					else
						kfree_skb(skb);
					QETH_CARD_STAT_INC(card,
							   rx_length_errors);
				}
				return -EMSGSIZE;
			}
			element++;
			offset = 0;
		}
	}

	/* This packet was skipped, go get another one: */
	if (!skb)
		goto next_packet;

	*element_no = element - &buffer->element[0];
	*__offset = offset;

#if IS_ENABLED(CONFIG_QETH_L3)
	if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER3)
		qeth_l3_rebuild_skb(card, skb, hdr);
#endif

	qeth_receive_skb(card, skb, uses_frags, is_cso);
	return 0;
}

static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
				      struct qeth_qdio_buffer *buf, bool *done)
{
	unsigned int work_done = 0;

	while (budget) {
		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
				     &card->rx.e_offset)) {
			*done = true;
			break;
		}

		work_done++;
		budget--;
	}

	return work_done;
}

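/* RX half of the main NAPI poll: fetch completed input buffers, extract
 * skbs until the budget is spent, and return fully-drained buffers to the
 * pool while topping up the refill backlog.
 */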
static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
{
	struct qeth_rx *ctx = &card->rx;
	unsigned int work_done = 0;

	while (budget > 0) {
		struct qeth_qdio_buffer *buffer;
		unsigned int skbs_done = 0;
		bool done = false;

		/* Fetch completed RX buffers: */
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count =
				qdio_inspect_input_queue(CARD_DDEV(card), 0,
							 &card->rx.b_index,
							 &card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
		}

		/* Process one completed RX buffer: */
		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
		if (!(card->rx.qdio_err &&
		      qeth_check_qdio_errors(card, buffer->buffer,
					     card->rx.qdio_err, "qinerr")))
			skbs_done = qeth_extract_skbs(card, budget, buffer,
						      &done);
		else
			done = true;

		work_done += skbs_done;
		budget -= skbs_done;

		if (done) {
			QETH_CARD_STAT_INC(card, rx_bufs);
			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
			buffer->pool_entry = NULL;
			card->rx.b_count--;
			ctx->bufs_refill++;
			ctx->bufs_refill -= qeth_rx_refill_queue(card,
								 ctx->bufs_refill);

			/* Step forward to next buffer: */
			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
			card->rx.buf_element = 0;
			card->rx.e_offset = 0;
		}
	}

	return work_done;
}

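/* Drain the completion queue (input queue 1), which carries the QAOBs for
 * TX buffers that completed asynchronously.
 */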
static void qeth_cq_poll(struct qeth_card *card)
{
	unsigned int work_done = 0;

	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
		unsigned int start, error;
		int completed;

		completed = qdio_inspect_input_queue(CARD_DDEV(card), 1, &start,
						     &error);
		if (completed <= 0)
			return;

		qeth_qdio_cq_handler(card, error, 1, start, completed);
		work_done += completed;
	}
}

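/* Main NAPI poll: process RX, kick the per-queue TX NAPIs when TX
 * completion IRQs are in use, and drain the completion queue. Only RX
 * work counts against the budget; if budget remains, complete the
 * instance and re-enable the QDIO interrupt, re-scheduling ourselves
 * when more work raced in.
 */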
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	unsigned int work_done;

	work_done = qeth_rx_poll(card, budget);

	if (qeth_use_tx_irqs(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		qeth_for_each_output_queue(card, queue, i) {
			if (!qeth_out_queue_is_empty(queue))
				napi_schedule(&queue->napi);
		}
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_cq_poll(card);

	if (budget) {
		struct qeth_rx *ctx = &card->rx;

		/* Process any substantial refill backlog: */
		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);

		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
		if (work_done >= budget)
			return work_done;
	}

	if (napi_complete_done(napi, work_done) &&
	    qdio_start_irq(CARD_DDEV(card)))
		napi_schedule(napi);

	return work_done;
}
EXPORT_SYMBOL_GPL(qeth_poll);

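/* TX completion for IQD devices. A buffer that completes with
 * QDIO_ERROR_SLSB_PENDING is still in flight; its final status arrives
 * later via a QAOB on the completion queue. Depending on whether that
 * QAOB was already processed, either finalize the buffer right away or
 * park it on pending_bufs and give the queue slot a fresh buffer.
 */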
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, unsigned int qdio_error,
				 int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;
	bool error = !!qdio_error;

	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
		struct qaob *aob = buffer->aob;
		struct qeth_qaob_priv1 *priv;
		enum iucv_tx_notify notify;

		if (!aob) {
			netdev_WARN_ONCE(card->dev,
					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
					 bidx, queue->queue_no);
			qeth_schedule_recovery(card);
			return;
		}

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		priv = (struct qeth_qaob_priv1 *)&aob->user1;
		/* QAOB hasn't completed yet: */
		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

			/* Prepare the queue slot for immediate re-use: */
			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
			}

			list_add(&buffer->list_entry, &queue->pending_bufs);
			/* Skip clearing the buffer: */
			return;
		}

		/* QAOB already completed: */
		notify = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(queue, buffer, notify);
		error = !!aob->aorc;
		memset(aob, 0, sizeof(*aob));
	} else if (card->options.cq == QETH_CQ_ENABLED) {
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	}

	qeth_clear_output_buffer(queue, buffer, error, budget);
}

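/* NAPI poll for one TX queue: drains completed buffers, feeds BQL on IQD,
 * and arms the rescan timer when QDIO reports no progress. Completion work
 * is not charged against the NAPI budget; instead the loop yields on its
 * own after a full ring's worth of buffers.
 */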
static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	if (IS_IQD(card))
		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
	else
		txq = netdev_get_tx_queue(dev, queue_no);

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		qeth_tx_complete_pending_bufs(card, queue, false, budget);

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_output_queue(CARD_DDEV(card), queue_no,
						      &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0) &&
			    !atomic_read(&queue->set_pci_flags_count))
				qeth_tx_arm_timer(queue, queue->rescan_usecs);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += buffer->frames;
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			if (IS_IQD(card))
				qeth_iqd_tx_complete(queue, bidx, error, budget);
			else
				qeth_clear_output_buffer(queue, buffer, error,
							 budget);
		}

		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;
		if (IS_IQD(card))
			netdev_tx_completed_queue(txq, packets, bytes);
		else
			qeth_check_outbound_queue(queue);

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In that case the wake-up below won't
		 * trigger; so before returning, xmit re-checks the txq's
		 * fill level and wakes it up itself if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}

static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	return cmd->hdr.return_code;
}

static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}

int qeth_setassparms_cb(struct qeth_card *card,
			struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");

	if (cmd->hdr.return_code)
		return -EIO;

	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);

struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);

int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);

static void qeth_unregister_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		debug_unregister(qeth_dbf[x].id);
		qeth_dbf[x].id = NULL;
	}
}

void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vscnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);

static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}

		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}

		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}

	return 0;
}

static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */

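/* Load and bind the layer discipline (L2 or L3). symbol_get() pins the
 * discipline module, so a bound discipline cannot be unloaded until
 * qeth_remove_discipline() drops the reference again.
 */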
int qeth_setup_discipline(struct qeth_card *card,
			  enum qeth_discipline_id discipline)
{
	int rc;

	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev,
			"There is no kernel module to support discipline %d\n",
			discipline);
		return -EINVAL;
	}

	rc = card->discipline->setup(card->gdev);
	if (rc) {
		if (discipline == QETH_DISCIPLINE_LAYER2)
			symbol_put(qeth_l2_discipline);
		else
			symbol_put(qeth_l3_discipline);
		card->discipline = NULL;

		return rc;
	}

	card->options.layer = discipline;
	return 0;
}

void qeth_remove_discipline(struct qeth_card *card)
{
	card->discipline->remove(card->gdev);

	if (IS_LAYER2(card))
		symbol_put(qeth_l2_discipline);
	else
		symbol_put(qeth_l3_discipline);
	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
	card->discipline = NULL;
}

static const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
};

#define DBF_NAME_LEN	20

struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);

static debug_info_t *qeth_get_dbf_entry(char *name)
{
	struct qeth_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qeth_dbf_list_mutex);
	return rc;
}

static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
{
	struct qeth_dbf_entry *new_entry;

	card->debug = debug_register(name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		goto err;
	}
	if (debug_register_view(card->debug, &debug_hex_ascii_view))
		goto err_dbg;
	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
	if (!new_entry)
		goto err_dbg;
	strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name));
	new_entry->dbf_info = card->debug;
	mutex_lock(&qeth_dbf_list_mutex);
	list_add(&new_entry->dbf_list, &qeth_dbf_list);
	mutex_unlock(&qeth_dbf_list_mutex);

	return 0;

err_dbg:
	debug_unregister(card->debug);
err:
	return -ENOMEM;
}

static void qeth_clear_dbf_list(void)
{
	struct qeth_dbf_entry *entry, *tmp;

	mutex_lock(&qeth_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qeth_dbf_list_mutex);
}

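/* Allocate a net_device matching the card type: IQD gets an Ethernet-style
 * device with multiple TX queues (one of them reserved for mcast), OSM is
 * limited to a single TX queue, and everything else is a regular
 * multi-queue Ethernet device.
 */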
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;
	struct qeth_priv *priv;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(sizeof(*priv));
		break;
	default:
		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	priv->rx_copybreak = QETH_RX_COPYBREAK;
	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = 576;
	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	dev->ethtool_ops = &qeth_ethtool_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;
	if (IS_IQD(card))
		dev->features |= NETIF_F_SG;

	return dev;
}

struct net_device *qeth_clone_netdev(struct net_device *orig)
{
	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);

	if (!clone)
		return NULL;

	clone->dev_port = orig->dev_port;
	return clone;
}

static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	scnprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		  dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q) {
		rc = -ENOMEM;
		goto err_rx_queue;
	}

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;

	gdev->dev.groups = qeth_dev_groups;

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		/* It's so early that we don't need the discipline_mutex yet. */
		rc = qeth_setup_discipline(card, enforced_disc);
		if (rc)
			goto err_setup_disc;

		break;
	}

	return 0;

err_setup_disc:
err_chp_desc:
	qeth_free_qdio_queue(card->qdio.in_q);
err_rx_queue:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}

static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	mutex_lock(&card->discipline_mutex);
	if (card->discipline)
		qeth_remove_discipline(card);
	mutex_unlock(&card->discipline_mutex);

	qeth_free_qdio_queues(card);

	qeth_free_qdio_queue(card->qdio.in_q);
	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}

static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	mutex_lock(&card->discipline_mutex);
	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_setup_discipline(card, def_discipline);
		if (rc)
			goto err;
	}

	rc = qeth_set_online(card, card->discipline);

err:
	mutex_unlock(&card->discipline_mutex);
	return rc;
}

static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc;

	mutex_lock(&card->discipline_mutex);
	rc = qeth_set_offline(card, card->discipline, false);
	mutex_unlock(&card->discipline_mutex);

	return rc;
}

static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}

static ssize_t group_store(struct device_driver *ddrv, const char *buf,
			   size_t count)
{
	int err;

	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
				  buf);

	return err ? err : count;
}
static DRIVER_ATTR_WO(group);

static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};

static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
};

int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	int rc = 0;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, data);
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_siocdevprivate);

int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);

static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
			      unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	u32 *features = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	*features = cmd->data.setassparms.data.flags_32bit;
	return 0;
}

static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			     enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
						 NULL, prot);
}

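/* Enabling HW checksumming is a three-step IPA sequence: ASS_START reports
 * the supported features, ASS_ENABLE activates the required subset, and
 * the returned caps confirm that the assist is both supported and enabled.
 * Any failure along the way rolls back via ASS_STOP.
 */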
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot, u8 *lp2lp)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);

	if (lp2lp)
		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);

	return 0;
}

static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
			     enum qeth_prot_versions prot, u8 *lp2lp)
{
	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
		    qeth_set_csum_off(card, cstype, prot);
}

static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
			     unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_tso_start_data *tso_data = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	tso_data->mss = cmd->data.setassparms.data.tso.mss;
	tso_data->supported = cmd->data.setassparms.data.tso.supported;
	return 0;
}

static int qeth_set_tso_off(struct qeth_card *card,
			    enum qeth_prot_versions prot)
{
	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
						 IPA_CMD_ASS_STOP, NULL, prot);
}

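/* TSO uses the same START/ENABLE/verify sequence as checksum offload, with
 * the HW-advertised MSS as an additional sanity check.
 */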
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}

static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
			    enum qeth_prot_versions prot)
{
	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
}

static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4, NULL);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* at most one Offload Assist is available, so the combined
		 * rc is trivial:
		 */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6, NULL);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}

/**
 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
 * @dev:	a net_device
 */
void qeth_enable_hw_features(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t features;

	features = dev->features;
	/* force-off any feature that might need an IPA sequence.
	 * netdev_update_features() will restart them.
	 */
	dev->features &= ~dev->hw_features;
	/* toggle VLAN filter, so that VIDs are re-programmed: */
	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	}
	netdev_update_features(dev);
	if (features != dev->features)
		dev_warn(&card->gdev->dev,
			 "Device recovery failed to restore all offload features\n");
}
EXPORT_SYMBOL_GPL(qeth_enable_hw_features);

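/* Local-address tracking only matters while the lp2lp-restricted offloads
 * are active; once such an offload is no longer in effect, drop the cached
 * addresses for that IP version.
 */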
static void qeth_check_restricted_features(struct qeth_card *card,
					   netdev_features_t changed,
					   netdev_features_t actual)
{
	netdev_features_t ipv6_features = NETIF_F_TSO6;
	netdev_features_t ipv4_features = NETIF_F_TSO;

	if (!card->info.has_lp2lp_cso_v6)
		ipv6_features |= NETIF_F_IPV6_CSUM;
	if (!card->info.has_lp2lp_cso_v4)
		ipv4_features |= NETIF_F_IP_CSUM;

	if ((changed & ipv6_features) && !(actual & ipv6_features))
		qeth_flush_local_addrs6(card);
	if ((changed & ipv4_features) && !(actual & ipv4_features))
		qeth_flush_local_addrs4(card);
}

int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

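	/* 'changed' starts out as the set of feature bits that need to flip.
	 * Each toggle that fails in HW is XORed back out, so at the end
	 * 'changed' holds exactly the bits that did flip and dev->features
	 * can be patched up accordingly.
	 */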
	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
				       &card->info.has_lp2lp_cso_v4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
				       &card->info.has_lp2lp_cso_v6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	qeth_check_restricted_features(card, dev->features ^ features,
				       dev->features ^ changed);

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);

netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);

netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	/* Traffic with local next-hop is not eligible for some offloads: */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
		netdev_features_t restricted = 0;

		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
			restricted |= NETIF_F_ALL_TSO;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			if (!card->info.has_lp2lp_cso_v4)
				restricted |= NETIF_F_IP_CSUM;

			if (restricted && qeth_next_hop_is_local_v4(card, skb))
				features &= ~restricted;
			break;
		case htons(ETH_P_IPV6):
			if (!card->info.has_lp2lp_cso_v6)
				restricted |= NETIF_F_IPV6_CSUM;

			if (restricted && qeth_next_hop_is_local_v6(card, skb))
				features &= ~restricted;
			break;
		default:
			break;
		}
	}

	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);

void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 5, "getstat");

	stats->rx_packets = card->stats.rx_packets;
	stats->rx_bytes = card->stats.rx_bytes;
	stats->rx_errors = card->stats.rx_length_errors +
			   card->stats.rx_frame_errors +
			   card->stats.rx_fifo_errors;
	stats->rx_dropped = card->stats.rx_dropped_nomem +
			    card->stats.rx_dropped_notsupp +
			    card->stats.rx_dropped_runt;
	stats->multicast = card->stats.rx_multicast;
	stats->rx_length_errors = card->stats.rx_length_errors;
	stats->rx_frame_errors = card->stats.rx_frame_errors;
	stats->rx_fifo_errors = card->stats.rx_fifo_errors;

	for (i = 0; i < card->qdio.no_out_queues; i++) {
		queue = card->qdio.out_qs[i];

		stats->tx_packets += queue->stats.tx_packets;
		stats->tx_bytes += queue->stats.tx_bytes;
		stats->tx_errors += queue->stats.tx_errors;
		stats->tx_dropped += queue->stats.tx_dropped;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST   0
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int prio;

	/* IQD requires mcast traffic to be placed on a dedicated queue, and
	 * qeth_iqd_select_queue() deals with this.
	 * For unicast traffic, we defer the queue selection to the stack.
	 * By installing a trivial prio map that spans over only the unicast
	 * queues, we can encourage the stack to spread the ucast traffic evenly
	 * without selecting the mcast queue.
	 */

	/* One traffic class, spanning over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Map all priorities to this traffic class: */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
}

int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), adjust the mapping first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);

	if (rc && IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);

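/* TX queue selection for IQD: all non-unicast traffic is pinned to the
 * dedicated mcast queue, while unicast traffic picks from the remaining
 * queues (remapping any collision with the mcast queue index).
 */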
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;
	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
			  struct net_device *sb_dev)
{
	struct qeth_card *card = dev->ml_priv;

	if (qeth_uses_tx_prio_queueing(card))
		return qeth_get_priority_queue(card, skb);

	return netdev_pick_tx(dev, skb, sb_dev);
}
EXPORT_SYMBOL_GPL(qeth_osa_select_queue);

int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethopen");

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	local_bh_disable();
	qeth_for_each_output_queue(card, queue, i) {
		netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll);
		napi_enable(&queue->napi);
		napi_schedule(&queue->napi);
	}

	napi_enable(&card->napi);
	napi_schedule(&card->napi);
	/* kick-start the NAPI softirq: */
	local_bh_enable();

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);

int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;
	struct qeth_qdio_out_q *queue;
	unsigned int i;

	QETH_CARD_TEXT(card, 4, "qethstop");

	napi_disable(&card->napi);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	qdio_stop_irq(CARD_DDEV(card));

	/* Quiesce the NAPI instances: */
	qeth_for_each_output_queue(card, queue, i)
		napi_disable(&queue->napi);

	/* Stop .ndo_start_xmit, might still access queue->napi. */
	netif_tx_disable(dev);

	qeth_for_each_output_queue(card, queue, i) {
		del_timer_sync(&queue->timer);
		/* Queues may get re-allocated, so remove the NAPIs. */
		netif_napi_del(&queue->napi);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);

static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}

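	/* QAOBs are handed to HW; match the slab's alignment to the object
	 * size so that objects never straddle the alignment boundary that
	 * struct qaob's declaration requires.
	 */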
	qeth_qaob_cache = kmem_cache_create("qeth_qaob",
					    sizeof(struct qaob),
					    sizeof(struct qaob),
					    0, NULL);
	if (!qeth_qaob_cache) {
		rc = -ENOMEM;
		goto qaob_err;
	}

	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qaob_cache);
qaob_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}

static void __exit qeth_core_exit(void)
{
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qaob_cache);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	debugfs_remove_recursive(qeth_debugfs_root);
	pr_info("core functions removed\n");
}

module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");