/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include "c2.h"
#include "c2_provider.h"

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int c2_up(struct net_device *netdev);
static int c2_down(struct net_device *netdev);
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static void c2_tx_interrupt(struct net_device *netdev);
static void c2_rx_interrupt(struct net_device *netdev);
static irqreturn_t c2_interrupt(int irq, void *dev_id);
static void c2_tx_timeout(struct net_device *netdev);
static int c2_change_mtu(struct net_device *netdev, int new_mtu);
static void c2_reset(struct c2_port *c2_port);
static struct net_device_stats *c2_get_stats(struct net_device *netdev);

static struct pci_device_id c2_pci_table[] = {
	{ PCI_DEVICE(0x18b8, 0xb001) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, c2_pci_table);

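/* Log the interface's MAC address and IRQ at debug level */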
static void c2_print_macaddr(struct net_device *netdev)
{
	pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
		"IRQ %u\n", netdev->name,
		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
		netdev->irq);
}

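/*
 * Size receive buffers from the current MTU: oversized MTUs get a
 * buffer large enough for the frame plus the adapter's RXP header,
 * otherwise the fixed RX_BUF_SIZE (plus header) is used.
 */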
static void c2_set_rxbufsize(struct c2_port *c2_port)
{
	struct net_device *netdev = c2_port->netdev;

	if (netdev->mtu > RX_BUF_SIZE)
		c2_port->rx_buf_size =
		    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
		    NET_IP_ALIGN;
	else
		c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
}

/*
 * Allocate TX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
			    dma_addr_t base, void __iomem * mmio_txp_ring)
{
	struct c2_tx_desc *tx_desc;
	struct c2_txp_desc __iomem *txp_desc;
	struct c2_element *elem;
	int i;

	tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
	if (!tx_ring->start)
		return -ENOMEM;

	elem = tx_ring->start;
	tx_desc = vaddr;
	txp_desc = mmio_txp_ring;
	for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
		tx_desc->len = 0;
		tx_desc->status = 0;

		/* Set TXP_HTXD_UNINIT */
		__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
			     (void __iomem *) txp_desc + C2_TXP_ADDR);
		__raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
		__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
			     (void __iomem *) txp_desc + C2_TXP_FLAGS);

		elem->skb = NULL;
		elem->ht_desc = tx_desc;
		elem->hw_desc = txp_desc;

		if (i == tx_ring->count - 1) {
			elem->next = tx_ring->start;
			tx_desc->next_offset = base;
		} else {
			elem->next = elem + 1;
			tx_desc->next_offset =
			    base + (i + 1) * sizeof(*tx_desc);
		}
	}

	tx_ring->to_use = tx_ring->to_clean = tx_ring->start;

	return 0;
}

/*
 * Allocate RX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
			    dma_addr_t base, void __iomem * mmio_rxp_ring)
{
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_desc __iomem *rxp_desc;
	struct c2_element *elem;
	int i;

	rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
	if (!rx_ring->start)
		return -ENOMEM;

	elem = rx_ring->start;
	rx_desc = vaddr;
	rxp_desc = mmio_rxp_ring;
	for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
		rx_desc->len = 0;
		rx_desc->status = 0;

		/* Set RXP_HRXD_UNINIT */
		__raw_writew(cpu_to_be16(RXP_HRXD_OK),
		       (void __iomem *) rxp_desc + C2_RXP_STATUS);
		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
		__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
			     (void __iomem *) rxp_desc + C2_RXP_ADDR);
		__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
			     (void __iomem *) rxp_desc + C2_RXP_FLAGS);

		elem->skb = NULL;
		elem->ht_desc = rx_desc;
		elem->hw_desc = rxp_desc;

		if (i == rx_ring->count - 1) {
			elem->next = rx_ring->start;
			rx_desc->next_offset = base;
		} else {
			elem->next = elem + 1;
			rx_desc->next_offset =
			    base + (i + 1) * sizeof(*rx_desc);
		}
	}

	rx_ring->to_use = rx_ring->to_clean = rx_ring->start;

	return 0;
}

/* Setup buffer for receiving */
static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_rx_desc *rx_desc = elem->ht_desc;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen;
	struct c2_rxp_hdr *rxp_hdr;

	skb = dev_alloc_skb(c2_port->rx_buf_size);
	if (unlikely(!skb)) {
		pr_debug("%s: out of memory for receive\n",
			c2_port->netdev->name);
		return -ENOMEM;
	}

	/* Zero out the rxp hdr in the sk_buff */
	memset(skb->data, 0, sizeof(*rxp_hdr));

	skb->dev = c2_port->netdev;

	maplen = c2_port->rx_buf_size;
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen,
			   PCI_DMA_FROMDEVICE);

	/* Set the sk_buff RXP_header to RXP_HRXD_READY */
	rxp_hdr = (struct c2_rxp_hdr *) skb->data;
	rxp_hdr->flags = RXP_HRXD_READY;

	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
	__raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
		     elem->hw_desc + C2_RXP_LEN);
	__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
	__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);

	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;
	rx_desc->len = maplen;

	return 0;
}

/*
 * Allocate buffers for the Rx ring
 * For receive:  rx_ring.to_clean is next received frame
 */
static int c2_rx_fill(struct c2_port *c2_port)
{
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	int ret = 0;

	elem = rx_ring->start;
	do {
		if (c2_rx_alloc(c2_port, elem)) {
			ret = 1;
			break;
		}
	} while ((elem = elem->next) != rx_ring->start);

	rx_ring->to_clean = rx_ring->start;
	return ret;
}

/* Free all buffers in RX ring, assumes receiver stopped */
static void c2_rx_clean(struct c2_port *c2_port)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;

	elem = rx_ring->start;
	do {
		rx_desc = elem->ht_desc;
		rx_desc->len = 0;

		__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
		__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
		__raw_writew(0, elem->hw_desc + C2_RXP_LEN);
		__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
			     elem->hw_desc + C2_RXP_ADDR);
		__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
			     elem->hw_desc + C2_RXP_FLAGS);

		if (elem->skb) {
			pci_unmap_single(c2dev->pcidev, elem->mapaddr,
					 elem->maplen, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(elem->skb);
			elem->skb = NULL;
		}
	} while ((elem = elem->next) != rx_ring->start);
}

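/* Unmap a completed TX element's DMA buffer and free its sk_buff */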
static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
{
	struct c2_tx_desc *tx_desc = elem->ht_desc;

	tx_desc->len = 0;

	pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
			 PCI_DMA_TODEVICE);

	if (elem->skb) {
		dev_kfree_skb_any(elem->skb);
		elem->skb = NULL;
	}

	return 0;
}

/* Free all buffers in TX ring, assumes transmitter stopped */
static void c2_tx_clean(struct c2_port *c2_port)
{
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	struct c2_txp_desc txp_htxd;
	int retry;
	unsigned long flags;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	elem = tx_ring->start;

	do {
		retry = 0;
		do {
			txp_htxd.flags =
			    readw(elem->hw_desc + C2_TXP_FLAGS);

			if (txp_htxd.flags == TXP_HTXD_READY) {
				retry = 1;
				__raw_writew(0,
					     elem->hw_desc + C2_TXP_LEN);
				__raw_writeq(0,
					     elem->hw_desc + C2_TXP_ADDR);
				__raw_writew(cpu_to_be16(TXP_HTXD_DONE),
					     elem->hw_desc + C2_TXP_FLAGS);
				c2_port->netstats.tx_dropped++;
				break;
			} else {
				__raw_writew(0,
					     elem->hw_desc + C2_TXP_LEN);
				__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
					     elem->hw_desc + C2_TXP_ADDR);
				__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
					     elem->hw_desc + C2_TXP_FLAGS);
			}

			c2_tx_free(c2_port->c2dev, elem);

		} while ((elem = elem->next) != tx_ring->start);
	} while (retry);

	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;

	if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(c2_port->netdev);

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);
}

/*
 * Process transmit descriptors marked 'DONE' by the firmware,
 * freeing up their unneeded sk_buffs.
 */
static void c2_tx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	struct c2_txp_desc txp_htxd;

	spin_lock(&c2_port->tx_lock);

	for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
	     elem = elem->next) {
		txp_htxd.flags =
		    be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS));

		if (txp_htxd.flags != TXP_HTXD_DONE)
			break;

		if (netif_msg_tx_done(c2_port)) {
			/* PCI reads are expensive in fast path */
			txp_htxd.len =
			    be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN));
			pr_debug("%s: tx done slot %3Zu status 0x%x len "
				"%5u bytes\n",
				netdev->name, elem - tx_ring->start,
				txp_htxd.flags, txp_htxd.len);
		}

		c2_tx_free(c2dev, elem);
		++(c2_port->tx_avail);
	}

	tx_ring->to_clean = elem;

	if (netif_queue_stopped(netdev)
	    && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(netdev);

	spin_unlock(&c2_port->tx_lock);
}

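/*
 * Drop a bad receive: dump the offending RXP header at debug level,
 * then rearm the element's existing skb and DMA mapping so the slot
 * can be reused by the adapter.
 */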
static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_rx_desc *rx_desc = elem->ht_desc;
	struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;

	if (rxp_hdr->status != RXP_HRXD_OK ||
	    rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
		pr_debug("BAD RXP_HRXD\n");
		pr_debug("  rx_desc : %p\n", rx_desc);
		pr_debug("    index : %Zu\n",
			elem - c2_port->rx_ring.start);
		pr_debug("    len   : %u\n", rx_desc->len);
		pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
			(void *) __pa((unsigned long) rxp_hdr));
		pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
		pr_debug("    status: 0x%x\n", rxp_hdr->status);
		pr_debug("    len   : %u\n", rxp_hdr->len);
		pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
	}

	/* Setup the skb for reuse since we're dropping this pkt */
	elem->skb->data = elem->skb->head;
	skb_reset_tail_pointer(elem->skb);

	/* Zero out the rxp hdr in the sk_buff */
	memset(elem->skb->data, 0, sizeof(*rxp_hdr));

	/* Write the descriptor to the adapter's rx ring */
	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
	__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
	__raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
		     elem->hw_desc + C2_RXP_LEN);
	__raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR);
	__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);

	pr_debug("packet dropped\n");
	c2_port->netstats.rx_dropped++;
}

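/*
 * Service the RX ring: walk descriptors the adapter has marked DONE,
 * replenish each slot with a fresh skb, and hand the received frames
 * to the stack via netif_rx().
 */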
static void c2_rx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_hdr *rxp_hdr;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen, buflen;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);

	/* Begin where we left off */
	rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;

	for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
	     elem = elem->next) {
		rx_desc = elem->ht_desc;
		mapaddr = elem->mapaddr;
		maplen = elem->maplen;
		skb = elem->skb;
		rxp_hdr = (struct c2_rxp_hdr *) skb->data;

		if (rxp_hdr->flags != RXP_HRXD_DONE)
			break;
		buflen = rxp_hdr->len;

		/* Sanity check the RXP header */
		if (rxp_hdr->status != RXP_HRXD_OK ||
		    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/*
		 * Allocate and map a new skb for replenishing the host
		 * RX desc
		 */
		if (c2_rx_alloc(c2_port, elem)) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/* Unmap the old skb */
		pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
				 PCI_DMA_FROMDEVICE);

		prefetch(skb->data);

		/*
		 * Skip past the leading 8 bytes comprising the
		 * "struct c2_rxp_hdr" that the adapter prepends to
		 * the usual Ethernet header ("struct ethhdr"), so
		 * skb->data points at the start of the raw Ethernet
		 * packet.
		 *
		 * Fix up the various fields in the sk_buff before
		 * passing it up to netif_rx(). The transfer size
		 * (in bytes) reported in the adapter's len field of
		 * the "struct c2_rxp_hdr" does NOT include
		 * sizeof(struct c2_rxp_hdr).
		 */
		skb->data += sizeof(*rxp_hdr);
		skb_set_tail_pointer(skb, buflen);
		skb->len = buflen;
		skb->protocol = eth_type_trans(skb, netdev);

		netif_rx(skb);

		netdev->last_rx = jiffies;
		c2_port->netstats.rx_packets++;
		c2_port->netstats.rx_bytes += buflen;
	}

	/* Save where we left off */
	rx_ring->to_clean = elem;
	c2dev->cur_rx = elem - rx_ring->start;
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	spin_unlock_irqrestore(&c2dev->lock, flags);
}

/*
 * Handle netisr0 TX & RX interrupts.
 */
static irqreturn_t c2_interrupt(int irq, void *dev_id)
{
	unsigned int netisr0, dmaisr;
	int handled = 0;
	struct c2_dev *c2dev = (struct c2_dev *) dev_id;

	/* Process CCILNET interrupts */
	netisr0 = readl(c2dev->regs + C2_NISR0);
	if (netisr0) {

		/*
		 * There is an issue with the firmware that always
		 * provides the status of RX for both TX & RX
		 * interrupts.  So process both queues here.
		 */
		c2_rx_interrupt(c2dev->netdev);
		c2_tx_interrupt(c2dev->netdev);

		/* Clear the interrupt */
		writel(netisr0, c2dev->regs + C2_NISR0);
		handled++;
	}

	/* Process RNIC interrupts */
	dmaisr = readl(c2dev->regs + C2_DISR);
	if (dmaisr) {
		writel(dmaisr, c2dev->regs + C2_DISR);
		c2_rnic_interrupt(c2dev);
		handled++;
	}

	if (handled) {
		return IRQ_HANDLED;
	} else {
		return IRQ_NONE;
	}
}

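/*
 * Bring the interface up: allocate and populate the host TX/RX
 * descriptor rings, reset the adapter, unmask its interrupts, and
 * start the transmit queue.
 */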
static int c2_up(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_element *elem;
	struct c2_rxp_hdr *rxp_hdr;
	struct in_device *in_dev;
	size_t rx_size, tx_size;
	int ret, i;
	unsigned int netimr0;

	if (netif_msg_ifup(c2_port))
		pr_debug("%s: enabling interface\n", netdev->name);

	/* Set the Rx buffer size based on MTU */
	c2_set_rxbufsize(c2_port);

	/* Allocate DMA'able memory for Tx/Rx host descriptor rings */
	rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
	tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

	c2_port->mem_size = tx_size + rx_size;
	c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
					    &c2_port->dma);
	if (c2_port->mem == NULL) {
		pr_debug("Unable to allocate memory for "
			"host descriptor rings\n");
		return -ENOMEM;
	}

	memset(c2_port->mem, 0, c2_port->mem_size);

	/* Create the Rx host descriptor ring */
	if ((ret =
	     c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
			      c2dev->mmio_rxp_ring))) {
		pr_debug("Unable to create RX ring\n");
		goto bail0;
	}

	/* Allocate Rx buffers for the host descriptor ring */
	if (c2_rx_fill(c2_port)) {
		pr_debug("Unable to fill RX ring\n");
		goto bail1;
	}

	/* Create the Tx host descriptor ring */
	if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
				    c2_port->dma + rx_size,
				    c2dev->mmio_txp_ring))) {
		pr_debug("Unable to create TX ring\n");
		goto bail1;
	}

	/* Set the TX pointer to where we left off */
	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
	    c2_port->tx_ring.start + c2dev->cur_tx;

	/* missing: Initialize MAC */

	BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
	for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
	     i++, elem++) {
		rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
		rxp_hdr->flags = 0;
		__raw_writew(cpu_to_be16(RXP_HRXD_READY),
			     elem->hw_desc + C2_RXP_FLAGS);
	}

	/* Enable network packets */
	netif_start_queue(netdev);

	/* Enable IRQ */
	writel(0, c2dev->regs + C2_IDIS);
	netimr0 = readl(c2dev->regs + C2_NIMR0);
	netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
	writel(netimr0, c2dev->regs + C2_NIMR0);

	/* Tell the stack to ignore arp requests for ipaddrs bound to
	 * other interfaces.  This is needed to prevent the host stack
	 * from responding to arp requests to the ipaddr bound on the
	 * rdma interface.
	 */
	in_dev = in_dev_get(netdev);
	IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
	in_dev_put(in_dev);

	return 0;

      bail1:
	c2_rx_clean(c2_port);
	kfree(c2_port->rx_ring.start);

      bail0:
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return ret;
}

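/*
 * Take the interface down: drain outstanding transmits, mask
 * interrupts, reset the adapter, and release the descriptor rings.
 */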
static int c2_down(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;

	if (netif_msg_ifdown(c2_port))
		pr_debug("%s: disabling interface\n",
			netdev->name);

	/* Wait for all the queued packets to get sent */
	c2_tx_interrupt(netdev);

	/* Disable network packets */
	netif_stop_queue(netdev);

	/* Disable IRQs by clearing the interrupt mask */
	writel(1, c2dev->regs + C2_IDIS);
	writel(0, c2dev->regs + C2_NIMR0);

	/* missing: Stop transmitter */

	/* missing: Stop receiver */

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* missing: Turn off LEDs here */

	/* Free all buffers in the host descriptor rings */
	c2_tx_clean(c2_port);
	c2_rx_clean(c2_port);

	/* Free the host descriptor rings */
	kfree(c2_port->rx_ring.start);
	kfree(c2_port->tx_ring.start);
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return 0;
}

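/*
 * Quiesce the RXP by setting the quiesce bit in the CUR_RX register,
 * then wait for the firmware to acknowledge by clearing it.
 */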
static void c2_reset(struct c2_port *c2_port)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	unsigned int cur_rx = c2dev->cur_rx;

	/* Tell the hardware to quiesce */
	C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);

	/*
	 * The hardware will reset the C2_PCI_HRX_QUI bit once
	 * the RXP is quiesced.  Wait 2 seconds for this.
	 */
	ssleep(2);

	cur_rx = C2_GET_CUR_RX(c2dev);

	if (cur_rx & C2_PCI_HRX_QUI)
		pr_debug("c2_reset: failed to quiesce the hardware!\n");

	cur_rx &= ~C2_PCI_HRX_QUI;

	c2dev->cur_rx = cur_rx;

	pr_debug("Current RX: %u\n", c2dev->cur_rx);
}

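/*
 * Hard transmit entry point: DMA-map the linear part of the skb (and
 * any page fragments) and post one TXP descriptor per mapping.
 */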
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	dma_addr_t mapaddr;
	u32 maplen;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&c2_port->tx_lock, flags);

		pr_debug("%s: Tx ring full when queue awake!\n",
			netdev->name);
		return NETDEV_TX_BUSY;
	}

	maplen = skb_headlen(skb);
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);

	elem = tx_ring->to_use;
	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;

	/* Tell HW to xmit */
	__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR);
	__raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN);
	__raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS);

	c2_port->netstats.tx_packets++;
	c2_port->netstats.tx_bytes += maplen;

	/* Loop thru additional data fragments and queue them */
	if (skb_shinfo(skb)->nr_frags) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			maplen = frag->size;
			mapaddr =
			    pci_map_page(c2dev->pcidev, frag->page,
					 frag->page_offset, maplen,
					 PCI_DMA_TODEVICE);

			elem = elem->next;
			elem->skb = NULL;
			elem->mapaddr = mapaddr;
			elem->maplen = maplen;

			/* Tell HW to xmit */
			__raw_writeq(cpu_to_be64(mapaddr),
				     elem->hw_desc + C2_TXP_ADDR);
			__raw_writew(cpu_to_be16(maplen),
				     elem->hw_desc + C2_TXP_LEN);
			__raw_writew(cpu_to_be16(TXP_HTXD_READY),
				     elem->hw_desc + C2_TXP_FLAGS);

			c2_port->netstats.tx_packets++;
			c2_port->netstats.tx_bytes += maplen;
		}
	}

	tx_ring->to_use = elem->next;
	c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

	if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(c2_port))
			pr_debug("%s: transmit queue full\n",
				netdev->name);
	}

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);

	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}

static struct net_device_stats *c2_get_stats(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);

	return &c2_port->netstats;
}

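/* Watchdog callback: reclaim the TX ring after a transmit hang */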
static void c2_tx_timeout(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);

	if (netif_msg_timer(c2_port))
		pr_debug("%s: tx timeout\n", netdev->name);

	c2_tx_clean(c2_port);
}

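/* Change the MTU, restarting the interface if it is running */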
static int c2_change_mtu(struct net_device *netdev, int new_mtu)
{
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	if (netif_running(netdev)) {
		c2_down(netdev);

		c2_up(netdev);
	}

	return ret;
}

/* Initialize network device */
static struct net_device *c2_devinit(struct c2_dev *c2dev,
				     void __iomem * mmio_addr)
{
	struct c2_port *c2_port = NULL;
	struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));

	if (!netdev) {
		pr_debug("c2_port etherdev alloc failed\n");
		return NULL;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

	netdev->open = c2_up;
	netdev->stop = c2_down;
	netdev->hard_start_xmit = c2_xmit_frame;
	netdev->get_stats = c2_get_stats;
	netdev->tx_timeout = c2_tx_timeout;
	netdev->change_mtu = c2_change_mtu;
	netdev->watchdog_timeo = C2_TX_TIMEOUT;
	netdev->irq = c2dev->pcidev->irq;

	c2_port = netdev_priv(netdev);
	c2_port->netdev = netdev;
	c2_port->c2dev = c2dev;
	c2_port->msg_enable = netif_msg_init(debug, default_msg);
	c2_port->tx_ring.count = C2_NUM_TX_DESC;
	c2_port->rx_ring.count = C2_NUM_RX_DESC;

	spin_lock_init(&c2_port->tx_lock);

	/* Copy our 48-bit ethernet hardware address */
	memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);

	/* Validate the MAC address */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_debug("Invalid MAC Address\n");
		c2_print_macaddr(netdev);
		free_netdev(netdev);
		return NULL;
	}

	c2dev->netdev = netdev;

	return netdev;
}

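/*
 * PCI probe: validate the adapter's BARs, firmware magic, version and
 * IVN, then map its register and descriptor regions, hook the
 * interrupt, and register both the netdev and the RNIC/IB device.
 */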
static int __devinit c2_probe(struct pci_dev *pcidev,
			      const struct pci_device_id *ent)
{
	int ret = 0, i;
	unsigned long reg0_start, reg0_flags, reg0_len;
	unsigned long reg2_start, reg2_flags, reg2_len;
	unsigned long reg4_start, reg4_flags, reg4_len;
	unsigned kva_map_size;
	struct net_device *netdev = NULL;
	struct c2_dev *c2dev = NULL;
	void __iomem *mmio_regs = NULL;

	printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
		DRV_VERSION);

	/* Enable PCI device */
	ret = pci_enable_device(pcidev);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
			pci_name(pcidev));
		goto bail0;
	}

	reg0_start = pci_resource_start(pcidev, BAR_0);
	reg0_len = pci_resource_len(pcidev, BAR_0);
	reg0_flags = pci_resource_flags(pcidev, BAR_0);

	reg2_start = pci_resource_start(pcidev, BAR_2);
	reg2_len = pci_resource_len(pcidev, BAR_2);
	reg2_flags = pci_resource_flags(pcidev, BAR_2);

	reg4_start = pci_resource_start(pcidev, BAR_4);
	reg4_len = pci_resource_len(pcidev, BAR_4);
	reg4_flags = pci_resource_flags(pcidev, BAR_4);

	pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
	pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
	pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);

	/* Make sure the PCI base addrs are MMIO */
	if (!(reg0_flags & IORESOURCE_MEM) ||
	    !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
		ret = -ENODEV;
		goto bail1;
	}

	/* Check for weird/broken PCI region reporting */
	if ((reg0_len < C2_REG0_SIZE) ||
	    (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
		printk(KERN_ERR PFX "Invalid PCI region sizes\n");
		ret = -ENODEV;
		goto bail1;
	}

	/* Reserve PCI I/O and memory resources */
	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to request regions\n",
			pci_name(pcidev));
		goto bail1;
	}

	if ((sizeof(dma_addr_t) > 4)) {
		ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
		if (ret < 0) {
			printk(KERN_ERR PFX "64b DMA configuration failed\n");
			goto bail2;
		}
	} else {
		ret = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
		if (ret < 0) {
			printk(KERN_ERR PFX "32b DMA configuration failed\n");
			goto bail2;
		}
	}

	/* Enables bus-mastering on the device */
	pci_set_master(pcidev);

	/* Remap the adapter PCI registers in BAR4 */
	mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				    sizeof(struct c2_adapter_pci_regs));
	if (mmio_regs == 0UL) {
		printk(KERN_ERR PFX
			"Unable to remap adapter PCI registers in BAR4\n");
		ret = -EIO;
		goto bail2;
	}

	/* Validate PCI regs magic */
	for (i = 0; i < sizeof(c2_magic); i++) {
		if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
			printk(KERN_ERR PFX "Downlevel Firmware boot loader "
				"[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
			       "utility to update your boot loader\n",
				i + 1, sizeof(c2_magic),
				readb(mmio_regs + C2_REGS_MAGIC + i),
				c2_magic[i]);
			printk(KERN_ERR PFX "Adapter not claimed\n");
			iounmap(mmio_regs);
			ret = -EIO;
			goto bail2;
		}
	}

	/* Validate the adapter version */
	if (be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
		printk(KERN_ERR PFX "Version mismatch "
			"[fw=%u, c2=%u], Adapter not claimed\n",
			be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)),
			C2_VERSION);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Validate the adapter IVN */
	if (be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
		printk(KERN_ERR PFX "Downlevel firmware. You should be using "
		       "the OpenIB device support kit. "
		       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
			be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)),
			C2_IVN);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Allocate hardware structure */
	c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
	if (!c2dev) {
		printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
			pci_name(pcidev));
		ret = -ENOMEM;
		iounmap(mmio_regs);
		goto bail2;
	}

	memset(c2dev, 0, sizeof(*c2dev));
	spin_lock_init(&c2dev->lock);
	c2dev->pcidev = pcidev;
	c2dev->cur_tx = 0;

	/* Get the last RX index */
	c2dev->cur_rx =
	    (be32_to_cpu(readl(mmio_regs + C2_REGS_HRX_CUR)) -
	     0xffffc000) / sizeof(struct c2_rxp_desc);

	/* Request an interrupt line for the driver */
	ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
	if (ret) {
		printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
			pci_name(pcidev), pcidev->irq);
		iounmap(mmio_regs);
		goto bail3;
	}

	/* Set driver specific data */
	pci_set_drvdata(pcidev, c2dev);

	/* Initialize network device */
	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
		iounmap(mmio_regs);
		goto bail4;
	}

	/* Save off the actual size prior to unmapping mmio_regs */
	kva_map_size = be32_to_cpu(readl(mmio_regs + C2_REGS_PCI_WINSIZE));

	/* Unmap the adapter PCI registers in BAR4 */
	iounmap(mmio_regs);

	/* Register network device */
	ret = register_netdev(netdev);
	if (ret) {
		printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
			ret);
		goto bail5;
	}

	/* Disable network packets */
	netif_stop_queue(netdev);

	/* Remap the adapter HRXDQ PA space to kernel VA space */
	c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
					       C2_RXP_HRXDQ_SIZE);
	if (c2dev->mmio_rxp_ring == 0UL) {
		printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
		ret = -EIO;
		goto bail6;
	}

	/* Remap the adapter HTXDQ PA space to kernel VA space */
	c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
					       C2_TXP_HTXDQ_SIZE);
	if (c2dev->mmio_txp_ring == 0UL) {
		printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
		ret = -EIO;
		goto bail7;
	}

	/* Save off the current RX index in the last 4 bytes of the TXP Ring */
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	/* Remap the PCI registers in adapter BAR0 to kernel VA space */
	c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
	if (c2dev->regs == 0UL) {
		printk(KERN_ERR PFX "Unable to remap BAR0\n");
		ret = -EIO;
		goto bail8;
	}

	/* Remap the PCI registers in adapter BAR4 to kernel VA space */
	c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
	c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				     kva_map_size);
	if (c2dev->kva == 0UL) {
		printk(KERN_ERR PFX "Unable to remap BAR4\n");
		ret = -EIO;
		goto bail9;
	}

	/* Print out the MAC address */
	c2_print_macaddr(netdev);

	ret = c2_rnic_init(c2dev);
	if (ret) {
		printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
		goto bail10;
	}

	if (c2_register_device(c2dev))
		goto bail10;

	return 0;

 bail10:
	iounmap(c2dev->kva);

 bail9:
	iounmap(c2dev->regs);

 bail8:
	iounmap(c2dev->mmio_txp_ring);

 bail7:
	iounmap(c2dev->mmio_rxp_ring);

 bail6:
	unregister_netdev(netdev);

 bail5:
	free_netdev(netdev);

 bail4:
	free_irq(pcidev->irq, c2dev);

 bail3:
	ib_dealloc_device(&c2dev->ibdev);

 bail2:
	pci_release_regions(pcidev);

 bail1:
	pci_disable_device(pcidev);

 bail0:
	return ret;
}

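/* PCI remove: tear down everything c2_probe() set up, in reverse order */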
static void __devexit c2_remove(struct pci_dev *pcidev)
{
	struct c2_dev *c2dev = pci_get_drvdata(pcidev);
	struct net_device *netdev = c2dev->netdev;

	/* Unregister with OpenIB */
	c2_unregister_device(c2dev);

	/* Clean up the RNIC resources */
	c2_rnic_term(c2dev);

	/* Remove network device from the kernel */
	unregister_netdev(netdev);

	/* Free network device */
	free_netdev(netdev);

	/* Free the interrupt line */
	free_irq(pcidev->irq, c2dev);

	/* missing: Turn LEDs off here */

	/* Unmap adapter PA space */
	iounmap(c2dev->kva);
	iounmap(c2dev->regs);
	iounmap(c2dev->mmio_txp_ring);
	iounmap(c2dev->mmio_rxp_ring);

	/* Free the hardware structure */
	ib_dealloc_device(&c2dev->ibdev);

	/* Release reserved PCI I/O and memory resources */
	pci_release_regions(pcidev);

	/* Disable PCI device */
	pci_disable_device(pcidev);

	/* Clear driver specific data */
	pci_set_drvdata(pcidev, NULL);
}

static struct pci_driver c2_pci_driver = {
	.name = DRV_NAME,
	.id_table = c2_pci_table,
	.probe = c2_probe,
	.remove = __devexit_p(c2_remove),
};

static int __init c2_init_module(void)
{
	return pci_register_driver(&c2_pci_driver);
}

static void __exit c2_exit_module(void)
{
	pci_unregister_driver(&c2_pci_driver);
}

module_init(c2_init_module);
module_exit(c2_exit_module);