/*
 *  sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *  Author/maintainer:  Jeff Garzik <jgarzik@pobox.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file "COPYING" in the main directory of this archive
 *  for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#undef CARM_DEBUG
#undef CARM_VERBOSE_DEBUG
#undef CARM_NDEBUG

#define DRV_NAME "sx8"
#define DRV_VERSION "1.0"
#define PFX DRV_NAME ": "

MODULE_AUTHOR("Jeff Garzik");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Promise SATA SX8 block driver");
MODULE_VERSION(DRV_VERSION);

/*
 * SX8 hardware has a single message queue for all ATA ports.
 * When this driver was written, the hardware (firmware?) would
 * corrupt data eventually, if more than one request was outstanding.
 * As one can imagine, having 8 ports bottlenecking on a single
 * command hurts performance.
 *
 * Based on user reports, later versions of the hardware (firmware?)
 * seem to be able to survive with more than one command queued.
 *
 * Therefore, we default to the safe option -- 1 command -- but
 * allow the user to increase this.
 *
 * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ),
 * but problems seem to occur when you exceed ~30, even on newer hardware.
 */
static int max_queue = 1;
module_param(max_queue, int, 0444);
MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)");

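/*
 * Note: with the 0444 permissions above, the value is exposed
 * read-only at /sys/module/sx8/parameters/max_queue after load.
 * A deeper queue can be requested at module load time on hardware
 * known to tolerate it, e.g.:
 *
 *	modprobe sx8 max_queue=8
 */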

#define NEXT_RESP(idx)	((idx + 1) % RMSG_Q_LEN)

/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
#define TAG_ENCODE(tag)	(((tag) << 16) | 0xf)
#define TAG_DECODE(tag)	(((tag) >> 16) & 0x1f)
#define TAG_VALID(tag)	((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32))
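/*
 * Worked example: TAG_ENCODE(5) == (5 << 16) | 0xf == 0x0005000f.
 * TAG_DECODE(0x0005000f) == 5, and TAG_VALID() accepts the handle
 * because the low nibble carries the 0xf poison value and the
 * decoded tag is below 32.
 */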

/* note: prints function name for you */
#ifdef CARM_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
#ifdef CARM_VERBOSE_DEBUG
#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
#else
#define VPRINTK(fmt, args...)
#endif	/* CARM_VERBOSE_DEBUG */
#else
#define DPRINTK(fmt, args...)
#define VPRINTK(fmt, args...)
#endif	/* CARM_DEBUG */

#ifdef CARM_NDEBUG
#define assert(expr)
#else
#define assert(expr) \
	if (unlikely(!(expr))) {					\
		printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n",	\
		       #expr, __FILE__, __func__, __LINE__);		\
	}
#endif

/* defines only for the constants which don't work well as enums */
struct carm_host;

enum {
	/* adapter-wide limits */
	CARM_MAX_PORTS		= 8,
	CARM_SHM_SIZE		= (4096 << 7),
	CARM_MINORS_PER_MAJOR	= 256 / CARM_MAX_PORTS,
	CARM_MAX_WAIT_Q		= CARM_MAX_PORTS + 1,

	/* command message queue limits */
	CARM_MAX_REQ		= 64,	       /* max command msgs per host */
	CARM_MSG_LOW_WATER	= (CARM_MAX_REQ / 4),	     /* refill mark */

	/* S/G limits, host-wide and per-request */
	CARM_MAX_REQ_SG		= 32,	     /* max s/g entries per request */
	CARM_MAX_HOST_SG	= 600,		/* max s/g entries per host */
	CARM_SG_LOW_WATER	= (CARM_MAX_HOST_SG / 4),   /* re-fill mark */

	/* hardware registers */
	CARM_IHQP		= 0x1c,
	CARM_INT_STAT		= 0x10, /* interrupt status */
	CARM_INT_MASK		= 0x14, /* interrupt mask */
	CARM_HMUC		= 0x18, /* host message unit control */
	RBUF_ADDR_LO		= 0x20, /* response msg DMA buf low 32 bits */
	RBUF_ADDR_HI		= 0x24, /* response msg DMA buf high 32 bits */
	RBUF_BYTE_SZ		= 0x28,
	CARM_RESP_IDX		= 0x2c,
	CARM_CMS0		= 0x30, /* command message size reg 0 */
	CARM_LMUC		= 0x48,
	CARM_HMPHA		= 0x6c,
	CARM_INITC		= 0xb5,

	/* bits in CARM_INT_{STAT,MASK} */
	INT_RESERVED		= 0xfffffff0,
	INT_WATCHDOG		= (1 << 3),	/* watchdog timer */
	INT_Q_OVERFLOW		= (1 << 2),	/* cmd msg q overflow */
	INT_Q_AVAILABLE		= (1 << 1),	/* cmd msg q has free space */
	INT_RESPONSE		= (1 << 0),	/* response msg available */
	INT_ACK_MASK		= INT_WATCHDOG | INT_Q_OVERFLOW,
	INT_DEF_MASK		= INT_RESERVED | INT_Q_OVERFLOW |
				  INT_RESPONSE,

	/* command messages, and related register bits */
	CARM_HAVE_RESP		= 0x01,
	CARM_MSG_READ		= 1,
	CARM_MSG_WRITE		= 2,
	CARM_MSG_VERIFY		= 3,
	CARM_MSG_GET_CAPACITY	= 4,
	CARM_MSG_FLUSH		= 5,
	CARM_MSG_IOCTL		= 6,
	CARM_MSG_ARRAY		= 8,
	CARM_MSG_MISC		= 9,
	CARM_CME		= (1 << 2),
	CARM_RME		= (1 << 1),
	CARM_WZBC		= (1 << 0),
	CARM_RMI		= (1 << 0),
	CARM_Q_FULL		= (1 << 3),
	CARM_MSG_SIZE		= 288,
	CARM_Q_LEN		= 48,

	/* CARM_MSG_IOCTL messages */
	CARM_IOC_SCAN_CHAN	= 5,	/* scan channels for devices */
	CARM_IOC_GET_TCQ	= 13,	/* get tcq/ncq depth */
	CARM_IOC_SET_TCQ	= 14,	/* set tcq/ncq depth */

	IOC_SCAN_CHAN_NODEV	= 0x1f,
	IOC_SCAN_CHAN_OFFSET	= 0x40,

	/* CARM_MSG_ARRAY messages */
	CARM_ARRAY_INFO		= 0,

	ARRAY_NO_EXIST		= (1 << 31),

	/* response messages */
	RMSG_SZ			= 8,	/* sizeof(struct carm_response) */
	RMSG_Q_LEN		= 48,	/* resp. msg list length */
	RMSG_OK			= 1,	/* bit indicating msg was successful */
					/* length of entire resp. msg buffer */
	RBUF_LEN		= RMSG_SZ * RMSG_Q_LEN,

	PDC_SHM_SIZE		= (4096 << 7), /* length of entire h/w buffer */

	/* CARM_MSG_MISC messages */
	MISC_GET_FW_VER		= 2,
	MISC_ALLOC_MEM		= 3,
	MISC_SET_TIME		= 5,

	/* MISC_GET_FW_VER feature bits */
	FW_VER_4PORT		= (1 << 2), /* 1=4 ports, 0=8 ports */
	FW_VER_NON_RAID		= (1 << 1), /* 1=non-RAID firmware, 0=RAID */
	FW_VER_ZCR		= (1 << 0), /* zero channel RAID (whatever that is) */

	/* carm_host flags */
	FL_NON_RAID		= FW_VER_NON_RAID,
	FL_4PORT		= FW_VER_4PORT,
	FL_FW_VER_MASK		= (FW_VER_NON_RAID | FW_VER_4PORT),
	FL_DAC			= (1 << 16),
	FL_DYN_MAJOR		= (1 << 17),
};
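/*
 * Note: FL_NON_RAID and FL_4PORT deliberately alias the FW_VER_*
 * bit positions, which is what lets carm_handle_resp() OR the masked
 * firmware feature byte straight into host->flags without translation.
 */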

enum {
	CARM_SG_BOUNDARY	= 0xffffUL,	    /* s/g segment boundary */
};

enum scatter_gather_types {
	SGT_32BIT		= 0,
	SGT_64BIT		= 1,
};

enum host_states {
	HST_INVALID,		/* invalid state; never used */
	HST_ALLOC_BUF,		/* setting up master SHM area */
	HST_ERROR,		/* we never leave here */
	HST_PORT_SCAN,		/* start dev scan */
	HST_DEV_SCAN_START,	/* start per-device probe */
	HST_DEV_SCAN,		/* continue per-device probe */
	HST_DEV_ACTIVATE,	/* activate devices we found */
	HST_PROBE_FINISHED,	/* probe is complete */
	HST_PROBE_START,	/* initiate probe */
	HST_SYNC_TIME,		/* tell firmware what time it is */
	HST_GET_FW_VER,		/* get firmware version, adapter port cnt */
};

#ifdef CARM_DEBUG
static const char *state_name[] = {
	"HST_INVALID",
	"HST_ALLOC_BUF",
	"HST_ERROR",
	"HST_PORT_SCAN",
	"HST_DEV_SCAN_START",
	"HST_DEV_SCAN",
	"HST_DEV_ACTIVATE",
	"HST_PROBE_FINISHED",
	"HST_PROBE_START",
	"HST_SYNC_TIME",
	"HST_GET_FW_VER",
};
#endif

struct carm_port {
	unsigned int			port_no;
	struct gendisk			*disk;
	struct carm_host		*host;

	/* attached device characteristics */
	u64				capacity;
	char				name[41];
	u16				dev_geom_head;
	u16				dev_geom_sect;
	u16				dev_geom_cyl;
};

struct carm_request {
	unsigned int			tag;
	int				n_elem;
	unsigned int			msg_type;
	unsigned int			msg_subtype;
	unsigned int			msg_bucket;
	struct request			*rq;
	struct carm_port		*port;
	struct scatterlist		sg[CARM_MAX_REQ_SG];
};

struct carm_host {
	unsigned long			flags;
	void				__iomem *mmio;
	void				*shm;
	dma_addr_t			shm_dma;

	int				major;
	int				id;
	char				name[32];

	spinlock_t			lock;
	struct pci_dev			*pdev;
	unsigned int			state;
	u32				fw_ver;

	struct request_queue		*oob_q;
	unsigned int			n_oob;

	unsigned int			hw_sg_used;

	unsigned int			resp_idx;

	unsigned int			wait_q_prod;
	unsigned int			wait_q_cons;
	struct request_queue		*wait_q[CARM_MAX_WAIT_Q];

	unsigned int			n_msgs;
	u64				msg_alloc;
	struct carm_request		req[CARM_MAX_REQ];
	void				*msg_base;
	dma_addr_t			msg_dma;

	int				cur_scan_dev;
	unsigned long			dev_active;
	unsigned long			dev_present;
	struct carm_port		port[CARM_MAX_PORTS];

	struct work_struct		fsm_task;

	struct completion		probe_comp;
};

struct carm_response {
	__le32 ret_handle;
	__le32 status;
}  __attribute__((packed));

struct carm_msg_sg {
	__le32 start;
	__le32 len;
}  __attribute__((packed));

struct carm_msg_rw {
	u8 type;
	u8 id;
	u8 sg_count;
	u8 sg_type;
	__le32 handle;
	__le32 lba;
	__le16 lba_count;
	__le16 lba_high;
	struct carm_msg_sg sg[32];
}  __attribute__((packed));

struct carm_msg_allocbuf {
	u8 type;
	u8 subtype;
	u8 n_sg;
	u8 sg_type;
	__le32 handle;
	__le32 addr;
	__le32 len;
	__le32 evt_pool;
	__le32 n_evt;
	__le32 rbuf_pool;
	__le32 n_rbuf;
	__le32 msg_pool;
	__le32 n_msg;
	struct carm_msg_sg sg[8];
}  __attribute__((packed));

struct carm_msg_ioctl {
	u8 type;
	u8 subtype;
	u8 array_id;
	u8 reserved1;
	__le32 handle;
	__le32 data_addr;
	u32 reserved2;
}  __attribute__((packed));

struct carm_msg_sync_time {
	u8 type;
	u8 subtype;
	u16 reserved1;
	__le32 handle;
	u32 reserved2;
	__le32 timestamp;
}  __attribute__((packed));

struct carm_msg_get_fw_ver {
	u8 type;
	u8 subtype;
	u16 reserved1;
	__le32 handle;
	__le32 data_addr;
	u32 reserved2;
}  __attribute__((packed));

struct carm_fw_ver {
	__le32 version;
	u8 features;
	u8 reserved1;
	u16 reserved2;
}  __attribute__((packed));

struct carm_array_info {
	__le32 size;

	__le16 size_hi;
	__le16 stripe_size;

	__le32 mode;

	__le16 stripe_blk_sz;
	__le16 reserved1;

	__le16 cyl;
	__le16 head;

	__le16 sect;
	u8 array_id;
	u8 reserved2;

	char name[40];

	__le32 array_status;

	/* device list continues beyond this point? */
}  __attribute__((packed));

static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void carm_remove_one (struct pci_dev *pdev);
static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static const struct pci_device_id carm_pci_tbl[] = {
	{ PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, carm_pci_tbl);

static struct pci_driver carm_driver = {
	.name		= DRV_NAME,
	.id_table	= carm_pci_tbl,
	.probe		= carm_init_one,
	.remove		= carm_remove_one,
};

static const struct block_device_operations carm_bd_ops = {
	.owner		= THIS_MODULE,
	.getgeo		= carm_bdev_getgeo,
};

static unsigned int carm_host_id;
static unsigned long carm_major_alloc;



static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct carm_port *port = bdev->bd_disk->private_data;

	geo->heads = (u8) port->dev_geom_head;
	geo->sectors = (u8) port->dev_geom_sect;
	geo->cylinders = port->dev_geom_cyl;
	return 0;
}

static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };

static inline int carm_lookup_bucket(u32 msg_size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
		if (msg_size <= msg_sizes[i])
			return i;

	return -ENOENT;
}
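/*
 * Example: a 100-byte message lands in bucket 2, since msg_sizes[2]
 * == 128 is the smallest size that fits; anything over CARM_MSG_SIZE
 * (288 bytes) yields -ENOENT.  carm_send_msg() later shifts the
 * bucket index into the low bits of the queue-head write.
 */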

static void carm_init_buckets(void __iomem *mmio)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
		writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i));
}

static inline void *carm_ref_msg(struct carm_host *host,
				 unsigned int msg_idx)
{
	return host->msg_base + (msg_idx * CARM_MSG_SIZE);
}

static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
					  unsigned int msg_idx)
{
	return host->msg_dma + (msg_idx * CARM_MSG_SIZE);
}
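/*
 * Note: message slots sit back to back, CARM_MSG_SIZE (288) bytes
 * apart, so the same arithmetic maps a tag to both its CPU pointer
 * (carm_ref_msg) and its DMA bus address (carm_ref_msg_dma).
 */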

static int carm_send_msg(struct carm_host *host,
			 struct carm_request *crq)
{
	void __iomem *mmio = host->mmio;
	u32 msg = (u32) carm_ref_msg_dma(host, crq->tag);
	u32 cm_bucket = crq->msg_bucket;
	u32 tmp;
	int rc = 0;

	VPRINTK("ENTER\n");

	tmp = readl(mmio + CARM_HMUC);
	if (tmp & CARM_Q_FULL) {
		DPRINTK("host msg queue full\n");
		rc = -EBUSY;
	} else {
		writel(msg | (cm_bucket << 1), mmio + CARM_IHQP);
		readl(mmio + CARM_IHQP);	/* flush */
	}

	return rc;
}

static struct carm_request *carm_get_request(struct carm_host *host)
{
	unsigned int i;

	/* obey global hardware limit on S/G entries */
	if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG))
		return NULL;

	for (i = 0; i < max_queue; i++)
		if ((host->msg_alloc & (1ULL << i)) == 0) {
			struct carm_request *crq = &host->req[i];
			crq->port = NULL;
			crq->n_elem = 0;

			host->msg_alloc |= (1ULL << i);
			host->n_msgs++;

			assert(host->n_msgs <= CARM_MAX_REQ);
			sg_init_table(crq->sg, CARM_MAX_REQ_SG);
			return crq;
		}

	DPRINTK("no request available, returning NULL\n");
	return NULL;
}
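/*
 * Note: msg_alloc is a 64-bit bitmap of in-flight tags (bit i set
 * means host->req[i] is owned by the hardware), so allocation in
 * carm_get_request() above and release in carm_put_request() below
 * are simple bit operations under host->lock.  With the default
 * max_queue == 1, only bit 0 is ever used.
 */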

static int carm_put_request(struct carm_host *host, struct carm_request *crq)
{
	assert(crq->tag < max_queue);

	if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0))
		return -EINVAL; /* tried to clear a tag that was not active */

	assert(host->hw_sg_used >= crq->n_elem);

	host->msg_alloc &= ~(1ULL << crq->tag);
	host->hw_sg_used -= crq->n_elem;
	host->n_msgs--;

	return 0;
}

static struct carm_request *carm_get_special(struct carm_host *host)
{
	unsigned long flags;
	struct carm_request *crq = NULL;
	struct request *rq;
	int tries = 5000;

	while (tries-- > 0) {
		spin_lock_irqsave(&host->lock, flags);
		crq = carm_get_request(host);
		spin_unlock_irqrestore(&host->lock, flags);

		if (crq)
			break;
		msleep(10);
	}

	if (!crq)
		return NULL;

	rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
	if (!rq) {
		spin_lock_irqsave(&host->lock, flags);
		carm_put_request(host, crq);
		spin_unlock_irqrestore(&host->lock, flags);
		return NULL;
	}

	crq->rq = rq;
	return crq;
}

static int carm_array_info (struct carm_host *host, unsigned int array_idx)
{
	struct carm_msg_ioctl *ioc;
	unsigned int idx;
	u32 msg_data;
	dma_addr_t msg_dma;
	struct carm_request *crq;
	int rc;

	crq = carm_get_special(host);
	if (!crq) {
		rc = -ENOMEM;
		goto err_out;
	}

	idx = crq->tag;

	ioc = carm_ref_msg(host, idx);
	msg_dma = carm_ref_msg_dma(host, idx);
	msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));

	crq->msg_type = CARM_MSG_ARRAY;
	crq->msg_subtype = CARM_ARRAY_INFO;
	rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) +
				sizeof(struct carm_array_info));
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_ARRAY;
	ioc->subtype	= CARM_ARRAY_INFO;
	ioc->array_id	= (u8) array_idx;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	spin_lock_irq(&host->lock);
	assert(host->state == HST_DEV_SCAN_START ||
	       host->state == HST_DEV_SCAN);
	spin_unlock_irq(&host->lock);

	DPRINTK("blk_insert_request, tag == %u\n", idx);
	blk_insert_request(host->oob_q, crq->rq, 1, crq);

	return 0;

err_out:
	spin_lock_irq(&host->lock);
	host->state = HST_ERROR;
	spin_unlock_irq(&host->lock);
	return rc;
}

typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);

static int carm_send_special (struct carm_host *host, carm_sspc_t func)
{
	struct carm_request *crq;
	struct carm_msg_ioctl *ioc;
	void *mem;
	unsigned int idx, msg_size;
	int rc;

	crq = carm_get_special(host);
	if (!crq)
		return -ENOMEM;

	idx = crq->tag;

	mem = carm_ref_msg(host, idx);

	msg_size = func(host, idx, mem);

	ioc = mem;
	crq->msg_type = ioc->type;
	crq->msg_subtype = ioc->subtype;
	rc = carm_lookup_bucket(msg_size);
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	DPRINTK("blk_insert_request, tag == %u\n", idx);
	blk_insert_request(host->oob_q, crq->rq, 1, crq);

	return 0;
}

static unsigned int carm_fill_sync_time(struct carm_host *host,
					unsigned int idx, void *mem)
{
	struct timeval tv;
	struct carm_msg_sync_time *st = mem;

	do_gettimeofday(&tv);

	memset(st, 0, sizeof(*st));
	st->type	= CARM_MSG_MISC;
	st->subtype	= MISC_SET_TIME;
	st->handle	= cpu_to_le32(TAG_ENCODE(idx));
	st->timestamp	= cpu_to_le32(tv.tv_sec);

	return sizeof(struct carm_msg_sync_time);
}

static unsigned int carm_fill_alloc_buf(struct carm_host *host,
					unsigned int idx, void *mem)
{
	struct carm_msg_allocbuf *ab = mem;

	memset(ab, 0, sizeof(*ab));
	ab->type	= CARM_MSG_MISC;
	ab->subtype	= MISC_ALLOC_MEM;
	ab->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ab->n_sg	= 1;
	ab->sg_type	= SGT_32BIT;
	ab->addr	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
	ab->len		= cpu_to_le32(PDC_SHM_SIZE >> 1);
	ab->evt_pool	= cpu_to_le32(host->shm_dma + (16 * 1024));
	ab->n_evt	= cpu_to_le32(1024);
	ab->rbuf_pool	= cpu_to_le32(host->shm_dma);
	ab->n_rbuf	= cpu_to_le32(RMSG_Q_LEN);
	ab->msg_pool	= cpu_to_le32(host->shm_dma + RBUF_LEN);
	ab->n_msg	= cpu_to_le32(CARM_Q_LEN);
	ab->sg[0].start	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
	ab->sg[0].len	= cpu_to_le32(65536);

	return sizeof(struct carm_msg_allocbuf);
}
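/*
 * Note on the layout handed to the firmware above: the first half of
 * the host SHM area begins with the response ring (RBUF_LEN bytes at
 * shm_dma, the rbuf_pool) followed by the command-message slots
 * (msg_pool at shm_dma + RBUF_LEN); the second half, at
 * shm_dma + PDC_SHM_SIZE/2, appears to be handed to the firmware as
 * scratch memory by this MISC_ALLOC_MEM message.
 */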

static unsigned int carm_fill_scan_channels(struct carm_host *host,
					    unsigned int idx, void *mem)
{
	struct carm_msg_ioctl *ioc = mem;
	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) +
			      IOC_SCAN_CHAN_OFFSET);

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_IOCTL;
	ioc->subtype	= CARM_IOC_SCAN_CHAN;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	/* fill output data area with "no device" default values */
	mem += IOC_SCAN_CHAN_OFFSET;
	memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS);

	return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS;
}

static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
					 unsigned int idx, void *mem)
{
	struct carm_msg_get_fw_ver *ioc = mem;
	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc));

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_MISC;
	ioc->subtype	= MISC_GET_FW_VER;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	return sizeof(struct carm_msg_get_fw_ver) +
	       sizeof(struct carm_fw_ver);
}

static inline void carm_end_request_queued(struct carm_host *host,
					   struct carm_request *crq,
					   int error)
{
	struct request *req = crq->rq;
	int rc;

	__blk_end_request_all(req, error);

	rc = carm_put_request(host, crq);
	assert(rc == 0);
}

static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
{
	unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;

	blk_stop_queue(q);
	VPRINTK("STOPPED QUEUE %p\n", q);

	host->wait_q[idx] = q;
	host->wait_q_prod++;
	BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
}

static inline struct request_queue *carm_pop_q(struct carm_host *host)
{
	unsigned int idx;

	if (host->wait_q_prod == host->wait_q_cons)
		return NULL;

	idx = host->wait_q_cons % CARM_MAX_WAIT_Q;
	host->wait_q_cons++;

	return host->wait_q[idx];
}

static inline void carm_round_robin(struct carm_host *host)
{
	struct request_queue *q = carm_pop_q(host);
	if (q) {
		blk_start_queue(q);
		VPRINTK("STARTED QUEUE %p\n", q);
	}
}

static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
			       int error)
{
	carm_end_request_queued(host, crq, error);
	if (max_queue == 1)
		carm_round_robin(host);
	else if ((host->n_msgs <= CARM_MSG_LOW_WATER) &&
		 (host->hw_sg_used <= CARM_SG_LOW_WATER)) {
		carm_round_robin(host);
	}
}

static void carm_oob_rq_fn(struct request_queue *q)
{
	struct carm_host *host = q->queuedata;
	struct carm_request *crq;
	struct request *rq;
	int rc;

	while (1) {
		DPRINTK("get req\n");
		rq = blk_fetch_request(q);
		if (!rq)
			break;

		crq = rq->special;
		assert(crq != NULL);
		assert(crq->rq == rq);

		crq->n_elem = 0;

		DPRINTK("send req\n");
		rc = carm_send_msg(host, crq);
		if (rc) {
			blk_requeue_request(q, rq);
			carm_push_q(host, q);
			return;		/* call us again later, eventually */
		}
	}
}

static void carm_rq_fn(struct request_queue *q)
{
	struct carm_port *port = q->queuedata;
	struct carm_host *host = port->host;
	struct carm_msg_rw *msg;
	struct carm_request *crq;
	struct request *rq;
	struct scatterlist *sg;
	int writing = 0, pci_dir, i, n_elem, rc;
	u32 tmp;
	unsigned int msg_size;

queue_one_request:
	VPRINTK("get req\n");
	rq = blk_peek_request(q);
	if (!rq)
		return;

	crq = carm_get_request(host);
	if (!crq) {
		carm_push_q(host, q);
		return;		/* call us again later, eventually */
	}
	crq->rq = rq;

	blk_start_request(rq);

	if (rq_data_dir(rq) == WRITE) {
		writing = 1;
		pci_dir = PCI_DMA_TODEVICE;
	} else {
		pci_dir = PCI_DMA_FROMDEVICE;
	}

	/* get scatterlist from block layer */
	sg = &crq->sg[0];
	n_elem = blk_rq_map_sg(q, rq, sg);
	if (n_elem <= 0) {
		carm_end_rq(host, crq, -EIO);
		return;		/* request with no s/g entries? */
	}

	/* map scatterlist to PCI bus addresses */
	n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
	if (n_elem <= 0) {
		carm_end_rq(host, crq, -EIO);
		return;		/* request with no s/g entries? */
	}
	crq->n_elem = n_elem;
	crq->port = port;
	host->hw_sg_used += n_elem;

	/*
	 * build read/write message
	 */

	VPRINTK("build msg\n");
	msg = (struct carm_msg_rw *) carm_ref_msg(host, crq->tag);

	if (writing) {
		msg->type = CARM_MSG_WRITE;
		crq->msg_type = CARM_MSG_WRITE;
	} else {
		msg->type = CARM_MSG_READ;
		crq->msg_type = CARM_MSG_READ;
	}

	msg->id		= port->port_no;
	msg->sg_count	= n_elem;
	msg->sg_type	= SGT_32BIT;
	msg->handle	= cpu_to_le32(TAG_ENCODE(crq->tag));
	msg->lba	= cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
	tmp		= (blk_rq_pos(rq) >> 16) >> 16;
	msg->lba_high	= cpu_to_le16( (u16) tmp );
	msg->lba_count	= cpu_to_le16(blk_rq_sectors(rq));
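	/*
	 * Note: the double shift above ("(... >> 16) >> 16") extracts
	 * the high bits of the sector number without invoking undefined
	 * behaviour when sector_t is only 32 bits wide (a plain ">> 32"
	 * would be UB in that configuration).
	 */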

	msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
	for (i = 0; i < n_elem; i++) {
		struct carm_msg_sg *carm_sg = &msg->sg[i];
		carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i]));
		carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i]));
		msg_size += sizeof(struct carm_msg_sg);
	}

	rc = carm_lookup_bucket(msg_size);
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	/*
	 * queue read/write message to hardware
	 */

	VPRINTK("send msg, tag == %u\n", crq->tag);
	rc = carm_send_msg(host, crq);
	if (rc) {
		carm_put_request(host, crq);
		blk_requeue_request(q, rq);
		carm_push_q(host, q);
		return;		/* call us again later, eventually */
	}

	goto queue_one_request;
}

static void carm_handle_array_info(struct carm_host *host,
				   struct carm_request *crq, u8 *mem,
				   int error)
{
	struct carm_port *port;
	u8 *msg_data = mem + sizeof(struct carm_array_info);
	struct carm_array_info *desc = (struct carm_array_info *) msg_data;
	u64 lo, hi;
	int cur_port;
	size_t slen;

	DPRINTK("ENTER\n");

	carm_end_rq(host, crq, error);

	if (error)
		goto out;
	if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
		goto out;

	cur_port = host->cur_scan_dev;

	/* should never occur */
	if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) {
		printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n",
		       cur_port, (int) desc->array_id);
		goto out;
	}

	port = &host->port[cur_port];

	lo = (u64) le32_to_cpu(desc->size);
	hi = (u64) le16_to_cpu(desc->size_hi);

	port->capacity = lo | (hi << 32);
	port->dev_geom_head = le16_to_cpu(desc->head);
	port->dev_geom_sect = le16_to_cpu(desc->sect);
	port->dev_geom_cyl = le16_to_cpu(desc->cyl);

	host->dev_active |= (1 << cur_port);

	strncpy(port->name, desc->name, sizeof(port->name));
	port->name[sizeof(port->name) - 1] = 0;
	slen = strlen(port->name);
	while (slen && (port->name[slen - 1] == ' ')) {
		port->name[slen - 1] = 0;
		slen--;
	}

	printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n",
	       pci_name(host->pdev), port->port_no,
	       (unsigned long long) port->capacity);
	printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n",
	       pci_name(host->pdev), port->port_no, port->name);

out:
	assert(host->state == HST_DEV_SCAN);
	schedule_work(&host->fsm_task);
}

static void carm_handle_scan_chan(struct carm_host *host,
				  struct carm_request *crq, u8 *mem,
				  int error)
{
	u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
	unsigned int i, dev_count = 0;
	int new_state = HST_DEV_SCAN_START;

	DPRINTK("ENTER\n");

	carm_end_rq(host, crq, error);

	if (error) {
		new_state = HST_ERROR;
		goto out;
	}

	/* TODO: scan and support non-disk devices */
	for (i = 0; i < 8; i++)
		if (msg_data[i] == 0) { /* direct-access device (disk) */
			host->dev_present |= (1 << i);
			dev_count++;
		}

	printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n",
	       pci_name(host->pdev), dev_count);

out:
	assert(host->state == HST_PORT_SCAN);
	host->state = new_state;
	schedule_work(&host->fsm_task);
}

static void carm_handle_generic(struct carm_host *host,
				struct carm_request *crq, int error,
				int cur_state, int next_state)
{
	DPRINTK("ENTER\n");

	carm_end_rq(host, crq, error);

	assert(host->state == cur_state);
	if (error)
		host->state = HST_ERROR;
	else
		host->state = next_state;
	schedule_work(&host->fsm_task);
}

static inline void carm_handle_rw(struct carm_host *host,
				  struct carm_request *crq, int error)
{
	int pci_dir;

	VPRINTK("ENTER\n");

	if (rq_data_dir(crq->rq) == WRITE)
		pci_dir = PCI_DMA_TODEVICE;
	else
		pci_dir = PCI_DMA_FROMDEVICE;

	pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir);

	carm_end_rq(host, crq, error);
}

static inline void carm_handle_resp(struct carm_host *host,
				    __le32 ret_handle_le, u32 status)
{
	u32 handle = le32_to_cpu(ret_handle_le);
	unsigned int msg_idx;
	struct carm_request *crq;
	int error = (status == RMSG_OK) ? 0 : -EIO;
	u8 *mem;

	VPRINTK("ENTER, handle == 0x%x\n", handle);

	if (unlikely(!TAG_VALID(handle))) {
		printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n",
		       pci_name(host->pdev), handle);
		return;
	}

	msg_idx = TAG_DECODE(handle);
	VPRINTK("tag == %u\n", msg_idx);

	crq = &host->req[msg_idx];

	/* fast path */
	if (likely(crq->msg_type == CARM_MSG_READ ||
		   crq->msg_type == CARM_MSG_WRITE)) {
		carm_handle_rw(host, crq, error);
		return;
	}

	mem = carm_ref_msg(host, msg_idx);

	switch (crq->msg_type) {
	case CARM_MSG_IOCTL: {
		switch (crq->msg_subtype) {
		case CARM_IOC_SCAN_CHAN:
			carm_handle_scan_chan(host, crq, mem, error);
			break;
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	case CARM_MSG_MISC: {
		switch (crq->msg_subtype) {
		case MISC_ALLOC_MEM:
			carm_handle_generic(host, crq, error,
					    HST_ALLOC_BUF, HST_SYNC_TIME);
			break;
		case MISC_SET_TIME:
			carm_handle_generic(host, crq, error,
					    HST_SYNC_TIME, HST_GET_FW_VER);
			break;
		case MISC_GET_FW_VER: {
			struct carm_fw_ver *ver = (struct carm_fw_ver *)
				(mem + sizeof(struct carm_msg_get_fw_ver));
			if (!error) {
				host->fw_ver = le32_to_cpu(ver->version);
				host->flags |= (ver->features & FL_FW_VER_MASK);
			}
			carm_handle_generic(host, crq, error,
					    HST_GET_FW_VER, HST_PORT_SCAN);
			break;
		}
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	case CARM_MSG_ARRAY: {
		switch (crq->msg_subtype) {
		case CARM_ARRAY_INFO:
			carm_handle_array_info(host, crq, mem, error);
			break;
		default:
			/* unknown / invalid response */
			goto err_out;
		}
		break;
	}

	default:
		/* unknown / invalid response */
		goto err_out;
	}

	return;

err_out:
	printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
	       pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
	carm_end_rq(host, crq, -EIO);
}

static inline void carm_handle_responses(struct carm_host *host)
{
	void __iomem *mmio = host->mmio;
	struct carm_response *resp = (struct carm_response *) host->shm;
	unsigned int work = 0;
	unsigned int idx = host->resp_idx % RMSG_Q_LEN;

	while (1) {
		u32 status = le32_to_cpu(resp[idx].status);

		if (status == 0xffffffff) {
			VPRINTK("ending response on index %u\n", idx);
			writel(idx << 3, mmio + CARM_RESP_IDX);
			break;
		}

		/* response to a message we sent */
		else if ((status & (1 << 31)) == 0) {
			VPRINTK("handling msg response on index %u\n", idx);
			carm_handle_resp(host, resp[idx].ret_handle, status);
			resp[idx].status = cpu_to_le32(0xffffffff);
		}

		/* asynchronous events the hardware throws our way */
		else if ((status & 0xff000000) == (1 << 31)) {
			u8 *evt_type_ptr = (u8 *) &resp[idx];
			u8 evt_type = *evt_type_ptr;
			printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
			       pci_name(host->pdev), (int) evt_type);
			resp[idx].status = cpu_to_le32(0xffffffff);
		}

		idx = NEXT_RESP(idx);
		work++;
	}

	VPRINTK("EXIT, work==%u\n", work);
	host->resp_idx += work;
}
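/*
 * Note: 0xffffffff doubles as the "slot free" sentinel in the response
 * ring; consumed slots are re-poisoned with it above.  The "idx << 3"
 * write apparently converts the slot index into a byte offset
 * (RMSG_SZ == 8) before handing the consumer position back to the
 * hardware.
 */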

static irqreturn_t carm_interrupt(int irq, void *__host)
{
	struct carm_host *host = __host;
	void __iomem *mmio;
	u32 mask;
	int handled = 0;
	unsigned long flags;

	if (!host) {
		VPRINTK("no host\n");
		return IRQ_NONE;
	}

	spin_lock_irqsave(&host->lock, flags);

	mmio = host->mmio;

	/* reading should also clear interrupts */
	mask = readl(mmio + CARM_INT_STAT);

	if (mask == 0 || mask == 0xffffffff) {
		VPRINTK("no work, mask == 0x%x\n", mask);
		goto out;
	}

	if (mask & INT_ACK_MASK)
		writel(mask, mmio + CARM_INT_STAT);

	if (unlikely(host->state == HST_INVALID)) {
		VPRINTK("not initialized yet, mask = 0x%x\n", mask);
		goto out;
	}

	if (mask & CARM_HAVE_RESP) {
		handled = 1;
		carm_handle_responses(host);
	}

out:
	spin_unlock_irqrestore(&host->lock, flags);
	VPRINTK("EXIT\n");
	return IRQ_RETVAL(handled);
}

static void carm_fsm_task (struct work_struct *work)
{
	struct carm_host *host =
		container_of(work, struct carm_host, fsm_task);
	unsigned long flags;
	unsigned int state;
	int rc, i, next_dev;
	int reschedule = 0;
	int new_state = HST_INVALID;

	spin_lock_irqsave(&host->lock, flags);
	state = host->state;
	spin_unlock_irqrestore(&host->lock, flags);

	DPRINTK("ENTER, state == %s\n", state_name[state]);

	switch (state) {
	case HST_PROBE_START:
		new_state = HST_ALLOC_BUF;
		reschedule = 1;
		break;

	case HST_ALLOC_BUF:
		rc = carm_send_special(host, carm_fill_alloc_buf);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_SYNC_TIME:
		rc = carm_send_special(host, carm_fill_sync_time);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_GET_FW_VER:
		rc = carm_send_special(host, carm_fill_get_fw_ver);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_PORT_SCAN:
		rc = carm_send_special(host, carm_fill_scan_channels);
		if (rc) {
			new_state = HST_ERROR;
			reschedule = 1;
		}
		break;

	case HST_DEV_SCAN_START:
		host->cur_scan_dev = -1;
		new_state = HST_DEV_SCAN;
		reschedule = 1;
		break;

	case HST_DEV_SCAN:
		next_dev = -1;
		for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++)
			if (host->dev_present & (1 << i)) {
				next_dev = i;
				break;
			}

		if (next_dev >= 0) {
			host->cur_scan_dev = next_dev;
			rc = carm_array_info(host, next_dev);
			if (rc) {
				new_state = HST_ERROR;
				reschedule = 1;
			}
		} else {
			new_state = HST_DEV_ACTIVATE;
			reschedule = 1;
		}
		break;

	case HST_DEV_ACTIVATE: {
		int activated = 0;
		for (i = 0; i < CARM_MAX_PORTS; i++)
			if (host->dev_active & (1 << i)) {
				struct carm_port *port = &host->port[i];
				struct gendisk *disk = port->disk;

				set_capacity(disk, port->capacity);
				add_disk(disk);
				activated++;
			}

		printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
		       pci_name(host->pdev), activated);

		new_state = HST_PROBE_FINISHED;
		reschedule = 1;
		break;
	}

	case HST_PROBE_FINISHED:
		complete(&host->probe_comp);
		break;

	case HST_ERROR:
		break;

	default:
		/* should never occur */
		printk(KERN_ERR PFX "BUG: unknown state %d\n", state);
		assert(0);
		break;
	}

	if (new_state != HST_INVALID) {
		spin_lock_irqsave(&host->lock, flags);
		host->state = new_state;
		spin_unlock_irqrestore(&host->lock, flags);
	}
	if (reschedule)
		schedule_work(&host->fsm_task);
}

static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit)
{
	unsigned int i;

	for (i = 0; i < 50000; i++) {
		u32 tmp = readl(mmio + CARM_LMUC);
		udelay(100);

		if (test_bit) {
			if ((tmp & bits) == bits)
				return 0;
		} else {
			if ((tmp & bits) == 0)
				return 0;
		}

		cond_resched();
	}

	printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n",
	       bits, test_bit ? "yes" : "no");
	return -EBUSY;
}
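/*
 * Note: the loop above polls CARM_LMUC for up to roughly five seconds
 * (50000 iterations x 100 us), yielding the CPU between reads via
 * cond_resched().
 */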

static void carm_init_responses(struct carm_host *host)
{
	void __iomem *mmio = host->mmio;
	unsigned int i;
	struct carm_response *resp = (struct carm_response *) host->shm;

	for (i = 0; i < RMSG_Q_LEN; i++)
		resp[i].status = cpu_to_le32(0xffffffff);

	writel(0, mmio + CARM_RESP_IDX);
}

static int carm_init_host(struct carm_host *host)
{
	void __iomem *mmio = host->mmio;
	u32 tmp;
	u8 tmp8;
	int rc;

	DPRINTK("ENTER\n");

	writel(0, mmio + CARM_INT_MASK);

	tmp8 = readb(mmio + CARM_INITC);
	if (tmp8 & 0x01) {
		tmp8 &= ~0x01;
		writeb(tmp8, mmio + CARM_INITC);
		readb(mmio + CARM_INITC);	/* flush */

		DPRINTK("snooze...\n");
		msleep(5000);
	}

	tmp = readl(mmio + CARM_HMUC);
	if (tmp & CARM_CME) {
		DPRINTK("CME bit present, waiting\n");
		rc = carm_init_wait(mmio, CARM_CME, 1);
		if (rc) {
			DPRINTK("EXIT, carm_init_wait 1 failed\n");
			return rc;
		}
	}
	if (tmp & CARM_RME) {
		DPRINTK("RME bit present, waiting\n");
		rc = carm_init_wait(mmio, CARM_RME, 1);
		if (rc) {
			DPRINTK("EXIT, carm_init_wait 2 failed\n");
			return rc;
		}
	}

	tmp &= ~(CARM_RME | CARM_CME);
	writel(tmp, mmio + CARM_HMUC);
	readl(mmio + CARM_HMUC);	/* flush */

	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0);
	if (rc) {
		DPRINTK("EXIT, carm_init_wait 3 failed\n");
		return rc;
	}

	carm_init_buckets(mmio);

	writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO);
	writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI);
	writel(RBUF_LEN, mmio + RBUF_BYTE_SZ);

	tmp = readl(mmio + CARM_HMUC);
	tmp |= (CARM_RME | CARM_CME | CARM_WZBC);
	writel(tmp, mmio + CARM_HMUC);
	readl(mmio + CARM_HMUC);	/* flush */

	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1);
	if (rc) {
		DPRINTK("EXIT, carm_init_wait 4 failed\n");
		return rc;
	}

	writel(0, mmio + CARM_HMPHA);
	writel(INT_DEF_MASK, mmio + CARM_INT_MASK);

	carm_init_responses(host);

	/* start initialization, probing state machine */
	spin_lock_irq(&host->lock);
	assert(host->state == HST_INVALID);
	host->state = HST_PROBE_START;
	spin_unlock_irq(&host->lock);
	schedule_work(&host->fsm_task);

	DPRINTK("EXIT\n");
	return 0;
}

static int carm_init_disks(struct carm_host *host)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < CARM_MAX_PORTS; i++) {
		struct gendisk *disk;
		struct request_queue *q;
		struct carm_port *port;

		port = &host->port[i];
		port->host = host;
		port->port_no = i;

		disk = alloc_disk(CARM_MINORS_PER_MAJOR);
		if (!disk) {
			rc = -ENOMEM;
			break;
		}

		port->disk = disk;
		sprintf(disk->disk_name, DRV_NAME "/%u",
			(unsigned int) (host->id * CARM_MAX_PORTS) + i);
		disk->major = host->major;
		disk->first_minor = i * CARM_MINORS_PER_MAJOR;
		disk->fops = &carm_bd_ops;
		disk->private_data = port;

		q = blk_init_queue(carm_rq_fn, &host->lock);
		if (!q) {
			rc = -ENOMEM;
			break;
		}
		disk->queue = q;
		blk_queue_max_segments(q, CARM_MAX_REQ_SG);
		blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);

		q->queuedata = port;
	}

	return rc;
}

static void carm_free_disks(struct carm_host *host)
{
	unsigned int i;

	for (i = 0; i < CARM_MAX_PORTS; i++) {
		struct gendisk *disk = host->port[i].disk;
		if (disk) {
			struct request_queue *q = disk->queue;

			if (disk->flags & GENHD_FL_UP)
				del_gendisk(disk);
			if (q)
				blk_cleanup_queue(q);
			put_disk(disk);
		}
	}
}

static int carm_init_shm(struct carm_host *host)
{
	host->shm = pci_alloc_consistent(host->pdev, CARM_SHM_SIZE,
					 &host->shm_dma);
	if (!host->shm)
		return -ENOMEM;

	host->msg_base = host->shm + RBUF_LEN;
	host->msg_dma = host->shm_dma + RBUF_LEN;

	memset(host->shm, 0xff, RBUF_LEN);
	memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN);

	return 0;
}

static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct carm_host *host;
	unsigned int pci_dac;
	int rc;
	struct request_queue *q;
	unsigned int i;

	printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;

#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
				pci_name(pdev));
			goto err_out_regions;
		}
		pci_dac = 1;
	} else {
#endif
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
				pci_name(pdev));
			goto err_out_regions;
		}
		pci_dac = 0;
#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
	}
#endif

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host) {
		printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n",
		       pci_name(pdev));
		rc = -ENOMEM;
		goto err_out_regions;
	}

	host->pdev = pdev;
	host->flags = pci_dac ? FL_DAC : 0;
	spin_lock_init(&host->lock);
	INIT_WORK(&host->fsm_task, carm_fsm_task);
	init_completion(&host->probe_comp);

	for (i = 0; i < ARRAY_SIZE(host->req); i++)
		host->req[i].tag = i;

	host->mmio = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
	if (!host->mmio) {
		printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n",
		       pci_name(pdev));
		rc = -ENOMEM;
		goto err_out_kfree;
	}

	rc = carm_init_shm(host);
	if (rc) {
		printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	q = blk_init_queue(carm_oob_rq_fn, &host->lock);
	if (!q) {
		printk(KERN_ERR DRV_NAME "(%s): OOB queue alloc failure\n",
		       pci_name(pdev));
		rc = -ENOMEM;
		goto err_out_pci_free;
	}
	host->oob_q = q;
	q->queuedata = host;

	/*
	 * Figure out which major to use: 160, 161, or dynamic
	 */
	if (!test_and_set_bit(0, &carm_major_alloc))
		host->major = 160;
	else if (!test_and_set_bit(1, &carm_major_alloc))
		host->major = 161;
	else
		host->flags |= FL_DYN_MAJOR;

	host->id = carm_host_id;
	sprintf(host->name, DRV_NAME "%d", carm_host_id);

	rc = register_blkdev(host->major, host->name);
	if (rc < 0)
		goto err_out_free_majors;
	if (host->flags & FL_DYN_MAJOR)
		host->major = rc;

	rc = carm_init_disks(host);
	if (rc)
		goto err_out_blkdev_disks;

	pci_set_master(pdev);

	rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host);
	if (rc) {
		printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
		       pci_name(pdev));
		goto err_out_blkdev_disks;
	}

	rc = carm_init_host(host);
	if (rc)
		goto err_out_free_irq;

	DPRINTK("waiting for probe_comp\n");
	wait_for_completion(&host->probe_comp);

	printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
	       host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
	       (unsigned long long) pci_resource_start(pdev, 0),
	       pdev->irq, host->major);

	carm_host_id++;
	pci_set_drvdata(pdev, host);
	return 0;

err_out_free_irq:
	free_irq(pdev->irq, host);
err_out_blkdev_disks:
	carm_free_disks(host);
	unregister_blkdev(host->major, host->name);
err_out_free_majors:
	if (host->major == 160)
		clear_bit(0, &carm_major_alloc);
	else if (host->major == 161)
		clear_bit(1, &carm_major_alloc);
	blk_cleanup_queue(host->oob_q);
err_out_pci_free:
	pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
err_out_iounmap:
	iounmap(host->mmio);
err_out_kfree:
	kfree(host);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	pci_disable_device(pdev);
	return rc;
}

static void carm_remove_one (struct pci_dev *pdev)
{
	struct carm_host *host = pci_get_drvdata(pdev);

	if (!host) {
		printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
		       pci_name(pdev));
		return;
	}

	free_irq(pdev->irq, host);
	carm_free_disks(host);
	unregister_blkdev(host->major, host->name);
	if (host->major == 160)
		clear_bit(0, &carm_major_alloc);
	else if (host->major == 161)
		clear_bit(1, &carm_major_alloc);
	blk_cleanup_queue(host->oob_q);
	pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
	iounmap(host->mmio);
	kfree(host);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int __init carm_init(void)
{
	return pci_register_driver(&carm_driver);
}

static void __exit carm_exit(void)
{
	pci_unregister_driver(&carm_driver);
}

module_init(carm_init);
module_exit(carm_exit);