/*
 * Marvell 88SE64xx/88SE94xx pci init
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include "mv_sas.h"

static struct scsi_transport_template *mvs_stt;
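/*
 * Per-variant chip parameters (host and phy counts, command-table
 * geometry) plus the register-access dispatch table that selects the
 * 64xx or 94xx family implementation.
 */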
static const struct mvs_chip_info mvs_chips[] = {
	[chip_6320] =	{ 1, 2, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
	[chip_6440] =	{ 1, 4, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
	[chip_6485] =	{ 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
	[chip_9180] =	{ 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
	[chip_9480] =	{ 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
	[chip_1300] =	{ 1, 4, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
	[chip_1320] =	{ 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
};

#define SOC_SAS_NUM 2
#define SG_MX 64

static struct scsi_host_template mvs_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= mvs_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= mvs_scan_finished,
	.scan_start		= mvs_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_MX,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= mvs_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};

static struct sas_domain_function_template mvs_transport_ops = {
	.lldd_dev_found		= mvs_dev_found,
	.lldd_dev_gone		= mvs_dev_gone,
	.lldd_execute_task	= mvs_queue_command,
	.lldd_control_phy	= mvs_phy_control,

	.lldd_abort_task	= mvs_abort_task,
	.lldd_abort_task_set	= mvs_abort_task_set,
	.lldd_clear_aca		= mvs_clear_aca,
	.lldd_clear_task_set	= mvs_clear_task_set,
	.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
	.lldd_lu_reset		= mvs_lu_reset,
	.lldd_query_task	= mvs_query_task,
	.lldd_port_formed	= mvs_port_formed,
	.lldd_port_deformed	= mvs_port_deformed,
};

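/* Initialize one phy's driver-private state and its libsas asd_sas_phy. */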
static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
	struct mvs_phy *phy = &mvi->phy[phy_id];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->mvi = mvi;
	init_timer(&phy->timer);
	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;

	sas_phy->id = phy_id;
	sas_phy->sas_addr = &mvi->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
	sas_phy->lldd_phy = phy;
}

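/*
 * Release everything owned by one mvs_info: per-slot command buffers,
 * the DMA rings, mapped registers, pending delayed work, and the host
 * reference itself.
 */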
static void mvs_free(struct mvs_info *mvi)
{
	int i;
	struct mvs_wq *mwq;
	int slot_nr;

	if (!mvi)
		return;

	if (mvi->flags & MVF_FLAG_SOC)
		slot_nr = MVS_SOC_SLOTS;
	else
		slot_nr = MVS_SLOTS;

	for (i = 0; i < mvi->tags_num; i++) {
		struct mvs_slot_info *slot = &mvi->slot_info[i];
		if (slot->buf)
			dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
					  slot->buf, slot->buf_dma);
	}

	if (mvi->tx)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				  mvi->tx, mvi->tx_dma);
	if (mvi->rx_fis)
		dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
				  mvi->rx_fis, mvi->rx_fis_dma);
	if (mvi->rx)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
				  mvi->rx, mvi->rx_dma);
	if (mvi->slot)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->slot) * slot_nr,
				  mvi->slot, mvi->slot_dma);
#ifndef DISABLE_HOTPLUG_DMA_FIX
	if (mvi->bulk_buffer)
		dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
				  mvi->bulk_buffer, mvi->bulk_buffer_dma);
#endif

	MVS_CHIP_DISP->chip_iounmap(mvi);
	if (mvi->shost)
		scsi_host_put(mvi->shost);
	list_for_each_entry(mwq, &mvi->wq_list, entry)
		cancel_delayed_work(&mwq->work_q);
	kfree(mvi);
}

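/*
 * Optional tasklet mode: the hard IRQ handler only schedules this
 * tasklet, which then polls every core's interrupt status and services
 * whatever is pending.
 */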
#ifdef MVS_USE_TASKLET
struct tasklet_struct	mv_tasklet;
static void mvs_tasklet(unsigned long opaque)
{
	u32 stat;
	u16 core_nr, i = 0;

	struct mvs_info *mvi;
	struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

	BUG_ON(!mvi);

	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
		if (stat)
			MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
	}
}
#endif

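/*
 * Shared IRQ handler: read the status once, then either defer to the
 * tasklet or service each core directly.
 */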
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
	u32 core_nr, i = 0;
	u32 stat;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = opaque;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

	if (unlikely(!mvi))
		return IRQ_NONE;

	stat = MVS_CHIP_DISP->isr_status(mvi, irq);
	if (!stat)
		return IRQ_NONE;

#ifdef MVS_USE_TASKLET
	tasklet_schedule(&mv_tasklet);
#else
	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		MVS_CHIP_DISP->isr(mvi, irq, stat);
	}
#endif
	return IRQ_HANDLED;
}

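/*
 * Initialize per-host software state (lock, phys, device table) and
 * allocate the coherent DMA areas: TX/RX rings, received-FIS area, the
 * slot table, and one command buffer per slot. Returns 0 on success,
 * 1 on failure.
 */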
static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
	int i = 0, slot_nr;

	if (mvi->flags & MVF_FLAG_SOC)
		slot_nr = MVS_SOC_SLOTS;
	else
		slot_nr = MVS_SLOTS;

	spin_lock_init(&mvi->lock);
	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvs_phy_init(mvi, i);
		mvi->port[i].wide_port_phymap = 0;
		mvi->port[i].port_attached = 0;
		INIT_LIST_HEAD(&mvi->port[i].list);
	}
	for (i = 0; i < MVS_MAX_DEVICES; i++) {
		mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
		mvi->devices[i].dev_type = NO_DEVICE;
		mvi->devices[i].device_id = i;
		mvi->devices[i].dev_status = MVS_DEV_NORMAL;
		init_timer(&mvi->devices[i].timer);
	}

	/*
	 * alloc and init our DMA areas
	 */
	mvi->tx = dma_alloc_coherent(mvi->dev,
				     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				     &mvi->tx_dma, GFP_KERNEL);
	if (!mvi->tx)
		goto err_out;
	memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
	mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
					 &mvi->rx_fis_dma, GFP_KERNEL);
	if (!mvi->rx_fis)
		goto err_out;
	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);

	mvi->rx = dma_alloc_coherent(mvi->dev,
				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
				     &mvi->rx_dma, GFP_KERNEL);
	if (!mvi->rx)
		goto err_out;
	memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
	mvi->rx[0] = cpu_to_le32(0xfff);
	mvi->rx_cons = 0xfff;

	mvi->slot = dma_alloc_coherent(mvi->dev,
				       sizeof(*mvi->slot) * slot_nr,
				       &mvi->slot_dma, GFP_KERNEL);
	if (!mvi->slot)
		goto err_out;
	memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);

#ifndef DISABLE_HOTPLUG_DMA_FIX
	mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
				       TRASH_BUCKET_SIZE,
				       &mvi->bulk_buffer_dma, GFP_KERNEL);
	if (!mvi->bulk_buffer)
		goto err_out;
#endif
	for (i = 0; i < slot_nr; i++) {
		struct mvs_slot_info *slot = &mvi->slot_info[i];

		slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
					       &slot->buf_dma, GFP_KERNEL);
		if (!slot->buf) {
			printk(KERN_DEBUG "failed to allocate slot->buf.\n");
			goto err_out;
		}
		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
		++mvi->tags_num;
	}
	/* Initialize tags */
	mvs_tag_init(mvi);
	return 0;
err_out:
	return 1;
}

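/*
 * Map the main register BAR, and the extended BAR when bar_ex is not -1,
 * into kernel address space. Returns 0 on success, -1 on failure.
 */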
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
{
	unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
	struct pci_dev *pdev = mvi->pdev;

	if (bar_ex != -1) {
		/*
		 * ioremap main and peripheral registers
		 */
		res_start = pci_resource_start(pdev, bar_ex);
		res_len = pci_resource_len(pdev, bar_ex);
		if (!res_start || !res_len)
			goto err_out;

		res_flag_ex = pci_resource_flags(pdev, bar_ex);
		if (res_flag_ex & IORESOURCE_MEM) {
			if (res_flag_ex & IORESOURCE_CACHEABLE)
				mvi->regs_ex = ioremap(res_start, res_len);
			else
				mvi->regs_ex = ioremap_nocache(res_start,
						res_len);
		} else
			mvi->regs_ex = (void *)res_start;
		if (!mvi->regs_ex)
			goto err_out;
	}

	res_start = pci_resource_start(pdev, bar);
	res_len = pci_resource_len(pdev, bar);
	if (!res_start || !res_len)
		goto err_out;

	res_flag = pci_resource_flags(pdev, bar);
	if (res_flag & IORESOURCE_CACHEABLE)
		mvi->regs = ioremap(res_start, res_len);
	else
		mvi->regs = ioremap_nocache(res_start, res_len);

	if (!mvi->regs) {
		if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
			iounmap(mvi->regs_ex);
		mvi->regs_ex = NULL;
		goto err_out;
	}

	return 0;
err_out:
	return -1;
}

void mvs_iounmap(void __iomem *regs)
{
	iounmap(regs);
}

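/*
 * Allocate and wire up one mvs_info instance for a PCI-attached core,
 * registering it in the HA's private per-core array under index @id.
 */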
static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
				const struct pci_device_id *ent,
				struct Scsi_Host *shost, unsigned int id)
{
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
			GFP_KERNEL);
	if (!mvi)
		return NULL;

	mvi->pdev = pdev;
	mvi->dev = &pdev->dev;
	mvi->chip_id = ent->driver_data;
	mvi->chip = &mvs_chips[mvi->chip_id];
	INIT_LIST_HEAD(&mvi->wq_list);
	mvi->irq = pdev->irq;

	((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
	((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;

	mvi->id = id;
	mvi->sas = sha;
	mvi->shost = shost;
#ifdef MVS_USE_TASKLET
	tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
#endif

	if (MVS_CHIP_DISP->chip_ioremap(mvi))
		goto err_out;
	if (!mvs_alloc(mvi, shost))
		return mvi;
err_out:
	mvs_free(mvi);
	return NULL;
}

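/* Prefer 64-bit DMA masks, falling back to 32-bit where necessary. */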
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

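/*
 * First-stage SAS HA setup: size the phy/port pointer arrays for all
 * cores, allocate the LLDD private data, and set the Scsi_Host limits.
 */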
static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
				const struct mvs_chip_info *chip_info)
{
	int phy_nr, port_nr;
	unsigned short core_nr;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	core_nr = chip_info->n_host;
	phy_nr  = core_nr * chip_info->n_phy;
	port_nr = phy_nr;

	memset(sha, 0x00, sizeof(struct sas_ha_struct));
	arr_phy  = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port)
		goto exit_free;

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->core.shost = shost;

	sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
	if (!sha->lldd_ha)
		goto exit_free;

	((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;

	shost->transportt = mvs_stt;
	shost->max_id = 128;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;

	return 0;
exit_free:
	kfree(arr_phy);
	kfree(arr_port);
	return -1;
}

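/*
 * Second-stage SAS HA setup, run once every core's mvs_info exists:
 * publish each core's phys and ports to libsas and set queue depths.
 */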
static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
			const struct mvs_chip_info *chip_info)
{
	int can_queue, i = 0, j = 0;
	struct mvs_info *mvi = NULL;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < nr_core; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < chip_info->n_phy; i++) {
			sha->sas_phy[j * chip_info->n_phy + i] =
				&mvi->phy[i].sas_phy;
			sha->sas_port[j * chip_info->n_phy + i] =
				&mvi->port[i].sas_port;
		}
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = mvi->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &mvi->sas_addr[0];

	sha->num_phys = nr_core * chip_info->n_phy;

	sha->lldd_max_execute_num = 1;

	if (mvi->flags & MVF_FLAG_SOC)
		can_queue = MVS_SOC_CAN_QUEUE;
	else
		can_queue = MVS_CAN_QUEUE;

	sha->lldd_queue_size = can_queue;
	shost->can_queue = can_queue;
	mvi->shost->cmd_per_lun = MVS_SLOTS / sha->num_phys;
	sha->core.shost = mvi->shost;
}

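/*
 * Assign the default SAS address to every phy, stored big-endian, and
 * cache a copy as the HA-wide address.
 */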
static void mvs_init_sas_add(struct mvs_info *mvi)
{
	u8 i;

	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
		mvi->phy[i].dev_sas_addr =
			cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
	}

	memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
}

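/*
 * PCI probe: enable the device, set DMA masks, allocate the SAS HA and
 * one mvs_info per core, then register with the SCSI midlayer and
 * libsas before enabling interrupts and scanning.
 */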
static int __devinit mvs_pci_init(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	int rc;
	unsigned int nhost = 0;
	struct mvs_info *mvi;
	irq_handler_t irq_handler = mvs_interrupt;
	struct Scsi_Host *shost = NULL;
	const struct mvs_chip_info *chip;

	dev_printk(KERN_INFO, &pdev->dev,
		"mvsas: driver version %s\n", DRV_VERSION);
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_enable;

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;

	rc = pci_go_64(pdev);
	if (rc)
		goto err_out_regions;

	shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	chip = &mvs_chips[ent->driver_data];
	SHOST_TO_SAS_HA(shost) =
		kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
	if (!SHOST_TO_SAS_HA(shost)) {
		/* hosts from scsi_host_alloc() must be released with
		 * scsi_host_put(), not kfree() */
		scsi_host_put(shost);
		rc = -ENOMEM;
		goto err_out_regions;
	}

	rc = mvs_prep_sas_ha_init(shost, chip);
	if (rc) {
		scsi_host_put(shost);
		rc = -ENOMEM;
		goto err_out_regions;
	}

	pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));

	do {
		mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
		if (!mvi) {
			rc = -ENOMEM;
			goto err_out_regions;
		}

		mvs_init_sas_add(mvi);

		mvi->instance = nhost;
		rc = MVS_CHIP_DISP->chip_init(mvi);
		if (rc) {
			mvs_free(mvi);
			goto err_out_regions;
		}
		nhost++;
	} while (nhost < chip->n_host);
#ifdef MVS_USE_TASKLET
	tasklet_init(&mv_tasklet, mvs_tasklet,
		     (unsigned long)SHOST_TO_SAS_HA(shost));
#endif

	mvs_post_sas_ha_init(shost, chip);

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_shost;

	rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
	if (rc)
		goto err_out_shost;
	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
		DRV_NAME, SHOST_TO_SAS_HA(shost));
	if (rc)
		goto err_not_sas;

	MVS_CHIP_DISP->interrupt_enable(mvi);

	scsi_scan_host(mvi->shost);

	return 0;

err_not_sas:
	sas_unregister_ha(SHOST_TO_SAS_HA(shost));
err_out_shost:
	scsi_remove_host(mvi->shost);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_enable:
	return rc;
}

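/* PCI remove: tear down in roughly the reverse order of mvs_pci_init. */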
static void __devexit mvs_pci_remove(struct pci_dev *pdev)
{
	unsigned short core_nr, i = 0;
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct mvs_info *mvi = NULL;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

#ifdef MVS_USE_TASKLET
	tasklet_kill(&mv_tasklet);
#endif

	pci_set_drvdata(pdev, NULL);
	sas_unregister_ha(sha);
	sas_remove_host(mvi->shost);
	scsi_remove_host(mvi->shost);

	MVS_CHIP_DISP->interrupt_disable(mvi);
	free_irq(mvi->irq, sha);
	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		mvs_free(mvi);
	}
	kfree(sha->sas_phy);
	kfree(sha->sas_port);
	kfree(sha);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_device_id __devinitdata mvs_pci_table[] = {
	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
	{
		.vendor		= PCI_VENDOR_ID_MARVELL,
		.device		= 0x6440,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= 0x6480,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= chip_6485,
	},
	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
	{ PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
	{ PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
	{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },

	{ }	/* terminate list */
};

static struct pci_driver mvs_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mvs_pci_table,
	.probe		= mvs_pci_init,
	.remove		= __devexit_p(mvs_pci_remove),
};

/* task handler */
struct task_struct *mvs_th;
static int __init mvs_init(void)
{
	int rc;

	mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
	if (!mvs_stt)
		return -ENOMEM;

	rc = pci_register_driver(&mvs_pci_driver);
	if (rc)
		goto err_out;

	return 0;

err_out:
	sas_release_transport(mvs_stt);
	return rc;
}

static void __exit mvs_exit(void)
{
	pci_unregister_driver(&mvs_pci_driver);
	sas_release_transport(mvs_stt);
}

module_init(mvs_init);
module_exit(mvs_exit);

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
#ifdef CONFIG_PCI
MODULE_DEVICE_TABLE(pci, mvs_pci_table);
#endif