/*
 * Linux driver for VMware's para-virtualized SCSI HBA.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by: Alok N Kataria <akataria@vmware.com>
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

#include "vmw_pvscsi.h"

#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"

MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
MODULE_AUTHOR("VMware, Inc.");
MODULE_LICENSE("GPL");
MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);

#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING	8
#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING	1
#define PVSCSI_DEFAULT_QUEUE_DEPTH		64
#define SGL_SIZE				PAGE_SIZE

struct pvscsi_sg_list {
	struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
};

struct pvscsi_ctx {
	/*
	 * The index of the context in cmd_map serves as the context ID for a
	 * 1-to-1 mapping of completions back to requests.
	 */
	struct scsi_cmnd	*cmd;
	struct pvscsi_sg_list	*sgl;
	struct list_head	list;
	dma_addr_t		dataPA;
	dma_addr_t		sensePA;
	dma_addr_t		sglPA;
};

struct pvscsi_adapter {
	char				*mmioBase;
	unsigned int			irq;
	u8				rev;
	bool				use_msi;
	bool				use_msix;
	bool				use_msg;

	spinlock_t			hw_lock;

	struct workqueue_struct		*workqueue;
	struct work_struct		work;

	struct PVSCSIRingReqDesc	*req_ring;
	unsigned			req_pages;
	unsigned			req_depth;
	dma_addr_t			reqRingPA;

	struct PVSCSIRingCmpDesc	*cmp_ring;
	unsigned			cmp_pages;
	dma_addr_t			cmpRingPA;

	struct PVSCSIRingMsgDesc	*msg_ring;
	unsigned			msg_pages;
	dma_addr_t			msgRingPA;

	struct PVSCSIRingsState	*rings_state;
	dma_addr_t			ringStatePA;

	struct pci_dev			*dev;
	struct Scsi_Host		*host;

	struct list_head		cmd_pool;
	struct pvscsi_ctx		*cmd_map;
};


/* Command line parameters */
static int pvscsi_ring_pages     = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
static int pvscsi_cmd_per_lun    = PVSCSI_DEFAULT_QUEUE_DEPTH;
static bool pvscsi_disable_msi;
static bool pvscsi_disable_msix;
static bool pvscsi_use_msg       = true;

#define PVSCSI_RW (S_IRUSR | S_IWUSR)

module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");

module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");

module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
		 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");

module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");

static const struct pci_device_id pvscsi_pci_tbl[] = {
	{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);

static struct device *
pvscsi_dev(const struct pvscsi_adapter *adapter)
{
	return &(adapter->dev->dev);
}

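/*
 * Find the context that owns @cmd by scanning cmd_map.  This linear search
 * is only used on the error-handling (abort) path, so O(req_depth) is fine.
 */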
static struct pvscsi_ctx *
pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
	struct pvscsi_ctx *ctx, *end;

	end = &adapter->cmd_map[adapter->req_depth];
	for (ctx = adapter->cmd_map; ctx < end; ctx++)
		if (ctx->cmd == cmd)
			return ctx;

	return NULL;
}

static struct pvscsi_ctx *
pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
	struct pvscsi_ctx *ctx;

	if (list_empty(&adapter->cmd_pool))
		return NULL;

	ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
	ctx->cmd = cmd;
	list_del(&ctx->list);

	return ctx;
}

static void pvscsi_release_context(struct pvscsi_adapter *adapter,
				   struct pvscsi_ctx *ctx)
{
	ctx->cmd = NULL;
	list_add(&ctx->list, &adapter->cmd_pool);
}

/*
 * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
 * non-zero integer. ctx always points to an entry in the cmd_map array,
 * hence the return value is always >= 1.
 */
static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
			      const struct pvscsi_ctx *ctx)
{
	return ctx - adapter->cmd_map + 1;
}

static struct pvscsi_ctx *
pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
{
	return &adapter->cmd_map[context - 1];
}

static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
			     u32 offset, u32 val)
{
	writel(val, adapter->mmioBase + offset);
}

static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
{
	return readl(adapter->mmioBase + offset);
}

static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
{
	return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
}

static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
				     u32 val)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}

static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
{
	u32 intr_bits;

	intr_bits = PVSCSI_INTR_CMPL_MASK;
	if (adapter->use_msg)
		intr_bits |= PVSCSI_INTR_MSG_MASK;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
}

static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
}

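/*
 * Issue a command to the device: write the opcode to the COMMAND register,
 * then feed the descriptor payload to the COMMAND_DATA register one 32-bit
 * word at a time.
 */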
static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
				  u32 cmd, const void *desc, size_t len)
{
	const u32 *ptr = desc;
	size_t i;

	len /= sizeof(*ptr);
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
	for (i = 0; i < len; i++)
		pvscsi_reg_write(adapter,
				 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
}

static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
			     const struct pvscsi_ctx *ctx)
{
	struct PVSCSICmdDescAbortCmd cmd = { 0 };

	cmd.target = ctx->cmd->device->id;
	cmd.context = pvscsi_map_context(adapter, ctx);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
}

static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
}

static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
}

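/*
 * READ/WRITE CDBs are kicked via the KICK_RW_IO doorbell; all other commands
 * use KICK_NON_RW_IO, which makes the emulation process the request ring
 * immediately (the reset paths call pvscsi_process_request_ring directly for
 * the same reason).
 */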
static int scsi_is_rw(unsigned char op)
{
	return op == READ_6  || op == WRITE_6 ||
	       op == READ_10 || op == WRITE_10 ||
	       op == READ_12 || op == WRITE_12 ||
	       op == READ_16 || op == WRITE_16;
}

static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
			   unsigned char op)
{
	if (scsi_is_rw(op))
		pvscsi_kick_rw_io(adapter);
	else
		pvscsi_process_request_ring(adapter);
}

static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
{
	dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
}

static void ll_bus_reset(const struct pvscsi_adapter *adapter)
{
	dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
}

static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
{
	struct PVSCSICmdDescResetDevice cmd = { 0 };

	dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);

	cmd.target = target;

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
			      &cmd, sizeof(cmd));
}

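/*
 * Fill the per-context SG list (a single SGL_SIZE page of PVSCSISGElement
 * entries) from the DMA-mapped kernel scatterlist.
 */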
static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
			     struct scatterlist *sg, unsigned count)
{
	unsigned i;
	struct PVSCSISGElement *sge;

	BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);

	sge = &ctx->sgl->sge[0];
	for (i = 0; i < count; i++, sg++) {
		sge[i].addr   = sg_dma_address(sg);
		sge[i].length = sg_dma_len(sg);
		sge[i].flags  = 0;
	}
}

/*
 * Map all data buffers for a command into PCI space and
 * set up the scatter/gather list if needed.
 */
static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
			       struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
			       struct PVSCSIRingReqDesc *e)
{
	unsigned count;
	unsigned bufflen = scsi_bufflen(cmd);
	struct scatterlist *sg;

	e->dataLen = bufflen;
	e->dataAddr = 0;
	if (bufflen == 0)
		return;

	sg = scsi_sglist(cmd);
	count = scsi_sg_count(cmd);
	if (count != 0) {
		int segs = scsi_dma_map(cmd);
		if (segs > 1) {
			pvscsi_create_sg(ctx, sg, segs);

			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
						    SGL_SIZE, PCI_DMA_TODEVICE);
			e->dataAddr = ctx->sglPA;
		} else
			e->dataAddr = sg_dma_address(sg);
	} else {
		/*
		 * In case there is no S/G list, scsi_sglist points
		 * directly to the buffer.
		 */
		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
					     cmd->sc_data_direction);
		e->dataAddr = ctx->dataPA;
	}
}

static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
				 struct pvscsi_ctx *ctx)
{
	struct scsi_cmnd *cmd;
	unsigned bufflen;

	cmd = ctx->cmd;
	bufflen = scsi_bufflen(cmd);

	if (bufflen != 0) {
		unsigned count = scsi_sg_count(cmd);

		if (count != 0) {
			scsi_dma_unmap(cmd);
			if (ctx->sglPA) {
				pci_unmap_single(adapter->dev, ctx->sglPA,
						 SGL_SIZE, PCI_DMA_TODEVICE);
				ctx->sglPA = 0;
			}
		} else
			pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
					 cmd->sc_data_direction);
	}
	if (cmd->sense_buffer)
		pci_unmap_single(adapter->dev, ctx->sensePA,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

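/*
 * The device is handed the rings as physical page numbers (PPNs), so the
 * rings state page and every ring buffer must be page-aligned; the BUG_ON()s
 * below assert this.
 */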
static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
	adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
						    &adapter->ringStatePA);
	if (!adapter->rings_state)
		return -ENOMEM;

	adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
				 pvscsi_ring_pages);
	adapter->req_depth = adapter->req_pages
					* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	adapter->req_ring = pci_alloc_consistent(adapter->dev,
						 adapter->req_pages * PAGE_SIZE,
						 &adapter->reqRingPA);
	if (!adapter->req_ring)
		return -ENOMEM;

	adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
				 pvscsi_ring_pages);
	adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
						 adapter->cmp_pages * PAGE_SIZE,
						 &adapter->cmpRingPA);
	if (!adapter->cmp_ring)
		return -ENOMEM;

	BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));

	if (!adapter->use_msg)
		return 0;

	adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
				 pvscsi_msg_ring_pages);
	adapter->msg_ring = pci_alloc_consistent(adapter->dev,
						 adapter->msg_pages * PAGE_SIZE,
						 &adapter->msgRingPA);
	if (!adapter->msg_ring)
		return -ENOMEM;
	BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));

	return 0;
}

static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
{
	struct PVSCSICmdDescSetupRings cmd = { 0 };
	dma_addr_t base;
	unsigned i;

	cmd.ringsStatePPN   = adapter->ringStatePA >> PAGE_SHIFT;
	cmd.reqRingNumPages = adapter->req_pages;
	cmd.cmpRingNumPages = adapter->cmp_pages;

	base = adapter->reqRingPA;
	for (i = 0; i < adapter->req_pages; i++) {
		cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
		base += PAGE_SIZE;
	}

	base = adapter->cmpRingPA;
	for (i = 0; i < adapter->cmp_pages; i++) {
		cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
		base += PAGE_SIZE;
	}

	memset(adapter->rings_state, 0, PAGE_SIZE);
	memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
	memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
			      &cmd, sizeof(cmd));

	if (adapter->use_msg) {
		struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };

		cmd_msg.numPages = adapter->msg_pages;

		base = adapter->msgRingPA;
		for (i = 0; i < adapter->msg_pages; i++) {
			cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
			base += PAGE_SIZE;
		}
		memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);

		pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
				      &cmd_msg, sizeof(cmd_msg));
	}
}

/*
 * Pull a completion descriptor off and pass the completion back
 * to the SCSI mid layer.
 */
static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
				    const struct PVSCSIRingCmpDesc *e)
{
	struct pvscsi_ctx *ctx;
	struct scsi_cmnd *cmd;
	u32 btstat = e->hostStatus;
	u32 sdstat = e->scsiStatus;

	ctx = pvscsi_get_context(adapter, e->context);
	cmd = ctx->cmd;
	pvscsi_unmap_buffers(adapter, ctx);
	pvscsi_release_context(adapter, ctx);
	cmd->result = 0;

	if (sdstat != SAM_STAT_GOOD &&
	    (btstat == BTSTAT_SUCCESS ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
		cmd->result = (DID_OK << 16) | sdstat;
		if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
			cmd->result |= (DRIVER_SENSE << 24);
	} else
		switch (btstat) {
		case BTSTAT_SUCCESS:
		case BTSTAT_LINKED_COMMAND_COMPLETED:
		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
			/* If everything went fine, let's move on..  */
			cmd->result = (DID_OK << 16);
			break;

		case BTSTAT_DATARUN:
		case BTSTAT_DATA_UNDERRUN:
			/* Report residual data in underruns */
			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
			cmd->result = (DID_ERROR << 16);
			break;

		case BTSTAT_SELTIMEO:
			/* Our emulation returns this for non-connected devs */
			cmd->result = (DID_BAD_TARGET << 16);
			break;

		case BTSTAT_LUNMISMATCH:
		case BTSTAT_TAGREJECT:
		case BTSTAT_BADMSG:
			cmd->result = (DRIVER_INVALID << 24);
			/* fall through */

		case BTSTAT_HAHARDWARE:
		case BTSTAT_INVPHASE:
		case BTSTAT_HATIMEOUT:
		case BTSTAT_NORESPONSE:
		case BTSTAT_DISCONNECT:
		case BTSTAT_HASOFTWARE:
		case BTSTAT_BUSFREE:
		case BTSTAT_SENSFAILED:
			cmd->result |= (DID_ERROR << 16);
			break;

		case BTSTAT_SENTRST:
		case BTSTAT_RECVRST:
		case BTSTAT_BUSRESET:
			cmd->result = (DID_RESET << 16);
			break;

		case BTSTAT_ABORTQUEUE:
			cmd->result = (DID_ABORT << 16);
			break;

		case BTSTAT_SCSIPARITY:
			cmd->result = (DID_PARITY << 16);
			break;

		default:
			cmd->result = (DID_ERROR << 16);
			scmd_printk(KERN_DEBUG, cmd,
				    "Unknown completion status: 0x%x\n",
				    btstat);
	}

	dev_dbg(&cmd->device->sdev_gendev,
		"cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
		cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);

	cmd->scsi_done(cmd);
}

/*
 * Barrier usage: since the PVSCSI device is emulated, there could be cases
 * where we may want to serialize some accesses between the driver and the
 * emulation layer. We use compiler barriers instead of the more expensive
 * memory barriers because PVSCSI is only supported on x86, which has strong
 * memory access ordering.
 */
static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
	u32 cmp_entries = s->cmpNumEntriesLog2;

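	/*
	 * The producer/consumer indices are free-running; MASK() (from
	 * vmw_pvscsi.h) reduces them to a slot in the power-of-two sized ring.
	 */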
	while (s->cmpConsIdx != s->cmpProdIdx) {
		struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
						      MASK(cmp_entries));
		/*
		 * This barrier() ensures that *e is not dereferenced while
		 * the device emulation still writes data into the slot.
		 * Since the device emulation advances s->cmpProdIdx only after
		 * updating the slot we want to check it first.
		 */
		barrier();
		pvscsi_complete_request(adapter, e);
		/*
		 * This barrier() ensures that the compiler doesn't reorder the
		 * write to s->cmpConsIdx before the read of (*e) inside
		 * pvscsi_complete_request. Otherwise, the device emulation may
		 * overwrite *e before we had a chance to read it.
		 */
		barrier();
		s->cmpConsIdx++;
	}
}

/*
 * Translate a Linux SCSI request into a request ring entry.
 */
static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
			     struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
{
	struct PVSCSIRingsState *s;
	struct PVSCSIRingReqDesc *e;
	struct scsi_device *sdev;
	u32 req_entries;

	s = adapter->rings_state;
	sdev = cmd->device;
	req_entries = s->reqNumEntriesLog2;

	/*
	 * If this condition holds, we might have room on the request ring, but
	 * we might not have room on the completion ring for the response.
	 * However, we have already ruled out this possibility - we would not
	 * have successfully allocated a context if it were true, since we only
	 * have one context per request entry.  Check for it anyway, since it
	 * would be a serious bug.
	 */
	if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
		scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
			    "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
			    s->reqProdIdx, s->cmpConsIdx);
		return -1;
	}

	e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));

	e->bus    = sdev->channel;
	e->target = sdev->id;
	memset(e->lun, 0, sizeof(e->lun));
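	/*
	 * Single-level LUN addressing: the LUN number goes in byte 1 of the
	 * 8-byte LUN field.
	 */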
	e->lun[1] = sdev->lun;

	if (cmd->sense_buffer) {
		ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      PCI_DMA_FROMDEVICE);
		e->senseAddr = ctx->sensePA;
		e->senseLen = SCSI_SENSE_BUFFERSIZE;
	} else {
		e->senseLen  = 0;
		e->senseAddr = 0;
	}
	e->cdbLen   = cmd->cmd_len;
	e->vcpuHint = smp_processor_id();
	memcpy(e->cdb, cmd->cmnd, e->cdbLen);

	e->tag = SIMPLE_QUEUE_TAG;
	if (sdev->tagged_supported &&
	    (cmd->tag == HEAD_OF_QUEUE_TAG ||
	     cmd->tag == ORDERED_QUEUE_TAG))
		e->tag = cmd->tag;

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
	else if (cmd->sc_data_direction == DMA_NONE)
		e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
	else
		e->flags = 0;

	pvscsi_map_buffers(adapter, ctx, cmd, e);

	e->context = pvscsi_map_context(adapter, ctx);

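	/*
	 * Make sure the descriptor is fully written before publishing it to
	 * the device by bumping reqProdIdx (compiler barrier only; see the
	 * comment above pvscsi_process_completion_ring).
	 */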
	barrier();

	s->reqProdIdx++;

	return 0;
}

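/*
 * queuecommand entry point: acquire a free context and post the request on
 * the request ring under hw_lock; the doorbell is rung after the lock is
 * dropped.
 */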
static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;

	spin_lock_irqsave(&adapter->hw_lock, flags);

	ctx = pvscsi_acquire_context(adapter, cmd);
	if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
		if (ctx)
			pvscsi_release_context(adapter, ctx);
		spin_unlock_irqrestore(&adapter->hw_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	cmd->scsi_done = done;

	dev_dbg(&cmd->device->sdev_gendev,
		"queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	pvscsi_kick_io(adapter, cmd->cmnd[0]);

	return 0;
}

static int pvscsi_abort(struct scsi_cmnd *cmd)
{
	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;

	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
		    adapter->host->host_no, cmd);

	spin_lock_irqsave(&adapter->hw_lock, flags);

	/*
	 * Poll the completion ring first - we might be trying to abort
	 * a command that is waiting to be dispatched in the completion ring.
	 */
	pvscsi_process_completion_ring(adapter);

	/*
	 * If there is no context for the command, it either already succeeded
	 * or else was never properly issued.  Not our problem.
	 */
	ctx = pvscsi_find_context(adapter, cmd);
	if (!ctx) {
		scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
		goto out;
	}

	pvscsi_abort_cmd(adapter, ctx);

	pvscsi_process_completion_ring(adapter);

out:
	spin_unlock_irqrestore(&adapter->hw_lock, flags);
	return SUCCESS;
}

/*
 * Abort all outstanding requests.  This is only safe to use if the completion
 * ring will never be walked again or the device has been reset, because it
 * destroys the 1-1 mapping between the context field passed to the emulation
 * and our request structure.
 */
static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
{
	unsigned i;

	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
		struct scsi_cmnd *cmd = ctx->cmd;
		if (cmd) {
			scmd_printk(KERN_ERR, cmd,
				    "Forced reset on cmd %p\n", cmd);
			pvscsi_unmap_buffers(adapter, ctx);
			pvscsi_release_context(adapter, ctx);
			cmd->result = (DID_RESET << 16);
			cmd->scsi_done(cmd);
		}
	}
}

static int pvscsi_host_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;
	bool use_msg;

	scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");

	spin_lock_irqsave(&adapter->hw_lock, flags);

	use_msg = adapter->use_msg;

	if (use_msg) {
		adapter->use_msg = 0;
		spin_unlock_irqrestore(&adapter->hw_lock, flags);

		/*
		 * Now that we know that the ISR won't add more work on the
		 * workqueue we can safely flush any outstanding work.
		 */
		flush_workqueue(adapter->workqueue);
		spin_lock_irqsave(&adapter->hw_lock, flags);
	}

	/*
	 * We're going to tear down the entire ring structure and set it back
	 * up, so we stall new requests until all completions are flushed and
	 * the rings are back in place.
	 */

	pvscsi_process_request_ring(adapter);

	ll_adapter_reset(adapter);

	/*
	 * Now process any completions.  Note we do this AFTER adapter reset,
	 * which is strange, but stops races where completions get posted
	 * between processing the ring and issuing the reset.  The backend will
	 * not touch the ring memory after reset, so the immediately pre-reset
	 * completion ring state is still valid.
	 */
	pvscsi_process_completion_ring(adapter);

	pvscsi_reset_all(adapter);
	adapter->use_msg = use_msg;
	pvscsi_setup_all_rings(adapter);
	pvscsi_unmask_intr(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");

	/*
	 * We don't want to queue new requests for this bus after
	 * flushing all pending requests to emulation, since new
	 * requests could then sneak in during this bus reset phase,
	 * so take the lock now.
	 */
	spin_lock_irqsave(&adapter->hw_lock, flags);

	pvscsi_process_request_ring(adapter);
	ll_bus_reset(adapter);
	pvscsi_process_completion_ring(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

static int pvscsi_device_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
		    host->host_no, cmd->device->id);

	/*
	 * We don't want to queue new requests for this device after flushing
	 * all pending requests to emulation, since new requests could then
	 * sneak in during this device reset phase, so take the lock now.
	 */
	spin_lock_irqsave(&adapter->hw_lock, flags);

	pvscsi_process_request_ring(adapter);
	ll_device_reset(adapter, cmd->device->id);
	pvscsi_process_completion_ring(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

static struct scsi_host_template pvscsi_template;

static const char *pvscsi_info(struct Scsi_Host *host)
{
	struct pvscsi_adapter *adapter = shost_priv(host);
	static char buf[256];

	sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
		"%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
		adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
		pvscsi_template.cmd_per_lun);

	return buf;
}

static struct scsi_host_template pvscsi_template = {
	.module				= THIS_MODULE,
	.name				= "VMware PVSCSI Host Adapter",
	.proc_name			= "vmw_pvscsi",
	.info				= pvscsi_info,
	.queuecommand			= pvscsi_queue,
	.this_id			= -1,
	.sg_tablesize			= PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
	.dma_boundary			= UINT_MAX,
	.max_sectors			= 0xffff,
	.use_clustering			= ENABLE_CLUSTERING,
	.eh_abort_handler		= pvscsi_abort,
	.eh_device_reset_handler	= pvscsi_device_reset,
	.eh_bus_reset_handler		= pvscsi_bus_reset,
	.eh_host_reset_handler		= pvscsi_host_reset,
};

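/*
 * Handle a single hot-plug message from the device: look the device up in
 * the midlayer and add or remove the corresponding scsi_device.
 */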
static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
			       const struct PVSCSIRingMsgDesc *e)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct Scsi_Host *host = adapter->host;
	struct scsi_device *sdev;

	printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u)\n",
	       e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);

	BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);

	if (e->type == PVSCSI_MSG_DEV_ADDED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		printk(KERN_INFO
		       "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			printk(KERN_INFO "vmw_pvscsi: device already exists\n");
			scsi_device_put(sdev);
		} else
			scsi_add_device(adapter->host, desc->bus,
					desc->target, desc->lun[1]);

		scsi_host_put(host);
	} else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		printk(KERN_INFO
		       "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else
			printk(KERN_INFO
			       "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
			       desc->bus, desc->target, desc->lun[1]);

		scsi_host_put(host);
	}
}

static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;

	return s->msgProdIdx != s->msgConsIdx;
}

static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
	u32 msg_entries = s->msgNumEntriesLog2;

	while (pvscsi_msg_pending(adapter)) {
		struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
						      MASK(msg_entries));

		barrier();
		pvscsi_process_msg(adapter, e);
		barrier();
		s->msgConsIdx++;
	}
}

static void pvscsi_msg_workqueue_handler(struct work_struct *data)
{
	struct pvscsi_adapter *adapter;

	adapter = container_of(data, struct pvscsi_adapter, work);

	pvscsi_process_msg_ring(adapter);
}

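/*
 * Probe for msg ring support: issue SETUP_MSG_RING with no payload and read
 * back COMMAND_STATUS; a value of -1 means the emulation does not implement
 * the command, in which case we run without the msg ring.
 */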
static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
{
	char name[32];

	if (!pvscsi_use_msg)
		return 0;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
			 PVSCSI_CMD_SETUP_MSG_RING);

	if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
		return 0;

	snprintf(name, sizeof(name),
		 "vmw_pvscsi_wq_%u", adapter->host->host_no);

	adapter->workqueue = create_singlethread_workqueue(name);
	if (!adapter->workqueue) {
		printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
		return 0;
	}
	INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);

	return 1;
}

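/*
 * With MSI or MSI-X the vector is dedicated to this adapter, so the
 * interrupt is always ours; with (possibly shared) INTx we must read
 * INTR_STATUS and ack only the bits we handled.
 */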
static irqreturn_t pvscsi_isr(int irq, void *devp)
{
	struct pvscsi_adapter *adapter = devp;
	int handled;

	if (adapter->use_msi || adapter->use_msix)
		handled = true;
	else {
		u32 val = pvscsi_read_intr_status(adapter);
		handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
		if (handled)
			pvscsi_write_intr_status(devp, val);
	}

	if (handled) {
		unsigned long flags;

		spin_lock_irqsave(&adapter->hw_lock, flags);

		pvscsi_process_completion_ring(adapter);
		if (adapter->use_msg && pvscsi_msg_pending(adapter))
			queue_work(adapter->workqueue, &adapter->work);

		spin_unlock_irqrestore(&adapter->hw_lock, flags);
	}

	return IRQ_RETVAL(handled);
}

static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
{
	struct pvscsi_ctx *ctx = adapter->cmd_map;
	unsigned i;

	for (i = 0; i < adapter->req_depth; ++i, ++ctx)
		free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
}

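/*
 * Request the single completion vector (PVSCSI_VECTOR_COMPLETION) via MSI-X
 * and return the Linux IRQ number assigned to it in *irq.
 */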
static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
			     unsigned int *irq)
{
	struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
	int ret;

	ret = pci_enable_msix(adapter->dev, &entry, 1);
	if (ret)
		return ret;

	*irq = entry.vector;

	return 0;
}

static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
{
	if (adapter->irq) {
		free_irq(adapter->irq, adapter);
		adapter->irq = 0;
	}
	if (adapter->use_msi) {
		pci_disable_msi(adapter->dev);
		adapter->use_msi = 0;
	} else if (adapter->use_msix) {
		pci_disable_msix(adapter->dev);
		adapter->use_msix = 0;
	}
}

static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
{
	pvscsi_shutdown_intr(adapter);

	if (adapter->workqueue)
		destroy_workqueue(adapter->workqueue);

	if (adapter->mmioBase)
		pci_iounmap(adapter->dev, adapter->mmioBase);

	pci_release_regions(adapter->dev);

	if (adapter->cmd_map) {
		pvscsi_free_sgls(adapter);
		kfree(adapter->cmd_map);
	}

	if (adapter->rings_state)
		pci_free_consistent(adapter->dev, PAGE_SIZE,
				    adapter->rings_state, adapter->ringStatePA);

	if (adapter->req_ring)
		pci_free_consistent(adapter->dev,
				    adapter->req_pages * PAGE_SIZE,
				    adapter->req_ring, adapter->reqRingPA);

	if (adapter->cmp_ring)
		pci_free_consistent(adapter->dev,
				    adapter->cmp_pages * PAGE_SIZE,
				    adapter->cmp_ring, adapter->cmpRingPA);

	if (adapter->msg_ring)
		pci_free_consistent(adapter->dev,
				    adapter->msg_pages * PAGE_SIZE,
				    adapter->msg_ring, adapter->msgRingPA);
}

/*
 * Allocate scatter gather lists.
 *
 * These are statically allocated.  Trying to be clever was not worth it.
 *
 * Dynamic allocation can fail, and we can't go deep into the memory
 * allocator, since we're a SCSI driver, and trying too hard to allocate
 * memory might generate disk I/O.  We also don't want to fail disk I/O
 * in that case because we can't get an allocation - the I/O could be
 * trying to swap out data to free memory.  Since that is pathological,
 * just use a statically allocated scatter list.
 *
 */
static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
{
	struct pvscsi_ctx *ctx;
	int i;

	ctx = adapter->cmd_map;
	BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);

	for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
		ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
						    get_order(SGL_SIZE));
		ctx->sglPA = 0;
		BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
		if (!ctx->sgl) {
			for (; i >= 0; --i, --ctx) {
				free_pages((unsigned long)ctx->sgl,
					   get_order(SGL_SIZE));
				ctx->sgl = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;
}

static int __devinit pvscsi_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	struct pvscsi_adapter *adapter;
	struct Scsi_Host *host;
	unsigned int i;
	unsigned long flags = 0;
	int error;

	error = -ENODEV;

	if (pci_enable_device(pdev))
		return error;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
	} else {
		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
		goto out_disable_device;
	}

	pvscsi_template.can_queue =
		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	pvscsi_template.cmd_per_lun =
		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
	if (!host) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
		goto out_disable_device;
	}

	adapter = shost_priv(host);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
	adapter->host = host;

	spin_lock_init(&adapter->hw_lock);

	host->max_channel = 0;
	host->max_id      = 16;
	host->max_lun     = 1;
	host->max_cmd_len = 16;

	adapter->rev = pdev->revision;

	if (pci_request_regions(pdev, "vmw_pvscsi")) {
		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
		goto out_free_host;
	}

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
			continue;

		if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
			continue;

		break;
	}

	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_ERR
		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
		goto out_release_resources;
	}

	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

	if (!adapter->mmioBase) {
		printk(KERN_ERR
		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
		       i, PVSCSI_MEM_SPACE_SIZE);
		goto out_release_resources;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, host);

	ll_adapter_reset(adapter);

	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);

	error = pvscsi_allocate_rings(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
		goto out_release_resources;
	}

	/*
	 * From this point on we should reset the adapter if anything goes
	 * wrong.
	 */
	pvscsi_setup_all_rings(adapter);

	adapter->cmd_map = kcalloc(adapter->req_depth,
				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
	if (!adapter->cmd_map) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
		error = -ENOMEM;
		goto out_reset_adapter;
	}

	INIT_LIST_HEAD(&adapter->cmd_pool);
	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
		list_add(&ctx->list, &adapter->cmd_pool);
	}

	error = pvscsi_allocate_sg(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
		goto out_reset_adapter;
	}

	if (!pvscsi_disable_msix &&
	    pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
		adapter->use_msix = 1;
	} else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI\n");
		adapter->use_msi = 1;
		adapter->irq = pdev->irq;
	} else {
		printk(KERN_INFO "vmw_pvscsi: using INTx\n");
		adapter->irq = pdev->irq;
		flags = IRQF_SHARED;
	}

	error = request_irq(adapter->irq, pvscsi_isr, flags,
			    "vmw_pvscsi", adapter);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: unable to request IRQ: %d\n", error);
		adapter->irq = 0;
		goto out_reset_adapter;
	}

	error = scsi_add_host(host, &pdev->dev);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
		goto out_reset_adapter;
	}

	dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
		 adapter->rev, host->host_no);

	pvscsi_unmask_intr(adapter);

	scsi_scan_host(host);

	return 0;

out_reset_adapter:
	ll_adapter_reset(adapter);
out_release_resources:
	pvscsi_release_resources(adapter);
out_free_host:
	scsi_host_put(host);
out_disable_device:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);

	return error;
}

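/*
 * Quiesce the adapter: mask interrupts, drain the msg workqueue, tear down
 * the IRQ, flush both rings, and finally reset the device.
 */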
static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
{
	pvscsi_mask_intr(adapter);

	if (adapter->workqueue)
		flush_workqueue(adapter->workqueue);

	pvscsi_shutdown_intr(adapter);

	pvscsi_process_request_ring(adapter);
	pvscsi_process_completion_ring(adapter);
	ll_adapter_reset(adapter);
}

static void pvscsi_shutdown(struct pci_dev *dev)
{
	struct Scsi_Host *host = pci_get_drvdata(dev);
	struct pvscsi_adapter *adapter = shost_priv(host);

	__pvscsi_shutdown(adapter);
}

static void pvscsi_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct pvscsi_adapter *adapter = shost_priv(host);

	scsi_remove_host(host);

	__pvscsi_shutdown(adapter);
	pvscsi_release_resources(adapter);

	scsi_host_put(host);

	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
}

static struct pci_driver pvscsi_pci_driver = {
	.name		= "vmw_pvscsi",
	.id_table	= pvscsi_pci_tbl,
	.probe		= pvscsi_probe,
	.remove		= __devexit_p(pvscsi_remove),
	.shutdown       = pvscsi_shutdown,
};

static int __init pvscsi_init(void)
{
	pr_info("%s - version %s\n",
		PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
	return pci_register_driver(&pvscsi_pci_driver);
}

static void __exit pvscsi_exit(void)
{
	pci_unregister_driver(&pvscsi_pci_driver);
}

module_init(pvscsi_init);
module_exit(pvscsi_exit);