/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include "osd.h"
#include "logging.h"
#include "version_info.h"
#include "vmbus.h"
#include "storvsc_api.h"

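/*
 * Each disk is allocated 64 minor numbers, so the second disk on a
 * controller starts at first_minor 64 (see blkvsc_probe() below).
 */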
#define BLKVSC_MINORS	64

enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
	DVD_TYPE,
};

/*
 * This structure ties a struct request to the struct
 * blkvsc_request/hv_storvsc_request instances that service it.
 * A single struct request may be represented by one or more
 * struct blkvsc_request instances.
 */
struct blkvsc_request_group {
	int outstanding;
	int status;
	struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
};

struct blkvsc_request {
	/* blkvsc_request_group.blkvsc_req_list */
	struct list_head req_entry;

	/* block_device_context.pending_list */
	struct list_head pend_entry;

	/* NULL if the request was generated internally */
	struct request *req;

	struct block_device_context *dev;

	/* The group this request is part of. May be NULL */
	struct blkvsc_request_group *group;

	wait_queue_head_t wevent;
	int cond;

	int write;
	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	struct hv_storvsc_request request;
	/*
	 * !!!DO NOT ADD ANYTHING BELOW HERE!!! Otherwise, memory can overlap,
	 * because the extension buffer falls right here and is pointed to by
	 * request.Extension.
	 */
};
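
/*
 * Allocation note: objects in block_device_context.request_pool are
 * created with size sizeof(struct blkvsc_request) + RequestExtSize, so
 * the extension buffer named above lives in the same slab object,
 * immediately after this structure (see blkvsc_probe() and
 * blkvsc_submit_request()).
 */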

/* Per device structure */
struct block_device_context {
	/* point back to our device context */
	struct vm_device *device_ctx;
	struct kmem_cache *request_pool;
	spinlock_t lock;
	struct gendisk *gd;
	enum blkvsc_device_type	device_type;
	struct list_head pending_list;

	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int shutting_down;
	int media_not_present;
	unsigned int sector_size;
	sector_t capacity;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	int users;
};

/* Per driver */
struct blkvsc_driver_context {
	/* !! These must be the first 2 fields !! */
	struct driver_context drv_ctx;
	struct storvsc_driver_object drv_obj;
};
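
/*
 * Since drv_ctx is the first field, the driver_context pointer returned
 * by driver_to_driver_context() can be cast directly to a
 * struct blkvsc_driver_context, which is what blkvsc_probe(),
 * blkvsc_remove() and blkvsc_submit_request() rely on.
 */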

/* Static declarations */
static int blkvsc_probe(struct device *dev);
static int blkvsc_remove(struct device *device);
static void blkvsc_shutdown(struct device *device);

static int blkvsc_open(struct block_device *bdev, fmode_t mode);
static int blkvsc_release(struct gendisk *disk, fmode_t mode);
static int blkvsc_media_changed(struct gendisk *gd);
static int blkvsc_revalidate_disk(struct gendisk *gd);
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument);
static void blkvsc_request(struct request_queue *queue);
static void blkvsc_request_completion(struct hv_storvsc_request *request);
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req);
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
		void (*request_completion)(struct hv_storvsc_request *));
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
static void blkvsc_cmd_completion(struct hv_storvsc_request *request);
static int blkvsc_do_inquiry(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
static int blkvsc_do_flush(struct block_device_context *blkdev);
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);

static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(blkvsc_ringbuffer_size, "Ring buffer size (in bytes)");

/* The one and only instance */
static struct blkvsc_driver_context g_blkvsc_drv;

static const struct block_device_operations block_ops = {
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.media_changed = blkvsc_media_changed,
	.revalidate_disk = blkvsc_revalidate_disk,
	.getgeo = blkvsc_getgeo,
	.ioctl  = blkvsc_ioctl,
};

/*
 * blkvsc_drv_init - BlkVsc driver initialization.
 */
static int blkvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
{
	struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
	int ret;

	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);

	storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;

	/* Callback to client driver to complete the initialization */
	drv_init(&storvsc_drv_obj->Base);

	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType,
	       sizeof(struct hv_guid));

	drv_ctx->probe = blkvsc_probe;
	drv_ctx->remove = blkvsc_remove;
	drv_ctx->shutdown = blkvsc_shutdown;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(drv_ctx);

	return ret;
}

static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;
	*curr = dev;
	return 1; /* stop iterating */
}

static void blkvsc_drv_exit(void)
{
	struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
	struct device *current_dev;
	int ret;

	while (1) {
		current_dev = NULL;

		/* Get the device */
		ret = driver_for_each_device(&drv_ctx->driver, NULL,
					     (void *) &current_dev,
					     blkvsc_drv_exit_cb);

		if (ret)
			DPRINT_WARN(BLKVSC_DRV,
				    "driver_for_each_device returned %d", ret);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->Base.OnCleanup)
		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);

	vmbus_child_driver_unregister(drv_ctx);
}

/*
 * blkvsc_probe - Add a new device for this driver
 */
static int blkvsc_probe(struct device *device)
{
	struct driver_context *driver_ctx =
				driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
				(struct blkvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
				&blkvsc_drv_ctx->drv_obj;
	struct vm_device *device_ctx = device_to_vm_device(device);
	struct hv_device *device_obj = &device_ctx->device_obj;

	struct block_device_context *blkdev = NULL;
	struct storvsc_device_info device_info;
	int major = 0;
	int devnum = 0;
	int ret = 0;
	static int ide0_registered;
	static int ide1_registered;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");

	if (!storvsc_drv_obj->Base.OnDeviceAdd) {
		DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
		ret = -1;
		goto Cleanup;
	}

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
	if (!blkdev) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	INIT_LIST_HEAD(&blkdev->pending_list);

	/* Initialize what we can here */
	spin_lock_init(&blkdev->lock);

	/* ASSERT(sizeof(struct blkvsc_request_group) <= */
	/* 	sizeof(struct blkvsc_request)); */

	blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
					sizeof(struct blkvsc_request) +
					storvsc_drv_obj->RequestExtSize, 0,
					SLAB_HWCACHE_ALIGN, NULL);
	if (!blkdev->request_pool) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	/* Call to the vsc driver to add the device */
	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
	if (ret != 0) {
		DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
		goto Cleanup;
	}

	blkdev->device_ctx = device_ctx;
	/* this identifies the device: 0 or 1 */
	blkdev->target = device_info.TargetId;
	/* this identifies the ide controller: 0 or 1 */
	blkdev->path = device_info.PathId;

	dev_set_drvdata(device, blkdev);

	/* Calculate the major and device num */
	if (blkdev->path == 0) {
		major = IDE0_MAJOR;
		devnum = blkdev->path + blkdev->target;		/* 0 or 1 */

		if (!ide0_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}

			ide0_registered = 1;
		}
	} else if (blkdev->path == 1) {
		major = IDE1_MAJOR;
		devnum = blkdev->path + blkdev->target + 1; /* 2 or 3 */

		if (!ide1_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}

			ide1_registered = 1;
		}
	} else {
		DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
		ret = -1;
		goto Cleanup;
	}

	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd) {
		DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed!");
		ret = -1;
		goto Cleanup;
	}

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major;
	if (devnum == 1 || devnum == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->private_data = blkdev;
	blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);

	blkvsc_do_inquiry(blkdev);
	if (blkdev->device_type == DVD_TYPE) {
		set_disk_ro(blkdev->gd, 1);
		blkdev->gd->flags |= GENHD_FL_REMOVABLE;
		blkvsc_do_read_capacity(blkdev);
	} else {
		blkvsc_do_read_capacity16(blkdev);
	}

	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
	/* go! */
	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
		    blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
		    blkdev->sector_size);

	return ret;

Remove:
	storvsc_drv_obj->Base.OnDeviceRemove(device_obj);

Cleanup:
	if (blkdev) {
		if (blkdev->request_pool) {
			kmem_cache_destroy(blkdev->request_pool);
			blkdev->request_pool = NULL;
		}
		kfree(blkdev);
		blkdev = NULL;
	}

	return ret;
}

static void blkvsc_shutdown(struct device *device)
{
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	if (!blkdev)
		return;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n",
		   blkdev->users, blkdev->gd->disk_name);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

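/*
 * The blkvsc_do_*() helpers below share one synchronous pattern:
 * allocate a blkvsc_request from the pool, build a CDB by hand, submit
 * it with blkvsc_cmd_completion() as the completion routine, and sleep
 * on wevent until the completion routine sets cond.
 */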
static int blkvsc_do_flush(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");

	if (blkdev->device_type != HARDDISK_TYPE)
		return 0;

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 0;

	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
	blkvsc_req->cmd_len = 10;

	/*
	 * Clear the condition before submitting, since the completion
	 * routine may run before we return
	 */
	blkvsc_req->cond = 0;
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

/* Do a SCSI INQUIRY cmd here to get the device type (i.e. disk or dvd) */
static int blkvsc_do_inquiry(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 64;

	blkvsc_req->cmnd[0] = INQUIRY;
	blkvsc_req->cmnd[1] = 0x1;	/* EVPD: request a VPD page */
	blkvsc_req->cmnd[2] = 0x83;	/* VPD page 0x83: device identification */
	blkvsc_req->cmnd[4] = 64;	/* allocation length */
	blkvsc_req->cmd_len = 6;

	/*
	 * Clear the condition before submitting, since the completion
	 * routine may run before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	buf = kmap(page_buf);

	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, 64); */

	/* The peripheral device type is in the low 5 bits of byte 0 */
	device_type = buf[0] & 0x1F;

	if (device_type == 0x0) {
		blkdev->device_type = HARDDISK_TYPE;
	} else if (device_type == 0x5) {
		blkdev->device_type = DVD_TYPE;
	} else {
		/* TODO: this is currently an unsupported device type */
		blkdev->device_type = UNKNOWN_DEV_TYPE;
	}

	DPRINT_DBG(BLKVSC_DRV, "device type %d\n", device_type);

	blkdev->device_id_len = buf[7];
	if (blkdev->device_id_len > 64)
		blkdev->device_id_len = 64;

	memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id,
	 * blkdev->device_id_len); */

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

/* Do a SCSI READ_CAPACITY cmd here to get the size of the disk */
static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 8;

	blkvsc_req->cmnd[0] = READ_CAPACITY;
	blkvsc_req->cmd_len = 10;	/* READ_CAPACITY(10) is a 10-byte CDB */

	/*
	 * Clear the condition before submitting, since the completion
	 * routine may run before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A) {
			/* Medium not present */
			blkdev->media_not_present = 1;
		}
		/* still need to free page_buf and blkvsc_req */
		goto cleanup;
	}
	buf = kmap(page_buf);

	/* big-endian on the wire to host order */
	blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) |
			    (buf[2] << 8) | buf[3]) + 1;
	blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) |
			      (buf[6] << 8) | buf[7];

	kunmap(page_buf);

cleanup:
	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 12;

	blkvsc_req->cmnd[0] = 0x9E; /* READ_CAPACITY16 (SERVICE ACTION IN) */
	blkvsc_req->cmd_len = 16;

	/*
	 * Clear the condition before submitting, since the completion
	 * routine may run before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);
		if (sense_hdr.asc == 0x3A) {
			/* Medium not present */
			blkdev->media_not_present = 1;
		}
		/* still need to free page_buf and blkvsc_req */
		goto cleanup;
	}
	buf = kmap(page_buf);

	/* big-endian on the wire to host order */
	blkdev->capacity = be64_to_cpu(*(unsigned long long *) &buf[0]) + 1;
	blkdev->sector_size = be32_to_cpu(*(unsigned int *)&buf[8]);

	kunmap(page_buf);

cleanup:
	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

/*
 * blkvsc_remove() - Callback when our device is removed
 */
static int blkvsc_remove(struct device *device)
{
	struct driver_context *driver_ctx =
				driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
				(struct blkvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
				&blkvsc_drv_ctx->drv_obj;
	struct vm_device *device_ctx = device_to_vm_device(device);
	struct hv_device *device_obj = &device_ctx->device_obj;
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;
	int ret;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");

	if (!storvsc_drv_obj->Base.OnDeviceRemove)
		return -1;

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
	if (ret != 0) {
		/* TODO: */
		DPRINT_ERR(BLKVSC_DRV,
			   "unable to remove blkvsc device (ret %d)", ret);
	}

	/* Get to a known state */
	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blk_cleanup_queue(blkdev->gd->queue);

	del_gendisk(blkdev->gd);

	kmem_cache_destroy(blkdev->request_pool);

	kfree(blkdev);

	return ret;
}

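/*
 * Build the smallest read/write CDB that can address the transfer:
 * READ/WRITE(6) carries a 21-bit LBA and an 8-bit sector count,
 * READ/WRITE(10) a 32-bit LBA and a 16-bit count, and READ/WRITE(16)
 * everything beyond that. The thresholds below mirror those limits.
 */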
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
	/* ASSERT(blkvsc_req->req); */
	/* ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8)); */

	if (blkvsc_req->sector_start > 0xffffffff) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_16;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_16;
		}
		blkvsc_req->cmd_len = 16;	/* 16-byte CDB */

		blkvsc_req->cmnd[1] |=
			(blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

		*(unsigned long long *)&blkvsc_req->cmnd[2] =
				cpu_to_be64(blkvsc_req->sector_start);
		*(unsigned int *)&blkvsc_req->cmnd[10] =
				cpu_to_be32(blkvsc_req->sector_count);
	} else if ((blkvsc_req->sector_count > 0xff) ||
		   (blkvsc_req->sector_start > 0x1fffff)) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_10;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_10;
		}
		blkvsc_req->cmd_len = 10;	/* 10-byte CDB */

		blkvsc_req->cmnd[1] |=
			(blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

		*(unsigned int *)&blkvsc_req->cmnd[2] =
				cpu_to_be32(blkvsc_req->sector_start);
		*(unsigned short *)&blkvsc_req->cmnd[7] =
				cpu_to_be16(blkvsc_req->sector_count);
	} else {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_6;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_6;
		}
		blkvsc_req->cmd_len = 6;	/* 6-byte CDB */

		*(unsigned int *)&blkvsc_req->cmnd[1] =
				cpu_to_be32(blkvsc_req->sector_start) >> 8;
		blkvsc_req->cmnd[1] &= 0x1f;
		blkvsc_req->cmnd[4] = (unsigned char)blkvsc_req->sector_count;
	}
}

static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
			void (*request_completion)(struct hv_storvsc_request *))
{
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct vm_device *device_ctx = blkdev->device_ctx;
	struct driver_context *driver_ctx =
			driver_to_driver_context(device_ctx->device.driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
			(struct blkvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
			&blkvsc_drv_ctx->drv_obj;
	struct hv_storvsc_request *storvsc_req;
	int ret;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - "
		   "req %p type %s start_sector %lu count %ld offset %d "
		   "len %d\n", blkvsc_req,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long) blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Offset,
		   blkvsc_req->request.DataBuffer.Length);

	storvsc_req = &blkvsc_req->request;
	storvsc_req->Extension = (void *)((unsigned long)blkvsc_req +
					  sizeof(struct blkvsc_request));

	storvsc_req->Type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->OnIOCompletion = request_completion;
	storvsc_req->Context = blkvsc_req;

	storvsc_req->Host = blkdev->port;
	storvsc_req->Bus = blkdev->path;
	storvsc_req->TargetId = blkdev->target;
	storvsc_req->LunId = 0;	 /* this is not really used at all */

	storvsc_req->CdbLen = blkvsc_req->cmd_len;
	storvsc_req->Cdb = blkvsc_req->cmnd;

	storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
	storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;

	ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj,
					   &blkvsc_req->request);
	if (ret == 0)
		blkdev->num_outstanding_reqs++;

	return ret;
}

/*
 * We break the request into 1 or more blkvsc_requests and submit
 * them. If we can't submit them all, we put them on the
 * pending_list. The blkvsc_request() will work on the pending_list.
 */
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;
	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;
	sector_t start_sector;
	unsigned long num_sectors = 0;
	int ret = 0;
	int pending = 0;
	struct blkvsc_request_group *group = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu\n", blkdev, req,
		  (unsigned long)blk_rq_pos(req));

	/* Create a group to tie req to list of blkvsc_reqs */
	group = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
		return -ENOMEM;

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	if (req->bio) {
		for (bio = req->bio; bio; bio = bio->bi_next) {
			/*
			 * Map this bio into an existing or new storvsc request
			 */
			bio_for_each_segment(bvec, bio, seg_idx) {
				DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() "
					   "- req %p bio %p bvec %p seg_idx %d "
					   "databuf_idx %d\n", req, bio, bvec,
					   seg_idx, databuf_idx);

				/* Get a new storvsc request */
				/* 1st-time */
				if ((!blkvsc_req) ||
				    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT)
				    /* hole at the beginning of the page */
				    || (bvec->bv_offset != 0) ||
				    /* hole at the end of the page */
				    (prev_bvec &&
				     (prev_bvec->bv_len != PAGE_SIZE))) {
					/* submit the prev one */
					if (blkvsc_req) {
						blkvsc_req->sector_start = start_sector;
						sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));

						blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);
						blkvsc_init_rw(blkvsc_req);
					}

					/*
					 * Create new blkvsc_req to represent
					 * the current bvec
					 */
					blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
					if (!blkvsc_req) {
						/* free up everything */
						list_for_each_entry_safe(
							blkvsc_req, tmp,
							&group->blkvsc_req_list,
							req_entry) {
							list_del(&blkvsc_req->req_entry);
							kmem_cache_free(blkdev->request_pool, blkvsc_req);
						}

						kmem_cache_free(blkdev->request_pool, group);
						return -ENOMEM;
					}

					memset(blkvsc_req, 0,
					       sizeof(struct blkvsc_request));

					blkvsc_req->dev = blkdev;
					blkvsc_req->req = req;
					blkvsc_req->request.DataBuffer.Offset = bvec->bv_offset;
					blkvsc_req->request.DataBuffer.Length = 0;

					/* Add to the group */
					blkvsc_req->group = group;
					blkvsc_req->group->outstanding++;
					list_add_tail(&blkvsc_req->req_entry,
						&blkvsc_req->group->blkvsc_req_list);

					start_sector += num_sectors;
					num_sectors = 0;
					databuf_idx = 0;
				}

				/* Add the curr bvec/segment to the curr blkvsc_req */
				blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] = page_to_pfn(bvec->bv_page);
				blkvsc_req->request.DataBuffer.Length += bvec->bv_len;

				prev_bvec = bvec;

				databuf_idx++;
				num_sectors += bvec->bv_len >> 9;

			} /* bio_for_each_segment */

		} /* for each bio */
	}

	/* Handle the last one */
	if (blkvsc_req) {
		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n",
			   blkdev, req, blkvsc_req->group,
			   blkvsc_req->group->outstanding);

		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start,
			   (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors /
					   (blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
		if (pending) {
			DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to "
				   "pending_list - blkvsc_req %p start_sect %lu"
				   " sect_count %ld (%lu %ld)\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   (unsigned long)num_sectors);

			list_add_tail(&blkvsc_req->pend_entry,
				      &blkdev->pending_list);
		} else {
			ret = blkvsc_submit_request(blkvsc_req,
						    blkvsc_request_completion);
			if (ret == -1) {
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry,
					      &blkdev->pending_list);
			}

			DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p "
				   "start_sect %lu sect_count %ld (%lu %ld) "
				   "ret %d\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   num_sectors, ret);
		}
	}

	return pending;
}

static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n",
		   blkvsc_req);

	blkdev->num_outstanding_reqs--;

	if (blkvsc_req->request.Status)
		if (scsi_normalize_sense(blkvsc_req->sense_buffer,
					 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("blkvsc", &sense_hdr);

	blkvsc_req->cond = 1;
	wake_up_interruptible(&blkvsc_req->wevent);
}

static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;

	/* ASSERT(blkvsc_req->group); */

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s "
		   "sect_start %lu sect_count %ld len %d group outstd %d "
		   "total outstd %d\n",
		   blkdev, blkvsc_req, blkvsc_req->group,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Length,
		   blkvsc_req->group->outstanding,
		   blkdev->num_outstanding_reqs);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0) {
		list_for_each_entry_safe(comp_req, tmp,
					 &blkvsc_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
				   "sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			list_del(&comp_req->req_entry);

			if (!__blk_end_request(comp_req->req,
				(!comp_req->request.Status ? 0 : -EIO),
				comp_req->sector_count * blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred, i.e. the
				 * request is done
				 */
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n",
					   comp_req->req);
				kmem_cache_free(blkdev->request_pool,
						comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down) {
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;

	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		/*
		 * The pend_req could be part of a partially completed
		 * request. If so, complete those reqs first until we
		 * hit the pend_req
		 */
		list_for_each_entry_safe(comp_req, tmp2,
					 &pend_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
				   "sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long) comp_req->sector_start,
				   comp_req->sector_count);

			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			if (comp_req->req) {
				ret = __blk_end_request(comp_req->req,
					(!comp_req->request.Status ? 0 : -EIO),
					comp_req->sector_count *
					blkdev->sector_size);

				if (ret)
					goto out;
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n",
			   pend_req);

		list_del(&pend_req->pend_entry);

		list_del(&pend_req->req_entry);

		/* this must test pend_req, not the comp_req left over above */
		if (pend_req->req) {
			if (!__blk_end_request(pend_req->req, -EIO,
					       pend_req->sector_count *
					       blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred, i.e. the
				 * request is done
				 */
				DPRINT_DBG(BLKVSC_DRV,
					   "blkvsc_cancel_pending_reqs() - "
					   "req %p COMPLETED\n", pend_req->req);
				kmem_cache_free(blkdev->request_pool,
						pend_req->group);
			}
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}

out:
	return ret;
}

static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n",
			   pend_req);

		ret = blkvsc_submit_request(pend_req,
					    blkvsc_request_completion);
		if (ret != 0)
			break;
		else
			list_del(&pend_req->pend_entry);
	}

	return ret;
}

static void blkvsc_request(struct request_queue *queue)
{
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	while ((req = blk_peek_request(queue)) != NULL) {
		DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);

		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS ||
		    blkdev->media_not_present) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);

		if (ret != 0) {
			DPRINT_DBG(BLKVSC_DRV,
				   "- stop queue - pending_list not empty\n");
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
			blk_stop_queue(queue);
			break;
		} else if (ret < 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
}

static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
	struct block_device_context *blkdev = bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	lock_kernel();
	spin_lock(&blkdev->lock);

	if (!blkdev->users && blkdev->device_type == DVD_TYPE) {
		spin_unlock(&blkdev->lock);
		check_disk_change(bdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users++;

	spin_unlock(&blkdev->lock);
	unlock_kernel();
	return 0;
}

static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device_context *blkdev = disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	lock_kernel();
	spin_lock(&blkdev->lock);
	if (blkdev->users == 1) {
		spin_unlock(&blkdev->lock);
		blkvsc_do_flush(blkdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users--;

	spin_unlock(&blkdev->lock);
	unlock_kernel();
	return 0;
}

static int blkvsc_media_changed(struct gendisk *gd)
{
	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	return 1;
}

static int blkvsc_revalidate_disk(struct gendisk *gd)
{
	struct block_device_context *blkdev = gd->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");

	if (blkdev->device_type == DVD_TYPE) {
		blkvsc_do_read_capacity(blkdev);
		set_capacity(blkdev->gd, blkdev->capacity *
			    (blkdev->sector_size/512));
		blk_queue_logical_block_size(gd->queue, blkdev->sector_size);
	}
	return 0;
}

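/*
 * Make up a plausible CHS geometry for the virtual disk: try 17, then
 * 31, then 63 sectors per track (with up to 16 heads) until the
 * cylinder count fits, and cap very large disks at the traditional
 * 65535 x 16 x 255 limit.
 */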
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	sector_t total_sectors = get_capacity(bd->bd_disk);
	sector_t cylinder_times_heads = 0;
	sector_t temp = 0;

	int sectors_per_track = 0;
	int heads = 0;
	int cylinders = 0;
	int rem = 0;

	if (total_sectors > (65535 * 16 * 255))
		total_sectors = (65535 * 16 * 255);

	if (total_sectors >= (65535 * 16 * 63)) {
		sectors_per_track = 255;
		heads = 16;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);
	} else {
		sectors_per_track = 17;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);

		temp = cylinder_times_heads + 1023;
		/* sector_div stores the quotient in temp */
		rem = sector_div(temp, 1024);

		heads = temp;

		if (heads < 4)
			heads = 4;

		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
			sectors_per_track = 31;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}

		if (cylinder_times_heads >= (heads * 1024)) {
			sectors_per_track = 63;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}
	}

	temp = cylinder_times_heads;
	/* sector_div stores the quotient in temp */
	rem = sector_div(temp, heads);
	cylinders = temp;

	hg->heads = heads;
	hg->sectors = sectors_per_track;
	hg->cylinders = cylinders;

	DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads,
		    sectors_per_track);

	return 0;
}

static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument)
{
/*	struct block_device_context *blkdev = bd->bd_disk->private_data; */
	int ret;

	switch (cmd) {
	/*
	 * TODO: I think there is a certain format for HDIO_GET_IDENTITY rather
	 * than just a GUID. Commented it out for now.
	 */
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int __init blkvsc_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(sector_t) != 8);

	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");

	ret = blkvsc_drv_init(BlkVscInitialize);

	return ret;
}

static void __exit blkvsc_exit(void)
{
	blkvsc_drv_exit();
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual block driver");
module_init(blkvsc_init);
module_exit(blkvsc_exit);