Matches in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/staging/hv/

Lines matching refs:blkdev. The matches come from the Hyper-V virtual block driver (blkvsc) in the kernel's staging tree; the leading number on each entry is its line number in the source file.

24 #include <linux/blkdev.h>
139 static int blkvsc_do_request(struct block_device_context *blkdev,
145 static int blkvsc_do_inquiry(struct block_device_context *blkdev);
146 static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
147 static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
148 static int blkvsc_do_flush(struct block_device_context *blkdev);
149 static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
150 static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);
255 struct block_device_context *blkdev = NULL;
271 blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
272 if (!blkdev) {
277 INIT_LIST_HEAD(&blkdev->pending_list);
280 spin_lock_init(&blkdev->lock);
285 blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
289 if (!blkdev->request_pool) {
302 blkdev->device_ctx = device_ctx;
304 blkdev->target = device_info.TargetId;
306 blkdev->path = device_info.PathId;
308 dev_set_drvdata(device, blkdev);
311 if (blkdev->path == 0) {
313 devnum = blkdev->path + blkdev->target; /* 0 or 1 */
326 } else if (blkdev->path == 1) {
328 devnum = blkdev->path + blkdev->target + 1; /* 2 or 3 */
349 blkdev->gd = alloc_disk(BLKVSC_MINORS);
350 if (!blkdev->gd) {
356 blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);
358 blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
359 blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
360 blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
361 blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
362 blk_queue_dma_alignment(blkdev->gd->queue, 511);
364 blkdev->gd->major = major;
366 blkdev->gd->first_minor = BLKVSC_MINORS;
368 blkdev->gd->first_minor = 0;
369 blkdev->gd->fops = &block_ops;
370 blkdev->gd->private_data = blkdev;
371 blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
372 sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
374 blkvsc_do_inquiry(blkdev);
375 if (blkdev->device_type == DVD_TYPE) {
376 set_disk_ro(blkdev->gd, 1);
377 blkdev->gd->flags |= GENHD_FL_REMOVABLE;
378 blkvsc_do_read_capacity(blkdev);
380 blkvsc_do_read_capacity16(blkdev);
383 set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
384 blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
386 add_disk(blkdev->gd);
389 blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
390 blkdev->sector_size);
398 if (blkdev) {
399 if (blkdev->request_pool) {
400 kmem_cache_destroy(blkdev->request_pool);
401 blkdev->request_pool = NULL;
403 kfree(blkdev);
404 blkdev = NULL;
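Lines 255 through 404 above all fall inside the probe routine. Read in order, they show it allocating the per-device context, creating a request slab, then building and registering a gendisk. The condensed sketch below stitches the matched fragments together; the else branch pairing blkvsc_do_read_capacity16() with non-DVD devices is inferred from the line numbering, and error handling and tracing are elided:

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd)
		goto cleanup;	/* frees request_pool and blkdev, lines 398-404 */

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);
	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE - 1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major;
	/* first_minor is BLKVSC_MINORS or 0 depending on devnum
	 * (lines 366/368); the selecting condition is not in the matches */
	blkdev->gd->fops = &block_ops;
	blkdev->gd->private_data = blkdev;
	/* devnum is 0-3, derived from path and target (lines 313, 328) */
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);

	blkvsc_do_inquiry(blkdev);
	if (blkdev->device_type == DVD_TYPE) {
		set_disk_ro(blkdev->gd, 1);
		blkdev->gd->flags |= GENHD_FL_REMOVABLE;
		blkvsc_do_read_capacity(blkdev);
	} else {
		blkvsc_do_read_capacity16(blkdev);
	}

	/* set_capacity() counts 512-byte sectors, so a DVD with
	 * 2048-byte sectors reports capacity * 4 */
	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size / 512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
	add_disk(blkdev->gd);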
412 struct block_device_context *blkdev = dev_get_drvdata(device);
415 if (!blkdev)
419 blkdev->users, blkdev->gd->disk_name);
421 spin_lock_irqsave(&blkdev->lock, flags);
423 blkdev->shutting_down = 1;
425 blk_stop_queue(blkdev->gd->queue);
427 spin_unlock_irqrestore(&blkdev->lock, flags);
429 while (blkdev->num_outstanding_reqs) {
431 blkdev->num_outstanding_reqs);
435 blkvsc_do_flush(blkdev);
437 spin_lock_irqsave(&blkdev->lock, flags);
439 blkvsc_cancel_pending_reqs(blkdev);
441 spin_unlock_irqrestore(&blkdev->lock, flags);
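The shutdown handler (lines 412-441) quiesces the device in a fixed order: mark shutting_down and stop the queue while holding the lock, spin outside the lock until in-flight requests complete, flush the disk cache, and only then cancel whatever is still queued. A minimal sketch of that sequence; the sleep interval is an assumption, the matches do not show it:

	spin_lock_irqsave(&blkdev->lock, flags);
	blkdev->shutting_down = 1;          /* blkvsc_request() now rejects new I/O */
	blk_stop_queue(blkdev->gd->queue);
	spin_unlock_irqrestore(&blkdev->lock, flags);

	/* num_outstanding_reqs is decremented by the completion
	 * callbacks (lines 1071 and 1106) */
	while (blkdev->num_outstanding_reqs)
		msleep(100);                /* interval assumed */

	blkvsc_do_flush(blkdev);            /* no-op unless HARDDISK_TYPE, line 450 */

	spin_lock_irqsave(&blkdev->lock, flags);
	blkvsc_cancel_pending_reqs(blkdev);
	spin_unlock_irqrestore(&blkdev->lock, flags);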
444 static int blkvsc_do_flush(struct block_device_context *blkdev)
450 if (blkdev->device_type != HARDDISK_TYPE)
453 blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
459 blkvsc_req->dev = blkdev;
485 static int blkvsc_do_inquiry(struct block_device_context *blkdev)
494 blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
506 blkvsc_req->dev = blkdev;
540 blkdev->device_type = HARDDISK_TYPE;
542 blkdev->device_type = DVD_TYPE;
545 blkdev->device_type = UNKNOWN_DEV_TYPE;
550 blkdev->device_id_len = buf[7];
551 if (blkdev->device_id_len > 64)
552 blkdev->device_id_len = 64;
554 memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
555 /* printk_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id,
556 * blkdev->device_id_len); */
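Lines 540-554 decode the INQUIRY response: byte 0 selects the device type, and a device identifier is copied from offset 8 with its length (byte 7) clamped to the 64-byte device_id field. A self-contained sketch of that decoding; the 0x1f peripheral-type mask and the 0x00/0x05 type codes are standard SCSI values assumed here, not visible in the matches:

#include <stdint.h>
#include <string.h>

enum { UNKNOWN_DEV_TYPE, HARDDISK_TYPE, DVD_TYPE };

struct id_info {
	int      device_type;
	uint8_t  device_id[64];
	unsigned device_id_len;
};

/* buf points at an INQUIRY response payload */
static void parse_inquiry(const uint8_t *buf, struct id_info *out)
{
	switch (buf[0] & 0x1f) {	/* peripheral device type */
	case 0x00: out->device_type = HARDDISK_TYPE; break;
	case 0x05: out->device_type = DVD_TYPE;      break;
	default:   out->device_type = UNKNOWN_DEV_TYPE; break;
	}

	out->device_id_len = buf[7];	/* length byte, per line 550 */
	if (out->device_id_len > 64)	/* clamp to the fixed-size field */
		out->device_id_len = 64;
	memcpy(out->device_id, &buf[8], out->device_id_len);
}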
568 static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
577 blkdev->sector_size = 0;
578 blkdev->capacity = 0;
579 blkdev->media_not_present = 0; /* assume a disk is present */
581 blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
593 blkvsc_req->dev = blkdev;
624 blkdev->media_not_present = 1;
631 blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) |
633 blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) |
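The READ CAPACITY(10) matches at lines 631-633 are both truncated mid-expression. They assemble two big-endian 32-bit fields by hand; the completion below is the natural one, and the + 1 turning the returned last LBA into a block count mirrors the explicit + 1 visible at line 707:

#include <stdint.h>

static void parse_read_capacity10(const uint8_t *buf,
				  uint64_t *capacity, uint32_t *sector_size)
{
	/* bytes 0-3: last logical block address, big-endian */
	uint32_t last_lba = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
			    ((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];
	*capacity = (uint64_t)last_lba + 1;

	/* bytes 4-7: block length in bytes, big-endian */
	*sector_size = ((uint32_t)buf[4] << 24) | ((uint32_t)buf[5] << 16) |
		       ((uint32_t)buf[6] << 8)  |  (uint32_t)buf[7];
}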
645 static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
654 blkdev->sector_size = 0;
655 blkdev->capacity = 0;
656 blkdev->media_not_present = 0; /* assume a disk is present */
658 blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
670 blkvsc_req->dev = blkdev;
700 blkdev->media_not_present = 1;
707 blkdev->capacity = be64_to_cpu(*(unsigned long long *) &buf[0]) + 1;
708 blkdev->sector_size = be32_to_cpu(*(unsigned int *)&buf[8]);
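Lines 707-708 do the same for READ CAPACITY(16), but via be64_to_cpu()/be32_to_cpu() on pointer casts into the buffer, which silently assumes the buffer is suitably aligned. A safer equivalent using the kernel's unaligned helpers; this is a suggestion, not what the driver does:

	#include <asm/unaligned.h>

	blkdev->capacity    = get_unaligned_be64(&buf[0]) + 1; /* last LBA + 1 */
	blkdev->sector_size = get_unaligned_be32(&buf[8]);     /* block length */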
733 struct block_device_context *blkdev = dev_get_drvdata(device);
754 spin_lock_irqsave(&blkdev->lock, flags);
756 blkdev->shutting_down = 1;
758 blk_stop_queue(blkdev->gd->queue);
760 spin_unlock_irqrestore(&blkdev->lock, flags);
762 while (blkdev->num_outstanding_reqs) {
764 blkdev->num_outstanding_reqs);
768 blkvsc_do_flush(blkdev);
770 spin_lock_irqsave(&blkdev->lock, flags);
772 blkvsc_cancel_pending_reqs(blkdev);
774 spin_unlock_irqrestore(&blkdev->lock, flags);
776 blk_cleanup_queue(blkdev->gd->queue);
778 del_gendisk(blkdev->gd);
780 kmem_cache_destroy(blkdev->request_pool);
782 kfree(blkdev);
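The remove handler (lines 733-782) repeats the shutdown quiesce and then frees everything in four steps. Note the order as matched: the queue is cleaned up before the gendisk is deleted, the reverse of the ordering later kernels standardized on; either way the request_pool slab must outlive both, since in-flight requests are allocated from it:

	blk_cleanup_queue(blkdev->gd->queue);     /* line 776 */
	del_gendisk(blkdev->gd);                  /* line 778 */
	kmem_cache_destroy(blkdev->request_pool); /* line 780 */
	kfree(blkdev);                            /* line 782 */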
846 struct block_device_context *blkdev = blkvsc_req->dev;
847 struct vm_device *device_ctx = blkdev->device_ctx;
875 storvsc_req->Host = blkdev->port;
876 storvsc_req->Bus = blkdev->path;
877 storvsc_req->TargetId = blkdev->target;
886 ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj,
889 blkdev->num_outstanding_reqs++;
899 static int blkvsc_do_request(struct block_device_context *blkdev,
915 DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu\n", blkdev, req,
919 group = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
952 sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));
954 blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);
962 blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
970 kmem_cache_free(blkdev->request_pool, blkvsc_req);
973 kmem_cache_free(blkdev->request_pool, group);
980 blkvsc_req->dev = blkdev;
1012 DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n",
1013 blkdev, req, blkvsc_req->group,
1018 (blkdev->sector_size >> 9));
1021 (blkdev->sector_size >> 9);
1037 &blkdev->pending_list);
1044 &blkdev->pending_list);
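blkvsc_do_request() (lines 899-1044) splits a block-layer request into driver requests allocated from the slab. The block layer counts in 512-byte sectors while the device counts in sector_size-byte blocks, so starts and lengths are rescaled by sector_size >> 9 (lines 952-954, 1018-1021). A runnable userspace illustration of that rescaling:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sector_start = 8192; /* request start in 512-byte sectors */
	unsigned num_sectors  = 64;   /* request length in 512-byte sectors */
	unsigned sector_size  = 2048; /* device block size, e.g. a DVD */
	unsigned scale = sector_size >> 9; /* 512-byte sectors per block */

	/* equivalent of sector_div(sector_start, scale) in the driver */
	uint64_t dev_start = sector_start / scale;
	unsigned dev_count = num_sectors / scale;

	printf("device blocks: start %llu, count %u\n",
	       (unsigned long long)dev_start, dev_count);
	return 0;
}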
1064 struct block_device_context *blkdev =
1071 blkdev->num_outstanding_reqs--;
1086 struct block_device_context *blkdev =
1093 DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s "
1096 blkdev, blkvsc_req, blkvsc_req->group,
1102 blkdev->num_outstanding_reqs);
1104 spin_lock_irqsave(&blkdev->lock, flags);
1106 blkdev->num_outstanding_reqs--;
1128 comp_req->sector_count * blkdev->sector_size)) {
1135 kmem_cache_free(blkdev->request_pool,
1139 kmem_cache_free(blkdev->request_pool, comp_req);
1142 if (!blkdev->shutting_down) {
1143 blkvsc_do_pending_reqs(blkdev);
1144 blk_start_queue(blkdev->gd->queue);
1145 blkvsc_request(blkdev->gd->queue);
1149 spin_unlock_irqrestore(&blkdev->lock, flags);
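The I/O completion callback (lines 1086-1149) does its bookkeeping under blkdev->lock: it drops num_outstanding_reqs, frees the finished request (and, when a whole group is done, the group) back to request_pool, and, unless a shutdown is in progress, drains pended requests and re-arms the queue. A hedged sketch of the tail of that path:

	spin_lock_irqsave(&blkdev->lock, flags);
	blkdev->num_outstanding_reqs--;
	/* ... end the struct request, free comp_req to request_pool ... */
	if (!blkdev->shutting_down) {
		blkvsc_do_pending_reqs(blkdev);     /* retry pended work first */
		blk_start_queue(blkdev->gd->queue); /* undo blk_stop_queue() */
		blkvsc_request(blkdev->gd->queue);  /* pull queued requests now */
	}
	spin_unlock_irqrestore(&blkdev->lock, flags);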
1152 static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
1162 list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
1187 blkdev->sector_size);
1193 kmem_cache_free(blkdev->request_pool, comp_req);
1206 blkdev->sector_size)) {
1214 kmem_cache_free(blkdev->request_pool,
1219 kmem_cache_free(blkdev->request_pool, pend_req);
1226 static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
1232 list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
1250 struct block_device_context *blkdev = NULL;
1258 blkdev = req->rq_disk->private_data;
1259 if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS ||
1260 blkdev->media_not_present) {
1265 ret = blkvsc_do_pending_reqs(blkdev);
1276 ret = blkvsc_do_request(blkdev, req);
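blkvsc_request() itself (lines 1250-1276) is the queue's request function. For each request pulled from the queue it rejects I/O while shutting down, for non-filesystem commands, or when no media is present, then drains the pending backlog before submitting the new request. A sketch of that gate; how a rejected request is completed is not visible in the matches, so the __blk_end_request_cur() call is an assumption:

	blkdev = req->rq_disk->private_data;
	if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS ||
	    blkdev->media_not_present) {
		__blk_end_request_cur(req, 0);   /* assumed skip completion */
		continue;                        /* next request off the queue */
	}

	ret = blkvsc_do_pending_reqs(blkdev);    /* backlog first (line 1265) */
	/* ... */
	ret = blkvsc_do_request(blkdev, req);    /* then the new request */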
1292 struct block_device_context *blkdev = bdev->bd_disk->private_data;
1294 DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
1295 blkdev->gd->disk_name);
1298 spin_lock(&blkdev->lock);
1300 if (!blkdev->users && blkdev->device_type == DVD_TYPE) {
1301 spin_unlock(&blkdev->lock);
1303 spin_lock(&blkdev->lock);
1306 blkdev->users++;
1308 spin_unlock(&blkdev->lock);
1315 struct block_device_context *blkdev = disk->private_data;
1317 DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
1318 blkdev->gd->disk_name);
1321 spin_lock(&blkdev->lock);
1322 if (blkdev->users == 1) {
1323 spin_unlock(&blkdev->lock);
1324 blkvsc_do_flush(blkdev);
1325 spin_lock(&blkdev->lock);
1328 blkdev->users--;
1330 spin_unlock(&blkdev->lock);
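blkvsc_open() and blkvsc_release() (lines 1292-1330) guard a users count with blkdev->lock, and both drop the lock around work that issues I/O and can sleep: a media check on first open of a DVD, a cache flush on last close. The release side, reconstructed with the drop-and-retake made explicit:

	spin_lock(&blkdev->lock);
	if (blkdev->users == 1) {
		/* last opener: flush outside the spinlock, since
		 * blkvsc_do_flush() allocates and waits for I/O */
		spin_unlock(&blkdev->lock);
		blkvsc_do_flush(blkdev);
		spin_lock(&blkdev->lock);
	}
	blkdev->users--;
	spin_unlock(&blkdev->lock);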
1343 struct block_device_context *blkdev = gd->private_data;
1347 if (blkdev->device_type == DVD_TYPE) {
1348 blkvsc_do_read_capacity(blkdev);
1349 set_capacity(blkdev->gd, blkdev->capacity *
1350 (blkdev->sector_size/512));
1351 blk_queue_logical_block_size(gd->queue, blkdev->sector_size);
1439 /* struct block_device_context *blkdev = bd->bd_disk->private_data; */
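Lastly, revalidation (lines 1343-1351) re-reads capacity for DVDs, since a newly inserted disc changes both size and block size; the 512-byte rescaling matches probe, and the line-1439 match above is just a commented-out leftover. Reconstructed below; the surrounding function signature and return value are assumptions:

	static int blkvsc_revalidate_disk(struct gendisk *gd)
	{
		struct block_device_context *blkdev = gd->private_data;

		if (blkdev->device_type == DVD_TYPE) {
			blkvsc_do_read_capacity(blkdev);
			set_capacity(blkdev->gd, blkdev->capacity *
				     (blkdev->sector_size / 512));
			blk_queue_logical_block_size(gd->queue, blkdev->sector_size);
		}
		return 0;
	}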