/*
 * Block OSM
 *
 * Copyright (C) 1999-2002 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the purpose of avoiding doubt the preferred form of the work
 * for making modifications shall be a standards-compliant form such
 * as a gzipped tar and not one requiring a proprietary or patent
 * encumbered tool to unpack.
 *
 * Fixes/additions:
 *	Steve Ralston:
 *		Multiple device handling error fixes,
 *		Added a queue depth.
 *	Alan Cox:
 *		FC920 has an rmw bug. Don't or in the end marker.
 *		Removed queue walk, fixed for 64bitness.
 *		Rewrote much of the code over time
 *		Added indirect block lists
 *		Handle 64K limits on many controllers
 *		Don't use indirects on the Promise (breaks)
 *		Heavily chop down the queue depths
 *	Deepak Saxena:
 *		Independent queues per IOP
 *		Support for dynamic device creation/deletion
 *		Code cleanup
 *		Support for larger I/Os through merge* functions
 *		(taken from DAC960 driver)
 *	Boji T Kannanthanam:
 *		Set the I2O Block devices to be detected in increasing
 *		order of TIDs during boot.
 *		Search and set the I2O block device that we boot off
 *		from as the first device to be claimed (as /dev/i2o/hda)
 *		Properly attach/detach I2O gendisk structure from the
 *		system gendisk list. The I2O block devices now appear in
 *		/proc/partitions.
 *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *		Minor bugfixes for 2.6.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2o.h>
#include <linux/smp_lock.h>

#include <linux/mempool.h>

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>

#include "i2o_block.h"

#define OSM_NAME	"block-osm"
#define OSM_VERSION	"1.325"
#define OSM_DESCRIPTION	"I2O Block Device OSM"

static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;

/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
	{I2O_CLASS_RANDOM_BLOCK_STORAGE},
	{I2O_CLASS_END}
};

/**
 * i2o_block_device_free - free the memory of the I2O Block device
 * @dev: I2O Block device, which should be cleaned up
 *
 * Frees the request queue, gendisk and the i2o_block_device structure.
 */
static void i2o_block_device_free(struct i2o_block_device *dev)
{
	blk_cleanup_queue(dev->gd->queue);

	put_disk(dev->gd);

	kfree(dev);
};

/**
 * i2o_block_remove - remove the I2O Block device from the system again
 * @dev: I2O Block device which should be removed
 *
 * Remove gendisk from system and free all allocated memory.
 *
 * Always returns 0.
 */
static int i2o_block_remove(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

	osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

	del_gendisk(i2o_blk_dev->gd);

	dev_set_drvdata(dev, NULL);

	i2o_device_claim_release(i2o_dev);

	i2o_block_device_free(i2o_blk_dev);

	return 0;
};
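
/*
 * Note (editorial): the helper requests below (flush, mount, lock, unlock,
 * power) all follow the same pattern: obtain a message frame from the IOP
 * with i2o_msg_get_wait(), fill head[0] with the frame size and SGL offset,
 * fill head[1] with the command, the initiator (HOST_TID) and the target
 * TID, add the command-specific words to the body and post the frame with
 * i2o_msg_post_wait(), which blocks until the reply arrives or the timeout
 * given as the last argument expires.
 */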

/**
 * i2o_block_device_flush - Flush all dirty data of I2O device dev
 * @dev: I2O device which should be flushed
 *
 * Flushes all dirty data on device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_flush(struct i2o_device *dev)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 |
				     HOST_TID << 12 | dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(60 << 16);
	osm_debug("Flushing...\n");

	return i2o_msg_post_wait(dev->iop, msg, 60);
};

/**
 * i2o_block_device_mount - Mount (load) the media of device dev
 * @dev: I2O device which should receive the mount request
 * @media_id: Media Identifier
 *
 * Load a media into the drive. The identifier should be set to -1, because
 * the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 |
				     HOST_TID << 12 | dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	msg->body[1] = cpu_to_le32(0x00000000);
	osm_debug("Mounting...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
};

/**
 * i2o_block_device_lock - Locks the media of device dev
 * @dev: I2O device which should receive the lock request
 * @media_id: Media Identifier
 *
 * Lock media of device dev to prevent removal. The media identifier
 * should be set to -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 |
				     HOST_TID << 12 | dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	osm_debug("Locking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
};

/**
 * i2o_block_device_unlock - Unlocks the media of device dev
 * @dev: I2O device which should receive the unlock request
 * @media_id: Media Identifier
 *
 * Unlocks the media in device dev. The media identifier should be set to
 * -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 |
				     HOST_TID << 12 | dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(media_id);
	osm_debug("Unlocking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
};

/**
 * i2o_block_device_power - Power management for device dev
 * @dev: I2O Block device which should receive the power management request
 * @op: Operation to send
 *
 * Send a power management request to the device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
	struct i2o_device *i2o_dev = dev->i2o_dev;
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_message *msg;
	int rc;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 |
				     HOST_TID << 12 | i2o_dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(op << 24);
	osm_debug("Power...\n");

	rc = i2o_msg_post_wait(c, msg, 60);
	if (!rc)
		dev->power = op;

	return rc;
};

/**
 * i2o_block_request_alloc - Allocate an I2O block request struct
 *
 * Allocates an I2O block request struct and initializes the list.
 *
 * Returns an i2o_block_request pointer on success or negative error code
 * on failure.
 */
static inline struct i2o_block_request *i2o_block_request_alloc(void)
{
	struct i2o_block_request *ireq;

	ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
	if (!ireq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ireq->queue);
	sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS);

	return ireq;
};

/**
 * i2o_block_request_free - Frees an I2O block request
 * @ireq: I2O block request which should be freed
 *
 * Frees the allocated memory (gives it back to the request mempool).
 */
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
{
	mempool_free(ireq, i2o_blk_req_pool.pool);
};
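
/*
 * Note (editorial): the scatter/gather list built below is DMA-mapped
 * towards the controller with a direction derived from the request (READ
 * maps from-device, WRITE maps to-device). The matching unmap is done in
 * i2o_block_sglist_free() once the reply has been processed.
 */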

/**
 * i2o_block_sglist_alloc - Allocate the SG list and map it
 * @c: I2O controller to which the request belongs
 * @ireq: I2O block request
 * @mptr: message body pointer
 *
 * Builds the SG list and maps it so that it is accessible by the
 * controller.
 *
 * Returns 0 on failure or 1 on success.
 */
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
					 struct i2o_block_request *ireq,
					 u32 **mptr)
{
	int nents;
	enum dma_data_direction direction;

	ireq->dev = &c->pdev->dev;
	nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	ireq->sg_nents = nents;

	return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);
};

/**
 * i2o_block_sglist_free - Frees the SG list
 * @ireq: I2O block request from which the SG should be freed
 *
 * Frees the SG list from the I2O block request.
 */
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
	enum dma_data_direction direction;

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
};

/**
 * i2o_block_prep_req_fn - Allocates I2O block device specific struct
 * @q: request queue for the request
 * @req: the request to prepare
 *
 * Allocate the necessary i2o_block_request struct and connect it to
 * the request. This is needed so that we do not lose the SG list later on.
 *
 * Returns BLKPREP_OK on success, BLKPREP_DEFER if no memory is available
 * or BLKPREP_KILL if the block device has already been removed.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
	struct i2o_block_device *i2o_blk_dev = q->queuedata;
	struct i2o_block_request *ireq;

	if (unlikely(!i2o_blk_dev)) {
		osm_err("block device already removed\n");
		return BLKPREP_KILL;
	}

	/* connect the i2o_block_request to the request */
	if (!req->special) {
		ireq = i2o_block_request_alloc();
		if (IS_ERR(ireq)) {
			osm_debug("unable to allocate i2o_block_request!\n");
			return BLKPREP_DEFER;
		}

		ireq->i2o_blk_dev = i2o_blk_dev;
		req->special = ireq;
		ireq->req = req;
	}
	/* do not come back here */
	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
};

/**
 * i2o_block_delayed_request_fn - delayed request queue function
 * @work: the delayed request with the queue to start
 *
 * If the request queue is stopped for a disk, and there is no open
 * request, a new event is created, which calls this function to start
 * the queue after I2O_BLOCK_RETRY_TIME. Otherwise the queue will never
 * be started again.
 */
static void i2o_block_delayed_request_fn(struct work_struct *work)
{
	struct i2o_block_delayed_request *dreq =
		container_of(work, struct i2o_block_delayed_request,
			     work.work);
	struct request_queue *q = dreq->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
	kfree(dreq);
};
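
/*
 * Note (editorial): replies from the IOP arrive asynchronously in
 * i2o_block_reply(), which looks the original request up through the
 * transaction context list and hands it to i2o_block_end_request(). The
 * latter undoes what i2o_block_transfer() set up: it drops the request
 * from the device's open queue, unmaps the SG list, returns the
 * i2o_block_request to the mempool and restarts the block layer queue.
 */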

/**
 * i2o_block_end_request - Post-processing of completed commands
 * @req: request which should be completed
 * @error: 0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Mark the request as complete. The lock must not be held when entering.
 *
 */
static void i2o_block_end_request(struct request *req, int error,
				  int nr_bytes)
{
	struct i2o_block_request *ireq = req->special;
	struct i2o_block_device *dev = ireq->i2o_blk_dev;
	struct request_queue *q = req->q;
	unsigned long flags;

	if (blk_end_request(req, error, nr_bytes))
		if (error)
			blk_end_request_all(req, -EIO);

	spin_lock_irqsave(q->queue_lock, flags);

	if (likely(dev)) {
		dev->open_queue_depth--;
		list_del(&ireq->queue);
	}

	blk_start_queue(q);

	spin_unlock_irqrestore(q->queue_lock, flags);

	i2o_block_sglist_free(ireq);
	i2o_block_request_free(ireq);
};

/**
 * i2o_block_reply - Block OSM reply handler.
 * @c: I2O controller from which the message arrives
 * @m: message id of reply
 * @msg: the actual I2O message reply
 *
 * This function gets all the message replies.
 *
 */
static int i2o_block_reply(struct i2o_controller *c, u32 m,
			   struct i2o_message *msg)
{
	struct request *req;
	int error = 0;

	req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
	if (unlikely(!req)) {
		osm_err("NULL reply received!\n");
		return -1;
	}

	/*
	 * Let's see what is cooking. We stuffed the
	 * request in the context.
	 */

	if ((le32_to_cpu(msg->body[0]) >> 24) != 0) {
		u32 status = le32_to_cpu(msg->body[0]);
		/*
		 * Device not ready means two things. One is that the
		 * thing went offline (but not as a media removal).
		 *
		 * The second is that you have a SuperTrak 100 and the
		 * firmware got constipated. Unlike standard i2o card
		 * setups the supertrak returns an error rather than
		 * blocking for the timeout in these cases.
		 *
		 * Don't stick a supertrak100 into cache aggressive modes
		 */

		osm_err("TID %03x error status: 0x%02x, detailed status: "
			"0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
			status >> 24, status & 0xffff);

		req->errors++;

		error = -EIO;
	}

	i2o_block_end_request(req, error, le32_to_cpu(msg->body[1]));

	return 1;
};

static void i2o_block_event(struct work_struct *work)
{
	struct i2o_event *evt = container_of(work, struct i2o_event, work);
	osm_debug("event received\n");
	kfree(evt);
};

/*
 * SCSI-CAM for ioctl geometry mapping
 * Duplicated with SCSI - this should be moved into somewhere common
 * perhaps genhd ?
 *
 * LBA -> CHS mapping table taken from:
 *
 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
 *  Platforms"
 *
 * This is an I2O document that is only available to I2O members,
 * not developers.
 *
 * From my understanding, this is how all the I2O cards do this
 *
 * Disk Size       | Sectors | Heads | Cylinders
 * ----------------+---------+-------+--------------------
 * 1 < X <= 528M   | 63      | 16    | X/(63 * 16 * 512)
 * 528M < X <= 1G  | 63      | 32    | X/(63 * 32 * 512)
 * 1G < X <= 21G   | 63      | 64    | X/(63 * 64 * 512)
 * 21G < X <= 42G  | 63      | 128   | X/(63 * 128 * 512)
 * 42G < X         | 63      | 255   | X/(63 * 255 * 512)
 *
 */
#define BLOCK_SIZE_528M		1081344
#define BLOCK_SIZE_1G		2097152
#define BLOCK_SIZE_21G		4403200
#define BLOCK_SIZE_42G		8806400
#define BLOCK_SIZE_84G		17612800

static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
				unsigned char *hds, unsigned char *secs)
{
	unsigned long heads, sectors, cylinders;

	sectors = 63L;		/* Maximize sectors per track */
	if (capacity <= BLOCK_SIZE_528M)
		heads = 16;
	else if (capacity <= BLOCK_SIZE_1G)
		heads = 32;
	else if (capacity <= BLOCK_SIZE_21G)
		heads = 64;
	else if (capacity <= BLOCK_SIZE_42G)
		heads = 128;
	else
		heads = 255;

	cylinders = (unsigned long)capacity / (heads * sectors);

	*cyls = (unsigned short)cylinders;	/* Stuff return values */
	*secs = (unsigned char)sectors;
	*hds = (unsigned char)heads;
}

/**
 * i2o_block_open - Open the block device
 * @bdev: block device being opened
 * @mode: file open mode
 *
 * Power up the device, mount and lock the media. This function is called
 * if the block device is opened for access.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_open(struct block_device *bdev, fmode_t mode)
{
	struct i2o_block_device *dev = bdev->bd_disk->private_data;

	if (!dev->i2o_dev)
		return -ENODEV;

	lock_kernel();
	if (dev->power > 0x1f)
		i2o_block_device_power(dev, 0x02);

	i2o_block_device_mount(dev->i2o_dev, -1);

	i2o_block_device_lock(dev->i2o_dev, -1);

	osm_debug("Ready.\n");
	unlock_kernel();

	return 0;
};

/**
 * i2o_block_release - Release the I2O block device
 * @disk: gendisk device being released
 * @mode: file open mode
 *
 * Unlock and unmount the media, and power down the device. Gets called if
 * the block device is closed.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_release(struct gendisk *disk, fmode_t mode)
{
	struct i2o_block_device *dev = disk->private_data;
	u8 operation;

	/*
	 * This is to deal with the case of an application
	 * opening a device and then the device disappears while
	 * it's in use, and then the application tries to release
	 * it. ex: Unmounting a deleted RAID volume at reboot.
	 * If we send messages, it will just cause FAILs since
	 * the TID no longer exists.
	 */
	if (!dev->i2o_dev)
		return 0;

	lock_kernel();
	i2o_block_device_flush(dev->i2o_dev);

	i2o_block_device_unlock(dev->i2o_dev, -1);

	if (dev->flags & (1 << 3 | 1 << 4))	/* Removable */
		operation = 0x21;
	else
		operation = 0x24;

	i2o_block_device_power(dev, operation);
	unlock_kernel();

	return 0;
}

static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	i2o_block_biosparam(get_capacity(bdev->bd_disk),
			    &geo->cylinders, &geo->heads, &geo->sectors);
	return 0;
}
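
/*
 * Note (editorial): the BLKI2O* ioctls below read and set the per-device
 * cache strategy used by i2o_block_transfer(): BLKI2OGRSTRAT/BLKI2OGWSTRAT
 * return the current read/write strategy, BLKI2OSRSTRAT accepts values up
 * to CACHE_SMARTFETCH and BLKI2OSWSTRAT accepts 0 or a value between
 * CACHE_WRITETHROUGH and CACHE_SMARTBACK (see the CACHE_* constants in
 * i2o_block.h).
 */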

/**
 * i2o_block_ioctl - Issue device specific ioctl calls.
 * @bdev: block device being opened
 * @mode: file open mode
 * @cmd: ioctl command
 * @arg: arg
 *
 * Handles ioctl request for the block device.
 *
 * Return 0 on success or negative error on failure.
 */
static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	struct i2o_block_device *dev = disk->private_data;
	int ret = -ENOTTY;

	/* Anyone capable of this syscall can do *real bad* things */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	lock_kernel();
	switch (cmd) {
	case BLKI2OGRSTRAT:
		ret = put_user(dev->rcache, (int __user *)arg);
		break;
	case BLKI2OGWSTRAT:
		ret = put_user(dev->wcache, (int __user *)arg);
		break;
	case BLKI2OSRSTRAT:
		ret = -EINVAL;
		if (arg < 0 || arg > CACHE_SMARTFETCH)
			break;
		dev->rcache = arg;
		ret = 0;
		break;
	case BLKI2OSWSTRAT:
		ret = -EINVAL;
		if (arg != 0
		    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
			break;
		dev->wcache = arg;
		ret = 0;
		break;
	}
	unlock_kernel();

	return ret;
};

/**
 * i2o_block_media_changed - Have we seen a media change?
 * @disk: gendisk which should be verified
 *
 * Verifies if the media has changed.
 *
 * Returns 1 if the media was changed or 0 otherwise.
 */
static int i2o_block_media_changed(struct gendisk *disk)
{
	struct i2o_block_device *p = disk->private_data;

	if (p->media_change_flag) {
		p->media_change_flag = 0;
		return 1;
	}
	return 0;
}
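
/*
 * Note (editorial): layout of the message built by i2o_block_transfer()
 * in the generic (non-Adaptec) case:
 *
 *	head[1]  I2O_CMD_BLOCK_READ/WRITE | HOST_TID << 12 | target TID
 *	body[0]  control flags (cache hints derived from dev->rcache/wcache)
 *	body[1]  transfer length in bytes
 *	body[2]  low 32 bits of the byte offset on the device
 *	body[3]  high 32 bits of the byte offset
 *	body[4+] scatter/gather list appended by i2o_block_sglist_alloc()
 *
 * head[0] is filled in last, once the final message size is known. When
 * CONFIG_I2O_EXT_ADAPTEC is set and the controller is a DPT/Adaptec one,
 * a private SCSI_EXEC message carrying a READ_10/WRITE_10 CDB is built
 * instead, with the SGL at offset 12.
 */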

/**
 * i2o_block_transfer - Transfer a request to/from the I2O controller
 * @req: the request which should be transferred
 *
 * This function converts the request into an I2O message. The necessary
 * DMA buffers are allocated and after everything is set up the message is
 * posted to the I2O controller. No cleanup is done by this function. It
 * is done on the interrupt side when the reply arrives.
 *
 * Return 0 on success or negative error code on failure.
 */
static int i2o_block_transfer(struct request *req)
{
	struct i2o_block_device *dev = req->rq_disk->private_data;
	struct i2o_controller *c;
	u32 tid;
	struct i2o_message *msg;
	u32 *mptr;
	struct i2o_block_request *ireq = req->special;
	u32 tcntxt;
	u32 sgl_offset = SGL_OFFSET_8;
	u32 ctl_flags = 0x00000000;
	int rc;
	u32 cmd;

	if (unlikely(!dev->i2o_dev)) {
		osm_err("transfer to removed drive\n");
		rc = -ENODEV;
		goto exit;
	}

	tid = dev->i2o_dev->lct_data.tid;
	c = dev->i2o_dev->iop;

	msg = i2o_msg_get(c);
	if (IS_ERR(msg)) {
		rc = PTR_ERR(msg);
		goto exit;
	}

	tcntxt = i2o_cntxt_list_add(c, req);
	if (!tcntxt) {
		rc = -ENOMEM;
		goto nop_msg;
	}

	msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
	msg->u.s.tcntxt = cpu_to_le32(tcntxt);

	mptr = &msg->body[0];

	if (rq_data_dir(req) == READ) {
		cmd = I2O_CMD_BLOCK_READ << 24;

		switch (dev->rcache) {
		case CACHE_PREFETCH:
			ctl_flags = 0x201F0008;
			break;

		case CACHE_SMARTFETCH:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x201F0008;
			else
				ctl_flags = 0x001F0000;
			break;

		default:
			break;
		}
	} else {
		cmd = I2O_CMD_BLOCK_WRITE << 24;

		switch (dev->wcache) {
		case CACHE_WRITETHROUGH:
			ctl_flags = 0x001F0008;
			break;
		case CACHE_WRITEBACK:
			ctl_flags = 0x001F0010;
			break;
		case CACHE_SMARTBACK:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;
			break;
		case CACHE_SMARTTHROUGH:
			if (blk_rq_sectors(req) > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;
			/* fall through */
		default:
			break;
		}
	}

#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec) {
		u8 cmd[10];
		u32 scsi_flags;
		u16 hwsec;

		hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
		memset(cmd, 0, 10);

		sgl_offset = SGL_OFFSET_12;

		msg->u.head[1] =
		    cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);

		*mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
		*mptr++ = cpu_to_le32(tid);

		/*
		 * ENABLE_DISCONNECT
		 * SIMPLE_TAG
		 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
		 */
		if (rq_data_dir(req) == READ) {
			cmd[0] = READ_10;
			scsi_flags = 0x60a0000a;
		} else {
			cmd[0] = WRITE_10;
			scsi_flags = 0xa0a0000a;
		}

		*mptr++ = cpu_to_le32(scsi_flags);

		*((u32 *) &cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
		*((u16 *) &cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);

		memcpy(mptr, cmd, 10);
		mptr += 4;
		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
	} else
#endif
	{
		msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
		*mptr++ = cpu_to_le32(ctl_flags);
		*mptr++ = cpu_to_le32(blk_rq_bytes(req));
		*mptr++ =
		    cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
		*mptr++ =
		    cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
	}

	if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
		rc = -ENOMEM;
		goto context_remove;
	}

	msg->u.head[0] =
	    cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);

	list_add_tail(&ireq->queue, &dev->open_queue);
	dev->open_queue_depth++;

	i2o_msg_post(c, msg);

	return 0;

context_remove:
	i2o_cntxt_list_remove(c, req);

nop_msg:
	i2o_msg_nop(c, msg);

exit:
	return rc;
};
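
/*
 * Note (editorial): each disk allows at most I2O_BLOCK_MAX_OPEN_REQUESTS
 * outstanding requests. When a request cannot be sent but others are still
 * in flight, i2o_block_request_fn() simply stops pulling requests and
 * relies on a completion restarting the queue (blk_start_queue() in
 * i2o_block_end_request()). When nothing is in flight, the queue is
 * stopped and a delayed work item (i2o_block_delayed_request_fn) restarts
 * it after I2O_BLOCK_RETRY_TIME.
 */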

/**
 * i2o_block_request_fn - request queue handling function
 * @q: request queue from which the request could be fetched
 *
 * Takes the next request from the queue, transfers it and, if no error
 * occurs, dequeues it from the queue. On arrival of the reply the message
 * will be processed further. If an error occurs the request is requeued.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
	struct request *req;

	while (!blk_queue_plugged(q)) {
		req = blk_peek_request(q);
		if (!req)
			break;

		if (req->cmd_type == REQ_TYPE_FS) {
			struct i2o_block_delayed_request *dreq;
			struct i2o_block_request *ireq = req->special;
			unsigned int queue_depth;

			queue_depth = ireq->i2o_blk_dev->open_queue_depth;

			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
				if (!i2o_block_transfer(req)) {
					blk_start_request(req);
					continue;
				} else
					osm_info("transfer error\n");
			}

			if (queue_depth)
				break;

			/* stop the queue and retry later */
			dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
			if (!dreq)
				continue;

			dreq->queue = q;
			INIT_DELAYED_WORK(&dreq->work,
					  i2o_block_delayed_request_fn);

			if (!queue_delayed_work(i2o_block_driver.event_queue,
						&dreq->work,
						I2O_BLOCK_RETRY_TIME))
				kfree(dreq);
			else {
				blk_stop_queue(q);
				break;
			}
		} else {
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
		}
	}
};

/* I2O Block device operations definition */
static const struct block_device_operations i2o_block_fops = {
	.owner = THIS_MODULE,
	.open = i2o_block_open,
	.release = i2o_block_release,
	.ioctl = i2o_block_ioctl,
	.compat_ioctl = i2o_block_ioctl,
	.getgeo = i2o_block_getgeo,
	.media_changed = i2o_block_media_changed
};

/**
 * i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 * Allocate memory for the i2o_block_device struct, gendisk and request
 * queue and initialize them as far as possible without additional device
 * information.
 *
 * Returns a pointer to the allocated I2O Block device on success or a
 * negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
	struct i2o_block_device *dev;
	struct gendisk *gd;
	struct request_queue *queue;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		osm_err("Insufficient memory to allocate I2O Block disk.\n");
		rc = -ENOMEM;
		goto exit;
	}

	INIT_LIST_HEAD(&dev->open_queue);
	spin_lock_init(&dev->lock);
	dev->rcache = CACHE_PREFETCH;
	dev->wcache = CACHE_WRITEBACK;

	/* allocate a gendisk with 16 partitions */
	gd = alloc_disk(16);
	if (!gd) {
		osm_err("Insufficient memory to allocate gendisk.\n");
		rc = -ENOMEM;
		goto cleanup_dev;
	}

	/* initialize the request queue */
	queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
	if (!queue) {
		osm_err("Insufficient memory to allocate request queue.\n");
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

	gd->major = I2O_MAJOR;
	gd->queue = queue;
	gd->fops = &i2o_block_fops;
	gd->private_data = dev;

	dev->gd = gd;

	return dev;

cleanup_queue:
	put_disk(gd);

cleanup_dev:
	kfree(dev);

exit:
	return ERR_PTR(rc);
};
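
/*
 * Note (editorial): i2o_block_probe() below queries the device through
 * i2o_parm_field_get(): it first asks parameter group 0x0004 for the
 * current media block size and capacity and falls back to group 0x0000
 * when a field is not supported; group 0x0000, field 2 also provides the
 * initial power state.
 */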

/**
 * i2o_block_probe - verify if dev is an I2O Block device and install it
 * @dev: device to verify if it is an I2O Block device
 *
 * We only verify if the user_tid of the device is 0xfff and then install
 * the device. Otherwise it is used by some other device (e.g. RAID).
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_block_device *i2o_blk_dev;
	struct gendisk *gd;
	struct request_queue *queue;
	static int unit = 0;
	int rc;
	u64 size;
	u32 blocksize;
	u16 body_size = 4;
	u16 power;
	unsigned short max_sectors;

#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec)
		body_size = 8;
#endif

	if (c->limit_sectors)
		max_sectors = I2O_MAX_SECTORS_LIMITED;
	else
		max_sectors = I2O_MAX_SECTORS;

	/* skip devices which are used by IOP */
	if (i2o_dev->lct_data.user_tid != 0xfff) {
		osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
		return -ENODEV;
	}

	if (i2o_device_claim(i2o_dev)) {
		osm_warn("Unable to claim device. Installation aborted\n");
		rc = -EFAULT;
		goto exit;
	}

	i2o_blk_dev = i2o_block_device_alloc();
	if (IS_ERR(i2o_blk_dev)) {
		osm_err("could not alloc a new I2O block device");
		rc = PTR_ERR(i2o_blk_dev);
		goto claim_release;
	}

	i2o_blk_dev->i2o_dev = i2o_dev;
	dev_set_drvdata(dev, i2o_blk_dev);

	/* setup gendisk */
	gd = i2o_blk_dev->gd;
	gd->first_minor = unit << 4;
	sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
	gd->driverfs_dev = &i2o_dev->device;

	/* setup request queue */
	queue = gd->queue;
	queue->queuedata = i2o_blk_dev;

	blk_queue_max_hw_sectors(queue, max_sectors);
	blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));

	osm_debug("max sectors = %d\n", queue_max_sectors(queue));
	osm_debug("phys segments = %d\n", queue_max_segments(queue));
	osm_debug("max hw segments = %d\n", queue_max_segments(queue));

	/*
	 * Ask for the current media data. If that isn't supported
	 * then we ask for the device capacity data
	 */
	if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
		blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
	} else
		osm_warn("unable to get blocksize of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
		set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
	} else
		osm_warn("could not get size of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
		i2o_blk_dev->power = power;

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

	add_disk(gd);

	unit++;

	osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	return 0;

claim_release:
	i2o_device_claim_release(i2o_dev);

exit:
	return rc;
};

/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
	.name = OSM_NAME,
	.event = i2o_block_event,
	.reply = i2o_block_reply,
	.classes = i2o_block_class_id,
	.driver = {
		   .probe = i2o_block_probe,
		   .remove = i2o_block_remove,
		   },
};

/**
 * i2o_block_init - Block OSM initialization function
 *
 * Allocate the slab and mempool for request structs, register the
 * i2o_block block device and finally register the Block OSM in the I2O
 * core.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
	int rc;
	int size;

	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

	/* Allocate request mempool and slab */
	size = sizeof(struct i2o_block_request);
	i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
						  SLAB_HWCACHE_ALIGN, NULL);
	if (!i2o_blk_req_pool.slab) {
		osm_err("can't init request slab\n");
		rc = -ENOMEM;
		goto exit;
	}

	i2o_blk_req_pool.pool =
		mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
					 i2o_blk_req_pool.slab);
	if (!i2o_blk_req_pool.pool) {
		osm_err("can't init request mempool\n");
		rc = -ENOMEM;
		goto free_slab;
	}

	/* Register the block device interfaces */
	rc = register_blkdev(I2O_MAJOR, "i2o_block");
	if (rc) {
		osm_err("unable to register block device\n");
		goto free_mempool;
	}
#ifdef MODULE
	osm_info("registered device at major %d\n", I2O_MAJOR);
#endif

	/* Register Block OSM into I2O core */
	rc = i2o_driver_register(&i2o_block_driver);
	if (rc) {
		osm_err("Could not register Block driver\n");
		goto unregister_blkdev;
	}

	return 0;

unregister_blkdev:
	unregister_blkdev(I2O_MAJOR, "i2o_block");

free_mempool:
	mempool_destroy(i2o_blk_req_pool.pool);

free_slab:
	kmem_cache_destroy(i2o_blk_req_pool.slab);

exit:
	return rc;
};

/**
 * i2o_block_exit - Block OSM exit function
 *
 * Unregisters Block OSM from I2O core, unregisters i2o_block block device
 * and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
	/* Unregister I2O Block OSM from I2O core */
	i2o_driver_unregister(&i2o_block_driver);

	/* Unregister block device */
	unregister_blkdev(I2O_MAJOR, "i2o_block");

	/* Free request mempool and slab */
	mempool_destroy(i2o_blk_req_pool.pool);
	kmem_cache_destroy(i2o_blk_req_pool.slab);
};

MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);