Lines matching refs: pd in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/block/ (pktcdvd, the packet-writing CD/DVD driver)

Two short sketches after the listing illustrate the ZONE() arithmetic at line 87 and the write-congestion marks visible around lines 1287 and 2503.

87 #define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
110 static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
121 p->pd = pd;
201 struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
205 n = sprintf(data, "%lu\n", pd->stats.pkt_started);
208 n = sprintf(data, "%lu\n", pd->stats.pkt_ended);
211 n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);
214 n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);
217 n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);
220 spin_lock(&pd->lock);
221 v = pd->bio_queue_size;
222 spin_unlock(&pd->lock);
226 spin_lock(&pd->lock);
227 v = pd->write_congestion_off;
228 spin_unlock(&pd->lock);
232 spin_lock(&pd->lock);
233 v = pd->write_congestion_on;
234 spin_unlock(&pd->lock);
261 struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
265 pd->stats.pkt_started = 0;
266 pd->stats.pkt_ended = 0;
267 pd->stats.secs_w = 0;
268 pd->stats.secs_rg = 0;
269 pd->stats.secs_r = 0;
273 spin_lock(&pd->lock);
274 pd->write_congestion_off = val;
275 init_write_congestion_marks(&pd->write_congestion_off,
276 &pd->write_congestion_on);
277 spin_unlock(&pd->lock);
281 spin_lock(&pd->lock);
282 pd->write_congestion_on = val;
283 init_write_congestion_marks(&pd->write_congestion_off,
284 &pd->write_congestion_on);
285 spin_unlock(&pd->lock);
305 static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
308 pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
309 "%s", pd->name);
310 if (IS_ERR(pd->dev))
311 pd->dev = NULL;
313 if (pd->dev) {
314 pd->kobj_stat = pkt_kobj_create(pd, "stat",
315 &pd->dev->kobj,
317 pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
318 &pd->dev->kobj,
323 static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
325 pkt_kobj_remove(pd->kobj_stat);
326 pkt_kobj_remove(pd->kobj_wqueue);
328 device_unregister(pd->dev);
351 struct pktcdvd_device *pd = pkt_devs[idx];
352 if (!pd)
355 pd->name,
356 MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
357 MAJOR(pd->bdev->bd_dev),
358 MINOR(pd->bdev->bd_dev));
465 static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
469 pd->dfs_f_info = NULL;
470 pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
471 if (IS_ERR(pd->dfs_d_root)) {
472 pd->dfs_d_root = NULL;
475 pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
476 pd->dfs_d_root, pd, &debug_fops);
477 if (IS_ERR(pd->dfs_f_info)) {
478 pd->dfs_f_info = NULL;
483 static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
487 if (pd->dfs_f_info)
488 debugfs_remove(pd->dfs_f_info);
489 pd->dfs_f_info = NULL;
490 if (pd->dfs_d_root)
491 debugfs_remove(pd->dfs_d_root);
492 pd->dfs_d_root = NULL;
515 static void pkt_bio_finished(struct pktcdvd_device *pd)
517 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
518 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
520 atomic_set(&pd->iosched.attention, 1);
521 wake_up(&pd->wqueue);
628 static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
632 BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
634 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
637 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
640 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
644 BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
647 pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
649 pkt_shrink_pktlist(pd);
653 pkt->pd = pd;
654 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
668 static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
670 rb_erase(&node->rb_node, &pd->bio_queue);
671 mempool_free(node, pd->rb_pool);
672 pd->bio_queue_size--;
673 BUG_ON(pd->bio_queue_size < 0);
677 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
679 static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
681 struct rb_node *n = pd->bio_queue.rb_node;
686 BUG_ON(pd->bio_queue_size > 0);
711 * Insert a node into the pd->bio_queue rb tree.
713 static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
715 struct rb_node **p = &pd->bio_queue.rb_node;
729 rb_insert_color(&node->rb_node, &pd->bio_queue);
730 pd->bio_queue_size++;
737 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
739 struct request_queue *q = bdev_get_queue(pd->bdev);
760 blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
803 static int pkt_flush_cache(struct pktcdvd_device *pd)
815 return pkt_generic_packet(pd, &cgc);
821 static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
836 if ((ret = pkt_generic_packet(pd, &cgc)))
846 static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
848 spin_lock(&pd->iosched.lock);
850 bio_list_add(&pd->iosched.read_queue, bio);
852 bio_list_add(&pd->iosched.write_queue, bio);
853 spin_unlock(&pd->iosched.lock);
855 atomic_set(&pd->iosched.attention, 1);
856 wake_up(&pd->wqueue);
875 static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
878 if (atomic_read(&pd->iosched.attention) == 0)
880 atomic_set(&pd->iosched.attention, 0);
886 spin_lock(&pd->iosched.lock);
887 reads_queued = !bio_list_empty(&pd->iosched.read_queue);
888 writes_queued = !bio_list_empty(&pd->iosched.write_queue);
889 spin_unlock(&pd->iosched.lock);
894 if (pd->iosched.writing) {
896 spin_lock(&pd->iosched.lock);
897 bio = bio_list_peek(&pd->iosched.write_queue);
898 spin_unlock(&pd->iosched.lock);
899 if (bio && (bio->bi_sector == pd->iosched.last_write))
902 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
906 pkt_flush_cache(pd);
907 pd->iosched.writing = 0;
911 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
915 pd->iosched.writing = 1;
919 spin_lock(&pd->iosched.lock);
920 if (pd->iosched.writing)
921 bio = bio_list_pop(&pd->iosched.write_queue);
923 bio = bio_list_pop(&pd->iosched.read_queue);
924 spin_unlock(&pd->iosched.lock);
930 pd->iosched.successive_reads += bio->bi_size >> 10;
932 pd->iosched.successive_reads = 0;
933 pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
935 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
936 if (pd->read_speed == pd->write_speed) {
937 pd->read_speed = MAX_SPEED;
938 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
941 if (pd->read_speed != pd->write_speed) {
942 pd->read_speed = pd->write_speed;
943 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
947 atomic_inc(&pd->cdrw.pending_bios);
956 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
958 if ((pd->settings.size << 9) / CD_FRAMESIZE
963 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
965 } else if ((pd->settings.size << 9) / PAGE_SIZE
971 set_bit(PACKET_MERGE_SEGS, &pd->flags);
1040 struct pktcdvd_device *pd = pkt->pd;
1041 BUG_ON(!pd);
1050 wake_up(&pd->wqueue);
1052 pkt_bio_finished(pd);
1058 struct pktcdvd_device *pd = pkt->pd;
1059 BUG_ON(!pd);
1063 pd->stats.pkt_ended++;
1065 pkt_bio_finished(pd);
1068 wake_up(&pd->wqueue);
1074 static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1094 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
1122 bio->bi_bdev = pd->bdev;
1137 pkt_queue_bio(pd, bio);
1144 pd->stats.pkt_started++;
1145 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
1152 static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
1156 list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
1157 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
1168 static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1171 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
1173 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
1205 static int pkt_handle_queue(struct pktcdvd_device *pd)
1216 atomic_set(&pd->scan_queue, 0);
1218 if (list_empty(&pd->cdrw.pkt_free_list)) {
1226 spin_lock(&pd->lock);
1227 first_node = pkt_rbtree_find(pd, pd->current_sector);
1229 n = rb_first(&pd->bio_queue);
1236 zone = ZONE(bio->bi_sector, pd);
1237 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
1247 n = rb_first(&pd->bio_queue);
1254 spin_unlock(&pd->lock);
1260 pkt = pkt_get_packet_data(pd, zone);
1262 pd->current_sector = zone + pd->settings.size;
1264 BUG_ON(pkt->frames != pd->settings.size >> 2);
1271 spin_lock(&pd->lock);
1273 while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
1276 (unsigned long long)ZONE(bio->bi_sector, pd));
1277 if (ZONE(bio->bi_sector, pd) != zone)
1279 pkt_rbtree_erase(pd, node);
1287 wakeup = (pd->write_congestion_on > 0
1288 && pd->bio_queue_size <= pd->write_congestion_off);
1289 spin_unlock(&pd->lock);
1291 clear_bdi_congested(&pd->disk->queue->backing_dev_info,
1299 spin_lock(&pd->cdrw.active_list_lock);
1300 list_add(&pkt->list, &pd->cdrw.pkt_active_list);
1301 spin_unlock(&pd->cdrw.active_list_lock);
1310 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1362 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
1373 pkt->w_bio->bi_bdev = pd->bdev;
1385 pkt_queue_bio(pd, pkt->w_bio);
1400 static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
1413 pkt_gather_data(pd, pkt);
1424 pkt_start_write(pd, pkt);
1441 pkt_start_write(pd, pkt);
1460 static void pkt_handle_packets(struct pktcdvd_device *pd)
1469 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1472 pkt_run_state_machine(pd, pkt);
1479 spin_lock(&pd->cdrw.active_list_lock);
1480 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
1483 pkt_put_packet_data(pd, pkt);
1485 atomic_set(&pd->scan_queue, 1);
1488 spin_unlock(&pd->cdrw.active_list_lock);
1491 static void pkt_count_states(struct pktcdvd_device *pd, int *states)
1499 spin_lock(&pd->cdrw.active_list_lock);
1500 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1503 spin_unlock(&pd->cdrw.active_list_lock);
1512 struct pktcdvd_device *pd = foobar;
1525 add_wait_queue(&pd->wqueue, &wait);
1530 if (atomic_read(&pd->scan_queue) > 0)
1534 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1540 if (atomic_read(&pd->iosched.attention) != 0)
1546 pkt_count_states(pd, states);
1553 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1558 generic_unplug_device(bdev_get_queue(pd->bdev));
1567 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1582 remove_wait_queue(&pd->wqueue, &wait);
1591 while (pkt_handle_queue(pd))
1597 pkt_handle_packets(pd);
1602 pkt_iosched_process_queue(pd);
1608 static void pkt_print_settings(struct pktcdvd_device *pd)
1610 printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
1611 printk("%u blocks, ", pd->settings.size >> 2);
1612 printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
1615 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
1624 return pkt_generic_packet(pd, cgc);
1627 static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
1636 return pkt_generic_packet(pd, cgc);
1639 static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
1650 if ((ret = pkt_generic_packet(pd, &cgc)))
1663 return pkt_generic_packet(pd, &cgc);
1666 static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
1679 if ((ret = pkt_generic_packet(pd, &cgc)))
1689 return pkt_generic_packet(pd, &cgc);
1692 static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
1700 if ((ret = pkt_get_disc_info(pd, &di)))
1704 if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
1710 if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
1728 * write mode select package based on pd->settings
1730 static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
1739 if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
1745 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
1751 pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
1760 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
1768 wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1770 wp->fp = pd->settings.fp;
1771 wp->track_mode = pd->settings.track_mode;
1772 wp->write_type = pd->settings.write_type;
1773 wp->data_block_type = pd->settings.block_mode;
1795 wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1798 if ((ret = pkt_mode_select(pd, &cgc))) {
1803 pkt_print_settings(pd);
1810 static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
1812 switch (pd->mmc3_profile) {
1843 static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
1845 switch (pd->mmc3_profile) {
1854 VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
1885 static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
1896 ret = pkt_generic_packet(pd, &cgc);
1897 pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
1902 if ((ret = pkt_get_disc_info(pd, &di))) {
1907 if (!pkt_writable_disc(pd, &di))
1910 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1913 if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
1918 if (!pkt_writable_track(pd, &ti)) {
1927 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1928 if (pd->settings.size == 0) {
1932 if (pd->settings.size > PACKET_MAX_SECTORS) {
1936 pd->settings.fp = ti.fp;
1937 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1940 pd->nwa = be32_to_cpu(ti.next_writable);
1941 set_bit(PACKET_NWA_VALID, &pd->flags);
1950 pd->lra = be32_to_cpu(ti.last_rec_address);
1951 set_bit(PACKET_LRA_VALID, &pd->flags);
1953 pd->lra = 0xffffffff;
1954 set_bit(PACKET_LRA_VALID, &pd->flags);
1960 pd->settings.link_loss = 7;
1961 pd->settings.write_type = 0; /* packet */
1962 pd->settings.track_mode = ti.track_mode;
1969 pd->settings.block_mode = PACKET_BLOCK_MODE1;
1972 pd->settings.block_mode = PACKET_BLOCK_MODE2;
1984 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
1994 cgc.buflen = pd->mode_offset + 12;
2001 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
2004 buf[pd->mode_offset + 10] |= (!!set << 2);
2007 ret = pkt_mode_select(pd, &cgc);
2012 printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
2016 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
2023 return pkt_generic_packet(pd, &cgc);
2029 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
2038 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
2042 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
2044 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
2046 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
2090 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
2105 ret = pkt_generic_packet(pd, &cgc);
2120 ret = pkt_generic_packet(pd, &cgc);
2163 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
2176 if ((ret = pkt_generic_packet(pd, &cgc)))
2181 static int pkt_open_write(struct pktcdvd_device *pd)
2186 if ((ret = pkt_probe_settings(pd))) {
2187 VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
2191 if ((ret = pkt_set_write_settings(pd))) {
2192 DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
2196 pkt_write_caching(pd, USE_WCACHING);
2198 if ((ret = pkt_get_max_speed(pd, &write_speed)))
2200 switch (pd->mmc3_profile) {
2207 if ((ret = pkt_media_speed(pd, &media_write_speed)))
2215 if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
2216 DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
2219 pd->write_speed = write_speed;
2220 pd->read_speed = read_speed;
2222 if ((ret = pkt_perform_opc(pd))) {
2223 DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
2232 static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
2243 bdget(pd->bdev->bd_dev);
2244 if ((ret = blkdev_get(pd->bdev, FMODE_READ)))
2247 if ((ret = bd_claim(pd->bdev, pd)))
2250 if ((ret = pkt_get_last_written(pd, &lba))) {
2255 set_capacity(pd->disk, lba << 2);
2256 set_capacity(pd->bdev->bd_disk, lba << 2);
2257 bd_set_size(pd->bdev, (loff_t)lba << 11);
2259 q = bdev_get_queue(pd->bdev);
2261 if ((ret = pkt_open_write(pd)))
2268 blk_queue_max_hw_sectors(q, pd->settings.size);
2270 set_bit(PACKET_WRITABLE, &pd->flags);
2272 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2273 clear_bit(PACKET_WRITABLE, &pd->flags);
2276 if ((ret = pkt_set_segment_merging(pd, q)))
2280 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
2291 bd_release(pd->bdev);
2293 blkdev_put(pd->bdev, FMODE_READ);
2302 static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
2304 if (flush && pkt_flush_cache(pd))
2305 DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);
2307 pkt_lock_door(pd, 0);
2309 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
2310 bd_release(pd->bdev);
2311 blkdev_put(pd->bdev, FMODE_READ);
2313 pkt_shrink_pktlist(pd);
2325 struct pktcdvd_device *pd = NULL;
2332 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
2333 if (!pd) {
2337 BUG_ON(pd->refcnt < 0);
2339 pd->refcnt++;
2340 if (pd->refcnt > 1) {
2342 !test_bit(PACKET_WRITABLE, &pd->flags)) {
2347 ret = pkt_open_dev(pd, mode & FMODE_WRITE);
2362 pd->refcnt--;
2372 struct pktcdvd_device *pd = disk->private_data;
2377 pd->refcnt--;
2378 BUG_ON(pd->refcnt < 0);
2379 if (pd->refcnt == 0) {
2380 int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2381 pkt_release_dev(pd, flush);
2392 struct pktcdvd_device *pd = psd->pd;
2397 pkt_bio_finished(pd);
2402 struct pktcdvd_device *pd;
2409 pd = q->queuedata;
2410 if (!pd) {
2422 psd->pd = pd;
2424 cloned_bio->bi_bdev = pd->bdev;
2427 pd->stats.secs_r += bio->bi_size >> 9;
2428 pkt_queue_bio(pd, cloned_bio);
2432 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2434 pd->name, (unsigned long long)bio->bi_sector);
2445 zone = ZONE(bio->bi_sector, pd);
2456 last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
2458 BUG_ON(last_zone != zone + pd->settings.size);
2473 spin_lock(&pd->cdrw.active_list_lock);
2475 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2485 wake_up(&pd->wqueue);
2488 spin_unlock(&pd->cdrw.active_list_lock);
2496 spin_unlock(&pd->cdrw.active_list_lock);
2503 spin_lock(&pd->lock);
2504 if (pd->write_congestion_on > 0
2505 && pd->bio_queue_size >= pd->write_congestion_on) {
2508 spin_unlock(&pd->lock);
2510 spin_lock(&pd->lock);
2511 } while(pd->bio_queue_size > pd->write_congestion_off);
2513 spin_unlock(&pd->lock);
2518 node = mempool_alloc(pd->rb_pool, GFP_NOIO);
2520 spin_lock(&pd->lock);
2521 BUG_ON(pd->bio_queue_size < 0);
2522 was_empty = (pd->bio_queue_size == 0);
2523 pkt_rbtree_insert(pd, node);
2524 spin_unlock(&pd->lock);
2529 atomic_set(&pd->scan_queue, 1);
2532 wake_up(&pd->wqueue);
2533 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
2538 wake_up(&pd->wqueue);
2551 struct pktcdvd_device *pd = q->queuedata;
2552 sector_t zone = ZONE(bmd->bi_sector, pd);
2554 int remaining = (pd->settings.size << 9) - used;
2568 static void pkt_init_queue(struct pktcdvd_device *pd)
2570 struct request_queue *q = pd->disk->queue;
2576 q->queuedata = pd;
2581 struct pktcdvd_device *pd = m->private;
2586 seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
2587 bdevname(pd->bdev, bdev_buf));
2590 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2592 if (pd->settings.write_type == 0)
2598 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2599 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2601 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2603 if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2605 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2612 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
2613 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
2614 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
2615 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
2616 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
2619 seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
2620 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
2621 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
2622 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
2623 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
2624 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
2627 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
2628 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
2629 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
2631 pkt_count_states(pd, states);
2636 pd->write_congestion_off,
2637 pd->write_congestion_on);
2653 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2660 if (pd->pkt_dev == dev) {
2688 pd->bdev = bdev;
2691 pkt_init_queue(pd);
2693 atomic_set(&pd->cdrw.pending_bios, 0);
2694 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
2695 if (IS_ERR(pd->cdrw.thread)) {
2701 proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
2702 DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
2714 struct pktcdvd_device *pd = bdev->bd_disk->private_data;
2727 if (pd->refcnt == 1)
2728 pkt_lock_door(pd, 0);
2738 ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
2742 VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
2752 struct pktcdvd_device *pd = disk->private_data;
2755 if (!pd)
2757 if (!pd->bdev)
2759 attached_disk = pd->bdev->bd_disk;
2785 struct pktcdvd_device *pd;
2799 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2800 if (!pd)
2803 pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
2805 if (!pd->rb_pool)
2808 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
2809 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
2810 spin_lock_init(&pd->cdrw.active_list_lock);
2812 spin_lock_init(&pd->lock);
2813 spin_lock_init(&pd->iosched.lock);
2814 bio_list_init(&pd->iosched.read_queue);
2815 bio_list_init(&pd->iosched.write_queue);
2816 sprintf(pd->name, DRIVER_NAME"%d", idx);
2817 init_waitqueue_head(&pd->wqueue);
2818 pd->bio_queue = RB_ROOT;
2820 pd->write_congestion_on = write_congestion_on;
2821 pd->write_congestion_off = write_congestion_off;
2826 pd->disk = disk;
2831 strcpy(disk->disk_name, pd->name);
2833 disk->private_data = pd;
2838 pd->pkt_dev = MKDEV(pktdev_major, idx);
2839 ret = pkt_new_dev(pd, dev);
2845 pkt_sysfs_dev_new(pd);
2846 pkt_debugfs_dev_new(pd);
2848 pkt_devs[idx] = pd;
2850 *pkt_dev = pd->pkt_dev;
2860 if (pd->rb_pool)
2861 mempool_destroy(pd->rb_pool);
2862 kfree(pd);
2874 struct pktcdvd_device *pd;
2881 pd = pkt_devs[idx];
2882 if (pd && (pd->pkt_dev == pkt_dev))
2891 if (pd->refcnt > 0) {
2895 if (!IS_ERR(pd->cdrw.thread))
2896 kthread_stop(pd->cdrw.thread);
2900 pkt_debugfs_dev_remove(pd);
2901 pkt_sysfs_dev_remove(pd);
2903 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
2905 remove_proc_entry(pd->name, pkt_proc);
2906 DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
2908 del_gendisk(pd->disk);
2909 blk_cleanup_queue(pd->disk->queue);
2910 put_disk(pd->disk);
2912 mempool_destroy(pd->rb_pool);
2913 kfree(pd);
2925 struct pktcdvd_device *pd;
2929 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
2930 if (pd) {
2931 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
2932 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
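
A minimal sketch, assuming settings.size is a power-of-two packet length in sectors, of the arithmetic behind the ZONE() macro at line 87: the start sector is shifted by the track offset and then masked down to its packet (zone) boundary. The helper name and the example values below are illustrative only, not part of the driver.

    #include <stdio.h>

    typedef unsigned long long sector_t;   /* stand-in for the kernel type */

    /* (((sector) + offset) & ~(size - 1)), as in the ZONE() macro */
    static sector_t zone_of(sector_t sector, sector_t offset, sector_t pkt_size)
    {
            return (sector + offset) & ~(pkt_size - 1);
    }

    int main(void)
    {
            sector_t pkt_size = 32, offset = 0;   /* hypothetical 32-sector packets */

            printf("sector  70 -> zone %llu\n", zone_of(70, offset, pkt_size));  /* 64 */
            printf("sector  64 -> zone %llu\n", zone_of(64, offset, pkt_size));  /* 64 */
            printf("sector 100 -> zone %llu\n", zone_of(100, offset, pkt_size)); /* 96 */
            return 0;
    }

pd->offset (derived from the track start, line 1937) is folded in so that zone boundaries line up with the packets actually laid out on the track rather than with absolute sector 0.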
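
Lines 1287-1291 and 2503-2513 above show the two halves of the write-congestion handling: the request path throttles submitters once bio_queue_size reaches write_congestion_on, and the worker lifts the congestion only after the queue has drained back to write_congestion_off. The following user-space sketch of that on/off-mark pattern is illustrative only; the names and values are not from the driver.

    #include <stdbool.h>
    #include <stdio.h>

    struct write_queue {
            int queued;      /* counterpart of pd->bio_queue_size        */
            int on_mark;     /* throttle once the queue reaches this     */
            int off_mark;    /* release once it drains back down to this */
            bool congested;
    };

    static void submit_bio(struct write_queue *q)
    {
            q->queued++;
            if (q->on_mark > 0 && q->queued >= q->on_mark)
                    q->congested = true;     /* submitters would now sleep */
    }

    static void drain_bio(struct write_queue *q)
    {
            q->queued--;
            if (q->congested && q->queued <= q->off_mark)
                    q->congested = false;    /* wake the waiting submitters */
    }

    int main(void)
    {
            struct write_queue q = { .on_mark = 6, .off_mark = 2 };

            for (int i = 0; i < 8; i++)
                    submit_bio(&q);
            printf("after 8 submits: queued=%d congested=%d\n", q.queued, q.congested);

            while (q.congested)
                    drain_bio(&q);
            printf("after draining:  queued=%d congested=%d\n", q.queued, q.congested);
            return 0;
    }

The gap between the two marks gives the throttling some hysteresis, so submitters are not woken and blocked again on every single queued bio.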