Lines matching refs: block

700  * Returns 1 if the block is one of the special blocks that needs
834 cqr->block = NULL;
1522 cqr->block = NULL;
1622 cqr->block = NULL;
1729 if (cqr->block)
1730 data->base = cqr->block->base;
1806 cqr->block = NULL;
1902 cqr->block = NULL;
2018 struct dasd_block *block;
2026 block = dasd_alloc_block();
2027 if (IS_ERR(block)) {
2029 "could not allocate dasd block structure");
2030 return PTR_ERR(block);
2032 device->block = block;
2033 block->base = device;
2129 /* check if block device is needed and allocate in case */
2194 dasd_free_block(device->block);
2195 device->block = NULL;
2269 cqr->block = NULL;
2316 static int dasd_eckd_start_analysis(struct dasd_block *block)
2320 init_cqr = dasd_eckd_analysis_ccw(block->base);
2335 static int dasd_eckd_end_analysis(struct dasd_block *block)
2337 struct dasd_device *device = block->base;
2355 block->bp_block = DASD_RAW_BLOCKSIZE;
2357 block->s2b_shift = 3;
2408 block->bp_block = count_area->dl;
2410 if (block->bp_block == 0) {
2415 block->s2b_shift = 0; /* bits to shift 512 to get a block */
2416 for (sb = 512; sb < block->bp_block; sb = sb << 1)
2417 block->s2b_shift++;
2419 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
2422 block->blocks = ((unsigned long) private->real_cyl *
2427 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
2428 "%s\n", (block->bp_block >> 10),
2431 blk_per_trk * (block->bp_block >> 9)) >> 1),
2432 ((blk_per_trk * block->bp_block) >> 10),
2439 static int dasd_eckd_do_analysis(struct dasd_block *block)
2441 struct dasd_eckd_private *private = block->base->private;
2444 return dasd_eckd_start_analysis(block);
2446 return dasd_eckd_end_analysis(block);
2470 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2472 struct dasd_eckd_private *private = block->base->private;
2474 if (dasd_check_blocksize(block->bp_block) == 0) {
2476 0, block->bp_block);
2806 base->block->bp_block);
2970 "The DASD cannot be formatted with block size %u\n",
3114 struct dasd_block *block = cqr->block;
3119 spin_lock_irqsave(&block->format_lock, flags);
3120 if (cqr->trkcount != atomic_read(&block->trkcount)) {
3130 list_for_each_entry(format, &block->format_list, list) {
3136 list_add_tail(&to_format->list, &block->format_list);
3139 spin_unlock_irqrestore(&block->format_lock, flags);
3144 struct dasd_block *block)
3148 spin_lock_irqsave(&block->format_lock, flags);
3149 atomic_inc(&block->trkcount);
3151 spin_unlock_irqrestore(&block->format_lock, flags);
3163 clear_format_track(format, cqr->basedev->block);
3178 struct dasd_block *block;
3187 block = cqr->block;
3188 base = block->base;
3190 blksize = block->bp_block;
3194 first_trk = blk_rq_pos(req) >> block->s2b_shift;
3197 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3261 struct dasd_block *block;
3272 base = cqr->block->base;
3273 blksize = base->block->bp_block;
3274 block = cqr->block;
3280 first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
3283 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3549 if (cqr->block && (cqr->startdev != cqr->block->base)) {
3551 cqr->startdev = cqr->block->base;
3552 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3596 if (!device->block && private->lcu &&
3648 if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3747 dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
3845 cqr->block = block;
3874 struct dasd_block *block = device->block;
3913 spin_lock_irq(&block->queue_lock);
3915 spin_unlock_irq(&block->queue_lock);
3926 spin_lock_irq(&block->queue_lock);
3928 spin_unlock_irq(&block->queue_lock);
3952 struct dasd_block *block,
3978 basedev = block->base;
3994 count += bv.bv_len >> (block->s2b_shift + 9);
3996 cidaw += bv.bv_len >> (block->s2b_shift + 9);
4078 /* Locate record for cdl special block ? */
4119 block->base->features & DASD_FEATURE_FAILFAST)
4123 cqr->block = block;
4142 struct dasd_block *block,
4170 basedev = block->base;
4298 block->base->features & DASD_FEATURE_FAILFAST)
4302 cqr->block = block;
4474 struct dasd_block *block,
4505 basedev = block->base;
4564 * We can let the block layer handle this by setting
4621 block->base->features & DASD_FEATURE_FAILFAST)
4626 cqr->block = block;
4647 struct dasd_block *block,
4663 basedev = block->base;
4667 blksize = block->bp_block;
4671 /* Calculate record id of first and last block. */
4672 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
4675 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
4697 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4708 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4718 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4727 struct dasd_block *block,
4758 basedev = block->base;
4863 block->base->features & DASD_FEATURE_FAILFAST)
4867 cqr->block = block;
4892 private = cqr->block->base->private;
4893 blksize = cqr->block->bp_block;
4895 recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
4965 struct dasd_block *block,
4983 cqr = dasd_eckd_build_cp_raw(startdev, block, req);
4985 cqr = dasd_eckd_build_cp(startdev, block, req);
5484 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
5486 struct dasd_device *device = block->base;
5886 cqr->block = NULL;
5955 if (!device->block && private->lcu->pav == HYPER_PAV)
5979 cqr->block = NULL;
6149 * IO is paused on the block queue before swap and may be resumed afterwards.
6156 struct dasd_block *block;
6181 /* swap DASD internal device <> block assignment */
6182 block = primary->block;
6183 primary->block = NULL;
6184 secondary->block = block;
6185 block->base = secondary;
6190 gdp = block->gdp;
6223 cqr->block = NULL;
6283 cqr->block = NULL;
6347 cqr->block = NULL;
6827 static unsigned int dasd_eckd_max_sectors(struct dasd_block *block)
6829 if (block->base->features & DASD_FEATURE_USERAW) {
6837 return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
6840 return DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
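
The conversions that recur throughout this listing (bp_block, s2b_shift, and the "blk_rq_pos(req) >> block->s2b_shift" pattern, e.g. at source lines 2415-2417, 3194 and 6840) all follow one idiom: the block size is a power of two of at least 512 bytes, s2b_shift is the number of bits separating a 512-byte sector count from a block count, and sector offsets are turned into block numbers by a right shift. The following is a minimal, self-contained sketch of that idiom only; the demo_* names are illustrative stand-ins and are not the dasd_block structures or helpers of the indexed source.

/*
 * Illustrative sketch of the s2b_shift idiom seen in the listing above.
 * Not the kernel's dasd_block; struct and function names are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

struct demo_block {
	unsigned int bp_block;   /* bytes per block, power of two >= 512 */
	unsigned int s2b_shift;  /* bits to shift 512-byte sectors to get blocks */
};

/* Derive the shift the same way the listed code does (source lines 2415-2417). */
static void demo_set_blocksize(struct demo_block *block, unsigned int bp_block)
{
	unsigned int sb;

	block->bp_block = bp_block;
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;
}

/*
 * Convert a 512-byte sector offset into a block number, mirroring the
 * "blk_rq_pos(req) >> block->s2b_shift" pattern used when building requests.
 */
static unsigned long demo_sector_to_block(const struct demo_block *block,
					  unsigned long sector)
{
	return sector >> block->s2b_shift;
}

int main(void)
{
	struct demo_block blk;

	demo_set_blocksize(&blk, 4096);	/* 4 KiB blocks: 4096 = 512 << 3 */
	assert(blk.s2b_shift == 3);

	/* Sector 4096 of a request lands on block 512 of a 4 KiB device. */
	printf("sector 4096 -> block %lu\n", demo_sector_to_block(&blk, 4096));
	return 0;
}

The same shift runs in the opposite direction at the end of the listing: a block-count limit such as DASD_ECKD_MAX_BLOCKS is converted back to a sector limit with a left shift ("DASD_ECKD_MAX_BLOCKS << block->s2b_shift").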