Lines matching lba in drivers/scsi/scsi_debug.c (cross-reference query defs:lba); the number at the start of each entry is the source line number in that file.

1192 			    unsigned long long lba)
1196 lba = do_div(lba, sdebug_store_sectors);
1201 return lsip->storep + lba * sdebug_sector_size;
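
The two entries above, from the store-address helper around line 1192, turn an LBA into a pointer into the ramdisk backing store. The kernel's do_div(n, base) divides n in place and returns the remainder, so the assignment on line 1196 keeps the remainder: the LBA wraps modulo the store size. A minimal userspace sketch of the same arithmetic; the names are stand-ins for the driver's sdebug_* globals, not its actual identifiers:

    #include <stdint.h>

    static unsigned char *storep;           /* backing store (stand-in) */
    static uint64_t store_sectors = 32768;  /* sdebug_store_sectors stand-in */
    static uint32_t sector_size = 512;      /* sdebug_sector_size stand-in */

    static unsigned char *lba_to_addr(uint64_t lba)
    {
            lba %= store_sectors;           /* what do_div()'s remainder yields */
            return storep + lba * sector_size;
    }
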
3097 unsigned long long lba)
3099 u32 zno = lba >> devip->zsize_shift;
3112 if (lba >= zsp->z_start + zsp->z_size)
3114 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
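
The fragments from lines 3097-3114 are the LBA-to-zone lookup: with a power-of-two zone size the zone number is a plain shift (line 3099), and the later comparisons against z_start + z_size, plus the WARN_ON_ONCE, guard the non-uniform layouts. A sketch of the fast path, assuming uniform power-of-two zones and a flat zone array; the struct is cut down from the driver's sdeb_zone_state:

    #include <stdint.h>

    struct zone_state { uint64_t z_start, z_size, z_wp; };

    static struct zone_state *zone_of(struct zone_state *zones, uint64_t lba,
                                      unsigned int zsize_shift)
    {
            uint32_t zno = (uint32_t)(lba >> zsize_shift); /* as on line 3099 */

            return &zones[zno];
    }
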
3221 unsigned long long lba, unsigned int num)
3223 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3237 if (lba != zsp->z_wp)
3240 end = lba + num;
3242 n = zend - lba;
3254 lba += n;
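
Lines 3221-3254 belong to the write-pointer update: a write advances the pointer, the pointer is clamped at the zone end, and a transfer crossing a zone boundary carries the leftover blocks into the next zone (lba += n on line 3254 steps past what the current zone absorbed). A simplified sketch under the assumption of contiguous sequential zones, ignoring the zone-type and zone-condition handling the real function also does:

    #include <stdint.h>

    struct zone_state { uint64_t z_start, z_size, z_wp; };

    static void inc_wp(struct zone_state *zsp, uint64_t lba, uint32_t num)
    {
            while (num) {
                    uint64_t zend = zsp->z_start + zsp->z_size;
                    uint64_t end = lba + num;
                    uint32_t n = (end >= zend) ? (uint32_t)(zend - lba) : num;

                    if (end > zsp->z_wp)            /* only move forward */
                            zsp->z_wp = (end >= zend) ? zend : end;
                    num -= n;
                    lba += n;
                    if (num)
                            zsp++;                  /* spill into the next zone */
            }
    }
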
3263 unsigned long long lba, unsigned int num, bool write)
3267 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
3268 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
3314 if (lba != zsp->z_wp) {
3338 (struct scsi_cmnd *scp, unsigned long long lba,
3344 if (lba + num > sdebug_capacity) {
3359 return check_zbc_access_params(scp, lba, num, write);
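
Line 3344 is the range check every data command passes through before touching the store: the whole transfer must fit below the emulated capacity, and on zoned devices the ZBC rules are then checked too (line 3359). One caveat worth spelling out: with a 64-bit LBA taken straight from the CDB, lba + num can in principle wrap around; a wrap-proof formulation of the same test looks like this (a sketch, not the driver's code):

    #include <stdbool.h>
    #include <stdint.h>

    static bool range_ok(uint64_t lba, uint32_t num, uint64_t capacity)
    {
            /* equivalent to lba + num <= capacity, but immune to
             * lba + num wrapping past UINT64_MAX */
            return num <= capacity && lba <= capacity - num;
    }
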
3382 u32 sg_skip, u64 lba, u32 num, bool do_write,
3408 block = do_div(lba, sdebug_store_sectors);
3441 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3442 * arr into sip->storep+lba and return true. If comparison fails then
3444 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3453 block = do_div(lba, store_blks);
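
The comment at lines 3441-3442 describes COMPARE AND WRITE: arr carries the verify data followed by the write data, and the write half is applied only when the verify half matches the store. A self-contained sketch of that contract (line 3453's do_div supplies the same modulo wrap as in the store-address helper above):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    static bool comp_write(unsigned char *store, uint64_t store_blks,
                           uint64_t lba, uint32_t num,
                           const unsigned char *arr, uint32_t sector_size)
    {
            size_t half = (size_t)num * sector_size;
            unsigned char *dst;

            lba %= store_blks;              /* the do_div() on line 3453 */
            dst = store + lba * sector_size;
            if (memcmp(dst, arr, half) != 0)
                    return false;           /* miscompare: nothing written */
            memcpy(dst, arr + half, half);  /* "top half" of arr is the new data */
            return true;
    }
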
3669 u64 lba;
3676 lba = get_unaligned_be64(cmd + 2);
3682 lba = get_unaligned_be32(cmd + 2);
3688 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3695 lba = get_unaligned_be32(cmd + 2);
3701 lba = get_unaligned_be32(cmd + 2);
3706 lba = get_unaligned_be64(cmd + 12);
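
Lines 3676-3706 are resp_read() extracting the starting LBA from each CDB flavor: READ(16) carries a 64-bit big-endian LBA at byte 2, READ(10) and READ(12) a 32-bit one at byte 2, READ(6) a 21-bit LBA packed into bytes 1-3 (the line-3688 expression continues with the (cmd[1] & 0x1f) << 16 term on the following source line, which does not contain lba and so is omitted above), and the variable-length READ(32) a 64-bit LBA at byte 12. A self-contained decoder covering the same cases; opcodes are per SBC, the helper names are mine:

    #include <stdint.h>

    static uint32_t be32(const uint8_t *p)
    {
            return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                   ((uint32_t)p[2] << 8) | p[3];
    }

    static uint64_t be64(const uint8_t *p)
    {
            return ((uint64_t)be32(p) << 32) | be32(p + 4);
    }

    static uint64_t cdb_lba(const uint8_t *cmd)
    {
            switch (cmd[0]) {
            case 0x08:                      /* READ(6): 21-bit LBA */
                    return ((uint64_t)(cmd[1] & 0x1f) << 16) |
                           ((uint64_t)cmd[2] << 8) | cmd[3];
            case 0x28:                      /* READ(10) */
            case 0xa8:                      /* READ(12) */
                    return be32(cmd + 2);
            case 0x88:                      /* READ(16) */
                    return be64(cmd + 2);
            case 0x7f:                      /* variable length, e.g. READ(32) */
                    return be64(cmd + 12);
            default:
                    return 0;
            }
    }
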
3730 ret = check_device_access_params(scp, lba, num, false);
3734 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3735 ((lba + num) > sdebug_medium_error_start))) {
3741 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3742 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3753 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3779 ret = do_device_access(sip, scp, 0, lba, num, false, 0);
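
Lines 3734-3742 implement the injected medium-error window: the read is failed when [lba, lba + num) overlaps [sdebug_medium_error_start, start + count), and the LBA reported in the sense data is pushed up to OPT_MEDIUM_ERR_ADDR for reads that start below it. The overlap test in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    static bool hits_error_window(uint64_t lba, uint32_t num,
                                  uint64_t start, uint32_t count)
    {
            /* half-open read range [lba, lba + num) vs. the error
             * window [start, start + count), as on lines 3734-3735 */
            return lba <= start + count - 1 && lba + num > start;
    }
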
3878 static unsigned long lba_to_map_index(sector_t lba)
3881 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3882 sector_div(lba, sdebug_unmap_granularity);
3883 return lba;
3888 sector_t lba = index * sdebug_unmap_granularity;
3891 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3892 return lba;
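
lba_to_map_index() and map_index_to_lba() (lines 3878-3892) convert between LBAs and bit positions in the thin-provisioning map: one bit covers one unmap granule, shifted by the unmap alignment (in the driver the adjustment is guarded by an if (sdebug_unmap_alignment) line that does not contain lba and so does not appear above). A sketch of both directions; the statics stand in for the module parameters:

    #include <stdint.h>

    static uint64_t granularity = 8;  /* sdebug_unmap_granularity stand-in */
    static uint64_t alignment;        /* sdebug_unmap_alignment stand-in */

    static unsigned long lba_to_index(uint64_t lba)
    {
            if (alignment)
                    lba += granularity - alignment;
            return (unsigned long)(lba / granularity); /* the sector_div() */
    }

    static uint64_t index_to_lba(unsigned long index)
    {
            uint64_t lba = (uint64_t)index * granularity;

            if (alignment)
                    lba -= granularity - alignment;
            return lba;
    }

Round trip: index_to_lba(lba_to_index(lba)) yields the first LBA of the granule containing lba, which is exactly what the region walkers below lean on when they jump to index + 1.
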
3895 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3903 index = lba_to_map_index(lba);
3912 *num = end - lba;
3916 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3919 sector_t end = lba + len;
3921 while (lba < end) {
3922 unsigned long index = lba_to_map_index(lba);
3927 lba = map_index_to_lba(index + 1);
3931 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3934 sector_t end = lba + len;
3937 while (lba < end) {
3938 unsigned long index = lba_to_map_index(lba);
3940 if (lba == map_index_to_lba(index) &&
3941 lba + sdebug_unmap_granularity <= end &&
3945 memset(fsp + lba * sdebug_sector_size,
3951 memset(sip->dif_storep + lba, 0xff,
3956 lba = map_index_to_lba(index + 1);
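
map_region() and unmap_region() (lines 3916-3956) walk the map one granule at a time, using the index arithmetic above to hop to the next granule boundary; unmap_region() additionally zero-fills granules that are wholly covered (line 3945) and poisons the matching protection-information bytes with 0xff (line 3951). The shared walk pattern, sketched with zero alignment for brevity:

    #include <stdint.h>

    static void region_walk(uint64_t lba, uint32_t len, uint64_t granularity,
                            void (*touch)(unsigned long index))
    {
            uint64_t end = lba + len;

            while (lba < end) {
                    unsigned long index = (unsigned long)(lba / granularity);

                    touch(index);   /* set (map) or clear (unmap) the bit */
                    lba = (uint64_t)(index + 1) * granularity; /* next granule */
            }
    }
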
3967 u64 lba;
3974 lba = get_unaligned_be64(cmd + 2);
3981 lba = get_unaligned_be32(cmd + 2);
3988 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3995 lba = get_unaligned_be32(cmd + 2);
4002 lba = get_unaligned_be32(cmd + 2);
4009 lba = get_unaligned_be64(cmd + 12);
4029 ret = check_device_access_params(scp, lba, num, true);
4037 switch (prot_verify_write(scp, lba, num, ei_lba)) {
4063 ret = do_device_access(sip, scp, 0, lba, num, true, group);
4065 map_region(sip, lba, num);
4068 zbc_inc_wp(devip, lba, num);
4114 u64 lba;
4184 lba = get_unaligned_be64(up + 0);
4189 my_name, __func__, k, lba, num, sg_off);
4192 ret = check_device_access_params(scp, lba, num, true);
4211 int prot_ret = prot_verify_write(scp, lba, num,
4222 ret = do_device_access(sip, scp, sg_off, lba, num, true, group);
4225 zbc_inc_wp(devip, lba, num);
4227 map_region(sip, lba, num);
4267 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
4283 ret = check_device_access_params(scp, lba, num, true);
4290 unmap_region(sip, lba, num);
4293 lbaa = lba;
4314 lbaa = lba + i;
4319 map_region(sip, lba, num);
4322 zbc_inc_wp(devip, lba, num);
4333 u32 lba;
4345 lba = get_unaligned_be32(cmd + 2);
4351 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4358 u64 lba;
4373 lba = get_unaligned_be64(cmd + 2);
4379 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4437 u64 lba;
4444 lba = get_unaligned_be64(cmd + 2);
4458 ret = check_device_access_params(scp, lba, num, false);
4479 if (!comp_write_worker(sip, lba, num, arr, false)) {
4485 map_region(sip, lba, num);
4493 __be64 lba;
4534 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4537 ret = check_device_access_params(scp, lba, num, true);
4541 unmap_region(sip, lba, num);
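
The __be64 lba on line 4493 is a field of the UNMAP block descriptor, and line 4534 pulls it back out of each descriptor in the parameter list. The 16-byte wire layout being parsed (per SBC; the struct and helper below are illustrative, not the driver's):

    #include <stdint.h>

    struct unmap_desc {        /* 16 bytes, all fields big-endian */
            uint8_t lba[8];    /* starting LBA */
            uint8_t num[4];    /* number of logical blocks */
            uint8_t rsvd[4];
    };

    static uint64_t desc_lba(const struct unmap_desc *d)
    {
            uint64_t v = 0;

            for (int i = 0; i < 8; i++)     /* get_unaligned_be64() equivalent */
                    v = (v << 8) | d->lba[i];
            return v;
    }
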
4559 u64 lba;
4564 lba = get_unaligned_be64(cmd + 2);
4570 ret = check_device_access_params(scp, lba, 1, false);
4577 mapped = map_state(sip, lba, &num);
4582 if (sdebug_capacity - lba <= 0xffffffff)
4583 num = sdebug_capacity - lba;
4590 put_unaligned_be64(lba, arr + 8); /* LBA */
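
Lines 4559-4590 are GET LBA STATUS: map_state() reports how many blocks from lba share one provisioning state, and past the last mapped region the extent is stretched to the end of the device, capped at 32 bits (lines 4582-4583). A sketch of the 24-byte response being assembled, assuming the SBC layout (8-byte parameter header, then one 16-byte descriptor, hence the arr + 8 on line 4590):

    #include <stdbool.h>
    #include <stdint.h>

    static void put_be32(uint8_t *p, uint32_t v)
    {
            p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
    }

    static void put_be64(uint8_t *p, uint64_t v)
    {
            put_be32(p, (uint32_t)(v >> 32));
            put_be32(p + 4, (uint32_t)v);
    }

    static void fill_lba_status(uint8_t arr[24], uint64_t lba, uint32_t num,
                                bool mapped)
    {
            put_be32(arr, 4 + 16);    /* parameter data length */
            put_be64(arr + 8, lba);   /* extent start LBA */
            put_be32(arr + 16, num);  /* blocks in the extent */
            arr[20] = mapped ? 0 : 1; /* provisioning status: 1 = deallocated */
    }
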
4646 u64 lba;
4651 lba = get_unaligned_be32(cmd + 2);
4654 lba = get_unaligned_be64(cmd + 2);
4657 if (lba + num_blocks > sdebug_capacity) {
4679 u64 lba;
4687 lba = get_unaligned_be32(cmd + 2);
4690 lba = get_unaligned_be64(cmd + 2);
4693 if (lba + nblks > sdebug_capacity) {
4700 block = do_div(lba, sdebug_store_sectors);
4827 u64 lba;
4843 lba = get_unaligned_be64(cmd + 2);
4847 lba = get_unaligned_be32(cmd + 2);
4858 ret = check_device_access_params(scp, lba, a_num, false);
4885 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4906 u64 lba, zs_lba;
4940 for (lba = zs_lba; lba < sdebug_capacity;
4941 lba = zsp->z_start + zsp->z_size) {
4942 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4944 zsp = zbc_zone(devip, lba);
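
The loop at lines 4940-4944 visits every zone from zs_lba upward, stepping by each zone's z_start + z_size; the WARN_ONCE fires if the walk fails to advance to a new zone. The same traversal, sketched with a linear lookup standing in for zbc_zone():

    #include <stddef.h>
    #include <stdint.h>

    struct zone_state { uint64_t z_start, z_size, z_wp; };

    static struct zone_state *zone_at(struct zone_state *z, unsigned int nr,
                                      uint64_t lba)
    {
            for (unsigned int i = 0; i < nr; i++)
                    if (lba >= z[i].z_start && lba < z[i].z_start + z[i].z_size)
                            return &z[i];
            return NULL;
    }

    static void reset_all(struct zone_state *zones, unsigned int nr,
                          uint64_t zs_lba, uint64_t capacity)
    {
            for (uint64_t lba = zs_lba; lba < capacity; ) {
                    struct zone_state *zsp = zone_at(zones, nr, lba);

                    if (!zsp)
                            break;
                    zsp->z_wp = zsp->z_start;         /* rewind write pointer */
                    lba = zsp->z_start + zsp->z_size; /* step to the next zone */
            }
    }
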
6364 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6391 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");