Lines Matching defs:span

123 u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
125 return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
148 static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
151 return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
338 dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
379 u32 span, j;
381 for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) {
398 return span;
418 * span - Span number
430 u32 span, info;
442 for (span = 0; span < raid->spanDepth; span++)
443 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
446 spanBlock[span].
463 return span;
493 u32 info, strip_offset, span, span_offset;
509 for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
510 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
513 span_set->strip_offset[span])
551 u32 span, info;
562 for (span = 0; span < raid->spanDepth; span++)
563 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
566 spanBlock[span].block_span_info.quad[info];
577 strip += span_set->strip_offset[span];
611 u32 info, strip_offset, span, span_offset, retval;
625 for (span = 0, span_offset = 0; span < raid->spanDepth; span++)
626 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
629 span_set->strip_offset[span])
631 span_set->strip_offset[span];
648 static u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
659 arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
676 * This routine calculates the arm, span and block for the specified stripe and
687 * span - Span number
697 u8 physArm, span;
707 /*Get row and span from io_info for Uneven Span IO.*/
709 span = io_info->start_span;
716 rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
717 armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod;
719 if (arm >= SPAN_ROW_SIZE(map, ld, span))
720 arm -= SPAN_ROW_SIZE(map, ld, span);
724 physArm = get_arm(instance, ld, span, stripRow, map);
728 arRef = MR_LdSpanArrayGet(ld, span, map);
759 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
762 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
764 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
767 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
777 * This routine calculates the arm, span and block for the specified stripe and
788 * span - Span number
798 u8 physArm, span;
832 span = 0;
835 span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map);
836 if (span == SPAN_INVALID)
840 /* Get the array on which this span is present */
841 arRef = MR_LdSpanArrayGet(ld, span, map);
875 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
878 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
880 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
883 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
909 u8 span, dataArms, arms, dataArm, logArm;
927 span = 0;
929 span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map);
930 if (span == SPAN_INVALID)
954 pRAID_Context->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | dataArm;
1107 * For Even span region lock optimization.
1128 * For Uneven span region lock optimization.
1223 u8 span, count;
1239 for (span = 0; span < raid->spanDepth; span++) {
1240 if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].
1246 spanBlock[span].block_span_info.
1315 if (span == raid->spanDepth)
1357 u8 bestArm, pd0, pd1, span, arm;
1363 span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
1370 SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;
1372 arRef = MR_LdSpanArrayGet(ld, span, drv_map);
1402 (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
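The functions matched above (MR_LdSpanArrayGet, MR_GetSpanBlock, mr_spanset_get_phy_params, megasas_get_best_arm_pd, ...) appear to come from the megaraid_sas driver's RAID-map code, likely drivers/scsi/megaraid/megaraid_sas_fp.c. The loops around source lines 379-398 and 442-463 walk each span's block_span_info "quad" entries to find which span a logical row falls in. The following is a minimal userspace sketch of that lookup, assuming one quad per span (the driver iterates noElements quads per span and also computes the span-relative block); the names span_quad and find_span_for_row are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define SPAN_INVALID 0xff

struct span_quad {            /* one "quad" describing a span's row range */
	uint64_t log_start;   /* first logical row covered by this span   */
	uint64_t log_end;     /* last logical row covered by this span    */
	uint32_t diff;        /* row stride; 0 marks an unused entry      */
};

/* Return the index of the span whose quad covers 'row', or SPAN_INVALID. */
static uint8_t find_span_for_row(const struct span_quad *quads,
				 unsigned int span_depth, uint64_t row)
{
	for (unsigned int span = 0; span < span_depth; span++) {
		const struct span_quad *q = &quads[span];

		if (q->diff == 0)
			return SPAN_INVALID;
		if (q->log_start <= row && row <= q->log_end &&
		    (row - q->log_start) % q->diff == 0)
			return (uint8_t)span;
	}
	return SPAN_INVALID;
}

int main(void)
{
	/* Two spans: rows 0-99 and 100-199, contiguous rows (diff == 1). */
	struct span_quad quads[] = {
		{ .log_start = 0,   .log_end = 99,  .diff = 1 },
		{ .log_start = 100, .log_end = 199, .diff = 1 },
	};

	printf("row 42  -> span %u\n", find_span_for_row(quads, 2, 42));
	printf("row 150 -> span %u\n", find_span_for_row(quads, 2, 150));
	printf("row 500 -> span %u\n", find_span_for_row(quads, 2, 500));
	return 0;
}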
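The matches at source lines 716-720 show the parity-rotation arithmetic used for RAID-6 on uneven spans: the Q arm for a row sits at SPAN_ROW_SIZE - 1 - (row mod SPAN_ROW_SIZE), and the logical arm is rotated past it and wrapped. The line that combines armQ with the logical arm is not among the matches above; the sketch below assumes the usual armQ + 1 + logArm rotation, with row_size standing in for SPAN_ROW_SIZE(map, ld, span). All names here are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Rotate a logical arm to a physical arm within one span row (RAID-6 style). */
static uint8_t rotate_arm_raid6(uint64_t row, uint32_t row_size, uint32_t log_arm)
{
	uint64_t row_mod = row % row_size;          /* rowMod in the matches above   */
	uint64_t arm_q   = row_size - 1 - row_mod;  /* position of the Q arm         */
	uint64_t arm     = arm_q + 1 + log_arm;     /* assumed combining step        */

	if (arm >= row_size)                        /* wrap around the span row      */
		arm -= row_size;
	return (uint8_t)arm;
}

int main(void)
{
	/* 4 arms per span row: watch the physical arm rotate as the row advances. */
	for (uint64_t row = 0; row < 4; row++)
		printf("row %llu, logical arm 0 -> physical arm %u\n",
		       (unsigned long long)row, rotate_arm_raid6(row, 4, 0));
	return 0;
}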
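Finally, the pattern (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm recurs throughout the matches (source lines 762-767, 878-883, 954, 1402) and is decoded with RAID_CTX_SPANARM_SPAN_MASK at source line 1363: span and arm are packed into the single span_arm byte of the RAID context. The sketch below mirrors that packing; the shift and mask values are assumptions consistent with the visible pattern (5-bit arm, 3-bit span), not values copied from the driver's headers:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define SPANARM_ARM_MASK   0x1f  /* low 5 bits: physical arm (assumed width) */
#define SPANARM_SPAN_SHIFT 5     /* high 3 bits: span number (assumed)       */
#define SPANARM_SPAN_MASK  0xe0

static uint8_t pack_span_arm(uint8_t span, uint8_t phys_arm)
{
	return (uint8_t)((span << SPANARM_SPAN_SHIFT) | (phys_arm & SPANARM_ARM_MASK));
}

static void unpack_span_arm(uint8_t span_arm, uint8_t *span, uint8_t *phys_arm)
{
	*span     = (span_arm & SPANARM_SPAN_MASK) >> SPANARM_SPAN_SHIFT;
	*phys_arm = span_arm & SPANARM_ARM_MASK;
}

int main(void)
{
	uint8_t span, arm;
	uint8_t packed = pack_span_arm(3, 17);

	unpack_span_arm(packed, &span, &arm);
	assert(span == 3 && arm == 17);
	printf("span_arm 0x%02x -> span %u, arm %u\n", packed, span, arm);
	return 0;
}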