diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index f5de40823259e4..1ec3f7ee0f5fb2 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -4485,6 +4485,16 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) } else { +#ifdef TARGET_ARM64 + // Handle LEA with "contained" BFIZ + if (index->isContained() && index->OperIs(GT_BFIZ)) + { + assert(scale == 0); + scale = (DWORD)index->gtGetOp2()->AsIntConCommon()->IconValue(); + index = index->gtGetOp1()->gtGetOp1(); + } +#endif + // Then compute target reg from [base + index*scale] genScaledAdd(size, lea->GetRegNum(), memBase->GetRegNum(), index->GetRegNum(), scale); } diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index f6626dc3e6f7b0..c8b3c512688dac 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -5443,7 +5443,7 @@ bool Lowering::TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* par // Check if we can "contain" LEA(BFIZ) in order to extend 32bit index to 64bit as part of load/store. if ((index != nullptr) && index->OperIs(GT_BFIZ) && index->gtGetOp1()->OperIs(GT_CAST) && - index->gtGetOp2()->IsCnsIntOrI() && (varTypeIsIntegral(targetType) || varTypeIsFloating(targetType))) + index->gtGetOp2()->IsCnsIntOrI() && !varTypeIsStruct(targetType)) { // BFIZ node is a binary op where op1 is GT_CAST and op2 is GT_CNS_INT GenTreeCast* cast = index->gtGetOp1()->AsCast(); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 1ecb8dab146b0e..7dc8c744009714 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -4735,8 +4735,8 @@ GenTree* Compiler::fgMorphIndexAddr(GenTreeIndexAddr* indexAddr) // we at least will be able to hoist/CSE "index + elemOffset" in some cases. 
// See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497 - // Use 2) form only for primitive types for now - it significantly reduced number of size regressions - if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp)) + // Don't use form 2) for structs, to reduce the number of size regressions + if (varTypeIsStruct(elemTyp)) { groupArrayRefWithElemOffset = false; }