@@ -130,6 +130,8 @@ DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Lon

boolean canVmRestartOnAnotherServer(long vmId);

void saveVolumeDetails(Long diskOfferingId, Long volumeId);

/**
* Allocate a volume, or multiple volumes if the template is registered with the 'deploy-as-is' option, allowing multiple disks
*/
@@ -863,18 +863,7 @@ public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offeri
vol.setFormat(getSupportedImageFormatForCluster(vm.getHypervisorType()));
vol = _volsDao.persist(vol);

-List<VolumeDetailVO> volumeDetailsVO = new ArrayList<VolumeDetailVO>();
-DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS);
-if (bandwidthLimitDetail != null) {
-volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue(), false));
-}
-DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.IOPS_LIMIT);
-if (iopsLimitDetail != null) {
-volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.IOPS_LIMIT, iopsLimitDetail.getValue(), false));
-}
-if (!volumeDetailsVO.isEmpty()) {
-_volDetailDao.saveDetails(volumeDetailsVO);
-}
+saveVolumeDetails(offering.getId(), vol.getId());

// Save usage event and update resource count for user vm volumes
if (vm.getType() == VirtualMachine.Type.User) {
@@ -891,6 +880,32 @@ public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offeri
return diskProfile;
}

@Override
public void saveVolumeDetails(Long diskOfferingId, Long volumeId) {
List<VolumeDetailVO> volumeDetailsVO = new ArrayList<>();
DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailDao.findDetail(diskOfferingId, Volume.BANDWIDTH_LIMIT_IN_MBPS);
if (bandwidthLimitDetail != null) {
volumeDetailsVO.add(new VolumeDetailVO(volumeId, Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue(), false));
} else {
VolumeDetailVO bandwidthLimit = _volDetailDao.findDetail(volumeId, Volume.BANDWIDTH_LIMIT_IN_MBPS);
if (bandwidthLimit != null) {
_volDetailDao.remove(bandwidthLimit.getId());
}
}
DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailDao.findDetail(diskOfferingId, Volume.IOPS_LIMIT);
if (iopsLimitDetail != null) {
volumeDetailsVO.add(new VolumeDetailVO(volumeId, Volume.IOPS_LIMIT, iopsLimitDetail.getValue(), false));
} else {
VolumeDetailVO iopsLimit = _volDetailDao.findDetail(volumeId, Volume.IOPS_LIMIT);
if (iopsLimit != null) {
_volDetailDao.remove(iopsLimit.getId());
}
}
if (!volumeDetailsVO.isEmpty()) {
_volDetailDao.saveDetails(volumeDetailsVO);
}
}
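Note: the new saveVolumeDetails() acts as an upsert for the two limit details. When the disk offering defines Volume.BANDWIDTH_LIMIT_IN_MBPS or Volume.IOPS_LIMIT the values are copied onto the volume; when it does not, any stale detail left over from a previous offering is removed. A minimal standalone sketch of that behaviour, using plain in-memory maps instead of the CloudStack DAOs (class and key names below are illustrative only, not part of the patch):

    // Illustrative sketch only: mimics the saveVolumeDetails() upsert/cleanup semantics
    // with in-memory maps instead of DiskOfferingDetailsDao/VolumeDetailsDao.
    import java.util.HashMap;
    import java.util.Map;

    public class VolumeDetailUpsertSketch {
        static final String BANDWIDTH_LIMIT_IN_MBPS = "bandwidthLimitInMbps"; // assumed key name
        static final String IOPS_LIMIT = "iopsLimit";                         // assumed key name

        // offeringDetails: details defined on the (new) disk offering
        // volumeDetails:   details currently stored for the volume
        static void saveVolumeDetails(Map<String, String> offeringDetails, Map<String, String> volumeDetails) {
            for (String key : new String[] {BANDWIDTH_LIMIT_IN_MBPS, IOPS_LIMIT}) {
                String offeringValue = offeringDetails.get(key);
                if (offeringValue != null) {
                    volumeDetails.put(key, offeringValue);  // copy the limit from the offering
                } else {
                    volumeDetails.remove(key);              // drop a stale limit from a previous offering
                }
            }
        }

        public static void main(String[] args) {
            Map<String, String> offering = new HashMap<>();
            offering.put(BANDWIDTH_LIMIT_IN_MBPS, "100");
            Map<String, String> volume = new HashMap<>();
            volume.put(IOPS_LIMIT, "5000"); // left over from the old offering
            saveVolumeDetails(offering, volume);
            System.out.println(volume);     // {bandwidthLimitInMbps=100}
        }
    }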

private DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering offering, Long rootDisksize, Long minIops, Long maxIops, VirtualMachineTemplate template, VirtualMachine vm,
Account owner, long deviceId, String configurationId) {
assert (template.getFormat() != ImageFormat.ISO) : "ISO is not a template.";
@@ -80,4 +80,7 @@ public boolean isDisplay() {
return display;
}

public void setValue(String value) {
this.value = value;
}
}
@@ -22,6 +22,7 @@

import javax.inject.Inject;

import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
@@ -38,6 +39,8 @@
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO;
import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao;
import org.apache.cloudstack.storage.RemoteHostEndPoint;
import org.apache.cloudstack.storage.command.CommandResult;
import org.apache.cloudstack.storage.command.CopyCommand;
@@ -127,11 +130,15 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver {
@Inject
private ConfigurationDao configDao;
@Inject
private DiskOfferingDetailsDao diskOfferingDetailsDao;
@Inject
private HostDao hostDao;
@Inject
private VMInstanceDao vmInstanceDao;
@Inject
private VolumeService volumeService;
@Inject
private VolumeOrchestrationService volumeMgr;

public ScaleIOPrimaryDataStoreDriver() {

@@ -141,40 +148,47 @@ public ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Ex
return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao);
}

private boolean setVolumeLimitsOnSDC(VolumeVO volume, Host host, DataStore dataStore, Long iopsLimit, Long bandwidthLimitInKbps) throws Exception {
final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
if (StringUtils.isBlank(sdcId)) {
alertHostSdcDisconnection(host);
throw new CloudRuntimeException("Unable to grant access to volume: " + volume.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
}

final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId, iopsLimit, bandwidthLimitInKbps);
}

private boolean setVolumeLimitsFromDetails(VolumeVO volume, Host host, DataStore dataStore) throws Exception {
Long bandwidthLimitInKbps = 0L; // Unlimited
// Check Bandwidth Limit parameter in volume details
final VolumeDetailVO bandwidthVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS);
if (bandwidthVolumeDetail != null && bandwidthVolumeDetail.getValue() != null) {
bandwidthLimitInKbps = Long.parseLong(bandwidthVolumeDetail.getValue()) * 1024;
}

Long iopsLimit = 0L; // Unlimited
// Check IOPS Limit parameter in volume details, else try MaxIOPS
final VolumeDetailVO iopsVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.IOPS_LIMIT);
if (iopsVolumeDetail != null && iopsVolumeDetail.getValue() != null) {
iopsLimit = Long.parseLong(iopsVolumeDetail.getValue());
} else if (volume.getMaxIops() != null) {
iopsLimit = volume.getMaxIops();
}
if (iopsLimit > 0 && iopsLimit < ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT) {
iopsLimit = ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT;
}

return setVolumeLimitsOnSDC(volume, host, dataStore, iopsLimit, bandwidthLimitInKbps);
}
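These helpers centralize the limit handling that grantAccess() previously did inline: the bandwidth detail is stored in Mbps but passed to mapVolumeToSdcWithLimits() in Kbps (hence the * 1024), 0 means unlimited, and a non-zero IOPS limit below ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT is raised to that minimum. A small self-contained sketch of the derivation (the minimum value below is an assumption for illustration; the driver reads the real values from the volume details):

    // Illustrative sketch of the limit derivation in setVolumeLimitsFromDetails(); not part of the patch.
    public class ScaleIOLimitSketch {
        static final long MINIMUM_ALLOWED_IOPS_LIMIT = 10; // assumed value, stands in for ScaleIOUtil's constant

        // bandwidth detail is stored in Mbps; the SDC mapping expects Kbps, null means unlimited (0)
        static long bandwidthKbps(Long bandwidthLimitInMbps) {
            return bandwidthLimitInMbps == null ? 0L : bandwidthLimitInMbps * 1024;
        }

        // prefer the explicit IOPS detail, fall back to the volume's maxIops, clamp small non-zero values up
        static long iopsLimit(Long iopsDetail, Long volumeMaxIops) {
            long iops = iopsDetail != null ? iopsDetail : (volumeMaxIops != null ? volumeMaxIops : 0L);
            if (iops > 0 && iops < MINIMUM_ALLOWED_IOPS_LIMIT) {
                iops = MINIMUM_ALLOWED_IOPS_LIMIT; // 0 stays "unlimited", tiny limits are bumped to the minimum
            }
            return iops;
        }

        public static void main(String[] args) {
            System.out.println(bandwidthKbps(100L));   // 102400 Kbps for a 100 Mbps detail
            System.out.println(iopsLimit(null, 5L));   // falls back to maxIops, raised to the minimum: 10
            System.out.println(iopsLimit(null, null)); // 0, i.e. unlimited
        }
    }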

@Override
public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
try {
if (DataObjectType.VOLUME.equals(dataObject.getType())) {
final VolumeVO volume = volumeDao.findById(dataObject.getId());
LOGGER.debug("Granting access for PowerFlex volume: " + volume.getPath());

-Long bandwidthLimitInKbps = Long.valueOf(0); // Unlimited
-// Check Bandwidht Limit parameter in volume details
-final VolumeDetailVO bandwidthVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS);
-if (bandwidthVolumeDetail != null && bandwidthVolumeDetail.getValue() != null) {
-bandwidthLimitInKbps = Long.parseLong(bandwidthVolumeDetail.getValue()) * 1024;
-}
-
-Long iopsLimit = Long.valueOf(0); // Unlimited
-// Check IOPS Limit parameter in volume details, else try MaxIOPS
-final VolumeDetailVO iopsVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.IOPS_LIMIT);
-if (iopsVolumeDetail != null && iopsVolumeDetail.getValue() != null) {
-iopsLimit = Long.parseLong(iopsVolumeDetail.getValue());
-} else if (volume.getMaxIops() != null) {
-iopsLimit = volume.getMaxIops();
-}
-if (iopsLimit > 0 && iopsLimit < ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT) {
-iopsLimit = ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT;
-}
-
-final String sdcId = getConnectedSdc(dataStore.getId(), host.getId());
-if (StringUtils.isBlank(sdcId)) {
-alertHostSdcDisconnection(host);
-throw new CloudRuntimeException("Unable to grant access to volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress());
-}
-
-final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId());
-return client.mapVolumeToSdcWithLimits(ScaleIOUtil.getVolumePath(volume.getPath()), sdcId, iopsLimit, bandwidthLimitInKbps);
+return setVolumeLimitsFromDetails(volume, host, dataStore);
} else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) {
final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null);
LOGGER.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath());
@@ -791,7 +805,15 @@ private Answer copyTemplateToVolume(DataObject srcData, DataObject destData, Hos
LOGGER.error(errorMsg);
answer = new Answer(cmd, false, errorMsg);
} else {
-answer = ep.sendMessage(cmd);
+VolumeVO volume = volumeDao.findById(destData.getId());
+Host host = destHost != null ? destHost : hostDao.findById(ep.getId());
+try {
+setVolumeLimitsOnSDC(volume, host, destData.getDataStore(), 0L, 0L);
+answer = ep.sendMessage(cmd);
+} catch (Exception e) {
+LOGGER.error("Failed to copy template to volume due to: " + e.getMessage(), e);
+answer = new Answer(cmd, false, e.getMessage());
+}
}

return answer;
@@ -1181,7 +1203,7 @@ private void resizeVolume(VolumeInfo volumeInfo) {
ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload();
long newSizeInBytes = payload.newSize != null ? payload.newSize : volumeInfo.getSize();
// Only increase size is allowed and size should be specified in granularity of 8 GB
-if (newSizeInBytes <= volumeInfo.getSize()) {
+if (newSizeInBytes < volumeInfo.getSize()) {
throw new CloudRuntimeException("Only increase size is allowed for volume: " + volumeInfo.getName());
}

@@ -1210,6 +1232,20 @@ private void resizeVolume(VolumeInfo volumeInfo) {
}
}

Long newMaxIops = payload.newMaxIops != null ? payload.newMaxIops : volumeInfo.getMaxIops();
long newBandwidthLimit = 0L;
Long newDiskOfferingId = payload.newDiskOfferingId != null ? payload.newDiskOfferingId : volumeInfo.getDiskOfferingId();
if (newDiskOfferingId != null) {
DiskOfferingDetailVO bandwidthLimitDetail = diskOfferingDetailsDao.findDetail(newDiskOfferingId, Volume.BANDWIDTH_LIMIT_IN_MBPS);
if (bandwidthLimitDetail != null) {
newBandwidthLimit = Long.parseLong(bandwidthLimitDetail.getValue()) * 1024;
}
DiskOfferingDetailVO iopsLimitDetail = diskOfferingDetailsDao.findDetail(newDiskOfferingId, Volume.IOPS_LIMIT);
if (iopsLimitDetail != null) {
newMaxIops = Long.parseLong(iopsLimitDetail.getValue());
}
}

if (volumeInfo.getFormat().equals(Storage.ImageFormat.QCOW2) || attachedRunning) {
LOGGER.debug("Volume needs to be resized at the hypervisor host");

@@ -1229,9 +1265,8 @@ private void resizeVolume(VolumeInfo volumeInfo) {
volumeInfo.getPassphrase(), volumeInfo.getEncryptFormat());

try {
-if (!attachedRunning) {
-grantAccess(volumeInfo, ep, volumeInfo.getDataStore());
-}
+VolumeVO volume = volumeDao.findById(volumeInfo.getId());
+setVolumeLimitsOnSDC(volume, host, volumeInfo.getDataStore(), newMaxIops != null ? newMaxIops : 0L, newBandwidthLimit);
Answer answer = ep.sendMessage(resizeVolumeCommand);

if (!answer.getResult() && volumeInfo.getFormat().equals(Storage.ImageFormat.QCOW2)) {
@@ -1253,14 +1288,23 @@ private void resizeVolume(VolumeInfo volumeInfo) {
VolumeVO volume = volumeDao.findById(volumeInfo.getId());
long oldVolumeSize = volume.getSize();
volume.setSize(scaleIOVolume.getSizeInKb() * 1024);
if (payload.newMinIops != null) {
volume.setMinIops(payload.newMinIops);
}
if (payload.newMaxIops != null) {
volume.setMaxIops(payload.newMaxIops);
}
volumeDao.update(volume.getId(), volume);
if (payload.newDiskOfferingId != null) {
volumeMgr.saveVolumeDetails(payload.newDiskOfferingId, volume.getId());
}

long capacityBytes = storagePool.getCapacityBytes();
long usedBytes = storagePool.getUsedBytes();

long newVolumeSize = volume.getSize();
usedBytes += newVolumeSize - oldVolumeSize;
-storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes);
+storagePool.setUsedBytes(Math.min(usedBytes, capacityBytes));
storagePoolDao.update(storagePoolId, storagePool);
} catch (Exception e) {
String errMsg = "Unable to resize PowerFlex volume: " + volumeInfo.getId() + " due to " + e.getMessage();
@@ -21,6 +21,7 @@ public class ResizeVolumePayload {
public final Long newSize;
public final Long newMinIops;
public final Long newMaxIops;
public Long newDiskOfferingId;
public final Integer newHypervisorSnapshotReserve;
public final boolean shrinkOk;
public final String instanceName;
@@ -37,5 +38,12 @@ public ResizeVolumePayload(Long newSize, Long newMinIops, Long newMaxIops, Integ
this.instanceName = instanceName;
this.hosts = hosts;
this.isManaged = isManaged;
this.newDiskOfferingId = null;
}

public ResizeVolumePayload(Long newSize, Long newMinIops, Long newMaxIops, Long newDiskOfferingId, Integer newHypervisorSnapshotReserve, boolean shrinkOk,
String instanceName, long[] hosts, boolean isManaged) {
this(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged);
this.newDiskOfferingId = newDiskOfferingId;
}
}
server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java (12 changes: 11 additions & 1 deletion)
@@ -1471,7 +1471,7 @@ private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long n
}
}

-ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged);
+ResizeVolumePayload payload = new ResizeVolumePayload(newSize, newMinIops, newMaxIops, newDiskOfferingId, newHypervisorSnapshotReserve, shrinkOk, instanceName, hosts, isManaged);

try {
VolumeInfo vol = volFactory.getVolume(volume.getId());
@@ -1510,6 +1510,15 @@ private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long n

if (newDiskOfferingId != null) {
volume.setDiskOfferingId(newDiskOfferingId);
_volumeMgr.saveVolumeDetails(newDiskOfferingId, volume.getId());
}

if (newMinIops != null) {
volume.setMinIops(newMinIops);
}

if (newMaxIops != null) {
volume.setMaxIops(newMaxIops);
}

// Update size if volume has same size as before, else it is already updated
@@ -2029,6 +2038,7 @@ private Volume changeDiskOfferingForVolumeInternal(VolumeVO volume, Long newDisk

if (newDiskOffering != null) {
volume.setDiskOfferingId(newDiskOfferingId);
_volumeMgr.saveVolumeDetails(newDiskOfferingId, volume.getId());
}

_volsDao.update(volume.getId(), volume);