diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java index 19670b2066e5..4a9001b27dad 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBase.java @@ -51,8 +51,10 @@ import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.BooleanUtils; import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; import org.joda.time.Duration; @@ -173,7 +175,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe * used to describe what type of resource a storage device is of */ public enum SRType { - EXT, FILE, ISCSI, ISO, LVM, LVMOHBA, LVMOISCSI, + EXT, ISO, LVM, LVMOHBA, LVMOISCSI, /** * used for resigning metadata (like SR UUID and VDI UUID when a * particular storage manager is installed on a XenServer host (for back-end snapshots to work)) @@ -761,11 +763,6 @@ public HashMap clusterVMMetaDataSync(final Connection conn) { final HashMap vmMetaDatum = new HashMap(); try { final Map vm_map = VM.getAllRecords(conn); // USE - // THIS TO - // GET ALL - // VMS - // FROM A - // CLUSTER if (vm_map != null) { for (final VM.Record record : vm_map.values()) { if (record.isControlDomain || record.isASnapshot || record.isATemplate) { @@ -2274,16 +2271,11 @@ public HostStatsEntry getHostStats(final Connection conn, final GetHostStatsComm } protected HashMap getHostVmStateReport(final Connection conn) { - - // TODO : new VM sync model does not require a cluster-scope report, we - // need to optimize - // the report accordingly final HashMap vmStates = new HashMap(); Map vm_map = null; for (int i = 0; i < 2; i++) { try { - vm_map = VM.getAllRecords(conn); // USE THIS TO GET ALL VMS FROM - // A CLUSTER + vm_map = VM.getAllRecords(conn); break; } catch (final Throwable e) { s_logger.warn("Unable to get vms", e); @@ -2652,76 +2644,6 @@ public String getLabel() { return result; } - protected SR getLocalEXTSR(final Connection conn) { - try { - final Map map = SR.getAllRecords(conn); - if (map != null && !map.isEmpty()) { - for (final Map.Entry entry : map.entrySet()) { - final SR.Record srRec = entry.getValue(); - if (SRType.FILE.equals(srRec.type) || SRType.EXT.equals(srRec.type)) { - final Set pbds = srRec.PBDs; - if (pbds == null) { - continue; - } - for (final PBD pbd : pbds) { - final Host host = pbd.getHost(conn); - if (!isRefNull(host) && host.getUuid(conn).equals(_host.getUuid())) { - if (!pbd.getCurrentlyAttached(conn)) { - pbd.plug(conn); - } - final SR sr = entry.getKey(); - sr.scan(conn); - return sr; - } - } - } - } - } - } catch (final XenAPIException e) { - final String msg = "Unable to get local EXTSR in host:" + _host.getUuid() + e.toString(); - s_logger.warn(msg); - } catch (final XmlRpcException e) { - final String msg = "Unable to get local EXTSR in host:" + _host.getUuid() + e.getCause(); - s_logger.warn(msg); - } - return null; - } - - protected SR getLocalLVMSR(final Connection conn) { - try { - final Map map = SR.getAllRecords(conn); - if (map != null && !map.isEmpty()) { - for 
(final Map.Entry entry : map.entrySet()) { - final SR.Record srRec = entry.getValue(); - if (SRType.LVM.equals(srRec.type)) { - final Set pbds = srRec.PBDs; - if (pbds == null) { - continue; - } - for (final PBD pbd : pbds) { - final Host host = pbd.getHost(conn); - if (!isRefNull(host) && host.getUuid(conn).equals(_host.getUuid())) { - if (!pbd.getCurrentlyAttached(conn)) { - pbd.plug(conn); - } - final SR sr = entry.getKey(); - sr.scan(conn); - return sr; - } - } - } - } - } - } catch (final XenAPIException e) { - final String msg = "Unable to get local LVMSR in host:" + _host.getUuid() + e.toString(); - s_logger.warn(msg); - } catch (final XmlRpcException e) { - final String msg = "Unable to get local LVMSR in host:" + _host.getUuid() + e.getCause(); - s_logger.warn(msg); - } - return null; - } - public String getLowestAvailableVIFDeviceNum(final Connection conn, final VM vm) { String vmName = ""; try { @@ -3677,75 +3599,144 @@ public StartupCommand[] initialize() throws IllegalArgumentException { } catch (final Throwable e) { s_logger.warn("Check for master failed, failing the FULL Cluster sync command"); } - final StartupStorageCommand sscmd = initializeLocalSR(conn); - if (sscmd != null) { - return new StartupCommand[] {cmd, sscmd}; + List startUpLocalStorageCommands = null; + try { + startUpLocalStorageCommands = initializeLocalSrs(conn); + } catch (XenAPIException | XmlRpcException e) { + s_logger.warn("Could not initialize local SRs on host: " + _host.getUuid(), e); + } + if (CollectionUtils.isEmpty(startUpLocalStorageCommands)) { + return new StartupCommand[] {cmd}; } - return new StartupCommand[] {cmd}; + return createStartupCommandsArray(cmd, startUpLocalStorageCommands); } - protected StartupStorageCommand initializeLocalSR(final Connection conn) { - final SR lvmsr = getLocalLVMSR(conn); - if (lvmsr != null) { - try { - _host.setLocalSRuuid(lvmsr.getUuid(conn)); - - final String lvmuuid = lvmsr.getUuid(conn); - final long cap = lvmsr.getPhysicalSize(conn); - if (cap > 0) { - final long avail = cap - lvmsr.getPhysicalUtilisation(conn); - lvmsr.setNameLabel(conn, lvmuuid); - final String name = "Cloud Stack Local LVM Storage Pool for " + _host.getUuid(); - lvmsr.setNameDescription(conn, name); - final Host host = Host.getByUuid(conn, _host.getUuid()); - final String address = host.getAddress(conn); - final StoragePoolInfo pInfo = new StoragePoolInfo(lvmuuid, address, SRType.LVM.toString(), SRType.LVM.toString(), StoragePoolType.LVM, cap, avail); - final StartupStorageCommand cmd = new StartupStorageCommand(); - cmd.setPoolInfo(pInfo); - cmd.setGuid(_host.getUuid()); - cmd.setDataCenter(Long.toString(_dcId)); - cmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL); - return cmd; + /** + * We simply create an array and add the {@link StartupRoutingCommand} as the first element of the array. Then, we add all elements from startUpLocalStorageCommands + */ + private StartupCommand[] createStartupCommandsArray(StartupRoutingCommand startupRoutingCommand, List startUpLocalStorageCommands) { + StartupCommand[] startupCommands = new StartupCommand[startUpLocalStorageCommands.size() + 1]; + startupCommands[0] = startupRoutingCommand; + for (int i = 1; i < startupCommands.length; i++) { + startupCommands[i] = startUpLocalStorageCommands.get(i - 1); + } + return startupCommands; + } + + /** + * This method will return a list of all local SRs. + * An SR is considered local if it meets all of the following criteria: + *
    + *
<ul>
+ *  <li> {@link Record#shared} is equal to false
+ *  <li> The PBDs of the SR ({@link Record#PBDs}) are connected to host {@link #_host}
+ *  <li> SR type is equal to the {@link SRType} sent as parameter
+ * </ul>
+ */ + protected List getAllLocalSrForType(Connection conn, SRType srType) throws XenAPIException, XmlRpcException { + List localSrs = new ArrayList<>(); + Map allSrRecords = SR.getAllRecords(conn); + if (MapUtils.isEmpty(allSrRecords)) { + return localSrs; + } + for (Map.Entry entry : allSrRecords.entrySet()) { + SR.Record srRec = entry.getValue(); + if (!srType.equals(srRec.type)) { + continue; + } + if (BooleanUtils.toBoolean(srRec.shared)) { + continue; + } + Set pbds = srRec.PBDs; + if (CollectionUtils.isEmpty(pbds)) { + continue; + } + for (PBD pbd : pbds) { + Host host = pbd.getHost(conn); + if (!isRefNull(host) && org.apache.commons.lang3.StringUtils.equals(host.getUuid(conn), _host.getUuid())) { + if (!pbd.getCurrentlyAttached(conn)) { + s_logger.debug(String.format("PBD [%s] of local SR [%s] was unplugged, pluggin it now", pbd.getUuid(conn), srRec.uuid)); + pbd.plug(conn); + } + s_logger.debug("Scanning local SR: " + srRec.uuid); + SR sr = entry.getKey(); + sr.scan(conn); + localSrs.add(sr); } - } catch (final XenAPIException e) { - final String msg = "build local LVM info err in host:" + _host.getUuid() + e.toString(); - s_logger.warn(msg); - } catch (final XmlRpcException e) { - final String msg = "build local LVM info err in host:" + _host.getUuid() + e.getMessage(); - s_logger.warn(msg); } } + s_logger.debug(String.format("Found %d local storage of type [%s] for host [%s]", localSrs.size(), srType.toString(), _host.getUuid())); + return localSrs; + } - final SR extsr = getLocalEXTSR(conn); - if (extsr != null) { - try { - final String extuuid = extsr.getUuid(conn); - _host.setLocalSRuuid(extuuid); - final long cap = extsr.getPhysicalSize(conn); - if (cap > 0) { - final long avail = cap - extsr.getPhysicalUtilisation(conn); - extsr.setNameLabel(conn, extuuid); - final String name = "Cloud Stack Local EXT Storage Pool for " + _host.getUuid(); - extsr.setNameDescription(conn, name); - final Host host = Host.getByUuid(conn, _host.getUuid()); - final String address = host.getAddress(conn); - final StoragePoolInfo pInfo = new StoragePoolInfo(extuuid, address, SRType.EXT.toString(), SRType.EXT.toString(), StoragePoolType.EXT, cap, avail); - final StartupStorageCommand cmd = new StartupStorageCommand(); - cmd.setPoolInfo(pInfo); - cmd.setGuid(_host.getUuid()); - cmd.setDataCenter(Long.toString(_dcId)); - cmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL); - return cmd; - } - } catch (final XenAPIException e) { - final String msg = "build local EXT info err in host:" + _host.getUuid() + e.toString(); - s_logger.warn(msg); - } catch (final XmlRpcException e) { - final String msg = "build local EXT info err in host:" + _host.getUuid() + e.getMessage(); - s_logger.warn(msg); + /** + * This method will prepare Local SRs to be used by Apache CloudStack. + */ + protected List initializeLocalSrs(Connection conn) throws XenAPIException, XmlRpcException { + List localStorageStartupCommands = new ArrayList<>(); + List allLocalSrs = getAllLocalSrs(conn); + + for (SR sr : allLocalSrs) { + long totalCapacity = sr.getPhysicalSize(conn); + if (totalCapacity > 0) { + StartupStorageCommand cmd = createStartUpStorageCommand(conn, sr); + localStorageStartupCommands.add(cmd); } } - return null; + return localStorageStartupCommands; + } + + /** + * This method will retrieve all Local SRs according to {@link #getAllLocalSrForType(Connection, SRType)}. + * The types used are {@link SRType#LVM} and {@link SRType#EXT}. 
+ * + */ + protected List getAllLocalSrs(Connection conn) throws XenAPIException, XmlRpcException { + List allLocalSrLvmType = getAllLocalSrForType(conn, SRType.LVM); + List allLocalSrExtType = getAllLocalSrForType(conn, SRType.EXT); + List allLocalSrs = new ArrayList<>(allLocalSrLvmType); + allLocalSrs.addAll(allLocalSrExtType); + return allLocalSrs; + } + + /** + * This method creates the StartUp storage command for the local SR. + * We will configure 'name-label' and 'description' using {@link #configureStorageNameAndDescription(Connection, SR)}. + * Then, we will create the POJO {@link StoragePoolInfo} with SR's information using method {@link #createStoragePoolInfo(Connection, SR)}. + */ + protected StartupStorageCommand createStartUpStorageCommand(Connection conn, SR sr) throws XenAPIException, XmlRpcException { + configureStorageNameAndDescription(conn, sr); + + StoragePoolInfo storagePoolInfo = createStoragePoolInfo(conn, sr); + + StartupStorageCommand cmd = new StartupStorageCommand(); + cmd.setPoolInfo(storagePoolInfo); + cmd.setGuid(_host.getUuid()); + cmd.setDataCenter(Long.toString(_dcId)); + cmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL); + + s_logger.debug(String.format("StartUp command created for local storage [%s] of type [%s] on host [%s]", storagePoolInfo.getUuid(), storagePoolInfo.getPoolType(), _host.getUuid())); + return cmd; + }
+ + /** + * Instantiate {@link StoragePoolInfo} with SR's information. + */ + protected StoragePoolInfo createStoragePoolInfo(Connection conn, SR sr) throws XenAPIException, XmlRpcException { + long totalCapacity = sr.getPhysicalSize(conn); + String srUuid = sr.getUuid(conn); + Host host = Host.getByUuid(conn, _host.getUuid()); + String address = host.getAddress(conn); + long availableCapacity = totalCapacity - sr.getPhysicalUtilisation(conn); + String srType = sr.getType(conn).toUpperCase(); + return new StoragePoolInfo(srUuid, address, srType, srType, StoragePoolType.valueOf(srType), totalCapacity, availableCapacity); + } + + protected void configureStorageNameAndDescription(Connection conn, SR sr) throws XenAPIException, XmlRpcException { + String srUuid = sr.getUuid(conn); + sr.setNameLabel(conn, srUuid); + + String nameFormat = "Cloud Stack Local (%s) Storage Pool for %s"; + sr.setNameDescription(conn, String.format(nameFormat, sr.getType(conn), _host.getUuid())); } public boolean isDeviceUsed(final Connection conn, final VM vm, final Long deviceId) {
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer56Resource.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer56Resource.java index e8e21d4e8b47..b7d0273675c6 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer56Resource.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XenServer56Resource.java @@ -125,7 +125,6 @@ public Boolean checkHeartbeat(final String hostuuid) { @Override public StartupCommand[] initialize() { pingXAPI(); - final StartupCommand[] cmds = super.initialize(); - return cmds; + return super.initialize(); } } \ No newline at end of file diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java index d809560ac22d..a0e5c4616d37 100644 --- 
a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/Xenserver625StorageProcessor.java @@ -75,7 +75,7 @@ private void mountNfs(Connection conn, String remoteDir, String localDir) { } String result = hypervisorResource.callHostPluginAsync(conn, "cloud-plugin-storage", "mountNfsSecondaryStorage", 100 * 1000, "localDir", localDir, "remoteDir", remoteDir); if (StringUtils.isBlank(result)) { - final String errMsg = "Could not mount secondary storage " + remoteDir + " on host " + localDir; + String errMsg = "Could not mount secondary storage " + remoteDir + " on host " + localDir; s_logger.warn(errMsg); throw new CloudRuntimeException(errMsg); } diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XsHost.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XsHost.java index e17a017e6f40..dcc5ff248285 100644 --- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XsHost.java +++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/XsHost.java @@ -43,7 +43,6 @@ public class XsHost { private Integer cpuSockets; private int cpus; private String productVersion; - private String localSRuuid; public String getSystemvmisouuid() { return systemvmisouuid; @@ -197,14 +196,6 @@ public void setProductVersion(final String productVersion) { this.productVersion = productVersion; } - public String getLocalSRuuid() { - return localSRuuid; - } - - public void setLocalSRuuid(final String localSRuuid) { - this.localSRuuid = localSRuuid; - } - @Override public String toString() { return new StringBuilder("XS[").append(uuid).append("-").append(ip).append("]").toString(); diff --git a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBaseTest.java b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBaseTest.java index dfdc127fab18..b34bba09e807 100644 --- a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBaseTest.java +++ b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/CitrixResourceBaseTest.java @@ -16,14 +16,20 @@ package com.cloud.hypervisor.xenserver.resource; import java.io.File; +import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.Set; import org.apache.xmlrpc.XmlRpcException; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.BDDMockito; +import org.mockito.InOrder; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.Spy; @@ -31,14 +37,21 @@ import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; +import com.cloud.agent.api.StartupStorageCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.hypervisor.xenserver.resource.CitrixResourceBase.SRType; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.Storage.StorageResourceType; import com.cloud.utils.script.Script; import com.xensource.xenapi.Connection; import com.xensource.xenapi.Host; import com.xensource.xenapi.Host.Record; +import com.xensource.xenapi.PBD; +import com.xensource.xenapi.SR; 
import com.xensource.xenapi.Types.XenAPIException; @RunWith(PowerMockRunner.class) -@PrepareForTest({Host.class, Script.class}) +@PrepareForTest({Host.class, Script.class, SR.class}) public class CitrixResourceBaseTest { @Spy @@ -168,4 +181,191 @@ public void actualIsoTemplateTestXenServer71() throws XenAPIException, XmlRpcExc Assert.assertEquals("guest-tools.iso", returnedIsoTemplateName); } + + @Test + public void getAllLocalSrForTypeTest() throws Exception { + String mockHostUuid = "hostUuid"; + citrixResourceBase._host.setUuid(mockHostUuid); + + Connection connectionMock = Mockito.mock(Connection.class); + + SR srExtShared = Mockito.mock(SR.class); + SR srExtNonShared = Mockito.mock(SR.class); + + List expectedListOfSrs = new ArrayList<>(); + expectedListOfSrs.add(srExtNonShared); + + Set pbds = new HashSet<>(); + PBD pbdMock = Mockito.mock(PBD.class); + Host hostMock = Mockito.mock(Host.class); + Mockito.when(hostMock.getUuid(connectionMock)).thenReturn(mockHostUuid); + Mockito.when(hostMock.toWireString()).thenReturn(mockHostUuid); + + Mockito.when(pbdMock.getHost(connectionMock)).thenReturn(hostMock); + pbds.add(pbdMock); + + SR.Record srExtSharedRecord = Mockito.mock(SR.Record.class); + srExtSharedRecord.type = "EXT"; + srExtSharedRecord.shared = true; + srExtSharedRecord.PBDs = pbds; + + SR.Record srExtNonSharedRecord = Mockito.mock(SR.Record.class); + srExtNonSharedRecord.type = "EXT"; + srExtNonSharedRecord.shared = false; + srExtNonSharedRecord.PBDs = pbds; + + Map mapOfSrsRecords = new HashMap<>(); + mapOfSrsRecords.put(srExtShared, srExtSharedRecord); + mapOfSrsRecords.put(srExtNonShared, srExtNonSharedRecord); + + PowerMockito.mockStatic(SR.class); + BDDMockito.given(SR.getAllRecords(connectionMock)).willReturn(mapOfSrsRecords); + + List allLocalSrForType = citrixResourceBase.getAllLocalSrForType(connectionMock, SRType.EXT); + + Assert.assertEquals(expectedListOfSrs.size(), allLocalSrForType.size()); + Assert.assertEquals(expectedListOfSrs.get(0), allLocalSrForType.get(0)); + } + + @Test + public void getAllLocalSrForTypeNoSrsFoundTest() throws XenAPIException, XmlRpcException { + Connection connectionMock = Mockito.mock(Connection.class); + List allLocalSrForType = citrixResourceBase.getAllLocalSrForType(connectionMock, SRType.EXT); + Assert.assertTrue(allLocalSrForType.isEmpty()); + } + + @Test + public void getAllLocalSrsTest() throws XenAPIException, XmlRpcException { + Connection connectionMock = Mockito.mock(Connection.class); + SR sr1 = Mockito.mock(SR.class); + List srsExt = new ArrayList<>(); + srsExt.add(sr1); + + SR sr2 = Mockito.mock(SR.class); + List srsLvm = new ArrayList<>(); + srsLvm.add(sr2); + + Mockito.doReturn(srsExt).when(citrixResourceBase).getAllLocalSrForType(connectionMock, SRType.EXT); + Mockito.doReturn(srsLvm).when(citrixResourceBase).getAllLocalSrForType(connectionMock, SRType.LVM); + + List allLocalSrs = citrixResourceBase.getAllLocalSrs(connectionMock); + + Assert.assertEquals(srsExt.size() + srsLvm.size(), allLocalSrs.size()); + Assert.assertEquals(srsExt.get(0), allLocalSrs.get(1)); + Assert.assertEquals(srsLvm.get(0), allLocalSrs.get(0)); + + InOrder inOrder = Mockito.inOrder(citrixResourceBase); + inOrder.verify(citrixResourceBase).getAllLocalSrForType(connectionMock, SRType.LVM); + inOrder.verify(citrixResourceBase).getAllLocalSrForType(connectionMock, SRType.EXT); + } + + @Test + public void createStoragePoolInfoTest() throws XenAPIException, XmlRpcException { + Connection connectionMock = Mockito.mock(Connection.class); + Host hostMock = 
Mockito.mock(Host.class); + SR srMock = Mockito.mock(SR.class); + + String hostAddress = "hostAddress"; + Mockito.when(hostMock.getAddress(connectionMock)).thenReturn(hostAddress); + + String hostUuid = "hostUuid"; + citrixResourceBase._host.setUuid(hostUuid); + + PowerMockito.mockStatic(Host.class); + PowerMockito.when(Host.getByUuid(connectionMock, hostUuid)).thenReturn(hostMock); + + String srType = "ext"; + String srUuid = "srUuid"; + long srPhysicalSize = 100l; + long physicalUtilization = 10l; + + Mockito.when(srMock.getPhysicalSize(connectionMock)).thenReturn(srPhysicalSize); + Mockito.when(srMock.getUuid(connectionMock)).thenReturn(srUuid); + Mockito.when(srMock.getPhysicalUtilisation(connectionMock)).thenReturn(physicalUtilization); + Mockito.when(srMock.getType(connectionMock)).thenReturn(srType); + + StoragePoolInfo storagePoolInfo = citrixResourceBase.createStoragePoolInfo(connectionMock, srMock); + + Assert.assertEquals(srUuid, storagePoolInfo.getUuid()); + Assert.assertEquals(hostAddress, storagePoolInfo.getHost()); + Assert.assertEquals(srType.toUpperCase(), storagePoolInfo.getHostPath()); + Assert.assertEquals(srType.toUpperCase(), storagePoolInfo.getLocalPath()); + Assert.assertEquals(StoragePoolType.EXT, storagePoolInfo.getPoolType()); + Assert.assertEquals(srPhysicalSize, storagePoolInfo.getCapacityBytes()); + Assert.assertEquals(srPhysicalSize - physicalUtilization, storagePoolInfo.getAvailableBytes()); + } + + @Test + public void configureStorageNameAndDescriptionTest() throws XenAPIException, XmlRpcException { + String nameFormat = "Cloud Stack Local (%s) Storage Pool for %s"; + + String hostUuid = "hostUuid"; + citrixResourceBase._host.setUuid(hostUuid); + + Connection connectionMock = Mockito.mock(Connection.class); + SR srMock = Mockito.mock(SR.class); + + String srUuid = "srUuid"; + String srType = "ext"; + String expectedNameDescription = String.format(nameFormat, srType, hostUuid); + + Mockito.when(srMock.getUuid(connectionMock)).thenReturn(srUuid); + Mockito.when(srMock.getType(connectionMock)).thenReturn(srType); + + Mockito.doNothing().when(srMock).setNameLabel(connectionMock, srUuid); + Mockito.doNothing().when(srMock).setNameDescription(connectionMock, expectedNameDescription); + + citrixResourceBase.configureStorageNameAndDescription(connectionMock, srMock); + + Mockito.verify(srMock).setNameLabel(connectionMock, srUuid); + Mockito.verify(srMock).setNameDescription(connectionMock, expectedNameDescription); + } + + @Test + public void createStartUpStorageCommandTest() throws XenAPIException, XmlRpcException { + String hostUuid = "hostUUid"; + citrixResourceBase._host.setUuid(hostUuid); + citrixResourceBase._dcId = 1; + + Connection connectionMock = Mockito.mock(Connection.class); + SR srMock = Mockito.mock(SR.class); + + StoragePoolInfo storagePoolInfoMock = Mockito.mock(StoragePoolInfo.class); + + Mockito.doNothing().when(citrixResourceBase).configureStorageNameAndDescription(connectionMock, srMock); + Mockito.doReturn(storagePoolInfoMock).when(citrixResourceBase).createStoragePoolInfo(connectionMock, srMock); + + StartupStorageCommand startUpStorageCommand = citrixResourceBase.createStartUpStorageCommand(connectionMock, srMock); + + Assert.assertEquals(hostUuid, startUpStorageCommand.getGuid()); + Assert.assertEquals(storagePoolInfoMock, startUpStorageCommand.getPoolInfo()); + Assert.assertEquals(citrixResourceBase._dcId + "", startUpStorageCommand.getDataCenter()); + Assert.assertEquals(StorageResourceType.STORAGE_POOL, 
startUpStorageCommand.getResourceType()); + } + + @Test + public void initializeLocalSrTest() throws XenAPIException, XmlRpcException { + Connection connectionMock = Mockito.mock(Connection.class); + + List srsMocks = new ArrayList<>(); + SR srMock1 = Mockito.mock(SR.class); + SR srMock2 = Mockito.mock(SR.class); + + Mockito.when(srMock1.getPhysicalSize(connectionMock)).thenReturn(0l); + Mockito.when(srMock2.getPhysicalSize(connectionMock)).thenReturn(100l); + srsMocks.add(srMock1); + srsMocks.add(srMock2); + + Mockito.doReturn(srsMocks).when(citrixResourceBase).getAllLocalSrs(connectionMock); + + StartupStorageCommand startupStorageCommandMock = Mockito.mock(StartupStorageCommand.class); + Mockito.doReturn(startupStorageCommandMock).when(citrixResourceBase).createStartUpStorageCommand(Mockito.eq(connectionMock), Mockito.any(SR.class)); + + List startUpCommandsForLocalStorage = citrixResourceBase.initializeLocalSrs(connectionMock); + + Mockito.verify(citrixResourceBase, Mockito.times(0)).createStartUpStorageCommand(connectionMock, srMock1); + Mockito.verify(citrixResourceBase, Mockito.times(1)).createStartUpStorageCommand(connectionMock, srMock2); + + Assert.assertEquals(1, startUpCommandsForLocalStorage.size()); + } } diff --git a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XcpOssResourceTest.java b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XcpOssResourceTest.java index 15bd88b0db70..8f703ed4a2a8 100644 --- a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XcpOssResourceTest.java +++ b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XcpOssResourceTest.java @@ -19,6 +19,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import com.cloud.utils.exception.CloudRuntimeException; import com.xensource.xenapi.Types.XenAPIException; @@ -28,7 +29,7 @@ public class XcpOssResourceTest extends CitrixResourceBaseTest { @Before @Override public void beforeTest() throws XenAPIException, XmlRpcException { - super.citrixResourceBase = new XcpOssResource(); + super.citrixResourceBase = Mockito.spy(new XcpOssResource()); super.beforeTest(); } @@ -41,12 +42,12 @@ public void testPatchFilePath() { } @Test(expected = CloudRuntimeException.class) - public void testGetFiles(){ + public void testGetFiles() { testGetPathFilesExeption(); } @Test - public void testGetFilesListReturned(){ + public void testGetFilesListReturned() { testGetPathFilesListReturned(); } } diff --git a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XcpServerResourceTest.java b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XcpServerResourceTest.java index 6a90f234c500..f1022cf435aa 100644 --- a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XcpServerResourceTest.java +++ b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XcpServerResourceTest.java @@ -19,6 +19,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import com.cloud.utils.exception.CloudRuntimeException; import com.xensource.xenapi.Types.XenAPIException; @@ -28,7 +29,7 @@ public class XcpServerResourceTest extends CitrixResourceBaseTest { @Before @Override public void beforeTest() throws XenAPIException, XmlRpcException { - super.citrixResourceBase = new XcpServerResource(); + 
super.citrixResourceBase = Mockito.spy(new XcpServerResource()); super.beforeTest(); } @@ -41,12 +42,12 @@ public void testPatchFilePath() { } @Test(expected = CloudRuntimeException.class) - public void testGetFilesExeption(){ + public void testGetFilesExeption() { testGetPathFilesExeption(); } @Test - public void testGetFilesListReturned(){ + public void testGetFilesListReturned() { testGetPathFilesListReturned(); } } diff --git a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56FP1ResourceTest.java b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56FP1ResourceTest.java index 84df3c69f1c1..902e8fd22200 100644 --- a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56FP1ResourceTest.java +++ b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56FP1ResourceTest.java @@ -19,6 +19,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import com.cloud.utils.exception.CloudRuntimeException; import com.xensource.xenapi.Types.XenAPIException; @@ -28,7 +29,7 @@ public class XenServer56FP1ResourceTest extends CitrixResourceBaseTest { @Before @Override public void beforeTest() throws XenAPIException, XmlRpcException { - super.citrixResourceBase = new XenServer56FP1Resource(); + super.citrixResourceBase = Mockito.spy(new XenServer56FP1Resource()); super.beforeTest(); } @@ -41,12 +42,12 @@ public void testPatchFilePath() { } @Test(expected = CloudRuntimeException.class) - public void testGetFiles(){ + public void testGetFiles() { testGetPathFilesExeption(); } @Test - public void testGetFilesListReturned(){ + public void testGetFilesListReturned() { testGetPathFilesListReturned(); } } diff --git a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56ResourceTest.java b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56ResourceTest.java index 4f6c2b3f65a3..7d8d5d9b6638 100644 --- a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56ResourceTest.java +++ b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56ResourceTest.java @@ -19,6 +19,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import com.cloud.utils.exception.CloudRuntimeException; import com.xensource.xenapi.Types.XenAPIException; @@ -28,7 +29,7 @@ public class XenServer56ResourceTest extends CitrixResourceBaseTest { @Override @Before public void beforeTest() throws XenAPIException, XmlRpcException { - super.citrixResourceBase = new XenServer56Resource(); + super.citrixResourceBase = Mockito.spy(new XenServer56Resource()); super.beforeTest(); } @@ -41,12 +42,12 @@ public void testPatchFilePath() { } @Test(expected = CloudRuntimeException.class) - public void testGetFiles(){ + public void testGetFiles() { testGetPathFilesExeption(); } @Test - public void testGetFilesListReturned(){ + public void testGetFilesListReturned() { testGetPathFilesListReturned(); } } diff --git a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56SP2ResourceTest.java b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56SP2ResourceTest.java index f34c89baa8dd..65a9b474feca 100644 --- 
a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56SP2ResourceTest.java +++ b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer56SP2ResourceTest.java @@ -19,6 +19,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import com.cloud.utils.exception.CloudRuntimeException; import com.xensource.xenapi.Types.XenAPIException; @@ -28,7 +29,7 @@ public class XenServer56SP2ResourceTest extends CitrixResourceBaseTest { @Override @Before public void beforeTest() throws XenAPIException, XmlRpcException { - super.citrixResourceBase = new XenServer56SP2Resource(); + super.citrixResourceBase = Mockito.spy(new XenServer56SP2Resource()); super.beforeTest(); } diff --git a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer600ResourceTest.java b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer600ResourceTest.java index 83c2a696a599..2175884d4db3 100644 --- a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer600ResourceTest.java +++ b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer600ResourceTest.java @@ -19,6 +19,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import com.cloud.utils.exception.CloudRuntimeException; import com.xensource.xenapi.Types.XenAPIException; @@ -28,7 +29,7 @@ public class XenServer600ResourceTest extends CitrixResourceBaseTest { @Before @Override public void beforeTest() throws XenAPIException, XmlRpcException { - super.citrixResourceBase = new XenServer600Resource(); + super.citrixResourceBase = Mockito.spy(new XenServer600Resource()); super.beforeTest(); } diff --git a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer650ResourceTest.java b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer650ResourceTest.java index e5773afc534f..be4184081583 100644 --- a/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer650ResourceTest.java +++ b/plugins/hypervisors/xenserver/src/test/java/com/cloud/hypervisor/xenserver/resource/XenServer650ResourceTest.java @@ -19,6 +19,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import com.cloud.utils.exception.CloudRuntimeException; import com.xensource.xenapi.Types.XenAPIException; @@ -28,7 +29,7 @@ public class XenServer650ResourceTest extends CitrixResourceBaseTest { @Override @Before public void beforeTest() throws XenAPIException, XmlRpcException { - super.citrixResourceBase = new XenServer650Resource(); + super.citrixResourceBase = Mockito.spy(new XenServer650Resource()); super.beforeTest(); } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 56a6b77b4076..3ced3d603cca 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -696,10 +696,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServer, Configurable { public static final Logger s_logger = Logger.getLogger(ManagementServerImpl.class.getName()); - static final ConfigKey vmPasswordLength = new ConfigKey("Advanced", 
Integer.class, "vm.password.length", "6", - "Specifies the length of a randomly generated password", false); - static final ConfigKey sshKeyLength = new ConfigKey("Advanced", Integer.class, "ssh.key.length", - "2048", "Specifies custom SSH key length (bit)", true, ConfigKey.Scope.Global); + static final ConfigKey vmPasswordLength = new ConfigKey("Advanced", Integer.class, "vm.password.length", "6", "Specifies the length of a randomly generated password", false); + static final ConfigKey sshKeyLength = new ConfigKey("Advanced", Integer.class, "ssh.key.length", "2048", "Specifies custom SSH key length (bit)", true, ConfigKey.Scope.Global); @Inject public AccountManager _accountMgr; @Inject @@ -761,8 +759,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject private StorageManager _storageMgr; @Inject - private VolumeOrchestrationService _volumeMgr; - @Inject private VirtualMachineManager _itMgr; @Inject private HostPodDao _hostPodDao; @@ -1119,16 +1115,14 @@ public Pair, Integer> searchForServers(final ListHostsCmd c final Object resourceState = cmd.getResourceState(); final Object haHosts = cmd.getHaHost(); - final Pair, Integer> result = searchForServers(cmd.getStartIndex(), cmd.getPageSizeVal(), name, type, state, zoneId, pod, cluster, id, keyword, resourceState, - haHosts, null, null); + final Pair, Integer> result = searchForServers(cmd.getStartIndex(), cmd.getPageSizeVal(), name, type, state, zoneId, pod, cluster, id, keyword, resourceState, haHosts, null, + null); return new Pair, Integer>(result.first(), result.second()); } @Override - public Ternary, Integer>, List, Map> - listHostsForMigrationOfVM(final Long vmId, - final Long startIndex, - final Long pageSize, final String keyword) { + public Ternary, Integer>, List, Map> listHostsForMigrationOfVM(final Long vmId, final Long startIndex, final Long pageSize, + final String keyword) { final Account caller = getCaller(); if (!_accountMgr.isRootAdmin(caller.getId())) { if (s_logger.isDebugEnabled()) { @@ -1152,11 +1146,11 @@ public Pair, Integer> searchForServers(final ListHostsCmd c throw ex; } - if(_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) { - s_logger.info(" Live Migration of GPU enabled VM : " + vm.getInstanceName()+ " is not supported"); + if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) { + s_logger.info(" Live Migration of GPU enabled VM : " + vm.getInstanceName() + " is not supported"); // Return empty list. 
- return new Ternary, Integer>, List, Map>(new Pair, - Integer>(new ArrayList(), new Integer(0)), new ArrayList(), new HashMap()); + return new Ternary, Integer>, List, Map>(new Pair, Integer>(new ArrayList(), new Integer(0)), + new ArrayList(), new HashMap()); } if (!vm.getHypervisorType().equals(HypervisorType.XenServer) && !vm.getHypervisorType().equals(HypervisorType.VMware) && !vm.getHypervisorType().equals(HypervisorType.KVM) @@ -1217,8 +1211,8 @@ public Pair, Integer> searchForServers(final ListHostsCmd c final Map requiresStorageMotion = new HashMap(); DataCenterDeployment plan = null; if (canMigrateWithStorage) { - allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, srcHost.getDataCenterId(), null, null, null, keyword, null, null, - srcHost.getHypervisorType(), srcHost.getHypervisorVersion()); + allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, srcHost.getDataCenterId(), null, null, null, keyword, null, null, srcHost.getHypervisorType(), + srcHost.getHypervisorVersion()); allHosts = allHostsPair.first(); allHosts.remove(srcHost); @@ -1238,8 +1232,7 @@ public Pair, Integer> searchForServers(final ListHostsCmd c // because we need to create a new target volume and copy the contents of the source volume into it before deleting the // source volume. iterator.remove(); - } - else { + } else { if (hasSuitablePoolsForVolume(volume, host, vmProfile)) { requiresStorageMotion.put(host, true); } else { @@ -1247,8 +1240,7 @@ public Pair, Integer> searchForServers(final ListHostsCmd c } } } - } - else { + } else { if (storagePool.isManaged()) { if (srcHost.getClusterId() != host.getClusterId()) { // If the volume's storage pool is managed and at the zone level, then we still have to perform a storage migration @@ -1399,7 +1391,6 @@ public Pair, List> listStorag return new Pair, List>(allPools, suitablePools); } - /** * This method looks for all storage pools that are compatible with the given volume. *
    @@ -1410,11 +1401,11 @@ public Pair, List> listStorag private List getAllStoragePoolCompatileWithVolumeSourceStoragePool(StoragePool srcVolumePool) { List storagePools = new ArrayList<>(); List zoneWideStoragePools = _poolDao.findZoneWideStoragePoolsByTags(srcVolumePool.getDataCenterId(), null); - if(CollectionUtils.isNotEmpty(zoneWideStoragePools)) { + if (CollectionUtils.isNotEmpty(zoneWideStoragePools)) { storagePools.addAll(zoneWideStoragePools); } List clusterAndLocalStoragePools = _poolDao.listBy(srcVolumePool.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null); - if(CollectionUtils.isNotEmpty(clusterAndLocalStoragePools)) { + if (CollectionUtils.isNotEmpty(clusterAndLocalStoragePools)) { storagePools.addAll(clusterAndLocalStoragePools); } return storagePools; @@ -1462,9 +1453,8 @@ private List findAllSuitableStoragePoolsForVm(final VolumeVO volume return suitablePools; } - - private Pair, Integer> searchForServers(final Long startIndex, final Long pageSize, final Object name, final Object type, final Object state, final Object zone, final Object pod, final Object cluster, - final Object id, final Object keyword, final Object resourceState, final Object haHosts, final Object hypervisorType, final Object hypervisorVersion) { + private Pair, Integer> searchForServers(final Long startIndex, final Long pageSize, final Object name, final Object type, final Object state, final Object zone, final Object pod, + final Object cluster, final Object id, final Object keyword, final Object resourceState, final Object haHosts, final Object hypervisorType, final Object hypervisorVersion) { final Filter searchFilter = new Filter(HostVO.class, "id", Boolean.TRUE, startIndex, pageSize); final SearchBuilder sb = _hostDao.createSearchBuilder(); @@ -1857,10 +1847,9 @@ public Pair, Integer> searchForIPAddresses(final ListP if (isAllocated) { final Account caller = getCaller(); - final Ternary domainIdRecursiveListProject = new Ternary( - cmd.getDomainId(), cmd.isRecursive(), null); - _accountMgr.buildACLSearchParameters(caller, cmd.getId(), cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, - domainIdRecursiveListProject, cmd.listAll(), false); + final Ternary domainIdRecursiveListProject = new Ternary(cmd.getDomainId(), cmd.isRecursive(), + null); + _accountMgr.buildACLSearchParameters(caller, cmd.getId(), cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, cmd.listAll(), false); domainId = domainIdRecursiveListProject.first(); isRecursive = domainIdRecursiveListProject.second(); listProjectResourcesCriteria = domainIdRecursiveListProject.third(); @@ -2104,8 +2093,7 @@ public GuestOSHypervisor addGuestOsMapping(final AddGuestOsMappingCmd cmd) { //by this point either osTypeId or osStdType is non-empty. Find by either of them. 
ID takes preference if both are specified if (osTypeId != null) { guestOs = ApiDBUtils.findGuestOSById(osTypeId); - } - else if (osStdName != null) { + } else if (osStdName != null) { guestOs = ApiDBUtils.findGuestOSByDisplayName(osStdName); } @@ -2116,8 +2104,8 @@ else if (osStdName != null) { final GuestOSHypervisorVO duplicate = _guestOSHypervisorDao.findByOsIdAndHypervisorAndUserDefined(guestOs.getId(), hypervisorType.toString(), hypervisorVersion, true); if (duplicate != null) { - throw new InvalidParameterValueException("Mapping from hypervisor : " + hypervisorType.toString() + ", version : " + hypervisorVersion + " and guest OS : " - + guestOs.getDisplayName() + " already exists!"); + throw new InvalidParameterValueException( + "Mapping from hypervisor : " + hypervisorType.toString() + ", version : " + hypervisorVersion + " and guest OS : " + guestOs.getDisplayName() + " already exists!"); } final GuestOSHypervisorVO guestOsMapping = new GuestOSHypervisorVO(); guestOsMapping.setGuestOsId(guestOs.getId()); @@ -2161,10 +2149,10 @@ public GuestOS addGuestOs(final AddGuestOsCmd cmd) { guestOsVo.setIsUserDefined(true); final GuestOS guestOsPersisted = _guestOSDao.persist(guestOsVo); - if(cmd.getDetails() != null && !cmd.getDetails().isEmpty()){ + if (cmd.getDetails() != null && !cmd.getDetails().isEmpty()) { Map detailsMap = cmd.getDetails(); - for(Object key: detailsMap.keySet()){ - _guestOsDetailsDao.addDetail(guestOsPersisted.getId(),(String) key,detailsMap.get(key), false); + for (Object key : detailsMap.keySet()) { + _guestOsDetailsDao.addDetail(guestOsPersisted.getId(), (String)key, detailsMap.get(key), false); } } @@ -2194,10 +2182,10 @@ public GuestOS updateGuestOs(final UpdateGuestOsCmd cmd) { throw new InvalidParameterValueException("Unable to modify system defined guest OS"); } - if(cmd.getDetails() != null && !cmd.getDetails().isEmpty()){ + if (cmd.getDetails() != null && !cmd.getDetails().isEmpty()) { Map detailsMap = cmd.getDetails(); - for(Object key: detailsMap.keySet()){ - _guestOsDetailsDao.addDetail(id,(String) key,detailsMap.get(key), false); + for (Object key : detailsMap.keySet()) { + _guestOsDetailsDao.addDetail(id, (String)key, detailsMap.get(key), false); } } @@ -2208,7 +2196,7 @@ public GuestOS updateGuestOs(final UpdateGuestOsCmd cmd) { //Check if another Guest OS by same name exists final GuestOS duplicate = ApiDBUtils.findGuestOSByDisplayName(displayName); - if(duplicate != null) { + if (duplicate != null) { throw new InvalidParameterValueException("The specified Guest OS name : " + displayName + " already exists. 
Please specify a unique guest OS name"); } final GuestOSVO guestOs = _guestOSDao.createForUpdate(id); @@ -2344,25 +2332,6 @@ public Pair getVncPort(final VirtualMachine vm) { return new Pair(null, -1); } - private String getUpdatedDomainPath(final String oldPath, final String newName) { - final String[] tokenizedPath = oldPath.split("/"); - tokenizedPath[tokenizedPath.length - 1] = newName; - final StringBuilder finalPath = new StringBuilder(); - for (final String token : tokenizedPath) { - finalPath.append(token); - finalPath.append("/"); - } - return finalPath.toString(); - } - - private void updateDomainChildren(final DomainVO domain, final String updatedDomainPrefix) { - final List domainChildren = _domainDao.findAllChildren(domain.getPath(), domain.getId()); - // for each child, update the path - for (final DomainVO dom : domainChildren) { - dom.setPath(dom.getPath().replaceFirst(domain.getPath(), updatedDomainPrefix)); - _domainDao.update(dom.getId(), dom); - } - } @Override public Pair, Integer> searchForAlerts(final ListAlertsCmd cmd) { @@ -2498,23 +2467,23 @@ List getStorageUsed(Long clusterId, Long podId, Long zoneId, Int if (zone == null || zone.getAllocationState() == AllocationState.Disabled) { return null; } - List capacities=new ArrayList(); + List capacities = new ArrayList(); capacities.add(_storageMgr.getSecondaryStorageUsedStats(null, zoneId)); - capacities.add(_storageMgr.getStoragePoolUsedStats(null,clusterId, podId, zoneId)); + capacities.add(_storageMgr.getStoragePoolUsedStats(null, clusterId, podId, zoneId)); for (CapacityVO capacity : capacities) { if (capacity.getTotalCapacity() != 0) { capacity.setUsedPercentage((float)capacity.getUsedCapacity() / capacity.getTotalCapacity()); } else { capacity.setUsedPercentage(0); } - final SummedCapacity summedCapacity = new SummedCapacity(capacity.getUsedCapacity(), capacity.getTotalCapacity(), capacity.getUsedPercentage(), - capacity.getCapacityType(), capacity.getDataCenterId(), capacity.getPodId(), capacity.getClusterId()); + final SummedCapacity summedCapacity = new SummedCapacity(capacity.getUsedCapacity(), capacity.getTotalCapacity(), capacity.getUsedPercentage(), capacity.getCapacityType(), + capacity.getDataCenterId(), capacity.getPodId(), capacity.getClusterId()); list.add(summedCapacity); } } else { List dcList = _dcDao.listEnabledZones(); for (DataCenterVO dc : dcList) { - List capacities=new ArrayList(); + List capacities = new ArrayList(); capacities.add(_storageMgr.getSecondaryStorageUsedStats(null, dc.getId())); capacities.add(_storageMgr.getStoragePoolUsedStats(null, null, null, dc.getId())); for (CapacityVO capacity : capacities) { @@ -2523,8 +2492,8 @@ List getStorageUsed(Long clusterId, Long podId, Long zoneId, Int } else { capacity.setUsedPercentage(0); } - SummedCapacity summedCapacity = new SummedCapacity(capacity.getUsedCapacity(), capacity.getTotalCapacity(), capacity.getUsedPercentage(), - capacity.getCapacityType(), capacity.getDataCenterId(), capacity.getPodId(), capacity.getClusterId()); + SummedCapacity summedCapacity = new SummedCapacity(capacity.getUsedCapacity(), capacity.getTotalCapacity(), capacity.getUsedPercentage(), capacity.getCapacityType(), + capacity.getDataCenterId(), capacity.getPodId(), capacity.getClusterId()); list.add(summedCapacity); } }// End of for @@ -2552,8 +2521,8 @@ public List listCapacities(final ListCapacityCmd cmd) { final List capacities = new ArrayList(); for (final SummedCapacity summedCapacity : summedCapacities) { - final CapacityVO capacity = new CapacityVO(null, 
summedCapacity.getDataCenterId(),summedCapacity.getPodId(), summedCapacity.getClusterId(), summedCapacity.getUsedCapacity() - + summedCapacity.getReservedCapacity(), summedCapacity.getTotalCapacity(), summedCapacity.getCapacityType()); + final CapacityVO capacity = new CapacityVO(null, summedCapacity.getDataCenterId(), summedCapacity.getPodId(), summedCapacity.getClusterId(), + summedCapacity.getUsedCapacity() + summedCapacity.getReservedCapacity(), summedCapacity.getTotalCapacity(), summedCapacity.getCapacityType()); capacity.setAllocatedCapacity(summedCapacity.getAllocatedCapacity()); capacities.add(capacity); } @@ -3164,8 +3133,8 @@ private SecondaryStorageVmVO startSecondaryStorageVm(final long instanceId) { return _secStorageVmMgr.startSecStorageVm(instanceId); } - private SecondaryStorageVmVO stopSecondaryStorageVm(final VMInstanceVO systemVm, final boolean isForced) throws ResourceUnavailableException, OperationTimedoutException, - ConcurrentOperationException { + private SecondaryStorageVmVO stopSecondaryStorageVm(final VMInstanceVO systemVm, final boolean isForced) + throws ResourceUnavailableException, OperationTimedoutException, ConcurrentOperationException { _itMgr.advanceStop(systemVm.getUuid(), isForced); return _secStorageVmDao.findById(systemVm.getId()); @@ -3567,9 +3536,9 @@ public String uploadCertificate(final UploadCustomCertificateCmd cmd) { } _consoleProxyMgr.setManagementState(ConsoleProxyManagementState.ResetSuspending); - return "Certificate has been successfully updated, if its the server certificate we would reboot all " + - "running console proxy VMs and secondary storage VMs to propagate the new certificate, " + - "please give a few minutes for console access and storage services service to be up and working again"; + return "Certificate has been successfully updated, if its the server certificate we would reboot all " + + "running console proxy VMs and secondary storage VMs to propagate the new certificate, " + + "please give a few minutes for console access and storage services service to be up and working again"; } @Override @@ -3652,8 +3621,8 @@ public boolean deleteSSHKeyPair(final DeleteSSHKeyPairCmd cmd) { final SSHKeyPairVO s = _sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), cmd.getName()); if (s == null) { - final InvalidParameterValueException ex = new InvalidParameterValueException("A key pair with name '" + cmd.getName() + "' does not exist for account " - + owner.getAccountName() + " in specified domain id"); + final InvalidParameterValueException ex = new InvalidParameterValueException( + "A key pair with name '" + cmd.getName() + "' does not exist for account " + owner.getAccountName() + " in specified domain id"); final DomainVO domain = ApiDBUtils.findDomainById(owner.getDomainId()); String domainUuid = String.valueOf(owner.getDomainId()); if (domain != null) { @@ -3674,10 +3643,8 @@ public Pair, Integer> listSSHKeyPairs(final ListSSHKe final Account caller = getCaller(); final List permittedAccounts = new ArrayList(); - final Ternary domainIdRecursiveListProject = new Ternary( - cmd.getDomainId(), cmd.isRecursive(), null); - _accountMgr.buildACLSearchParameters(caller, null, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, domainIdRecursiveListProject, - cmd.listAll(), false); + final Ternary domainIdRecursiveListProject = new Ternary(cmd.getDomainId(), cmd.isRecursive(), null); + _accountMgr.buildACLSearchParameters(caller, null, cmd.getAccountName(), cmd.getProjectId(), permittedAccounts, 
domainIdRecursiveListProject, cmd.listAll(), false); final Long domainId = domainIdRecursiveListProject.first(); final Boolean isRecursive = domainIdRecursiveListProject.second(); final ListProjectResourcesCriteria listProjectResourcesCriteria = domainIdRecursiveListProject.third(); @@ -3814,8 +3781,8 @@ public String getVMPassword(final GetVMPasswordCmd cmd) { _userVmDao.loadDetails(vm); final String password = vm.getDetail("Encrypted.Password"); if (password == null || password.equals("")) { - final InvalidParameterValueException ex = new InvalidParameterValueException("No password for VM with specified id found. " - + "If VM is created from password enabled template and SSH keypair is assigned to VM then only password can be retrieved."); + final InvalidParameterValueException ex = new InvalidParameterValueException( + "No password for VM with specified id found. " + "If VM is created from password enabled template and SSH keypair is assigned to VM then only password can be retrieved."); ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } @@ -3986,8 +3953,7 @@ public HypervisorCapabilities updateHypervisorCapabilities(final Long id, final @Override @ActionEvent(eventType = EventTypes.EVENT_VM_UPGRADE, eventDescription = "Upgrading system VM", async = true) - public VirtualMachine upgradeSystemVM(final ScaleSystemVMCmd cmd) throws ResourceUnavailableException, ManagementServerException, VirtualMachineMigrationException, - ConcurrentOperationException { + public VirtualMachine upgradeSystemVM(final ScaleSystemVMCmd cmd) throws ResourceUnavailableException, ManagementServerException, VirtualMachineMigrationException, ConcurrentOperationException { final VMInstanceVO vmInstance = _vmInstanceDao.findById(cmd.getId()); if (vmInstance.getHypervisorType() == HypervisorType.XenServer && vmInstance.getState().equals(State.Running)) { @@ -4052,7 +4018,7 @@ private void enableAdminUser(final String password) { String encodedPassword = null; final UserVO adminUser = _userDao.getUser(2); - if (adminUser == null) { + if (adminUser == null) { final String msg = "CANNOT find admin user"; s_logger.error(msg); throw new CloudRuntimeException(msg); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 724ca8a47459..a179f8d65c98 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -40,10 +40,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; @@ -64,11 +60,13 @@ import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.engine.subsystem.api.storage.ImageStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService; @@ -82,8 +80,8 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; @@ -98,6 +96,8 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -476,8 +476,8 @@ public boolean configure(String name, Map params) throws Configu _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _storagePoolDao, _dataStoreProviderMgr), true, false, true); - s_logger.info("Storage cleanup enabled: " + StorageCleanupEnabled.value() + ", interval: " + StorageCleanupInterval.value() + ", delay: " + StorageCleanupDelay.value() + - ", template cleanup enabled: " + TemplateCleanupEnabled.value()); + s_logger.info("Storage cleanup enabled: " + StorageCleanupEnabled.value() + ", interval: " + StorageCleanupInterval.value() + ", delay: " + StorageCleanupDelay.value() + + ", template cleanup enabled: " + TemplateCleanupEnabled.value()); String cleanupInterval = configs.get("extract.url.cleanup.interval"); _downloadUrlCleanupInterval = NumbersUtil.parseInt(cleanupInterval, 7200); @@ -586,8 +586,7 @@ public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws Con } if (pool == null) { //the path can be different, but if they have the same uuid, assume they are the same storage - pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, null, - pInfo.getUuid()); + pool = _storagePoolDao.findPoolByHostPath(host.getDataCenterId(), host.getPodId(), hostAddress, null, pInfo.getUuid()); if (pool != null) { s_logger.debug("Found a storage pool: " + pInfo.getUuid() + ", but with different hostpath " + pInfo.getHostPath() + ", still treat it as the same pool"); } @@ -597,7 +596,7 @@ public DataStore createLocalStorage(Host host, StoragePoolInfo pInfo) throws Con DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); if (pool == null) { Map params = new HashMap(); - String name = (host.getName() + " Local Storage"); + String name = createLocalStoragePoolName(host, pInfo); params.put("zoneId", host.getDataCenterId()); params.put("clusterId", host.getClusterId()); params.put("podId", host.getPodId()); @@ -627,9 +626,16 @@ public DataStore 
createLocalStorage(Host host, StoragePoolInfo pInfo) throws Con return _dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary); } + /** + * Creates the local storage pool name. + * The name will follow the pattern: -local- + */ + protected String createLocalStoragePoolName(Host host, StoragePoolInfo storagePoolInformation) { + return String.format("%s-%s-%s", org.apache.commons.lang3.StringUtils.trim(host.getName()), "local", storagePoolInformation.getUuid().split("-")[0]); + } + @Override - public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, - ResourceUnavailableException { + public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws ResourceInUseException, IllegalArgumentException, UnknownHostException, ResourceUnavailableException { String providerName = cmd.getStorageProviderName(); DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName); @@ -675,7 +681,8 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws Resource } else { throw new InvalidParameterValueException("Missing parameter hypervisor. Hypervisor type is required to create zone wide primary storage."); } - if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Hyperv && hypervisorType != HypervisorType.LXC && hypervisorType != HypervisorType.Any) { + if (hypervisorType != HypervisorType.KVM && hypervisorType != HypervisorType.VMware && hypervisorType != HypervisorType.Hyperv && hypervisorType != HypervisorType.LXC + && hypervisorType != HypervisorType.Any) { throw new InvalidParameterValueException("zone wide storage pool is not supported for hypervisor type " + hypervisor); } } @@ -716,7 +723,7 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws Resource lifeCycle.attachZone(store, zoneScope, hypervisorType); } } catch (Exception e) { - s_logger.debug("Failed to add data store: "+e.getMessage(), e); + s_logger.debug("Failed to add data store: " + e.getMessage(), e); try { // clean up the db, just absorb the exception thrown in deletion with error logged, so that user can get error for adding data store // not deleting data store. @@ -726,7 +733,7 @@ public PrimaryDataStoreInfo createPool(CreateStoragePoolCmd cmd) throws Resource } catch (Exception ex) { s_logger.debug("Failed to clean up storage pool: " + ex.getMessage()); } - throw new CloudRuntimeException("Failed to add data store: "+e.getMessage(), e); + throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e); } return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary); @@ -752,8 +759,7 @@ private Map extractApiParamAsMap(Map ds) { @ActionEvent(eventType = EventTypes.EVENT_DISABLE_PRIMARY_STORAGE, eventDescription = "disable storage pool") private void disablePrimaryStoragePool(StoragePoolVO primaryStorage) { if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up)) { - throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be disabled. Storage pool state : " + - primaryStorage.getStatus().toString()); + throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be disabled. 
Storage pool state : " + primaryStorage.getStatus().toString()); } DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); @@ -765,8 +771,7 @@ private void disablePrimaryStoragePool(StoragePoolVO primaryStorage) { @ActionEvent(eventType = EventTypes.EVENT_ENABLE_PRIMARY_STORAGE, eventDescription = "enable storage pool") private void enablePrimaryStoragePool(StoragePoolVO primaryStorage) { if (!primaryStorage.getStatus().equals(StoragePoolStatus.Disabled)) { - throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be enabled. Storage pool state : " + - primaryStorage.getStatus().toString()); + throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be enabled. Storage pool state : " + primaryStorage.getStatus().toString()); } DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); @@ -863,8 +868,7 @@ public void removeStoragePoolFromCluster(long hostId, String iScsiName, StorageP final Answer answer = _agentMgr.easySend(hostId, cmd); if (answer == null || !answer.getResult()) { - String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + - (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); s_logger.error(errMsg); @@ -969,8 +973,7 @@ public void createCapacityEntry(StoragePoolVO storagePool, short capacityType, l // All this is for the inaccuracy of floats for big number multiplication. BigDecimal overProvFactor = getStorageOverProvisioningFactor(storagePool.getId()); totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(storagePool.getCapacityBytes())).longValue(); - s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor " - + overProvFactor.toString()); + s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + storagePool.getCapacityBytes()); } else { s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString()); @@ -996,19 +999,18 @@ public void createCapacityEntry(StoragePoolVO storagePool, short capacityType, l if (storagePool.getScope() == ScopeType.HOST) { List stoargePoolHostVO = _storagePoolHostDao.listByPoolId(storagePool.getId()); - if(stoargePoolHostVO != null && !stoargePoolHostVO.isEmpty()){ + if (stoargePoolHostVO != null && !stoargePoolHostVO.isEmpty()) { HostVO host = _hostDao.findById(stoargePoolHostVO.get(0).getHostId()); - if(host != null){ + if (host != null) { capacityState = (host.getResourceState() == ResourceState.Disabled) ? 
CapacityState.Disabled : CapacityState.Enabled; } } } if (capacities.size() == 0) { - CapacityVO capacity = - new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), storagePool.getClusterId(), allocated, totalOverProvCapacity, - capacityType); + CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), storagePool.getClusterId(), allocated, totalOverProvCapacity, + capacityType); capacity.setCapacityState(capacityState); _capacityDao.persist(capacity); } else { @@ -1020,8 +1022,8 @@ public void createCapacityEntry(StoragePoolVO storagePool, short capacityType, l _capacityDao.update(capacity.getId(), capacity); } } - s_logger.debug("Successfully set Capacity - " + totalOverProvCapacity + " for capacity type - " + capacityType + " , DataCenterId - " + - storagePool.getDataCenterId() + ", HostOrPoolId - " + storagePool.getId() + ", PodId " + storagePool.getPodId()); + s_logger.debug("Successfully set Capacity - " + totalOverProvCapacity + " for capacity type - " + capacityType + " , DataCenterId - " + storagePool.getDataCenterId() + ", HostOrPoolId - " + + storagePool.getId() + ", PodId " + storagePool.getPodId()); } @Override @@ -1049,8 +1051,7 @@ public Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirs hostIds.removeAll(hostIdsToAvoid); } if (hostIds == null || hostIds.isEmpty()) { - throw new StorageUnavailableException("Unable to send command to the pool " + pool.getId() + " due to there is no enabled hosts up in this cluster", - pool.getId()); + throw new StorageUnavailableException("Unable to send command to the pool " + pool.getId() + " due to there is no enabled hosts up in this cluster", pool.getId()); } for (Long hostId : hostIds) { try { @@ -1092,20 +1093,19 @@ public void cleanupStorage(boolean recurring) { try { List unusedTemplatesInPool = _tmpltMgr.getUnusedTemplatesInPool(pool); - s_logger.debug("Storage pool garbage collector found " + unusedTemplatesInPool.size() + " templates to clean up in storage pool: " + - pool.getName()); + s_logger.debug("Storage pool garbage collector found " + unusedTemplatesInPool.size() + " templates to clean up in storage pool: " + pool.getName()); for (VMTemplateStoragePoolVO templatePoolVO : unusedTemplatesInPool) { if (templatePoolVO.getDownloadState() != VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { - s_logger.debug("Storage pool garbage collector is skipping template with ID: " + templatePoolVO.getTemplateId() + - " on pool " + templatePoolVO.getPoolId() + " because it is not completely downloaded."); + s_logger.debug("Storage pool garbage collector is skipping template with ID: " + templatePoolVO.getTemplateId() + " on pool " + templatePoolVO.getPoolId() + + " because it is not completely downloaded."); continue; } if (!templatePoolVO.getMarkedForGC()) { templatePoolVO.setMarkedForGC(true); _vmTemplatePoolDao.update(templatePoolVO.getId(), templatePoolVO); - s_logger.debug("Storage pool garbage collector has marked template with ID: " + templatePoolVO.getTemplateId() + - " on pool " + templatePoolVO.getPoolId() + " for garbage collection."); + s_logger.debug("Storage pool garbage collector has marked template with ID: " + templatePoolVO.getTemplateId() + " on pool " + templatePoolVO.getPoolId() + + " for garbage collection."); continue; } @@ -1118,8 +1118,8 @@ public void cleanupStorage(boolean recurring) { } //destroy snapshots in destroying state in snapshot_store_ref - List ssSnapshots = 
_snapshotStoreDao.listByState(ObjectInDataStoreStateMachine.State.Destroying); - for(SnapshotDataStoreVO ssSnapshotVO : ssSnapshots){ + List ssSnapshots = _snapshotStoreDao.listByState(ObjectInDataStoreStateMachine.State.Destroying); + for (SnapshotDataStoreVO ssSnapshotVO : ssSnapshots) { try { _snapshotService.deleteSnapshot(snapshotFactory.getSnapshot(ssSnapshotVO.getSnapshotId(), DataStoreRole.Image)); } catch (Exception e) { @@ -1129,7 +1129,7 @@ public void cleanupStorage(boolean recurring) { cleanupSecondaryStorage(recurring); - List vols = _volsDao.listVolumesToBeDestroyed(new Date(System.currentTimeMillis() - ((long) StorageCleanupDelay.value() << 10))); + List vols = _volsDao.listVolumesToBeDestroyed(new Date(System.currentTimeMillis() - ((long)StorageCleanupDelay.value() << 10))); for (VolumeVO vol : vols) { try { // If this fails, just log a warning. It's ideal if we clean up the host-side clustered file @@ -1290,8 +1290,7 @@ private void handleManagedStorage(Volume volume) { if (cluster.getHypervisorType() == HypervisorType.KVM) { volService.revokeAccess(volumeInfo, host, volumeInfo.getDataStore()); - } - else { + } else { DataTO volTO = volFactory.getVolume(volume.getId()).getTO(); DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType()); @@ -1371,8 +1370,7 @@ public void cleanupSecondaryStorage(boolean recurring) { try { long storeId = store.getId(); List destroyedTemplateStoreVOs = _templateStoreDao.listDestroyed(storeId); - s_logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size() + - " templates to cleanup on template_store_ref for store: " + store.getName()); + s_logger.debug("Secondary storage garbage collector found " + destroyedTemplateStoreVOs.size() + " templates to cleanup on template_store_ref for store: " + store.getName()); for (TemplateDataStoreVO destroyedTemplateStoreVO : destroyedTemplateStoreVOs) { if (s_logger.isDebugEnabled()) { s_logger.debug("Deleting template store DB entry: " + destroyedTemplateStoreVO); @@ -1388,8 +1386,7 @@ public void cleanupSecondaryStorage(boolean recurring) { for (DataStore store : imageStores) { try { List destroyedSnapshotStoreVOs = _snapshotStoreDao.listDestroyed(store.getId()); - s_logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size() + - " snapshots to cleanup on snapshot_store_ref for store: " + store.getName()); + s_logger.debug("Secondary storage garbage collector found " + destroyedSnapshotStoreVOs.size() + " snapshots to cleanup on snapshot_store_ref for store: " + store.getName()); for (SnapshotDataStoreVO destroyedSnapshotStoreVO : destroyedSnapshotStoreVOs) { // check if this snapshot has child SnapshotInfo snap = snapshotFactory.getSnapshot(destroyedSnapshotStoreVO.getSnapshotId(), store); @@ -1420,8 +1417,7 @@ public void cleanupSecondaryStorage(boolean recurring) { for (DataStore store : imageStores) { try { List destroyedStoreVOs = _volumeStoreDao.listDestroyed(store.getId()); - s_logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " + - store.getName()); + s_logger.debug("Secondary storage garbage collector found " + destroyedStoreVOs.size() + " volumes to cleanup on volume_store_ref for store: " + store.getName()); for (VolumeDataStoreVO destroyedStoreVO : destroyedStoreVOs) { if (s_logger.isDebugEnabled()) { s_logger.debug("Deleting volume store DB entry: " + destroyedStoreVO); @@ -1463,8 +1459,7 @@ public 
PrimaryDataStoreInfo preparePrimaryStorageForMaintenance(Long primaryStor } if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up) && !primaryStorage.getStatus().equals(StoragePoolStatus.ErrorInMaintenance)) { - throw new InvalidParameterValueException("Primary storage with id " + primaryStorageId + " is not ready for migration, as the status is:" + - primaryStorage.getStatus().toString()); + throw new InvalidParameterValueException("Primary storage with id " + primaryStorageId + " is not ready for migration, as the status is:" + primaryStorage.getStatus().toString()); } DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); @@ -1490,8 +1485,8 @@ public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance(CancelPrimaryStor } if (primaryStorage.getStatus().equals(StoragePoolStatus.Up) || primaryStorage.getStatus().equals(StoragePoolStatus.PrepareForMaintenance)) { - throw new StorageUnavailableException("Primary storage with id " + primaryStorageId + " is not ready to complete migration, as the status is:" + - primaryStorage.getStatus().toString(), primaryStorageId); + throw new StorageUnavailableException("Primary storage with id " + primaryStorageId + " is not ready to complete migration, as the status is:" + primaryStorage.getStatus().toString(), + primaryStorageId); } DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()); @@ -1502,7 +1497,6 @@ public PrimaryDataStoreInfo cancelPrimaryStorageForMaintenance(CancelPrimaryStor return (PrimaryDataStoreInfo)_dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary); } - protected class StorageGarbageCollector extends ManagedContextRunnable { public StorageGarbageCollector() { @@ -1523,8 +1517,6 @@ protected void runInContext() { @Override public void onManagementNodeJoined(List nodeList, long selfNodeId) { - // TODO Auto-generated method stub - } @Override @@ -1537,9 +1529,8 @@ public void onManagementNodeLeft(List nodeList, for (Long poolId : poolIds) { StoragePoolVO pool = _storagePoolDao.findById(poolId); // check if pool is in an inconsistent state - if (pool != null && - (pool.getStatus().equals(StoragePoolStatus.ErrorInMaintenance) || pool.getStatus().equals(StoragePoolStatus.PrepareForMaintenance) || pool.getStatus() - .equals(StoragePoolStatus.CancelMaintenance))) { + if (pool != null && (pool.getStatus().equals(StoragePoolStatus.ErrorInMaintenance) || pool.getStatus().equals(StoragePoolStatus.PrepareForMaintenance) + || pool.getStatus().equals(StoragePoolStatus.CancelMaintenance))) { _storagePoolWorkDao.removePendingJobsOnMsRestart(vo.getMsid(), poolId); pool.setStatus(StoragePoolStatus.ErrorInMaintenance); _storagePoolDao.update(poolId, pool); @@ -1739,13 +1730,13 @@ private boolean checkUsagedSpace(StoragePool pool) { if (stats != null) { double usedPercentage = ((double)stats.getByteUsed() / (double)totalSize); if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + stats.getByteUsed() + - ", usedPct: " + usedPercentage + ", disable threshold: " + storageUsedThreshold); + s_logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + stats.getByteUsed() + ", usedPct: " + usedPercentage + + ", disable threshold: " + storageUsedThreshold); } if (usedPercentage >= storageUsedThreshold) { if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " + usedPercentage + - " has crossed the pool.storage.capacity.disablethreshold: " + storageUsedThreshold); + s_logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " + usedPercentage + " has crossed the pool.storage.capacity.disablethreshold: " + + storageUsedThreshold); } return false; } @@ -1803,7 +1794,7 @@ public boolean storagePoolHasEnoughSpace(List volumes, StoragePool pool, } // allocated space includes templates - if(s_logger.isDebugEnabled()) { + if (s_logger.isDebugEnabled()) { s_logger.debug("Destination pool id: " + pool.getId()); } StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId()); @@ -1860,8 +1851,7 @@ public boolean storagePoolHasEnoughSpace(List volumes, StoragePool pool, if (pool.getPoolType().supportsOverProvisioning()) { BigDecimal overProvFactor = getStorageOverProvisioningFactor(pool.getId()); totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue(); - s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor " - + overProvFactor.toString()); + s_logger.debug("Found storage pool " + poolVO.getName() + " of type " + pool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString()); s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + pool.getCapacityBytes()); } else { totalOverProvCapacity = pool.getCapacityBytes(); @@ -1871,26 +1861,23 @@ public boolean storagePoolHasEnoughSpace(List volumes, StoragePool pool, s_logger.debug("Total capacity of the pool " + poolVO.getName() + " id: " + pool.getId() + " is " + totalOverProvCapacity); double storageAllocatedThreshold = CapacityManager.StorageAllocatedCapacityDisableThreshold.valueIn(pool.getDataCenterId()); if (s_logger.isDebugEnabled()) { - s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity + - ", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " + - storageAllocatedThreshold); + s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + + allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " + storageAllocatedThreshold); } double usedPercentage = (allocatedSizeWithTemplate + totalAskingSize) / (double)(totalOverProvCapacity); if (usedPercentage > storageAllocatedThreshold) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + - " since its allocated percentage: " + usedPercentage + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + - storageAllocatedThreshold + ", skipping this pool"); + s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + " since its allocated percentage: " + usedPercentage + + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + storageAllocatedThreshold + ", skipping this pool"); } return false; } if (totalOverProvCapacity < (allocatedSizeWithTemplate + totalAskingSize)) { if (s_logger.isDebugEnabled()) { - 
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + - ", not enough storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " + - totalAskingSize); + s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + ", not enough storage, maxSize : " + totalOverProvCapacity + + ", totalAllocatedSize : " + allocatedSizeWithTemplate + ", askingSize : " + totalAskingSize); } return false; } @@ -1915,21 +1902,6 @@ private HypervisorType getHypervisorType(Volume volume) { return null; } - private long getDataObjectSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) { - DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); - DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); - - if (storeDriver instanceof PrimaryDataStoreDriver) { - PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver; - - VolumeInfo volumeInfo = volFactory.getVolume(volume.getId()); - - return primaryStoreDriver.getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, pool); - } - - return volume.getSize(); - } - private long getBytesRequiredForTemplate(VMTemplateVO tmpl, StoragePool pool) { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); @@ -1976,8 +1948,7 @@ public String getName() { } @Override - public ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, DiscoveryException, - InvalidParameterValueException { + public ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName); if (storeProvider == null) { @@ -2025,16 +1996,14 @@ public ImageStore discoverImageStore(String name, String url, String providerNam } Account account = CallContext.current().getCallingAccount(); - if (Grouping.AllocationState.Disabled == zone.getAllocationState() - && !_accountMgr.isRootAdmin(account.getId())) { - PermissionDeniedException ex = new PermissionDeniedException( - "Cannot perform this operation, Zone with specified id is currently disabled"); + if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) { + PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation, Zone with specified id is currently disabled"); ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; } } - Map params = new HashMap(); + Map params = new HashMap<>(); params.put("zoneId", zoneId); params.put("url", url); params.put("name", name); @@ -2049,7 +2018,7 @@ public ImageStore discoverImageStore(String name, String url, String providerNam try { store = lifeCycle.initialize(params); } catch (Exception e) { - if(s_logger.isDebugEnabled()) { + if (s_logger.isDebugEnabled()) { s_logger.debug("Failed to add data store: " + e.getMessage(), e); } throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e); @@ -2075,8 +2044,7 @@ public ImageStore discoverImageStore(String name, String url, String providerNam } @Override - public 
ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws IllegalArgumentException, DiscoveryException, - InvalidParameterValueException { + public ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException { // check if current cloud is ready to migrate, we only support cloud with only NFS secondary storages List imgStores = _imageStoreDao.listImageStores(); List nfsStores = new ArrayList(); @@ -2230,8 +2198,7 @@ public ImageStore createSecondaryStagingStore(CreateSecondaryStagingStoreCmd cmd Account account = CallContext.current().getCallingAccount(); if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(account.getId())) { - PermissionDeniedException ex = new PermissionDeniedException( - "Cannot perform this operation, Zone with specified id is currently disabled"); + PermissionDeniedException ex = new PermissionDeniedException("Cannot perform this operation, Zone with specified id is currently disabled"); ex.addProxyObject(zone.getUuid(), "dcId"); throw ex; } @@ -2250,8 +2217,8 @@ public ImageStore createSecondaryStagingStore(CreateSecondaryStagingStoreCmd cmd try { store = lifeCycle.initialize(params); } catch (Exception e) { - s_logger.debug("Failed to add data store: "+e.getMessage(), e); - throw new CloudRuntimeException("Failed to add data store: "+e.getMessage(), e); + s_logger.debug("Failed to add data store: " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to add data store: " + e.getMessage(), e); } return (ImageStore)_dataStoreMgr.getDataStore(store.getId(), DataStoreRole.ImageCache); @@ -2320,18 +2287,18 @@ public void run() { } @Override - public void cleanupDownloadUrls(){ + public void cleanupDownloadUrls() { // Cleanup expired volume URLs List volumesOnImageStoreList = _volumeStoreDao.listVolumeDownloadUrls(); HashSet expiredVolumeIds = new HashSet(); HashSet activeVolumeIds = new HashSet(); - for(VolumeDataStoreVO volumeOnImageStore : volumesOnImageStoreList){ + for (VolumeDataStoreVO volumeOnImageStore : volumesOnImageStoreList) { long volumeId = volumeOnImageStore.getVolumeId(); try { long downloadUrlCurrentAgeInSecs = DateUtil.getTimeDifference(DateUtil.now(), volumeOnImageStore.getExtractUrlCreated()); - if(downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval){ // URL hasnt expired yet + if (downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval) { // URL hasnt expired yet activeVolumeIds.add(volumeId); continue; } @@ -2339,19 +2306,17 @@ public void cleanupDownloadUrls(){ s_logger.debug("Removing download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeId); // Remove it from image store - ImageStoreEntity secStore = (ImageStoreEntity) _dataStoreMgr.getDataStore(volumeOnImageStore.getDataStoreId(), DataStoreRole.Image); + ImageStoreEntity secStore = (ImageStoreEntity)_dataStoreMgr.getDataStore(volumeOnImageStore.getDataStoreId(), DataStoreRole.Image); secStore.deleteExtractUrl(volumeOnImageStore.getInstallPath(), volumeOnImageStore.getExtractUrl(), Upload.Type.VOLUME); - // Now expunge it from DB since this entry was created only for download purpose + // Now expunge it from DB since this entry was created only for download purpose _volumeStoreDao.expunge(volumeOnImageStore.getId()); - }catch(Throwable th){ - s_logger.warn("Caught exception while deleting download url " +volumeOnImageStore.getExtractUrl() + - " for 
volume id " + volumeOnImageStore.getVolumeId(), th); + } catch (Throwable th) { + s_logger.warn("Caught exception while deleting download url " + volumeOnImageStore.getExtractUrl() + " for volume id " + volumeOnImageStore.getVolumeId(), th); } } - for(Long volumeId : expiredVolumeIds) - { - if(activeVolumeIds.contains(volumeId)) { + for (Long volumeId : expiredVolumeIds) { + if (activeVolumeIds.contains(volumeId)) { continue; } Volume volume = _volumeDao.findById(volumeId); @@ -2362,27 +2327,26 @@ public void cleanupDownloadUrls(){ // Cleanup expired template URLs List templatesOnImageStoreList = _templateStoreDao.listTemplateDownloadUrls(); - for(TemplateDataStoreVO templateOnImageStore : templatesOnImageStoreList){ + for (TemplateDataStoreVO templateOnImageStore : templatesOnImageStoreList) { try { long downloadUrlCurrentAgeInSecs = DateUtil.getTimeDifference(DateUtil.now(), templateOnImageStore.getExtractUrlCreated()); - if(downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval){ // URL hasnt expired yet + if (downloadUrlCurrentAgeInSecs < _downloadUrlExpirationInterval) { // URL hasnt expired yet continue; } s_logger.debug("Removing download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId()); // Remove it from image store - ImageStoreEntity secStore = (ImageStoreEntity) _dataStoreMgr.getDataStore(templateOnImageStore.getDataStoreId(), DataStoreRole.Image); + ImageStoreEntity secStore = (ImageStoreEntity)_dataStoreMgr.getDataStore(templateOnImageStore.getDataStoreId(), DataStoreRole.Image); secStore.deleteExtractUrl(templateOnImageStore.getInstallPath(), templateOnImageStore.getExtractUrl(), Upload.Type.TEMPLATE); // Now remove download details from DB. templateOnImageStore.setExtractUrl(null); templateOnImageStore.setExtractUrlCreated(null); _templateStoreDao.update(templateOnImageStore.getId(), templateOnImageStore); - }catch(Throwable th){ - s_logger.warn("caught exception while deleting download url " +templateOnImageStore.getExtractUrl() + - " for template id " +templateOnImageStore.getTemplateId(), th); + } catch (Throwable th) { + s_logger.warn("caught exception while deleting download url " + templateOnImageStore.getExtractUrl() + " for template id " + templateOnImageStore.getTemplateId(), th); } } } @@ -2473,7 +2437,7 @@ public void setDiskProfileThrottling(DiskProfile dskCh, final ServiceOffering of public DiskTO getDiskWithThrottling(final DataTO volTO, final Volume.Type volumeType, final long deviceId, final String path, final long offeringId, final long diskOfferingId) { DiskTO disk = null; if (volTO != null && volTO instanceof VolumeObjectTO) { - VolumeObjectTO volumeTO = (VolumeObjectTO) volTO; + VolumeObjectTO volumeTO = (VolumeObjectTO)volTO; ServiceOffering offering = _entityMgr.findById(ServiceOffering.class, offeringId); DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, diskOfferingId); if (volumeType == Volume.Type.ROOT) { diff --git a/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java new file mode 100644 index 000000000000..dc79ac512fa5 --- /dev/null +++ b/server/src/test/java/com/cloud/storage/StorageManagerImplTest.java @@ -0,0 +1,61 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.storage;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.mockito.Spy;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.host.Host;
+
+@RunWith(MockitoJUnitRunner.class)
+public class StorageManagerImplTest {
+
+    @Spy
+    private StorageManagerImpl storageManagerImpl;
+
+    @Test
+    public void createLocalStoragePoolName() {
+        String hostMockName = "host1";
+        executeCreateLocalStoragePoolNameForHostName(hostMockName);
+    }
+
+    @Test
+    public void createLocalStoragePoolNameUsingHostNameWithSpaces() {
+        String hostMockName = " hostNameWithSpaces ";
+        executeCreateLocalStoragePoolNameForHostName(hostMockName);
+    }
+
+    private void executeCreateLocalStoragePoolNameForHostName(String hostMockName) {
+        String firstBlockUuid = "dsdsh665";
+
+        String expectedLocalStorageName = hostMockName.trim() + "-local-" + firstBlockUuid;
+
+        Host hostMock = Mockito.mock(Host.class);
+        StoragePoolInfo storagePoolInfoMock = Mockito.mock(StoragePoolInfo.class);
+
+        Mockito.when(hostMock.getName()).thenReturn(hostMockName);
+        Mockito.when(storagePoolInfoMock.getUuid()).thenReturn(firstBlockUuid + "-213151-df21ef333d-2d33f1");
+
+        String localStoragePoolName = storageManagerImpl.createLocalStoragePoolName(hostMock, storagePoolInfoMock);
+        Assert.assertEquals(expectedLocalStorageName, localStoragePoolName);
+    }
+}
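For reference, a minimal standalone sketch (not part of the patch) of the local storage pool naming rule introduced by StorageManagerImpl.createLocalStoragePoolName: trimmed host name, the literal "local", and the first dash-separated block of the storage pool UUID. The class name LocalStoragePoolNameSketch and the sample values are illustrative only; the format string mirrors the one in the patch, and the UUID value is taken from the unit test above.

// LocalStoragePoolNameSketch.java - hypothetical illustration, assumes commons-lang3 on the classpath
import org.apache.commons.lang3.StringUtils;

public class LocalStoragePoolNameSketch {

    // Same pattern as createLocalStoragePoolName(Host, StoragePoolInfo):
    // <trimmed host name>-local-<first UUID block>
    static String localStoragePoolName(String hostName, String storagePoolUuid) {
        return String.format("%s-%s-%s", StringUtils.trim(hostName), "local", storagePoolUuid.split("-")[0]);
    }

    public static void main(String[] args) {
        // " host1 " trims to "host1"; "dsdsh665-213151-df21ef333d-2d33f1" contributes "dsdsh665"
        System.out.println(localStoragePoolName(" host1 ", "dsdsh665-213151-df21ef333d-2d33f1"));
        // prints: host1-local-dsdsh665
    }
}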