diff --git a/api/src/main/java/com/cloud/vm/UserVmService.java b/api/src/main/java/com/cloud/vm/UserVmService.java index 0b48a4867c00..d6252c084751 100644 --- a/api/src/main/java/com/cloud/vm/UserVmService.java +++ b/api/src/main/java/com/cloud/vm/UserVmService.java @@ -379,7 +379,7 @@ UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serviceOffe String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, String sshKeyPair, Map requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, Map dataDiskTemplateToDiskOfferingMap, - Map templateOvfPropertiesMap, boolean dynamicScalingEnabled) + Map templateOvfPropertiesMap, boolean dynamicScalingEnabled, String type) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException; diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 0fff4efef4fc..233fde2ff1fe 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -837,11 +837,16 @@ public class ApiConstants { public static final String KUBERNETES_VERSION_ID = "kubernetesversionid"; public static final String KUBERNETES_VERSION_NAME = "kubernetesversionname"; public static final String MASTER_NODES = "masternodes"; + public static final String NODE_IDS = "nodeids"; public static final String CONTROL_NODES = "controlnodes"; public static final String MIN_SEMANTIC_VERSION = "minimumsemanticversion"; public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid"; public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize"; public static final String SUPPORTS_HA = "supportsha"; + public static final String SUPPORTS_AUTOSCALING = "supportsautoscaling"; + public static final String AUTOSCALING_ENABLED = "autoscalingenabled"; + public static final String MIN_SIZE = "minsize"; + public static final String MAX_SIZE = "maxsize"; public static final String BOOT_TYPE = "boottype"; public static final String BOOT_MODE = "bootmode"; diff --git a/debian/rules b/debian/rules index a19089a18165..ed1559a46dfe 100755 --- a/debian/rules +++ b/debian/rules @@ -66,17 +66,21 @@ override_dh_auto_install: mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/lib mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/setup + mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm mkdir $(DESTDIR)/var/log/$(PACKAGE)/management mkdir $(DESTDIR)/var/cache/$(PACKAGE)/management mkdir $(DESTDIR)/var/log/$(PACKAGE)/ipallocator mkdir $(DESTDIR)/var/lib/$(PACKAGE)/management mkdir $(DESTDIR)/var/lib/$(PACKAGE)/mnt + cp -r client/target/utilities/scripts/db/* $(DESTDIR)/usr/share/$(PACKAGE)-management/setup/ cp -r client/target/classes/META-INF/webapp $(DESTDIR)/usr/share/$(PACKAGE)-management/webapp cp server/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/ cp client/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/ cp client/target/cloud-client-ui-$(VERSION).jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/cloudstack-$(VERSION).jar cp client/target/lib/*jar $(DESTDIR)/usr/share/$(PACKAGE)-management/lib/ + cp -r 
engine/schema/dist/systemvm-templates/* $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm/ + rm -rf $(DESTDIR)/usr/share/$(PACKAGE)-management/templates/systemvm/md5sum.txt # nast hack for a couple of configuration files mv $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/cloudstack-limits.conf $(DESTDIR)/$(SYSCONFDIR)/security/limits.d/ diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineGuru.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineGuru.java index d6d123cd2434..7611df820c80 100644 --- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineGuru.java +++ b/engine/api/src/main/java/com/cloud/vm/VirtualMachineGuru.java @@ -20,6 +20,10 @@ import com.cloud.agent.manager.Commands; import com.cloud.deploy.DeployDestination; import com.cloud.exception.ResourceUnavailableException; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; + /** * A VirtualMachineGuru knows how to process a certain type of virtual machine. * @@ -60,4 +64,12 @@ public interface VirtualMachineGuru { void prepareStop(VirtualMachineProfile profile); void finalizeUnmanage(VirtualMachine vm); + + static String getEncodedMsPublicKey(String pubKey) { + String base64EncodedPublicKey = null; + if (pubKey != null) { + base64EncodedPublicKey = Base64.getEncoder().encodeToString(pubKey.getBytes(StandardCharsets.UTF_8)); + } + return base64EncodedPublicKey; + } } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 854d5a2107d9..d085499ce4cd 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -412,6 +412,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac static final ConfigKey HaVmRestartHostUp = new ConfigKey("Advanced", Boolean.class, "ha.vm.restart.hostup", "true", "If an out-of-band stop of a VM is detected and its host is up, then power on the VM", true); + static final ConfigKey SystemVmRootDiskSize = new ConfigKey("Advanced", + Long.class, "systemvm.root.disk.size", "-1", + "Size of root volume (in GB) of system VMs and virtual routers", true); + ScheduledExecutorService _executor = null; private long _nodeId; @@ -460,6 +464,12 @@ public void allocate(final String vmInstanceName, final VirtualMachineTemplate t final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmFinal, template, serviceOffering, null, null); + Long rootDiskSize = rootDiskOfferingInfo.getSize(); + if (vm.getType().isUsedBySystem() && SystemVmRootDiskSize.value() != null && SystemVmRootDiskSize.value() > 0L) { + rootDiskSize = SystemVmRootDiskSize.value(); + } + final Long rootDiskSizeFinal = rootDiskSize; + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) throws InsufficientCapacityException { @@ -485,7 +495,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws } else if (template.getFormat() == ImageFormat.BAREMETAL) { // Do nothing } else { - volumeMgr.allocateTemplatedVolumes(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(), + volumeMgr.allocateTemplatedVolumes(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskSizeFinal, rootDiskOfferingInfo.getMinIops(), 
rootDiskOfferingInfo.getMaxIops(), template, vmFinal, owner); } @@ -1728,7 +1738,7 @@ protected boolean sendStop(final VirtualMachineGuru guru, final VirtualMachinePr final UserVmVO userVm = _userVmDao.findById(vm.getId()); if (vm.getType() == VirtualMachine.Type.User) { - if (userVm != null){ + if (userVm != null) { userVm.setPowerState(PowerState.PowerOff); _userVmDao.update(userVm.getId(), userVm); } @@ -4829,7 +4839,7 @@ public ConfigKey[] getConfigKeys() { return new ConfigKey[] { ClusterDeltaSyncInterval, StartRetry, VmDestroyForcestop, VmOpCancelInterval, VmOpCleanupInterval, VmOpCleanupWait, VmOpLockStateRetry, VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval, VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, VmConfigDriveForceHostCacheUse, VmConfigDriveUseHostCacheOnUnsupportedPool, - HaVmRestartHostUp, ResourceCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel }; + HaVmRestartHostUp, ResourceCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, SystemVmRootDiskSize }; } public List getStoragePoolAllocators() { diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index f5daf7a6e22f..acd49a900a03 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -52,5 +52,175 @@ mysql mysql-connector-java + + org.ini4j + ini4j + ${cs.ini.version} + + + + + org.codehaus.gmaven + gmaven-plugin + 1.5 + + + setproperty + validate + + execute + + + + def projectVersion = project.version + String[] versionParts = projectVersion.tokenize('.') + pom.properties['cs.version'] = versionParts[0] + "." + versionParts[1] + pom.properties['patch.version'] = versionParts[2] + + + + + + + com.googlecode.maven-download-plugin + download-maven-plugin + 1.6.3 + + + download-checksums + validate + + wget + + + https://download.cloudstack.org/systemvm/${cs.version}/md5sum.txt + ${basedir}/dist/systemvm-templates/ + true + true + + + + + + org.codehaus.gmaven + gmaven-plugin + 1.5 + + + set-properties + generate-sources + + execute + + + + def csVersion = pom.properties['cs.version'] + def patch = pom.properties['patch.version'] + def templateList = [] + templateList.add("systemvmtemplate-${csVersion}.${patch}-kvm") + templateList.add("systemvmtemplate-${csVersion}.${patch}-vmware") + templateList.add("systemvmtemplate-${csVersion}.${patch}-xen") + templateList.add("systemvmtemplate-${csVersion}.${patch}-ovm") + templateList.add("systemvmtemplate-${csVersion}.${patch}-hyperv") + File file = new File("./engine/schema/dist/systemvm-templates/md5sum.txt") + def lines = file.readLines() + for (template in templateList) { + def data = lines.findAll { it.contains(template) } + if (data != null) { + def hypervisor = template.tokenize('-')[-1] + pom.properties["$hypervisor" + ".checksum"] = data[0].tokenize(' ')[0] + } + } + + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + systemvm-template-metadata + package + + exec + + + ${basedir}/ + bash + + templateConfig.sh + ${project.version} + + + + + + + + + + template-create + + + noredist + + + + + + org.apache.maven.plugins + maven-resources-plugin + ${cs.resources-plugin.version} + + + com.googlecode.maven-download-plugin + download-maven-plugin + 1.6.3 + + + download-kvm-template + + wget + + + true + https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-kvm.qcow2.bz2 + ${basedir}/dist/systemvm-templates/ + ${kvm.checksum} + + + + download-vmware-template + + wget + + 
+ true + https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-vmware.ova + ${basedir}/dist/systemvm-templates/ + ${vmware.checksum} + + + + download-xenserver-template + + wget + + + true + https://download.cloudstack.org/systemvm/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-xen.vhd.bz2 + ${basedir}/dist/systemvm-templates/ + ${xen.checksum} + + + + + + + + diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java index de8d604fb4ce..ab9c5cab8c4a 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDao.java @@ -22,6 +22,7 @@ import java.util.List; import java.util.Map; +import java.util.Set; public interface ClusterDao extends GenericDao { List listByPodId(long podId); @@ -34,6 +35,8 @@ public interface ClusterDao extends GenericDao { List getAvailableHypervisorInZone(Long zoneId); + Set getDistictAvailableHypervisorsAcrossClusters(); + List listByDcHyType(long dcId, String hyType); Map> getPodClusterIdMap(List clusterIds); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java index b1fce6195ba5..4d9bedba9669 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterDaoImpl.java @@ -39,8 +39,10 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; @Component public class ClusterDaoImpl extends GenericDaoBase implements ClusterDao { @@ -51,6 +53,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C protected final SearchBuilder ZoneSearch; protected final SearchBuilder ZoneHyTypeSearch; protected final SearchBuilder ZoneClusterSearch; + protected final SearchBuilder ClusterSearch; protected GenericSearchBuilder ClusterIdSearch; @@ -97,6 +100,10 @@ public ClusterDaoImpl() { ClusterIdSearch.selectFields(ClusterIdSearch.entity().getId()); ClusterIdSearch.and("dataCenterId", ClusterIdSearch.entity().getDataCenterId(), Op.EQ); ClusterIdSearch.done(); + + ClusterSearch = createSearchBuilder(); + ClusterSearch.select(null, Func.DISTINCT, ClusterSearch.entity().getHypervisorType()); + ClusterIdSearch.done(); } @Override @@ -154,6 +161,17 @@ public List getAvailableHypervisorInZone(Long zoneId) { return hypers; } + @Override + public Set getDistictAvailableHypervisorsAcrossClusters() { + SearchCriteria sc = ClusterSearch.create(); + List clusters = listBy(sc); + Set hypers = new HashSet<>(); + for (ClusterVO cluster : clusters) { + hypers.add(cluster.getHypervisorType()); + } + return hypers; + } + @Override public Map> getPodClusterIdMap(List clusterIds) { TransactionLegacy txn = TransactionLegacy.currentTxn(); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java index 699336bc2c7b..63221e745d8d 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java @@ -20,6 +20,7 @@ import java.util.Map; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.Storage; import com.cloud.storage.VMTemplateVO; import com.cloud.template.VirtualMachineTemplate; import 
com.cloud.utils.db.GenericDao; @@ -72,6 +73,8 @@ public interface VMTemplateDao extends GenericDao, StateDao< VMTemplateVO findRoutingTemplate(HypervisorType type, String templateName); + VMTemplateVO findLatestTemplateByTypeAndHypervisor(HypervisorType hypervisorType, Storage.TemplateType type); + public Long countTemplatesForAccount(long accountId); public List listUnRemovedTemplatesByStates(VirtualMachineTemplate.State ...states); @@ -81,4 +84,6 @@ public interface VMTemplateDao extends GenericDao, StateDao< void saveDetails(VMTemplateVO tmpl); List listByParentTemplatetId(long parentTemplatetId); + + VMTemplateVO findLatestTemplateByName(String name); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java index b7e55b69b450..74d210be0deb 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -97,6 +97,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem private SearchBuilder AllFieldsSearch; protected SearchBuilder ParentTemplateIdSearch; private SearchBuilder InactiveUnremovedTmpltSearch; + private SearchBuilder LatestTemplateByHypervisorTypeSearch; @Inject ResourceTagDao _tagsDao; @@ -105,6 +106,11 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem private String consoleProxyTmpltName; public VMTemplateDaoImpl() { + super(); + LatestTemplateByHypervisorTypeSearch = createSearchBuilder(); + LatestTemplateByHypervisorTypeSearch.and("hypervisorType", LatestTemplateByHypervisorTypeSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ); + LatestTemplateByHypervisorTypeSearch.and("templateType", LatestTemplateByHypervisorTypeSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); + LatestTemplateByHypervisorTypeSearch.and("removed", LatestTemplateByHypervisorTypeSearch.entity().getRemoved(), SearchCriteria.Op.NULL); } @Override @@ -229,6 +235,20 @@ public List listReadyTemplates() { return listIncludingRemovedBy(sc); } + + @Override + public VMTemplateVO findLatestTemplateByName(String name) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("name", SearchCriteria.Op.EQ, name); + sc.addAnd("removed", SearchCriteria.Op.NULL); + Filter filter = new Filter(VMTemplateVO.class, "id", false, null, 1L); + List templates = listBy(sc, filter); + if ((templates != null) && !templates.isEmpty()) { + return templates.get(0); + } + return null; + } + @Override public List findIsosByIdAndPath(Long domainId, Long accountId, String path) { SearchCriteria sc = createSearchCriteria(); @@ -587,6 +607,19 @@ public VMTemplateVO findRoutingTemplate(HypervisorType hType, String templateNam } } + @Override + public VMTemplateVO findLatestTemplateByTypeAndHypervisor(HypervisorType hypervisorType, TemplateType type) { + SearchCriteria sc = LatestTemplateByHypervisorTypeSearch.create(); + sc.setParameters("hypervisorType", hypervisorType); + sc.setParameters("templateType", type); + Filter filter = new Filter(VMTemplateVO.class, "id", false, null, 1L); + List templates = listBy(sc, filter); + if (templates != null && !templates.isEmpty()) { + return templates.get(0); + } + return null; + } + @Override public Long countTemplatesForAccount(long accountId) { SearchCriteria sc = CountTemplatesByAccount.create(); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java 
b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java index 99250442f7c2..e19834550d15 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -274,8 +274,6 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer final DbUpgrade[] upgrades = calculateUpgradePath(dbVersion, currentVersion); - updateSystemVmTemplates(upgrades); - for (DbUpgrade upgrade : upgrades) { VersionVO version; s_logger.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade @@ -346,6 +344,7 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer txn.close(); } } + updateSystemVmTemplates(upgrades); } @Override @@ -366,7 +365,11 @@ public void check() { return; } + SystemVmTemplateRegistration.parseMetadataFile(); final CloudStackVersion currentVersion = CloudStackVersion.parse(currentVersionValue); + SystemVmTemplateRegistration.CS_MAJOR_VERSION = String.valueOf(currentVersion.getMajorRelease()) + "." + String.valueOf(currentVersion.getMinorRelease()); + SystemVmTemplateRegistration.CS_TINY_VERSION = String.valueOf(currentVersion.getPatchRelease()); + s_logger.info("DB version = " + dbVersion + " Code Version = " + currentVersion); if (dbVersion.compareTo(currentVersion) > 0) { diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java new file mode 100644 index 000000000000..0616537a60b4 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -0,0 +1,849 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
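+//
+// SystemVmTemplateRegistration seeds and registers the bundled system VM templates
+// (built under engine/schema/dist/systemvm-templates and installed to
+// /usr/share/cloudstack-management/templates/systemvm) on NFS secondary storage
+// during a database upgrade, and updates the router.template.* and
+// minreq.sysvmtemplate.version configuration values for the registered templates.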
+package com.cloud.upgrade; + +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.ClusterDaoImpl; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DataCenterDaoImpl; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Storage; +import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateDaoImpl; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.upgrade.dao.BasicTemplateDataStoreDaoImpl; +import com.cloud.user.Account; +import com.cloud.utils.DateUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.UriUtils; +import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; +import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.dao.VMInstanceDaoImpl; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.dao.ConfigurationDaoImpl; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDaoImpl; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.log4j.Logger; +import org.ini4j.Ini; + +import javax.inject.Inject; +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.sql.Connection; +import java.sql.Date; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.stream.Collectors; + +public class SystemVmTemplateRegistration { + private static final Logger LOGGER = Logger.getLogger(SystemVmTemplateRegistration.class); + private static final String MOUNT_COMMAND = "sudo mount -t nfs %s %s"; + private static final String UMOUNT_COMMAND = "sudo umount %s"; + private static final String RELATIVE_TEMPLATE_PATH = "./engine/schema/dist/systemvm-templates/"; + private static final String ABSOLUTE_TEMPLATE_PATH = "/usr/share/cloudstack-management/templates/systemvm/"; + private static final String TEMPLATES_PATH = fetchTemplatesPath(); + private static final String METADATA_FILE_NAME = "metadata.ini"; + private static final String METADATA_FILE = TEMPLATES_PATH + METADATA_FILE_NAME; + public static final String TEMPORARY_SECONDARY_STORE = "tmp"; + private static final String PARTIAL_TEMPLATE_FOLDER = String.format("/template/tmpl/%d/", Account.ACCOUNT_ID_SYSTEM); + private static final String storageScriptsDir = "scripts/storage/secondary"; + private static final Integer OTHER_LINUX_ID = 99; + private static final Integer LINUX_5_ID = 15; + private static final Integer LINUX_7_ID = 183; 
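+    // Timeout for the setup-sysvm-tmplt script (milliseconds) and wait time for the
+    // global upgrade lock (seconds) used while seeding templates.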
+ private static final Integer SCRIPT_TIMEOUT = 1800000; + private static final Integer LOCK_WAIT_TIMEOUT = 1200; + + + public static String CS_MAJOR_VERSION = null; + public static String CS_TINY_VERSION = null; + + @Inject + DataCenterDao dataCenterDao; + @Inject + VMTemplateDao vmTemplateDao; + @Inject + TemplateDataStoreDao templateDataStoreDao; + @Inject + VMInstanceDao vmInstanceDao; + @Inject + ImageStoreDao imageStoreDao; + @Inject + ClusterDao clusterDao; + @Inject + ConfigurationDao configurationDao; + + public SystemVmTemplateRegistration() { + dataCenterDao = new DataCenterDaoImpl(); + vmTemplateDao = new VMTemplateDaoImpl(); + templateDataStoreDao = new BasicTemplateDataStoreDaoImpl(); + vmInstanceDao = new VMInstanceDaoImpl(); + imageStoreDao = new ImageStoreDaoImpl(); + clusterDao = new ClusterDaoImpl(); + configurationDao = new ConfigurationDaoImpl(); + } + + private static class SystemVMTemplateDetails { + Long id; + String uuid; + String name; + String uniqueName; + Date created; + String url; + String checksum; + ImageFormat format; + Integer guestOsId; + Hypervisor.HypervisorType hypervisorType; + Long storeId; + Long size; + Long physicalSize; + String installPath; + boolean deployAsIs; + Date updated; + + SystemVMTemplateDetails(String uuid, String name, Date created, String url, String checksum, + ImageFormat format, Integer guestOsId, Hypervisor.HypervisorType hypervisorType, + Long storeId) { + this.uuid = uuid; + this.name = name; + this.created = created; + this.url = url; + this.checksum = checksum; + this.format = format; + this.guestOsId = guestOsId; + this.hypervisorType = hypervisorType; + this.storeId = storeId; + } + + public void setId(Long id) { + this.id = id; + } + + public Long getId() { + return id; + } + + public String getUuid() { + return uuid; + } + + public String getName() { + return name; + } + + public Date getCreated() { + return created; + } + + public String getUrl() { + return url; + } + + public String getChecksum() { + return checksum; + } + + public ImageFormat getFormat() { + return format; + } + + public Integer getGuestOsId() { + return guestOsId; + } + + public Hypervisor.HypervisorType getHypervisorType() { + return hypervisorType; + } + + public Long getStoreId() { + return storeId; + } + + public Long getSize() { + return size; + } + + public void setSize(Long size) { + this.size = size; + } + + public Long getPhysicalSize() { + return physicalSize; + } + + public void setPhysicalSize(Long physicalSize) { + this.physicalSize = physicalSize; + } + + public String getInstallPath() { + return installPath; + } + + public void setInstallPath(String installPath) { + this.installPath = installPath; + } + + public String getUniqueName() { + return uniqueName; + } + + public void setUniqueName(String uniqueName) { + this.uniqueName = uniqueName; + } + + public boolean isDeployAsIs() { + return deployAsIs; + } + + public void setDeployAsIs(boolean deployAsIs) { + this.deployAsIs = deployAsIs; + } + + public Date getUpdated() { + return updated; + } + + public void setUpdated(Date updated) { + this.updated = updated; + } + } + + public static final List hypervisorList = Arrays.asList(Hypervisor.HypervisorType.KVM, + Hypervisor.HypervisorType.VMware, + Hypervisor.HypervisorType.XenServer, + Hypervisor.HypervisorType.Hyperv, + Hypervisor.HypervisorType.LXC, + Hypervisor.HypervisorType.Ovm3 + ); + + public static final Map NewTemplateNameList = new HashMap(); + public static final Map FileNames = new HashMap(); + public static final Map 
NewTemplateUrl = new HashMap(); + public static final Map NewTemplateChecksum = new HashMap(); + + public static final Map RouterTemplateConfigurationNames = new HashMap() { + { + put(Hypervisor.HypervisorType.KVM, "router.template.kvm"); + put(Hypervisor.HypervisorType.VMware, "router.template.vmware"); + put(Hypervisor.HypervisorType.XenServer, "router.template.xenserver"); + put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv"); + put(Hypervisor.HypervisorType.LXC, "router.template.lxc"); + put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3"); + } + }; + + public static final Map hypervisorGuestOsMap = new HashMap() { + { + put(Hypervisor.HypervisorType.KVM, LINUX_5_ID); + put(Hypervisor.HypervisorType.XenServer, OTHER_LINUX_ID); + put(Hypervisor.HypervisorType.VMware, OTHER_LINUX_ID); + put(Hypervisor.HypervisorType.Hyperv, LINUX_5_ID); + put(Hypervisor.HypervisorType.LXC, LINUX_5_ID); + put(Hypervisor.HypervisorType.Ovm3, LINUX_7_ID); + } + }; + + public static final Map hypervisorImageFormat = new HashMap() { + { + put(Hypervisor.HypervisorType.KVM, ImageFormat.QCOW2); + put(Hypervisor.HypervisorType.XenServer, ImageFormat.VHD); + put(Hypervisor.HypervisorType.VMware, ImageFormat.OVA); + put(Hypervisor.HypervisorType.Hyperv, ImageFormat.VHD); + put(Hypervisor.HypervisorType.LXC, ImageFormat.QCOW2); + put(Hypervisor.HypervisorType.Ovm3, ImageFormat.RAW); + } + }; + + public static boolean validateIfSeeded(String url, String path) { + String filePath = null; + try { + filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString(); + if (filePath == null) { + throw new CloudRuntimeException("Failed to create temporary directory to mount secondary store"); + } + mountStore(url, filePath); + int lastIdx = path.lastIndexOf(File.separator); + String partialDirPath = path.substring(0, lastIdx); + String templatePath = filePath + File.separator + partialDirPath; + File templateProps = new File(templatePath + "/template.properties"); + if (templateProps.exists()) { + LOGGER.info("SystemVM template already seeded, skipping registration"); + return true; + } + LOGGER.info("SystemVM template not seeded"); + return false; + } catch (Exception e) { + LOGGER.error("Failed to verify if the template is seeded", e); + throw new CloudRuntimeException("Failed to verify if the template is seeded", e); + } finally { + unmountStore(filePath); + try { + Files.delete(Path.of(filePath)); + } catch (IOException e) { + LOGGER.error(String.format("Failed to delete temporary directory: %s", filePath)); + } + } + } + + private String calculateChecksum(File file) { + try (InputStream is = Files.newInputStream(Paths.get(file.getPath()))) { + return DigestUtils.md5Hex(is); + } catch (IOException e) { + String errMsg = "Failed to calculate template checksum"; + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); + } + } + + public Long getRegisteredTemplateId(Pair hypervisorAndTemplateName) { + VMTemplateVO vmTemplate = vmTemplateDao.findLatestTemplateByName(hypervisorAndTemplateName.second()); + Long templateId = null; + if (vmTemplate != null) { + templateId = vmTemplate.getId(); + } + return templateId; + } + + private static String fetchTemplatesPath() { + String filePath = RELATIVE_TEMPLATE_PATH + METADATA_FILE_NAME; + LOGGER.debug(String.format("Looking for file [ %s ] in the classpath.", filePath)); + File metaFile = new File(filePath); + String templatePath = null; + if (metaFile.exists()) { + templatePath = RELATIVE_TEMPLATE_PATH; + } + if (templatePath == null) 
{ + filePath = ABSOLUTE_TEMPLATE_PATH + METADATA_FILE_NAME; + metaFile = new File(filePath); + templatePath = ABSOLUTE_TEMPLATE_PATH; + LOGGER.debug(String.format("Looking for file [ %s ] in the classpath.", filePath)); + if (!metaFile.exists()) { + String errMsg = String.format("Unable to locate metadata file in your setup at %s", filePath.toString()); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + return templatePath; + } + + private String getHypervisorName(String name) { + if (name.equals("xenserver")) { + return "xen"; + } + if (name.equals("ovm3")) { + return "ovm"; + } + return name; + + } + + private Hypervisor.HypervisorType getHypervisorType(String hypervisor) { + if (hypervisor.equalsIgnoreCase("xen")) { + hypervisor = "xenserver"; + } else if (hypervisor.equalsIgnoreCase("ovm")) { + hypervisor = "ovm3"; + } + return Hypervisor.HypervisorType.getType(hypervisor); + } + + private List getEligibleZoneIds() { + List zoneIds = new ArrayList<>(); + List stores = imageStoreDao.findByProtocol("nfs"); + for (ImageStoreVO store : stores) { + if (!zoneIds.contains(store.getDataCenterId())) { + zoneIds.add(store.getDataCenterId()); + } + } + return zoneIds; + } + + private Pair getNfsStoreInZone(Long zoneId) { + String url = null; + Long storeId = null; + ImageStoreVO storeVO = imageStoreDao.findOneByZoneAndProtocol(zoneId, "nfs"); + if (storeVO == null) { + String errMsg = String.format("Failed to fetch NFS store in zone = %s for SystemVM template registration", zoneId); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + url = storeVO.getUrl(); + storeId = storeVO.getId(); + return new Pair<>(url, storeId); + } + + public static void mountStore(String storeUrl, String path) { + try { + if (storeUrl != null) { + URI uri = new URI(UriUtils.encodeURIComponent(storeUrl)); + String host = uri.getHost(); + String mountPath = uri.getPath(); + String mount = String.format(MOUNT_COMMAND, host + ":" + mountPath, path); + Script.runSimpleBashScript(mount); + } + } catch (Exception e) { + String msg = "NFS Store URL is not in the correct format"; + LOGGER.error(msg, e); + throw new CloudRuntimeException(msg, e); + } + } + + private List fetchAllHypervisors(Long zoneId) { + List hypervisorList = new ArrayList<>(); + List hypervisorTypes = clusterDao.getAvailableHypervisorInZone(zoneId); + hypervisorList = hypervisorTypes.stream().distinct().map(Enum::name).collect(Collectors.toList()); + return hypervisorList; + } + + private Long createTemplateObjectInDB(SystemVMTemplateDetails details) { + Long templateId = vmTemplateDao.getNextInSequence(Long.class, "id"); + VMTemplateVO template = new VMTemplateVO(); + template.setUuid(details.getUuid()); + template.setUniqueName(String.format("routing-%s" , String.valueOf(templateId))); + template.setName(details.getName()); + template.setPublicTemplate(false); + template.setFeatured(false); + template.setTemplateType(Storage.TemplateType.SYSTEM); + template.setRequiresHvm(true); + template.setBits(64); + template.setAccountId(Account.ACCOUNT_ID_SYSTEM); + template.setUrl(details.getUrl()); + template.setChecksum(details.getChecksum()); + template.setEnablePassword(false); + template.setDisplayText(details.getName()); + template.setFormat(details.getFormat()); + template.setGuestOSId(details.getGuestOsId()); + template.setCrossZones(true); + template.setHypervisorType(details.getHypervisorType()); + template.setState(VirtualMachineTemplate.State.Inactive); + 
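+        // VMware system VM templates are registered as deploy-as-is; the template is
+        // created Inactive and only switched to Active once updateTemplateDetails()
+        // marks the store-ref entry Ready.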
template.setDeployAsIs(Hypervisor.HypervisorType.VMware.equals(details.getHypervisorType())); + template = vmTemplateDao.persist(template); + if (template == null) { + return null; + } + return template.getId(); + } + + private void createTemplateStoreRefEntry(SystemVMTemplateDetails details) { + TemplateDataStoreVO templateDataStoreVO = new TemplateDataStoreVO(details.storeId, details.getId(), details.getCreated(), 0, + VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED, null, null, null, details.getInstallPath(), details.getUrl()); + templateDataStoreVO.setDataStoreRole(DataStoreRole.Image); + templateDataStoreVO = templateDataStoreDao.persist(templateDataStoreVO); + if (templateDataStoreVO == null) { + throw new CloudRuntimeException(String.format("Failed to create template_store_ref record for the systemVM template for hypervisor: %s", details.getHypervisorType().name())); + } + } + + public void updateTemplateDetails(SystemVMTemplateDetails details, boolean updateTemplateDetails) { + VMTemplateVO template = vmTemplateDao.findById(details.getId()); + if (updateTemplateDetails) { + template.setSize(details.getSize()); + template.setState(VirtualMachineTemplate.State.Active); + vmTemplateDao.update(template.getId(), template); + } + TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(details.getStoreId(), template.getId()); + templateDataStoreVO.setSize(details.getSize()); + templateDataStoreVO.setPhysicalSize(details.getPhysicalSize()); + templateDataStoreVO.setDownloadPercent(100); + templateDataStoreVO.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + templateDataStoreVO.setLastUpdated(details.getUpdated()); + templateDataStoreVO.setState(ObjectInDataStoreStateMachine.State.Ready); + boolean updated = templateDataStoreDao.update(templateDataStoreVO.getId(), templateDataStoreVO); + if (!updated) { + throw new CloudRuntimeException("Failed to update template_store_ref entry for registered systemVM template"); + } + } + + public void updateSystemVMEntries(Long templateId, Hypervisor.HypervisorType hypervisorType) { + vmInstanceDao.updateSystemVmTemplateId(templateId, hypervisorType); + } + + public void updateConfigurationParams(Map configParams) { + for (Map.Entry config : configParams.entrySet()) { + boolean updated = configurationDao.update(config.getKey(), config.getValue()); + if (!updated) { + throw new CloudRuntimeException(String.format("Failed to update configuration parameter %s", config.getKey())); + } + } + } + + private static void readTemplateProperties(String path, SystemVMTemplateDetails details) { + File tmpFile = new File(path); + Long size = null; + Long physicalSize = 0L; + try (FileReader fr = new FileReader(tmpFile); BufferedReader brf = new BufferedReader(fr);) { + String line = null; + while ((line = brf.readLine()) != null) { + if (line.startsWith("size=")) { + physicalSize = Long.parseLong(line.split("=")[1]); + } else if (line.startsWith("virtualsize=")) { + size = Long.parseLong(line.split("=")[1]); + } + if (size == null) { + size = physicalSize; + } + } + } catch (IOException ex) { + LOGGER.warn("Failed to read from template.properties", ex); + } + details.setSize(size); + details.setPhysicalSize(physicalSize); + } + + private void updateTemplateTablesOnFailure(long templateId) { + VMTemplateVO template = vmTemplateDao.createForUpdate(templateId); + template.setState(VirtualMachineTemplate.State.Inactive); + vmTemplateDao.update(template.getId(), template); + vmTemplateDao.remove(templateId); + 
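+        // Also remove the corresponding template_store_ref entry so a subsequent
+        // registration attempt starts from a clean state.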
TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByTemplate(template.getId(), DataStoreRole.Image); + templateDataStoreDao.remove(templateDataStoreVO.getId()); + } + + public static void unmountStore(String filePath) { + try { + LOGGER.info("Unmounting store"); + String umountCmd = String.format(UMOUNT_COMMAND, filePath); + Script.runSimpleBashScript(umountCmd); + try { + Files.deleteIfExists(Paths.get(filePath)); + } catch (IOException e) { + LOGGER.error(String.format("Failed to cleanup mounted store at: %s", filePath), e); + } + } catch (Exception e) { + String msg = String.format("Failed to unmount store mounted at %s", filePath); + LOGGER.error(msg, e); + throw new CloudRuntimeException(msg, e); + } + } + + private void setupTemplate(String templateName, Pair hypervisorAndTemplateName, + String destTempFolder) throws CloudRuntimeException { + String setupTmpltScript = Script.findScript(storageScriptsDir, "setup-sysvm-tmplt"); + if (setupTmpltScript == null) { + throw new CloudRuntimeException("Unable to find the createtmplt.sh"); + } + Script scr = new Script(setupTmpltScript, SCRIPT_TIMEOUT, LOGGER); + scr.add("-u", templateName); + scr.add("-f", TEMPLATES_PATH + FileNames.get(hypervisorAndTemplateName.first())); + scr.add("-h", hypervisorAndTemplateName.first().name().toLowerCase(Locale.ROOT)); + scr.add("-d", destTempFolder); + String result = scr.execute(); + if (result != null) { + String errMsg = String.format("failed to create template: %s ", result); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + + } + + private Long performTemplateRegistrationOperations(Pair hypervisorAndTemplateName, + String url, String checksum, ImageFormat format, long guestOsId, + Long storeId, Long templateId, String filePath, boolean updateTmpltDetails) { + Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); + String templateName = UUID.randomUUID().toString(); + Date created = new Date(DateUtil.currentGMTTime().getTime()); + SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, hypervisorAndTemplateName.second(), created, + url, checksum, format, (int) guestOsId, hypervisor, storeId); + if (templateId == null) { + templateId = createTemplateObjectInDB(details); + } + if (templateId == null) { + throw new CloudRuntimeException(String.format("Failed to register template for hypervisor: %s", hypervisor.name())); + } + details.setId(templateId); + String destTempFolderName = String.valueOf(templateId); + String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + destTempFolderName; + details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + File.separator + templateName + "." 
+ hypervisorImageFormat.get(hypervisor).getFileExtension()); + createTemplateStoreRefEntry(details); + setupTemplate(templateName, hypervisorAndTemplateName, destTempFolder); + readTemplateProperties(destTempFolder + "/template.properties", details); + details.setUpdated(new Date(DateUtil.currentGMTTime().getTime())); + updateTemplateDetails(details, updateTmpltDetails); + return templateId; + } + + public void registerTemplate(Pair hypervisorAndTemplateName, + Pair storeUrlAndId, VMTemplateVO templateVO, String filePath) { + Long templateId = null; + try { + templateId = templateVO.getId(); + performTemplateRegistrationOperations(hypervisorAndTemplateName, templateVO.getUrl(), templateVO.getChecksum(), + templateVO.getFormat(), templateVO.getGuestOSId(), storeUrlAndId.second(), templateId, filePath, false); + } catch (Exception e) { + String errMsg = String.format("Failed to register template for hypervisor: %s", hypervisorAndTemplateName.first()); + LOGGER.error(errMsg, e); + if (templateId != null) { + updateTemplateTablesOnFailure(templateId); + cleanupStore(templateId, filePath); + } + throw new CloudRuntimeException(errMsg, e); + } + } + + public void registerTemplate(Pair hypervisorAndTemplateName, Pair storeUrlAndId, String filePath) { + Long templateId = null; + try { + Hypervisor.HypervisorType hypervisor = hypervisorAndTemplateName.first(); + templateId = performTemplateRegistrationOperations(hypervisorAndTemplateName, NewTemplateUrl.get(hypervisor), NewTemplateChecksum.get(hypervisor), + hypervisorImageFormat.get(hypervisor), hypervisorGuestOsMap.get(hypervisor), storeUrlAndId.second(), null, filePath, true); + Map configParams = new HashMap<>(); + configParams.put(RouterTemplateConfigurationNames.get(hypervisorAndTemplateName.first()), hypervisorAndTemplateName.second()); + configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." 
+ CS_TINY_VERSION); + updateConfigurationParams(configParams); + updateSystemVMEntries(templateId, hypervisorAndTemplateName.first()); + } catch (Exception e) { + String errMsg = String.format("Failed to register template for hypervisor: %s", hypervisorAndTemplateName.first()); + LOGGER.error(errMsg, e); + if (templateId != null) { + updateTemplateTablesOnFailure(templateId); + cleanupStore(templateId, filePath); + } + throw new CloudRuntimeException(errMsg, e); + } + } + + public static void parseMetadataFile() { + try { + Ini ini = new Ini(); + ini.load(new FileReader(METADATA_FILE)); + for (Hypervisor.HypervisorType hypervisorType : hypervisorList) { + String hypervisor = hypervisorType.name().toLowerCase(Locale.ROOT); + Ini.Section section = ini.get(hypervisor); + NewTemplateNameList.put(hypervisorType, section.get("templatename")); + FileNames.put(hypervisorType, section.get("filename")); + NewTemplateChecksum.put(hypervisorType, section.get("checksum")); + NewTemplateUrl.put(hypervisorType, section.get("downloadurl")); + } + } catch (Exception e) { + String errMsg = String.format("Failed to parse systemVM template metadata file: %s", METADATA_FILE); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); + } + } + + private static void cleanupStore(Long templateId, String filePath) { + String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + String.valueOf(templateId); + try { + Files.deleteIfExists(Paths.get(destTempFolder)); + } catch (IOException e) { + LOGGER.error(String.format("Failed to cleanup mounted store at: %s", filePath), e); + } + } + + private void validateTemplates(Set hypervisorsInUse) { + Set hypervisors = hypervisorsInUse.stream().map(Enum::name). + map(name -> name.toLowerCase(Locale.ROOT)).map(this::getHypervisorName).collect(Collectors.toSet()); + List templates = new ArrayList<>(); + for (Hypervisor.HypervisorType hypervisorType : hypervisorsInUse) { + templates.add(FileNames.get(hypervisorType)); + } + + boolean templatesFound = true; + for (String hypervisor : hypervisors) { + String matchedTemplate = templates.stream().filter(x -> x.contains(hypervisor)).findAny().orElse(null); + if (matchedTemplate == null) { + templatesFound = false; + break; + } + + File tempFile = new File(TEMPLATES_PATH + matchedTemplate); + String templateChecksum = calculateChecksum(tempFile); + if (!templateChecksum.equals(NewTemplateChecksum.get(getHypervisorType(hypervisor)))) { + LOGGER.error(String.format("Checksum mismatch: %s != %s ", templateChecksum, NewTemplateChecksum.get(getHypervisorType(hypervisor)))); + templatesFound = false; + break; + } + } + + if (!templatesFound) { + String errMsg = "SystemVm template not found. 
Cannot upgrade system Vms"; + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + + public void registerTemplates(Set hypervisorsInUse) { + GlobalLock lock = GlobalLock.getInternLock("UpgradeDatabase-Lock"); + try { + LOGGER.info("Grabbing lock to register templates."); + if (!lock.lock(LOCK_WAIT_TIMEOUT)) { + throw new CloudRuntimeException("Unable to acquire lock to register SystemVM template."); + } + try { + validateTemplates(hypervisorsInUse); + // Perform Registration if templates not already registered + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(final TransactionStatus status) { + List zoneIds = getEligibleZoneIds(); + for (Long zoneId : zoneIds) { + String filePath = null; + try { + filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString(); + if (filePath == null) { + throw new CloudRuntimeException("Failed to create temporary file path to mount the store"); + } + Pair storeUrlAndId = getNfsStoreInZone(zoneId); + mountStore(storeUrlAndId.first(), filePath); + List hypervisorList = fetchAllHypervisors(zoneId); + for (String hypervisor : hypervisorList) { + Hypervisor.HypervisorType name = Hypervisor.HypervisorType.getType(hypervisor); + String templateName = NewTemplateNameList.get(name); + Pair hypervisorAndTemplateName = new Pair(name, templateName); + Long templateId = getRegisteredTemplateId(hypervisorAndTemplateName); + if (templateId != null) { + VMTemplateVO templateVO = vmTemplateDao.findById(templateId); + TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByTemplate(templateId, DataStoreRole.Image); + String installPath = templateDataStoreVO.getInstallPath(); + if (validateIfSeeded(storeUrlAndId.first(), installPath)) { + continue; + } else if (templateVO != null) { + registerTemplate(hypervisorAndTemplateName, storeUrlAndId, templateVO, filePath); + continue; + } + } + registerTemplate(hypervisorAndTemplateName, storeUrlAndId, filePath); + } + unmountStore(filePath); + } catch (Exception e) { + unmountStore(filePath); + throw new CloudRuntimeException("Failed to register systemVM template. Upgrade Failed"); + } + } + } + }); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to register systemVM template. Upgrade Failed"); + } + } finally { + lock.unlock(); + lock.releaseRef(); + } + } + + private void updateRegisteredTemplateDetails(Long templateId, Map.Entry hypervisorAndTemplateName) { + VMTemplateVO templateVO = vmTemplateDao.findById(templateId); + templateVO.setTemplateType(Storage.TemplateType.SYSTEM); + if (Hypervisor.HypervisorType.VMware == templateVO.getHypervisorType()) { + templateVO.setDeployAsIs(true); + } + boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO); + if (!updated) { + String errMsg = String.format("updateSystemVmTemplates:Exception while updating template with id %s to be marked as 'system'", templateId); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + + updateSystemVMEntries(templateId, hypervisorAndTemplateName.getKey()); + + // Change value of global configuration parameter router.template.* for the corresponding hypervisor and minreq.sysvmtemplate.version for the ACS version + Map configParams = new HashMap<>(); + configParams.put(RouterTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()), hypervisorAndTemplateName.getValue()); + configParams.put("minreq.sysvmtemplate.version", CS_MAJOR_VERSION + "." 
+ CS_TINY_VERSION); + updateConfigurationParams(configParams); + } + + private void updateTemplateUrlAndChecksum(VMTemplateVO templateVO, Map.Entry hypervisorAndTemplateName) { + templateVO.setUrl(NewTemplateUrl.get(hypervisorAndTemplateName.getKey())); + templateVO.setChecksum(NewTemplateChecksum.get(hypervisorAndTemplateName.getKey())); + if (Hypervisor.HypervisorType.VMware == templateVO.getHypervisorType()) { + templateVO.setDeployAsIs(true); + } + boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO); + if (!updated) { + String errMsg = String.format("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type %s", hypervisorAndTemplateName.getKey().name()); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + + public void updateSystemVmTemplates(final Connection conn) { + LOGGER.debug("Updating System Vm template IDs"); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(final TransactionStatus status) { + Set hypervisorsListInUse = new HashSet(); + try { + hypervisorsListInUse = clusterDao.getDistictAvailableHypervisorsAcrossClusters(); + + } catch (final Exception e) { + LOGGER.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage()); + throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e); + } + + for (final Map.Entry hypervisorAndTemplateName : NewTemplateNameList.entrySet()) { + LOGGER.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms"); + Long templateId = getRegisteredTemplateId(new Pair<>(hypervisorAndTemplateName.getKey(), hypervisorAndTemplateName.getValue())); + try { + // change template type to SYSTEM + if (templateId != null) { + updateRegisteredTemplateDetails(templateId, hypervisorAndTemplateName); + } else { + if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) { + try { + registerTemplates(hypervisorsListInUse); + break; + } catch (final Exception e) { + throw new CloudRuntimeException(String.format("%s.%s %s SystemVm template not found. Cannot upgrade system Vms", CS_MAJOR_VERSION, CS_TINY_VERSION, hypervisorAndTemplateName.getKey())); + } + } else { + LOGGER.warn(String.format("%s.%s %s SystemVm template not found. 
Cannot upgrade system Vms hypervisor is not used, so not failing upgrade", + CS_MAJOR_VERSION, CS_TINY_VERSION, hypervisorAndTemplateName.getKey())); + // Update the latest template URLs for corresponding hypervisor + VMTemplateVO templateVO = vmTemplateDao.findLatestTemplateByTypeAndHypervisor(hypervisorAndTemplateName.getKey(), Storage.TemplateType.SYSTEM); + if (templateVO != null) { + updateTemplateUrlAndChecksum(templateVO, hypervisorAndTemplateName); + } + } + } + } catch (final Exception e) { + String errMsg = "updateSystemVmTemplates:Exception while getting ids of templates"; + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); + } + } + LOGGER.debug("Updating System Vm Template IDs Complete"); + } + }); + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/BasicTemplateDataStoreDaoImpl.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/BasicTemplateDataStoreDaoImpl.java new file mode 100644 index 000000000000..3ea63d059a68 --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/BasicTemplateDataStoreDaoImpl.java @@ -0,0 +1,236 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
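+//
+// BasicTemplateDataStoreDaoImpl is a minimal TemplateDataStoreDao used only from the
+// upgrade path (SystemVmTemplateRegistration), where the full storage subsystem is not
+// wired up; only findByStoreTemplate and findByTemplate are implemented, the remaining
+// interface methods are no-op stubs.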
+ +package com.cloud.upgrade.dao; + +import java.util.List; +import java.util.Map; + +import javax.naming.ConfigurationException; + +import com.cloud.utils.db.Filter; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; + +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.template.VirtualMachineTemplate; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +public class BasicTemplateDataStoreDaoImpl extends GenericDaoBase implements TemplateDataStoreDao { + private SearchBuilder templateRoleSearch; + private SearchBuilder storeTemplateSearch; + + public BasicTemplateDataStoreDaoImpl() { + super(); + templateRoleSearch = createSearchBuilder(); + templateRoleSearch.and("template_id", templateRoleSearch.entity().getTemplateId(), SearchCriteria.Op.EQ); + templateRoleSearch.and("store_role", templateRoleSearch.entity().getDataStoreRole(), SearchCriteria.Op.EQ); + templateRoleSearch.and("destroyed", templateRoleSearch.entity().getDestroyed(), SearchCriteria.Op.EQ); + templateRoleSearch.and("state", templateRoleSearch.entity().getState(), SearchCriteria.Op.EQ); + templateRoleSearch.done(); + + storeTemplateSearch = createSearchBuilder(); + storeTemplateSearch.and("template_id", storeTemplateSearch.entity().getTemplateId(), SearchCriteria.Op.EQ); + storeTemplateSearch.and("store_id", storeTemplateSearch.entity().getDataStoreId(), SearchCriteria.Op.EQ); + storeTemplateSearch.and("destroyed", storeTemplateSearch.entity().getDestroyed(), SearchCriteria.Op.EQ); + storeTemplateSearch.done(); + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + super.configure(name, params); + return true; + } + + @Override + public List listByStoreId(long id) { + return null; + } + + @Override + public List listDestroyed(long storeId) { + return null; + } + + @Override + public List listActiveOnCache(long id) { + return null; + } + + @Override + public void deletePrimaryRecordsForStore(long id) { + + } + + @Override + public void deletePrimaryRecordsForTemplate(long templateId) { + + } + + @Override + public List listByTemplateStore(long templateId, long storeId) { + return null; + } + + @Override + public List listByTemplateStoreStatus(long templateId, long storeId, ObjectInDataStoreStateMachine.State... states) { + return null; + } + + @Override + public List listByTemplateStoreDownloadStatus(long templateId, long storeId, VMTemplateStorageResourceAssoc.Status... status) { + return null; + } + + @Override + public List listByTemplateZoneDownloadStatus(long templateId, Long zoneId, VMTemplateStorageResourceAssoc.Status... status) { + return null; + } + + @Override + public TemplateDataStoreVO findByTemplateZoneDownloadStatus(long templateId, Long zoneId, VMTemplateStorageResourceAssoc.Status... status) { + return null; + } + + @Override + public TemplateDataStoreVO findByTemplateZoneStagingDownloadStatus(long templateId, Long zoneId, VMTemplateStorageResourceAssoc.Status... 
status) { + return null; + } + + @Override + public TemplateDataStoreVO findByStoreTemplate(long storeId, long templateId) { + SearchCriteria sc = storeTemplateSearch.create(); + sc.setParameters("store_id", storeId); + sc.setParameters("template_id", templateId); + sc.setParameters("destroyed", false); + Filter filter = new Filter(TemplateDataStoreVO.class, "id", false, 0L, 1L); + List templates = listBy(sc, filter); + if ((templates != null) && !templates.isEmpty()) { + return templates.get(0); + } + return null; + } + + @Override + public TemplateDataStoreVO findByStoreTemplate(long storeId, long templateId, boolean lock) { + return null; + } + + @Override + public TemplateDataStoreVO findByTemplate(long templateId, DataStoreRole role) { + SearchCriteria sc = templateRoleSearch.create(); + sc.setParameters("template_id", templateId); + sc.setParameters("store_role", role); + sc.setParameters("destroyed", false); + return findOneIncludingRemovedBy(sc); + } + + @Override + public TemplateDataStoreVO findReadyByTemplate(long templateId, DataStoreRole role) { + return null; + } + + @Override + public TemplateDataStoreVO findByTemplateZone(long templateId, Long zoneId, DataStoreRole role) { + return null; + } + + @Override + public List listByTemplate(long templateId) { + return null; + } + + @Override + public List listByTemplateNotBypassed(long templateId, Long... storeIds) { + return null; + } + + @Override + public TemplateDataStoreVO findByTemplateZoneReady(long templateId, Long zoneId) { + return null; + } + + @Override + public void duplicateCacheRecordsOnRegionStore(long storeId) { + + } + + @Override + public TemplateDataStoreVO findReadyOnCache(long templateId) { + return null; + } + + @Override + public List listOnCache(long templateId) { + return null; + } + + @Override + public void updateStoreRoleToCachce(long storeId) { + + } + + @Override + public List listTemplateDownloadUrls() { + return null; + } + + @Override + public void removeByTemplateStore(long templateId, long imageStoreId) { + + } + + @Override + public void expireDnldUrlsForZone(Long dcId) { + + } + + @Override + public List listByTemplateState(VirtualMachineTemplate.State... 
states) { + return null; + } + + @Override + public TemplateDataStoreVO createTemplateDirectDownloadEntry(long templateId, Long size) { + return null; + } + + @Override + public TemplateDataStoreVO getReadyBypassedTemplate(long templateId) { + return null; + } + + @Override + public boolean isTemplateMarkedForDirectDownload(long templateId) { + return false; + } + + @Override + public List listTemplateDownloadUrlsByStoreId(long storeId) { + return null; + } + + @Override + public boolean updateState(ObjectInDataStoreStateMachine.State currentState, ObjectInDataStoreStateMachine.Event event, ObjectInDataStoreStateMachine.State nextState, DataObjectInStore vo, Object data) { + return false; + } +} diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41520to41600.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41520to41600.java index 5c5523c78215..c61d5188e367 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41520to41600.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41520to41600.java @@ -22,19 +22,19 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; +import com.cloud.upgrade.SystemVmTemplateRegistration; import org.apache.log4j.Logger; -import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.exception.CloudRuntimeException; public class Upgrade41520to41600 implements DbUpgrade, DbUpgradeSystemVmTemplate { final static Logger LOG = Logger.getLogger(Upgrade41520to41600.class); + private SystemVmTemplateRegistration systemVmTemplateRegistration; + + public Upgrade41520to41600() { + } @Override public String[] getUpgradableVersionRange() { @@ -92,173 +92,20 @@ private void generateUuidForExistingSshKeyPairs(Connection conn) { } } + private void initSystemVmTemplateRegistration() { + systemVmTemplateRegistration = new SystemVmTemplateRegistration(); + } + @Override @SuppressWarnings("serial") public void updateSystemVmTemplates(final Connection conn) { LOG.debug("Updating System Vm template IDs"); - final Set hypervisorsListInUse = new HashSet(); - try (PreparedStatement pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null"); ResultSet rs = pstmt.executeQuery()) { - while (rs.next()) { - switch (Hypervisor.HypervisorType.getType(rs.getString(1))) { - case XenServer: - hypervisorsListInUse.add(Hypervisor.HypervisorType.XenServer); - break; - case KVM: - hypervisorsListInUse.add(Hypervisor.HypervisorType.KVM); - break; - case VMware: - hypervisorsListInUse.add(Hypervisor.HypervisorType.VMware); - break; - case Hyperv: - hypervisorsListInUse.add(Hypervisor.HypervisorType.Hyperv); - break; - case LXC: - hypervisorsListInUse.add(Hypervisor.HypervisorType.LXC); - break; - case Ovm3: - hypervisorsListInUse.add(Hypervisor.HypervisorType.Ovm3); - break; - default: - break; - } - } - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates: Exception caught while getting hypervisor types from clusters: " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting hypervisor types from clusters", e); - } - - final Map NewTemplateNameList = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "systemvm-kvm-4.16.0"); - put(Hypervisor.HypervisorType.VMware, "systemvm-vmware-4.16.0"); - put(Hypervisor.HypervisorType.XenServer, "systemvm-xenserver-4.16.0"); - 
put(Hypervisor.HypervisorType.Hyperv, "systemvm-hyperv-4.16.0"); - put(Hypervisor.HypervisorType.LXC, "systemvm-lxc-4.16.0"); - put(Hypervisor.HypervisorType.Ovm3, "systemvm-ovm3-4.16.0"); - } - }; - - final Map routerTemplateConfigurationNames = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "router.template.kvm"); - put(Hypervisor.HypervisorType.VMware, "router.template.vmware"); - put(Hypervisor.HypervisorType.XenServer, "router.template.xenserver"); - put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv"); - put(Hypervisor.HypervisorType.LXC, "router.template.lxc"); - put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3"); - } - }; - - final Map newTemplateUrl = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2"); - put(Hypervisor.HypervisorType.VMware, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-vmware.ova"); - put(Hypervisor.HypervisorType.XenServer, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-xen.vhd.bz2"); - put(Hypervisor.HypervisorType.Hyperv, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-hyperv.vhd.zip"); - put(Hypervisor.HypervisorType.LXC, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-kvm.qcow2.bz2"); - put(Hypervisor.HypervisorType.Ovm3, "https://download.cloudstack.org/systemvm/4.16/systemvmtemplate-4.16.0-ovm.raw.bz2"); - } - }; - - final Map newTemplateChecksum = new HashMap() { - { - put(Hypervisor.HypervisorType.KVM, "81b3e48bb934784a13555a43c5ef5ffb"); - put(Hypervisor.HypervisorType.XenServer, "1b178a5dbdbe090555515340144c6017"); - put(Hypervisor.HypervisorType.VMware, "e6a88e518c57d6f36c096c4204c3417f"); - put(Hypervisor.HypervisorType.Hyperv, "5c94da45337cf3e1910dcbe084d4b9ad"); - put(Hypervisor.HypervisorType.LXC, "81b3e48bb934784a13555a43c5ef5ffb"); - put(Hypervisor.HypervisorType.Ovm3, "875c5c65455fc06c4a012394410db375"); - } - }; - - for (final Map.Entry hypervisorAndTemplateName : NewTemplateNameList.entrySet()) { - LOG.debug("Updating " + hypervisorAndTemplateName.getKey() + " System Vms"); - try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = ? 
and removed is null order by id desc limit 1")) { - // Get systemvm template id for corresponding hypervisor - long templateId = -1; - pstmt.setString(1, hypervisorAndTemplateName.getValue()); - try (ResultSet rs = pstmt.executeQuery()) { - if (rs.next()) { - templateId = rs.getLong(1); - } - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates: Exception caught while getting ids of templates: " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates: Exception caught while getting ids of templates", e); - } - - // change template type to SYSTEM - if (templateId != -1) { - try (PreparedStatement templ_type_pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?");) { - templ_type_pstmt.setLong(1, templateId); - templ_type_pstmt.executeUpdate(); - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system': " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating template with id " + templateId + " to be marked as 'system'", e); - } - // update template ID of system Vms - try (PreparedStatement update_templ_id_pstmt = conn - .prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = ? and removed is NULL");) { - update_templ_id_pstmt.setLong(1, templateId); - update_templ_id_pstmt.setString(2, hypervisorAndTemplateName.getKey().toString()); - update_templ_id_pstmt.executeUpdate(); - } catch (final Exception e) { - LOG.error("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to " + templateId - + ": " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting template for " + hypervisorAndTemplateName.getKey().toString() + " to " - + templateId, e); - } - - // Change value of global configuration parameter - // router.template.* for the corresponding hypervisor - try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?");) { - update_pstmt.setString(1, hypervisorAndTemplateName.getValue()); - update_pstmt.setString(2, routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey())); - update_pstmt.executeUpdate(); - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while setting " + routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " - + hypervisorAndTemplateName.getValue() + ": " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting " - + routerTemplateConfigurationNames.get(hypervisorAndTemplateName.getKey()) + " to " + hypervisorAndTemplateName.getValue(), e); - } - - // Change value of global configuration parameter - // minreq.sysvmtemplate.version for the ACS version - try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? 
WHERE name = ?");) { - update_pstmt.setString(1, "4.16.0"); - update_pstmt.setString(2, "minreq.sysvmtemplate.version"); - update_pstmt.executeUpdate(); - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.16.0: " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while setting 'minreq.sysvmtemplate.version' to 4.16.0", e); - } - } else { - if (hypervisorsListInUse.contains(hypervisorAndTemplateName.getKey())) { - throw new CloudRuntimeException(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. Cannot upgrade system Vms"); - } else { - LOG.warn(getUpgradedVersion() + hypervisorAndTemplateName.getKey() + " SystemVm template not found. " + hypervisorAndTemplateName.getKey() - + " hypervisor is not used, so not failing upgrade"); - // Update the latest template URLs for corresponding - // hypervisor - try (PreparedStatement update_templ_url_pstmt = conn - .prepareStatement("UPDATE `cloud`.`vm_template` SET url = ? , checksum = ? WHERE hypervisor_type = ? AND type = 'SYSTEM' AND removed is null order by id desc limit 1");) { - update_templ_url_pstmt.setString(1, newTemplateUrl.get(hypervisorAndTemplateName.getKey())); - update_templ_url_pstmt.setString(2, newTemplateChecksum.get(hypervisorAndTemplateName.getKey())); - update_templ_url_pstmt.setString(3, hypervisorAndTemplateName.getKey().toString()); - update_templ_url_pstmt.executeUpdate(); - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type " - + hypervisorAndTemplateName.getKey().toString() + ": " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type " - + hypervisorAndTemplateName.getKey().toString(), e); - } - } - } - } catch (final SQLException e) { - LOG.error("updateSystemVmTemplates:Exception while getting ids of templates: " + e.getMessage()); - throw new CloudRuntimeException("updateSystemVmTemplates:Exception while getting ids of templates", e); - } + initSystemVmTemplateRegistration(); + try { + systemVmTemplateRegistration.updateSystemVmTemplates(conn); + } catch (Exception e) { + throw new CloudRuntimeException("Failed to find / register SystemVM template(s)"); } - LOG.debug("Updating System Vm Template IDs Complete"); } @Override diff --git a/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java b/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java index 311a6c5b3744..e62162e25960 100644 --- a/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java +++ b/engine/schema/src/main/java/com/cloud/vm/UserVmVO.java @@ -48,6 +48,9 @@ public class UserVmVO extends VMInstanceVO implements UserVm { @Column(name = "update_parameters", updatable = true) protected boolean updateParameters = true; + @Column(name = "user_vm_type", updatable = true) + private String userVmType; + transient String password; @Override @@ -126,6 +129,14 @@ public boolean isUpdateParameters() { return updateParameters; } + public String getUserVmType() { + return userVmType; + } + + public void setUserVmType(String userVmType) { + this.userVmType = userVmType; + } + @Override public String getName() { return instanceName; diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java index 3ac023e2dfd8..0bbcb37ea0a2 100755 --- 
a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDao.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.Map; +import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; import com.cloud.utils.fsm.StateDao; @@ -159,4 +160,6 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listNonMigratingVmsByHostEqualsLastHost(long hostId); + void updateSystemVmTemplateId(long templateId, Hypervisor.HypervisorType hypervisorType); + } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 22a197a13d32..7ceff5eb5953 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -28,6 +28,7 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; +import com.cloud.hypervisor.Hypervisor; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -126,6 +127,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem private static final String COUNT_VMS_BASED_ON_VGPU_TYPES2 = "GROUP BY offering.service_offering_id) results GROUP BY pci, type"; + private static final String UPDATE_SYSTEM_VM_TEMPLATE_ID_FOR_HYPERVISOR = "UPDATE `cloud`.`vm_instance` SET vm_template_id = ? WHERE type <> 'User' AND hypervisor_type = ? AND removed is NULL"; + @Inject protected HostDao _hostDao; @@ -941,4 +944,23 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } + + + @Override + public void updateSystemVmTemplateId(long templateId, Hypervisor.HypervisorType hypervisorType) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + + StringBuilder sql = new StringBuilder(UPDATE_SYSTEM_VM_TEMPLATE_ID_FOR_HYPERVISOR); + try { + PreparedStatement updateStatement = txn.prepareAutoCloseStatement(sql.toString()); + updateStatement.setLong(1, templateId); + updateStatement.setString(2, hypervisorType.toString()); + updateStatement.executeUpdate(); + } catch (SQLException e) { + throw new CloudRuntimeException("DB Exception on: " + sql, e); + } catch (Throwable e) { + throw new CloudRuntimeException("Caught: " + sql, e); + } + + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java index 71609a982ca8..ba9825c3c868 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java @@ -20,6 +20,7 @@ import java.util.List; +import com.cloud.storage.DataStoreRole; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import com.cloud.utils.db.GenericDao; @@ -42,4 +43,10 @@ public interface ImageStoreDao extends GenericDao { List listImageCacheStores(); List listStoresByZoneId(long zoneId); + + List listAllStoresInZone(Long zoneId, String provider, DataStoreRole role); + + List findByProtocol(String protocol); + + ImageStoreVO findOneByZoneAndProtocol(long zoneId, String protocol); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java index 44ae96180774..3468b6008d99 100644 --- 
a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java @@ -23,6 +23,7 @@ import javax.naming.ConfigurationException; +import com.cloud.utils.db.Filter; import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; @@ -38,7 +39,23 @@ public class ImageStoreDaoImpl extends GenericDaoBase implem private SearchBuilder nameSearch; private SearchBuilder providerSearch; private SearchBuilder regionSearch; - + private SearchBuilder storeSearch; + private SearchBuilder protocolSearch; + private SearchBuilder zoneProtocolSearch; + + public ImageStoreDaoImpl() { + super(); + protocolSearch = createSearchBuilder(); + protocolSearch.and("protocol", protocolSearch.entity().getProtocol(), SearchCriteria.Op.EQ); + protocolSearch.and("role", protocolSearch.entity().getRole(), SearchCriteria.Op.EQ); + protocolSearch.done(); + + zoneProtocolSearch = createSearchBuilder(); + zoneProtocolSearch.and("dataCenterId", zoneProtocolSearch.entity().getDcId(), SearchCriteria.Op.EQ); + zoneProtocolSearch.and("protocol", zoneProtocolSearch.entity().getProtocol(), SearchCriteria.Op.EQ); + zoneProtocolSearch.and("role", zoneProtocolSearch.entity().getRole(), SearchCriteria.Op.EQ); + zoneProtocolSearch.done(); + } @Override public boolean configure(String name, Map params) throws ConfigurationException { super.configure(name, params); @@ -58,6 +75,12 @@ public boolean configure(String name, Map params) throws Configu regionSearch.and("role", regionSearch.entity().getRole(), SearchCriteria.Op.EQ); regionSearch.done(); + storeSearch = createSearchBuilder(); + storeSearch.and("providerName", storeSearch.entity().getProviderName(), SearchCriteria.Op.EQ); + storeSearch.and("role", storeSearch.entity().getRole(), SearchCriteria.Op.EQ); + storeSearch.and("dataCenterId", storeSearch.entity().getDcId(), SearchCriteria.Op.EQ); + storeSearch.done(); + return true; } @@ -76,6 +99,15 @@ public List findByProvider(String provider) { return listBy(sc); } + @Override + public List listAllStoresInZone(Long zoneId, String provider, DataStoreRole role) { + SearchCriteria sc = storeSearch.create(); + sc.setParameters("providerName", provider); + sc.setParameters("role", role); + sc.setParameters("dataCenterId", zoneId); + return listBy(sc); + } + @Override public List findByZone(ZoneScope scope, Boolean readonly) { SearchCriteria sc = createSearchCriteria(); @@ -140,4 +172,23 @@ public List listStoresByZoneId(long zoneId) { sc.addAnd("dcId", SearchCriteria.Op.EQ, zoneId); return listBy(sc); } + + @Override + public List findByProtocol(String protocol) { + SearchCriteria sc = protocolSearch.create(); + sc.setParameters("protocol", protocol); + sc.setParameters("role", DataStoreRole.Image); + return listBy(sc); + } + + @Override + public ImageStoreVO findOneByZoneAndProtocol(long dataCenterId, String protocol) { + SearchCriteria sc = zoneProtocolSearch.create(); + sc.setParameters("dataCenterId", dataCenterId); + sc.setParameters("protocol", protocol); + sc.setParameters("role", DataStoreRole.Image); + Filter filter = new Filter(1); + List results = listBy(sc, filter); + return results.size() == 0 ? 
null : results.get(0); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java index d24582714868..3ca9259c0997 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java @@ -135,6 +135,10 @@ public Long getDataCenterId() { return this.dcId; } + public Long getDcId() { + return this.dcId; + } + public ScopeType getScope() { return this.scope; } diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql index 64c381e0e7ad..eda211c6772f 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql @@ -19,6 +19,19 @@ -- Schema upgrade from 4.15.2.0 to 4.16.0.0 --; +ALTER TABLE `cloud`.`user_vm` ADD COLUMN `user_vm_type` varchar(255) DEFAULT "UserVM" COMMENT 'Defines the type of UserVM'; + +-- This is set, so as to ensure that the controller details from the ovf template are adhered to +UPDATE `cloud`.`vm_template` set deploy_as_is = 1 where id = 8; + +DELETE FROM `cloud`.`configuration` WHERE name IN ("cloud.kubernetes.cluster.template.name.kvm", "cloud.kubernetes.cluster.template.name.vmware", "cloud.kubernetes.cluster.template.name.xenserver", "cloud.kubernetes.cluster.template.name.hyperv"); + +ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `autoscaling_enabled` tinyint(1) unsigned NOT NULL DEFAULT 0; +ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `minsize` bigint; +ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `maxsize` bigint; + +ALTER TABLE `cloud`.`kubernetes_cluster_vm_map` ADD COLUMN `control_node` tinyint(1) unsigned NOT NULL DEFAULT 0; + -- Adding dynamic scalable flag for service offering table ALTER TABLE `cloud`.`service_offering` ADD COLUMN `dynamic_scaling_enabled` tinyint(1) unsigned NOT NULL DEFAULT 1 COMMENT 'true(1) if VM needs to be dynamically scalable of cpu or memory'; DROP VIEW IF EXISTS `cloud`.`service_offering_view`; diff --git a/engine/schema/templateConfig.sh b/engine/schema/templateConfig.sh new file mode 100644 index 000000000000..d39eb124fef6 --- /dev/null +++ b/engine/schema/templateConfig.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +function getTemplateVersion() { + projVersion=$1 + version="$(cut -d'-' -f1 <<<"$projVersion")" + subversion1="$(cut -d'.' -f1 <<<"$version")" + subversion2="$(cut -d'.' -f2 <<<"$version")" + minorversion="$(cut -d'.' 
-f3 <<<"$version")" + export CS_VERSION="${subversion1}"."${subversion2}" + export CS_MINOR_VERSION="${minorversion}" +} + +function getGenericName() { + hypervisor=$(echo "$1" | tr "[:upper:]" "[:lower:]") + if [[ "$hypervisor" == "ovm3" ]]; then + echo "ovm" + elif [[ "$hypervisor" == "lxc" ]]; then + echo "kvm" + elif [[ "$hypervisor" == "xenserver" ]]; then + echo "xen" + else + echo "$hypervisor" + fi +} + +function getChecksum() { + local fileData="$1" + local hvName=$2 + while IFS= read -r line; do + if [[ $line == *"$hvName"* ]]; then + echo "$(cut -d' ' -f1 <<<"$line")" + fi + done <<< "$fileData" +} + +function createMetadataFile() { + local fileData=$(cat $SOURCEFILE) + for i in "${!templates[@]}" + do + section="$i" + hvName=$(getGenericName $i) + templatename="systemvm-${i}-${CS_VERSION}" + checksum=$(getChecksum "$fileData" $hvName) + downloadurl="${templates[$i]}" + filename=$(echo ${downloadurl##*'/'}) + echo -e "["$section"]\ntemplatename = $templatename\nchecksum = $checksum\ndownloadurl = $downloadurl\nfilename = $filename\n" >> $METADATAFILE + done +} + +declare -A templates +getTemplateVersion $1 +templates=( ["kvm"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-kvm.qcow2.bz2" + ["vmware"]="https://download.cloudstack.org/systemvm/${CS_VERSION}/systemvmtemplate-${CS_VERSION}.${CS_MINOR_VERSION}-vmware.ova" + ["xenserver"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-xen.vhd.bz2" + ["hyperv"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-hyperv.vhd.zip" + ["lxc"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-kvm.qcow2.bz2" + ["ovm3"]="https://download.cloudstack.org/systemvm/$CS_VERSION/systemvmtemplate-$CS_VERSION.$CS_MINOR_VERSION-ovm.raw.bz2" ) + + +PARENTPATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )/dist/systemvm-templates/" +mkdir -p $PARENTPATH +METADATAFILE=${PARENTPATH}"metadata.ini" +echo > $METADATAFILE +SOURCEFILE=${PARENTPATH}'md5sum.txt' +createMetadataFile diff --git a/packaging/centos7/cloud.spec b/packaging/centos7/cloud.spec index d8e6de184c2e..0728f589829b 100644 --- a/packaging/centos7/cloud.spec +++ b/packaging/centos7/cloud.spec @@ -298,6 +298,11 @@ install -D server/target/conf/cloudstack-sudoers ${RPM_BUILD_ROOT}%{_sysconfdir} touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina +# SystemVM template +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm/md5sum.txt + # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/ @@ -551,6 +556,7 @@ pip3 install --upgrade urllib3 %{_datadir}/%{name}-management/conf %{_datadir}/%{name}-management/lib/*.jar %{_datadir}/%{name}-management/logs +%{_datadir}/%{name}-management/templates %attr(0755,root,root) %{_bindir}/%{name}-setup-databases %attr(0755,root,root) %{_bindir}/%{name}-migrate-databases %attr(0755,root,root) %{_bindir}/%{name}-set-guest-password diff --git a/packaging/centos8/cloud.spec b/packaging/centos8/cloud.spec index eee2fa3a6452..31d85dda0806 100644 --- 
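The templateConfig.sh script above turns the per-hypervisor template names, URLs and checksums into a metadata.ini file that the RPM spec files here ship under the management server's templates/systemvm directory. A minimal, illustrative reader for that layout (plain string parsing only; this is a sketch of the sections and keys the script emits, not the parser the management server actually uses):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class MetadataIniSketch {
    // Reads one [section] of the generated metadata.ini (e.g. "kvm") and returns its
    // templatename / checksum / downloadurl / filename keys as written by createMetadataFile.
    static Map<String, String> readSection(String path, String section) throws IOException {
        Map<String, String> values = new HashMap<>();
        List<String> lines = Files.readAllLines(Paths.get(path));
        boolean inSection = false;
        for (String line : lines) {
            String trimmed = line.trim();
            if (trimmed.startsWith("[")) {
                inSection = trimmed.equals("[" + section + "]");
            } else if (inSection && trimmed.contains("=")) {
                String[] kv = trimmed.split("=", 2);
                values.put(kv[0].trim(), kv[1].trim());
            }
        }
        return values;
    }
}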
a/packaging/centos8/cloud.spec +++ b/packaging/centos8/cloud.spec @@ -291,6 +291,11 @@ install -D server/target/conf/cloudstack-sudoers ${RPM_BUILD_ROOT}%{_sysconfdir} touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina +# SystemVM template +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm/md5sum.txt + # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/ @@ -539,6 +544,7 @@ pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz %{_datadir}/%{name}-management/conf %{_datadir}/%{name}-management/lib/*.jar %{_datadir}/%{name}-management/logs +%{_datadir}/%{name}-management/templates %attr(0755,root,root) %{_bindir}/%{name}-setup-databases %attr(0755,root,root) %{_bindir}/%{name}-migrate-databases %attr(0755,root,root) %{_bindir}/%{name}-set-guest-password diff --git a/packaging/suse15/cloud.spec b/packaging/suse15/cloud.spec index 6ba8f9f50c5b..30300c6be471 100644 --- a/packaging/suse15/cloud.spec +++ b/packaging/suse15/cloud.spec @@ -293,6 +293,11 @@ install -D server/target/conf/cloudstack-sudoers ${RPM_BUILD_ROOT}%{_sysconfdir} touch ${RPM_BUILD_ROOT}%{_localstatedir}/run/%{name}-management.pid #install -D server/target/conf/cloudstack-catalina.logrotate ${RPM_BUILD_ROOT}%{_sysconfdir}/logrotate.d/%{name}-catalina +# SystemVM template +mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +cp -r engine/schema/dist/systemvm-templates/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm +rm -rf ${RPM_BUILD_ROOT}%{_datadir}/%{name}-management/templates/systemvm/md5sum.txt + # UI mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/ui mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-ui/ @@ -533,6 +538,7 @@ pip install --upgrade /usr/share/cloudstack-marvin/Marvin-*.tar.gz %{_datadir}/%{name}-management/conf %{_datadir}/%{name}-management/lib/*.jar %{_datadir}/%{name}-management/logs +%{_datadir}/%{name}-management/templates %attr(0755,root,root) %{_bindir}/%{name}-setup-databases %attr(0755,root,root) %{_bindir}/%{name}-migrate-databases %attr(0755,root,root) %{_bindir}/%{name}-set-guest-password diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java index dbb9571cea31..f151255d5cdc 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtStartCommandWrapper.java @@ -40,6 +40,7 @@ import com.cloud.network.Networks.TrafficType; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; +import com.cloud.vm.UserVmManager; import com.cloud.vm.VirtualMachine; @ResourceWrapper(handles = StartCommand.class) @@ -88,14 +89,7 @@ public Answer execute(final StartCommand command, final LibvirtComputingResource libvirtComputingResource.applyDefaultNetworkRules(conn, vmSpec, false); // pass cmdline info to system vms - if (vmSpec.getType() != VirtualMachine.Type.User) { - String controlIp = null; - 
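In the libvirt start-command change below, the cmdline/SSH patching that used to be reserved for system VMs is now also attempted for user VMs whose boot arguments carry the CKS node marker, while the control-IP reachability check stays system-VM only. The new predicate, restated as a small self-contained sketch (class and method names are illustrative):

import com.cloud.vm.UserVmManager;
import com.cloud.vm.VirtualMachine;

class CmdlinePatchCheckSketch {
    // True for system VMs, and for user VMs started as Kubernetes (CKS) cluster nodes,
    // i.e. when the boot args contain the UserVmManager.CKS_NODE marker.
    static boolean needsCmdlinePatch(VirtualMachine.Type type, String bootArgs) {
        return type != VirtualMachine.Type.User
                || (bootArgs != null && bootArgs.contains(UserVmManager.CKS_NODE));
    }
}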
for (final NicTO nic : vmSpec.getNics()) { - if (nic.getType() == TrafficType.Control) { - controlIp = nic.getIp(); - break; - } - } + if (vmSpec.getType() != VirtualMachine.Type.User || (vmSpec.getBootArgs() != null && vmSpec.getBootArgs().contains(UserVmManager.CKS_NODE))) { // try to patch and SSH into the systemvm for up to 5 minutes for (int count = 0; count < 10; count++) { // wait and try passCmdLine for 30 seconds at most for CLOUDSTACK-2823 @@ -104,12 +98,22 @@ public Answer execute(final StartCommand command, final LibvirtComputingResource } } - final VirtualRoutingResource virtRouterResource = libvirtComputingResource.getVirtRouterResource(); - // check if the router is up? - for (int count = 0; count < 60; count++) { - final boolean result = virtRouterResource.connect(controlIp, 1, 5000); - if (result) { - break; + if (vmSpec.getType() != VirtualMachine.Type.User) { + String controlIp = null; + for (final NicTO nic : vmSpec.getNics()) { + if (nic.getType() == TrafficType.Control) { + controlIp = nic.getIp(); + break; + } + } + + final VirtualRoutingResource virtRouterResource = libvirtComputingResource.getVirtRouterResource(); + // check if the router is up? + for (int count = 0; count < 60; count++) { + final boolean result = virtRouterResource.connect(controlIp, 1, 5000); + if (result) { + break; + } } } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 0e93398b552e..4729d62c6e41 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -48,6 +48,7 @@ import javax.naming.ConfigurationException; import javax.xml.datatype.XMLGregorianCalendar; +import com.cloud.utils.script.Script; import com.cloud.hypervisor.vmware.mo.NetworkMO; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.storage.command.CopyCommand; @@ -275,7 +276,6 @@ import com.cloud.utils.mgmt.PropertyMapDynamicBean; import com.cloud.utils.net.NetUtils; import com.cloud.utils.nicira.nvp.plugin.NiciraNvpApiVersion; -import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.PowerState; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java index 0e1f91c9c8a8..3be959bc9fef 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java @@ -37,6 +37,7 @@ enum Event { StopRequested, DestroyRequested, RecoveryRequested, + AutoscaleRequested, ScaleUpRequested, ScaleDownRequested, UpgradeRequested, @@ -81,6 +82,7 @@ enum State { s_fsm.addTransition(State.Running, Event.FaultsDetected, State.Alert); + s_fsm.addTransition(State.Running, Event.AutoscaleRequested, State.Scaling); s_fsm.addTransition(State.Running, Event.ScaleUpRequested, State.Scaling); s_fsm.addTransition(State.Running, Event.ScaleDownRequested, State.Scaling); s_fsm.addTransition(State.Scaling, Event.OperationSucceeded, State.Running); @@ -131,4 +133,7 @@ enum State { @Override State 
getState(); Date getCreated(); + boolean getAutoscalingEnabled(); + Long getMinSize(); + Long getMaxSize(); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index 01ac63ff2036..aec96fe67785 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -21,18 +21,19 @@ import java.net.URL; import java.security.SecureRandom; import java.util.ArrayList; +import java.util.Arrays; import java.util.Date; import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.UUID; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -273,60 +274,6 @@ private void logAndThrow(final Level logLevel, final String message, final Excep logTransitStateAndThrow(logLevel, message, null, null, ex); } - private boolean isKubernetesServiceTemplateConfigured(DataCenter zone) { - // Check Kubernetes VM template for zone - boolean isHyperVAvailable = false; - boolean isKVMAvailable = false; - boolean isVMwareAvailable = false; - boolean isXenserverAvailable = false; - List clusters = clusterDao.listByZoneId(zone.getId()); - for (ClusterVO clusterVO : clusters) { - if (Hypervisor.HypervisorType.Hyperv.equals(clusterVO.getHypervisorType())) { - isHyperVAvailable = true; - } - if (Hypervisor.HypervisorType.KVM.equals(clusterVO.getHypervisorType())) { - isKVMAvailable = true; - } - if (Hypervisor.HypervisorType.VMware.equals(clusterVO.getHypervisorType())) { - isVMwareAvailable = true; - } - if (Hypervisor.HypervisorType.XenServer.equals(clusterVO.getHypervisorType())) { - isXenserverAvailable = true; - } - } - List> templatePairs = new ArrayList<>(); - if (isHyperVAvailable) { - templatePairs.add(new Pair<>(KubernetesClusterHyperVTemplateName.key(), KubernetesClusterHyperVTemplateName.value())); - } - if (isKVMAvailable) { - templatePairs.add(new Pair<>(KubernetesClusterKVMTemplateName.key(), KubernetesClusterKVMTemplateName.value())); - } - if (isVMwareAvailable) { - templatePairs.add(new Pair<>(KubernetesClusterVMwareTemplateName.key(), KubernetesClusterVMwareTemplateName.value())); - } - if (isXenserverAvailable) { - templatePairs.add(new Pair<>(KubernetesClusterXenserverTemplateName.key(), KubernetesClusterXenserverTemplateName.value())); - } - for (Pair templatePair : templatePairs) { - String templateKey = templatePair.first(); - String templateName = templatePair.second(); - if (Strings.isNullOrEmpty(templateName)) { - LOGGER.warn(String.format("Global setting %s is empty. 
Template name need to be specified for Kubernetes service to function", templateKey)); - return false; - } - final VMTemplateVO template = templateDao.findValidByTemplateName(templateName); - if (template == null) { - LOGGER.warn(String.format("Unable to find the template %s to be used for provisioning Kubernetes cluster nodes", templateName)); - return false; - } - if (CollectionUtils.isEmpty(templateJoinDao.newTemplateView(template, zone.getId(), true))) { - LOGGER.warn(String.format("The template ID: %s, name: %s is not available for use in zone ID: %s provisioning Kubernetes cluster nodes", template.getUuid(), templateName, zone.getUuid())); - return false; - } - } - return true; - } - private boolean isKubernetesServiceNetworkOfferingConfigured(DataCenter zone) { // Check network offering String networkOfferingName = KubernetesClusterNetworkOffering.value(); @@ -374,9 +321,6 @@ private boolean isKubernetesServiceNetworkOfferingConfigured(DataCenter zone) { } private boolean isKubernetesServiceConfigured(DataCenter zone) { - if (!isKubernetesServiceTemplateConfigured(zone)) { - return false; - } if (!isKubernetesServiceNetworkOfferingConfigured(zone)) { return false; } @@ -396,23 +340,12 @@ private IpAddress getSourceNatIp(Network network) { return null; } - private VMTemplateVO getKubernetesServiceTemplate(Hypervisor.HypervisorType hypervisorType) { - String templateName = null; - switch (hypervisorType) { - case Hyperv: - templateName = KubernetesClusterHyperVTemplateName.value(); - break; - case KVM: - templateName = KubernetesClusterKVMTemplateName.value(); - break; - case VMware: - templateName = KubernetesClusterVMwareTemplateName.value(); - break; - case XenServer: - templateName = KubernetesClusterXenserverTemplateName.value(); - break; + public VMTemplateVO getKubernetesServiceTemplate(DataCenter dataCenter, Hypervisor.HypervisorType hypervisorType) { + VMTemplateVO template = templateDao.findSystemVMReadyTemplate(dataCenter.getId(), hypervisorType); + if (template == null) { + throw new CloudRuntimeException("Not able to find the System templates or not downloaded in zone " + dataCenter.getId()); } - return templateDao.findValidByTemplateName(templateName); + return template; } private boolean validateIsolatedNetwork(Network network, int clusterTotalNodeCount) { @@ -482,7 +415,7 @@ private boolean validateServiceOffering(final ServiceOffering serviceOffering, f throw new InvalidParameterValueException(String.format("Custom service offerings are not supported for creating clusters, service offering ID: %s", serviceOffering.getUuid())); } if (serviceOffering.getCpu() < MIN_KUBERNETES_CLUSTER_NODE_CPU || serviceOffering.getRamSize() < MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE) { - throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes cluster template(CoreOS) needs minimum %d vCPUs and %d MB RAM", serviceOffering.getUuid(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE)); + throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes cluster template needs minimum %d vCPUs and %d MB RAM", serviceOffering.getUuid(), MIN_KUBERNETES_CLUSTER_NODE_CPU, MIN_KUBERNETES_CLUSTER_NODE_RAM_SIZE)); } if (serviceOffering.getCpu() < version.getMinimumCpu()) { throw new InvalidParameterValueException(String.format("Kubernetes cluster cannot be created with service offering ID: %s, Kubernetes version ID: %s needs minimum %d 
vCPUs", serviceOffering.getUuid(), version.getUuid(), version.getMinimumCpu())); @@ -634,6 +567,7 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes response.setIpAddressId(ipAddresses.get(0).getUuid()); } } + List vmResponses = new ArrayList(); List vmList = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); ResponseView respView = ResponseView.Restricted; @@ -655,6 +589,9 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes response.setHasAnnotation(annotationDao.hasAnnotations(kubernetesCluster.getUuid(), AnnotationService.EntityType.KUBERNETES_CLUSTER.name(), accountService.isRootAdmin(caller.getId()))); response.setVirtualMachines(vmResponses); + response.setAutoscalingEnabled(kubernetesCluster.getAutoscalingEnabled()); + response.setMinSize(kubernetesCluster.getMinSize()); + response.setMaxSize(kubernetesCluster.getMaxSize()); return response; } @@ -678,6 +615,7 @@ private void validateKubernetesClusterCreateParameters(final CreateKubernetesClu final String sshKeyPair = cmd.getSSHKeyPairName(); final Long controlNodeCount = cmd.getControlNodes(); final Long clusterSize = cmd.getClusterSize(); + final long totalNodeCount = controlNodeCount + clusterSize; final String dockerRegistryUserName = cmd.getDockerRegistryUserName(); final String dockerRegistryPassword = cmd.getDockerRegistryPassword(); final String dockerRegistryUrl = cmd.getDockerRegistryUrl(); @@ -689,14 +627,20 @@ private void validateKubernetesClusterCreateParameters(final CreateKubernetesClu throw new InvalidParameterValueException("Invalid name for the Kubernetes cluster name:" + name); } - if (controlNodeCount < 1 || controlNodeCount > 100) { + if (controlNodeCount < 1) { throw new InvalidParameterValueException("Invalid cluster control nodes count: " + controlNodeCount); } - if (clusterSize < 1 || clusterSize > 100) { + if (clusterSize < 1) { throw new InvalidParameterValueException("Invalid cluster size: " + clusterSize); } + int maxClusterSize = KubernetesMaxClusterSize.valueIn(owner.getId()); + if (totalNodeCount > maxClusterSize) { + throw new InvalidParameterValueException( + String.format("Maximum cluster size can not exceed %d. 
Please contact your administrator", maxClusterSize)); + } + DataCenter zone = dataCenterDao.findById(zoneId); if (zone == null) { throw new InvalidParameterValueException("Unable to find zone by ID: " + zoneId); @@ -870,29 +814,89 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd final Long kubernetesClusterId = cmd.getId(); final Long serviceOfferingId = cmd.getServiceOfferingId(); final Long clusterSize = cmd.getClusterSize(); + final List nodeIds = cmd.getNodeIds(); + final Boolean isAutoscalingEnabled = cmd.isAutoscalingEnabled(); + final Long minSize = cmd.getMinSize(); + final Long maxSize = cmd.getMaxSize(); + if (kubernetesClusterId == null || kubernetesClusterId < 1L) { throw new InvalidParameterValueException("Invalid Kubernetes cluster ID"); } + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(kubernetesClusterId); if (kubernetesCluster == null || kubernetesCluster.getRemoved() != null) { throw new InvalidParameterValueException("Invalid Kubernetes cluster ID"); } + final DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); if (zone == null) { logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName())); } + if (serviceOfferingId == null && clusterSize == null && nodeIds == null && isAutoscalingEnabled == null) { + throw new InvalidParameterValueException(String.format("Kubernetes cluster %s cannot be scaled, either service offering or cluster size or nodeids to be removed or autoscaling must be passed", kubernetesCluster.getName())); + } + Account caller = CallContext.current().getCallingAccount(); accountManager.checkAccess(caller, SecurityChecker.AccessType.OperateEntry, false, kubernetesCluster); - if (serviceOfferingId == null && clusterSize == null) { - throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled, either a new service offering or a new cluster size must be passed", kubernetesCluster.getName())); - } - final KubernetesSupportedVersion clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); if (clusterVersion == null) { throw new CloudRuntimeException(String.format("Invalid Kubernetes version associated with Kubernetes cluster : %s", kubernetesCluster.getName())); } + List validClusterStates = Arrays.asList(KubernetesCluster.State.Created, KubernetesCluster.State.Running, KubernetesCluster.State.Stopped); + if (!(validClusterStates.contains(kubernetesCluster.getState()))) { + throw new PermissionDeniedException(String.format("Kubernetes cluster %s is in %s state and can not be scaled", kubernetesCluster.getName(), kubernetesCluster.getState().toString())); + } + + int maxClusterSize = KubernetesMaxClusterSize.valueIn(kubernetesCluster.getAccountId()); + if (isAutoscalingEnabled != null && isAutoscalingEnabled) { + if (clusterSize != null || serviceOfferingId != null || nodeIds != null) { + throw new InvalidParameterValueException("Autoscaling can not be passed along with nodeids or clustersize or service offering"); + } + + if (!KubernetesVersionManagerImpl.versionSupportsAutoscaling(clusterVersion)) { + throw new InvalidParameterValueException(String.format("Autoscaling requires Kubernetes Version %s or above", + KubernetesVersionManagerImpl.MINIMUN_AUTOSCALER_SUPPORTED_VERSION )); + } + + validateEndpointUrl(); + + if (minSize == null || maxSize == null) { + throw new InvalidParameterValueException("Autoscaling requires minsize and maxsize to be 
passed"); + } + if (minSize < 1) { + throw new InvalidParameterValueException("Minsize must be at least than 1"); + } + if (maxSize <= minSize) { + throw new InvalidParameterValueException("Maxsize must be greater than minsize"); + } + if (maxSize + kubernetesCluster.getControlNodeCount() > maxClusterSize) { + throw new InvalidParameterValueException( + String.format("Maximum cluster size can not exceed %d. Please contact your administrator", maxClusterSize)); + } + } + + if (nodeIds != null) { + if (clusterSize != null || serviceOfferingId != null) { + throw new InvalidParameterValueException("nodeids can not be passed along with clustersize or service offering"); + } + List nodes = kubernetesClusterVmMapDao.listByClusterIdAndVmIdsIn(kubernetesCluster.getId(), nodeIds); + // Do all the nodes exist ? + if (nodes == null || nodes.size() != nodeIds.size()) { + throw new InvalidParameterValueException("Invalid node ids"); + } + // Ensure there's always a control node + long controleNodesToRemove = nodes.stream().filter(x -> x.isControlNode()).count(); + if (controleNodesToRemove >= kubernetesCluster.getControlNodeCount()) { + throw new InvalidParameterValueException("Can not remove all control nodes from a cluster"); + } + // Ensure there's always a node + long nodesToRemove = nodes.stream().filter(x -> !x.isControlNode()).count(); + if (nodesToRemove >= kubernetesCluster.getNodeCount()) { + throw new InvalidParameterValueException("Can not remove all nodes from a cluster"); + } + } ServiceOffering serviceOffering = null; if (serviceOfferingId != null) { @@ -924,12 +928,6 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd } } - if (!(kubernetesCluster.getState().equals(KubernetesCluster.State.Created) || - kubernetesCluster.getState().equals(KubernetesCluster.State.Running) || - kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped))) { - throw new PermissionDeniedException(String.format("Kubernetes cluster : %s is in %s state", kubernetesCluster.getName(), kubernetesCluster.getState().toString())); - } - if (clusterSize != null) { if (kubernetesCluster.getState().equals(KubernetesCluster.State.Stopped)) { // Cannot scale stopped cluster currently for cluster size throw new PermissionDeniedException(String.format("Kubernetes cluster : %s is in %s state", kubernetesCluster.getName(), kubernetesCluster.getState().toString())); @@ -937,6 +935,10 @@ private void validateKubernetesClusterScaleParameters(ScaleKubernetesClusterCmd if (clusterSize < 1) { throw new InvalidParameterValueException(String.format("Kubernetes cluster : %s cannot be scaled for size, %d", kubernetesCluster.getName(), clusterSize)); } + if (clusterSize + kubernetesCluster.getControlNodeCount() > maxClusterSize) { + throw new InvalidParameterValueException( + String.format("Maximum cluster size can not exceed %d. 
Please contact your administrator", maxClusterSize)); + } if (clusterSize > kubernetesCluster.getNodeCount()) { // Upscale VMTemplateVO template = templateDao.findById(kubernetesCluster.getTemplateId()); if (template == null) { @@ -982,8 +984,8 @@ private void validateKubernetesClusterUpgradeParameters(UpgradeKubernetesCluster } KubernetesSupportedVersionVO clusterVersion = kubernetesSupportedVersionDao.findById(kubernetesCluster.getKubernetesVersionId()); if (clusterVersion == null || clusterVersion.getRemoved() != null) { - throw new InvalidParameterValueException(String.format("Invalid Kubernetes version associated with cluster ID: %s", - kubernetesCluster.getUuid())); + throw new InvalidParameterValueException(String.format("Invalid Kubernetes version associated with cluster : %s", + kubernetesCluster.getName())); } final ServiceOffering serviceOffering = serviceOfferingDao.findByIdIncludingRemoved(kubernetesCluster.getServiceOfferingId()); if (serviceOffering == null) { @@ -1050,7 +1052,7 @@ public KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) } final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)controlNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId()); - final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(deployDestination.getCluster().getHypervisorType()); + final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(zone, deployDestination.getCluster().getHypervisorType()); final long cores = serviceOffering.getCpu() * (controlNodeCount + clusterSize); final long memory = serviceOffering.getRamSize() * (controlNodeCount + clusterSize); @@ -1115,7 +1117,7 @@ public boolean startKubernetesCluster(long kubernetesClusterId, boolean onCreate logAndThrow(Level.WARN, String.format("Unable to find zone for Kubernetes cluster : %s", kubernetesCluster.getName())); } KubernetesClusterStartWorker startWorker = - new KubernetesClusterStartWorker(kubernetesCluster, this); + new KubernetesClusterStartWorker(kubernetesCluster, this); startWorker = ComponentContext.inject(startWorker); if (onCreate) { // Start for Kubernetes cluster in 'Created' state @@ -1279,9 +1281,20 @@ public boolean scaleKubernetesCluster(ScaleKubernetesClusterCmd cmd) throws Clou logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } validateKubernetesClusterScaleParameters(cmd); + + KubernetesClusterVO kubernetesCluster = kubernetesClusterDao.findById(cmd.getId()); + Account owner = accountService.getActiveAccountById(kubernetesCluster.getAccountId()); + String[] keys = getServiceUserKeys(owner); KubernetesClusterScaleWorker scaleWorker = - new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()), - serviceOfferingDao.findById(cmd.getServiceOfferingId()), cmd.getClusterSize(), this); + new KubernetesClusterScaleWorker(kubernetesClusterDao.findById(cmd.getId()), + serviceOfferingDao.findById(cmd.getServiceOfferingId()), + cmd.getClusterSize(), + cmd.getNodeIds(), + cmd.isAutoscalingEnabled(), + cmd.getMinSize(), + cmd.getMaxSize(), + this); + scaleWorker.setKeys(keys); scaleWorker = ComponentContext.inject(scaleWorker); return scaleWorker.scaleCluster(); } @@ -1291,13 +1304,14 @@ public boolean upgradeKubernetesCluster(UpgradeKubernetesClusterCmd cmd) throws if (!KubernetesServiceEnabled.value()) { logAndThrow(Level.ERROR, "Kubernetes Service plugin is disabled"); } + validateKubernetesClusterUpgradeParameters(cmd); KubernetesClusterVO kubernetesCluster = 
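Taken together, the scale validation above makes the new autoscaling, node-removal and resize inputs mutually exclusive. A compact restatement of just those combination rules (illustrative helper; the class and method names are assumptions, and the real checks, including state, version and size limits, live in validateKubernetesClusterScaleParameters):

import java.util.List;

import com.cloud.exception.InvalidParameterValueException;

class ScaleParamRulesSketch {
    // Mirrors the parameter-combination checks: at least one input is required,
    // autoscaling excludes all other inputs, and node removal excludes resize inputs.
    static void check(Long serviceOfferingId, Long clusterSize, List<Long> nodeIds, Boolean autoscalingEnabled) {
        if (serviceOfferingId == null && clusterSize == null && nodeIds == null && autoscalingEnabled == null) {
            throw new InvalidParameterValueException("either service offering, cluster size, node ids or autoscaling must be passed");
        }
        if (Boolean.TRUE.equals(autoscalingEnabled) && (clusterSize != null || serviceOfferingId != null || nodeIds != null)) {
            throw new InvalidParameterValueException("autoscaling can not be passed along with node ids, cluster size or service offering");
        }
        if (nodeIds != null && (clusterSize != null || serviceOfferingId != null)) {
            throw new InvalidParameterValueException("node ids can not be passed along with cluster size or service offering");
        }
    }
}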
kubernetesClusterDao.findById(cmd.getId()); Account owner = accountService.getActiveAccountById(kubernetesCluster.getAccountId()); String[] keys = getServiceUserKeys(owner); KubernetesClusterUpgradeWorker upgradeWorker = - new KubernetesClusterUpgradeWorker(kubernetesClusterDao.findById(cmd.getId()), - kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()), this, keys); + new KubernetesClusterUpgradeWorker(kubernetesClusterDao.findById(cmd.getId()), + kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()), this, keys); upgradeWorker = ComponentContext.inject(upgradeWorker); return upgradeWorker.upgradeCluster(); } @@ -1501,8 +1515,8 @@ boolean isClusterVMsInDesiredState(KubernetesCluster kubernetesCluster, VirtualM // check cluster is running at desired capacity include control nodes as well if (clusterVMs.size() < kubernetesCluster.getTotalNodeCount()) { if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Found only %d VMs in the Kubernetes cluster ID: %s while expected %d VMs to be in state: %s", - clusterVMs.size(), kubernetesCluster.getUuid(), kubernetesCluster.getTotalNodeCount(), state.toString())); + LOGGER.debug(String.format("Found only %d VMs in the Kubernetes cluster %s while expected %d VMs to be in state: %s", + clusterVMs.size(), kubernetesCluster.getName(), kubernetesCluster.getTotalNodeCount(), state.toString())); } return false; } @@ -1578,16 +1592,13 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] { - KubernetesServiceEnabled, - KubernetesClusterHyperVTemplateName, - KubernetesClusterKVMTemplateName, - KubernetesClusterVMwareTemplateName, - KubernetesClusterXenserverTemplateName, - KubernetesClusterNetworkOffering, - KubernetesClusterStartTimeout, - KubernetesClusterScaleTimeout, - KubernetesClusterUpgradeTimeout, - KubernetesClusterExperimentalFeaturesEnabled + KubernetesServiceEnabled, + KubernetesClusterNetworkOffering, + KubernetesClusterStartTimeout, + KubernetesClusterScaleTimeout, + KubernetesClusterUpgradeTimeout, + KubernetesClusterExperimentalFeaturesEnabled, + KubernetesMaxClusterSize }; } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java index 07939ddb101a..138889a2fb37 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java @@ -41,26 +41,6 @@ public interface KubernetesClusterService extends PluggableService, Configurable "false", "Indicates whether Kubernetes Service plugin is enabled or not. 
Management server restart needed on change", false); - static final ConfigKey KubernetesClusterHyperVTemplateName = new ConfigKey("Advanced", String.class, - "cloud.kubernetes.cluster.template.name.hyperv", - "Kubernetes-Service-Template-HyperV", - "Name of the template to be used for creating Kubernetes cluster nodes on HyperV", - true); - static final ConfigKey KubernetesClusterKVMTemplateName = new ConfigKey("Advanced", String.class, - "cloud.kubernetes.cluster.template.name.kvm", - "Kubernetes-Service-Template-KVM", - "Name of the template to be used for creating Kubernetes cluster nodes on KVM", - true); - static final ConfigKey KubernetesClusterVMwareTemplateName = new ConfigKey("Advanced", String.class, - "cloud.kubernetes.cluster.template.name.vmware", - "Kubernetes-Service-Template-VMware", - "Name of the template to be used for creating Kubernetes cluster nodes on VMware", - true); - static final ConfigKey KubernetesClusterXenserverTemplateName = new ConfigKey("Advanced", String.class, - "cloud.kubernetes.cluster.template.name.xenserver", - "Kubernetes-Service-Template-Xenserver", - "Name of the template to be used for creating Kubernetes cluster nodes on Xenserver", - true); static final ConfigKey KubernetesClusterNetworkOffering = new ConfigKey("Advanced", String.class, "cloud.kubernetes.cluster.network.offering", "DefaultNetworkOfferingforKubernetesService", @@ -86,6 +66,12 @@ public interface KubernetesClusterService extends PluggableService, Configurable "false", "Indicates whether experimental feature for Kubernetes cluster such as Docker private registry are enabled or not", true); + static final ConfigKey KubernetesMaxClusterSize = new ConfigKey("Advanced", Integer.class, + "cloud.kubernetes.cluster.max.size", + "10", + "Maximum size of the kubernetes cluster.", + true, ConfigKey.Scope.Account); + KubernetesCluster findById(final Long id); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java index b6a37d9607c8..90b368de1192 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java @@ -93,6 +93,15 @@ public class KubernetesClusterVO implements KubernetesCluster { @Column(name = "endpoint") private String endpoint; + @Column(name = "autoscaling_enabled") + private boolean autoscalingEnabled; + + @Column(name = "minsize") + private Long minSize; + + @Column(name = "maxsize") + private Long maxSize; + @Column(name = GenericDao.CREATED_COLUMN) private Date created; @@ -303,6 +312,33 @@ public Date getCreated() { return created; } + @Override + public boolean getAutoscalingEnabled() { + return autoscalingEnabled; + } + + public void setAutoscalingEnabled(boolean enabled) { + this.autoscalingEnabled = enabled; + } + + @Override + public Long getMinSize() { + return minSize; + } + + public void setMinSize(Long minSize) { + this.minSize = minSize; + } + + @Override + public Long getMaxSize() { + return maxSize; + } + + public void setMaxSize(Long maxSize) { + this.maxSize = maxSize; + } + public KubernetesClusterVO() { this.uuid = UUID.randomUUID().toString(); } @@ -333,6 +369,16 @@ public KubernetesClusterVO(String name, String description, long zoneId, long ku this.checkForGc = false; } + public KubernetesClusterVO(String name, 
String description, long zoneId, long kubernetesVersionId, long serviceOfferingId, long templateId, + long networkId, long domainId, long accountId, long controlNodeCount, long nodeCount, State state, String keyPair, long cores, + long memory, Long nodeRootDiskSize, String endpoint, boolean autoscalingEnabled, Long minSize, Long maxSize) { + this(name, description, zoneId, kubernetesVersionId, serviceOfferingId, templateId, networkId, domainId, accountId, controlNodeCount, + nodeCount, state, keyPair, cores, memory, nodeRootDiskSize, endpoint); + this.autoscalingEnabled = autoscalingEnabled; + this.minSize = minSize; + this.maxSize = maxSize; + } + @Override public Class getEntityType() { return KubernetesCluster.class; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java index c7399202348f..9a35fccdf8d9 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMap.java @@ -27,4 +27,5 @@ public interface KubernetesClusterVmMap { long getId(); long getClusterId(); long getVmId(); + boolean isControlNode(); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java index edb06e79534a..f6126f01be5b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVmMapVO.java @@ -28,6 +28,30 @@ @Table(name = "kubernetes_cluster_vm_map") public class KubernetesClusterVmMapVO implements KubernetesClusterVmMap { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + long id; + + @Column(name = "cluster_id") + long clusterId; + + @Column(name = "vm_id") + long vmId; + + @Column(name = "control_node") + boolean controlNode; + + public KubernetesClusterVmMapVO() { + } + + public KubernetesClusterVmMapVO(long clusterId, long vmId, boolean controlNode) { + this.vmId = vmId; + this.clusterId = clusterId; + this.controlNode = controlNode; + } + + @Override public long getId() { return id; @@ -36,11 +60,9 @@ public long getId() { @Override public long getClusterId() { return clusterId; - } public void setClusterId(long clusterId) { - this.clusterId = clusterId; } @@ -50,27 +72,15 @@ public long getVmId() { } public void setVmId(long vmId) { - this.vmId = vmId; } - @Id - @GeneratedValue(strategy = GenerationType.IDENTITY) - @Column(name = "id") - long id; - - @Column(name = "cluster_id") - long clusterId; - - @Column(name = "vm_id") - long vmId; - - public KubernetesClusterVmMapVO() { - + @Override + public boolean isControlNode() { + return controlNode; } - public KubernetesClusterVmMapVO(long clusterId, long vmId) { - this.vmId = vmId; - this.clusterId = clusterId; + public void setControlNode(boolean controlNode) { + this.controlNode = controlNode; } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 5426e9cd242e..62d45a3e028d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -38,8 +38,10 @@ import org.apache.log4j.Level; import org.apache.log4j.Logger; +import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.VlanDao; +import com.cloud.hypervisor.Hypervisor; import com.cloud.kubernetes.cluster.KubernetesCluster; import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; @@ -59,6 +61,7 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.Storage; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.LaunchPermissionDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.template.TemplateApiService; import com.cloud.template.VirtualMachineTemplate; @@ -76,6 +79,7 @@ import com.cloud.utils.fsm.StateMachine2; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.UserVmService; +import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.dao.UserVmDao; import com.google.common.base.Strings; @@ -119,6 +123,10 @@ public class KubernetesClusterActionWorker { protected UserVmService userVmService; @Inject protected VlanDao vlanDao; + @Inject + protected VirtualMachineManager itMgr; + @Inject + protected LaunchPermissionDao launchPermissionDao; protected KubernetesClusterDao kubernetesClusterDao; protected KubernetesClusterVmMapDao kubernetesClusterVmMapDao; @@ -135,9 +143,11 @@ public class KubernetesClusterActionWorker { protected final String deploySecretsScriptFilename = "deploy-cloudstack-secret"; protected final String deployProviderScriptFilename = "deploy-provider"; + protected final String autoscaleScriptFilename = "autoscale-kube-cluster"; protected final String scriptPath = "/opt/bin/"; protected File deploySecretsScriptFile; protected File deployProviderScriptFile; + protected File autoscaleScriptFile; protected KubernetesClusterManagerImpl manager; protected String[] keys; @@ -152,7 +162,12 @@ protected KubernetesClusterActionWorker(final KubernetesCluster kubernetesCluste protected void init() { this.owner = accountDao.findById(kubernetesCluster.getAccountId()); - this.clusterTemplate = templateDao.findById(kubernetesCluster.getTemplateId()); + long zoneId = this.kubernetesCluster.getZoneId(); + long templateId = this.kubernetesCluster.getTemplateId(); + DataCenterVO dataCenterVO = dataCenterDao.findById(zoneId); + VMTemplateVO template = templateDao.findById(templateId); + Hypervisor.HypervisorType type = template.getHypervisorType(); + this.clusterTemplate = manager.getKubernetesServiceTemplate(dataCenterVO, type); this.sshKeyFile = getManagementServerSshPublicKeyFile(); } @@ -193,7 +208,7 @@ protected void logMessage(final Level logLevel, final String message, final Exce } protected void logTransitStateDetachIsoAndThrow(final Level logLevel, final String message, final KubernetesCluster kubernetesCluster, - final List clusterVMs, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException { + final List clusterVMs, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException { logMessage(logLevel, 
message, e); stateTransitTo(kubernetesCluster.getId(), event); detachIsoKubernetesVMs(clusterVMs); @@ -203,11 +218,19 @@ protected void logTransitStateDetachIsoAndThrow(final Level logLevel, final Stri throw new CloudRuntimeException(message, e); } + protected void deleteTemplateLaunchPermission() { + if (clusterTemplate != null && owner != null) { + LOGGER.info("Revoking launch permission for systemVM template"); + launchPermissionDao.removePermissions(clusterTemplate.getId(), Collections.singletonList(owner.getId())); + } + } + protected void logTransitStateAndThrow(final Level logLevel, final String message, final Long kubernetesClusterId, final KubernetesCluster.Event event, final Exception e) throws CloudRuntimeException { logMessage(logLevel, message, e); if (kubernetesClusterId != null && event != null) { stateTransitTo(kubernetesClusterId, event); } + deleteTemplateLaunchPermission(); if (e == null) { throw new CloudRuntimeException(message); } @@ -235,11 +258,11 @@ protected File getManagementServerSshPublicKeyFile() { return new File(keyFile); } - protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId) { + protected KubernetesClusterVmMapVO addKubernetesClusterVm(final long kubernetesClusterId, final long vmId, boolean isControlNode) { return Transaction.execute(new TransactionCallback() { @Override public KubernetesClusterVmMapVO doInTransaction(TransactionStatus status) { - KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId); + KubernetesClusterVmMapVO newClusterVmMap = new KubernetesClusterVmMapVO(kubernetesClusterId, vmId, isControlNode); kubernetesClusterVmMapDao.persist(newClusterVmMap); return newClusterVmMap; } @@ -332,6 +355,7 @@ protected void attachIsoKubernetesVMs(List clusterVMs, final KubernetesS if (!iso.getState().equals(VirtualMachineTemplate.State.Active)) { logTransitStateAndThrow(Level.ERROR, String.format("Unable to attach ISO to Kubernetes cluster : %s. 
Binaries ISO not active.", kubernetesCluster.getName()), kubernetesCluster.getId(), failedEvent); } + for (UserVm vm : clusterVMs) { try { templateService.attachIso(iso.getId(), vm.getId(), true); @@ -368,12 +392,13 @@ protected void detachIsoKubernetesVMs(List clusterVMs) { protected List getKubernetesClusterVMMaps() { List clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); - if (!CollectionUtils.isEmpty(clusterVMs)) { - clusterVMs.sort((t1, t2) -> (int)((t1.getId() - t2.getId())/Math.abs(t1.getId() - t2.getId()))); - } return clusterVMs; } + protected List getKubernetesClusterVMMapsForNodes(List nodeIds) { + return kubernetesClusterVmMapDao.listByClusterIdAndVmIdsIn(kubernetesCluster.getId(), nodeIds); + } + protected List getKubernetesClusterVMs() { List vmList = new ArrayList<>(); List clusterVMs = getKubernetesClusterVMMaps(); @@ -433,18 +458,20 @@ protected File retrieveScriptFile(String filename) { protected void retrieveScriptFiles() { deploySecretsScriptFile = retrieveScriptFile(deploySecretsScriptFilename); deployProviderScriptFile = retrieveScriptFile(deployProviderScriptFilename); + autoscaleScriptFile = retrieveScriptFile(autoscaleScriptFilename); } protected void copyScripts(String nodeAddress, final int sshPort) { + copyScriptFile(nodeAddress, sshPort, deploySecretsScriptFile, deploySecretsScriptFilename); + copyScriptFile(nodeAddress, sshPort, deployProviderScriptFile, deployProviderScriptFilename); + copyScriptFile(nodeAddress, sshPort, autoscaleScriptFile, autoscaleScriptFilename); + } + + protected void copyScriptFile(String nodeAddress, final int sshPort, File file, String desitnation) { try { SshHelper.scpTo(nodeAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, - "~/", deploySecretsScriptFile.getAbsolutePath(), "0755"); - SshHelper.scpTo(nodeAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, - "~/", deployProviderScriptFile.getAbsolutePath(), "0755"); - String cmdStr = String.format("sudo mv ~/%s %s/%s", deploySecretsScriptFile.getName(), scriptPath, deploySecretsScriptFilename); - SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, - cmdStr, 10000, 10000, 10 * 60 * 1000); - cmdStr = String.format("sudo mv ~/%s %s/%s", deployProviderScriptFile.getName(), scriptPath, deployProviderScriptFilename); + "~/", file.getAbsolutePath(), "0755"); + String cmdStr = String.format("sudo mv ~/%s %s/%s", file.getName(), scriptPath, desitnation); SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, cmdStr, 10000, 10000, 10 * 60 * 1000); } catch (Exception e) { @@ -452,6 +479,33 @@ protected void copyScripts(String nodeAddress, final int sshPort) { } } + protected boolean taintControlNodes() { + StringBuilder commands = new StringBuilder(); + List vmMapVOList = getKubernetesClusterVMMaps(); + for(KubernetesClusterVmMapVO vmMap :vmMapVOList) { + if(!vmMap.isControlNode()) { + continue; + } + String name = userVmDao.findById(vmMap.getVmId()).getDisplayName().toLowerCase(); + String command = String.format("sudo /opt/bin/kubectl annotate node %s cluster-autoscaler.kubernetes.io/scale-down-disabled=true ; ", name); + commands.append(command); + } + try { + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, 
commands.toString(), 10000, 10000, 60000); + return result.first(); + } catch (Exception e) { + String msg = String.format("Failed to taint control nodes on : %s : %s", kubernetesCluster.getName(), e.getMessage()); + logMessage(Level.ERROR, msg, e); + return false; + } + } + protected boolean deployProvider() { Network network = networkDao.findById(kubernetesCluster.getNetworkId()); // Since the provider creates IP addresses, don't deploy it unless the underlying network supports it diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java index e60807ec56d7..595aab939994 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterResourceModifierActionWorker.java @@ -17,6 +17,7 @@ package com.cloud.kubernetes.cluster.actionworkers; +import java.io.File; import java.io.IOException; import java.lang.reflect.Field; import java.util.ArrayList; @@ -47,7 +48,7 @@ import com.cloud.exception.InsufficientServerCapacityException; import com.cloud.exception.ManagementServerException; import com.cloud.exception.NetworkRuleConflictException; -import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -56,6 +57,7 @@ import com.cloud.kubernetes.cluster.KubernetesCluster; import com.cloud.kubernetes.cluster.KubernetesClusterDetailsVO; import com.cloud.kubernetes.cluster.KubernetesClusterManagerImpl; +import com.cloud.kubernetes.cluster.KubernetesClusterVO; import com.cloud.kubernetes.cluster.utils.KubernetesClusterUtil; import com.cloud.network.IpAddress; import com.cloud.network.Network; @@ -72,8 +74,9 @@ import com.cloud.offering.ServiceOffering; import com.cloud.resource.ResourceManager; import com.cloud.storage.Volume; -import com.cloud.storage.VolumeApiService; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.LaunchPermissionDao; +import com.cloud.storage.VolumeApiService; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.SSHKeyPairVO; @@ -82,11 +85,13 @@ import com.cloud.utils.StringUtils; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.db.TransactionCallbackWithException; import com.cloud.utils.db.TransactionStatus; -import com.cloud.utils.exception.ExecutionException; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; +import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.Nic; import com.cloud.vm.UserVmManager; import com.cloud.vm.VirtualMachine; @@ -124,6 +129,8 @@ public class KubernetesClusterResourceModifierActionWorker extends KubernetesClu @Inject protected UserVmManager userVmManager; @Inject + protected LaunchPermissionDao launchPermissionDao; + @Inject protected VolumeApiService volumeService; @Inject protected VolumeDao volumeDao; @@ -150,7 +157,7 @@ private String getKubernetesNodeConfig(final String joinIp, 
final boolean ejectI if (!Strings.isNullOrEmpty(sshKeyPair)) { SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); if (sshkp != null) { - pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; + pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } k8sNodeConfig = k8sNodeConfig.replace(sshPubKey, pubKey); @@ -181,7 +188,7 @@ private String getKubernetesNodeConfig(final String joinIp, final boolean ejectI if (!Strings.isNullOrEmpty(dockerUserName) && !Strings.isNullOrEmpty(dockerPassword)) { // do write file for /.docker/config.json through the code instead of k8s-node.yml as we can no make a section // optional or conditionally applied - String dockerConfigString = "write-files:\n" + + String dockerConfigString = "write_files:\n" + " - path: /.docker/config.json\n" + " owner: core:core\n" + " permissions: '0644'\n" + @@ -194,7 +201,7 @@ private String getKubernetesNodeConfig(final String joinIp, final boolean ejectI " }\n" + " }\n" + " }"; - k8sNodeConfig = k8sNodeConfig.replace("write-files:", dockerConfigString); + k8sNodeConfig = k8sNodeConfig.replace("write_files:", dockerConfigString); final String dockerUrlKey = "{{docker.url}}"; final String dockerAuthKey = "{{docker.secret}}"; final String dockerEmailKey = "{{docker.email}}"; @@ -307,12 +314,11 @@ protected void startKubernetesVM(final UserVm vm) throws ManagementServerExcepti Field f = startVm.getClass().getDeclaredField("id"); f.setAccessible(true); f.set(startVm, vm.getId()); - userVmService.startVirtualMachine(startVm); + itMgr.advanceStart(vm.getUuid(), null, null); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Started VM : %s in the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); } - } catch (IllegalAccessException | NoSuchFieldException | ExecutionException | - ResourceUnavailableException | ResourceAllocationException | InsufficientCapacityException ex) { + } catch (IllegalAccessException | NoSuchFieldException | OperationTimedoutException | ResourceUnavailableException | InsufficientCapacityException ex) { throw new ManagementServerException(String.format("Failed to start VM in the Kubernetes cluster : %s", kubernetesCluster.getName()), ex); } @@ -326,8 +332,8 @@ protected List provisionKubernetesClusterNodeVms(final long nodeCount, f ResourceUnavailableException, InsufficientCapacityException { List nodes = new ArrayList<>(); for (int i = offset + 1; i <= nodeCount; i++) { - UserVm vm = createKubernetesNode(publicIpAddress, i); - addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId()); + UserVm vm = createKubernetesNode(publicIpAddress); + addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), false); if (kubernetesCluster.getNodeRootDiskSize() > 0) { resizeNodeVolume(vm); } @@ -349,7 +355,7 @@ protected List provisionKubernetesClusterNodeVms(final long nodeCount, f return provisionKubernetesClusterNodeVms(nodeCount, 0, publicIpAddress); } - protected UserVm createKubernetesNode(String joinIp, int nodeInstance) throws ManagementServerException, + protected UserVm createKubernetesNode(String joinIp) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { UserVm nodeVm = null; DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); @@ -363,7 +369,8 @@ protected UserVm createKubernetesNode(String joinIp, int nodeInstance) throws Ma if (rootDiskSize > 0) { customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); } - String hostName = 
getKubernetesClusterNodeAvailableName(String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, nodeInstance)); + String suffix = Long.toHexString(System.currentTimeMillis()); + String hostName = String.format("%s-node-%s", kubernetesClusterNodeNamePrefix, suffix); String k8sNodeConfig = null; try { k8sNodeConfig = getKubernetesNodeConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); @@ -374,7 +381,7 @@ protected UserVm createKubernetesNode(String joinIp, int nodeInstance) throws Ma nodeVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), - null, addrs, null, null, null, customParameterMap, null, null, null, null, true); + null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Created node VM : %s, %s in the Kubernetes cluster : %s", hostName, nodeVm.getUuid(), kubernetesCluster.getName())); } @@ -453,7 +460,6 @@ protected void provisionSshPortForwardingRules(IpAddress publicIp, Network netwo final Ip vmIp = new Ip(vmNic.getIPv4Address()); final long vmIdFinal = vmId; final int srcPortFinal = firewallRuleSourcePortStart + i; - PortForwardingRuleVO pfRule = Transaction.execute(new TransactionCallbackWithException() { @Override public PortForwardingRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { @@ -519,6 +525,17 @@ protected void removePortForwardingRules(final IpAddress publicIp, final Network } } + protected void removePortForwardingRules(final IpAddress publicIp, final Network network, final Account account, int startPort, int endPort) + throws ResourceUnavailableException { + List pfRules = portForwardingRulesDao.listByNetwork(network.getId()); + for (PortForwardingRuleVO pfRule : pfRules) { + if (startPort <= pfRule.getSourcePortStart() && pfRule.getSourcePortStart() <= endPort) { + portForwardingRulesDao.remove(pfRule.getId()); + } + } + rulesService.applyPortForwardingRules(publicIp.getId(), account); + } + protected void removeLoadBalancingRule(final IpAddress publicIp, final Network network, final Account account, final int port) throws ResourceUnavailableException { List rules = loadBalancerDao.listByIpAddress(publicIp.getId()); @@ -548,13 +565,96 @@ protected String getKubernetesClusterNodeNamePrefix() { return prefix; } - protected String getKubernetesClusterNodeAvailableName(final String hostName) { - String name = hostName; - int suffix = 1; - while (vmInstanceDao.findVMByHostName(name) != null) { - name = String.format("%s-%d", hostName, suffix); - suffix++; + protected KubernetesClusterVO updateKubernetesClusterEntry(final Long cores, final Long memory, + final Long size, final Long serviceOfferingId, final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) { + return Transaction.execute(new TransactionCallback() { + @Override + public KubernetesClusterVO doInTransaction(TransactionStatus status) { + KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(kubernetesCluster.getId()); + if (cores != null) { + updatedCluster.setCores(cores); + } + if (memory != null) { + updatedCluster.setMemory(memory); + } + if (size != null) { + updatedCluster.setNodeCount(size); + } + if (serviceOfferingId != null) { + updatedCluster.setServiceOfferingId(serviceOfferingId); + } 
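// Illustrative usage (editorial sketch, not part of this patch; values are hypothetical): the
// partial-update helper being built here writes cores, memory, node count and offering only when
// the corresponding argument is non-null, while minSize/maxSize are always written (which is how
// disabling autoscaling clears them), all via createForUpdate()/persist() in one transaction.
//   updateKubernetesClusterEntry(null, null, null, null, true, 2L, 6L);   // enable autoscaling, bounds 2..6
// whereas the scale worker preserves the stored bounds on a plain resize, e.g.:
//   updateKubernetesClusterEntry(cores, memory, 5L, null, kubernetesCluster.getAutoscalingEnabled(),
//           kubernetesCluster.getMinSize(), kubernetesCluster.getMaxSize());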
+ if (autoscaleEnabled != null) { + updatedCluster.setAutoscalingEnabled(autoscaleEnabled.booleanValue()); + } + updatedCluster.setMinSize(minSize); + updatedCluster.setMaxSize(maxSize); + return kubernetesClusterDao.persist(updatedCluster); + } + }); + } + + private KubernetesClusterVO updateKubernetesClusterEntry(final Boolean autoscaleEnabled, final Long minSize, final Long maxSize) throws CloudRuntimeException { + KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(null, null, null, null, autoscaleEnabled, minSize, maxSize); + if (kubernetesClusterVO == null) { + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to update Kubernetes cluster", + kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + return kubernetesClusterVO; + } + + protected boolean autoscaleCluster(boolean enable, Long minSize, Long maxSize) { + if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.AutoscaleRequested); + } + + File pkFile = getManagementServerSshPublicKeyFile(); + Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); + publicIpAddress = publicIpSshPort.first(); + sshPort = publicIpSshPort.second(); + + try { + if (enable) { + String command = String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -M %d -m %d", + kubernetesCluster.getUuid(), maxSize, minSize); + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, command, 10000, 10000, 60000); + + // Maybe the file isn't present. Try and copy it + if (!result.first()) { + logMessage(Level.INFO, "Autoscaling files missing. Adding them now", null); + retrieveScriptFiles(); + copyScripts(publicIpAddress, sshPort); + + if (!createCloudStackSecret(keys)) { + logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup keys for Kubernetes cluster %s", + kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + } + + // If at first you don't succeed ... 
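// Editorial sketch (not part of this patch; the UUID and bounds are hypothetical): the command
// composed above, for a cluster with UUID "0b8d4f2e" and minSize=2 / maxSize=6, resolves to
//   sudo /opt/bin/autoscale-kube-cluster -i 0b8d4f2e -e -M 6 -m 2
// and is run over SSH on the control node. If the first attempt fails because the script or the
// CloudStack secret is missing, the files are copied and the same command is retried below.
// Disabling autoscaling later reuses the same script with only the -d flag:
//   sudo /opt/bin/autoscale-kube-cluster -d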
+ result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, command, 10000, 10000, 60000); + if (!result.first()) { + throw new CloudRuntimeException(result.second()); + } + } + updateKubernetesClusterEntry(true, minSize, maxSize); + } else { + Pair result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, + pkFile, null, String.format("sudo /opt/bin/autoscale-kube-cluster -d"), + 10000, 10000, 60000); + if (!result.first()) { + throw new CloudRuntimeException(result.second()); + } + updateKubernetesClusterEntry(false, null, null); + } + return true; + } catch (Exception e) { + String msg = String.format("Failed to autoscale Kubernetes cluster: %s : %s", kubernetesCluster.getName(), e.getMessage()); + logAndThrow(Level.ERROR, msg); + return false; + } finally { + // Deploying the autoscaler might fail but it can be deployed manually too, so no need to go to an alert state + stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); } - return name; } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index 3e32d8ebf4ce..0c2c17d3aa4c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -19,8 +19,10 @@ import java.io.File; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.stream.Collectors; import javax.inject.Inject; @@ -46,10 +48,9 @@ import com.cloud.network.Network; import com.cloud.network.rules.FirewallRule; import com.cloud.offering.ServiceOffering; +import com.cloud.storage.LaunchPermissionVO; import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.ssh.SshHelper; import com.cloud.vm.UserVmVO; @@ -65,18 +66,35 @@ public class KubernetesClusterScaleWorker extends KubernetesClusterResourceModif private ServiceOffering serviceOffering; private Long clusterSize; + private List nodeIds; private KubernetesCluster.State originalState; private Network network; + private Long minSize; + private Long maxSize; + private Boolean isAutoscalingEnabled; private long scaleTimeoutTime; public KubernetesClusterScaleWorker(final KubernetesCluster kubernetesCluster, final ServiceOffering serviceOffering, final Long clusterSize, + final List nodeIds, + final Boolean isAutoscalingEnabled, + final Long minSize, + final Long maxSize, final KubernetesClusterManagerImpl clusterManager) { super(kubernetesCluster, clusterManager); this.serviceOffering = serviceOffering; - this.clusterSize = clusterSize; + this.nodeIds = nodeIds; + this.isAutoscalingEnabled = isAutoscalingEnabled; + this.minSize = minSize; + this.maxSize = maxSize; this.originalState = kubernetesCluster.getState(); + if (this.nodeIds != null) { + this.clusterSize = kubernetesCluster.getNodeCount() - this.nodeIds.size(); + } else { + this.clusterSize = clusterSize; + } + } protected void init() { @@ -100,13 +118,12 @@ private void 
logTransitStateToFailedIfNeededAndThrow(final Level logLevel, final /** * Scale network rules for an existing Kubernetes cluster while scaling it * Open up firewall for SSH access from port NODES_DEFAULT_START_SSH_PORT to NODES_DEFAULT_START_SSH_PORT+n. - * Also remove port forwarding rules for removed virtual machines and create port-forwarding rule + * Also remove port forwarding rules for all virtual machines and re-create port-forwarding rule * to forward public IP traffic to all node VMs' private IP. * @param clusterVMIds - * @param removedVMIds * @throws ManagementServerException */ - private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, final List removedVMIds) throws ManagementServerException { + private void scaleKubernetesClusterNetworkRules(final List clusterVMIds) throws ManagementServerException { if (!Network.GuestType.Isolated.equals(network.getGuestType())) { if (LOGGER.isDebugEnabled()) { LOGGER.debug(String.format("Network : %s for Kubernetes cluster : %s is not an isolated network, therefore, no need for network rules", network.getName(), kubernetesCluster.getName())); @@ -124,48 +141,31 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned"); } int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd(); - final int scaledTotalNodeCount = clusterSize == null ? (int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getControlNodeCount()); + int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; // Provision new SSH firewall rules try { - provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1); + provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); if (LOGGER.isDebugEnabled()) { - LOGGER.debug(String.format("Provisioned firewall rule to open up port %d to %d on %s in Kubernetes cluster ID: %s", - CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1, publicIp.getAddress().addr(), kubernetesCluster.getUuid())); + LOGGER.debug(String.format("Provisioned firewall rule to open up port %d to %d on %s in Kubernetes cluster %s", + CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException e) { throw new ManagementServerException(String.format("Failed to activate SSH firewall rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } try { - removePortForwardingRules(publicIp, network, owner, removedVMIds); + removePortForwardingRules(publicIp, network, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, existingFirewallRuleSourcePortEnd); } catch (ResourceUnavailableException e) { throw new ManagementServerException(String.format("Failed to remove SSH port forwarding rules for removed VMs for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } try { - provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, existingFirewallRuleSourcePortEnd + 1); + provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT); } catch (ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes 
cluster : %s", kubernetesCluster.getName()), e); } } - private KubernetesClusterVO updateKubernetesClusterEntry(final long cores, final long memory, - final Long size, final Long serviceOfferingId) { - return Transaction.execute((TransactionCallback) status -> { - KubernetesClusterVO updatedCluster = kubernetesClusterDao.createForUpdate(kubernetesCluster.getId()); - updatedCluster.setCores(cores); - updatedCluster.setMemory(memory); - if (size != null) { - updatedCluster.setNodeCount(size); - } - if (serviceOfferingId != null) { - updatedCluster.setServiceOfferingId(serviceOfferingId); - } - kubernetesClusterDao.persist(updatedCluster); - return updatedCluster; - }); - } - private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, final ServiceOffering newServiceOffering) throws CloudRuntimeException { final ServiceOffering serviceOffering = newServiceOffering == null ? serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()) : newServiceOffering; @@ -173,10 +173,11 @@ private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, fin final long size = newSize == null ? kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getControlNodeCount()); final long cores = serviceOffering.getCpu() * size; final long memory = serviceOffering.getRamSize() * size; - KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId); + KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId, + kubernetesCluster.getAutoscalingEnabled(), kubernetesCluster.getMinSize(), kubernetesCluster.getMaxSize()); if (kubernetesClusterVO == null) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to update Kubernetes cluster", - kubernetesCluster.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to update Kubernetes cluster", + kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } return kubernetesClusterVO; } @@ -192,13 +193,13 @@ private boolean removeKubernetesClusterNode(final String ipAddress, final int po retryCounter++; try { Pair result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), + pkFile, null, String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), 10000, 10000, 60000); if (!result.first()) { LOGGER.warn(String.format("Draining node: %s on VM : %s in Kubernetes cluster : %s unsuccessful", hostName, userVm.getDisplayName(), kubernetesCluster.getName())); } else { result = SshHelper.sshExecute(ipAddress, port, CLUSTER_NODE_VM_USER, - pkFile, null, String.format("sudo kubectl delete node %s", hostName), + pkFile, null, String.format("sudo /opt/bin/kubectl delete node %s", hostName), 10000, 10000, 30000); if (result.first()) { return true; @@ -302,72 +303,78 @@ private void scaleKubernetesClusterOffering() throws CloudRuntimeException { kubernetesCluster = updateKubernetesClusterEntry(null, serviceOffering); } - private void scaleDownKubernetesClusterSize() throws CloudRuntimeException { - if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { - stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleDownRequested); - } - 
final List originalVmList = getKubernetesClusterVMMaps(); - int i = originalVmList.size() - 1; - List removedVmIds = new ArrayList<>(); - while (i >= kubernetesCluster.getControlNodeCount() + clusterSize) { - KubernetesClusterVmMapVO vmMapVO = originalVmList.get(i); + private void removeNodesFromCluster(List vmMaps) throws CloudRuntimeException { + for (KubernetesClusterVmMapVO vmMapVO : vmMaps) { UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); + LOGGER.info(String.format("Removing vm : %s from cluster %s", userVM.getDisplayName(), kubernetesCluster.getName())); if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, failed to remove Kubernetes node: %s running on VM : %s", kubernetesCluster.getName(), userVM.getHostName(), userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } - // For removing port-forwarding network rules - removedVmIds.add(userVM.getId()); try { UserVm vm = userVmService.destroyVm(userVM.getId(), true); if (!userVmManager.expunge(userVM, CallContext.current().getCallingUserId(), CallContext.current().getCallingAccount())) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to expunge VM '%s'." - , kubernetesCluster.getUuid() - , vm.getInstanceName()), - kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to expunge VM '%s'." + , kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } } catch (ResourceUnavailableException e) { - logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster ID: %s failed, unable to remove VM ID: %s" - , kubernetesCluster.getUuid() , userVM.getUuid()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); + logTransitStateAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster %s failed, unable to remove VM ID: %s", + kubernetesCluster.getName() , userVM.getDisplayName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); } kubernetesClusterVmMapDao.expunge(vmMapVO.getId()); if (System.currentTimeMillis() > scaleTimeoutTime) { - logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster : %s failed, scaling action timed out", kubernetesCluster.getName()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); + logTransitStateAndThrow(Level.WARN, String.format("Scaling Kubernetes cluster %s failed, scaling action timed out", kubernetesCluster.getName()),kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } - i--; } + // Scale network rules to update firewall rule try { - scaleKubernetesClusterNetworkRules(null, removedVmIds); + List clusterVMIds = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); + scaleKubernetesClusterNetworkRules(clusterVMIds); } catch (ManagementServerException e) { logTransitStateAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to update network rules", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed, e); } } + private void scaleDownKubernetesClusterSize() throws CloudRuntimeException { + if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { 
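// Editorial sketch (not part of this patch; node counts are hypothetical): the selection logic
// below picks the VMs to remove. When explicit nodeids are passed (say two of five workers on a
// cluster with one control node), only those vm maps are fetched and the constructor has already
// derived the new worker count as 5 - 2 = 3; without nodeids, the trailing entries past
// controlNodeCount + clusterSize are taken. The list is reversed so the newest nodes go first,
// and removeNodesFromCluster() then drains each node, deletes it from Kubernetes and expunges the VM.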
+ stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleDownRequested); + } + List vmList; + if (this.nodeIds != null) { + vmList = getKubernetesClusterVMMapsForNodes(this.nodeIds); + } else { + vmList = getKubernetesClusterVMMaps(); + vmList = vmList.subList((int) (kubernetesCluster.getControlNodeCount() + clusterSize), vmList.size()); + } + Collections.reverse(vmList); + removeNodesFromCluster(vmList); + } + private void scaleUpKubernetesClusterSize(final long newVmCount) throws CloudRuntimeException { if (!kubernetesCluster.getState().equals(KubernetesCluster.State.Scaling)) { stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.ScaleUpRequested); } List clusterVMs = new ArrayList<>(); - List clusterVMIds = new ArrayList<>(); + LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId()); + launchPermissionDao.persist(launchPermission); try { clusterVMs = provisionKubernetesClusterNodeVms((int)(newVmCount + kubernetesCluster.getNodeCount()), (int)kubernetesCluster.getNodeCount(), publicIpAddress); } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to provision node VM in the cluster", kubernetesCluster.getName()), e); } - attachIsoKubernetesVMs(clusterVMs); - for (UserVm vm : clusterVMs) { - clusterVMIds.add(vm.getId()); - } try { - scaleKubernetesClusterNetworkRules(clusterVMIds, null); + List clusterVMIds = getKubernetesClusterVMMaps().stream().map(KubernetesClusterVmMapVO::getVmId).collect(Collectors.toList()); + scaleKubernetesClusterNetworkRules(clusterVMIds); } catch (ManagementServerException e) { logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling failed for Kubernetes cluster : %s, unable to update network rules", kubernetesCluster.getName()), e); } + attachIsoKubernetesVMs(clusterVMs); KubernetesClusterVO kubernetesClusterVO = kubernetesClusterDao.findById(kubernetesCluster.getId()); kubernetesClusterVO.setNodeCount(clusterSize); boolean readyNodesCountValid = KubernetesClusterUtil.validateKubernetesClusterReadyNodesCount(kubernetesClusterVO, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, scaleTimeoutTime, 15000); detachIsoKubernetesVMs(clusterVMs); + deleteTemplateLaunchPermission(); if (!readyNodesCountValid) { // Scaling failed logTransitStateToFailedIfNeededAndThrow(Level.ERROR, String.format("Scaling unsuccessful for Kubernetes cluster : %s as it does not have desired number of nodes in ready state", kubernetesCluster.getName())); } @@ -409,6 +416,10 @@ public boolean scaleCluster() throws CloudRuntimeException { if (existingServiceOffering == null) { logAndThrow(Level.ERROR, String.format("Scaling Kubernetes cluster : %s failed, service offering for the Kubernetes cluster not found!", kubernetesCluster.getName())); } + + if (this.isAutoscalingEnabled != null) { + return autoscaleCluster(this.isAutoscalingEnabled, minSize, maxSize); + } final boolean serviceOfferingScalingNeeded = serviceOffering != null && serviceOffering.getId() != existingServiceOffering.getId(); final boolean clusterSizeScalingNeeded = clusterSize != null && clusterSize != originalClusterSize; final long newVMRequired = clusterSize == null ? 
0 : clusterSize - originalClusterSize; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 072094e7c031..b6da75dba81b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -60,6 +60,7 @@ import com.cloud.network.addr.PublicIp; import com.cloud.network.rules.LoadBalancer; import com.cloud.offering.ServiceOffering; +import com.cloud.storage.LaunchPermissionVO; import com.cloud.user.Account; import com.cloud.user.SSHKeyPairVO; import com.cloud.uservm.UserVm; @@ -71,6 +72,7 @@ import com.cloud.vm.Nic; import com.cloud.vm.ReservationContext; import com.cloud.vm.ReservationContextImpl; +import com.cloud.vm.UserVmManager; import com.cloud.vm.VirtualMachine; import com.google.common.base.Strings; @@ -157,7 +159,7 @@ private String getKubernetesControlNodeConfig(final String controlNodeIp, final if (!Strings.isNullOrEmpty(sshKeyPair)) { SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); if (sshkp != null) { - pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; + pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } k8sControlNodeConfig = k8sControlNodeConfig.replace(sshPubKey, pubKey); @@ -195,11 +197,8 @@ private UserVm createKubernetesControlNode(final Network network, String serverI if (rootDiskSize > 0) { customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); } - String hostName = kubernetesClusterNodeNamePrefix + "-control"; - if (kubernetesCluster.getControlNodeCount() > 1) { - hostName += "-1"; - } - hostName = getKubernetesClusterNodeAvailableName(hostName); + String suffix = Long.toHexString(System.currentTimeMillis()); + String hostName = String.format("%s-control-%s", kubernetesClusterNodeNamePrefix, suffix); boolean haSupported = isKubernetesVersionSupportsHA(); String k8sControlNodeConfig = null; try { @@ -211,7 +210,7 @@ private UserVm createKubernetesControlNode(final Network network, String serverI controlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), - requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, true); + requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Created control VM ID: %s, %s in the Kubernetes cluster : %s", controlVm.getUuid(), hostName, kubernetesCluster.getName())); } @@ -230,7 +229,7 @@ private String getKubernetesAdditionalControlNodeConfig(final String joinIp, fin if (!Strings.isNullOrEmpty(sshKeyPair)) { SSHKeyPairVO sshkp = sshKeyPairDao.findByName(owner.getAccountId(), owner.getDomainId(), sshKeyPair); if (sshkp != null) { - pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; + pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } k8sControlNodeConfig = k8sControlNodeConfig.replace(sshPubKey, pubKey); @@ -254,7 +253,8 @@ private UserVm createKubernetesAdditionalControlNode(final String joinIp, 
final if (rootDiskSize > 0) { customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); } - String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-control-%d", kubernetesClusterNodeNamePrefix, additionalControlNodeInstance + 1)); + String suffix = Long.toHexString(System.currentTimeMillis()); + String hostName = String.format("%s-control-%s", kubernetesClusterNodeNamePrefix, suffix); String k8sControlNodeConfig = null; try { k8sControlNodeConfig = getKubernetesAdditionalControlNodeConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); @@ -265,7 +265,7 @@ private UserVm createKubernetesAdditionalControlNode(final String joinIp, final additionalControlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), - null, addrs, null, null, null, customParameterMap, null, null, null, null, true); + null, addrs, null, null, null, customParameterMap, null, null, null, null, true, UserVmManager.CKS_NODE); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Created control VM ID : %s, %s in the Kubernetes cluster : %s", additionalControlVm.getUuid(), hostName, kubernetesCluster.getName())); } @@ -276,7 +276,7 @@ private UserVm provisionKubernetesClusterControlVm(final Network network, final ManagementServerException, InsufficientCapacityException, ResourceUnavailableException { UserVm k8sControlVM = null; k8sControlVM = createKubernetesControlNode(network, publicIpAddress); - addKubernetesClusterVm(kubernetesCluster.getId(), k8sControlVM.getId()); + addKubernetesClusterVm(kubernetesCluster.getId(), k8sControlVM.getId(), true); if (kubernetesCluster.getNodeRootDiskSize() > 0) { resizeNodeVolume(k8sControlVM); } @@ -298,7 +298,7 @@ private List provisionKubernetesClusterAdditionalControlVms(final String for (int i = 1; i < kubernetesCluster.getControlNodeCount(); i++) { UserVm vm = null; vm = createKubernetesAdditionalControlNode(publicIpAddress, i); - addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId()); + addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId(), true); if (kubernetesCluster.getNodeRootDiskSize() > 0) { resizeNodeVolume(vm); } @@ -385,18 +385,38 @@ private void setupKubernetesClusterNetworkRules(Network network, List cl network.getName(), kubernetesCluster.getName())); } + createFirewallRules(publicIp, clusterVMIds); + + // Load balancer rule fo API access for control node VMs + try { + provisionLoadBalancerRule(publicIp, network, owner, clusterVMIds, CLUSTER_API_PORT); + } catch (NetworkRuleConflictException | InsufficientAddressCapacityException e) { + throw new ManagementServerException(String.format("Failed to provision load balancer rule for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + + // Port forwarding rule fo SSH access on each node VM + try { + provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT); + } catch (ResourceUnavailableException | NetworkRuleConflictException e) { + throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); + } + } + + private void createFirewallRules(IpAddress publicIp, List clusterVMIds) throws ManagementServerException { + // Firewall rule fo API access for control node VMs 
try { provisionFirewallRules(publicIp, owner, CLUSTER_API_PORT, CLUSTER_API_PORT); if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster ID: %s", - CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getUuid())); + LOGGER.info(String.format("Provisioned firewall rule to open up port %d on %s for Kubernetes cluster %s", + CLUSTER_API_PORT, publicIp.getAddress().addr(), kubernetesCluster.getName())); } } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } + // Firewall rule fo SSH access on each node VM try { - int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMs.size() - 1; + int endPort = CLUSTER_NODES_DEFAULT_START_SSH_PORT + clusterVMIds.size() - 1; provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort); if (LOGGER.isInfoEnabled()) { LOGGER.info(String.format("Provisioned firewall rule to open up port %d to %d on %s for Kubernetes cluster : %s", CLUSTER_NODES_DEFAULT_START_SSH_PORT, endPort, publicIp.getAddress().addr(), kubernetesCluster.getName())); @@ -404,20 +424,6 @@ private void setupKubernetesClusterNetworkRules(Network network, List cl } catch (NoSuchFieldException | IllegalAccessException | ResourceUnavailableException | NetworkRuleConflictException e) { throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } - - // Load balancer rule fo API access for control node VMs - try { - provisionLoadBalancerRule(publicIp, network, owner, clusterVMIds, CLUSTER_API_PORT); - } catch (NetworkRuleConflictException | InsufficientAddressCapacityException e) { - throw new ManagementServerException(String.format("Failed to provision load balancer rule for API access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); - } - - // Port forwarding rule fo SSH access on each node VM - try { - provisionSshPortForwardingRules(publicIp, network, owner, clusterVMIds, CLUSTER_NODES_DEFAULT_START_SSH_PORT); - } catch (ResourceUnavailableException | NetworkRuleConflictException e) { - throw new ManagementServerException(String.format("Failed to activate SSH port forwarding rules for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); - } } private void startKubernetesClusterVMs() { @@ -427,6 +433,7 @@ private void startKubernetesClusterVMs() { logTransitStateAndThrow(Level.ERROR, String.format("Failed to start all VMs in Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.OperationFailed); } try { + resizeNodeVolume(vm); startKubernetesVM(vm); } catch (ManagementServerException ex) { LOGGER.warn(String.format("Failed to start VM : %s in Kubernetes cluster : %s due to ", vm.getDisplayName(), kubernetesCluster.getName()) + ex); @@ -506,6 +513,10 @@ public boolean startKubernetesClusterOnCreate() { (Network.GuestType.Isolated.equals(network.getGuestType()) || kubernetesCluster.getControlNodeCount() > 1)) { // Shared network, single-control node cluster won't have an IP yet logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster : %s as no public IP found for the cluster" , kubernetesCluster.getName()), 
kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); } + // Allow account creating the kubernetes cluster to access systemVM template + LaunchPermissionVO launchPermission = new LaunchPermissionVO(clusterTemplate.getId(), owner.getId()); + launchPermissionDao.persist(launchPermission); + List clusterVMs = new ArrayList<>(); UserVm k8sControlVM = null; try { @@ -571,6 +582,7 @@ public boolean startKubernetesClusterOnCreate() { if (!isKubernetesClusterDashboardServiceRunning(true, startTimeoutTime)) { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s in usable state as unable to get Dashboard service running for the cluster", kubernetesCluster.getName()), kubernetesCluster.getId(),KubernetesCluster.Event.OperationFailed); } + taintControlNodes(); deployProvider(); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.OperationSucceeded); return true; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index e8b61d4a26da..ad289bf81a95 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -17,10 +17,7 @@ package com.cloud.kubernetes.cluster.actionworkers; -import java.io.BufferedWriter; import java.io.File; -import java.io.FileWriter; -import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -45,6 +42,7 @@ public class KubernetesClusterUpgradeWorker extends KubernetesClusterActionWorke private List clusterVMs = new ArrayList<>(); private KubernetesSupportedVersion upgradeVersion; + private final String upgradeScriptFilename = "upgrade-kubernetes.sh"; private File upgradeScriptFile; private long upgradeTimeoutTime; @@ -57,16 +55,9 @@ public KubernetesClusterUpgradeWorker(final KubernetesCluster kubernetesCluster, this.keys = keys; } - private void retrieveUpgradeScriptFile() { - try { - String upgradeScriptData = readResourceFile("/script/upgrade-kubernetes.sh"); - upgradeScriptFile = File.createTempFile("upgrade-kuberntes", ".sh"); - BufferedWriter upgradeScriptFileWriter = new BufferedWriter(new FileWriter(upgradeScriptFile)); - upgradeScriptFileWriter.write(upgradeScriptData); - upgradeScriptFileWriter.close(); - } catch (IOException e) { - logAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to prepare upgrade script", kubernetesCluster.getName()), e); - } + protected void retrieveScriptFiles() { + super.retrieveScriptFiles(); + upgradeScriptFile = retrieveScriptFile(upgradeScriptFilename); } private Pair runInstallScriptOnVM(final UserVm vm, final int index) throws Exception { @@ -95,12 +86,12 @@ private void upgradeKubernetesClusterNodes() { } result = null; if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Upgrading node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s", - vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); + LOGGER.info(String.format("Upgrading node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", + vm.getDisplayName(), kubernetesCluster.getName(), 
upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); } try { result = SshHelper.sshExecute(publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, null, - String.format("sudo kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), + String.format("sudo /opt/bin/kubectl drain %s --ignore-daemonsets --delete-local-data", hostName), 10000, 10000, 60000); } catch (Exception e) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to drain Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, e); @@ -112,7 +103,6 @@ private void upgradeKubernetesClusterNodes() { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, upgrade action timed out", kubernetesCluster.getName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } try { - int port = (sshPort == CLUSTER_NODES_DEFAULT_START_SSH_PORT) ? sshPort + i : sshPort; deployProvider(); result = runInstallScriptOnVM(vm, i); } catch (Exception e) { @@ -133,8 +123,8 @@ private void upgradeKubernetesClusterNodes() { } } if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Successfully upgraded node on VM ID: %s in Kubernetes cluster ID: %s with Kubernetes version(%s) ID: %s", - vm.getUuid(), kubernetesCluster.getUuid(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); + LOGGER.info(String.format("Successfully upgraded node on VM %s in Kubernetes cluster %s with Kubernetes version(%s) ID: %s", + vm.getDisplayName(), kubernetesCluster.getName(), upgradeVersion.getSemanticVersion(), upgradeVersion.getUuid())); } } } @@ -155,7 +145,7 @@ public boolean upgradeCluster() throws CloudRuntimeException { if (CollectionUtils.isEmpty(clusterVMs)) { logAndThrow(Level.ERROR, String.format("Upgrade failed for Kubernetes cluster : %s, unable to retrieve VMs for cluster", kubernetesCluster.getName())); } - retrieveUpgradeScriptFile(); + retrieveScriptFiles(); stateTransitTo(kubernetesCluster.getId(), KubernetesCluster.Event.UpgradeRequested); attachIsoKubernetesVMs(clusterVMs, upgradeVersion); upgradeKubernetesClusterNodes(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java index 8b08dd37d553..42061cde1f0f 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDao.java @@ -23,4 +23,5 @@ public interface KubernetesClusterVmMapDao extends GenericDao { public List listByClusterId(long clusterId); + public List listByClusterIdAndVmIdsIn(long clusterId, List vmIds); } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java index 0b86b2c1a622..c5a9ad47814c 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/dao/KubernetesClusterVmMapDaoImpl.java @@ -21,6 +21,7 @@ import org.springframework.stereotype.Component; import com.cloud.kubernetes.cluster.KubernetesClusterVmMapVO; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -34,6 +35,7 @@ public class KubernetesClusterVmMapDaoImpl extends GenericDaoBase listByClusterId(long clusterId) { SearchCriteria sc = clusterIdSearch.create(); sc.setParameters("clusterId", clusterId); - return listBy(sc, null); + Filter filter = new Filter(KubernetesClusterVmMapVO.class, "id", Boolean.TRUE, null, null); + return listBy(sc, filter); + } + + @Override + public List listByClusterIdAndVmIdsIn(long clusterId, List vmIds) { + SearchCriteria sc = clusterIdSearch.create(); + sc.setParameters("clusterId", clusterId); + sc.setParameters("vmIdsIN", vmIds.toArray()); + return listBy(sc); } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java index 48a39f52e0c1..c16ac429e869 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java @@ -49,7 +49,7 @@ public static boolean isKubernetesClusterNodeReady(final KubernetesCluster kuber String user, File sshKeyFile, String nodeName) throws Exception { Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - String.format("sudo kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()), + String.format("sudo /opt/bin/kubectl get nodes | awk '{if ($1 == \"%s\" && $2 == \"Ready\") print $1}'", nodeName.toLowerCase()), 10000, 10000, 20000); if (result.first() && nodeName.equals(result.second().trim())) { return true; @@ -110,7 +110,7 @@ public static boolean uncordonKubernetesClusterNode(final KubernetesCluster kube Pair result = null; try { result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - String.format("sudo kubectl uncordon %s", hostName), + String.format("sudo /opt/bin/kubectl uncordon %s", hostName), 10000, 10000, 30000); if (result.first()) { return true; @@ -133,9 +133,9 @@ public static boolean isKubernetesClusterAddOnServiceRunning(final KubernetesClu final int port, final String user, final File sshKeyFile, final String namespace, String serviceName) { try { - String cmd = "sudo kubectl get pods --all-namespaces"; + String cmd = "sudo /opt/bin/kubectl get pods --all-namespaces"; if (!Strings.isNullOrEmpty(namespace)) { - cmd = String.format("sudo kubectl get pods --namespace=%s", namespace); + cmd = String.format("sudo /opt/bin/kubectl get pods --namespace=%s", namespace); } Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, cmd, @@ -211,7 +211,7 @@ public static int getKubernetesClusterReadyNodesCount(final KubernetesCluster ku final int port, final String user, final File sshKeyFile) throws Exception { Pair result = SshHelper.sshExecute(ipAddress, port, user, sshKeyFile, null, - "sudo kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l", + "sudo /opt/bin/kubectl get nodes | awk '{if ($2 == \"Ready\") print $1}' | wc -l", 
10000, 10000, 20000); if (result.first()) { return Integer.parseInt(result.second().trim().replace("\"", "")); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java index 72a1c3794871..9e58cf395d5d 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/version/KubernetesVersionManagerImpl.java @@ -78,6 +78,8 @@ public class KubernetesVersionManagerImpl extends ManagerBase implements Kuberne @Inject private TemplateApiService templateService; + public static final String MINIMUN_AUTOSCALER_SUPPORTED_VERSION = "1.15.0"; + private KubernetesSupportedVersionResponse createKubernetesSupportedVersionResponse(final KubernetesSupportedVersion kubernetesSupportedVersion) { KubernetesSupportedVersionResponse response = new KubernetesSupportedVersionResponse(); response.setObjectName("kubernetessupportedversion"); @@ -94,12 +96,9 @@ private KubernetesSupportedVersionResponse createKubernetesSupportedVersionRespo response.setZoneId(zone.getUuid()); response.setZoneName(zone.getName()); } - if (compareSemanticVersions(kubernetesSupportedVersion.getSemanticVersion(), - KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT)>=0) { - response.setSupportsHA(true); - } else { - response.setSupportsHA(false); - } + response.setSupportsHA(compareSemanticVersions(kubernetesSupportedVersion.getSemanticVersion(), + KubernetesClusterService.MIN_KUBERNETES_VERSION_HA_SUPPORT)>=0); + response.setSupportsAutoscaling(versionSupportsAutoscaling(kubernetesSupportedVersion)); TemplateJoinVO template = templateJoinDao.findById(kubernetesSupportedVersion.getIsoId()); if (template != null) { response.setIsoId(template.getUuid()); @@ -202,6 +201,10 @@ public static int compareSemanticVersions(String v1, String v2) throws IllegalAr return 0; } + public static boolean versionSupportsAutoscaling(KubernetesSupportedVersion clusterVersion) { + return compareSemanticVersions(clusterVersion.getSemanticVersion(), MINIMUN_AUTOSCALER_SUPPORTED_VERSION) >= 0; + } + /** * Returns a boolean value whether Kubernetes cluster upgrade can be carried from a given currentVersion to upgradeVersion * Kubernetes clusters can only be upgraded from one MINOR version to the next MINOR version, or between PATCH versions of the same MINOR.
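The autoscaling cutoff above depends on comparing semantic versions field by field rather than lexicographically; a plain String comparison would rank "1.9.0" above "1.15.0". A minimal, self-contained sketch of that numeric comparison follows. It is illustrative only: the class, method and constant names here are not part of the patch, which relies on compareSemanticVersions.

```java
// Illustrative sketch: numeric comparison of dotted versions against the
// autoscaling cutoff. Names are hypothetical, not part of the patch.
import java.util.Arrays;

public class SemanticVersionCheck {
    static final String MIN_AUTOSCALER_VERSION = "1.15.0"; // cutoff used in the patch

    // Compare x.y.z versions part by part as integers; returns <0, 0 or >0.
    static int compare(String v1, String v2) {
        int[] a = Arrays.stream(v1.split("\\.")).mapToInt(Integer::parseInt).toArray();
        int[] b = Arrays.stream(v2.split("\\.")).mapToInt(Integer::parseInt).toArray();
        for (int i = 0; i < Math.min(a.length, b.length); i++) {
            if (a[i] != b[i]) {
                return Integer.compare(a[i], b[i]);
            }
        }
        return Integer.compare(a.length, b.length);
    }

    public static void main(String[] args) {
        // Lexicographic String.compareTo would wrongly rank "1.9.0" above "1.15.0";
        // a field-by-field comparison keeps the cutoff correct.
        System.out.println(compare("1.9.0", MIN_AUTOSCALER_VERSION) >= 0);  // false
        System.out.println(compare("1.16.3", MIN_AUTOSCALER_VERSION) >= 0); // true
    }
}
```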
@@ -214,9 +217,7 @@ public static int compareSemanticVersions(String v1, String v2) throws IllegalAr */ public static boolean canUpgradeKubernetesVersion(final String currentVersion, final String upgradeVersion) throws IllegalArgumentException { int versionDiff = compareSemanticVersions(upgradeVersion, currentVersion); - if (versionDiff == 0) { - throw new IllegalArgumentException(String.format("Kubernetes clusters can not be upgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion)); - } else if (versionDiff < 0) { + if (versionDiff < 0) { throw new IllegalArgumentException(String.format("Kubernetes clusters can not be downgraded, current version: %s, upgrade version: %s", currentVersion, upgradeVersion)); } String[] thisParts = currentVersion.split("\\."); diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java index 8921d691142b..376c148abb7a 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.api.command.user.kubernetes.cluster; +import java.security.InvalidParameterException; + import javax.inject.Inject; import org.apache.cloudstack.acl.RoleType; @@ -55,6 +57,7 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { public static final Logger LOGGER = Logger.getLogger(CreateKubernetesClusterCmd.class.getName()); public static final String APINAME = "createKubernetesCluster"; + private static final Long DEFAULT_NODE_ROOT_DISK_SIZE = 8L; @Inject public KubernetesClusterService kubernetesClusterService; @@ -142,7 +145,7 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { private String dockerRegistryEmail; @Parameter(name = ApiConstants.NODE_ROOT_DISK_SIZE, type = CommandType.LONG, - description = "root disk size of root disk for each node") + description = "root disk size in GB for each node") private Long nodeRootDiskSize; ///////////////////////////////////////////////////// @@ -228,7 +231,14 @@ public String getDockerRegistryEmail() { } public Long getNodeRootDiskSize() { - return nodeRootDiskSize; + if (nodeRootDiskSize != null) { + if (nodeRootDiskSize < DEFAULT_NODE_ROOT_DISK_SIZE) { + throw new InvalidParameterException("Provided node root disk size is lesser than default size of " + DEFAULT_NODE_ROOT_DISK_SIZE +"GB"); + } + return nodeRootDiskSize; + } else { + return DEFAULT_NODE_ROOT_DISK_SIZE; + } } ///////////////////////////////////////////////////// diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java index 574d8a70395e..11b74441bbde 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/ScaleKubernetesClusterCmd.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.api.command.user.kubernetes.cluster; +import java.util.List; + import javax.inject.Inject; import org.apache.cloudstack.acl.RoleType; @@ -30,6 +32,7 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.KubernetesClusterResponse; import org.apache.cloudstack.api.response.ServiceOfferingResponse; +import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; import org.apache.log4j.Logger; @@ -58,19 +61,38 @@ public class ScaleKubernetesClusterCmd extends BaseAsyncCmd { //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// @Parameter(name = ApiConstants.ID, type = CommandType.UUID, required = true, - entityType = KubernetesClusterResponse.class, - description = "the ID of the Kubernetes cluster") + entityType = KubernetesClusterResponse.class, + description = "the ID of the Kubernetes cluster") private Long id; @ACL(accessType = SecurityChecker.AccessType.UseEntry) @Parameter(name = ApiConstants.SERVICE_OFFERING_ID, type = CommandType.UUID, entityType = ServiceOfferingResponse.class, - description = "the ID of the service offering for the virtual machines in the cluster.") + description = "the ID of the service offering for the virtual machines in the cluster.") private Long serviceOfferingId; @Parameter(name=ApiConstants.SIZE, type = CommandType.LONG, - description = "number of Kubernetes cluster nodes") + description = "number of Kubernetes cluster nodes") private Long clusterSize; + @Parameter(name = ApiConstants.NODE_IDS, + type = CommandType.LIST, + collectionType = CommandType.UUID, + entityType = UserVmResponse.class, + description = "the IDs of the nodes to be removed") + private List nodeIds; + + @Parameter(name=ApiConstants.AUTOSCALING_ENABLED, type = CommandType.BOOLEAN, + description = "Whether autoscaling is enabled for the cluster") + private Boolean isAutoscalingEnabled; + + @Parameter(name=ApiConstants.MIN_SIZE, type = CommandType.LONG, + description = "Minimum number of worker nodes in the cluster") + private Long minSize; + + @Parameter(name=ApiConstants.MAX_SIZE, type = CommandType.LONG, + description = "Maximum number of worker nodes in the cluster") + private Long maxSize; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -87,6 +109,22 @@ public Long getClusterSize() { return clusterSize; } + public List getNodeIds() { + return nodeIds; + } + + public Boolean isAutoscalingEnabled() { + return isAutoscalingEnabled; + } + + public Long getMinSize() { + return minSize; + } + + public Long getMaxSize() { + return maxSize; + } + @Override public String getEventType() { return KubernetesClusterEventTypes.EVENT_KUBERNETES_CLUSTER_SCALE; diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java index 8324771c03ad..7969d7596382 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java +++ 
b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java @@ -146,6 +146,18 @@ public class KubernetesClusterResponse extends BaseResponseWithAnnotations imple @Param(description = "Public IP Address ID of the cluster") private String ipAddressId; + @SerializedName(ApiConstants.AUTOSCALING_ENABLED) + @Param(description = "Whether autoscaling is enabled for the cluster") + private boolean isAutoscalingEnabled; + + @SerializedName(ApiConstants.MIN_SIZE) + @Param(description = "Minimum size of the cluster") + private Long minSize; + + @SerializedName(ApiConstants.MAX_SIZE) + @Param(description = "Maximum size of the cluster") + private Long maxSize; + public KubernetesClusterResponse() { } @@ -353,4 +365,16 @@ public void setIpAddress(String ipAddress) { public void setIpAddressId(String ipAddressId) { this.ipAddressId = ipAddressId; } + + public void setAutoscalingEnabled(boolean isAutoscalingEnabled) { + this.isAutoscalingEnabled = isAutoscalingEnabled; + } + + public void setMinSize(Long minSize) { + this.minSize = minSize; + } + + public void setMaxSize(Long maxSize) { + this.maxSize = maxSize; + } } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java index 449bd9570551..74d2b2551ca7 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java @@ -64,6 +64,10 @@ public class KubernetesSupportedVersionResponse extends BaseResponse { @Param(description = "whether Kubernetes supported version supports HA, multi-control nodes") private Boolean supportsHA; + @SerializedName(ApiConstants.SUPPORTS_AUTOSCALING) + @Param(description = "whether Kubernetes supported version supports Autoscaling") + private Boolean supportsAutoscaling; + @SerializedName(ApiConstants.STATE) @Param(description = "the enabled or disabled state of the Kubernetes supported version") private String state; @@ -171,4 +175,12 @@ public Integer getMinimumRamSize() { public void setMinimumRamSize(Integer minimumRamSize) { this.minimumRamSize = minimumRamSize; } + + public Boolean getSupportsAutoscaling() { + return supportsAutoscaling; + } + + public void setSupportsAutoscaling(Boolean supportsAutoscaling) { + this.supportsAutoscaling = supportsAutoscaling; + } } diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml index 601df21454d4..bea272b691fa 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml @@ -17,12 +17,16 @@ # under the License. 
--- -ssh_authorized_keys: - {{ k8s.ssh.pub.key }} - -write-files: +users: + - name: core + sudo: ALL=(ALL) NOPASSWD:ALL + shell: /bin/bash + ssh_authorized_keys: + {{ k8s.ssh.pub.key }} + +write_files: - path: /opt/bin/setup-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e @@ -96,7 +100,7 @@ write-files: mkdir -p /opt/bin cd /opt/bin - cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin + cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} . chmod +x {kubeadm,kubelet,kubectl} sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service @@ -125,6 +129,10 @@ write-files: done <<< "$output" setup_complete=true fi + if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then + mkdir -p /opt/autoscaler + cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml + fi if [ -e "${BINARIES_DIR}/provider.yaml" ]; then mkdir -p /opt/provider cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml @@ -179,14 +187,14 @@ write-files: fi - path: /opt/bin/deploy-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e if [[ -f "/home/core/success" ]]; then - echo "Already provisioned!" - exit 0 + echo "Already provisioned!" + exit 0 fi if [[ $(systemctl is-active setup-kube-system) != "inactive" ]]; then @@ -196,7 +204,7 @@ write-files: modprobe ip_vs modprobe ip_vs_wrr modprobe ip_vs_sh - modprobe nf_conntrack_ipv4 + modprobe nf_conntrack if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then export PATH=$PATH:/opt/bin fi @@ -205,37 +213,34 @@ write-files: sudo touch /home/core/success echo "true" > /home/core/success -coreos: - units: - - name: docker.service - command: start - enable: true - - - name: setup-kube-system.service - command: start - content: | - [Unit] - Requires=docker.service - After=docker.service - - [Service] - Type=simple - StartLimitInterval=0 - ExecStart=/opt/bin/setup-kube-system - - - name: deploy-kube-system.service - command: start - content: | - [Unit] - After=setup-kube-system.service - - [Service] - Type=simple - StartLimitInterval=0 - Restart=on-failure - ExecStartPre=/usr/bin/curl -k https://{{ k8s_control_node.join_ip }}:6443/version - ExecStart=/opt/bin/deploy-kube-system - - update: - group: stable - reboot-strategy: off + - path: /etc/systemd/system/setup-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + Type=simple + StartLimitInterval=0 + ExecStart=/opt/bin/setup-kube-system + + - path: /etc/systemd/system/deploy-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + After=setup-kube-system.service + + [Service] + Type=simple + StartLimitInterval=0 + Restart=on-failure + ExecStartPre=/usr/bin/curl -k https://{{ k8s_control_node.join_ip }}:6443/version + ExecStart=/opt/bin/deploy-kube-system + +runcmd: + - [ systemctl, start, setup-kube-system ] + - [ systemctl, start, deploy-kube-system ] + diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml index 44a78a346e1f..df742231a436 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml @@ -17,10 +17,14 @@ # under the License. 
--- -ssh_authorized_keys: - {{ k8s.ssh.pub.key }} - -write-files: +users: + - name: core + sudo: ALL=(ALL) NOPASSWD:ALL + shell: /bin/bash + ssh_authorized_keys: + {{ k8s.ssh.pub.key }} + +write_files: - path: /etc/conf.d/nfs permissions: '0644' content: | @@ -42,7 +46,7 @@ write-files: {{ k8s_control_node.apiserver.key }} - path: /opt/bin/setup-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e @@ -116,7 +120,7 @@ write-files: mkdir -p /opt/bin cd /opt/bin - cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin + cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} . chmod +x {kubeadm,kubelet,kubectl} sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service @@ -147,6 +151,10 @@ write-files: fi mkdir -p "${K8S_CONFIG_SCRIPTS_COPY_DIR}" cp ${BINARIES_DIR}/*.yaml "${K8S_CONFIG_SCRIPTS_COPY_DIR}" + if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then + mkdir -p /opt/autoscaler + cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml + fi if [ -e "${BINARIES_DIR}/provider.yaml" ]; then mkdir -p /opt/provider cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml @@ -218,7 +226,7 @@ write-files: done - path: /opt/bin/deploy-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e @@ -247,52 +255,49 @@ write-files: if [ -d "$K8S_CONFIG_SCRIPTS_COPY_DIR" ]; then ### Network, dashboard configs available offline ### echo "Offline configs are available!" - kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/network.yaml - kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/dashboard.yaml + /opt/bin/kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/network.yaml + /opt/bin/kubectl apply -f ${K8S_CONFIG_SCRIPTS_COPY_DIR}/dashboard.yaml rm -rf "${K8S_CONFIG_SCRIPTS_COPY_DIR}" else - kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" - kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml + /opt/bin/kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(/opt/bin/kubectl version | base64 | tr -d '\n')" + /opt/bin/kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta6/aio/deploy/recommended.yaml fi - kubectl create rolebinding admin-binding --role=admin --user=admin || true - kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true - kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true + /opt/bin/kubectl create rolebinding admin-binding --role=admin --user=admin || true + /opt/bin/kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=admin || true + /opt/bin/kubectl create clusterrolebinding kubernetes-dashboard-ui --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard || true sudo touch /home/core/success echo "true" > /home/core/success -coreos: - units: - - name: docker.service - command: start - enable: true - - - name: setup-kube-system.service - command: start - content: | - [Unit] - Requires=docker.service - After=docker.service - - [Service] - Type=simple - StartLimitInterval=0 - ExecStart=/opt/bin/setup-kube-system - - - name: deploy-kube-system.service - command: start - content: | - [Unit] - After=setup-kube-system.service - - [Service] - Type=simple - StartLimitInterval=0 - 
Restart=on-failure - ExecStartPre=/usr/bin/curl -k https://127.0.0.1:6443/version - ExecStart=/opt/bin/deploy-kube-system - - update: - group: stable - reboot-strategy: off + - path: /etc/systemd/system/setup-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + Type=simple + StartLimitInterval=0 + ExecStart=/opt/bin/setup-kube-system + + - path: /etc/systemd/system/deploy-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + After=setup-kube-system.service + + [Service] + Type=simple + StartLimitInterval=0 + Restart=on-failure + ExecStartPre=/usr/bin/curl -k https://127.0.0.1:6443/version + ExecStart=/opt/bin/deploy-kube-system + +runcmd: + - [ systemctl, start, setup-kube-system ] + - [ systemctl, start, deploy-kube-system ] + diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml index 03ed7013a4ca..97e4160d414f 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml +++ b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-node.yml @@ -17,12 +17,16 @@ # under the License. --- -ssh_authorized_keys: - {{ k8s.ssh.pub.key }} - -write-files: +users: + - name: core + sudo: ALL=(ALL) NOPASSWD:ALL + shell: /bin/bash + ssh_authorized_keys: + {{ k8s.ssh.pub.key }} + +write_files: - path: /opt/bin/setup-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e @@ -96,7 +100,7 @@ write-files: mkdir -p /opt/bin cd /opt/bin - cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} /opt/bin + cp -a ${BINARIES_DIR}/k8s/{kubeadm,kubelet,kubectl} . 
chmod +x {kubeadm,kubelet,kubectl} sed "s:/usr/bin:/opt/bin:g" ${BINARIES_DIR}/kubelet.service > /etc/systemd/system/kubelet.service @@ -125,6 +129,10 @@ write-files: done <<< "$output" setup_complete=true fi + if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then + mkdir -p /opt/autoscaler + cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml + fi if [ -e "${BINARIES_DIR}/provider.yaml" ]; then mkdir -p /opt/provider cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml @@ -179,7 +187,7 @@ write-files: fi - path: /opt/bin/deploy-kube-system - permissions: 0700 + permissions: '0700' owner: root:root content: | #!/bin/bash -e @@ -196,7 +204,7 @@ write-files: modprobe ip_vs modprobe ip_vs_wrr modprobe ip_vs_sh - modprobe nf_conntrack_ipv4 + modprobe nf_conntrack if [[ "$PATH" != *:/opt/bin && "$PATH" != *:/opt/bin:* ]]; then export PATH=$PATH:/opt/bin fi @@ -205,37 +213,33 @@ write-files: sudo touch /home/core/success echo "true" > /home/core/success -coreos: - units: - - name: docker.service - command: start - enable: true - - - name: setup-kube-system.service - command: start - content: | - [Unit] - Requires=docker.service - After=docker.service - - [Service] - Type=simple - StartLimitInterval=0 - ExecStart=/opt/bin/setup-kube-system - - - name: deploy-kube-system.service - command: start - content: | - [Unit] - After=setup-kube-system.service - - [Service] - Type=simple - StartLimitInterval=0 - Restart=on-failure - ExecStartPre=/usr/bin/curl -k https://{{ k8s_control_node.join_ip }}:6443/version - ExecStart=/opt/bin/deploy-kube-system - - update: - group: stable - reboot-strategy: off + - path: /etc/systemd/system/setup-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + Type=simple + StartLimitInterval=0 + ExecStart=/opt/bin/setup-kube-system + + - path: /etc/systemd/system/deploy-kube-system.service + permissions: '0755' + owner: root:root + content: | + [Unit] + After=setup-kube-system.service + + [Service] + Type=simple + StartLimitInterval=0 + Restart=on-failure + ExecStartPre=/usr/bin/curl -k https://{{ k8s_control_node.join_ip }}:6443/version + ExecStart=/opt/bin/deploy-kube-system + +runcmd: + - [ systemctl, start, setup-kube-system ] + - [ systemctl, start, deploy-kube-system ] diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster b/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster new file mode 100755 index 000000000000..4f2254fdc051 --- /dev/null +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/autoscale-kube-cluster @@ -0,0 +1,93 @@ +#!/bin/bash -e +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
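The cloud-init templates above carry {{ ... }} placeholders such as {{ k8s.ssh.pub.key }} and {{ k8s_control_node.join_ip }} that are filled in before the file is handed to the VM as user data. A rough sketch of that substitution step is shown below; the class, method and encoding step are assumptions for illustration, not the worker code from this patch.

```java
// Hypothetical sketch of rendering the {{ ... }} placeholders in the cloud-init
// templates above into base64-encoded user data. Not the actual worker code.
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.Map;

public class CloudInitTemplateRenderer {
    // Replace each "{{ token }}" with its value and base64-encode the result.
    public static String render(String template, Map<String, String> values) {
        String rendered = template;
        for (Map.Entry<String, String> e : values.entrySet()) {
            rendered = rendered.replace("{{ " + e.getKey() + " }}", e.getValue());
        }
        return Base64.getEncoder().encodeToString(rendered.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        String template = "users:\n  - name: core\n    ssh_authorized_keys:\n      {{ k8s.ssh.pub.key }}\n";
        String userData = render(template, Map.of("k8s.ssh.pub.key", "ssh-rsa AAAA... example"));
        System.out.println(userData);
    }
}
```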
+ +function usage() { + cat << USAGE +Usage: ./autoscale-kube-cluster [OPTIONS]... +Enables or disables autoscaling for the Kubernetes cluster. +Arguments: + -i, --id string ID of the cluster + -e, --enable Enables autoscaling + -d, --disable Disables autoscaling + -M, --maxsize number Maximum size of the cluster + -m, --minsize number Minimum size of the cluster +Other arguments: + -h, --help Display this help message and exit +Examples: + ./autoscale-kube-cluster -e -M 3 -m 1 + ./autoscale-kube-cluster -d +USAGE + exit 0 +} +ID="" +ENABLE="" +MINSIZE="" +MAXSIZE="" +while [ -n "$1" ]; do + case "$1" in + -h | --help) + usage + ;; + -i | --id) + ID=$2 + shift 2 + ;; + -e | --enable) + ENABLE="true" + shift 1 + ;; + -d | --disable) + ENABLE="false" + shift 1 + ;; + -M | --maxsize) + MAXSIZE=$2 + shift 2 + ;; + -m | --minsize) + MINSIZE=$2 + shift 2 + ;; + -*|*) + echo "ERROR: no such option $1. -h or --help for help" + exit 1 + ;; + esac +done +if [ "$ENABLE" == "true" ] ; then + if [ -e /opt/autoscaler/autoscaler_tmpl.yaml ]; then + sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml + /opt/bin/kubectl apply -f /opt/autoscaler/autoscaler_now.yaml + exit 0 + else + mkdir -p /opt/autoscaler + AUTOSCALER_URL="https://raw.githubusercontent.com/shapeblue/autoscaler/add-acs/cluster-autoscaler/cloudprovider/cloudstack/examples/cluster-autoscaler-standard.yaml" + autoscaler_conf_file="/opt/autoscaler/autoscaler_tmpl.yaml" + curl -sSL ${AUTOSCALER_URL} -o ${autoscaler_conf_file} + if [ $? -ne 0 ]; then + echo "Unable to connect to the internet to download the autoscaler deployment and image" + exit 1 + else + sed -e "s//$ID/g" -e "s//$MINSIZE/g" -e "s//$MAXSIZE/g" /opt/autoscaler/autoscaler_tmpl.yaml > /opt/autoscaler/autoscaler_now.yaml + /opt/bin/kubectl apply -f /opt/autoscaler/autoscaler_now.yaml + exit 0 + fi + fi +else + /opt/bin/kubectl delete deployment -n kube-system cluster-autoscaler +fi diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-provider b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-provider index e707b5991e0e..ce71e21072b6 100755 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-provider +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/deploy-provider @@ -16,7 +16,7 @@ # specific language governing permissions and limitations # under the License.
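The autoscale-kube-cluster script above is intended to be run on a cluster control node over SSH. A rough sketch of such an invocation follows, reusing the SshHelper.sshExecute signature that appears elsewhere in this patch; the script path, user name and timeouts are assumptions.

```java
// Rough sketch: toggling cluster autoscaling by running the script above on the
// control node over SSH. The SshHelper.sshExecute signature matches its use
// elsewhere in this patch; the script path, user and timeouts are assumptions.
import java.io.File;

import com.cloud.utils.Pair;
import com.cloud.utils.ssh.SshHelper;

public class AutoscaleToggleExample {
    public static boolean enableAutoscaling(String controlNodeIp, int sshPort, File sshKeyFile,
            String clusterUuid, long minSize, long maxSize) throws Exception {
        String cmd = String.format("sudo /opt/bin/autoscale-kube-cluster -i %s -e -m %d -M %d",
                clusterUuid, minSize, maxSize);
        Pair<Boolean, String> result = SshHelper.sshExecute(controlNodeIp, sshPort, "core",
                sshKeyFile, null, cmd, 10000, 10000, 60000);
        return result.first();
    }
}
```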
-(kubectl get pods -A | grep cloud-controller-manager) && exit 0 +(/opt/bin/kubectl get pods -A | grep cloud-controller-manager) && exit 0 if [ -e /opt/provider/provider.yaml ]; then /opt/bin/kubectl apply -f /opt/provider/provider.yaml diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh old mode 100644 new mode 100755 index 99153c9cb881..cbec8282c3ac --- a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh @@ -101,22 +101,28 @@ if [ -d "$BINARIES_DIR" ]; then cp "${BINARIES_DIR}/provider.yaml" /opt/provider/provider.yaml fi + # Fetch the autoscaler if present + if [ -e "${BINARIES_DIR}/autoscaler.yaml" ]; then + mkdir -p /opt/autoscaler + cp "${BINARIES_DIR}/autoscaler.yaml" /opt/autoscaler/autoscaler_tmpl.yaml + fi + tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz if [ "${IS_MAIN_CONTROL}" == 'true' ]; then set +e - kubeadm upgrade apply ${UPGRADE_VERSION} -y + kubeadm --v=5 upgrade apply ${UPGRADE_VERSION} -y retval=$? set -e if [ $retval -ne 0 ]; then - kubeadm upgrade apply ${UPGRADE_VERSION} --ignore-preflight-errors=CoreDNSUnsupportedPlugins -y + kubeadm --v=5 upgrade apply ${UPGRADE_VERSION} --ignore-preflight-errors=CoreDNSUnsupportedPlugins -y fi else if [ "${IS_OLD_VERSION}" == 'true' ]; then - kubeadm upgrade node config --kubelet-version ${UPGRADE_VERSION} + kubeadm --v=5 upgrade node config --kubelet-version ${UPGRADE_VERSION} else - kubeadm upgrade node + kubeadm --v=5 upgrade node fi fi @@ -126,8 +132,8 @@ if [ -d "$BINARIES_DIR" ]; then systemctl restart kubelet if [ "${IS_MAIN_CONTROL}" == 'true' ]; then - kubectl apply -f ${BINARIES_DIR}/network.yaml - kubectl apply -f ${BINARIES_DIR}/dashboard.yaml + /opt/bin/kubectl apply -f ${BINARIES_DIR}/network.yaml + /opt/bin/kubectl apply -f ${BINARIES_DIR}/dashboard.yaml fi umount "${ISO_MOUNT_DIR}" && rmdir "${ISO_MOUNT_DIR}" diff --git a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java index bc35b34ea468..6975f76e9681 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/main/java/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java @@ -476,7 +476,8 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl if (defaultDns2 != null) { buf.append(" dns2=").append(defaultDns2); } - + String msPublicKey = _configDao.getValue("ssh.publickey"); + buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); if (s_logger.isDebugEnabled()) { s_logger.debug("Boot Args for " + profile + ": " + buf.toString()); } diff --git a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java index 7232c1a032a3..ee880cb6b666 100644 --- 
a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java +++ b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java @@ -218,6 +218,8 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile buf.append(" localgw=").append(dest.getPod().getGateway()); } } + String msPublicKey = _configDao.getValue("ssh.publickey"); + buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); } if (controlNic == null) { diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java index fb1eaa198bc7..5cd90c930089 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/MockAccountManager.java @@ -457,7 +457,6 @@ public Map getKeys(GetUserKeysCmd cmd){ return null; } - @Override public Map getKeys(Long userId) { return null; diff --git a/pom.xml b/pom.xml index 8b9839272c30..c343d6cdb066 100644 --- a/pom.xml +++ b/pom.xml @@ -17,7 +17,7 @@ under the License. --> + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd"> 4.0.0 @@ -173,6 +173,8 @@ 3.1.3 1.4.15 5.3.3 + 0.5.4 + 1.12.0 @@ -1036,10 +1038,10 @@ 128m 512m - -XDignore.symbol.file=true - --add-opens=java.base/java.lang=ALL-UNNAMED - --add-exports=java.base/sun.security.x509=ALL-UNNAMED - --add-exports=java.base/sun.security.provider=ALL-UNNAMED + -XDignore.symbol.file=true + --add-opens=java.base/java.lang=ALL-UNNAMED + --add-exports=java.base/sun.security.x509=ALL-UNNAMED + --add-exports=java.base/sun.security.provider=ALL-UNNAMED diff --git a/scripts/storage/secondary/createtmplt.sh b/scripts/storage/secondary/createtmplt.sh index 391b291d3bbd..4bf90d6350a3 100755 --- a/scripts/storage/secondary/createtmplt.sh +++ b/scripts/storage/secondary/createtmplt.sh @@ -15,7 +15,6 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
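The authorized_key boot argument appended in the load balancer managers above carries an encoded copy of the management server public key on the kernel command line. A hypothetical sketch of the consuming side is shown next, assuming standard Base64 encoding and the /var/cache/cloud/cmdline location used by patch.sh; the real consumer lives in the system VM init scripts, not in this patch.

```java
// Hypothetical sketch of the consuming side of the authorized_key boot argument:
// pull the value out of the saved cmdline and decode it back into an OpenSSH
// public key line. Base64 encoding is assumed here.
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class AuthorizedKeyBootArg {
    public static String extractPublicKey(String cmdline) {
        for (String token : cmdline.trim().split("\\s+")) {
            if (token.startsWith("authorized_key=")) {
                String encoded = token.substring("authorized_key=".length());
                return new String(Base64.getDecoder().decode(encoded), StandardCharsets.UTF_8);
            }
        }
        return null;
    }

    public static void main(String[] args) {
        String encoded = Base64.getEncoder()
                .encodeToString("ssh-rsa AAAA... example".getBytes(StandardCharsets.UTF_8));
        String cmdline = "type=consoleproxy authorized_key=" + encoded;
        System.out.println(extractPublicKey(cmdline)); // ssh-rsa AAAA... example
    }
}
```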
- # $Id: createtmplt.sh 9132 2010-06-04 20:17:43Z manuel $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/scripts/storage/secondary/createtmplt.sh $ @@ -110,6 +109,16 @@ create_from_file() { } +create_from_file_user() { + local tmpltfs=$1 + local tmpltimg=$2 + local tmpltname=$3 + + [ -n "$verbose" ] && echo "Copying to $tmpltfs/$tmpltname...could take a while" >&2 + sudo cp $tmpltimg /$tmpltfs/$tmpltname + +} + tflag= nflag= fflag= @@ -118,8 +127,9 @@ hflag= hvm=false cleanup=false dflag= +cloud=false -while getopts 'vuht:n:f:s:d:S:' OPTION +while getopts 'vcuht:n:f:s:d:S:' OPTION do case $OPTION in t) tflag=1 @@ -144,6 +154,8 @@ do h) hflag=1 hvm="true" ;; + c) cloud="true" + ;; u) cleanup="true" ;; v) verbose="true" @@ -199,7 +211,14 @@ fi imgsize=$(ls -l $tmpltimg2| awk -F" " '{print $5}') -create_from_file $tmpltfs $tmpltimg2 $tmpltname +if [ "$cloud" == "true" ] +then + create_from_file_user $tmpltfs $tmpltimg2 $tmpltname + tmpltfs=/tmp/cloud/templates/ +else + create_from_file $tmpltfs $tmpltimg2 $tmpltname +fi + touch /$tmpltfs/template.properties rollback_if_needed $tmpltfs $? "Failed to create template.properties file" @@ -213,7 +232,7 @@ echo "description=$descr" >> /$tmpltfs/template.properties echo "hvm=$hvm" >> /$tmpltfs/template.properties echo "size=$imgsize" >> /$tmpltfs/template.properties -if [ "$cleanup" == "true" ] +if [[ "$cleanup" == "true" ]] && [[ $cloud != "true" ]] then rm -f $tmpltimg fi diff --git a/scripts/storage/secondary/setup-sysvm-tmplt b/scripts/storage/secondary/setup-sysvm-tmplt new file mode 100755 index 000000000000..fa33f33f0408 --- /dev/null +++ b/scripts/storage/secondary/setup-sysvm-tmplt @@ -0,0 +1,172 @@ +#!/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Usage: e.g. failed $? "this is an error" +set -x + +failed() { + local returnval=$1 + local returnmsg=$2 + + # check for an message, if there is no one dont print anything + if [[ -z $returnmsg ]]; then + : + else + echo -e $returnmsg + fi + if [[ $returnval -eq 0 ]]; then + return 0 + else + echo "Installation failed" + exit $returnval + fi +} + +# check if first parameter is not a dash (-) then print the usage block +if [[ ! $@ =~ ^\-.+ ]]; then + usage + exit 0 +fi + +OPTERR=0 +while getopts 'h:f:d:u::'# OPTION +do + case $OPTION in + u) uflag=1 + uuid="$OPTARG" + ;; + f) fflag=1 + tmpltimg="$OPTARG" + ;; + h) hyper="$OPTARG" + ;; + d) destdir="$OPTARG" + ;; + ?) 
usage + exit 0 + ;; + *) usage + exit 0 + ;; + esac +done + +if [[ "$hyper" == "kvm" ]]; then + ext="qcow2" + qemuimgcmd=$(which qemu-img) + elif [[ "$hyper" == "xenserver" ]]; then + ext="vhd" + elif [[ "$hyper" == "vmware" ]]; then + ext="ova" + elif [[ "$hyper" == "lxc" ]]; then + ext="qcow2" + elif [[ "$hyper" == "hyperv" ]]; then + ext="vhd" + elif [[ "$hyper" == "ovm3" ]]; then + ext="raw" + else + failed 2 "Please add a correct hypervisor name like: kvm|vmware|xenserver|hyperv|ovm3" +fi + + +localfile=$uuid.$ext + + +sudo mkdir -p $destdir +if [[ $? -ne 0 ]]; then + failed 2 "Failed to write to destdir $destdir -- is it mounted?\n" +fi + +if [[ -f $destdir/template.properties ]]; then + failed 2 "Data already exists at destination $destdir" +fi + +destfiles=$(find $destdir -name \*.$ext) +if [[ "$destfiles" != "" ]]; then + failed 2 "Data already exists at destination $destdir" +fi + +tmpfolder=/tmp/cloud/templates/ +mkdir -p $tmpfolder +tmplfile=$tmpfolder/$localfile + +sudo touch $tmplfile +if [[ $? -ne 0 ]]; then + failed 2 "Failed to create temporary file in directory $tmpfolder -- is it read-only or full?\n" +fi + +destcap=$(df -P $destdir | awk '{print $4}' | tail -1 ) +[ $destcap -lt $DISKSPACE ] && echo "Insufficient free disk space for target folder $destdir: avail=${destcap}k req=${DISKSPACE}k" && failed 4 + +localcap=$(df -P $tmpfolder | awk '{print $4}' | tail -1 ) +[ $localcap -lt $DISKSPACE ] && echo "Insufficient free disk space for local temporary folder $tmpfolder: avail=${localcap}k req=${DISKSPACE}k" && failed 4 + + +if [[ "$fflag" == "1" ]]; then + sudo cp $tmpltimg $tmplfile + if [[ $? -ne 0 ]]; then + failed 2 "Failed to create temporary file in directory $tmpfolder -- is it read-only or full?\n" + fi +fi + +installrslt=$($(dirname $0)/createtmplt.sh -s 2 -d "SystemVM Template ( $hyper )" -n $localfile -t $destdir/ -f $tmplfile -u -v -c) + +if [[ $? 
-ne 0 ]]; then + failed 2 "Failed to install system vm template $tmpltimg to $destdir: $installrslt" +fi + +tmpdestdir=$tmpfolder + +if [ "$ext" == "ova" ] +then + tar xvf $tmpdestdir/$localfile -C $tmpdestdir &> /dev/null + sudo cp $tmpdestdir/*.vmdk $tmpdestdir/*.mf $tmpdestdir/*.ovf $destdir/ + rm -rf $tmpdestdir/*.vmdk $tmpdestdir/*.mf $tmpdestdir/*.ovf $tmpdestdir/*.ova +else + rm -rf $tmpdestdir/*.tmp +fi + + +tmpltfile=$destdir/$localfile +tmpltsize=$(ls -l $tmpltfile | awk -F" " '{print $5}') +if [[ "$ext" == "qcow2" ]]; then + vrtmpltsize=$($qemuimgcmd info $tmpltfile | grep -i 'virtual size' | sed -ne 's/.*(\([0-9]*\).*/\1/p' | xargs) +else + vrtmpltsize=$tmpltsize +fi + +templateId=${destdir##*/} +sudo touch $destdir/template.properties +echo "$ext=true" >> $tmpdestdir/template.properties +echo "id=$templateId" >> $tmpdestdir/template.properties +echo "public=true" >> $tmpdestdir/template.properties +echo "$ext.filename=$localfile" >> $tmpdestdir/template.properties +echo "uniquename=routing-$templateId" >> $tmpdestdir/template.properties +echo "$ext.virtualsize=$vrtmpltsize" >> $tmpdestdir/template.properties +echo "virtualsize=$vrtmpltsize" >> $tmpdestdir/template.properties +echo "$ext.size=$tmpltsize" >> $tmpdestdir/template.properties + +sudo cp $tmpdestdir/template.properties $destdir/template.properties +if [ -f "$tmpdestdir/template.properties" ] +then + rm -rf $tmpdestdir/template.properties +fi + +echo "Successfully installed system VM template $tmpltimg and template.properties to $destdir" +exit 0 \ No newline at end of file diff --git a/scripts/util/create-kubernetes-binaries-iso.sh b/scripts/util/create-kubernetes-binaries-iso.sh index 241b45e8b33a..fc19caa470ce 100755 --- a/scripts/util/create-kubernetes-binaries-iso.sh +++ b/scripts/util/create-kubernetes-binaries-iso.sh @@ -25,6 +25,7 @@ if [ $# -lt 6 ]; then fi RELEASE="v${2}" +VAL="1.18.0" output_dir="${1}" start_dir="$PWD" iso_dir="/tmp/iso" @@ -60,12 +61,20 @@ echo "Downloading kubelet.service ${RELEASE}..." cd "${start_dir}" kubelet_service_file="${working_dir}/kubelet.service" touch "${kubelet_service_file}" -curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file} +if [[ `echo "${2} $VAL" | awk '{print ($1 < $2)}'` == 1 ]]; then + curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file} +else + curl -sSL "https://raw.githubusercontent.com/shapeblue/cloudstack-nonoss/main/cks/kubelet.service" | sed "s:/usr/bin:/opt/bin:g" > ${kubelet_service_file} +fi echo "Downloading 10-kubeadm.conf ${RELEASE}..." 
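The setup-sysvm-tmplt script above records the installed template's metadata as flat key=value pairs in template.properties. A small illustrative reader for that file follows; the keys mirror the echo lines in the script, while the path and the class itself are only examples.

```java
// Illustrative reader for the template.properties metadata written by
// setup-sysvm-tmplt above. The file path is an example; the keys mirror
// the echo lines in the script.
import java.io.FileReader;
import java.io.IOException;
import java.util.Properties;

public class TemplatePropertiesReader {
    public static void main(String[] args) throws IOException {
        Properties props = new Properties();
        try (FileReader reader = new FileReader("/mnt/secondary/template/tmpl/1/3/template.properties")) {
            props.load(reader);
        }
        // Keys written by the script: id, public, uniquename, virtualsize, size,
        // plus hypervisor-format-specific entries such as qcow2.filename.
        System.out.println("id          = " + props.getProperty("id"));
        System.out.println("uniquename  = " + props.getProperty("uniquename"));
        System.out.println("virtualsize = " + props.getProperty("virtualsize"));
        System.out.println("filename    = " + props.getProperty("qcow2.filename"));
    }
}
```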
kubeadm_conf_file="${working_dir}/10-kubeadm.conf" touch "${kubeadm_conf_file}" -curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file} +if [[ `echo "${2} $VAL" | awk '{print ($1 < $2)}'` == 1 ]]; then + curl -sSL "https://raw.githubusercontent.com/kubernetes/kubernetes/${RELEASE}/build/debs/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file} +else + curl -sSL "https://raw.githubusercontent.com/shapeblue/cloudstack-nonoss/main/cks/10-kubeadm.conf" | sed "s:/usr/bin:/opt/bin:g" > ${kubeadm_conf_file} +fi NETWORK_CONFIG_URL="${5}" echo "Downloading network config ${NETWORK_CONFIG_URL}" @@ -77,6 +86,12 @@ echo "Downloading dashboard config ${DASHBORAD_CONFIG_URL}" dashboard_conf_file="${working_dir}/dashboard.yaml" curl -sSL ${DASHBORAD_CONFIG_URL} -o ${dashboard_conf_file} +# TODO : Change the url once merged +AUTOSCALER_URL="https://raw.githubusercontent.com/shapeblue/autoscaler/add-acs/cluster-autoscaler/cloudprovider/cloudstack/examples/cluster-autoscaler-standard.yaml" +echo "Downloading kubernetes cluster autoscaler ${AUTOSCALER_URL}" +autoscaler_conf_file="${working_dir}/autoscaler.yaml" +curl -sSL ${AUTOSCALER_URL} -o ${autoscaler_conf_file} + PROVIDER_URL="https://raw.githubusercontent.com/apache/cloudstack-kubernetes-provider/main/deployment.yaml" echo "Downloading kubernetes cluster provider ${PROVIDER_URL}" provider_conf_file="${working_dir}/provider.yaml" @@ -107,6 +122,10 @@ do output=`printf "%s\n" ${output} ${images}` done +# Don't forget about the other image ! +autoscaler_image=`grep "image:" ${autoscaler_conf_file} | cut -d ':' -f2- | tr -d ' '` +output=`printf "%s\n" ${output} ${autoscaler_image}` + provider_image=`grep "image:" ${provider_conf_file} | cut -d ':' -f2- | tr -d ' '` output=`printf "%s\n" ${output} ${provider_image}` diff --git a/scripts/vm/hypervisor/kvm/patch.sh b/scripts/vm/hypervisor/kvm/patch.sh index e7c79fd9a739..84c6b5b14509 100755 --- a/scripts/vm/hypervisor/kvm/patch.sh +++ b/scripts/vm/hypervisor/kvm/patch.sh @@ -25,6 +25,7 @@ while getopts "n:c:h" opt; do name=$OPTARG ;; c ) + bootargs=$OPTARG cmdline=$(echo $OPTARG | base64 -w 0) ;; h ) @@ -70,11 +71,5 @@ do sleep 0.1 done -# Write ssh public key -send_file $name "/root/.ssh/authorized_keys" $sshkey - -# Fix ssh public key permission -virsh qemu-agent-command $name '{"execute":"guest-exec","arguments":{"path":"chmod","arg":["go-rwx","/root/.ssh/authorized_keys"]}}' > /dev/null - # Write cmdline payload send_file $name "/var/cache/cloud/cmdline" $cmdline diff --git a/scripts/vm/systemvm/injectkeys.sh b/scripts/vm/systemvm/injectkeys.sh index 6f006ea130ef..c05d232c0e7c 100755 --- a/scripts/vm/systemvm/injectkeys.sh +++ b/scripts/vm/systemvm/injectkeys.sh @@ -18,8 +18,7 @@ # Copies keys that enable SSH communication with system vms -# $1 = new public key -# $2 = new private key +# $1 = new private key #set -x set -e @@ -33,34 +32,6 @@ clean_up() { $SUDO umount $MOUNTPATH } -inject_into_iso() { - local isofile=${systemvmpath} - local newpubkey=$2 - local backup=${isofile}.bak - local tmpiso=${TMP}/$1 - mkdir -p $MOUNTPATH - [ ! -f $isofile ] && echo "$(basename $0): Could not find systemvm iso patch file $isofile" && return 1 - $SUDO mount -o loop $isofile $MOUNTPATH - [ $? 
-ne 0 ] && echo "$(basename $0): Failed to mount original iso $isofile" && clean_up && return 1 - diff -q $MOUNTPATH/authorized_keys $newpubkey &> /dev/null && echo "New public key is the same as the one in the systemvm.iso, not injecting it, not modifying systemvm.iso" && clean_up && return 0 - $SUDO cp -b $isofile $backup - [ $? -ne 0 ] && echo "$(basename $0): Failed to backup original iso $isofile" && clean_up && return 1 - rm -rf $TMPDIR - mkdir -p $TMPDIR - [ ! -d $TMPDIR ] && echo "$(basename $0): Could not find/create temporary dir $TMPDIR" && clean_up && return 1 - $SUDO cp -fr $MOUNTPATH/* $TMPDIR/ - [ $? -ne 0 ] && echo "$(basename $0): Failed to copy from original iso $isofile" && clean_up && return 1 - $SUDO cp $newpubkey $TMPDIR/authorized_keys - [ $? -ne 0 ] && echo "$(basename $0): Failed to copy key $newpubkey from original iso to new iso " && clean_up && return 1 - mkisofs -quiet -r -o $tmpiso $TMPDIR - [ $? -ne 0 ] && echo "$(basename $0): Failed to create new iso $tmpiso from $TMPDIR" && clean_up && return 1 - $SUDO umount $MOUNTPATH - [ $? -ne 0 ] && echo "$(basename $0): Failed to unmount old iso from $MOUNTPATH" && return 1 - $SUDO cp -f $tmpiso $isofile - [ $? -ne 0 ] && echo "$(basename $0): Failed to overwrite old iso $isofile with $tmpiso" && return 1 - rm -rf $TMPDIR -} - copy_priv_key() { local newprivkey=$1 diff -q $newprivkey $(dirname $0)/id_rsa.cloud && return 0 @@ -76,28 +47,19 @@ fi $SUDO mkdir -p $MOUNTPATH -[ $# -ne 3 ] && echo "Usage: $(basename $0) " && exit 3 -newpubkey=$1 -newprivkey=$2 -systemvmpath=$3 -[ ! -f $newpubkey ] && echo "$(basename $0): Could not open $newpubkey" && exit 3 +[ $# -ne 1 ] && echo "Usage: $(basename $0) " && exit 3 +newprivkey=$1 [ ! -f $newprivkey ] && echo "$(basename $0): Could not open $newprivkey" && exit 3 -command -v mkisofs > /dev/null || (echo "$(basename $0): mkisofs not found, please install or ensure PATH is accurate" ; exit 4) - # if running into Docker as unprivileges, skip ssh verification as iso cannot be mounted due to missing loop device. if [ -f /.dockerenv ]; then if [ -e /dev/loop0 ]; then # it's a docker instance with privileges. - inject_into_iso systemvm.iso $newpubkey - [ $? -ne 0 ] && exit 5 copy_priv_key $newprivkey else - # this mean it's a docker instance, ssh key cannot be verify. - echo "We run inside Docker, skipping ssh key insertion in systemvm.iso" + # this mean it's a docker instance, ssh key cannot be verified. + echo "We run inside Docker, skipping copying private key" fi else - inject_into_iso systemvm.iso $newpubkey - [ $? -ne 0 ] && exit 5 copy_priv_key $newprivkey fi diff --git a/server/conf/cloudstack-sudoers.in b/server/conf/cloudstack-sudoers.in index 908d2f4733f4..5c879f3303f9 100644 --- a/server/conf/cloudstack-sudoers.in +++ b/server/conf/cloudstack-sudoers.in @@ -18,7 +18,7 @@ # The CloudStack management server needs sudo permissions # without a password. 
-Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool +Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool, /bin/touch Defaults:@MSUSER@ !requiretty diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 15d02ce4997f..3eaaf536902d 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -168,7 +168,7 @@ public TemplateResponse newTemplateResponse(EnumSet List storesInZone = dataStoreDao.listStoresByZoneId(template.getDataCenterId()); Long[] storeIds = storesInZone.stream().map(ImageStoreVO::getId).toArray(Long[]::new); List templatesInStore = _templateStoreDao.listByTemplateNotBypassed(template.getId(), storeIds); - List> downloadProgressDetails = new ArrayList(); + List> downloadProgressDetails = new ArrayList<>(); HashMap downloadDetailInImageStores = null; for (TemplateDataStoreVO templateInStore : templatesInStore) { downloadDetailInImageStores = new HashMap<>(); diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 6cc0ace1e056..a3177fa77054 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -1225,6 +1225,9 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl buf.append(" disable_rp_filter=true"); } + String msPublicKey = configurationDao.getValue("ssh.publickey"); + buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); + boolean externalDhcp = false; String externalDhcpStr = configurationDao.getValue("direct.attach.network.externalIpAllocator.enabled"); if (externalDhcpStr != null && externalDhcpStr.equalsIgnoreCase("true")) { @@ -1326,7 +1329,6 @@ public boolean finalizeCommandsOnStart(Commands cmds, VirtualMachineProfile prof if(profile.getHypervisorType() == HypervisorType.Hyperv) { controlNic = managementNic; } - CheckSshCommand check = new CheckSshCommand(profile.getInstanceName(), controlNic.getIPv4Address(), 3922); cmds.addCommand("checkSsh", check); diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index 5e7f0a952643..11ecfb503a29 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -1336,7 +1336,7 @@ private long createNewVM(AutoScaleVmGroupVO asGroup) { } else { vm = _userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), - null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null, null, null, true); + null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null, null, null, true, null); } } diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java 
b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 893ac58fba83..3e2e3b37c65b 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -1939,6 +1939,8 @@ public boolean finalizeVirtualMachineProfile(final VirtualMachineProfile profile if (Boolean.valueOf(_configDao.getValue("system.vm.random.password"))) { buf.append(" vmpassword=").append(_configDao.getValue("system.vm.password")); } + String msPublicKey = _configDao.getValue("ssh.publickey"); + buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); NicProfile controlNic = null; String defaultDns1 = null; diff --git a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java index 1d5b5821b467..c00ed1d9a6e9 100644 --- a/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/main/java/com/cloud/server/ConfigurationServerImpl.java @@ -669,7 +669,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } s_logger.info("Going to update systemvm iso with generated keypairs if needed"); try { - injectSshKeysIntoSystemVmIsoPatch(pubkeyfile.getAbsolutePath(), privkeyfile.getAbsolutePath()); + copyPrivateKeyToHosts(pubkeyfile.getAbsolutePath(), privkeyfile.getAbsolutePath()); } catch (CloudRuntimeException e) { if (!devel) { throw new CloudRuntimeException(e.getMessage()); @@ -738,8 +738,8 @@ private void updateKeyPairsOnDisk(String homeDir) { } } - protected void injectSshKeysIntoSystemVmIsoPatch(String publicKeyPath, String privKeyPath) { - s_logger.info("Trying to inject public and private keys into systemvm iso"); + protected void copyPrivateKeyToHosts(String publicKeyPath, String privKeyPath) { + s_logger.info("Trying to copy private keys to hosts"); String injectScript = getInjectScript(); String scriptPath = Script.findScript("", injectScript); String systemVmIsoPath = Script.findScript("", "vms/systemvm.iso"); @@ -757,15 +757,11 @@ protected void injectSshKeysIntoSystemVmIsoPatch(String publicKeyPath, String pr } if (isOnWindows()) { scriptPath = scriptPath.replaceAll("\\\\" ,"/" ); - systemVmIsoPath = systemVmIsoPath.replaceAll("\\\\" ,"/" ); - publicKeyPath = publicKeyPath.replaceAll("\\\\" ,"/" ); privKeyPath = privKeyPath.replaceAll("\\\\" ,"/" ); } + command.add(scriptPath); - command.add(publicKeyPath); command.add(privKeyPath); - command.add(systemVmIsoPath); - final String result = command.execute(); s_logger.info("The script injectkeys.sh was run with result : " + result); if (result != null) { diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 07e36689d3c5..c511013410b6 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -22,6 +22,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.UnknownHostException; +import java.nio.file.Files; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.util.ArrayList; @@ -40,11 +41,14 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import javax.inject.Inject; import 
com.cloud.agent.api.GetStoragePoolCapabilitiesAnswer; import com.cloud.agent.api.GetStoragePoolCapabilitiesCommand; +import com.cloud.network.router.VirtualNetworkApplianceManager; +import com.cloud.upgrade.SystemVmTemplateRegistration; import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.ApiConstants; @@ -2643,6 +2647,29 @@ public String getName() { return null; } + private String getValidTemplateName(Long zoneId, HypervisorType hType) { + String templateName = null; + switch (hType) { + case XenServer: + templateName = VirtualNetworkApplianceManager.RouterTemplateXen.valueIn(zoneId); + break; + case KVM: + templateName = VirtualNetworkApplianceManager.RouterTemplateKvm.valueIn(zoneId); + break; + case VMware: + templateName = VirtualNetworkApplianceManager.RouterTemplateVmware.valueIn(zoneId); + break; + case Hyperv: + templateName = VirtualNetworkApplianceManager.RouterTemplateHyperV.valueIn(zoneId); + break; + case LXC: + templateName = VirtualNetworkApplianceManager.RouterTemplateLxc.valueIn(zoneId); + break; + default: + break; + } + return templateName; + } @Override public ImageStore discoverImageStore(String name, String url, String providerName, Long zoneId, Map details) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException { DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(providerName); @@ -2727,6 +2754,7 @@ public ImageStore discoverImageStore(String name, String url, String providerNam // populate template_store_ref table _imageSrv.addSystemVMTemplatesToSecondary(store); _imageSrv.handleTemplateSync(store); + registerSystemVmTemplateOnFirstNfsStore(zoneId, providerName, url, store); } // associate builtin template with zones associated with this image store @@ -2740,6 +2768,69 @@ public ImageStore discoverImageStore(String name, String url, String providerNam return (ImageStore)_dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Image); } + private void registerSystemVmTemplateOnFirstNfsStore(Long zoneId, String providerName, String url, DataStore store) { + if (DataStoreProvider.NFS_IMAGE.equals(providerName) && zoneId != null) { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(final TransactionStatus status) { + List stores = _imageStoreDao.listAllStoresInZone(zoneId, providerName, DataStoreRole.Image); + stores = stores.stream().filter(str -> str.getId() != store.getId()).collect(Collectors.toList()); + // Check if it's the only/first store in the zone + if (stores.size() == 0) { + List hypervisorTypes = _clusterDao.getAvailableHypervisorInZone(zoneId); + Set hypSet = new HashSet(hypervisorTypes); + TransactionLegacy txn = TransactionLegacy.open("AutomaticTemplateRegister"); + SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration(); + String filePath = null; + try { + filePath = Files.createTempDirectory(SystemVmTemplateRegistration.TEMPORARY_SECONDARY_STORE).toString(); + if (filePath == null) { + throw new CloudRuntimeException("Failed to create temporary file path to mount the store"); + } + Pair storeUrlAndId = new Pair<>(url, store.getId()); + for (HypervisorType hypervisorType : hypSet) { + try { + String templateName = getValidTemplateName(zoneId, hypervisorType); + Pair hypervisorAndTemplateName = + new Pair<>(hypervisorType, templateName); + Long templateId = 
systemVmTemplateRegistration.getRegisteredTemplateId(hypervisorAndTemplateName); + VMTemplateVO vmTemplateVO = null; + TemplateDataStoreVO templateVO = null; + if (templateId != null) { + vmTemplateVO = _templateDao.findById(templateId); + templateVO = _templateStoreDao.findByTemplate(templateId, DataStoreRole.Image); + if (templateVO != null) { + try { + if (SystemVmTemplateRegistration.validateIfSeeded(url, templateVO.getInstallPath())) { + continue; + } + } catch (Exception e) { + s_logger.error("Failed to validate if template is seeded", e); + } + } + } + SystemVmTemplateRegistration.mountStore(storeUrlAndId.first(), filePath); + if (templateVO != null && vmTemplateVO != null) { + systemVmTemplateRegistration.registerTemplate(hypervisorAndTemplateName, storeUrlAndId, vmTemplateVO, filePath); + } else { + systemVmTemplateRegistration.registerTemplate(hypervisorAndTemplateName, storeUrlAndId, filePath); + } + } catch (CloudRuntimeException e) { + SystemVmTemplateRegistration.unmountStore(filePath); + s_logger.error(String.format("Failed to register systemVM template for hypervisor: %s", hypervisorType.name()), e); + } + } + } catch (Exception e) { + s_logger.error("Failed to register systemVM template(s)"); + } finally { + SystemVmTemplateRegistration.unmountStore(filePath); + txn.close(); + } + } + } + }); + } + } @Override public ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws DiscoveryException, InvalidParameterValueException { // check if current cloud is ready to migrate, we only support cloud with only NFS secondary storages diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index c900197b9b37..22de777157c2 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -2790,6 +2790,14 @@ private Snapshot orchestrateTakeVolumeSnapshot(Long volumeId, Long policyId, Lon return volService.takeSnapshot(volume); } + private boolean isOperationSupported(VMTemplateVO template, UserVmVO userVm) { + if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && + (userVm == null || !UserVmManager.CKS_NODE.equals(userVm.getUserVmType()))) { + return false; + } + return true; + } + @Override @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "allocating snapshot", create = true) public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, Snapshot.LocationType locationType) throws ResourceAllocationException { @@ -2818,7 +2826,12 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, if (volume.getTemplateId() != null) { VMTemplateVO template = _templateDao.findById(volume.getTemplateId()); - if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM) { + Long instanceId = volume.getInstanceId(); + UserVmVO userVmVO = null; + if (instanceId != null) { + userVmVO = _userVmDao.findById(instanceId); + } + if (!isOperationSupported(template, userVmVO)) { throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); } } @@ -2875,7 +2888,12 @@ public Snapshot allocSnapshotForVm(Long vmId, Long volumeId, String snapshotName if (volume.getTemplateId() != null) { VMTemplateVO template = _templateDao.findById(volume.getTemplateId()); - if (template != null && 
template.getTemplateType() == Storage.TemplateType.SYSTEM) { + Long instanceId = volume.getInstanceId(); + UserVmVO userVmVO = null; + if (instanceId != null) { + userVmVO = _userVmDao.findById(instanceId); + } + if (!isOperationSupported(template, userVmVO)) { throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); } } diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 6e10d0540199..d94811a53a3b 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -134,6 +134,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -844,7 +845,12 @@ public SnapshotPolicyVO createPolicy(CreateSnapshotPolicyCmd cmd, Account policy if (volume.getTemplateId() != null) { VMTemplateVO template = _templateDao.findById(volume.getTemplateId()); - if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM) { + Long instanceId = volume.getInstanceId(); + UserVmVO userVmVO = null; + if (instanceId != null) { + userVmVO = _vmDao.findById(instanceId); + } + if (template != null && template.getTemplateType() == Storage.TemplateType.SYSTEM && (userVmVO == null || !UserVmManager.CKS_NODE.equals(userVmVO.getUserVmType()))) { throw new InvalidParameterValueException("VolumeId: " + volumeId + " is for System VM , Creating snapshot against System VM volumes is not supported"); } } diff --git a/server/src/main/java/com/cloud/vm/UserVmManager.java b/server/src/main/java/com/cloud/vm/UserVmManager.java index 98b7cb2ad0d7..8d4cf453a565 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManager.java +++ b/server/src/main/java/com/cloud/vm/UserVmManager.java @@ -57,6 +57,8 @@ public interface UserVmManager extends UserVmService { static final int MAX_USER_DATA_LENGTH_BYTES = 2048; + public static final String CKS_NODE = "cksnode"; + /** * @param hostId get all of the virtual machines that belong to one host. * @return collection of VirtualMachine. 
diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index e97526766ba5..2e85c99bbdc4 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -3427,7 +3427,7 @@ public UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOff return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParametes, customId, dhcpOptionMap, - dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled); + dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled, null); } @@ -3538,7 +3538,7 @@ public UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, Service return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod, userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, dataDiskTemplateToDiskOfferingMap, - userVmOVFProperties, dynamicScalingEnabled); + userVmOVFProperties, dynamicScalingEnabled, null); } @Override @@ -3547,7 +3547,7 @@ public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serv String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData, String sshKeyPair, Map requestedIps, IpAddresses defaultIps, Boolean displayvm, String keyboard, List affinityGroupIdList, Map customParametrs, String customId, Map> dhcpOptionsMap, Map dataDiskTemplateToDiskOfferingMap, - Map userVmOVFPropertiesMap, boolean dynamicScalingEnabled) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, + Map userVmOVFPropertiesMap, boolean dynamicScalingEnabled, String type) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException { Account caller = CallContext.current().getCallingAccount(); @@ -3599,7 +3599,7 @@ public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serv return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, null, group, httpmethod, userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayvm, keyboard, affinityGroupIdList, customParametrs, customId, dhcpOptionsMap, - dataDiskTemplateToDiskOfferingMap, userVmOVFPropertiesMap, dynamicScalingEnabled); + dataDiskTemplateToDiskOfferingMap, userVmOVFPropertiesMap, dynamicScalingEnabled, type); } private NetworkVO getNetworkToAddToNetworkList(VirtualMachineTemplate template, Account owner, HypervisorType hypervisor, @@ -3718,7 +3718,7 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe String sshKeyPair, HypervisorType hypervisor, Account caller, Map requestedIps, IpAddresses defaultIps, Boolean isDisplayVm, String keyboard, List affinityGroupIdList, Map customParameters, String customId, Map> dhcpOptionMap, Map datadiskTemplateToDiskOfferringMap, - Map userVmOVFPropertiesMap, boolean dynamicScalingEnabled) throws 
InsufficientCapacityException, ResourceUnavailableException, + Map userVmOVFPropertiesMap, boolean dynamicScalingEnabled, String type) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, StorageUnavailableException, ResourceAllocationException { _accountMgr.checkAccess(caller, null, true, owner); @@ -3893,7 +3893,7 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe } } - if (template.getTemplateType().equals(TemplateType.SYSTEM)) { + if (template.getTemplateType().equals(TemplateType.SYSTEM) && !CKS_NODE.equals(type)) { throw new InvalidParameterValueException("Unable to use system template " + template.getId() + " to deploy a user vm"); } List listZoneTemplate = _templateZoneDao.listByZoneTemplate(zone.getId(), template.getId()); @@ -4082,7 +4082,7 @@ private UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffe UserVmVO vm = commitUserVm(zone, template, hostName, displayName, owner, diskOfferingId, diskSize, userData, caller, isDisplayVm, keyboard, accountId, userId, offering, isIso, sshPublicKey, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, dhcpOptionMap, - datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled); + datadiskTemplateToDiskOfferringMap, userVmOVFPropertiesMap, dynamicScalingEnabled, type); // Assign instance to the group try { @@ -4194,7 +4194,7 @@ private UserVmVO commitUserVm(final boolean isImport, final DataCenter zone, fin final long accountId, final long userId, final ServiceOffering offering, final boolean isIso, final String sshPublicKey, final LinkedHashMap> networkNicMap, final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map customParameters, final Map> extraDhcpOptionMap, final Map dataDiskTemplateToDiskOfferingMap, - final Map userVmOVFPropertiesMap, final VirtualMachine.PowerState powerState, final boolean dynamicScalingEnabled) throws InsufficientCapacityException { + final Map userVmOVFPropertiesMap, final VirtualMachine.PowerState powerState, final boolean dynamicScalingEnabled, String type) throws InsufficientCapacityException { return Transaction.execute(new TransactionCallbackWithException() { @Override public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCapacityException { @@ -4281,6 +4281,7 @@ public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCap } } + vm.setUserVmType(type); _vmDao.persist(vm); for (String key : customParameters.keySet()) { if (key.equalsIgnoreCase(VmDetailConstants.CPU_NUMBER) || @@ -4386,13 +4387,13 @@ private UserVmVO commitUserVm(final DataCenter zone, final VirtualMachineTemplat final long accountId, final long userId, final ServiceOfferingVO offering, final boolean isIso, final String sshPublicKey, final LinkedHashMap> networkNicMap, final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map customParameters, final Map> extraDhcpOptionMap, final Map dataDiskTemplateToDiskOfferingMap, - Map userVmOVFPropertiesMap, final boolean dynamicScalingEnabled) throws InsufficientCapacityException { + Map userVmOVFPropertiesMap, final boolean dynamicScalingEnabled, String type) throws InsufficientCapacityException { return commitUserVm(false, zone, null, null, template, hostName, displayName, owner, diskOfferingId, diskSize, userData, caller, isDisplayVm, keyboard, accountId, userId, offering, isIso, sshPublicKey, 
networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap, - userVmOVFPropertiesMap, null, dynamicScalingEnabled); + userVmOVFPropertiesMap, null, dynamicScalingEnabled, type); } public void validateRootDiskResize(final HypervisorType hypervisorType, Long rootDiskSize, VMTemplateVO templateVO, UserVmVO vm, final Map customParameters) throws InvalidParameterValueException @@ -4705,12 +4706,54 @@ private UserVm startVirtualMachine(long vmId, Long podId, Long clusterId, Long h return vm; } + private void addUserVMCmdlineArgs(Long vmId, VirtualMachineProfile profile, DeployDestination dest, StringBuilder buf) { + UserVmVO k8sVM = _vmDao.findById(vmId); + buf.append(" template=domP"); + buf.append(" name=").append(profile.getHostName()); + buf.append(" type=").append(k8sVM.getUserVmType()); + for (NicProfile nic : profile.getNics()) { + int deviceId = nic.getDeviceId(); + if (nic.getIPv4Address() == null) { + buf.append(" eth").append(deviceId).append("ip=").append("0.0.0.0"); + buf.append(" eth").append(deviceId).append("mask=").append("0.0.0.0"); + } else { + buf.append(" eth").append(deviceId).append("ip=").append(nic.getIPv4Address()); + buf.append(" eth").append(deviceId).append("mask=").append(nic.getIPv4Netmask()); + } + + if (nic.isDefaultNic()) { + buf.append(" gateway=").append(nic.getIPv4Gateway()); + } + + if (nic.getTrafficType() == TrafficType.Management) { + String mgmt_cidr = _configDao.getValue(Config.ManagementNetwork.key()); + if (NetUtils.isValidIp4Cidr(mgmt_cidr)) { + buf.append(" mgmtcidr=").append(mgmt_cidr); + } + buf.append(" localgw=").append(dest.getPod().getGateway()); + } + } + DataCenterVO dc = _dcDao.findById(profile.getVirtualMachine().getDataCenterId()); + buf.append(" internaldns1=").append(dc.getInternalDns1()); + if (dc.getInternalDns2() != null) { + buf.append(" internaldns2=").append(dc.getInternalDns2()); + } + buf.append(" dns1=").append(dc.getDns1()); + if (dc.getDns2() != null) { + buf.append(" dns2=").append(dc.getDns2()); + } + s_logger.info("cmdline details: "+ buf.toString()); + } + @Override public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) { UserVmVO vm = _vmDao.findById(profile.getId()); Map details = userVmDetailsDao.listDetailsKeyPairs(vm.getId()); vm.setDetails(details); - + StringBuilder buf = profile.getBootArgsBuilder(); + if (CKS_NODE.equals(vm.getUserVmType())) { + addUserVMCmdlineArgs(vm.getId(), profile, dest, buf); + } // add userdata info into vm profile Nic defaultNic = _networkModel.getDefaultNic(vm.getId()); if(defaultNic != null) { @@ -5586,7 +5629,7 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE } vm = createAdvancedVirtualMachine(zone, serviceOffering, template, networkIds, owner, name, displayName, diskOfferingId, size, group, cmd.getHypervisor(), cmd.getHttpMethod(), userData, sshKeyPairName, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(), - cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled); + cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap, userVmOVFProperties, dynamicScalingEnabled, null); } } // check if this templateId has a child ISO @@ -7720,7 +7763,7 @@ public UserVm importVM(final DataCenter zone, final Host host, final VirtualMach null, null, userData, caller, isDisplayVm, 
keyboard, accountId, userId, serviceOffering, template.getFormat().equals(ImageFormat.ISO), sshPublicKey, null, id, instanceName, uuidName, hypervisorType, customParameters, - null, null, null, powerState, dynamicScalingEnabled); + null, null, null, powerState, dynamicScalingEnabled, null); } @Override diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java index f5bba77b2ab8..483f9aef7ef7 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java @@ -340,6 +340,7 @@ private Pair copyToSecondaryStorageVMware(final DataStore store File dataDirectory = new File(dataDirectoryInSecondaryStore); boolean existsInSecondaryStore = dataDirectory.exists() || dataDirectory.mkdir(); if (existsInSecondaryStore) { + // scp from system VM to mounted sec storage directory String homeDir = System.getProperty("user.home"); File permKey = new File(homeDir + "/.ssh/id_rsa"); SshHelper.scpFrom(vmSshIp, 3922, "root", permKey, dataDirectoryInSecondaryStore, diagnosticsFile); diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index f7c22c209852..99539a29dcb3 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -1084,6 +1084,8 @@ public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, Depl buf.append(" guid=").append(profile.getVirtualMachine().getHostName()); buf.append(" workers=").append(_configDao.getValue("workers")); + String msPublicKey = _configDao.getValue("ssh.publickey"); + buf.append(" authorized_key=").append(VirtualMachineGuru.getEncodedMsPublicKey(msPublicKey)); if (_configDao.isPremium()) { s_logger.debug("VMWare hypervisor was configured, informing secondary storage VM to load the PremiumSecondaryStorageResource."); diff --git a/systemvm/debian/opt/cloud/bin/setup/bootstrap.sh b/systemvm/debian/opt/cloud/bin/setup/bootstrap.sh index 769078ea8f09..2335d649a950 100755 --- a/systemvm/debian/opt/cloud/bin/setup/bootstrap.sh +++ b/systemvm/debian/opt/cloud/bin/setup/bootstrap.sh @@ -173,6 +173,7 @@ patch_systemvm() { patch() { local PATCH_MOUNT=/media/cdrom local logfile="/var/log/patchsystemvm.log" + if [ "$TYPE" == "consoleproxy" ] || [ "$TYPE" == "secstorage" ] && [ -f ${PATCH_MOUNT}/agent.zip ] && [ -f /var/cache/cloud/patch.required ] then echo "Patching systemvm for cloud service with mount=$PATCH_MOUNT for type=$TYPE" >> $logfile diff --git a/systemvm/debian/opt/cloud/bin/setup/cksnode.sh b/systemvm/debian/opt/cloud/bin/setup/cksnode.sh new file mode 100755 index 000000000000..a864d188d009 --- /dev/null +++ b/systemvm/debian/opt/cloud/bin/setup/cksnode.sh @@ -0,0 +1,76 @@ +#!/bin/bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +. /opt/cloud/bin/setup/common.sh + +setup_k8s_node() { + log_it "Setting up k8s node" + + update-alternatives --set iptables /usr/sbin/iptables-legacy + update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy + update-alternatives --set arptables /usr/sbin/arptables-legacy + update-alternatives --set ebtables /usr/sbin/ebtables-legacy + + # set default ssh port and restart sshd service + sed -i 's/3922/22/g' /etc/ssh/sshd_config + + # Prevent root login + > /root/.ssh/authorized_keys + passwd -l root + #sed -i 's#root:x:0:0:root:/root:/bin/bash#root:x:0:0:root:/root:/sbin/nologin#' /etc/passwd + + swapoff -a + sudo sed -i '/ swap / s/^/#/' /etc/fstab + log_it "Swap disabled" + + log_it "Setting up interfaces" + setup_common eth0 + setup_system_rfc1918_internal + + log_it "Setting up entry in hosts" + sed -i /$NAME/d /etc/hosts + echo "$ETH0_IP $NAME" >> /etc/hosts + + public_ip=`getPublicIp` + echo "$public_ip $NAME" >> /etc/hosts + + echo "export PATH='$PATH:/opt/bin/'">> ~/.bashrc + + disable_rpfilter + enable_fwding 1 + enable_irqbalance 0 + setup_ntp + dhclient -1 + + rm -f /etc/logrotate.d/cloud + + log_it "Starting cloud-init services" + systemctl enable --now --no-block containerd + systemctl enable --now --no-block docker.socket + systemctl enable --now --no-block docker.service + if [ -f /home/core/success ]; then + systemctl stop cloud-init cloud-config cloud-final + systemctl disable cloud-init cloud-config cloud-final + else + systemctl start --no-block cloud-init + systemctl start --no-block cloud-config + systemctl start --no-block cloud-final + fi +} + +setup_k8s_node \ No newline at end of file diff --git a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config index 02593a37affb..d0ebd0b68146 100755 --- a/systemvm/debian/opt/cloud/bin/setup/cloud-early-config +++ b/systemvm/debian/opt/cloud/bin/setup/cloud-early-config @@ -61,7 +61,6 @@ patch() { [ -f ${md5file} ] && oldmd5=$(cat ${md5file}) local newmd5= [ -f ${patchfile} ] && newmd5=$(md5sum ${patchfile} | awk '{print $1}') - log_it "Scripts checksum detected: oldmd5=$oldmd5 newmd5=$newmd5" if [ "$oldmd5" != "$newmd5" ] && [ -f ${patchfile} ] && [ "$newmd5" != "" ] then @@ -89,6 +88,14 @@ start() { rm -f /root/.rnd echo "" > /root/.ssh/known_hosts + if which growpart > /dev/null; then + ROOT_MOUNT_POINT=$(df -h / | tail -n 1 | cut -d' ' -f1) + ROOT_DISK=$(echo $ROOT_MOUNT_POINT | sed 's/[0-9]*$//g') + growpart $ROOT_DISK 2 + growpart $ROOT_DISK 6 + resize2fs $ROOT_MOUNT_POINT + fi + patch sync /opt/cloud/bin/setup/bootstrap.sh diff --git a/systemvm/debian/opt/cloud/bin/setup/common.sh b/systemvm/debian/opt/cloud/bin/setup/common.sh index 987f07d7659d..60b88754bee3 100755 --- a/systemvm/debian/opt/cloud/bin/setup/common.sh +++ b/systemvm/debian/opt/cloud/bin/setup/common.sh @@ -543,7 +543,7 @@ setup_system_rfc1918_internal() { public_ip=`getPublicIp` echo "$public_ip" | grep -E 
"^((127\.)|(10\.)|(172\.1[6-9]\.)|(172\.2[0-9]\.)|(172\.3[0-1]\.)|(192\.168\.))" if [ "$?" == "0" ]; then - log_it "Not setting up route of RFC1918 space to $LOCAL_GW befause $public_ip is RFC1918." + log_it "Not setting up route of RFC1918 space to $LOCAL_GW because $public_ip is RFC1918." else log_it "Setting up route of RFC1918 space to $LOCAL_GW" # Setup general route for RFC 1918 space, as otherwise it will be sent to @@ -759,6 +759,9 @@ parse_cmd_line() { ntpserverlist) export NTP_SERVER_LIST=$VALUE ;; + authorized_key) + export AUTHORIZED_KEYS=$VALUE + ;; esac done echo -e "\n\t}\n}" >> ${CHEF_TMP_FILE} @@ -767,6 +770,17 @@ parse_cmd_line() { mv ${CHEF_TMP_FILE} /var/cache/cloud/cmd_line.json fi + TMP_KEY_PATH=/tmp/.auth_key + AUTHORIZED_KEYS_PATH=/root/.ssh/authorized_keys + if [ ! -z "$AUTHORIZED_KEYS" ] + then + echo "$AUTHORIZED_KEYS" > $TMP_KEY_PATH + base64Val=$(base64 -d $TMP_KEY_PATH) + echo "$base64Val" > $AUTHORIZED_KEYS_PATH + chmod go-rwx $AUTHORIZED_KEYS_PATH + rm -rf $TMP_KEY_PATH + fi + [ $ETH0_IP ] && export LOCAL_ADDRS=$ETH0_IP [ $ETH0_IP6 ] && export LOCAL_ADDRS=$ETH0_IP6 [ $ETH0_IP ] && [ $ETH0_IP6 ] && export LOCAL_ADDRS="$ETH0_IP,$ETH0_IP6" diff --git a/systemvm/debian/opt/cloud/bin/setup/postinit.sh b/systemvm/debian/opt/cloud/bin/setup/postinit.sh index 5e7e4c01a228..04929302f513 100755 --- a/systemvm/debian/opt/cloud/bin/setup/postinit.sh +++ b/systemvm/debian/opt/cloud/bin/setup/postinit.sh @@ -18,8 +18,17 @@ # # This scripts before ssh.service but after cloud-early-config +log_it() { + echo "$(date) $@" >> /var/log/cloud.log + log_action_msg "$@" +} + # Eject cdrom if any -eject || true +CMDLINE=/var/cache/cloud/cmdline +export TYPE=$(grep -Po 'type=\K[a-zA-Z]*' $CMDLINE) +if [ "$TYPE" != "cksnode" ]; then + eject || true +fi # Restart journald for setting changes to apply systemctl restart systemd-journald @@ -33,6 +42,10 @@ then fi fi +if [ "$TYPE" == "cksnode" ]; then + pkill -9 dhclient +fi + [ ! 
-f /var/cache/cloud/enabled_svcs ] && touch /var/cache/cloud/enabled_svcs for svc in $(cat /var/cache/cloud/enabled_svcs) do diff --git a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index d4be6b8d3c67..0e0ee675a9ed 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -31,6 +31,7 @@ deleteKubernetesCluster, upgradeKubernetesCluster, scaleKubernetesCluster, + getKubernetesClusterConfig, destroyVirtualMachine, deleteNetwork) from marvin.cloudstackException import CloudstackAPIException @@ -49,7 +50,8 @@ from nose.plugins.attrib import attr from marvin.lib.decoratorGenerators import skipTestIf -import time +from kubernetes import client, config +import time, io, yaml _multiprocess_shared_ = True @@ -59,13 +61,12 @@ class TestKubernetesCluster(cloudstackTestCase): @classmethod def setUpClass(cls): - cls.testClient = super(TestKubernetesCluster, cls).getClsTestClient() - cls.apiclient = cls.testClient.getApiClient() - cls.services = cls.testClient.getParsedTestDataConfig() - cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests()) - cls.hypervisor = cls.testClient.getHypervisorInfo() + testClient = super(TestKubernetesCluster, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) + cls.hypervisor = testClient.getHypervisorInfo() cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__ - cls.cks_template_name_key = "cloud.kubernetes.cluster.template.name." + cls.hypervisor.lower() cls.hypervisorNotSupported = False if cls.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]: @@ -88,59 +89,26 @@ def setUpClass(cls): "true") cls.restartServer() cls.updateVmwareSettings(False) - cls.cks_template = None - cls.initial_configuration_cks_template_name = None cls.cks_service_offering = None if cls.setup_failed == False: try: - cls.kubernetes_version_1 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.14.9"]) - cls.kubernetes_version_ids.append(cls.kubernetes_version_1.id) + cls.kubernetes_version_1_20_9 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.20.9"]) + cls.kubernetes_version_ids.append(cls.kubernetes_version_1_20_9.id) except Exception as e: cls.setup_failed = True cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" % - (cls.services["cks_kubernetes_versions"]["1.14.9"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.14.9"]["url"], e)) + (cls.services["cks_kubernetes_versions"]["1.20.9"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.20.9"]["url"], e)) if cls.setup_failed == False: try: - cls.kubernetes_version_2 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.15.0"]) - cls.kubernetes_version_ids.append(cls.kubernetes_version_2.id) + cls.kubernetes_version_1_21_3 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.21.3"]) + cls.kubernetes_version_ids.append(cls.kubernetes_version_1_21_3.id) except Exception as e: cls.setup_failed = True cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" % - (cls.services["cks_kubernetes_versions"]["1.15.0"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.15.0"]["url"], e)) - if cls.setup_failed == False: - try: - cls.kubernetes_version_3 = 
cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.0"]) - cls.kubernetes_version_ids.append(cls.kubernetes_version_3.id) - except Exception as e: - cls.setup_failed = True - cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" % - (cls.services["cks_kubernetes_versions"]["1.16.0"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.16.0"]["url"], e)) - if cls.setup_failed == False: - try: - cls.kubernetes_version_4 = cls.addKubernetesSupportedVersion(cls.services["cks_kubernetes_versions"]["1.16.3"]) - cls.kubernetes_version_ids.append(cls.kubernetes_version_4.id) - except Exception as e: - cls.setup_failed = True - cls.debug("Failed to get Kubernetes version ISO in ready state, version=%s, url=%s, %s" % - (cls.services["cks_kubernetes_versions"]["1.16.3"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.16.3"]["url"], e)) + (cls.services["cks_kubernetes_versions"]["1.21.3"]["semanticversion"], cls.services["cks_kubernetes_versions"]["1.21.3"]["url"], e)) if cls.setup_failed == False: - cls.cks_template, existAlready = cls.getKubernetesTemplate() - if cls.cks_template == FAILED: - assert False, "getKubernetesTemplate() failed to return template for hypervisor %s" % cls.hypervisor - cls.setup_failed = True - else: - if not existAlready: - cls._cleanup.append(cls.cks_template) - - if cls.setup_failed == False: - cls.initial_configuration_cks_template_name = Configurations.list(cls.apiclient, - name=cls.cks_template_name_key)[0].value - Configurations.update(cls.apiclient, - cls.cks_template_name_key, - cls.cks_template.name) - cks_offering_data = cls.services["cks_service_offering"] cks_offering_data["name"] = 'CKS-Instance-' + random_gen() cls.cks_service_offering = ServiceOffering.create( @@ -159,6 +127,10 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): + if k8s_cluster != None and k8s_cluster.id != None: + clsObj = TestKubernetesCluster() + clsObj.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) + version_delete_failed = False # Delete added Kubernetes supported version for version_id in cls.kubernetes_version_ids: @@ -168,11 +140,6 @@ def tearDownClass(cls): version_delete_failed = True cls.debug("Error: Exception during cleanup for added Kubernetes supported versions: %s" % e) try: - # Restore original CKS template - if cls.hypervisorNotSupported == False and cls.initial_configuration_cks_template_name != None: - Configurations.update(cls.apiclient, - cls.cks_template_name_key, - cls.initial_configuration_cks_template_name) # Restore CKS enabled if cls.initial_configuration_cks_enabled not in ["true", True]: cls.debug("Restoring Kubernetes Service enabled value") @@ -242,41 +209,6 @@ def isManagementUp(cls): except Exception: return False - @classmethod - def getKubernetesTemplate(cls, cks_templates=None): - - if cks_templates is None: - cks_templates = cls.services["cks_templates"] - - hypervisor = cls.hypervisor.lower() - - if hypervisor not in list(cks_templates.keys()): - cls.debug("Provided hypervisor has no CKS template") - return FAILED, False - - cks_template = cks_templates[hypervisor] - - cmd = listTemplates.listTemplatesCmd() - cmd.name = cks_template['name'] - cmd.templatefilter = 'all' - cmd.zoneid = cls.zone.id - cmd.hypervisor = hypervisor - templates = cls.apiclient.listTemplates(cmd) - - if validateList(templates)[0] != PASS: - details = None - if hypervisor in ["vmware"] and "details" in cks_template: - details = cks_template["details"] - template = 
Template.register(cls.apiclient, cks_template, zoneid=cls.zone.id, hypervisor=hypervisor.lower(), randomize_name=False, details=details) - template.download(cls.apiclient) - return template, False - - for template in templates: - if template.isready and template.ispublic: - return Template(template.__dict__), True - - return FAILED, False - @classmethod def waitForKubernetesSupportedVersionIsoReadyState(cls, version_id, retries=30, interval=60): """Check if Kubernetes supported version ISO is in Ready state""" @@ -319,9 +251,79 @@ def addKubernetesSupportedVersion(cls, version_service): def deleteKubernetesSupportedVersion(cls, version_id): deleteKubernetesSupportedVersionCmd = deleteKubernetesSupportedVersion.deleteKubernetesSupportedVersionCmd() deleteKubernetesSupportedVersionCmd.id = version_id - deleteKubernetesSupportedVersionCmd.deleteiso = True cls.apiclient.deleteKubernetesSupportedVersion(deleteKubernetesSupportedVersionCmd) + @classmethod + def listKubernetesCluster(cls, cluster_id = None): + listKubernetesClustersCmd = listKubernetesClusters.listKubernetesClustersCmd() + listKubernetesClustersCmd.listall = True + if cluster_id != None: + listKubernetesClustersCmd.id = cluster_id + clusterResponse = cls.apiclient.listKubernetesClusters(listKubernetesClustersCmd) + if cluster_id != None and clusterResponse != None: + return clusterResponse[0] + return clusterResponse + + @classmethod + def deleteKubernetesCluster(cls, cluster_id): + deleteKubernetesClusterCmd = deleteKubernetesCluster.deleteKubernetesClusterCmd() + deleteKubernetesClusterCmd.id = cluster_id + response = cls.apiclient.deleteKubernetesCluster(deleteKubernetesClusterCmd) + return response + + @classmethod + def stopKubernetesCluster(cls, cluster_id): + stopKubernetesClusterCmd = stopKubernetesCluster.stopKubernetesClusterCmd() + stopKubernetesClusterCmd.id = cluster_id + response = cls.apiclient.stopKubernetesCluster(stopKubernetesClusterCmd) + return response + + + + def deleteKubernetesClusterAndVerify(self, cluster_id, verify = True, forced = False): + """Delete Kubernetes cluster and check if it is really deleted""" + + delete_response = {} + forceDeleted = False + try: + delete_response = self.deleteKubernetesCluster(cluster_id) + except Exception as e: + if forced: + cluster = self.listKubernetesCluster(cluster_id) + if cluster != None: + if cluster.state in ['Starting', 'Running', 'Upgrading', 'Scaling']: + self.stopKubernetesCluster(cluster_id) + self.deleteKubernetesCluster(cluster_id) + else: + forceDeleted = True + for cluster_vm in cluster.virtualmachines: + cmd = destroyVirtualMachine.destroyVirtualMachineCmd() + cmd.id = cluster_vm.id + cmd.expunge = True + self.apiclient.destroyVirtualMachine(cmd) + cmd = deleteNetwork.deleteNetworkCmd() + cmd.id = cluster.networkid + cmd.forced = True + self.apiclient.deleteNetwork(cmd) + self.dbclient.execute("update kubernetes_cluster set state='Destroyed', removed=now() where uuid = '%s';" % cluster.id) + else: + raise Exception("Error: Exception during delete cluster : %s" % e) + + if verify == True and forceDeleted == False: + self.assertEqual( + delete_response.success, + True, + "Check KubernetesCluster delete response {}, {}".format(delete_response.success, True) + ) + + db_cluster_removed = self.dbclient.execute("select removed from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0] + + self.assertNotEqual( + db_cluster_removed, + None, + "KubernetesCluster not removed in DB, {}".format(db_cluster_removed) + ) + def setUp(self): self.services = 
self.testClient.getParsedTestDataConfig() self.apiclient = self.testClient.getApiClient() @@ -347,24 +349,25 @@ def test_01_invalid_upgrade_kubernetes_cluster(self): if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster() + k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_21_3) - self.debug("Upgrading Kubernetes cluster with ID: %s to a lower version" % k8s_cluster.id) + self.debug("Downgrading Kubernetes cluster with ID: %s to a lower version. This should fail!" % k8s_cluster.id) try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1.id) - self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % kubernetes_version_1.id) + k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_20_9.id) + self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % self.kubernetes_version_1_20_9.id) self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) - self.fail("Kubernetes cluster upgraded to a lower Kubernetes supported version. Must be an error.") + self.fail("Kubernetes cluster downgraded to a lower Kubernetes supported version. Must be an error.") except Exception as e: self.debug("Upgrading Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e) self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) + self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id) return @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_02_deploy_and_upgrade_kubernetes_cluster(self): + def test_02_upgrade_kubernetes_cluster(self): """Test to deploy a new Kubernetes cluster and upgrade it to newer version # Validate the following: @@ -373,19 +376,17 @@ def test_02_deploy_and_upgrade_kubernetes_cluster(self): if self.setup_failed == True: self.fail("Setup incomplete") global k8s_cluster - k8s_cluster = self.getValidKubernetesCluster() - time.sleep(self.services["sleep"]) + k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_20_9) + time.sleep(self.services["sleep"]) self.debug("Upgrading Kubernetes cluster with ID: %s" % k8s_cluster.id) - try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_3.id) + k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_21_3.id) except Exception as e: self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) self.fail("Failed to upgrade Kubernetes cluster due to: %s" % e) - self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_3.id) - + self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id) return @attr(tags=["advanced", "smoke"], required_hardware="true") @@ -403,7 +404,6 @@ def test_03_deploy_and_scale_kubernetes_cluster(self): k8s_cluster = self.getValidKubernetesCluster() self.debug("Upscaling Kubernetes cluster with ID: %s" % k8s_cluster.id) - try: k8s_cluster = self.scaleKubernetesCluster(k8s_cluster.id, 2) except Exception as e: @@ -411,7 +411,6 @@ def test_03_deploy_and_scale_kubernetes_cluster(self): self.fail("Failed to upscale Kubernetes cluster due to: %s" % e) self.verifyKubernetesClusterScale(k8s_cluster, 2) - self.debug("Kubernetes cluster with ID: %s successfully upscaled, now downscaling it" % k8s_cluster.id) try: @@ 
-421,14 +420,38 @@ def test_03_deploy_and_scale_kubernetes_cluster(self): self.fail("Failed to downscale Kubernetes cluster due to: %s" % e) self.verifyKubernetesClusterScale(k8s_cluster) - self.debug("Kubernetes cluster with ID: %s successfully downscaled" % k8s_cluster.id) + return + + @attr(tags=["advanced", "smoke"], required_hardware="true") + @skipTestIf("hypervisorNotSupported") + def test_04_autoscale_kubernetes_cluster(self): + """Test to enable autoscaling a Kubernetes cluster + # Validate the following: + # 1. scaleKubernetesCluster should return valid info for the cluster when it is autoscaled + # 2. cluster-autoscaler pod should be running + """ + if self.setup_failed == True: + self.fail("Setup incomplete") + global k8s_cluster + k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_1_21_3) + self.debug("Autoscaling Kubernetes cluster with ID: %s" % k8s_cluster.id) + try: + k8s_cluster = self.autoscaleKubernetesCluster(k8s_cluster.id, 1, 2) + self.verifyKubernetesClusterAutocale(k8s_cluster, 1, 2) + + up = self.waitForAutoscalerPodInRunningState(k8s_cluster.id) + self.assertTrue(up, "Autoscaler pod failed to run") + self.debug("Kubernetes cluster with ID: %s has autoscaler running" % k8s_cluster.id) + except Exception as e: + self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) + self.fail("Failed to autoscale Kubernetes cluster due to: %s" % e) return @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_04_basic_lifecycle_kubernetes_cluster(self): + def test_05_basic_lifecycle_kubernetes_cluster(self): """Test to deploy a new Kubernetes cluster # Validate the following: @@ -456,9 +479,10 @@ def test_04_basic_lifecycle_kubernetes_cluster(self): self.verifyKubernetesClusterState(k8s_cluster, 'Running') return + @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_05_delete_kubernetes_cluster(self): + def test_06_delete_kubernetes_cluster(self): """Test to delete an existing Kubernetes cluster # Validate the following: @@ -479,29 +503,6 @@ def test_05_delete_kubernetes_cluster(self): return - @attr(tags=["advanced", "smoke"], required_hardware="true") - @skipTestIf("hypervisorNotSupported") - def test_06_deploy_invalid_kubernetes_ha_cluster(self): - """Test to deploy an invalid HA Kubernetes cluster - - # Validate the following: - # 1. createKubernetesCluster should fail as version doesn't support HA - """ - if self.setup_failed == True: - self.fail("Setup incomplete") - name = 'testcluster-' + random_gen() - self.debug("Creating for Kubernetes cluster with name %s" % name) - - try: - cluster_response = self.createKubernetesCluster(name, self.kubernetes_version_2.id, 1, 2) - self.debug("Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." % cluster_response.id) - self.deleteKubernetesClusterAndVerify(cluster_response.id, False, True) - self.fail("HA Kubernetes cluster deployed with Kubernetes supported version below version 1.16.0. 
Must be an error.") - except CloudstackAPIException as e: - self.debug("HA Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e) - - return - @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") def test_07_deploy_kubernetes_ha_cluster(self): @@ -515,15 +516,13 @@ def test_07_deploy_kubernetes_ha_cluster(self): self.fail("Setup incomplete") global k8s_cluster k8s_cluster = self.getValidKubernetesCluster(1, 2) - self.debug("HA Kubernetes cluster with ID: %s successfully deployed" % k8s_cluster.id) - return @attr(tags=["advanced", "smoke"], required_hardware="true") @skipTestIf("hypervisorNotSupported") - def test_08_deploy_and_upgrade_kubernetes_ha_cluster(self): - """Test to deploy a new HA Kubernetes cluster and upgrade it to newer version + def test_08_upgrade_kubernetes_ha_cluster(self): + """Test to upgrade a Kubernetes cluster to newer version # Validate the following: # 1. upgradeKubernetesCluster should return valid info for the cluster @@ -536,15 +535,13 @@ def test_08_deploy_and_upgrade_kubernetes_ha_cluster(self): self.debug("Upgrading HA Kubernetes cluster with ID: %s" % k8s_cluster.id) try: - k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_4.id) + k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_1_21_3.id) except Exception as e: self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True) self.fail("Failed to upgrade Kubernetes HA cluster due to: %s" % e) - self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_4.id) - + self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_1_21_3.id) self.debug("Kubernetes cluster with ID: %s successfully upgraded" % k8s_cluster.id) - return @attr(tags=["advanced", "smoke"], required_hardware="true") @@ -561,22 +558,8 @@ def test_09_delete_kubernetes_ha_cluster(self): k8s_cluster = self.getValidKubernetesCluster(1, 2) self.debug("Deleting Kubernetes cluster with ID: %s" % k8s_cluster.id) - - self.deleteKubernetesClusterAndVerify(k8s_cluster.id) - - self.debug("Kubernetes cluster with ID: %s successfully deleted" % k8s_cluster.id) - return - def listKubernetesCluster(self, cluster_id = None): - listKubernetesClustersCmd = listKubernetesClusters.listKubernetesClustersCmd() - if cluster_id != None: - listKubernetesClustersCmd.id = cluster_id - clusterResponse = self.apiclient.listKubernetesClusters(listKubernetesClustersCmd) - if cluster_id != None and clusterResponse != None: - return clusterResponse[0] - return clusterResponse - def createKubernetesCluster(self, name, version_id, size=1, control_nodes=1): createKubernetesClusterCmd = createKubernetesCluster.createKubernetesClusterCmd() createKubernetesClusterCmd.name = name @@ -594,24 +577,12 @@ def createKubernetesCluster(self, name, version_id, size=1, control_nodes=1): self.cleanup.append(clusterResponse) return clusterResponse - def stopKubernetesCluster(self, cluster_id): - stopKubernetesClusterCmd = stopKubernetesCluster.stopKubernetesClusterCmd() - stopKubernetesClusterCmd.id = cluster_id - response = self.apiclient.stopKubernetesCluster(stopKubernetesClusterCmd) - return response - def startKubernetesCluster(self, cluster_id): startKubernetesClusterCmd = startKubernetesCluster.startKubernetesClusterCmd() startKubernetesClusterCmd.id = cluster_id response = self.apiclient.startKubernetesCluster(startKubernetesClusterCmd) return response - def deleteKubernetesCluster(self, cluster_id): 
- deleteKubernetesClusterCmd = deleteKubernetesCluster.deleteKubernetesClusterCmd() - deleteKubernetesClusterCmd.id = cluster_id - response = self.apiclient.deleteKubernetesCluster(deleteKubernetesClusterCmd) - return response - def upgradeKubernetesCluster(self, cluster_id, version_id): upgradeKubernetesClusterCmd = upgradeKubernetesCluster.upgradeKubernetesClusterCmd() upgradeKubernetesClusterCmd.id = cluster_id @@ -626,42 +597,96 @@ def scaleKubernetesCluster(self, cluster_id, size): response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd) return response - def getValidKubernetesCluster(self, size=1, control_nodes=1): + def autoscaleKubernetesCluster(self, cluster_id, minsize, maxsize): + scaleKubernetesClusterCmd = scaleKubernetesCluster.scaleKubernetesClusterCmd() + scaleKubernetesClusterCmd.id = cluster_id + scaleKubernetesClusterCmd.autoscalingenabled = True + scaleKubernetesClusterCmd.minsize = minsize + scaleKubernetesClusterCmd.maxsize = maxsize + response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd) + return response + + def fetchKubernetesClusterConfig(self, cluster_id): + getKubernetesClusterConfigCmd = getKubernetesClusterConfig.getKubernetesClusterConfigCmd() + getKubernetesClusterConfigCmd.id = cluster_id + response = self.apiclient.getKubernetesClusterConfig(getKubernetesClusterConfigCmd) + return response + + def waitForAutoscalerPodInRunningState(self, cluster_id, retries=5, interval=60): + k8s_config = self.fetchKubernetesClusterConfig(cluster_id) + cfg = io.StringIO(k8s_config.configdata) + cfg = yaml.load(cfg) + # Adding this so we don't get certificate exceptions + cfg['clusters'][0]['cluster']['insecure-skip-tls-verify']=True + config.load_kube_config_from_dict(cfg) + v1 = client.CoreV1Api() + + while retries > 0: + time.sleep(interval) + pods = v1.list_pod_for_all_namespaces(watch=False, label_selector="app=cluster-autoscaler").items + if len(pods) == 0 : + self.debug("Autoscaler pod still not up") + continue + pod = pods[0] + if pod.status.phase == 'Running' : + self.debug("Autoscaler pod %s up and running!" % pod.metadata.name) + return True + self.debug("Autoscaler pod %s up but not running on retry %d. State is : %s" %(pod.metadata.name, retries, pod.status.phase)) + retries = retries - 1 + return False + + def getValidKubernetesCluster(self, size=1, control_nodes=1, version={}): cluster = k8s_cluster - version = self.kubernetes_version_2 - if control_nodes != 1: - version = self.kubernetes_version_3 - valid = True - if cluster == None: - valid = False + + # Does a cluster already exist ? + if cluster == None or cluster.id == None: + if not version: + version = self.kubernetes_version_1_20_9 self.debug("No existing cluster available, k8s_cluster: %s" % cluster) - if valid == True and cluster.id == None: - valid = False - self.debug("ID for existing cluster not found, k8s_cluster ID: %s" % cluster.id) - if valid == True: + return self.createNewKubernetesCluster(version, size, control_nodes) + + # Is the existing cluster what is needed ? + valid = cluster.size == size and cluster.controlnodes == control_nodes + if version: + # Check the version only if specified + valid = valid and cluster.kubernetesversionid == version.id + else: + version = self.kubernetes_version_1_20_9 + + if valid: cluster_id = cluster.id cluster = self.listKubernetesCluster(cluster_id) if cluster == None: - valid = False + # Looks like the cluster disappeared ! 
self.debug("Existing cluster, k8s_cluster ID: %s not returned by list API" % cluster_id) - if valid == True: + return self.createNewKubernetesCluster(version, size, control_nodes) + + if valid: try: self.verifyKubernetesCluster(cluster, cluster.name, None, size, control_nodes) self.debug("Existing Kubernetes cluster available with name %s" % cluster.name) + return cluster except AssertionError as error: - valid = False self.debug("Existing cluster failed verification due to %s, need to deploy a new one" % error) - if valid == False: - name = 'testcluster-' + random_gen() - self.debug("Creating for Kubernetes cluster with name %s" % name) - try: - self.deleteAllLeftoverClusters() - cluster = self.createKubernetesCluster(name, version.id, size, control_nodes) - self.verifyKubernetesCluster(cluster, name, version.id, size, control_nodes) - except Exception as ex: - self.fail("Kubernetes cluster deployment failed: %s" % ex) - except AssertionError as err: - self.fail("Kubernetes cluster deployment failed during cluster verification: %s" % err) + self.deleteKubernetesClusterAndVerify(cluster.id, False, True) + + # Can't have too many loose clusters running around + if cluster.id != None: + self.deleteKubernetesClusterAndVerify(cluster.id, False, True) + + self.debug("No valid cluster, need to deploy a new one") + return self.createNewKubernetesCluster(version, size, control_nodes) + + def createNewKubernetesCluster(self, version, size, control_nodes) : + name = 'testcluster-' + random_gen() + self.debug("Creating for Kubernetes cluster with name %s" % name) + try: + cluster = self.createKubernetesCluster(name, version.id, size, control_nodes) + self.verifyKubernetesCluster(cluster, name, version.id, size, control_nodes) + except Exception as ex: + self.fail("Kubernetes cluster deployment failed: %s" % ex) + except AssertionError as err: + self.fail("Kubernetes cluster deployment failed during cluster verification: %s" % err) return cluster def verifyKubernetesCluster(self, cluster_response, name, version_id=None, size=1, control_nodes=1): @@ -740,6 +765,21 @@ def verifyKubernetesClusterScale(self, cluster_response, size=1, control_nodes=1 self.verifyKubernetesClusterState(cluster_response, 'Running') self.verifyKubernetesClusterSize(cluster_response, size, control_nodes) + def verifyKubernetesClusterAutocale(self, cluster_response, minsize, maxsize): + """Check if Kubernetes cluster state and node sizes are valid after upgrade""" + + self.verifyKubernetesClusterState(cluster_response, 'Running') + self.assertEqual( + cluster_response.minsize, + minsize, + "Check KubernetesCluster minsize {}, {}".format(cluster_response.minsize, minsize) + ) + self.assertEqual( + cluster_response.maxsize, + maxsize, + "Check KubernetesCluster maxsize {}, {}".format(cluster_response.maxsize, maxsize) + ) + def stopAndVerifyKubernetesCluster(self, cluster_id): """Stop Kubernetes cluster and check if it is really stopped""" @@ -758,52 +798,3 @@ def stopAndVerifyKubernetesCluster(self, cluster_id): 'Stopped', "KubernetesCluster not stopped in DB, {}".format(db_cluster_state) ) - - def deleteKubernetesClusterAndVerify(self, cluster_id, verify = True, forced = False): - """Delete Kubernetes cluster and check if it is really deleted""" - - forceDeleted = False - try: - delete_response = self.deleteKubernetesCluster(cluster_id) - except Exception as e: - if forced: - cluster = self.listKubernetesCluster(cluster_id) - if cluster != None: - if cluster.state in ['Starting', 'Running', 'Upgrading', 'Scaling']: - 
self.stopKubernetesCluster(cluster_id) - self.deleteKubernetesCluster(cluster_id) - else: - forceDeleted = True - for cluster_vm in cluster.virtualmachines: - cmd = destroyVirtualMachine.destroyVirtualMachineCmd() - cmd.id = cluster_vm.id - cmd.expunge = True - self.apiclient.destroyVirtualMachine(cmd) - cmd = deleteNetwork.deleteNetworkCmd() - cmd.id = cluster.networkid - cmd.forced = True - self.apiclient.deleteNetwork(cmd) - self.dbclient.execute("update kubernetes_cluster set state='Destroyed', removed=now() where uuid = '%s';" % cluster.id) - else: - raise Exception("Error: Exception during delete cluster : %s" % e) - - if verify == True and forceDeleted == False: - self.assertEqual( - delete_response.success, - True, - "Check KubernetesCluster delete response {}, {}".format(delete_response.success, True) - ) - - db_cluster_removed = self.dbclient.execute("select removed from kubernetes_cluster where uuid = '%s';" % cluster_id)[0][0] - - self.assertNotEqual( - db_cluster_removed, - None, - "KubernetesCluster not removed in DB, {}".format(db_cluster_removed) - ) - - def deleteAllLeftoverClusters(self): - clusters = self.listKubernetesCluster() - if clusters != None: - for cluster in clusters: - self.deleteKubernetesClusterAndVerify(cluster.id, False, True) diff --git a/tools/appliance/systemvmtemplate/http/preseed.cfg b/tools/appliance/systemvmtemplate/http/preseed.cfg index 42718933ecba..ae71ed5c0638 100644 --- a/tools/appliance/systemvmtemplate/http/preseed.cfg +++ b/tools/appliance/systemvmtemplate/http/preseed.cfg @@ -66,13 +66,13 @@ d-i partman-auto/expert_recipe string \ use_filesystem{ } filesystem{ ext2 } \ mountpoint{ /boot } \ . \ + 256 1000 256 linux-swap \ + method{ swap } format{ } \ + . \ 2240 40 4000 ext4 \ method{ format } format{ } \ use_filesystem{ } filesystem{ ext4 } \ mountpoint{ / } \ - . \ - 256 1000 256 linux-swap \ - method{ swap } format{ } \ . d-i partman-md/confirm boolean true diff --git a/tools/appliance/systemvmtemplate/scripts/cleanup.sh b/tools/appliance/systemvmtemplate/scripts/cleanup.sh index 8f2408a325a3..ab0ceb628611 100644 --- a/tools/appliance/systemvmtemplate/scripts/cleanup.sh +++ b/tools/appliance/systemvmtemplate/scripts/cleanup.sh @@ -17,11 +17,10 @@ # under the License. 
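The Marvin helpers added to the test library above (autoscaleKubernetesCluster, fetchKubernetesClusterConfig, waitForAutoscalerPodInRunningState and verifyKubernetesClusterAutocale) give everything needed to exercise cluster autoscaling end to end. A minimal sketch of a test method that chains them, assuming it sits in the same test class as those helpers; the method name and the 1 to 2 worker range are illustrative and not part of this patch:

    def test_autoscale_kubernetes_cluster(self):
        """Illustrative flow for the new autoscaling helpers"""
        cluster = self.getValidKubernetesCluster(1, 1)

        # Enable autoscaling with an assumed worker range of 1 to 2 nodes
        self.autoscaleKubernetesCluster(cluster.id, 1, 2)

        # The list API should now report the autoscaling bounds
        cluster = self.listKubernetesCluster(cluster.id)
        self.verifyKubernetesClusterAutocale(cluster, 1, 2)

        # The cluster-autoscaler pod is deployed inside the cluster itself
        self.assertTrue(
            self.waitForAutoscalerPodInRunningState(cluster.id),
            "cluster-autoscaler pod did not reach Running state")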
 set -e
-set -x

 function cleanup_apt() {
   export DEBIAN_FRONTEND=noninteractive
-  apt-get -y remove --purge dictionaries-common busybox isc-dhcp-client isc-dhcp-common \
+  apt-get -y remove --purge dictionaries-common busybox \
     task-english task-ssh-server tasksel tasksel-data laptop-detect wamerican sharutils \
     nano util-linux-locales krb5-locales
diff --git a/tools/appliance/systemvmtemplate/scripts/configure_conntrack.sh b/tools/appliance/systemvmtemplate/scripts/configure_conntrack.sh
index 7202717d73b5..63016a98d003 100644
--- a/tools/appliance/systemvmtemplate/scripts/configure_conntrack.sh
+++ b/tools/appliance/systemvmtemplate/scripts/configure_conntrack.sh
@@ -34,8 +34,6 @@ function load_conntrack_modules() {
   grep nf_conntrack_ipv4 /etc/modules && return
   cat >> /etc/modules << EOF
-nf_conntrack_ipv4
-nf_conntrack_ipv6
 nf_conntrack
 nf_conntrack_ftp
 nf_conntrack_pptp
diff --git a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh
index fa9e38a0e8ce..db3eec5cb40c 100644
--- a/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh
+++ b/tools/appliance/systemvmtemplate/scripts/configure_systemvm_services.sh
@@ -41,7 +41,7 @@ function configure_issue() {
  __?.o/  Apache CloudStack SystemVM $CLOUDSTACK_RELEASE
 (  )#    https://cloudstack.apache.org
-(___(_)  Debian GNU/Linux 10 \n \l
+(___(_)  Debian GNU/Linux 11 \n \l
 EOF
 }
@@ -124,6 +124,26 @@ function configure_services() {
   systemctl disable hyperv-daemons.hv-vss-daemon.service
   systemctl disable qemu-guest-agent
+  # Disable container services
+  systemctl disable containerd
+  systemctl disable docker.service
+  systemctl stop docker.service
+  systemctl disable docker.socket
+  systemctl stop docker.socket
+
+  # Disable cloud init by default
+cat <<EOF > /etc/cloud/cloud.cfg.d/cloudstack.cfg
+datasource_list: ['CloudStack']
+datasource:
+  CloudStack:
+    max_wait: 120
+    timeout: 50
+EOF
+
+  touch /etc/cloud/cloud-init.disabled
+  systemctl stop cloud-init
+  systemctl disable cloud-init
+
   configure_apache2
   configure_strongswan
   configure_issue
diff --git a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh
index 8f8a704aa04f..555a00c28edb 100644
--- a/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh
+++ b/tools/appliance/systemvmtemplate/scripts/install_systemvm_packages.sh
@@ -35,6 +35,12 @@ function debconf_packages() {
   echo "libc6 libraries/restart-without-asking boolean false" | debconf-set-selections
 }
+function apt_clean() {
+  apt-get -y autoremove --purge
+  apt-get clean
+  apt-get autoclean
+}
+
 function install_packages() {
   export DEBIAN_FRONTEND=noninteractive
   export DEBIAN_PRIORITY=critical
@@ -69,23 +75,33 @@ function install_packages() {
     radvd \
     sharutils genisoimage \
     strongswan libcharon-extra-plugins libstrongswan-extra-plugins strongswan-charon strongswan-starter \
-    virt-what open-vm-tools qemu-guest-agent hyperv-daemons
+    virt-what open-vm-tools qemu-guest-agent hyperv-daemons cloud-guest-utils \
+    conntrack apt-transport-https ca-certificates curl gnupg gnupg-agent software-properties-common
+
+  apt-get install -y python3-json-pointer python3-jsonschema cloud-init

   # python2-netaddr workaround
   wget https://github.com/shapeblue/cloudstack-nonoss/raw/main/python-netaddr_0.7.19-1_all.deb
   dpkg -i python-netaddr_0.7.19-1_all.deb

-  apt-get -y autoremove --purge
-  apt-get clean
-  apt-get autoclean
+  apt_clean

-  #32 bit
architecture support for vhd-util: not required for 32 bit template + # 32 bit architecture support for vhd-util if [ "${arch}" != "i386" ]; then dpkg --add-architecture i386 apt-get update ${apt_get} install libuuid1:i386 libc6:i386 fi + # Install docker and containerd for CKS + curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add - + apt-key fingerprint 0EBFCD88 + add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable" + apt-get update + ${apt_get} install docker-ce docker-ce-cli containerd.io + + apt_clean + install_vhd_util # Install xenserver guest utilities as debian repos don't have it wget https://mirrors.kernel.org/ubuntu/pool/main/x/xe-guest-utilities/xe-guest-utilities_7.10.0-0ubuntu1_amd64.deb diff --git a/tools/appliance/systemvmtemplate/template.json b/tools/appliance/systemvmtemplate/template.json index 4a1ac0a1c098..bd932bf6cdef 100644 --- a/tools/appliance/systemvmtemplate/template.json +++ b/tools/appliance/systemvmtemplate/template.json @@ -73,4 +73,3 @@ } ] } - diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py index cc26759612c1..9a53c41f6d56 100644 --- a/tools/marvin/marvin/config/test_data.py +++ b/tools/marvin/marvin/config/test_data.py @@ -2048,12 +2048,6 @@ } }, "cks_kubernetes_versions": { - "1.14.9": { - "semanticversion": "1.14.9", - "url": "http://download.cloudstack.org/cks/setup-1.14.9.iso", - "mincpunumber": 2, - "minmemory": 2048 - }, "1.15.0": { "semanticversion": "1.15.0", "url": "http://download.cloudstack.org/cks/setup-1.15.0.iso", @@ -2073,41 +2067,6 @@ "minmemory": 2048 } }, - "cks_templates": { - "kvm": { - "name": "Kubernetes-Service-Template-kvm", - "displaytext": "Kubernetes-Service-Template kvm", - "format": "qcow2", - "hypervisor": "kvm", - "ostype": "CoreOS", - "url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-kvm.qcow2.bz2", - "requireshvm": "True", - "ispublic": "True", - "isextractable": "True" - }, - "xenserver": { - "name": "Kubernetes-Service-Template-xen", - "displaytext": "Kubernetes-Service-Template xen", - "format": "vhd", - "hypervisor": "xenserver", - "ostype": "CoreOS", - "url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-xen.vhd.bz2", - "requireshvm": "True", - "ispublic": "True", - "isextractable": "True" - }, - "vmware": { - "name": "Kubernetes-Service-Template-vmware", - "displaytext": "Kubernetes-Service-Template vmware", - "format": "ova", - "hypervisor": "vmware", - "ostype": "CoreOS", - "url": "http://dl.openvm.eu/cloudstack/coreos/x86_64/coreos_production_cloudstack_image-vmware.ova", - "requireshvm": "True", - "ispublic": "True", - "details": [{"keyboard":"us","nicAdapter":"Vmxnet3","rootDiskController":"pvscsi"}] - } - }, "cks_service_offering": { "name": "CKS-Instance", "displaytext": "CKS Instance", diff --git a/tools/marvin/setup.py b/tools/marvin/setup.py index 8eb8932f875b..3da62333113a 100644 --- a/tools/marvin/setup.py +++ b/tools/marvin/setup.py @@ -58,7 +58,9 @@ "pytz", "retries", "PyCrypt", - "urllib3" + "kubernetes", + "urllib3", + "setuptools >= 40.3.0" ], py_modules=['marvin.marvinPlugin'], zip_safe=False, diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index ba60c9cdb1d1..85721b473b30 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -121,6 +121,7 @@ "label.action.delete.network.processing": "Deleting Network....", "label.action.delete.nexusvswitch": "Delete Nexus 1000v", 
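The locale strings added just below back the new node-removal and autoscaling controls that the UI wires to scaleKubernetesCluster (nodeids, autoscalingenabled, minsize, maxsize). For reference, a rough sketch of driving those parameters directly through Marvin's API client, mirroring the helper methods earlier in this patch; the function names and the single-ID form of nodeids are assumptions for illustration only:

    from marvin.cloudstackAPI import scaleKubernetesCluster

    def remove_cluster_node(apiclient, cluster_id, node_id):
        # Remove a single worker node via the new 'nodeids' parameter
        cmd = scaleKubernetesCluster.scaleKubernetesClusterCmd()
        cmd.id = cluster_id
        cmd.nodeids = node_id
        return apiclient.scaleKubernetesCluster(cmd)

    def enable_cluster_autoscaling(apiclient, cluster_id, minsize, maxsize):
        # Hand worker sizing over to the in-cluster autoscaler
        cmd = scaleKubernetesCluster.scaleKubernetesClusterCmd()
        cmd.id = cluster_id
        cmd.autoscalingenabled = True
        cmd.minsize = minsize
        cmd.maxsize = maxsize
        return apiclient.scaleKubernetesCluster(cmd)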
"label.action.delete.nic": "Remove NIC", +"label.action.delete.node": "Delete node", "label.action.delete.physical.network": "Delete physical network", "label.action.delete.pod": "Delete Pod", "label.action.delete.pod.processing": "Deleting Pod....", @@ -473,6 +474,7 @@ "label.auto.assign.random.ip": "Automatically assign a random IP address", "label.autoscale": "AutoScale", "label.autoscale.configuration.wizard": "AutoScale Configuration Wizard", +"label.autoscalingenabled": "Autoscaling", "label.availability": "Availability", "label.availabilityzone": "Availability Zone", "label.available": "Available", @@ -567,6 +569,9 @@ "label.cisco.nexus1000v.password": "Nexus 1000v Password", "label.cisco.nexus1000v.username": "Nexus 1000v Username", "label.ciscovnmc.resource.details": "CiscoVNMC resource details", +"label.cks.cluster.autoscalingenabled": "Enable autoscaling on this cluster", +"label.cks.cluster.maxsize": "Maximum cluster size (Worker nodes)", +"label.cks.cluster.minsize": "Minimum cluster size (Worker nodes)", "label.cks.cluster.size": "Cluster size (Worker nodes)", "label.cleanup": "Clean up", "label.clear": "Clear", @@ -1352,6 +1357,7 @@ "label.maxproject": "Max. Projects", "label.maxpublicip": "Max. Public IPs", "label.maxsecondarystorage": "Max. Secondary Storage (GiB)", +"label.maxsize": "Maximum size", "label.maxsnapshot": "Max. Snapshots", "label.maxtemplate": "Max. Templates", "label.maxuservm": "Max. User VMs", @@ -1437,6 +1443,7 @@ "label.miniops": "Min IOPS", "label.minmaxiops": "Min IOPS / Max IOPS", "label.minmemory": "Min Memory (in MB)", +"label.minsize": "Minimum size", "label.minute.past.hour": "minute(s) past the hour", "label.minutes.past.hour": "minutes(s) past the hour", "label.monday": "Monday", @@ -2065,6 +2072,7 @@ "label.start.rolling.maintenance": "Start Rolling Maintenance", "label.start.rolling.maintenance.payload": "Payload", "label.start.vlan": "Start VLAN", +"label.start.vm": "Start VM", "label.start.vxlan": "Start VXLAN", "label.startdate": "By date (start)", "label.startip": "Start IP", @@ -2128,6 +2136,7 @@ "label.summary": "Summary", "label.sunday": "Sunday", "label.supportedservices": "Supported Services", +"label.supportsautoscaling": "Supports Autoscaling", "label.supportsha": "Supports HA", "label.supportspublicaccess": "Supports Public Access", "label.supportsregionlevelvpc": "Supports Region Level VPC", @@ -2490,6 +2499,7 @@ "message.action.delete.iso.for.all.zones": "The ISO is used by all zones. 
Please confirm that you want to delete it from all zones.", "message.action.delete.network": "Please confirm that you want to delete this network.", "message.action.delete.nexusvswitch": "Please confirm that you want to delete this nexus 1000v", +"message.action.delete.node": "Please confirm that you want to delete this node.", "message.action.delete.physical.network": "Please confirm that you want to delete this physical network", "message.action.delete.pod": "Please confirm that you want to delete this pod.", "message.action.delete.primary.storage": "Please confirm that you want to delete this primary storage.", @@ -2541,6 +2551,7 @@ "message.action.revert.snapshot": "Please confirm that you want to revert the owning volume to this snapshot.", "message.action.router.health.checks": "Health checks result will be fetched from router.", "message.action.router.health.checks.disabled.warning": "Please enable router health checks.", +"message.action.scale.kubernetes.cluster.warning": "Please do not manually scale the cluster if cluster autoscaling is enabled", "message.action.secondary.storage.read.only": "Please confirm that you want to make this secondary storage read only.", "message.action.secondary.storage.read.write": "Please confirm that you want to make this secondary storage read write.", "message.action.secure.host": "This will restart the host agent and libvirtd process after applying new X509 certificates, please confirm?", @@ -2794,6 +2805,7 @@ "message.delete.vpn.connection": "Please confirm that you want to delete VPN connection", "message.delete.vpn.customer.gateway": "Please confirm that you want to delete this VPN Customer Gateway", "message.delete.vpn.gateway": "Please confirm that you want to delete this VPN Gateway", +"message.deleting.node": "Deleting Node", "message.deleting.vm": "Deleting VM", "message.deployasis": "Selected template is Deploy As-Is i.e., the VM is deployed by importing an OVA with vApps directly into vCenter. Root disk(s) resize is allowed only on stopped VMs for such templates.", "message.desc.add.new.lb.sticky.rule": "Add new LB sticky rule", @@ -3257,6 +3269,7 @@ "message.success.delete.acl.rule": "Successfully removed ACL rule", "message.success.delete.backup.schedule": "Successfully deleted Configure VM backup schedule", "message.success.delete.icon": "Successfully deleted icon of", +"message.success.delete.node": "Successfully Deleted Node", "message.success.delete.snapshot.policy": "Successfully deleted snapshot policy", "message.success.delete.static.route": "Successfully deleted static route", "message.success.delete.tag": "Successfully deleted tag", diff --git a/ui/src/components/view/ListView.vue b/ui/src/components/view/ListView.vue index 53ad021ba2e6..79b47c346cf9 100644 --- a/ui/src/components/view/ListView.vue +++ b/ui/src/components/view/ListView.vue @@ -268,6 +268,10 @@ + + + {{ record.autoscalingenabled ? 
'Enabled' : 'Disabled' }} + diff --git a/ui/src/components/view/ResourceView.vue b/ui/src/components/view/ResourceView.vue index 013310feba01..fb23bef91605 100644 --- a/ui/src/components/view/ResourceView.vue +++ b/ui/src/components/view/ResourceView.vue @@ -135,27 +135,8 @@ export default { this.$emit('onTabChange', key) }, showTab (tab) { - if ('networkServiceFilter' in tab) { - if (this.resource && this.resource.virtualmachineid && !this.resource.vpcid && tab.name !== 'firewall') { - return false - } - if (this.resource && this.resource.virtualmachineid && this.resource.vpcid) { - return false - } - // dont display any option for source NAT IP of VPC - if (this.resource && this.resource.vpcid && !this.resource.issourcenat && tab.name !== 'firewall') { - return true - } - // display LB and PF options for isolated networks if static nat is disabled - if (this.resource && !this.resource.vpcid) { - if (!this.resource.isstaticnat) { - return true - } else if (tab.name === 'firewall') { - return true - } - } - return this.networkService && this.networkService.service && - tab.networkServiceFilter(this.networkService.service) + if (this.networkService && this.networkService.service && tab.networkServiceFilter) { + return tab.networkServiceFilter(this.networkService.service) } else if ('show' in tab) { return tab.show(this.resource, this.$route, this.$store.getters.userInfo) } else { diff --git a/ui/src/config/section/compute.js b/ui/src/config/section/compute.js index 8bd694e03712..835923d9f07e 100644 --- a/ui/src/config/section/compute.js +++ b/ui/src/config/section/compute.js @@ -428,15 +428,18 @@ export default { icon: kubernetes, docHelp: 'plugins/cloudstack-kubernetes-service.html', permission: ['listKubernetesClusters'], - columns: () => { + columns: (store) => { var fields = ['name', 'state', 'size', 'cpunumber', 'memory'] - if (['Admin', 'DomainAdmin'].includes(store.getters.userInfo.roletype)) { + if (['Admin', 'DomainAdmin'].includes(store.userInfo.roletype)) { fields.push('account') } + if (store.apis.scaleKubernetesCluster.params.filter(x => x.name === 'autoscalingenabled').length > 0) { + fields.splice(2, 0, 'autoscalingenabled') + } fields.push('zonename') return fields }, - details: ['name', 'description', 'zonename', 'kubernetesversionname', 'size', 'controlnodes', 'cpunumber', 'memory', 'keypair', 'associatednetworkname', 'account', 'domain', 'zonename'], + details: ['name', 'description', 'zonename', 'kubernetesversionname', 'autoscalingenabled', 'minsize', 'maxsize', 'size', 'controlnodes', 'cpunumber', 'memory', 'keypair', 'associatednetworkname', 'account', 'domain', 'zonename'], tabs: [{ name: 'k8s', component: () => import('@/views/compute/KubernetesServiceTab.vue') diff --git a/ui/src/config/section/image.js b/ui/src/config/section/image.js index 77345372bf49..13f4d0bc3876 100644 --- a/ui/src/config/section/image.js +++ b/ui/src/config/section/image.js @@ -318,7 +318,7 @@ export default { docHelp: 'plugins/cloudstack-kubernetes-service.html#kubernetes-supported-versions', permission: ['listKubernetesSupportedVersions'], columns: ['name', 'state', 'semanticversion', 'isostate', 'mincpunumber', 'minmemory', 'zonename'], - details: ['name', 'semanticversion', 'zoneid', 'zonename', 'isoid', 'isoname', 'isostate', 'mincpunumber', 'minmemory', 'supportsha', 'state'], + details: ['name', 'semanticversion', 'supportsautoscaling', 'zoneid', 'zonename', 'isoid', 'isoname', 'isostate', 'mincpunumber', 'minmemory', 'supportsha', 'state'], actions: [ { api: 
'addKubernetesSupportedVersion', diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index fe250af68634..0ee4e3b6c3ab 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -758,7 +758,7 @@ export default { if (this.$route.meta.columns) { const columns = this.$route.meta.columns if (columns && typeof columns === 'function') { - this.columnKeys = columns() + this.columnKeys = columns(this.$store.getters) } else { this.columnKeys = columns } diff --git a/ui/src/views/compute/CreateKubernetesCluster.vue b/ui/src/views/compute/CreateKubernetesCluster.vue index a1590609c72c..9bd9516f6c5c 100644 --- a/ui/src/views/compute/CreateKubernetesCluster.vue +++ b/ui/src/views/compute/CreateKubernetesCluster.vue @@ -106,10 +106,11 @@ @@ -107,6 +107,26 @@ + @@ -149,6 +169,7 @@ export default { AnnotationsTab }, mixins: [mixinDevice], + inject: ['parentFetchData'], props: { resource: { type: Object, @@ -230,6 +251,14 @@ export default { } }, mounted () { + if (this.$store.getters.apis.scaleKubernetesCluster.params.filter(x => x.name === 'nodeids').length > 0) { + this.vmColumns.push({ + title: this.$t('label.action'), + dataIndex: 'action', + scopedSlots: { customRender: 'action' } + }) + } + this.handleFetchData() this.setCurrentTab() }, methods: { @@ -381,6 +410,35 @@ export default { elem.click() document.body.removeChild(elem) } + }, + deleteNode (node) { + const params = { + id: this.resource.id, + nodeids: node.id + } + api('scaleKubernetesCluster', params).then(json => { + const jobId = json.scalekubernetesclusterresponse.jobid + console.log(jobId) + this.$store.dispatch('AddAsyncJob', { + title: this.$t('label.action.delete.node'), + jobid: jobId, + description: node.name, + status: 'progress' + }) + this.$pollJob({ + jobId, + loadingMessage: `${this.$t('message.deleting.node')} ${node.name}`, + catchMessage: this.$t('error.fetching.async.job.result'), + successMessage: `${this.$t('message.success.delete.node')} ${node.name}`, + successMethod: () => { + this.parentFetchData() + } + }) + }).catch(error => { + this.$notifyError(error) + }).finally(() => { + this.parentFetchData() + }) } } } diff --git a/ui/src/views/compute/ScaleKubernetesCluster.vue b/ui/src/views/compute/ScaleKubernetesCluster.vue index 3d998dbbc6d3..e6139c097aa7 100644 --- a/ui/src/views/compute/ScaleKubernetesCluster.vue +++ b/ui/src/views/compute/ScaleKubernetesCluster.vue @@ -19,48 +19,88 @@
[ScaleKubernetesCluster.vue template markup not preserved here: both the removed and the added form blocks render the service offering options as {{ opt.name || opt.description }}; the added block additionally carries the autoscaling toggle and the minsize/maxsize inputs bound in the script section below.]
{{ this.$t('label.cancel') }} {{ this.$t('label.ok') }} @@ -91,7 +131,11 @@ export default { serviceOfferingLoading: false, minCpu: 2, minMemory: 2048, - loading: false + loading: false, + originalSize: 1, + autoscalingenabled: null, + minsize: null, + maxsize: null } }, beforeCreate () { @@ -99,7 +143,16 @@ export default { this.apiParams = this.$getApiParams('scaleKubernetesCluster') }, created () { - this.originalSize = !this.isObjectEmpty(this.resource) ? this.resource.size : 1 + if (!this.isObjectEmpty(this.resource)) { + this.originalSize = this.resource.size + if (this.apiParams.autoscalingenabled) { + this.autoscalingenabled = this.resource.autoscalingenabled ? true : null + this.minsize = this.resource.minsize + this.maxsize = this.resource.maxsize + } + } + }, + mounted () { this.fetchData() }, methods: { @@ -169,12 +222,21 @@ export default { const params = { id: this.resource.id } + if (this.autoscalingenabled != null) { + params.autoscalingenabled = this.autoscalingenabled + } if (this.isValidValueForKey(values, 'size') && values.size > 0) { params.size = values.size } - if (this.isValidValueForKey(values, 'serviceofferingid') && this.arrayHasItems(this.serviceOfferings)) { + if (this.isValidValueForKey(values, 'serviceofferingid') && this.arrayHasItems(this.serviceOfferings) && this.autoscalingenabled == null) { params.serviceofferingid = this.serviceOfferings[values.serviceofferingid].id } + if (this.isValidValueForKey(values, 'minsize')) { + params.minsize = values.minsize + } + if (this.isValidValueForKey(values, 'maxsize')) { + params.maxsize = values.maxsize + } api('scaleKubernetesCluster', params).then(json => { const jobId = json.scalekubernetesclusterresponse.jobid this.$pollJob({ diff --git a/ui/src/views/network/PublicIpResource.vue b/ui/src/views/network/PublicIpResource.vue index e80ed175ea49..ee758769b94b 100644 --- a/ui/src/views/network/PublicIpResource.vue +++ b/ui/src/views/network/PublicIpResource.vue @@ -66,6 +66,10 @@ export default { name: 'details', component: () => import('@/components/view/DetailsTab.vue') }], + defaultTabs: [{ + name: 'details', + component: () => import('@/components/view/DetailsTab.vue') + }], activeTab: '' } }, @@ -100,39 +104,64 @@ export default { } this.loading = true - this.portFWRuleCount = await this.fetchPortFWRule() + await this.filterTabs() + await this.fetchAction() + this.loading = false + }, + async filterTabs () { + // VPC IPs with source nat have only VPN + if (this.resource && this.resource.vpcid && this.resource.issourcenat) { + this.tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'vpn')) + return + } + // VPC IPs with vpnenabled have only VPN + if (this.resource && this.resource.vpcid && this.resource.vpnenabled) { + this.tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'vpn')) + return + } + // VPC IPs with static nat have nothing + if (this.resource && this.resource.vpcid && this.resource.isstaticnat) { + return + } + if (this.resource && this.resource.vpcid) { + // VPC IPs don't have firewall + let tabs = this.$route.meta.tabs.filter(tab => tab.name !== 'firewall') - // disable load balancing rules only if port forwarding is enabled and - // network belongs to VPC - if (this.portFWRuleCount > 0 && this.resource.vpcid) { - this.tabs = this.$route.meta.tabs.filter(tab => tab.name !== 'loadbalancing') - } else { + this.portFWRuleCount = await this.fetchPortFWRule() this.loadBalancerRuleCount = await this.fetchLoadBalancerRule() - // for isolated 
networks, display both LB and PF - // for VPC they are mutually exclusive + // VPC IPs with PF only have PF + if (this.portFWRuleCount > 0) { + tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'portforwarding')) + } + + // VPC IPs with LB rules only have LB if (this.loadBalancerRuleCount > 0) { - this.tabs = - this.resource.vpcid ? this.$route.meta.tabs.filter(tab => tab.name !== 'portforwarding') : this.$route.meta.tabs - this.loading = false - } else { - this.tabs = this.$route.meta.tabs + tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'loadbalancing')) } + this.tabs = tabs + return } - await this.fetchAction() - this.loading = false - }, - fetchAction () { - this.actions = [] - if (this.$route.meta.actions) { - this.actions = this.$route.meta.actions + // Regular guest networks with Source Nat have everything + if (this.resource && !this.resource.vpcid && this.resource.issourcenat) { + this.tabs = this.$route.meta.tabs + return + } + // Regular guest networks with Static Nat only have Firewall + if (this.resource && !this.resource.vpcid && this.resource.isstaticnat) { + this.tabs = this.defaultTabs.concat(this.$route.meta.tabs.filter(tab => tab.name === 'firewall')) + return } - if (this.portFWRuleCount > 0 || this.loadBalancerRuleCount > 0) { - this.actions = this.actions.filter(action => action.api !== 'enableStaticNat') + // Regular guest networks have all tabs + if (this.resource && !this.resource.vpcid) { + this.tabs = this.$route.meta.tabs } }, + fetchAction () { + this.actions = this.$route.meta.actions || [] + }, fetchPortFWRule () { return new Promise((resolve, reject) => { api('listPortForwardingRules', {