From 9c85c6b24e1fd9b8c9878edef6e757230461eed8 Mon Sep 17 00:00:00 2001 From: "Locharla, Sandeep" Date: Mon, 10 Nov 2025 18:35:00 +0530 Subject: [PATCH 001/271] CSTACKEX-46: Disable, Re-Enable, Delete Storage pool and Enter, Exit Storage pool workflows --- .../storage/lifecycle/OntapPrimaryDatastoreLifecycle.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 2cdd7de0b7c5..5c0a8f05eee2 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -42,6 +42,7 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao; import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl; import org.apache.cloudstack.storage.feign.model.ExportPolicy; import org.apache.cloudstack.storage.feign.model.OntapStorage; @@ -67,10 +68,10 @@ public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycl @Inject private StorageManager _storageMgr; @Inject private ResourceManager _resourceMgr; @Inject private PrimaryDataStoreHelper _dataStoreHelper; - @Inject private PrimaryDataStoreDao storagePoolDao; - @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @Inject private PrimaryDataStoreDetailsDao _datastoreDetailsDao; @Inject private StoragePoolAutomation _storagePoolAutomation; + @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private StoragePoolDetailsDao storagePoolDetailsDao; private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class); // ONTAP minimum volume size is 1.56 GB (1677721600 bytes) @@ -411,7 +412,6 @@ public boolean deleteDataStore(DataStore store) { return _dataStoreHelper.deletePrimaryDataStore(store); } - @Override public boolean migrateToObjectStore(DataStore store) { return true; From 41e832491549d441ac6b49ad7cc4f5f77c3106da Mon Sep 17 00:00:00 2001 From: "Locharla, Sandeep" Date: Tue, 18 Nov 2025 11:31:54 +0530 Subject: [PATCH 002/271] CSTACKEX-50: Fixed some issues seen while testing --- .../main/java/org/apache/cloudstack/storage/utils/Constants.java | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java index 23425aa6b797..e71a26577fa9 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java @@ -55,6 +55,7 @@ public class Constants { public static final String FIELDS = "fields"; public static final String AGGREGATES = "aggregates"; public static final String STATE = "state"; + public static final String SVMDOTNAME = "svm.name"; public static final String DATA_NFS = "data_nfs"; public static final String DATA_ISCSI = "data_iscsi"; public static final String IP_ADDRESS = "ip.address"; From 7f47d930b606f99d17fc6f3d2a0e69762a1ab352 Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Wed, 5 Nov 2025 15:28:09 +0530 Subject: [PATCH 003/271] CSTACKEX-46 Create Async, Attach Cluster/Zone and Grant/Revoke Access --- .../driver/OntapPrimaryDatastoreDriver.java | 161 +++++++++++++++++- .../OntapPrimaryDatastoreLifecycle.java | 78 +++++++++ .../storage/service/StorageStrategy.java | 59 +++---- .../storage/service/UnifiedNASStrategy.java | 8 +- .../storage/service/UnifiedSANStrategy.java | 91 ++++++++-- .../cloudstack/storage/utils/Utility.java | 65 +++++++ 6 files changed, 410 insertions(+), 52 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 5e79aa2298da..f4322f27226f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -27,6 +27,9 @@ import com.cloud.storage.Storage; import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.ScopeType; +import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; @@ -45,6 +48,7 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.service.StorageStrategy; +import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.service.model.ProtocolType; import org.apache.cloudstack.storage.utils.Constants; @@ -62,7 +66,7 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver { @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @Inject private PrimaryDataStoreDao storagePoolDao; - + @Inject private VolumeDao volumeDao; @Override public Map getCapabilities() { s_logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called"); @@ -98,8 +102,13 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet throw new InvalidParameterValueException("createAsync: callback should not be null"); } try { - s_logger.info("createAsync: Started for data store [{}] and data object [{}] of type [{}]", - dataStore, dataObject, dataObject.getType()); + s_logger.info("createAsync: Started for data store [{}] and data object [{}] of type [{}]", dataStore, dataObject, dataObject.getType()); + + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); + if(storagePool == null) { + s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); + } if (dataObject.getType() == DataObjectType.VOLUME) { VolumeInfo volumeInfo = (VolumeInfo) dataObject; path = createCloudStackVolumeForTypeVolume(dataStore, volumeInfo); @@ -197,11 +206,157 @@ public ChapInfo getChapInfo(DataObject dataObject) { @Override public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { + if (dataStore == null) { + throw new InvalidParameterValueException("grantAccess: dataStore should not be null"); + } + if (dataObject == null) { + throw new InvalidParameterValueException("grantAccess: dataObject should not be null"); + } + if (host == null) { + throw new InvalidParameterValueException("grantAccess: host should not be null"); + } + try { + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); + if(storagePool == null) { + s_logger.error("grantAccess : Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("grantAccess : Storage Pool not found for id: " + dataStore.getId()); + } + if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) { + s_logger.error("grantAccess: Only Cluster and Zone scoped primary storage is supported for storage Pool: " + storagePool.getName()); + throw new CloudRuntimeException("grantAccess: Only Cluster and Zone scoped primary storage is supported for Storage Pool: " + storagePool.getName()); + } + + if (dataObject.getType() == DataObjectType.VOLUME) { + VolumeVO volumeVO = volumeDao.findById(dataObject.getId()); + if(volumeVO == null) { + s_logger.error("grantAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); + throw new CloudRuntimeException("grantAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); + } + grantAccessForVolume(storagePool, volumeVO, host); + } else { + s_logger.error("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess"); + throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess"); + } + } catch(Exception e){ + s_logger.error("grantAccess: Failed for dataObject [{}]: {}", dataObject, e.getMessage()); + throw new CloudRuntimeException("grantAccess: Failed with error :" + e.getMessage()); + } return true; } + private void grantAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, Host host) { + Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); + StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); + String svmName = details.get(Constants.SVM_NAME); + long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? host.getClusterId() : host.getDataCenterId(); + + if(ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + String accessGroupName = Utility.getIgroupName(svmName, scopeId); + CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, volumeVO.getPath()); + s_logger.info("grantAccessForVolume: Retrieved LUN [{}] details for volume [{}]", cloudStackVolume.getLun().getName(), volumeVO.getName()); + AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); + if(accessGroup.getIgroup().getInitiators() == null || accessGroup.getIgroup().getInitiators().size() == 0 || !accessGroup.getIgroup().getInitiators().contains(host.getStorageUrl())) { + s_logger.error("grantAccess: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); + throw new CloudRuntimeException("grantAccess: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName); + } + + Map enableLogicalAccessMap = new HashMap<>(); + enableLogicalAccessMap.put(Constants.LUN_DOT_NAME, volumeVO.getPath()); + enableLogicalAccessMap.put(Constants.SVM_DOT_NAME, svmName); + enableLogicalAccessMap.put(Constants.IGROUP_DOT_NAME, accessGroupName); + storageStrategy.enableLogicalAccess(enableLogicalAccessMap); + } else { + String errMsg = "grantAccessForVolume: Unsupported protocol type for volume grantAccess: " + details.get(Constants.PROTOCOL); + s_logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } + @Override public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { + if (dataStore == null) { + throw new InvalidParameterValueException("revokeAccess: data store should not be null"); + } + if (dataObject == null) { + throw new InvalidParameterValueException("revokeAccess: data object should not be null"); + } + if (host == null) { + throw new InvalidParameterValueException("revokeAccess: host should not be null"); + } + try { + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); + if(storagePool == null) { + s_logger.error("revokeAccess : Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("revokeAccess : Storage Pool not found for id: " + dataStore.getId()); + } + if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) { + s_logger.error("revokeAccess: Only Cluster and Zone scoped primary storage is supported for storage Pool: " + storagePool.getName()); + throw new CloudRuntimeException("revokeAccess: Only Cluster and Zone scoped primary storage is supported for Storage Pool: " + storagePool.getName()); + } + + if (dataObject.getType() == DataObjectType.VOLUME) { + VolumeVO volumeVO = volumeDao.findById(dataObject.getId()); + if(volumeVO == null) { + s_logger.error("revokeAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); + throw new CloudRuntimeException("revokeAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); + } + revokeAccessForVolume(storagePool, volumeVO, host); + } else { + s_logger.error("revokeAccess: Invalid DataObjectType (" + dataObject.getType() + ") passed to revokeAccess"); + throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to revokeAccess"); + } + } catch(Exception e){ + s_logger.error("revokeAccess: Failed for dataObject [{}]: {}", dataObject, e.getMessage()); + throw new CloudRuntimeException("revokeAccess: Failed with error :" + e.getMessage()); + } + } + + private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, Host host) { + Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); + StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); + String svmName = details.get(Constants.SVM_NAME); + long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? host.getClusterId() : host.getDataCenterId(); + + if(ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + String accessGroupName = Utility.getIgroupName(svmName, scopeId); + CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, volumeVO.getPath()); + AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); + //TODO check if initiator does exits in igroup, will throw the error ? + if(!accessGroup.getIgroup().getInitiators().contains(host.getStorageUrl())) { + s_logger.error("grantAccess: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); + throw new CloudRuntimeException("grantAccess: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName); + } + + Map disableLogicalAccessMap = new HashMap<>(); + disableLogicalAccessMap.put(Constants.LUN_DOT_UUID, cloudStackVolume.getLun().getUuid().toString()); + disableLogicalAccessMap.put(Constants.IGROUP_DOT_UUID, accessGroup.getIgroup().getUuid()); + storageStrategy.disableLogicalAccess(disableLogicalAccessMap); + } + } + + + private CloudStackVolume getCloudStackVolumeByName(StorageStrategy storageStrategy, String svmName, String cloudStackVolumeName) { + Map getCloudStackVolumeMap = new HashMap<>(); + getCloudStackVolumeMap.put(Constants.NAME, cloudStackVolumeName); + getCloudStackVolumeMap.put(Constants.SVM_DOT_NAME, svmName); + CloudStackVolume cloudStackVolume = storageStrategy.getCloudStackVolume(getCloudStackVolumeMap); + if(cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getName() == null) { + s_logger.error("getCloudStackVolumeByName: Failed to get LUN details [{}]", cloudStackVolumeName); + throw new CloudRuntimeException("getCloudStackVolumeByName: Failed to get LUN [" + cloudStackVolumeName + "]"); + } + return cloudStackVolume; + } + + private AccessGroup getAccessGroupByName(StorageStrategy storageStrategy, String svmName, String accessGroupName) { + Map getAccessGroupMap = new HashMap<>(); + getAccessGroupMap.put(Constants.NAME, accessGroupName); + getAccessGroupMap.put(Constants.SVM_DOT_NAME, svmName); + AccessGroup accessGroup = storageStrategy.getAccessGroup(getAccessGroupMap); + if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getName() == null) { + s_logger.error("getAccessGroupByName: Failed to get iGroup details [{}]", accessGroupName); + throw new CloudRuntimeException("getAccessGroupByName: Failed to get iGroup details [" + accessGroupName + "]"); + } + return accessGroup; } @Override diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 5c0a8f05eee2..8ae2036f468a 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -23,6 +23,7 @@ import com.cloud.agent.api.StoragePoolInfo; import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.HostVO; import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.ResourceManager; @@ -58,6 +59,7 @@ import org.apache.logging.log4j.Logger; import javax.inject.Inject; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; @@ -288,6 +290,22 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore); logger.debug(" datastore object received is {} ",primaryStore ); + if (dataStore == null) { + throw new InvalidParameterValueException("attachCluster: dataStore should not be null"); + } + if (scope == null) { + throw new InvalidParameterValueException("attachCluster: scope should not be null"); + } + List hostsIdentifier = new ArrayList<>(); + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); + if(storagePool == null) { + s_logger.error("attachCluster : Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("attachCluster : Storage Pool not found for id: " + dataStore.getId()); + } + PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; + List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore); + // TODO- need to check if no host to connect then throw exception or just continue + logger.debug("attachCluster: Eligible Up and Enabled hosts: {} in cluster {}", hostsToConnect, primaryStore.getClusterId()); logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId())); @@ -302,6 +320,20 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { accessGroupRequest.setPolicy(exportPolicy); strategy.createAccessGroup(accessGroupRequest); + logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); + Map details = primaryStore.getDetails(); + StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); + ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); + //TODO- Check if we have to handle heterogeneous host within the cluster + if (!isProtocolSupportedByAllHosts(hostsToConnect, protocol, hostsIdentifier)) { + s_logger.error("attachCluster: Not all hosts in the cluster support the protocol: " + protocol.name()); + throw new CloudRuntimeException("attachCluster: Not all hosts in the cluster support the protocol: " + protocol.name()); + } + //TODO - check if no host to connect then also need to create access group without initiators + if (hostsIdentifier != null && hostsIdentifier.size() > 0) { + AccessGroup accessGroupRequest = Utility.createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); + strategy.createAccessGroup(accessGroupRequest); + } logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); for (HostVO host : hostsToConnect) { try { @@ -309,6 +341,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { } catch (Exception e) { logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); return false; + logger.warn("attachCluster: Unable to establish a connection between " + host + " and " + dataStore, e); } } _dataStoreHelper.attachCluster(dataStore); @@ -323,6 +356,18 @@ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo exis @Override public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) { logger.debug("In attachZone for ONTAP primary storage"); + if (dataStore == null) { + throw new InvalidParameterValueException("attachZone: dataStore should not be null"); + } + if (scope == null) { + throw new InvalidParameterValueException("attachZone: scope should not be null"); + } + List hostsIdentifier = new ArrayList<>(); + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); + if(storagePool == null) { + s_logger.error("attachZone : Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("attachZone : Storage Pool not found for id: " + dataStore.getId()); + } PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM); @@ -339,6 +384,21 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper accessGroupRequest.setPolicy(exportPolicy); strategy.createAccessGroup(accessGroupRequest); + // TODO- need to check if no host to connect then throw exception or just continue + logger.debug("attachZone: Eligible Up and Enabled hosts: {}", hostsToConnect); + + Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); + StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); + ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); + //TODO- Check if we have to handle heterogeneous host within the zone + if (!isProtocolSupportedByAllHosts(hostsToConnect, protocol, hostsIdentifier)) { + s_logger.error("attachZone: Not all hosts in the cluster support the protocol: " + protocol.name()); + throw new CloudRuntimeException("attachZone: Not all hosts in the zone support the protocol: " + protocol.name()); + } + if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) { + AccessGroup accessGroupRequest = Utility.createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); + strategy.createAccessGroup(accessGroupRequest); + } for (HostVO host : hostsToConnect) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); @@ -351,6 +411,24 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper return true; } + private boolean isProtocolSupportedByAllHosts(List hosts, ProtocolType protocolType, List hostIdentifiers) { + switch (protocolType) { + case ISCSI: + String protocolPrefix = Constants.IQN; + for (HostVO host : hosts) { + if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().isEmpty() + || !host.getStorageUrl().startsWith(protocolPrefix)) { + return false; + } + hostIdentifiers.add(host.getStorageUrl()); + } + break; + default: + throw new CloudRuntimeException("isProtocolSupportedByAllHosts : Unsupported protocol: " + protocolType.name()); + } + return true; + } + @Override public boolean maintain(DataStore store) { _storagePoolAutomation.maintain(store); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index 822e09851f39..edfda1f4bd2d 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -147,7 +147,7 @@ public boolean connect() { * throw exception in case of disaggregated ONTAP storage * * @param volumeName the name of the volume to create - * @param size the size of the volume in bytes + * @param size the size of the volume in bytes * @return the created Volume object */ public Volume createStorageVolume(String volumeName, Long size) { @@ -477,21 +477,19 @@ public String getNetworkInterface() { /** * Method encapsulates the behavior based on the opted protocol in subclasses. * it is going to mimic - * getLun for iSCSI, FC protocols - * getFile for NFS3.0 and NFS4.1 protocols - * getNameSpace for Nvme/TCP and Nvme/FC protocol - * - * @param cloudstackVolume the CloudStack volume to retrieve + * getLun for iSCSI, FC protocols + * getFile for NFS3.0 and NFS4.1 protocols + * getNameSpace for Nvme/TCP and Nvme/FC protocol + * @param cloudStackVolumeMap the CloudStack volume to retrieve * @return the retrieved CloudStackVolume object */ - abstract CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume); + abstract public CloudStackVolume getCloudStackVolume(Map cloudStackVolumeMap); /** * Method encapsulates the behavior based on the opted protocol in subclasses - * createiGroup for iSCSI and FC protocols - * createExportPolicy for NFS 3.0 and NFS 4.1 protocols - * createSubsystem for Nvme/TCP and Nvme/FC protocols - * + * createiGroup for iSCSI and FC protocols + * createExportPolicy for NFS 3.0 and NFS 4.1 protocols + * createSubsystem for Nvme/TCP and Nvme/FC protocols * @param accessGroup the access group to create * @return the created AccessGroup object */ @@ -499,20 +497,18 @@ public String getNetworkInterface() { /** * Method encapsulates the behavior based on the opted protocol in subclasses - * deleteiGroup for iSCSI and FC protocols - * deleteExportPolicy for NFS 3.0 and NFS 4.1 protocols - * deleteSubsystem for Nvme/TCP and Nvme/FC protocols - * + * deleteiGroup for iSCSI and FC protocols + * deleteExportPolicy for NFS 3.0 and NFS 4.1 protocols + * deleteSubsystem for Nvme/TCP and Nvme/FC protocols * @param accessGroup the access group to delete */ abstract public void deleteAccessGroup(AccessGroup accessGroup); /** * Method encapsulates the behavior based on the opted protocol in subclasses - * updateiGroup example add/remove-Iqn for iSCSI and FC protocols - * updateExportPolicy example add/remove-Rule for NFS 3.0 and NFS 4.1 protocols - * //TODO for Nvme/TCP and Nvme/FC protocols - * + * updateiGroup example add/remove-Iqn for iSCSI and FC protocols + * updateExportPolicy example add/remove-Rule for NFS 3.0 and NFS 4.1 protocols + * //TODO for Nvme/TCP and Nvme/FC protocols * @param accessGroup the access group to update * @return the updated AccessGroup object */ @@ -520,32 +516,27 @@ public String getNetworkInterface() { /** * Method encapsulates the behavior based on the opted protocol in subclasses - * getiGroup for iSCSI and FC protocols - * getExportPolicy for NFS 3.0 and NFS 4.1 protocols - * getNameSpace for Nvme/TCP and Nvme/FC protocols - * - * @param accessGroup the access group to retrieve - * @return the retrieved AccessGroup object + @@ -306,22 +306,22 @@ public Volume getStorageVolume(Volume volume) + * getNameSpace for Nvme/TCP and Nvme/FC protocols + * @param values */ - abstract AccessGroup getAccessGroup(AccessGroup accessGroup); + abstract public AccessGroup getAccessGroup(Map values); /** * Method encapsulates the behavior based on the opted protocol in subclasses - * lunMap for iSCSI and FC protocols - * //TODO for Nvme/TCP and Nvme/FC protocols - * + * lunMap for iSCSI and FC protocols + * //TODO for Nvme/TCP and Nvme/FC protocols * @param values */ - abstract void enableLogicalAccess(Map values); + abstract public void enableLogicalAccess(Map values); /** * Method encapsulates the behavior based on the opted protocol in subclasses - * lunUnmap for iSCSI and FC protocols - * //TODO for Nvme/TCP and Nvme/FC protocols - * + * lunUnmap for iSCSI and FC protocols + * //TODO for Nvme/TCP and Nvme/FC protocols * @param values */ - abstract void disableLogicalAccess(Map values); + abstract public void disableLogicalAccess(Map values); private Boolean jobPollForSuccess(String jobUUID) { //Create URI for GET Job API diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index b35bedf2ef3c..861d22ff68d9 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -115,7 +115,7 @@ void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { } @Override - CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) { + public CloudStackVolume getCloudStackVolume(Map cloudStackVolumeMap) { //TODO return null; } @@ -188,18 +188,18 @@ public AccessGroup updateAccessGroup(AccessGroup accessGroup) { } @Override - public AccessGroup getAccessGroup(AccessGroup accessGroup) { + public AccessGroup getAccessGroup(Map values) { //TODO return null; } @Override - void enableLogicalAccess(Map values) { + public void enableLogicalAccess(Map values) { //TODO } @Override - void disableLogicalAccess(Map values) { + public void disableLogicalAccess(Map values) { //TODO } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 7b5372c69bdd..d594354bd143 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -27,7 +27,9 @@ import org.apache.cloudstack.storage.feign.client.SANFeignClient; import org.apache.cloudstack.storage.feign.model.Igroup; import org.apache.cloudstack.storage.feign.model.Initiator; +import org.apache.cloudstack.storage.feign.model.Igroup; import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.feign.model.LunMap; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.response.OntapResponse; @@ -104,7 +106,7 @@ void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { } @Override - CloudStackVolume getCloudStackVolume(CloudStackVolume cloudstackVolume) { + public CloudStackVolume getCloudStackVolume(Map cloudStackVolumeMap) { //TODO return null; } @@ -305,19 +307,86 @@ public AccessGroup updateAccessGroup(AccessGroup accessGroup) { return null; } - @Override - public AccessGroup getAccessGroup(AccessGroup accessGroup) { - //TODO - return null; + public AccessGroup getAccessGroup(Map values) { + s_logger.info("getAccessGroup : fetching Igroup with params {} ", values); + if (values == null || values.isEmpty()) { + s_logger.error("getAccessGroup: get Igroup failed. Invalid request: {}", values); + throw new CloudRuntimeException("getAccessGroup : get Igroup Failed, invalid request"); + } + String svmName = values.get(Constants.SVM_DOT_NAME); + String igroupName = values.get(Constants.IGROUP_DOT_NAME); + if(svmName == null || igroupName == null || svmName.isEmpty() || igroupName.isEmpty()) { + s_logger.error("getAccessGroup: get Igroup failed. Invalid svm:{} or igroup name: {}", svmName, igroupName); + throw new CloudRuntimeException("getAccessGroup : Fget Igroup failed, invalid request"); + } + try { + // Get AuthHeader + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + // get Igroup + Map queryParams = Map.of(Constants.SVM_DOT_NAME, svmName, Constants.IGROUP_DOT_NAME, igroupName); + OntapResponse igroupResponse = sanFeignClient.getIgroupResponse(authHeader, queryParams); + if (igroupResponse == null || igroupResponse.getRecords() == null || igroupResponse.getRecords().size() == 0) { + s_logger.error("getAccessGroup: Failed to fetch Igroup"); + throw new CloudRuntimeException("Failed to fetch Igroup"); + } + Igroup igroup = igroupResponse.getRecords().get(0); + s_logger.debug("getAccessGroup: Igroup Details : {}", igroup); + s_logger.info("getAccessGroup: Fetched the Igroup successfully. LunName: {}", igroup.getName()); + + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setIgroup(igroup); + return accessGroup; + } catch (Exception e) { + s_logger.error("Exception occurred while fetching Igroup. Exception: {}", e.getMessage()); + throw new CloudRuntimeException("Failed to fetch Igroup details: " + e.getMessage()); + } } - @Override - void enableLogicalAccess(Map values) { - //TODO + public void enableLogicalAccess(Map values) { + s_logger.info("enableLogicalAccess : Creating LunMap with values {} ", values); + LunMap lunMapRequest = new LunMap(); + String svmName = values.get(Constants.SVM_DOT_NAME); + String lunName = values.get(Constants.LUN_DOT_NAME); + String igroupName = values.get(Constants.IGROUP_DOT_NAME); + if(svmName == null || lunName == null || igroupName == null || svmName.isEmpty() || lunName.isEmpty() || igroupName.isEmpty()) { + s_logger.error("enableLogicalAccess: LunMap creation failed. Invalid request values: {}", values); + throw new CloudRuntimeException("enableLogicalAccess : Failed to create LunMap, invalid request"); + } + try { + // Get AuthHeader + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + // Create LunMap + OntapResponse createdLunMap = sanFeignClient.createLunMap(authHeader, true, lunMapRequest); + if (createdLunMap == null || createdLunMap.getRecords() == null || createdLunMap.getRecords().size() == 0) { + s_logger.error("enableLogicalAccess: LunMap failed for Lun: {} and igroup: {}", lunName, igroupName); + throw new CloudRuntimeException("Failed to perform LunMap for Lun: " +lunName+ " and igroup: " + igroupName); + } + LunMap lunMap = createdLunMap.getRecords().get(0); + s_logger.debug("enableLogicalAccess: LunMap created successfully. LunMap: {}", lunMap); + s_logger.info("enableLogicalAccess: LunMap created successfully."); + } catch (Exception e) { + s_logger.error("Exception occurred while creating LunMap: {}. Exception: {}", e.getMessage()); + throw new CloudRuntimeException("Failed to create LunMap: " + e.getMessage()); + } } - @Override - void disableLogicalAccess(Map values) { - //TODO + public void disableLogicalAccess(Map values) { + s_logger.info("disableLogicalAccess : Deleting LunMap with values {} ", values); + String lunUUID = values.get(Constants.LUN_DOT_UUID); + String igroupUUID = values.get(Constants.IGROUP_DOT_UUID); + if(lunUUID == null || igroupUUID == null || lunUUID.isEmpty() || igroupUUID.isEmpty()) { + s_logger.error("disableLogicalAccess: LunMap deletion failed. Invalid request values: {}", values); + throw new CloudRuntimeException("disableLogicalAccess : Failed to delete LunMap, invalid request"); + } + try { + // Get AuthHeader + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + // LunMap delete + sanFeignClient.deleteLunMap(authHeader, lunUUID, igroupUUID); + s_logger.info("disableLogicalAccess: LunMap deleted successfully."); + } catch (Exception e) { + s_logger.error("Exception occurred while deleting LunMap: {}. Exception: {}", e.getMessage()); + throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage()); + } } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index c20c9d6dd151..ebe7017b672b 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -20,6 +20,7 @@ package org.apache.cloudstack.storage.utils; import com.cloud.storage.ScopeType; +import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -30,12 +31,19 @@ import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; +import org.apache.cloudstack.storage.feign.model.Igroup; +import org.apache.cloudstack.storage.feign.model.Initiator; +import org.apache.cloudstack.storage.provider.StorageProviderFactory; +import org.apache.cloudstack.storage.service.StorageStrategy; +import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.service.model.ProtocolType; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.springframework.util.Base64Utils; +import java.util.ArrayList; +import java.util.List; import java.util.Map; public class Utility { @@ -143,4 +151,61 @@ public static String getIgroupName(String svmName, ScopeType scopeType, Long sco public static String generateExportPolicyName(String svmName, String volumeName){ return Constants.EXPORT + Constants.HYPHEN + svmName + Constants.HYPHEN + volumeName; } + + public static AccessGroup createAccessGroupRequestByProtocol(StoragePoolVO storagePool, long scopeId, Map details, List hostsIdentifier) { + ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL).toLowerCase()); + String svmName = details.get(Constants.SVM_NAME); + switch (protocol) { + case ISCSI: + // Access group name format: cs_svmName_scopeId + String igroupName = getIgroupName(svmName, scopeId); + Hypervisor.HypervisorType hypervisorType = storagePool.getHypervisor(); + return createSANAccessGroupRequest(svmName, igroupName, hypervisorType, hostsIdentifier); + default: + s_logger.error("createAccessGroupRequestByProtocol: Unsupported protocol " + protocol); + throw new CloudRuntimeException("createAccessGroupRequestByProtocol: Unsupported protocol " + protocol); + } + } + + public static AccessGroup createSANAccessGroupRequest(String svmName, String igroupName, Hypervisor.HypervisorType hypervisorType, List hostsIdentifier) { + AccessGroup accessGroupRequest = new AccessGroup(); + Igroup igroup = new Igroup(); + + if (svmName != null && !svmName.isEmpty()) { + Svm svm = new Svm(); + svm.setName(svmName); + igroup.setSvm(svm); + } + + if (igroupName != null && !igroupName.isEmpty()) { + igroup.setName(igroupName); + } + + if (hypervisorType != null) { + String hypervisorName = hypervisorType.name(); + igroup.setOsType(Igroup.OsTypeEnum.valueOf(getOSTypeFromHypervisor(hypervisorName))); + } + + if (hostsIdentifier != null && hostsIdentifier.size() > 0) { + List initiators = new ArrayList<>(); + for (String hostIdentifier : hostsIdentifier) { + Initiator initiator = new Initiator(); + initiator.setName(hostIdentifier); + initiators.add(initiator); + } + igroup.setInitiators(initiators); + } + accessGroupRequest.setIgroup(igroup); + return accessGroupRequest; + } + + public static String getLunName(String volName, String lunName) { + //Lun name in unified "/vol/VolumeName/LunName" + return Constants.VOLUME_PATH_PREFIX + volName + Constants.SLASH + lunName; + } + + public static String getIgroupName(String svmName, long scopeId) { + // Igroup name format: cs_svmName_scopeId + return Constants.CS + Constants.UNDERSCORE + svmName + Constants.UNDERSCORE + scopeId; + } } From d6e1d70cc49b0a3a3a63a993ac1439987632c069 Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Wed, 5 Nov 2025 15:29:56 +0530 Subject: [PATCH 004/271] CSTACKEX-46 Added Logging --- .../main/java/org/apache/cloudstack/storage/utils/Utility.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index ebe7017b672b..26420d79aee5 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -205,7 +205,7 @@ public static String getLunName(String volName, String lunName) { } public static String getIgroupName(String svmName, long scopeId) { - // Igroup name format: cs_svmName_scopeId + //Igroup name format: cs_svmName_scopeId return Constants.CS + Constants.UNDERSCORE + svmName + Constants.UNDERSCORE + scopeId; } } From eff6d97455b5c784f63eb740652960f7f0d28b3e Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Thu, 6 Nov 2025 10:17:52 +0530 Subject: [PATCH 005/271] CSTACKEX-46 Resolve Copilot review comments --- .../driver/OntapPrimaryDatastoreDriver.java | 20 +++++++-- .../OntapPrimaryDatastoreLifecycle.java | 2 +- .../storage/service/UnifiedSANStrategy.java | 44 ++++++++++++++++--- 3 files changed, 54 insertions(+), 12 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index f4322f27226f..398d5f46d557 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -47,6 +47,8 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.feign.model.Igroup; +import org.apache.cloudstack.storage.feign.model.Initiator; import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; @@ -255,7 +257,7 @@ private void grantAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, volumeVO.getPath()); s_logger.info("grantAccessForVolume: Retrieved LUN [{}] details for volume [{}]", cloudStackVolume.getLun().getName(), volumeVO.getName()); AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); - if(accessGroup.getIgroup().getInitiators() == null || accessGroup.getIgroup().getInitiators().size() == 0 || !accessGroup.getIgroup().getInitiators().contains(host.getStorageUrl())) { + if(!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { s_logger.error("grantAccess: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); throw new CloudRuntimeException("grantAccess: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName); } @@ -271,6 +273,16 @@ private void grantAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, throw new CloudRuntimeException(errMsg); } } + private boolean hostInitiatorFoundInIgroup(String hostInitiator, Igroup igroup) { + if(igroup != null || igroup.getInitiators() != null || hostInitiator != null || !hostInitiator.isEmpty()) { + for(Initiator initiator : igroup.getInitiators()) { + if(initiator.getName().equalsIgnoreCase(hostInitiator)) { + return true; + } + } + } + return false; + } @Override public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { @@ -322,9 +334,9 @@ private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, volumeVO.getPath()); AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); //TODO check if initiator does exits in igroup, will throw the error ? - if(!accessGroup.getIgroup().getInitiators().contains(host.getStorageUrl())) { - s_logger.error("grantAccess: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); - throw new CloudRuntimeException("grantAccess: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName); + if(!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { + s_logger.error("revokeAccessForVolume: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); + throw new CloudRuntimeException("revokeAccessForVolume: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName); } Map disableLogicalAccessMap = new HashMap<>(); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 8ae2036f468a..74d2a20771f5 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -392,7 +392,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); //TODO- Check if we have to handle heterogeneous host within the zone if (!isProtocolSupportedByAllHosts(hostsToConnect, protocol, hostsIdentifier)) { - s_logger.error("attachZone: Not all hosts in the cluster support the protocol: " + protocol.name()); + s_logger.error("attachZone: Not all hosts in the zone support the protocol: " + protocol.name()); throw new CloudRuntimeException("attachZone: Not all hosts in the zone support the protocol: " + protocol.name()); } if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index d594354bd143..64fe1dc8ef76 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -106,9 +106,39 @@ void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { } @Override - public CloudStackVolume getCloudStackVolume(Map cloudStackVolumeMap) { - //TODO - return null; + public CloudStackVolume getCloudStackVolume(Map values) { + s_logger.info("getCloudStackVolume : fetching Igroup with params {} ", values); + if (values == null || values.isEmpty()) { + s_logger.error("getCloudStackVolume: get Igroup failed. Invalid request: {}", values); + throw new CloudRuntimeException("getCloudStackVolume : get Igroup Failed, invalid request"); + } + String svmName = values.get(Constants.SVM_DOT_NAME); + String lunName = values.get(Constants.NAME); + if(svmName == null || lunName == null || svmName.isEmpty() || lunName.isEmpty()) { + s_logger.error("getCloudStackVolume: get Igroup failed. Invalid svm:{} or igroup name: {}", svmName, lunName); + throw new CloudRuntimeException("getCloudStackVolume : Fget Igroup failed, invalid request"); + } + try { + // Get AuthHeader + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + // get Igroup + Map queryParams = Map.of(Constants.SVM_DOT_NAME, svmName, Constants.NAME, lunName); + OntapResponse lunResponse = sanFeignClient.getLunResponse(authHeader, queryParams); + if (lunResponse == null || lunResponse.getRecords() == null || lunResponse.getRecords().size() == 0) { + s_logger.error("getCloudStackVolume: Failed to fetch Igroup"); + throw new CloudRuntimeException("getCloudStackVolume: Failed to fetch Igroup"); + } + Lun lun = lunResponse.getRecords().get(0); + s_logger.debug("getCloudStackVolume: Lun Details : {}", lun); + s_logger.info("getCloudStackVolume: Fetched the Lun successfully. LunName: {}", lun.getName()); + + CloudStackVolume cloudStackVolume = new CloudStackVolume(); + cloudStackVolume.setLun(lun); + return cloudStackVolume; + } catch (Exception e) { + s_logger.error("Exception occurred while fetching Lun. Exception: {}", e.getMessage()); + throw new CloudRuntimeException("Failed to fetch Lun details: " + e.getMessage()); + } } @Override @@ -317,7 +347,7 @@ public AccessGroup getAccessGroup(Map values) { String igroupName = values.get(Constants.IGROUP_DOT_NAME); if(svmName == null || igroupName == null || svmName.isEmpty() || igroupName.isEmpty()) { s_logger.error("getAccessGroup: get Igroup failed. Invalid svm:{} or igroup name: {}", svmName, igroupName); - throw new CloudRuntimeException("getAccessGroup : Fget Igroup failed, invalid request"); + throw new CloudRuntimeException("getAccessGroup : Failed to get Igroup, invalid request"); } try { // Get AuthHeader @@ -331,7 +361,7 @@ public AccessGroup getAccessGroup(Map values) { } Igroup igroup = igroupResponse.getRecords().get(0); s_logger.debug("getAccessGroup: Igroup Details : {}", igroup); - s_logger.info("getAccessGroup: Fetched the Igroup successfully. LunName: {}", igroup.getName()); + s_logger.info("getAccessGroup: Fetched the Igroup successfully. IgroupName: {}", igroup.getName()); AccessGroup accessGroup = new AccessGroup(); accessGroup.setIgroup(igroup); @@ -365,7 +395,7 @@ public void enableLogicalAccess(Map values) { s_logger.debug("enableLogicalAccess: LunMap created successfully. LunMap: {}", lunMap); s_logger.info("enableLogicalAccess: LunMap created successfully."); } catch (Exception e) { - s_logger.error("Exception occurred while creating LunMap: {}. Exception: {}", e.getMessage()); + s_logger.error("Exception occurred while creating LunMap: {}. Exception: {}", e.getMessage(), e); throw new CloudRuntimeException("Failed to create LunMap: " + e.getMessage()); } } @@ -385,7 +415,7 @@ public void disableLogicalAccess(Map values) { sanFeignClient.deleteLunMap(authHeader, lunUUID, igroupUUID); s_logger.info("disableLogicalAccess: LunMap deleted successfully."); } catch (Exception e) { - s_logger.error("Exception occurred while deleting LunMap: {}. Exception: {}", e.getMessage()); + s_logger.error("Exception occurred while deleting LunMap: {}. Exception: {}", e.getMessage(), e); throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage()); } } From faf8dbf76044467ec8a8be63bd0590cf2a85dde1 Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Thu, 6 Nov 2025 15:16:58 +0530 Subject: [PATCH 006/271] CSTACKEX-46 Resolve review comments --- .../driver/OntapPrimaryDatastoreDriver.java | 46 ++++++++++++-- .../storage/feign/client/SANFeignClient.java | 5 ++ .../OntapPrimaryDatastoreLifecycle.java | 60 +++++++++++++++++-- .../cloudstack/storage/utils/Utility.java | 49 +-------------- 4 files changed, 102 insertions(+), 58 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 398d5f46d557..71e88541b5d3 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -47,8 +47,7 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.feign.model.Igroup; -import org.apache.cloudstack.storage.feign.model.Initiator; +import org.apache.cloudstack.storage.feign.model.*; import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; @@ -112,8 +111,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); } if (dataObject.getType() == DataObjectType.VOLUME) { - VolumeInfo volumeInfo = (VolumeInfo) dataObject; - path = createCloudStackVolumeForTypeVolume(dataStore, volumeInfo); + path = createCloudStackVolumeForTypeVolume(storagePool, (VolumeInfo)dataObject); createCmdResult = new CreateCmdResult(path, new Answer(null, true, null)); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; @@ -155,6 +153,44 @@ private String createCloudStackVolumeForTypeVolume(DataStore dataStore, VolumeIn } } + private CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, VolumeInfo volumeInfo) { + CloudStackVolume cloudStackVolumeRequest = null; + + String protocol = details.get(Constants.PROTOCOL); + if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { + cloudStackVolumeRequest = new CloudStackVolume(); + Lun lunRequest = new Lun(); + Svm svm = new Svm(); + svm.setName(details.get(Constants.SVM_NAME)); + lunRequest.setSvm(svm); + + LunSpace lunSpace = new LunSpace(); + lunSpace.setSize(volumeInfo.getSize()); + lunRequest.setSpace(lunSpace); + //Lun name is full path like in unified "/vol/VolumeName/LunName" + String lunFullName = Utility.getLunName(storagePool.getName(), volumeInfo.getName()); + lunRequest.setName(lunFullName); + + String hypervisorType = storagePool.getHypervisor().name(); + String osType = null; + switch (hypervisorType) { + case Constants.KVM: + osType = Lun.OsTypeEnum.LINUX.getValue(); + break; + default: + String errMsg = "createCloudStackVolume : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage"; + s_logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType)); + + cloudStackVolumeRequest.setLun(lunRequest); + return cloudStackVolumeRequest; + } else { + throw new CloudRuntimeException("createCloudStackVolumeRequestByProtocol: Unsupported protocol " + protocol); + } + } + @Override public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback callback) { CommandResult commandResult = new CommandResult(); @@ -336,7 +372,7 @@ private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, //TODO check if initiator does exits in igroup, will throw the error ? if(!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { s_logger.error("revokeAccessForVolume: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); - throw new CloudRuntimeException("revokeAccessForVolume: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName); + return; } Map disableLogicalAccessMap = new HashMap<>(); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java index 868aab293518..d67c1f10c5a0 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java @@ -40,6 +40,11 @@ public interface SANFeignClient { @RequestLine("POST /api/storage/luns?return_records={returnRecords}") @Headers({"Authorization: {authHeader}"}) OntapResponse createLun(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, Lun lun); + @RequestLine("POST /api/storage/luns") + @Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"}) + OntapResponse createLun(@Param("authHeader") String authHeader, + @Param("returnRecords") boolean returnRecords, + Lun lun); @RequestLine("GET /api/storage/luns") @Headers({"Authorization: {authHeader}"}) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 74d2a20771f5..9cc5712ca450 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -46,7 +46,10 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao; import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl; import org.apache.cloudstack.storage.feign.model.ExportPolicy; +import org.apache.cloudstack.storage.feign.model.Igroup; +import org.apache.cloudstack.storage.feign.model.Initiator; import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; @@ -325,13 +328,13 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); //TODO- Check if we have to handle heterogeneous host within the cluster - if (!isProtocolSupportedByAllHosts(hostsToConnect, protocol, hostsIdentifier)) { + if (!validateProtocolSupportAndFetchHostsIndentifier(hostsToConnect, protocol, hostsIdentifier)) { s_logger.error("attachCluster: Not all hosts in the cluster support the protocol: " + protocol.name()); throw new CloudRuntimeException("attachCluster: Not all hosts in the cluster support the protocol: " + protocol.name()); } //TODO - check if no host to connect then also need to create access group without initiators if (hostsIdentifier != null && hostsIdentifier.size() > 0) { - AccessGroup accessGroupRequest = Utility.createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); + AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); strategy.createAccessGroup(accessGroupRequest); } logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); @@ -391,12 +394,12 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); //TODO- Check if we have to handle heterogeneous host within the zone - if (!isProtocolSupportedByAllHosts(hostsToConnect, protocol, hostsIdentifier)) { + if (!validateProtocolSupportAndFetchHostsIndentifier(hostsToConnect, protocol, hostsIdentifier)) { s_logger.error("attachZone: Not all hosts in the zone support the protocol: " + protocol.name()); throw new CloudRuntimeException("attachZone: Not all hosts in the zone support the protocol: " + protocol.name()); } if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) { - AccessGroup accessGroupRequest = Utility.createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); + AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); strategy.createAccessGroup(accessGroupRequest); } for (HostVO host : hostsToConnect) { @@ -411,7 +414,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper return true; } - private boolean isProtocolSupportedByAllHosts(List hosts, ProtocolType protocolType, List hostIdentifiers) { + private boolean validateProtocolSupportAndFetchHostsIndentifier(List hosts, ProtocolType protocolType, List hostIdentifiers) { switch (protocolType) { case ISCSI: String protocolPrefix = Constants.IQN; @@ -429,6 +432,53 @@ private boolean isProtocolSupportedByAllHosts(List hosts, ProtocolType p return true; } + private AccessGroup createAccessGroupRequestByProtocol(StoragePoolVO storagePool, long scopeId, Map details, List hostsIdentifier) { + ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL).toLowerCase()); + String svmName = details.get(Constants.SVM_NAME); + switch (protocol) { + case ISCSI: + // Access group name format: cs_svmName_scopeId + String igroupName = Utility.getIgroupName(svmName, scopeId); + Hypervisor.HypervisorType hypervisorType = storagePool.getHypervisor(); + return createSANAccessGroupRequest(svmName, igroupName, hypervisorType, hostsIdentifier); + default: + s_logger.error("createAccessGroupRequestByProtocol: Unsupported protocol " + protocol); + throw new CloudRuntimeException("createAccessGroupRequestByProtocol: Unsupported protocol " + protocol); + } + } + + private AccessGroup createSANAccessGroupRequest(String svmName, String igroupName, Hypervisor.HypervisorType hypervisorType, List hostsIdentifier) { + AccessGroup accessGroupRequest = new AccessGroup(); + Igroup igroup = new Igroup(); + + if (svmName != null && !svmName.isEmpty()) { + Svm svm = new Svm(); + svm.setName(svmName); + igroup.setSvm(svm); + } + + if (igroupName != null && !igroupName.isEmpty()) { + igroup.setName(igroupName); + } + + if (hypervisorType != null) { + String hypervisorName = hypervisorType.name(); + igroup.setOsType(Igroup.OsTypeEnum.valueOf(Utility.getOSTypeFromHypervisor(hypervisorName))); + } + + if (hostsIdentifier != null && hostsIdentifier.size() > 0) { + List initiators = new ArrayList<>(); + for (String hostIdentifier : hostsIdentifier) { + Initiator initiator = new Initiator(); + initiator.setName(hostIdentifier); + initiators.add(initiator); + } + igroup.setInitiators(initiators); + } + accessGroupRequest.setIgroup(igroup); + return accessGroupRequest; + } + @Override public boolean maintain(DataStore store) { _storagePoolAutomation.maintain(store); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index 26420d79aee5..3cd4343b827a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -152,54 +152,7 @@ public static String generateExportPolicyName(String svmName, String volumeName) return Constants.EXPORT + Constants.HYPHEN + svmName + Constants.HYPHEN + volumeName; } - public static AccessGroup createAccessGroupRequestByProtocol(StoragePoolVO storagePool, long scopeId, Map details, List hostsIdentifier) { - ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL).toLowerCase()); - String svmName = details.get(Constants.SVM_NAME); - switch (protocol) { - case ISCSI: - // Access group name format: cs_svmName_scopeId - String igroupName = getIgroupName(svmName, scopeId); - Hypervisor.HypervisorType hypervisorType = storagePool.getHypervisor(); - return createSANAccessGroupRequest(svmName, igroupName, hypervisorType, hostsIdentifier); - default: - s_logger.error("createAccessGroupRequestByProtocol: Unsupported protocol " + protocol); - throw new CloudRuntimeException("createAccessGroupRequestByProtocol: Unsupported protocol " + protocol); - } - } - - public static AccessGroup createSANAccessGroupRequest(String svmName, String igroupName, Hypervisor.HypervisorType hypervisorType, List hostsIdentifier) { - AccessGroup accessGroupRequest = new AccessGroup(); - Igroup igroup = new Igroup(); - - if (svmName != null && !svmName.isEmpty()) { - Svm svm = new Svm(); - svm.setName(svmName); - igroup.setSvm(svm); - } - - if (igroupName != null && !igroupName.isEmpty()) { - igroup.setName(igroupName); - } - - if (hypervisorType != null) { - String hypervisorName = hypervisorType.name(); - igroup.setOsType(Igroup.OsTypeEnum.valueOf(getOSTypeFromHypervisor(hypervisorName))); - } - - if (hostsIdentifier != null && hostsIdentifier.size() > 0) { - List initiators = new ArrayList<>(); - for (String hostIdentifier : hostsIdentifier) { - Initiator initiator = new Initiator(); - initiator.setName(hostIdentifier); - initiators.add(initiator); - } - igroup.setInitiators(initiators); - } - accessGroupRequest.setIgroup(igroup); - return accessGroupRequest; - } - - public static String getLunName(String volName, String lunName) { + public static String getLunName(String volName, String lunName) { //Lun name in unified "/vol/VolumeName/LunName" return Constants.VOLUME_PATH_PREFIX + volName + Constants.SLASH + lunName; } From 8852bb4e5392ca0328ddabcdf4d56d5c45774efa Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Thu, 6 Nov 2025 15:49:37 +0530 Subject: [PATCH 007/271] CSTACKEX-46 Resolve review bot comments --- .../driver/OntapPrimaryDatastoreDriver.java | 2 +- .../storage/service/UnifiedSANStrategy.java | 26 +++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 71e88541b5d3..025eb06ca866 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -310,7 +310,7 @@ private void grantAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, } } private boolean hostInitiatorFoundInIgroup(String hostInitiator, Igroup igroup) { - if(igroup != null || igroup.getInitiators() != null || hostInitiator != null || !hostInitiator.isEmpty()) { + if(igroup != null && igroup.getInitiators() != null && hostInitiator != null && !hostInitiator.isEmpty()) { for(Initiator initiator : igroup.getInitiators()) { if(initiator.getName().equalsIgnoreCase(hostInitiator)) { return true; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 64fe1dc8ef76..754f902c587b 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -89,7 +89,7 @@ public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume createdCloudStackVolume.setLun(lun); return createdCloudStackVolume; } catch (Exception e) { - s_logger.error("Exception occurred while creating LUN: {}. Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage()); + s_logger.error("Exception occurred while creating LUN: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage()); throw new CloudRuntimeException("Failed to create Lun: " + e.getMessage()); } } @@ -107,16 +107,16 @@ void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { @Override public CloudStackVolume getCloudStackVolume(Map values) { - s_logger.info("getCloudStackVolume : fetching Igroup with params {} ", values); + s_logger.info("getCloudStackVolume : fetching Lun with params {} ", values); if (values == null || values.isEmpty()) { - s_logger.error("getCloudStackVolume: get Igroup failed. Invalid request: {}", values); - throw new CloudRuntimeException("getCloudStackVolume : get Igroup Failed, invalid request"); + s_logger.error("getCloudStackVolume: get Lun failed. Invalid request: {}", values); + throw new CloudRuntimeException("getCloudStackVolume : get Lun Failed, invalid request"); } String svmName = values.get(Constants.SVM_DOT_NAME); String lunName = values.get(Constants.NAME); if(svmName == null || lunName == null || svmName.isEmpty() || lunName.isEmpty()) { - s_logger.error("getCloudStackVolume: get Igroup failed. Invalid svm:{} or igroup name: {}", svmName, lunName); - throw new CloudRuntimeException("getCloudStackVolume : Fget Igroup failed, invalid request"); + s_logger.error("getCloudStackVolume: get Lun failed. Invalid svm:{} or igroup name: {}", svmName, lunName); + throw new CloudRuntimeException("getCloudStackVolume : Failed to get Lun, invalid request"); } try { // Get AuthHeader @@ -125,8 +125,8 @@ public CloudStackVolume getCloudStackVolume(Map values) { Map queryParams = Map.of(Constants.SVM_DOT_NAME, svmName, Constants.NAME, lunName); OntapResponse lunResponse = sanFeignClient.getLunResponse(authHeader, queryParams); if (lunResponse == null || lunResponse.getRecords() == null || lunResponse.getRecords().size() == 0) { - s_logger.error("getCloudStackVolume: Failed to fetch Igroup"); - throw new CloudRuntimeException("getCloudStackVolume: Failed to fetch Igroup"); + s_logger.error("getCloudStackVolume: Failed to fetch Lun"); + throw new CloudRuntimeException("getCloudStackVolume: Failed to fetch Lun"); } Lun lun = lunResponse.getRecords().get(0); s_logger.debug("getCloudStackVolume: Lun Details : {}", lun); @@ -136,7 +136,7 @@ public CloudStackVolume getCloudStackVolume(Map values) { cloudStackVolume.setLun(lun); return cloudStackVolume; } catch (Exception e) { - s_logger.error("Exception occurred while fetching Lun. Exception: {}", e.getMessage()); + s_logger.error("Exception occurred while fetching Lun, Exception: {}", e.getMessage()); throw new CloudRuntimeException("Failed to fetch Lun details: " + e.getMessage()); } } @@ -367,7 +367,7 @@ public AccessGroup getAccessGroup(Map values) { accessGroup.setIgroup(igroup); return accessGroup; } catch (Exception e) { - s_logger.error("Exception occurred while fetching Igroup. Exception: {}", e.getMessage()); + s_logger.error("Exception occurred while fetching Igroup, Exception: {}", e.getMessage()); throw new CloudRuntimeException("Failed to fetch Igroup details: " + e.getMessage()); } } @@ -392,10 +392,10 @@ public void enableLogicalAccess(Map values) { throw new CloudRuntimeException("Failed to perform LunMap for Lun: " +lunName+ " and igroup: " + igroupName); } LunMap lunMap = createdLunMap.getRecords().get(0); - s_logger.debug("enableLogicalAccess: LunMap created successfully. LunMap: {}", lunMap); + s_logger.debug("enableLogicalAccess: LunMap created successfully, LunMap: {}", lunMap); s_logger.info("enableLogicalAccess: LunMap created successfully."); } catch (Exception e) { - s_logger.error("Exception occurred while creating LunMap: {}. Exception: {}", e.getMessage(), e); + s_logger.error("Exception occurred while creating LunMap: {}, Exception: {}", e.getMessage(), e); throw new CloudRuntimeException("Failed to create LunMap: " + e.getMessage()); } } @@ -415,7 +415,7 @@ public void disableLogicalAccess(Map values) { sanFeignClient.deleteLunMap(authHeader, lunUUID, igroupUUID); s_logger.info("disableLogicalAccess: LunMap deleted successfully."); } catch (Exception e) { - s_logger.error("Exception occurred while deleting LunMap: {}. Exception: {}", e.getMessage(), e); + s_logger.error("Exception occurred while deleting LunMap: {}, Exception: {}", e.getMessage(), e); throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage()); } } From dc5f01a6605d8bf70ea281885e15362393b51d54 Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Fri, 7 Nov 2025 10:57:50 +0530 Subject: [PATCH 008/271] CSTACKEX-46 Change method name --- .../storage/lifecycle/OntapPrimaryDatastoreLifecycle.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 9cc5712ca450..b83fcdd77a3d 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -328,7 +328,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); //TODO- Check if we have to handle heterogeneous host within the cluster - if (!validateProtocolSupportAndFetchHostsIndentifier(hostsToConnect, protocol, hostsIdentifier)) { + if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, protocol, hostsIdentifier)) { s_logger.error("attachCluster: Not all hosts in the cluster support the protocol: " + protocol.name()); throw new CloudRuntimeException("attachCluster: Not all hosts in the cluster support the protocol: " + protocol.name()); } @@ -394,7 +394,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); //TODO- Check if we have to handle heterogeneous host within the zone - if (!validateProtocolSupportAndFetchHostsIndentifier(hostsToConnect, protocol, hostsIdentifier)) { + if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, protocol, hostsIdentifier)) { s_logger.error("attachZone: Not all hosts in the zone support the protocol: " + protocol.name()); throw new CloudRuntimeException("attachZone: Not all hosts in the zone support the protocol: " + protocol.name()); } @@ -414,7 +414,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper return true; } - private boolean validateProtocolSupportAndFetchHostsIndentifier(List hosts, ProtocolType protocolType, List hostIdentifiers) { + private boolean validateProtocolSupportAndFetchHostsIdentifier(List hosts, ProtocolType protocolType, List hostIdentifiers) { switch (protocolType) { case ISCSI: String protocolPrefix = Constants.IQN; @@ -427,7 +427,7 @@ private boolean validateProtocolSupportAndFetchHostsIndentifier(List hos } break; default: - throw new CloudRuntimeException("isProtocolSupportedByAllHosts : Unsupported protocol: " + protocolType.name()); + throw new CloudRuntimeException("validateProtocolSupportAndFetchHostsIdentifier : Unsupported protocol: " + protocolType.name()); } return true; } From 14ec034feaf9315ee7ca7d549ead8565c3bdc2ca Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Fri, 7 Nov 2025 11:42:03 +0530 Subject: [PATCH 009/271] CSTACKEX-46 Resolve check styling issues --- .../storage/driver/OntapPrimaryDatastoreDriver.java | 6 +++++- .../java/org/apache/cloudstack/storage/utils/Utility.java | 7 ------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 025eb06ca866..f9750215cab2 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -47,7 +47,11 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.feign.model.*; +import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.feign.model.LunSpace; +import org.apache.cloudstack.storage.feign.model.Svm; +import org.apache.cloudstack.storage.feign.model.Igroup; +import org.apache.cloudstack.storage.feign.model.Initiator; import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index 3cd4343b827a..134b928dc26e 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -23,10 +23,7 @@ import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.feign.model.Lun; -import org.apache.cloudstack.storage.feign.model.LunSpace; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.provider.StorageProviderFactory; @@ -35,15 +32,11 @@ import org.apache.cloudstack.storage.feign.model.Initiator; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; -import org.apache.cloudstack.storage.service.model.AccessGroup; -import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.service.model.ProtocolType; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.springframework.util.Base64Utils; -import java.util.ArrayList; -import java.util.List; import java.util.Map; public class Utility { From 423aee2f364782eeca6d59628ea79c462e18cf0b Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Fri, 7 Nov 2025 12:50:46 +0530 Subject: [PATCH 010/271] CSTACKEX-46 Testing: Resolve fetch Storage details issue --- .../storage/lifecycle/OntapPrimaryDatastoreLifecycle.java | 2 +- .../org/apache/cloudstack/storage/service/StorageStrategy.java | 2 +- .../apache/cloudstack/storage/service/UnifiedSANStrategy.java | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index b83fcdd77a3d..e6f4cc0fad00 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -324,7 +324,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { strategy.createAccessGroup(accessGroupRequest); logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); - Map details = primaryStore.getDetails(); + Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); //TODO- Check if we have to handle heterogeneous host within the cluster diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index edfda1f4bd2d..bd86a7e591d2 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -516,7 +516,7 @@ public String getNetworkInterface() { /** * Method encapsulates the behavior based on the opted protocol in subclasses - @@ -306,22 +306,22 @@ public Volume getStorageVolume(Volume volume) + @@ -306,22 +306,22 @@ public AccessGroup getAccessGroup(Map values) * getNameSpace for Nvme/TCP and Nvme/FC protocols * @param values */ diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 754f902c587b..822ba19b41bf 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -115,7 +115,7 @@ public CloudStackVolume getCloudStackVolume(Map values) { String svmName = values.get(Constants.SVM_DOT_NAME); String lunName = values.get(Constants.NAME); if(svmName == null || lunName == null || svmName.isEmpty() || lunName.isEmpty()) { - s_logger.error("getCloudStackVolume: get Lun failed. Invalid svm:{} or igroup name: {}", svmName, lunName); + s_logger.error("getCloudStackVolume: get Lun failed. Invalid svm:{} or Lun name: {}", svmName, lunName); throw new CloudRuntimeException("getCloudStackVolume : Failed to get Lun, invalid request"); } try { From 905ae4ad89223e253497ed1af4b595a736ee31c9 Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Fri, 7 Nov 2025 13:08:16 +0530 Subject: [PATCH 011/271] CSTACKEX-46 Testing: Protocol Type fetch issue --- .../storage/lifecycle/OntapPrimaryDatastoreLifecycle.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index e6f4cc0fad00..9de19303d6db 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -433,7 +433,7 @@ private boolean validateProtocolSupportAndFetchHostsIdentifier(List host } private AccessGroup createAccessGroupRequestByProtocol(StoragePoolVO storagePool, long scopeId, Map details, List hostsIdentifier) { - ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL).toLowerCase()); + ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL).toUpperCase()); String svmName = details.get(Constants.SVM_NAME); switch (protocol) { case ISCSI: @@ -476,6 +476,7 @@ private AccessGroup createSANAccessGroupRequest(String svmName, String igroupNam igroup.setInitiators(initiators); } accessGroupRequest.setIgroup(igroup); + s_logger.debug("createSANAccessGroupRequest: request: " + accessGroupRequest); return accessGroupRequest; } From 3ee5ead3be3ae788d40016966de6335e6b71f79b Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Fri, 7 Nov 2025 13:33:15 +0530 Subject: [PATCH 012/271] CSTACKEX-46 Testing: Added debug log --- .../apache/cloudstack/storage/service/UnifiedSANStrategy.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 822ba19b41bf..95a04adc1434 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -222,6 +222,8 @@ public AccessGroup createAccessGroup(AccessGroup accessGroup) { throw feignEx; } + s_logger.debug("createAccessGroup: createdIgroup: {}", createdIgroup); + s_logger.debug("createAccessGroup: createdIgroup Records: {}", createdIgroup.getRecords()); if (createdIgroup == null || createdIgroup.getRecords() == null || createdIgroup.getRecords().isEmpty()) { s_logger.error("createAccessGroup: Igroup creation failed for Igroup Name {}", igroupName); throw new CloudRuntimeException("Failed to create Igroup: " + igroupName); From dc0e84e046c7c84da19c8dd4575f29bc8c3b754b Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Fri, 7 Nov 2025 13:36:04 +0530 Subject: [PATCH 013/271] CSTACKEX-46 Testing: Added debug log --- .../lifecycle/OntapPrimaryDatastoreLifecycle.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 9de19303d6db..baf12d3b0d00 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -334,8 +334,13 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { } //TODO - check if no host to connect then also need to create access group without initiators if (hostsIdentifier != null && hostsIdentifier.size() > 0) { - AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); - strategy.createAccessGroup(accessGroupRequest); + try { + AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); + strategy.createAccessGroup(accessGroupRequest); + } catch (Exception e) { + s_logger.error("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId(), e); + throw new CloudRuntimeException("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId(), e); + } } logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); for (HostVO host : hostsToConnect) { From b2056ff73a7a58763a04d3696846fc381adfe2ca Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Fri, 7 Nov 2025 13:50:48 +0530 Subject: [PATCH 014/271] CSTACKEX-46 Testing: Corrected the URL for Create Lun and Igroup --- .../cloudstack/storage/feign/client/SANFeignClient.java | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java index d67c1f10c5a0..0da826f04fd2 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java @@ -40,11 +40,9 @@ public interface SANFeignClient { @RequestLine("POST /api/storage/luns?return_records={returnRecords}") @Headers({"Authorization: {authHeader}"}) OntapResponse createLun(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, Lun lun); - @RequestLine("POST /api/storage/luns") - @Headers({"Authorization: {authHeader}", "return_records: {returnRecords}"}) - OntapResponse createLun(@Param("authHeader") String authHeader, - @Param("returnRecords") boolean returnRecords, - Lun lun); + @RequestLine("POST /api/storage/luns?return_records={returnRecords}") + @Headers({"Authorization: {authHeader}"}) + OntapResponse createLun(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, Lun lun); @RequestLine("GET /api/storage/luns") @Headers({"Authorization: {authHeader}"}) From 67bf958ab86930c122124301739717b5dad06ea5 Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Fri, 7 Nov 2025 16:07:15 +0530 Subject: [PATCH 015/271] CSTACKEX-46 Resolve bot review comments --- .../driver/OntapPrimaryDatastoreDriver.java | 14 ++------------ .../lifecycle/OntapPrimaryDatastoreLifecycle.java | 10 ++++++---- .../storage/service/StorageStrategy.java | 7 ++++--- .../storage/service/UnifiedSANStrategy.java | 5 +---- 4 files changed, 13 insertions(+), 23 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index f9750215cab2..233815eb305e 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -175,17 +175,7 @@ private CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO s String lunFullName = Utility.getLunName(storagePool.getName(), volumeInfo.getName()); lunRequest.setName(lunFullName); - String hypervisorType = storagePool.getHypervisor().name(); - String osType = null; - switch (hypervisorType) { - case Constants.KVM: - osType = Lun.OsTypeEnum.LINUX.getValue(); - break; - default: - String errMsg = "createCloudStackVolume : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage"; - s_logger.error(errMsg); - throw new CloudRuntimeException(errMsg); - } + String osType = Utility.getOSTypeFromHypervisor(storagePool.getHypervisor().name()); lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType)); cloudStackVolumeRequest.setLun(lunRequest); @@ -299,7 +289,7 @@ private void grantAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); if(!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { s_logger.error("grantAccess: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); - throw new CloudRuntimeException("grantAccess: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName); + throw new CloudRuntimeException("grantAccess: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName + "]"); } Map enableLogicalAccessMap = new HashMap<>(); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index baf12d3b0d00..83b564158af9 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -329,8 +329,9 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); //TODO- Check if we have to handle heterogeneous host within the cluster if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, protocol, hostsIdentifier)) { - s_logger.error("attachCluster: Not all hosts in the cluster support the protocol: " + protocol.name()); - throw new CloudRuntimeException("attachCluster: Not all hosts in the cluster support the protocol: " + protocol.name()); + String errMsg = "attachCluster: Not all hosts in the cluster support the protocol: " + protocol.name(); + s_logger.error(errMsg); + throw new CloudRuntimeException(errMsg); } //TODO - check if no host to connect then also need to create access group without initiators if (hostsIdentifier != null && hostsIdentifier.size() > 0) { @@ -400,8 +401,9 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); //TODO- Check if we have to handle heterogeneous host within the zone if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, protocol, hostsIdentifier)) { - s_logger.error("attachZone: Not all hosts in the zone support the protocol: " + protocol.name()); - throw new CloudRuntimeException("attachZone: Not all hosts in the zone support the protocol: " + protocol.name()); + String errMsg = "attachZone: Not all hosts in the zone support the protocol: " + protocol.name(); + s_logger.error(errMsg); + throw new CloudRuntimeException(errMsg); } if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) { AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index bd86a7e591d2..c5df71ebd652 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -516,9 +516,10 @@ public String getNetworkInterface() { /** * Method encapsulates the behavior based on the opted protocol in subclasses - @@ -306,22 +306,22 @@ public AccessGroup getAccessGroup(Map values) - * getNameSpace for Nvme/TCP and Nvme/FC protocols - * @param values + * getIGroup example getIgroup for iSCSI and FC protocols + * getExportPolicy example getExportPolicy for NFS 3.0 and NFS 4.1 protocols + * //TODO for Nvme/TCP and Nvme/FC protocols + * @param values map to get access group values like name, svm name etc. */ abstract public AccessGroup getAccessGroup(Map values); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 95a04adc1434..62e59cf61105 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -362,9 +362,6 @@ public AccessGroup getAccessGroup(Map values) { throw new CloudRuntimeException("Failed to fetch Igroup"); } Igroup igroup = igroupResponse.getRecords().get(0); - s_logger.debug("getAccessGroup: Igroup Details : {}", igroup); - s_logger.info("getAccessGroup: Fetched the Igroup successfully. IgroupName: {}", igroup.getName()); - AccessGroup accessGroup = new AccessGroup(); accessGroup.setIgroup(igroup); return accessGroup; @@ -391,7 +388,7 @@ public void enableLogicalAccess(Map values) { OntapResponse createdLunMap = sanFeignClient.createLunMap(authHeader, true, lunMapRequest); if (createdLunMap == null || createdLunMap.getRecords() == null || createdLunMap.getRecords().size() == 0) { s_logger.error("enableLogicalAccess: LunMap failed for Lun: {} and igroup: {}", lunName, igroupName); - throw new CloudRuntimeException("Failed to perform LunMap for Lun: " +lunName+ " and igroup: " + igroupName); + throw new CloudRuntimeException("Failed to perform LunMap for Lun: " + lunName + " and igroup: " + igroupName); } LunMap lunMap = createdLunMap.getRecords().get(0); s_logger.debug("enableLogicalAccess: LunMap created successfully, LunMap: {}", lunMap); From 8732f7ff062a7094dfe6ef21d7cb1e2c6d058281 Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Mon, 10 Nov 2025 12:35:15 +0530 Subject: [PATCH 016/271] CSTACKEX-46 Resolve review comments --- .../storage/driver/OntapPrimaryDatastoreDriver.java | 9 ++++----- .../lifecycle/OntapPrimaryDatastoreLifecycle.java | 1 + 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 233815eb305e..733296fc3395 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -107,7 +107,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet throw new InvalidParameterValueException("createAsync: callback should not be null"); } try { - s_logger.info("createAsync: Started for data store [{}] and data object [{}] of type [{}]", dataStore, dataObject, dataObject.getType()); + s_logger.info("createAsync: Started for data store name [{}] and data object name [{}] of type [{}]", dataStore.getName(), dataObject.getName(), dataObject.getType()); StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if(storagePool == null) { @@ -124,7 +124,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet } } catch (Exception e) { errMsg = e.getMessage(); - s_logger.error("createAsync: Failed for dataObject [{}]: {}", dataObject, errMsg); + s_logger.error("createAsync: Failed for dataObject name [{}]: {}", dataObject.getName(), errMsg); createCmdResult = new CreateCmdResult(null, new Answer(null, false, errMsg)); createCmdResult.setResult(e.toString()); } finally { @@ -159,13 +159,12 @@ private String createCloudStackVolumeForTypeVolume(DataStore dataStore, VolumeIn private CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, VolumeInfo volumeInfo) { CloudStackVolume cloudStackVolumeRequest = null; - + Svm svm = new Svm(); + svm.setName(details.get(Constants.SVM_NAME)); String protocol = details.get(Constants.PROTOCOL); if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { cloudStackVolumeRequest = new CloudStackVolume(); Lun lunRequest = new Lun(); - Svm svm = new Svm(); - svm.setName(details.get(Constants.SVM_NAME)); lunRequest.setSvm(svm); LunSpace lunSpace = new LunSpace(); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 83b564158af9..a8cb81514aba 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -436,6 +436,7 @@ private boolean validateProtocolSupportAndFetchHostsIdentifier(List host default: throw new CloudRuntimeException("validateProtocolSupportAndFetchHostsIdentifier : Unsupported protocol: " + protocolType.name()); } + logger.info("validateProtocolSupportAndFetchHostsIdentifier: All hosts support the protocol: " + protocolType.name()); return true; } From f1b7dbd99504d6e97bc1ece6e27b68240595644a Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Mon, 10 Nov 2025 14:07:14 +0530 Subject: [PATCH 017/271] CSTACKEX-46 Resolve review comments again --- .../storage/driver/OntapPrimaryDatastoreDriver.java | 2 +- .../apache/cloudstack/storage/service/StorageStrategy.java | 4 ++-- .../java/org/apache/cloudstack/storage/utils/Utility.java | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 733296fc3395..2e3d9438e21f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -369,7 +369,7 @@ private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, } Map disableLogicalAccessMap = new HashMap<>(); - disableLogicalAccessMap.put(Constants.LUN_DOT_UUID, cloudStackVolume.getLun().getUuid().toString()); + disableLogicalAccessMap.put(Constants.LUN_DOT_UUID, cloudStackVolume.getLun().getUuid()); disableLogicalAccessMap.put(Constants.IGROUP_DOT_UUID, accessGroup.getIgroup().getUuid()); storageStrategy.disableLogicalAccess(disableLogicalAccessMap); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index c5df71ebd652..bb25928438c6 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -516,8 +516,8 @@ public String getNetworkInterface() { /** * Method encapsulates the behavior based on the opted protocol in subclasses - * getIGroup example getIgroup for iSCSI and FC protocols - * getExportPolicy example getExportPolicy for NFS 3.0 and NFS 4.1 protocols + * e.g., getIGroup for iSCSI and FC protocols + * e.g., getExportPolicy for NFS 3.0 and NFS 4.1 protocols * //TODO for Nvme/TCP and Nvme/FC protocols * @param values map to get access group values like name, svm name etc. */ diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index 134b928dc26e..09529117cfc9 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -146,7 +146,7 @@ public static String generateExportPolicyName(String svmName, String volumeName) } public static String getLunName(String volName, String lunName) { - //Lun name in unified "/vol/VolumeName/LunName" + //LUN name in ONTAP unified : "/vol/VolumeName/LunName" return Constants.VOLUME_PATH_PREFIX + volName + Constants.SLASH + lunName; } From e9ba8b34907b00759b72eb1a1f6b38d75f70d7f5 Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Mon, 10 Nov 2025 14:15:14 +0530 Subject: [PATCH 018/271] CSTACKEX-46 Change log level --- .../storage/service/UnifiedSANStrategy.java | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 62e59cf61105..6ecac4338a79 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -107,7 +107,8 @@ void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { @Override public CloudStackVolume getCloudStackVolume(Map values) { - s_logger.info("getCloudStackVolume : fetching Lun with params {} ", values); + s_logger.info("getCloudStackVolume : fetching Lun"); + s_logger.debug("getCloudStackVolume : fetching Lun with params {} ", values); if (values == null || values.isEmpty()) { s_logger.error("getCloudStackVolume: get Lun failed. Invalid request: {}", values); throw new CloudRuntimeException("getCloudStackVolume : get Lun Failed, invalid request"); @@ -144,9 +145,10 @@ public CloudStackVolume getCloudStackVolume(Map values) { @Override public AccessGroup createAccessGroup(AccessGroup accessGroup) { s_logger.info("createAccessGroup : Create Igroup"); - String igroupName = "unknown"; - if (accessGroup == null) { - throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, invalid accessGroup object passed"); + s_logger.debug("createAccessGroup : Creating Igroup with access group request {} ", accessGroup); + if (accessGroup == null || accessGroup.getIgroup() == null) { + s_logger.error("createAccessGroup: Igroup creation failed. Invalid request: {}", accessGroup); + throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, invalid request"); } try { // Get StoragePool details @@ -340,7 +342,8 @@ public AccessGroup updateAccessGroup(AccessGroup accessGroup) { } public AccessGroup getAccessGroup(Map values) { - s_logger.info("getAccessGroup : fetching Igroup with params {} ", values); + s_logger.info("getAccessGroup : fetch Igroup"); + s_logger.debug("getAccessGroup : fetching Igroup with params {} ", values); if (values == null || values.isEmpty()) { s_logger.error("getAccessGroup: get Igroup failed. Invalid request: {}", values); throw new CloudRuntimeException("getAccessGroup : get Igroup Failed, invalid request"); @@ -372,7 +375,8 @@ public AccessGroup getAccessGroup(Map values) { } public void enableLogicalAccess(Map values) { - s_logger.info("enableLogicalAccess : Creating LunMap with values {} ", values); + s_logger.info("enableLogicalAccess : Create LunMap"); + s_logger.debug("enableLogicalAccess : Creating LunMap with values {} ", values); LunMap lunMapRequest = new LunMap(); String svmName = values.get(Constants.SVM_DOT_NAME); String lunName = values.get(Constants.LUN_DOT_NAME); @@ -400,7 +404,8 @@ public void enableLogicalAccess(Map values) { } public void disableLogicalAccess(Map values) { - s_logger.info("disableLogicalAccess : Deleting LunMap with values {} ", values); + s_logger.info("disableLogicalAccess : Delete LunMap"); + s_logger.debug("disableLogicalAccess : Deleting LunMap with values {} ", values); String lunUUID = values.get(Constants.LUN_DOT_UUID); String igroupUUID = values.get(Constants.IGROUP_DOT_UUID); if(lunUUID == null || igroupUUID == null || lunUUID.isEmpty() || igroupUUID.isEmpty()) { From f067b500572d1bee76c75ece7ce8f69557b388f8 Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Mon, 10 Nov 2025 15:02:37 +0530 Subject: [PATCH 019/271] CSTACKEX-46 added valiodation for lun name --- .../driver/OntapPrimaryDatastoreDriver.java | 36 +++++++++++++------ .../OntapPrimaryDatastoreLifecycle.java | 4 +-- .../storage/service/UnifiedSANStrategy.java | 9 +++-- 3 files changed, 33 insertions(+), 16 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 2e3d9438e21f..46e1306543e8 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -106,11 +106,15 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet if (callback == null) { throw new InvalidParameterValueException("createAsync: callback should not be null"); } + if(!isValidName(dataObject.getName())) { + errMsg = "createAsync: Invalid dataObject name [" + dataObject.getName() + "]. It must start with a letter and can only contain letters, digits, and underscores, and be up to 200 characters long."; + s_logger.error(errMsg); + throw new InvalidParameterValueException(errMsg); + } try { s_logger.info("createAsync: Started for data store name [{}] and data object name [{}] of type [{}]", dataStore.getName(), dataObject.getName(), dataObject.getType()); - StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); - if(storagePool == null) { + if (storagePool == null) { s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); } @@ -135,6 +139,16 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet } } + public boolean isValidName(String name) { + // Check for null and length constraint first + if (name == null || name.length() > 200) { + return false; + } + // Regex: Starts with a letter, followed by letters, digits, or underscores + String regex = "^[a-zA-Z][a-zA-Z0-9_]*$"; + return name.matches(regex); + } + private String createCloudStackVolumeForTypeVolume(DataStore dataStore, VolumeInfo volumeObject) { StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if(storagePool == null) { @@ -248,7 +262,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore } try { StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); - if(storagePool == null) { + if (storagePool == null) { s_logger.error("grantAccess : Storage Pool not found for id: " + dataStore.getId()); throw new CloudRuntimeException("grantAccess : Storage Pool not found for id: " + dataStore.getId()); } @@ -259,7 +273,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore if (dataObject.getType() == DataObjectType.VOLUME) { VolumeVO volumeVO = volumeDao.findById(dataObject.getId()); - if(volumeVO == null) { + if (volumeVO == null) { s_logger.error("grantAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); throw new CloudRuntimeException("grantAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); } @@ -281,12 +295,12 @@ private void grantAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, String svmName = details.get(Constants.SVM_NAME); long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? host.getClusterId() : host.getDataCenterId(); - if(ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { String accessGroupName = Utility.getIgroupName(svmName, scopeId); CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, volumeVO.getPath()); s_logger.info("grantAccessForVolume: Retrieved LUN [{}] details for volume [{}]", cloudStackVolume.getLun().getName(), volumeVO.getName()); AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); - if(!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { + if (!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { s_logger.error("grantAccess: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); throw new CloudRuntimeException("grantAccess: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName + "]"); } @@ -326,7 +340,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) } try { StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); - if(storagePool == null) { + if (storagePool == null) { s_logger.error("revokeAccess : Storage Pool not found for id: " + dataStore.getId()); throw new CloudRuntimeException("revokeAccess : Storage Pool not found for id: " + dataStore.getId()); } @@ -337,7 +351,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) if (dataObject.getType() == DataObjectType.VOLUME) { VolumeVO volumeVO = volumeDao.findById(dataObject.getId()); - if(volumeVO == null) { + if (volumeVO == null) { s_logger.error("revokeAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); throw new CloudRuntimeException("revokeAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); } @@ -358,12 +372,12 @@ private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, String svmName = details.get(Constants.SVM_NAME); long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? host.getClusterId() : host.getDataCenterId(); - if(ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { String accessGroupName = Utility.getIgroupName(svmName, scopeId); CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, volumeVO.getPath()); AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); //TODO check if initiator does exits in igroup, will throw the error ? - if(!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { + if (!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { s_logger.error("revokeAccessForVolume: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); return; } @@ -381,7 +395,7 @@ private CloudStackVolume getCloudStackVolumeByName(StorageStrategy storageStrate getCloudStackVolumeMap.put(Constants.NAME, cloudStackVolumeName); getCloudStackVolumeMap.put(Constants.SVM_DOT_NAME, svmName); CloudStackVolume cloudStackVolume = storageStrategy.getCloudStackVolume(getCloudStackVolumeMap); - if(cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getName() == null) { + if (cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getName() == null) { s_logger.error("getCloudStackVolumeByName: Failed to get LUN details [{}]", cloudStackVolumeName); throw new CloudRuntimeException("getCloudStackVolumeByName: Failed to get LUN [" + cloudStackVolumeName + "]"); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index a8cb81514aba..ff233d1fac5e 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -301,7 +301,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { } List hostsIdentifier = new ArrayList<>(); StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); - if(storagePool == null) { + if (storagePool == null) { s_logger.error("attachCluster : Storage Pool not found for id: " + dataStore.getId()); throw new CloudRuntimeException("attachCluster : Storage Pool not found for id: " + dataStore.getId()); } @@ -373,7 +373,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper } List hostsIdentifier = new ArrayList<>(); StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); - if(storagePool == null) { + if (storagePool == null) { s_logger.error("attachZone : Storage Pool not found for id: " + dataStore.getId()); throw new CloudRuntimeException("attachZone : Storage Pool not found for id: " + dataStore.getId()); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 6ecac4338a79..2291728b92cc 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -377,7 +377,6 @@ public AccessGroup getAccessGroup(Map values) { public void enableLogicalAccess(Map values) { s_logger.info("enableLogicalAccess : Create LunMap"); s_logger.debug("enableLogicalAccess : Creating LunMap with values {} ", values); - LunMap lunMapRequest = new LunMap(); String svmName = values.get(Constants.SVM_DOT_NAME); String lunName = values.get(Constants.LUN_DOT_NAME); String igroupName = values.get(Constants.IGROUP_DOT_NAME); @@ -389,6 +388,10 @@ public void enableLogicalAccess(Map values) { // Get AuthHeader String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); // Create LunMap + LunMap lunMapRequest = new LunMap(); + lunMapRequest.getSvm().setName(svmName); + lunMapRequest.getLun().setName(lunName); + lunMapRequest.getIgroup().setName(igroupName); OntapResponse createdLunMap = sanFeignClient.createLunMap(authHeader, true, lunMapRequest); if (createdLunMap == null || createdLunMap.getRecords() == null || createdLunMap.getRecords().size() == 0) { s_logger.error("enableLogicalAccess: LunMap failed for Lun: {} and igroup: {}", lunName, igroupName); @@ -398,7 +401,7 @@ public void enableLogicalAccess(Map values) { s_logger.debug("enableLogicalAccess: LunMap created successfully, LunMap: {}", lunMap); s_logger.info("enableLogicalAccess: LunMap created successfully."); } catch (Exception e) { - s_logger.error("Exception occurred while creating LunMap: {}, Exception: {}", e.getMessage(), e); + s_logger.error("Exception occurred while creating LunMap, Exception: {}", e); throw new CloudRuntimeException("Failed to create LunMap: " + e.getMessage()); } } @@ -419,7 +422,7 @@ public void disableLogicalAccess(Map values) { sanFeignClient.deleteLunMap(authHeader, lunUUID, igroupUUID); s_logger.info("disableLogicalAccess: LunMap deleted successfully."); } catch (Exception e) { - s_logger.error("Exception occurred while deleting LunMap: {}, Exception: {}", e.getMessage(), e); + s_logger.error("Exception occurred while deleting LunMap, Exception: {}", e); throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage()); } } From dac753f6583ecbfbef2edabadee1f537c81dd13a Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Mon, 10 Nov 2025 18:03:08 +0530 Subject: [PATCH 020/271] CSTACKEX-46 resolve check style issues --- .../driver/OntapPrimaryDatastoreDriver.java | 3 +-- .../cloudstack/storage/feign/model/Svm.java | 1 - .../OntapPrimaryDatastoreLifecycle.java | 13 +++++++++---- .../storage/service/UnifiedSANStrategy.java | 19 ++++++++++++------- .../cloudstack/storage/utils/Utility.java | 2 +- 5 files changed, 23 insertions(+), 15 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 46e1306543e8..c06762fc0659 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -145,8 +145,7 @@ public boolean isValidName(String name) { return false; } // Regex: Starts with a letter, followed by letters, digits, or underscores - String regex = "^[a-zA-Z][a-zA-Z0-9_]*$"; - return name.matches(regex); + return name.matches(Constants.ONTAP_NAME_REGEX); } private String createCloudStackVolumeForTypeVolume(DataStore dataStore, VolumeInfo volumeObject) { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java index 65821739f1b2..b1462c593863 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java @@ -143,5 +143,4 @@ public int hashCode() { @JsonInclude(JsonInclude.Include.NON_NULL) public static class Links { } - } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index ff233d1fac5e..edd804bd32a2 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -339,8 +339,8 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); strategy.createAccessGroup(accessGroupRequest); } catch (Exception e) { - s_logger.error("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId(), e); - throw new CloudRuntimeException("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId(), e); + s_logger.error("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage(), e); + throw new CloudRuntimeException("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage(), e); } } logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); @@ -406,8 +406,13 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper throw new CloudRuntimeException(errMsg); } if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) { - AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); - strategy.createAccessGroup(accessGroupRequest); + try { + AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); + strategy.createAccessGroup(accessGroupRequest); + } catch (Exception e) { + s_logger.error("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage()); + throw new CloudRuntimeException("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage()); + } } for (HostVO host : hostsToConnect) { try { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 2291728b92cc..84c1b0b01d7e 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -27,10 +27,7 @@ import org.apache.cloudstack.storage.feign.client.SANFeignClient; import org.apache.cloudstack.storage.feign.model.Igroup; import org.apache.cloudstack.storage.feign.model.Initiator; -import org.apache.cloudstack.storage.feign.model.Igroup; -import org.apache.cloudstack.storage.feign.model.Lun; -import org.apache.cloudstack.storage.feign.model.LunMap; -import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.*; import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.response.OntapResponse; import org.apache.cloudstack.storage.service.model.AccessGroup; @@ -389,9 +386,17 @@ public void enableLogicalAccess(Map values) { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); // Create LunMap LunMap lunMapRequest = new LunMap(); - lunMapRequest.getSvm().setName(svmName); - lunMapRequest.getLun().setName(lunName); - lunMapRequest.getIgroup().setName(igroupName); + Svm svm = new Svm(); + svm.setName(svmName); + lunMapRequest.setSvm(svm); + //Set Lun name + Lun lun = new Lun(); + lun.setName(lunName); + lunMapRequest.setLun(lun); + //Set Igroup name + Igroup igroup = new Igroup(); + igroup.setName(igroupName); + lunMapRequest.setIgroup(igroup); OntapResponse createdLunMap = sanFeignClient.createLunMap(authHeader, true, lunMapRequest); if (createdLunMap == null || createdLunMap.getRecords() == null || createdLunMap.getRecords().size() == 0) { s_logger.error("enableLogicalAccess: LunMap failed for Lun: {} and igroup: {}", lunName, igroupName); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index 09529117cfc9..9532bf36fd9a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -146,7 +146,7 @@ public static String generateExportPolicyName(String svmName, String volumeName) } public static String getLunName(String volName, String lunName) { - //LUN name in ONTAP unified : "/vol/VolumeName/LunName" + //LUN name in ONTAP unified format: "/vol/VolumeName/LunName" return Constants.VOLUME_PATH_PREFIX + volName + Constants.SLASH + lunName; } From 716fcaf146b6516cadcc5e00ed60ce38f4b3384a Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Mon, 10 Nov 2025 18:49:57 +0530 Subject: [PATCH 021/271] CSTACKEX-46 Resolve import issues --- .../storage/driver/OntapPrimaryDatastoreDriver.java | 2 +- .../cloudstack/storage/service/UnifiedSANStrategy.java | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index c06762fc0659..1f1591e8ad5e 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -139,7 +139,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet } } - public boolean isValidName(String name) { + public boolean isValidName(String name) { // Check for null and length constraint first if (name == null || name.length() > 200) { return false; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 84c1b0b01d7e..47f529968deb 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -29,6 +29,11 @@ import org.apache.cloudstack.storage.feign.model.Initiator; import org.apache.cloudstack.storage.feign.model.*; import org.apache.cloudstack.storage.feign.model.Svm; +import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.feign.model.Igroup; +import org.apache.cloudstack.storage.feign.model.LunMap; +import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.response.OntapResponse; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; From e6ece89d05c605526b91b9329abda3f539595986 Mon Sep 17 00:00:00 2001 From: "Gupta, Surya" Date: Mon, 10 Nov 2025 19:12:50 +0530 Subject: [PATCH 022/271] CSTACKEX-46 Resolve copilot comments --- .../storage/lifecycle/OntapPrimaryDatastoreLifecycle.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index edd804bd32a2..9a37c8e65d44 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -339,8 +339,8 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); strategy.createAccessGroup(accessGroupRequest); } catch (Exception e) { - s_logger.error("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage(), e); - throw new CloudRuntimeException("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage(), e); + s_logger.error("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage()); + throw new CloudRuntimeException("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage()); } } logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); From d111436649252294f05efc580a1f738f52716906 Mon Sep 17 00:00:00 2001 From: "Locharla, Sandeep" Date: Wed, 21 Jan 2026 21:33:13 +0530 Subject: [PATCH 023/271] CSTACKEX-46: Create, Delete iSCSI type Cloudstack volumes, Enter, Cancel Maintenance mode --- .../kvm/storage/IscsiAdmStorageAdaptor.java | 61 +++- .../driver/OntapPrimaryDatastoreDriver.java | 302 ++++++++++++------ .../storage/feign/client/SANFeignClient.java | 7 +- .../storage/feign/model/Igroup.java | 2 +- .../cloudstack/storage/feign/model/Lun.java | 43 +++ .../OntapPrimaryDatastoreLifecycle.java | 135 +++----- .../storage/service/StorageStrategy.java | 64 +++- .../storage/service/UnifiedNASStrategy.java | 16 +- .../storage/service/UnifiedSANStrategy.java | 190 ++++++++--- .../cloudstack/storage/utils/Constants.java | 3 +- .../cloudstack/storage/utils/Utility.java | 50 +-- 11 files changed, 595 insertions(+), 278 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java index ba689d5107f7..155e97b90558 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java @@ -19,6 +19,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.nio.file.Files; +import java.nio.file.Paths; import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; @@ -35,6 +37,7 @@ import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.OutputInterpreter; +import com.cloud.utils.script.OutputInterpreter.AllLinesParser; import com.cloud.utils.script.Script; public class IscsiAdmStorageAdaptor implements StorageAdaptor { @@ -96,10 +99,15 @@ public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map 0) { @@ -238,6 +268,15 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) { } private long getDeviceSize(String deviceByPath) { + try { + if (!Files.exists(Paths.get(deviceByPath))) { + logger.debug("Device by-path does not exist yet: " + deviceByPath); + return 0L; + } + } catch (Exception ignore) { + // If FS check fails for any reason, fall back to blockdev call + } + Script iScsiAdmCmd = new Script(true, "blockdev", 0, logger); iScsiAdmCmd.add("--getsize64", deviceByPath); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 1f1591e8ad5e..04a2300b5578 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -30,8 +30,14 @@ import com.cloud.storage.VolumeVO; import com.cloud.storage.ScopeType; import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.storage.dao.VMTemplateDao; +//import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; @@ -47,11 +53,9 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.feign.model.Lun; -import org.apache.cloudstack.storage.feign.model.LunSpace; -import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.Igroup; import org.apache.cloudstack.storage.feign.model.Initiator; +import org.apache.cloudstack.storage.feign.model.Lun; import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; @@ -62,6 +66,7 @@ import org.apache.logging.log4j.Logger; import javax.inject.Inject; +import java.util.Arrays; import java.util.HashMap; import java.util.Map; @@ -71,7 +76,9 @@ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver { @Inject private StoragePoolDetailsDao storagePoolDetailsDao; @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private VMInstanceDao vmDao; @Inject private VolumeDao volumeDao; + @Inject private VolumeDetailsDao volumeDetailsDao; @Override public Map getCapabilities() { s_logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called"); @@ -95,7 +102,6 @@ public DataTO getTO(DataObject data) { @Override public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { CreateCmdResult createCmdResult = null; - String path = null; String errMsg = null; if (dataStore == null) { throw new InvalidParameterValueException("createAsync: dataStore should not be null"); @@ -106,11 +112,6 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet if (callback == null) { throw new InvalidParameterValueException("createAsync: callback should not be null"); } - if(!isValidName(dataObject.getName())) { - errMsg = "createAsync: Invalid dataObject name [" + dataObject.getName() + "]. It must start with a letter and can only contain letters, digits, and underscores, and be up to 200 characters long."; - s_logger.error(errMsg); - throw new InvalidParameterValueException(errMsg); - } try { s_logger.info("createAsync: Started for data store name [{}] and data object name [{}] of type [{}]", dataStore.getName(), dataObject.getName(), dataObject.getType()); StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); @@ -118,9 +119,49 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); } + Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); + if (dataObject.getType() == DataObjectType.VOLUME) { - path = createCloudStackVolumeForTypeVolume(storagePool, (VolumeInfo)dataObject); - createCmdResult = new CreateCmdResult(path, new Answer(null, true, null)); + VolumeInfo volInfo = (VolumeInfo) dataObject; + // Create LUN/backing for volume and record relevant details + CloudStackVolume created = createCloudStackVolume(dataStore, volInfo); + + // Immediately ensure LUN-map exists and update VolumeVO path + if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + String svmName = details.get(Constants.SVM_NAME); + String lunName = volumeDetailsDao.findDetail(volInfo.getId(), Constants.LUN_DOT_NAME) != null ? + volumeDetailsDao.findDetail(volInfo.getId(), Constants.LUN_DOT_NAME).getValue() : null; + if (lunName == null) { + // Fallback from returned LUN + lunName = created != null && created.getLun() != null ? created.getLun().getName() : null; + } + if (lunName == null) { + throw new CloudRuntimeException("createAsync: Missing LUN name for volume " + volInfo.getId()); + } + long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? storagePool.getClusterId() : storagePool.getDataCenterId(); + String lunNumber = ensureLunMapped(storagePool, svmName, lunName, scopeId); + + VolumeVO volumeVO = volumeDao.findById(volInfo.getId()); + if (volumeVO != null) { + String iscsiPath = Constants.SLASH + storagePool.getPath() + Constants.SLASH + lunNumber; + volumeVO.set_iScsiName(iscsiPath); + volumeVO.setPath(iscsiPath); + volumeVO.setPoolType(storagePool.getPoolType()); + volumeVO.setPoolId(storagePool.getId()); + volumeDao.update(volumeVO.getId(), volumeVO); + s_logger.info("createAsync: Volume [{}] iSCSI path set to {}", volumeVO.getId(), iscsiPath); + } + } else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + // Ensure pool fields are recorded for managed NFS as well + VolumeVO volumeVO = volumeDao.findById(volInfo.getId()); + if (volumeVO != null) { + volumeVO.setPoolType(storagePool.getPoolType()); + volumeVO.setPoolId(storagePool.getId()); + volumeDao.update(volumeVO.getId(), volumeVO); + s_logger.info("createAsync: Managed NFS volume [{}] associated with pool {}", volumeVO.getId(), storagePool.getId()); + } + } + createCmdResult = new CreateCmdResult(null, new Answer(null, true, null)); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; s_logger.error(errMsg); @@ -133,68 +174,86 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet createCmdResult.setResult(e.toString()); } finally { if (createCmdResult != null && createCmdResult.isSuccess()) { - s_logger.info("createAsync: Volume created successfully. Path: {}", path); + s_logger.info("createAsync: Operation completed successfully for {}", dataObject.getType()); } callback.complete(createCmdResult); } } - public boolean isValidName(String name) { - // Check for null and length constraint first - if (name == null || name.length() > 200) { - return false; - } - // Regex: Starts with a letter, followed by letters, digits, or underscores - return name.matches(Constants.ONTAP_NAME_REGEX); - } - - private String createCloudStackVolumeForTypeVolume(DataStore dataStore, VolumeInfo volumeObject) { + private CloudStackVolume createCloudStackVolume(DataStore dataStore, DataObject dataObject) { StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); - if(storagePool == null) { - s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); - throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); + if (storagePool == null) { + s_logger.error("createCloudStackVolume: Storage Pool not found for id: {}", dataStore.getId()); + throw new CloudRuntimeException("createCloudStackVolume: Storage Pool not found for id: " + dataStore.getId()); } Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); - s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME)); - CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, volumeObject); - CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest); - if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) { - return cloudStackVolume.getLun().getName(); - } else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { - return volumeObject.getUuid(); // return the volume UUID for agent as path for mounting + + if (dataObject.getType() == DataObjectType.VOLUME) { + VolumeInfo volumeObject = (VolumeInfo) dataObject; + + CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, volumeObject); + CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest); + if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) { + s_logger.info("createCloudStackVolume: iSCSI LUN object created for volume [{}]", volumeObject.getId()); + volumeDetailsDao.addDetail(volumeObject.getId(), Constants.LUN_DOT_UUID, cloudStackVolume.getLun().getUuid(), false); + volumeDetailsDao.addDetail(volumeObject.getId(), Constants.LUN_DOT_NAME, cloudStackVolume.getLun().getName(), false); + VolumeVO volumeVO = volumeDao.findById(volumeObject.getId()); + if (volumeVO != null) { + volumeVO.setPath(null); + if (cloudStackVolume.getLun().getUuid() != null) { + volumeVO.setFolder(cloudStackVolume.getLun().getUuid()); + } + volumeVO.setPoolType(storagePool.getPoolType()); + volumeVO.setPoolId(storagePool.getId()); + volumeDao.update(volumeVO.getId(), volumeVO); + } + } else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + s_logger.info("createCloudStackVolume: Managed NFS object created for volume [{}]", volumeObject.getId()); + // For Managed NFS, set pool fields on Volume + VolumeVO volumeVO = volumeDao.findById(volumeObject.getId()); + if (volumeVO != null) { + volumeVO.setPoolType(storagePool.getPoolType()); + volumeVO.setPoolId(storagePool.getId()); + volumeDao.update(volumeVO.getId(), volumeVO); + } + } else { + String errMsg = "createCloudStackVolume: Volume creation failed for dataObject: " + volumeObject; + s_logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + return cloudStackVolume; } else { - String errMsg = "createCloudStackVolumeForTypeVolume: Volume creation failed. Lun or Lun Path is null for dataObject: " + volumeObject; - s_logger.error(errMsg); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException("createCloudStackVolume: Unsupported DataObjectType: " + dataObject.getType()); } } - private CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, VolumeInfo volumeInfo) { - CloudStackVolume cloudStackVolumeRequest = null; - Svm svm = new Svm(); - svm.setName(details.get(Constants.SVM_NAME)); - String protocol = details.get(Constants.PROTOCOL); - if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) { - cloudStackVolumeRequest = new CloudStackVolume(); - Lun lunRequest = new Lun(); - lunRequest.setSvm(svm); - - LunSpace lunSpace = new LunSpace(); - lunSpace.setSize(volumeInfo.getSize()); - lunRequest.setSpace(lunSpace); - //Lun name is full path like in unified "/vol/VolumeName/LunName" - String lunFullName = Utility.getLunName(storagePool.getName(), volumeInfo.getName()); - lunRequest.setName(lunFullName); - - String osType = Utility.getOSTypeFromHypervisor(storagePool.getHypervisor().name()); - lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType)); - - cloudStackVolumeRequest.setLun(lunRequest); - return cloudStackVolumeRequest; - } else { - throw new CloudRuntimeException("createCloudStackVolumeRequestByProtocol: Unsupported protocol " + protocol); + private String ensureLunMapped(StoragePoolVO storagePool, String svmName, String lunName, long scopeId) { + Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); + StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); + String accessGroupName = Utility.getIgroupName(svmName, storagePool.getScope(), scopeId); + + // Check existing map first. getLogicalAccess returns null (no exception) when map doesn't exist. + Map getMap = new HashMap<>(); + getMap.put(Constants.LUN_DOT_NAME, lunName); + getMap.put(Constants.SVM_DOT_NAME, svmName); + getMap.put(Constants.IGROUP_DOT_NAME, accessGroupName); + Map mapResp = storageStrategy.getLogicalAccess(getMap); + if (mapResp != null && mapResp.containsKey(Constants.LOGICAL_UNIT_NUMBER)) { + String lunNumber = mapResp.get(Constants.LOGICAL_UNIT_NUMBER); + s_logger.info("ensureLunMapped: Existing LunMap found for LUN [{}] in igroup [{}] with LUN number [{}]", lunName, accessGroupName, lunNumber); + return lunNumber; } + // Create if not exists + Map enableMap = new HashMap<>(); + enableMap.put(Constants.LUN_DOT_NAME, lunName); + enableMap.put(Constants.SVM_DOT_NAME, svmName); + enableMap.put(Constants.IGROUP_DOT_NAME, accessGroupName); + Map response = storageStrategy.enableLogicalAccess(enableMap); + if (response == null || !response.containsKey(Constants.LOGICAL_UNIT_NUMBER)) { + throw new CloudRuntimeException("ensureLunMapped: Failed to map LUN [" + lunName + "] to iGroup [" + accessGroupName + "]"); + } + return response.get(Constants.LOGICAL_UNIT_NUMBER); } @Override @@ -214,9 +273,32 @@ public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallbac if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { // ManagedNFS qcow2 backing file deletion handled by KVM host/libvirt; nothing to do via ONTAP REST. s_logger.info("deleteAsync: ManagedNFS volume {} no-op ONTAP deletion", data.getId()); + } else if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); + VolumeInfo volumeObject = (VolumeInfo) data; + s_logger.info("deleteAsync: Deleting volume & LUN for volume id [{}]", volumeObject.getId()); + String lunName = volumeDetailsDao.findDetail(volumeObject.getId(), Constants.LUN_DOT_NAME).getValue(); + String lunUUID = volumeDetailsDao.findDetail(volumeObject.getId(), Constants.LUN_DOT_UUID).getValue(); + if (lunName == null) { + throw new CloudRuntimeException("deleteAsync: Missing LUN name for volume " + volumeObject.getId()); + } + CloudStackVolume delRequest = new CloudStackVolume(); + Lun lun = new Lun(); + lun.setName(lunName); + lun.setUuid(lunUUID); + delRequest.setLun(lun); + storageStrategy.deleteCloudStackVolume(delRequest); + // Set the result + commandResult.setResult(null); + commandResult.setSuccess(true); + s_logger.info("deleteAsync: Volume LUN [{}] deleted successfully", lunName); + } else { + throw new CloudRuntimeException("deleteAsync: Unsupported protocol for deletion: " + details.get(Constants.PROTOCOL)); } } } catch (Exception e) { + s_logger.error("deleteAsync: Failed for data object [{}]: {}", data, e.getMessage()); + commandResult.setSuccess(false); commandResult.setResult(e.getMessage()); } finally { callback.complete(commandResult); @@ -225,7 +307,6 @@ public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallbac @Override public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback) { - } @Override @@ -262,7 +343,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore try { StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if (storagePool == null) { - s_logger.error("grantAccess : Storage Pool not found for id: " + dataStore.getId()); + s_logger.error("grantAccess: Storage Pool not found for id: " + dataStore.getId()); throw new CloudRuntimeException("grantAccess : Storage Pool not found for id: " + dataStore.getId()); } if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) { @@ -276,7 +357,24 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore s_logger.error("grantAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); throw new CloudRuntimeException("grantAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); } - grantAccessForVolume(storagePool, volumeVO, host); + Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); + String svmName = details.get(Constants.SVM_NAME); + String cloudStackVolumeName = volumeDetailsDao.findDetail(volumeVO.getId(), Constants.LUN_DOT_NAME).getValue(); + long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? host.getClusterId() : host.getDataCenterId(); + // Validate initiator membership + validateHostInitiatorInIgroup(storagePool, svmName, scopeId, host); + // Ensure mapping exists + String lunNumber = ensureLunMapped(storagePool, svmName, cloudStackVolumeName, scopeId); + // Update Volume path if missing or changed + String iscsiPath = Constants.SLASH + storagePool.getPath() + Constants.SLASH + lunNumber; + if (volumeVO.getPath() == null || !volumeVO.getPath().equals(iscsiPath)) { + volumeVO.set_iScsiName(iscsiPath); + volumeVO.setPath(iscsiPath); + } + // Ensure pool fields are set (align with SolidFire) + volumeVO.setPoolType(storagePool.getPoolType()); + volumeVO.setPoolId(storagePool.getId()); + volumeDao.update(volumeVO.getId(), volumeVO); } else { s_logger.error("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess"); throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess"); @@ -288,33 +386,20 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore return true; } - private void grantAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, Host host) { + private void validateHostInitiatorInIgroup(StoragePoolVO storagePool, String svmName, long scopeId, Host host) { Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); - String svmName = details.get(Constants.SVM_NAME); - long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? host.getClusterId() : host.getDataCenterId(); - - if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { - String accessGroupName = Utility.getIgroupName(svmName, scopeId); - CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, volumeVO.getPath()); - s_logger.info("grantAccessForVolume: Retrieved LUN [{}] details for volume [{}]", cloudStackVolume.getLun().getName(), volumeVO.getName()); - AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); - if (!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { - s_logger.error("grantAccess: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); - throw new CloudRuntimeException("grantAccess: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName + "]"); - } - - Map enableLogicalAccessMap = new HashMap<>(); - enableLogicalAccessMap.put(Constants.LUN_DOT_NAME, volumeVO.getPath()); - enableLogicalAccessMap.put(Constants.SVM_DOT_NAME, svmName); - enableLogicalAccessMap.put(Constants.IGROUP_DOT_NAME, accessGroupName); - storageStrategy.enableLogicalAccess(enableLogicalAccessMap); - } else { - String errMsg = "grantAccessForVolume: Unsupported protocol type for volume grantAccess: " + details.get(Constants.PROTOCOL); - s_logger.error(errMsg); - throw new CloudRuntimeException(errMsg); + String accessGroupName = Utility.getIgroupName(svmName, storagePool.getScope(), scopeId); + AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); + if (host == null || host.getStorageUrl() == null) { + throw new CloudRuntimeException("validateHostInitiatorInIgroup: host/initiator required but not provided"); + } + if (!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { + s_logger.error("validateHostInitiatorInIgroup: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); + throw new CloudRuntimeException("validateHostInitiatorInIgroup: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName + "]"); } } + private boolean hostInitiatorFoundInIgroup(String hostInitiator, Igroup igroup) { if(igroup != null && igroup.getInitiators() != null && hostInitiator != null && !hostInitiator.isEmpty()) { for(Initiator initiator : igroup.getInitiators()) { @@ -337,10 +422,21 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) if (host == null) { throw new InvalidParameterValueException("revokeAccess: host should not be null"); } + if (dataObject.getType() == DataObjectType.VOLUME) { + Volume volume = volumeDao.findById(dataObject.getId()); + if (volume.getInstanceId() != null) { + VirtualMachine vm = vmDao.findById(volume.getInstanceId()); + if (vm != null && !Arrays.asList(VirtualMachine.State.Destroyed, VirtualMachine.State.Expunging, VirtualMachine.State.Error).contains(vm.getState())) { + s_logger.debug("revokeAccess: Volume [{}] is still attached to VM [{}] in state [{}], skipping revokeAccess", + dataObject.getId(), vm.getInstanceName(), vm.getState()); + return; + } + } + } try { StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if (storagePool == null) { - s_logger.error("revokeAccess : Storage Pool not found for id: " + dataStore.getId()); + s_logger.error("revokeAccess: Storage Pool not found for id: " + dataStore.getId()); throw new CloudRuntimeException("revokeAccess : Storage Pool not found for id: " + dataStore.getId()); } if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) { @@ -351,7 +447,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) if (dataObject.getType() == DataObjectType.VOLUME) { VolumeVO volumeVO = volumeDao.findById(dataObject.getId()); if (volumeVO == null) { - s_logger.error("revokeAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); + s_logger.error("revokeAccess: Cloud Stack Volume not found for id: " + dataObject.getId()); throw new CloudRuntimeException("revokeAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); } revokeAccessForVolume(storagePool, volumeVO, host); @@ -366,16 +462,34 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) } private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, Host host) { + s_logger.info("revokeAccessForVolume: Revoking access to volume [{}] for host [{}]", volumeVO.getName(), host.getName()); Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); String svmName = details.get(Constants.SVM_NAME); long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? host.getClusterId() : host.getDataCenterId(); if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { - String accessGroupName = Utility.getIgroupName(svmName, scopeId); - CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, volumeVO.getPath()); + String accessGroupName = Utility.getIgroupName(svmName, storagePool.getScope(), scopeId); + + String lunName = volumeDetailsDao.findDetail(volumeVO.getId(), Constants.LUN_DOT_NAME) != null ? + volumeDetailsDao.findDetail(volumeVO.getId(), Constants.LUN_DOT_NAME).getValue() : null; + if (lunName == null) { + s_logger.warn("revokeAccessForVolume: No LUN name detail found for volume [{}]; assuming no backend LUN to revoke", volumeVO.getId()); + return; + } + + CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, lunName); + if (cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getUuid() == null) { + s_logger.warn("revokeAccessForVolume: LUN for volume [{}] not found on ONTAP, assuming already deleted", volumeVO.getId()); + return; + } + AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); - //TODO check if initiator does exits in igroup, will throw the error ? + if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getUuid() == null) { + s_logger.warn("revokeAccessForVolume: iGroup [{}] not found on ONTAP, assuming already deleted", accessGroupName); + return; + } + if (!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { s_logger.error("revokeAccessForVolume: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); return; @@ -395,8 +509,8 @@ private CloudStackVolume getCloudStackVolumeByName(StorageStrategy storageStrate getCloudStackVolumeMap.put(Constants.SVM_DOT_NAME, svmName); CloudStackVolume cloudStackVolume = storageStrategy.getCloudStackVolume(getCloudStackVolumeMap); if (cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getName() == null) { - s_logger.error("getCloudStackVolumeByName: Failed to get LUN details [{}]", cloudStackVolumeName); - throw new CloudRuntimeException("getCloudStackVolumeByName: Failed to get LUN [" + cloudStackVolumeName + "]"); + s_logger.error("getCloudStackVolumeByName: LUN [{}] not found on ONTAP; returning null", cloudStackVolumeName); + return null; } return cloudStackVolume; } @@ -407,8 +521,8 @@ private AccessGroup getAccessGroupByName(StorageStrategy storageStrategy, String getAccessGroupMap.put(Constants.SVM_DOT_NAME, svmName); AccessGroup accessGroup = storageStrategy.getAccessGroup(getAccessGroupMap); if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getName() == null) { - s_logger.error("getAccessGroupByName: Failed to get iGroup details [{}]", accessGroupName); - throw new CloudRuntimeException("getAccessGroupByName: Failed to get iGroup details [" + accessGroupName + "]"); + s_logger.error("getAccessGroupByName: iGroup [{}] not found on ONTAP; returning null", accessGroupName); + return null; } return accessGroup; } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java index 0da826f04fd2..45a20fe876fe 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java @@ -40,9 +40,6 @@ public interface SANFeignClient { @RequestLine("POST /api/storage/luns?return_records={returnRecords}") @Headers({"Authorization: {authHeader}"}) OntapResponse createLun(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, Lun lun); - @RequestLine("POST /api/storage/luns?return_records={returnRecords}") - @Headers({"Authorization: {authHeader}"}) - OntapResponse createLun(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, Lun lun); @RequestLine("GET /api/storage/luns") @Headers({"Authorization: {authHeader}"}) @@ -56,9 +53,9 @@ public interface SANFeignClient { @Headers({"Authorization: {authHeader}"}) void updateLun(@Param("authHeader") String authHeader, @Param("uuid") String uuid, Lun lun); - @RequestLine("DELETE /{uuid}") + @RequestLine("DELETE /api/storage/luns/{uuid}") @Headers({"Authorization: {authHeader}"}) - void deleteLun(@Param("authHeader") String authHeader, @Param("uuid") String uuid); + void deleteLun(@Param("authHeader") String authHeader, @Param("uuid") String uuid, @QueryMap Map queryMap); // iGroup Operation APIs @RequestLine("POST /api/protocols/san/igroups?return_records={returnRecords}") diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Igroup.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Igroup.java index 877d60de830c..4dc07e349fad 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Igroup.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Igroup.java @@ -48,7 +48,7 @@ public class Igroup { private String name = null; @JsonProperty("protocol") - private ProtocolEnum protocol = ProtocolEnum.mixed; + private ProtocolEnum protocol = null; @JsonProperty("svm") private Svm svm = null; @JsonProperty("uuid") diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Lun.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Lun.java index 48ebc9c739cb..364790958c8a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Lun.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Lun.java @@ -83,6 +83,9 @@ public static PropertyClassEnum fromValue(String value) { @JsonProperty("name") private String name = null; + @JsonProperty("clone") + private Clone clone = null; + /** * The operating system type of the LUN.<br/> Required in POST when creating a LUN that is not a clone of another. Disallowed in POST when creating a LUN clone. */ @@ -249,6 +252,14 @@ public void setUuid(String uuid) { this.uuid = uuid; } + public Clone getClone() { + return clone; + } + + public void setClone(Clone clone) { + this.clone = clone; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -295,4 +306,36 @@ private String toIndentedString(Object o) { } return o.toString().replace("\n", "\n "); } + + + public static class Clone { + @JsonProperty("source") + private Source source = null; + public Source getSource() { + return source; + } + public void setSource(Source source) { + this.source = source; + } + } + + public static class Source { + @JsonProperty("name") + private String name = null; + @JsonProperty("uuid") + private String uuid = null; + + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public String getUuid() { + return uuid; + } + public void setUuid(String uuid) { + this.uuid = uuid; + } + } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 9a37c8e65d44..6bbd88db14b0 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -43,13 +43,9 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl; -import org.apache.cloudstack.storage.feign.model.ExportPolicy; -import org.apache.cloudstack.storage.feign.model.Igroup; -import org.apache.cloudstack.storage.feign.model.Initiator; import org.apache.cloudstack.storage.feign.model.OntapStorage; -import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.Volume; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; @@ -289,10 +285,6 @@ public DataStore initialize(Map dsInfos) { @Override public boolean attachCluster(DataStore dataStore, ClusterScope scope) { logger.debug("In attachCluster for ONTAP primary storage"); - PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; - List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore); - - logger.debug(" datastore object received is {} ",primaryStore ); if (dataStore == null) { throw new InvalidParameterValueException("attachCluster: dataStore should not be null"); } @@ -307,25 +299,12 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { } PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore; List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore); - // TODO- need to check if no host to connect then throw exception or just continue + // TODO- need to check if no host to connect then throw exception or just continue? logger.debug("attachCluster: Eligible Up and Enabled hosts: {} in cluster {}", hostsToConnect, primaryStore.getClusterId()); - logger.debug(String.format("Attaching the pool to each of the hosts %s in the cluster: %s", hostsToConnect, primaryStore.getClusterId())); - Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId()); StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); - ExportPolicy exportPolicy = new ExportPolicy(); - AccessGroup accessGroupRequest = new AccessGroup(); - accessGroupRequest.setHostsToConnect(hostsToConnect); - accessGroupRequest.setScope(scope); - primaryStore.setDetails(details);// setting details as it does not come from cloudstack - accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); - accessGroupRequest.setPolicy(exportPolicy); - strategy.createAccessGroup(accessGroupRequest); - logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); - Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); - StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); //TODO- Check if we have to handle heterogeneous host within the cluster if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, protocol, hostsIdentifier)) { @@ -333,10 +312,16 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { s_logger.error(errMsg); throw new CloudRuntimeException(errMsg); } + + logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId()); //TODO - check if no host to connect then also need to create access group without initiators if (hostsIdentifier != null && hostsIdentifier.size() > 0) { try { - AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); + AccessGroup accessGroupRequest = new AccessGroup(); + accessGroupRequest.setHostsToConnect(hostsToConnect); + accessGroupRequest.setScope(scope); + primaryStore.setDetails(details);// setting details as it does not come from cloudstack + accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); strategy.createAccessGroup(accessGroupRequest); } catch (Exception e) { s_logger.error("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage()); @@ -348,9 +333,8 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { try { _storageMgr.connectHostToSharedPool(host, dataStore.getId()); } catch (Exception e) { - logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); - return false; logger.warn("attachCluster: Unable to establish a connection between " + host + " and " + dataStore, e); + return false; } } _dataStoreHelper.attachCluster(dataStore); @@ -384,20 +368,9 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId()); StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); - ExportPolicy exportPolicy = new ExportPolicy(); - AccessGroup accessGroupRequest = new AccessGroup(); - accessGroupRequest.setHostsToConnect(hostsToConnect); - accessGroupRequest.setScope(scope); - primaryStore.setDetails(details); // setting details as it does not come from cloudstack - accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); - accessGroupRequest.setPolicy(exportPolicy); - strategy.createAccessGroup(accessGroupRequest); // TODO- need to check if no host to connect then throw exception or just continue logger.debug("attachZone: Eligible Up and Enabled hosts: {}", hostsToConnect); - - Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); - StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details); ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL)); //TODO- Check if we have to handle heterogeneous host within the zone if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, protocol, hostsIdentifier)) { @@ -407,7 +380,11 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper } if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) { try { - AccessGroup accessGroupRequest = createAccessGroupRequestByProtocol(storagePool, scope.getScopeId(), details, hostsIdentifier); + AccessGroup accessGroupRequest = new AccessGroup(); + accessGroupRequest.setHostsToConnect(hostsToConnect); + accessGroupRequest.setScope(scope); + primaryStore.setDetails(details); // setting details as it does not come from cloudstack + accessGroupRequest.setPrimaryDataStoreInfo(primaryStore); strategy.createAccessGroup(accessGroupRequest); } catch (Exception e) { s_logger.error("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage()); @@ -445,64 +422,24 @@ private boolean validateProtocolSupportAndFetchHostsIdentifier(List host return true; } - private AccessGroup createAccessGroupRequestByProtocol(StoragePoolVO storagePool, long scopeId, Map details, List hostsIdentifier) { - ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL).toUpperCase()); - String svmName = details.get(Constants.SVM_NAME); - switch (protocol) { - case ISCSI: - // Access group name format: cs_svmName_scopeId - String igroupName = Utility.getIgroupName(svmName, scopeId); - Hypervisor.HypervisorType hypervisorType = storagePool.getHypervisor(); - return createSANAccessGroupRequest(svmName, igroupName, hypervisorType, hostsIdentifier); - default: - s_logger.error("createAccessGroupRequestByProtocol: Unsupported protocol " + protocol); - throw new CloudRuntimeException("createAccessGroupRequestByProtocol: Unsupported protocol " + protocol); - } - } - - private AccessGroup createSANAccessGroupRequest(String svmName, String igroupName, Hypervisor.HypervisorType hypervisorType, List hostsIdentifier) { - AccessGroup accessGroupRequest = new AccessGroup(); - Igroup igroup = new Igroup(); - - if (svmName != null && !svmName.isEmpty()) { - Svm svm = new Svm(); - svm.setName(svmName); - igroup.setSvm(svm); - } - - if (igroupName != null && !igroupName.isEmpty()) { - igroup.setName(igroupName); - } - - if (hypervisorType != null) { - String hypervisorName = hypervisorType.name(); - igroup.setOsType(Igroup.OsTypeEnum.valueOf(Utility.getOSTypeFromHypervisor(hypervisorName))); - } - - if (hostsIdentifier != null && hostsIdentifier.size() > 0) { - List initiators = new ArrayList<>(); - for (String hostIdentifier : hostsIdentifier) { - Initiator initiator = new Initiator(); - initiator.setName(hostIdentifier); - initiators.add(initiator); - } - igroup.setInitiators(initiators); - } - accessGroupRequest.setIgroup(igroup); - s_logger.debug("createSANAccessGroupRequest: request: " + accessGroupRequest); - return accessGroupRequest; - } - @Override public boolean maintain(DataStore store) { - _storagePoolAutomation.maintain(store); - return _dataStoreHelper.maintain(store); + logger.info("Placing storage pool {} in maintenance mode", store); + if (_storagePoolAutomation.maintain(store)) { + return _dataStoreHelper.maintain(store); + } else { + return false; + } } @Override public boolean cancelMaintain(DataStore store) { - _storagePoolAutomation.cancelMaintain(store); - return _dataStoreHelper.cancelMaintain(store); + logger.info("Cancelling storage pool maintenance for {}", store); + if (_dataStoreHelper.cancelMaintain(store)) { + return _storagePoolAutomation.cancelMaintain(store); + } else { + return false; + } } @Override @@ -543,6 +480,24 @@ public boolean deleteDataStore(DataStore store) { s_logger.info("deleteDataStore: Successfully deleted access groups for storage pool '{}'", storagePool.getName()); + // Call deleteStorageVolume to delete the underlying ONTAP volume + s_logger.info("deleteDataStore: Deleting ONTAP volume for storage pool '{}'", storagePool.getName()); + Volume volume = new Volume(); + volume.setUuid(details.get(Constants.VOLUME_UUID)); + volume.setName(details.get(Constants.VOLUME_NAME)); + try { + if (volume.getUuid() == null || volume.getUuid().isEmpty() || volume.getName() == null || volume.getName().isEmpty()) { + s_logger.error("deleteDataStore: Volume UUID/Name not found in details for storage pool id: {}, cannot delete volume", storagePoolId); + throw new CloudRuntimeException("Volume UUID/Name not found in details, cannot delete ONTAP volume"); + } + storageStrategy.deleteStorageVolume(volume); + s_logger.info("deleteDataStore: Successfully deleted ONTAP volume '{}' (UUID: {}) for storage pool '{}'", + volume.getName(), volume.getUuid(), storagePool.getName()); + } catch (Exception e) { + s_logger.error("deleteDataStore: Exception while retrieving volume UUID for storage pool id: {}. Error: {}", + storagePoolId, e.getMessage(), e); + } + } catch (Exception e) { s_logger.error("deleteDataStore: Failed to delete access groups for storage pool id: {}. Error: {}", storagePoolId, e.getMessage(), e); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index bb25928438c6..7b20c946d6af 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -108,33 +108,55 @@ public boolean connect() { if (svms != null && svms.getRecords() != null && !svms.getRecords().isEmpty()) { svm = svms.getRecords().get(0); } else { - throw new CloudRuntimeException("No SVM found on the ONTAP cluster by the name" + svmName + "."); + s_logger.error("No SVM found on the ONTAP cluster by the name" + svmName + "."); + return false; } // Validations s_logger.info("Validating SVM state and protocol settings..."); if (!Objects.equals(svm.getState(), Constants.RUNNING)) { s_logger.error("SVM " + svmName + " is not in running state."); - throw new CloudRuntimeException("SVM " + svmName + " is not in running state."); + return false; } if (Objects.equals(storage.getProtocol(), Constants.NFS) && !svm.getNfsEnabled()) { s_logger.error("NFS protocol is not enabled on SVM " + svmName); - throw new CloudRuntimeException("NFS protocol is not enabled on SVM " + svmName); + return false; } else if (Objects.equals(storage.getProtocol(), Constants.ISCSI) && !svm.getIscsiEnabled()) { s_logger.error("iSCSI protocol is not enabled on SVM " + svmName); - throw new CloudRuntimeException("iSCSI protocol is not enabled on SVM " + svmName); + return false; } - + // TODO: Implement logic to select appropriate aggregate based on storage requirements List aggrs = svm.getAggregates(); if (aggrs == null || aggrs.isEmpty()) { s_logger.error("No aggregates are assigned to SVM " + svmName); - throw new CloudRuntimeException("No aggregates are assigned to SVM " + svmName); + return false; + } + // Set the aggregates which are according to the storage requirements + for (Aggregate aggr : aggrs) { + s_logger.debug("Found aggregate: " + aggr.getName() + " with UUID: " + aggr.getUuid()); + Aggregate aggrResp = aggregateFeignClient.getAggregateByUUID(authHeader, aggr.getUuid()); + if (!Objects.equals(aggrResp.getState(), Aggregate.StateEnum.ONLINE)) { + s_logger.warn("Aggregate " + aggr.getName() + " is not in online state. Skipping this aggregate."); + continue; + } else if (aggrResp.getSpace() == null || aggrResp.getAvailableBlockStorageSpace() == null || + aggrResp.getAvailableBlockStorageSpace() <= storage.getSize().doubleValue()) { + s_logger.warn("Aggregate " + aggr.getName() + " does not have sufficient available space. Skipping this aggregate."); + continue; + } + s_logger.info("Selected aggregate: " + aggr.getName() + " for volume operations."); + this.aggregates = List.of(aggr); + break; + } + if (this.aggregates == null || this.aggregates.isEmpty()) { + s_logger.error("No suitable aggregates found on SVM " + svmName + " for volume creation."); + return false; } this.aggregates = aggrs; s_logger.info("Successfully connected to ONTAP cluster and validated ONTAP details provided"); } catch (Exception e) { - throw new CloudRuntimeException("Failed to connect to ONTAP cluster: " + e.getMessage(), e); + s_logger.error("Failed to connect to ONTAP cluster: " + e.getMessage(), e); + return false; } return true; } @@ -472,7 +494,17 @@ public String getNetworkInterface() { * * @param cloudstackVolume the CloudStack volume to delete */ - abstract void deleteCloudStackVolume(CloudStackVolume cloudstackVolume); + abstract public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume); + + /** + * Method encapsulates the behavior based on the opted protocol in subclasses. + * it is going to mimic + * cloneLun for iSCSI, FC protocols + * cloneFile for NFS3.0 and NFS4.1 protocols + * cloneNameSpace for Nvme/TCP and Nvme/FC protocol + * @param cloudstackVolume the CloudStack volume to copy + */ + abstract public void copyCloudStackVolume(CloudStackVolume cloudstackVolume); /** * Method encapsulates the behavior based on the opted protocol in subclasses. @@ -527,18 +559,28 @@ public String getNetworkInterface() { * Method encapsulates the behavior based on the opted protocol in subclasses * lunMap for iSCSI and FC protocols * //TODO for Nvme/TCP and Nvme/FC protocols - * @param values + * @param values map including SVM name, LUN name, and igroup name + * @return map containing logical unit number for the new/existing mapping */ - abstract public void enableLogicalAccess(Map values); + abstract public Map enableLogicalAccess(Map values); /** * Method encapsulates the behavior based on the opted protocol in subclasses * lunUnmap for iSCSI and FC protocols * //TODO for Nvme/TCP and Nvme/FC protocols - * @param values + * @param values map including LUN UUID and iGroup UUID */ abstract public void disableLogicalAccess(Map values); + /** + * Method encapsulates the behavior based on the opted protocol in subclasses + * lunMap lookup for iSCSI/FC protocols (GET-only, no side-effects) + * //TODO for Nvme/TCP and Nvme/FC protocols + * @param values map with SVM name, LUN name, and igroup name + * @return map containing logical unit number if mapping exists; otherwise null + */ + abstract public Map getLogicalAccess(Map values); + private Boolean jobPollForSuccess(String jobUUID) { //Create URI for GET Job API int jobRetryCount = 0; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java index 861d22ff68d9..9657edc614fa 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java @@ -110,10 +110,15 @@ CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume) { } @Override - void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { + public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { //TODO } + @Override + public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) { + + } + @Override public CloudStackVolume getCloudStackVolume(Map cloudStackVolumeMap) { //TODO @@ -194,8 +199,9 @@ public AccessGroup getAccessGroup(Map values) { } @Override - public void enableLogicalAccess(Map values) { + public Map enableLogicalAccess(Map values) { //TODO + return null; } @Override @@ -203,6 +209,12 @@ public void disableLogicalAccess(Map values) { //TODO } + @Override + public Map getLogicalAccess(Map values) { + // NAS does not use LUN mapping; nothing to fetch + return null; + } + private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) { s_logger.info("Creating export policy: {} for SVM: {}", policy, svmName); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 47f529968deb..fd1f2bb6b84a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -27,13 +27,10 @@ import org.apache.cloudstack.storage.feign.client.SANFeignClient; import org.apache.cloudstack.storage.feign.model.Igroup; import org.apache.cloudstack.storage.feign.model.Initiator; -import org.apache.cloudstack.storage.feign.model.*; import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.feign.model.Lun; -import org.apache.cloudstack.storage.feign.model.Igroup; import org.apache.cloudstack.storage.feign.model.LunMap; -import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.feign.model.response.OntapResponse; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; @@ -103,8 +100,60 @@ CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume) { } @Override - void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { - //TODO + public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { + if (cloudstackVolume == null || cloudstackVolume.getLun() == null) { + s_logger.error("deleteCloudStackVolume: Lun deletion failed. Invalid request: {}", cloudstackVolume); + throw new CloudRuntimeException("deleteCloudStackVolume : Failed to delete Lun, invalid request"); + } + s_logger.info("deleteCloudStackVolume : Deleting Lun: {}", cloudstackVolume.getLun().getName()); + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + Map queryParams = Map.of("allow_delete_while_mapped", "true"); + try { + sanFeignClient.deleteLun(authHeader, cloudstackVolume.getLun().getUuid(), queryParams); + } catch (Exception ex) { + String errMsg = ex.getMessage(); + if (errMsg != null && (errMsg.contains("entry doesn't exist") + || errMsg.contains("does not exist") + || errMsg.contains("not found") + || errMsg.contains("status 404"))) { + s_logger.warn("deleteCloudStackVolume: Lun {} does not exist ({}), skipping deletion", cloudstackVolume.getLun().getName(), errMsg); + return; + } + throw ex; + } + s_logger.info("deleteCloudStackVolume: Lun deleted successfully. LunName: {}", cloudstackVolume.getLun().getName()); + } catch (Exception e) { + s_logger.error("Exception occurred while deleting Lun: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage()); + throw new CloudRuntimeException("Failed to delete Lun: " + e.getMessage()); + } + } + + @Override + public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) { + s_logger.debug("copyCloudStackVolume: Creating clone of the cloudstack volume: {}", cloudstackVolume.getLun().getName()); + if (cloudstackVolume == null || cloudstackVolume.getLun() == null) { + s_logger.error("copyCloudStackVolume: Lun clone creation failed. Invalid request: {}", cloudstackVolume); + throw new CloudRuntimeException("copyCloudStackVolume : Failed to create Lun clone, invalid request"); + } + + try { + // Get AuthHeader + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + // Create URI for lun clone creation + Lun lunCloneRequest = cloudstackVolume.getLun(); + Lun.Clone clone = new Lun.Clone(); + Lun.Source source = new Lun.Source(); + source.setName(cloudstackVolume.getLun().getName()); + clone.setSource(source); + lunCloneRequest.setClone(clone); + String lunCloneName = cloudstackVolume.getLun().getName() + "_clone"; + lunCloneRequest.setName(lunCloneName); + sanFeignClient.createLun(authHeader, true, lunCloneRequest); + } catch (Exception e) { + s_logger.error("Exception occurred while creating Lun clone: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage()); + throw new CloudRuntimeException("Failed to create Lun clone: " + e.getMessage()); + } } @Override @@ -117,19 +166,17 @@ public CloudStackVolume getCloudStackVolume(Map values) { } String svmName = values.get(Constants.SVM_DOT_NAME); String lunName = values.get(Constants.NAME); - if(svmName == null || lunName == null || svmName.isEmpty() || lunName.isEmpty()) { + if (svmName == null || lunName == null || svmName.isEmpty() || lunName.isEmpty()) { s_logger.error("getCloudStackVolume: get Lun failed. Invalid svm:{} or Lun name: {}", svmName, lunName); throw new CloudRuntimeException("getCloudStackVolume : Failed to get Lun, invalid request"); } try { - // Get AuthHeader String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - // get Igroup Map queryParams = Map.of(Constants.SVM_DOT_NAME, svmName, Constants.NAME, lunName); OntapResponse lunResponse = sanFeignClient.getLunResponse(authHeader, queryParams); - if (lunResponse == null || lunResponse.getRecords() == null || lunResponse.getRecords().size() == 0) { - s_logger.error("getCloudStackVolume: Failed to fetch Lun"); - throw new CloudRuntimeException("getCloudStackVolume: Failed to fetch Lun"); + if (lunResponse == null || lunResponse.getRecords() == null || lunResponse.getRecords().isEmpty()) { + s_logger.warn("getCloudStackVolume: Lun '{}' on SVM '{}' not found. Returning null.", lunName, svmName); + return null; } Lun lun = lunResponse.getRecords().get(0); s_logger.debug("getCloudStackVolume: Lun Details : {}", lun); @@ -139,16 +186,22 @@ public CloudStackVolume getCloudStackVolume(Map values) { cloudStackVolume.setLun(lun); return cloudStackVolume; } catch (Exception e) { - s_logger.error("Exception occurred while fetching Lun, Exception: {}", e.getMessage()); - throw new CloudRuntimeException("Failed to fetch Lun details: " + e.getMessage()); + String errMsg = e.getMessage(); + if (errMsg != null && errMsg.contains("not found")) { + s_logger.warn("getCloudStackVolume: Lun '{}' on SVM '{}' not found ({}). Returning null.", lunName, svmName, errMsg); + return null; + } + s_logger.error("Exception occurred while fetching Lun, Exception: {}", errMsg); + throw new CloudRuntimeException("Failed to fetch Lun details: " + errMsg); } } @Override public AccessGroup createAccessGroup(AccessGroup accessGroup) { s_logger.info("createAccessGroup : Create Igroup"); + String igroupName = "unknown"; s_logger.debug("createAccessGroup : Creating Igroup with access group request {} ", accessGroup); - if (accessGroup == null || accessGroup.getIgroup() == null) { + if (accessGroup == null) { s_logger.error("createAccessGroup: Igroup creation failed. Invalid request: {}", accessGroup); throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, invalid request"); } @@ -351,38 +404,42 @@ public AccessGroup getAccessGroup(Map values) { throw new CloudRuntimeException("getAccessGroup : get Igroup Failed, invalid request"); } String svmName = values.get(Constants.SVM_DOT_NAME); - String igroupName = values.get(Constants.IGROUP_DOT_NAME); - if(svmName == null || igroupName == null || svmName.isEmpty() || igroupName.isEmpty()) { + String igroupName = values.get(Constants.NAME); + if (svmName == null || igroupName == null || svmName.isEmpty() || igroupName.isEmpty()) { s_logger.error("getAccessGroup: get Igroup failed. Invalid svm:{} or igroup name: {}", svmName, igroupName); throw new CloudRuntimeException("getAccessGroup : Failed to get Igroup, invalid request"); } try { - // Get AuthHeader String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - // get Igroup - Map queryParams = Map.of(Constants.SVM_DOT_NAME, svmName, Constants.IGROUP_DOT_NAME, igroupName); + Map queryParams = Map.of(Constants.SVM_DOT_NAME, svmName, Constants.NAME, igroupName, Constants.FIELDS, Constants.INITIATORS); OntapResponse igroupResponse = sanFeignClient.getIgroupResponse(authHeader, queryParams); - if (igroupResponse == null || igroupResponse.getRecords() == null || igroupResponse.getRecords().size() == 0) { - s_logger.error("getAccessGroup: Failed to fetch Igroup"); - throw new CloudRuntimeException("Failed to fetch Igroup"); + if (igroupResponse == null || igroupResponse.getRecords() == null || igroupResponse.getRecords().isEmpty()) { + s_logger.warn("getAccessGroup: Igroup '{}' not found on SVM '{}'. Returning null.", igroupName, svmName); + return null; } Igroup igroup = igroupResponse.getRecords().get(0); AccessGroup accessGroup = new AccessGroup(); accessGroup.setIgroup(igroup); return accessGroup; } catch (Exception e) { - s_logger.error("Exception occurred while fetching Igroup, Exception: {}", e.getMessage()); - throw new CloudRuntimeException("Failed to fetch Igroup details: " + e.getMessage()); + String errMsg = e.getMessage(); + if (errMsg != null && errMsg.contains("not found")) { + s_logger.warn("getAccessGroup: Igroup '{}' not found on SVM '{}' ({}). Returning null.", igroupName, svmName, errMsg); + return null; + } + s_logger.error("Exception occurred while fetching Igroup, Exception: {}", errMsg); + throw new CloudRuntimeException("Failed to fetch Igroup details: " + errMsg); } } - public void enableLogicalAccess(Map values) { + public Map enableLogicalAccess(Map values) { s_logger.info("enableLogicalAccess : Create LunMap"); s_logger.debug("enableLogicalAccess : Creating LunMap with values {} ", values); + Map response = null; String svmName = values.get(Constants.SVM_DOT_NAME); String lunName = values.get(Constants.LUN_DOT_NAME); String igroupName = values.get(Constants.IGROUP_DOT_NAME); - if(svmName == null || lunName == null || igroupName == null || svmName.isEmpty() || lunName.isEmpty() || igroupName.isEmpty()) { + if (svmName == null || lunName == null || igroupName == null || svmName.isEmpty() || lunName.isEmpty() || igroupName.isEmpty()) { s_logger.error("enableLogicalAccess: LunMap creation failed. Invalid request values: {}", values); throw new CloudRuntimeException("enableLogicalAccess : Failed to create LunMap, invalid request"); } @@ -402,18 +459,41 @@ public void enableLogicalAccess(Map values) { Igroup igroup = new Igroup(); igroup.setName(igroupName); lunMapRequest.setIgroup(igroup); - OntapResponse createdLunMap = sanFeignClient.createLunMap(authHeader, true, lunMapRequest); - if (createdLunMap == null || createdLunMap.getRecords() == null || createdLunMap.getRecords().size() == 0) { - s_logger.error("enableLogicalAccess: LunMap failed for Lun: {} and igroup: {}", lunName, igroupName); - throw new CloudRuntimeException("Failed to perform LunMap for Lun: " + lunName + " and igroup: " + igroupName); + try { + sanFeignClient.createLunMap(authHeader, true, lunMapRequest); + } catch (Exception feignEx) { + String errMsg = feignEx.getMessage(); + if (errMsg != null && errMsg.contains(("LUN already mapped to this group"))) { + s_logger.warn("enableLogicalAccess: LunMap for Lun: {} and igroup: {} already exists.", lunName, igroupName); + } else { + s_logger.error("enableLogicalAccess: Exception during Feign call: {}", feignEx.getMessage(), feignEx); + throw feignEx; + } + } + // Get the LunMap details + OntapResponse lunMapResponse = null; + try { + lunMapResponse = sanFeignClient.getLunMapResponse(authHeader, + Map.of( + Constants.SVM_DOT_NAME, svmName, + Constants.LUN_DOT_NAME, lunName, + Constants.IGROUP_DOT_NAME, igroupName, + Constants.FIELDS, Constants.LOGICAL_UNIT_NUMBER + )); + response = Map.of( + Constants.LOGICAL_UNIT_NUMBER, lunMapResponse.getRecords().get(0).getLogicalUnitNumber().toString() + ); + } catch (Exception e) { + s_logger.error("enableLogicalAccess: Failed to fetch LunMap details for Lun: {} and igroup: {}, Exception: {}", lunName, igroupName, e); + throw new CloudRuntimeException("Failed to fetch LunMap details for Lun: " + lunName + " and igroup: " + igroupName); } - LunMap lunMap = createdLunMap.getRecords().get(0); - s_logger.debug("enableLogicalAccess: LunMap created successfully, LunMap: {}", lunMap); + s_logger.debug("enableLogicalAccess: LunMap created successfully, LunMap: {}", lunMapResponse.getRecords().get(0)); s_logger.info("enableLogicalAccess: LunMap created successfully."); } catch (Exception e) { - s_logger.error("Exception occurred while creating LunMap, Exception: {}", e); + s_logger.error("Exception occurred while creating LunMap", e); throw new CloudRuntimeException("Failed to create LunMap: " + e.getMessage()); } + return response; } public void disableLogicalAccess(Map values) { @@ -421,19 +501,53 @@ public void disableLogicalAccess(Map values) { s_logger.debug("disableLogicalAccess : Deleting LunMap with values {} ", values); String lunUUID = values.get(Constants.LUN_DOT_UUID); String igroupUUID = values.get(Constants.IGROUP_DOT_UUID); - if(lunUUID == null || igroupUUID == null || lunUUID.isEmpty() || igroupUUID.isEmpty()) { + if (lunUUID == null || igroupUUID == null || lunUUID.isEmpty() || igroupUUID.isEmpty()) { s_logger.error("disableLogicalAccess: LunMap deletion failed. Invalid request values: {}", values); throw new CloudRuntimeException("disableLogicalAccess : Failed to delete LunMap, invalid request"); } try { - // Get AuthHeader String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); - // LunMap delete sanFeignClient.deleteLunMap(authHeader, lunUUID, igroupUUID); s_logger.info("disableLogicalAccess: LunMap deleted successfully."); } catch (Exception e) { - s_logger.error("Exception occurred while deleting LunMap, Exception: {}", e); - throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage()); + String errMsg = e.getMessage(); + if (errMsg != null && errMsg.contains("not found")) { + s_logger.warn("disableLogicalAccess: LunMap with Lun UUID: {} and igroup UUID: {} does not exist ({}), skipping deletion", lunUUID, igroupUUID, errMsg); + return; + } + s_logger.error("Exception occurred while deleting LunMap", e); + throw new CloudRuntimeException("Failed to delete LunMap: " + errMsg); + } + } + + // GET-only helper: fetch LUN-map and return logical unit number if it exists; otherwise return null + public Map getLogicalAccess(Map values) { + s_logger.info("getLogicalAccess : Fetch LunMap"); + s_logger.debug("getLogicalAccess : Fetching LunMap with values {} ", values); + String svmName = values.get(Constants.SVM_DOT_NAME); + String lunName = values.get(Constants.LUN_DOT_NAME); + String igroupName = values.get(Constants.IGROUP_DOT_NAME); + if (svmName == null || lunName == null || igroupName == null || svmName.isEmpty() || lunName.isEmpty() || igroupName.isEmpty()) { + s_logger.error("getLogicalAccess: Invalid request values: {}", values); + throw new CloudRuntimeException("getLogicalAccess : Invalid request"); } + try { + String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); + OntapResponse lunMapResponse = sanFeignClient.getLunMapResponse(authHeader, + Map.of( + Constants.SVM_DOT_NAME, svmName, + Constants.LUN_DOT_NAME, lunName, + Constants.IGROUP_DOT_NAME, igroupName, + Constants.FIELDS, Constants.LOGICAL_UNIT_NUMBER + )); + if (lunMapResponse != null && lunMapResponse.getRecords() != null && !lunMapResponse.getRecords().isEmpty()) { + String lunNumber = lunMapResponse.getRecords().get(0).getLogicalUnitNumber() != null ? + lunMapResponse.getRecords().get(0).getLogicalUnitNumber().toString() : null; + return lunNumber != null ? Map.of(Constants.LOGICAL_UNIT_NUMBER, lunNumber) : null; + } + } catch (Exception e) { + s_logger.warn("getLogicalAccess: LunMap not found for Lun: {} and igroup: {} ({}).", lunName, igroupName, e.getMessage()); + } + return null; } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java index e71a26577fa9..43f8511967e7 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java @@ -53,9 +53,9 @@ public class Constants { // Query params public static final String NAME = "name"; public static final String FIELDS = "fields"; + public static final String INITIATORS = "initiators"; public static final String AGGREGATES = "aggregates"; public static final String STATE = "state"; - public static final String SVMDOTNAME = "svm.name"; public static final String DATA_NFS = "data_nfs"; public static final String DATA_ISCSI = "data_iscsi"; public static final String IP_ADDRESS = "ip.address"; @@ -81,6 +81,7 @@ public class Constants { public static final String LUN_DOT_NAME = "lun.name"; public static final String IQN = "iqn"; public static final String LUN_DOT_UUID = "lun.uuid"; + public static final String LOGICAL_UNIT_NUMBER = "logical_unit_number"; public static final String IGROUP_DOT_NAME = "igroup.name"; public static final String IGROUP_DOT_UUID = "igroup.uuid"; public static final String UNDERSCORE = "_"; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index 9532bf36fd9a..9d5eac8b2cea 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -19,19 +19,19 @@ package org.apache.cloudstack.storage.utils; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.storage.ScopeType; -import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.feign.model.LunSpace; import org.apache.cloudstack.storage.feign.model.OntapStorage; import org.apache.cloudstack.storage.feign.model.Svm; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; -import org.apache.cloudstack.storage.feign.model.Igroup; -import org.apache.cloudstack.storage.feign.model.Initiator; -import org.apache.cloudstack.storage.provider.StorageProviderFactory; -import org.apache.cloudstack.storage.service.StorageStrategy; +import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.service.model.ProtocolType; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -70,31 +70,27 @@ public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePo cloudStackVolumeRequest.setVolumeInfo(volumeObject); break; case ISCSI: - cloudStackVolumeRequest = new CloudStackVolume(); - Lun lunRequest = new Lun(); Svm svm = new Svm(); svm.setName(details.get(Constants.SVM_NAME)); + cloudStackVolumeRequest = new CloudStackVolume(); + Lun lunRequest = new Lun(); lunRequest.setSvm(svm); LunSpace lunSpace = new LunSpace(); lunSpace.setSize(volumeObject.getSize()); lunRequest.setSpace(lunSpace); //Lun name is full path like in unified "/vol/VolumeName/LunName" - String lunFullName = Constants.VOLUME_PATH_PREFIX + storagePool.getName() + Constants.SLASH + volumeObject.getName(); + String lunName = volumeObject.getName().replace(Constants.HYPHEN, Constants.UNDERSCORE); + if(!isValidName(lunName)) { + String errMsg = "createAsync: Invalid dataObject name [" + lunName + "]. It must start with a letter and can only contain letters, digits, and underscores, and be up to 200 characters long."; + throw new InvalidParameterValueException(errMsg); + } + String lunFullName = getLunName(storagePool.getName(), lunName); lunRequest.setName(lunFullName); - String hypervisorType = storagePool.getHypervisor().name(); - String osType = null; - switch (hypervisorType) { - case Constants.KVM: - osType = Lun.OsTypeEnum.LINUX.getValue(); - break; - default: - String errMsg = "createCloudStackVolume : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage"; - s_logger.error(errMsg); - throw new CloudRuntimeException(errMsg); - } + String osType = getOSTypeFromHypervisor(storagePool.getHypervisor().name()); lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType)); + cloudStackVolumeRequest.setLun(lunRequest); break; default: @@ -104,6 +100,15 @@ public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePo return cloudStackVolumeRequest; } + public static boolean isValidName(String name) { + // Check for null and length constraint first + if (name == null || name.length() > 200) { + return false; + } + // Regex: Starts with a letter, followed by letters, digits, or underscores + return name.matches(Constants.ONTAP_NAME_REGEX); + } + public static String getOSTypeFromHypervisor(String hypervisorType){ switch (hypervisorType) { case Constants.KVM: @@ -137,7 +142,7 @@ public static StorageStrategy getStrategyByStoragePoolDetails(Map Date: Thu, 22 Jan 2026 13:24:08 +0530 Subject: [PATCH 024/271] CSTACKEX-46: Fixed a check style issue which got introduced during rebase and renamed plugin name --- .../cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java | 3 --- .../storage/provider/OntapPrimaryDatastoreProvider.java | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 04a2300b5578..98c34ef94b72 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -31,9 +31,6 @@ import com.cloud.storage.ScopeType; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; -import com.cloud.storage.dao.VMTemplateDao; -//import com.cloud.storage.VMTemplateStoragePoolVO; -import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java index 91bfd0a8584c..1aadec79b3ab 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java @@ -65,7 +65,7 @@ public HypervisorHostListener getHostListener() { @Override public String getName() { s_logger.trace("OntapPrimaryDatastoreProvider: getName: Called"); - return "ONTAP Primary Datastore Provider"; + return "ONTAP"; } @Override From 3eea5c442e3f4416562131975c0dd9b7cd614614 Mon Sep 17 00:00:00 2001 From: "Locharla, Sandeep" Date: Thu, 22 Jan 2026 15:04:51 +0530 Subject: [PATCH 025/271] CSTACKEX-46: Fixed a check style issue which got introduced during rebase in agent code --- .../com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java index 155e97b90558..433e173fbbf9 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java @@ -37,7 +37,6 @@ import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.OutputInterpreter; -import com.cloud.utils.script.OutputInterpreter.AllLinesParser; import com.cloud.utils.script.Script; public class IscsiAdmStorageAdaptor implements StorageAdaptor { From 63addcd3e7046f5b99b3564f2d3b233128be1814 Mon Sep 17 00:00:00 2001 From: "Locharla, Sandeep" Date: Tue, 27 Jan 2026 12:45:14 +0530 Subject: [PATCH 026/271] CSTACKEX-46: Fixed a couple of issues observed while testing NFS3 storagePool create and delete --- .../OntapPrimaryDatastoreLifecycle.java | 28 +++++++++++++------ .../storage/service/StorageStrategy.java | 15 ++++++++-- 2 files changed, 32 insertions(+), 11 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index 6bbd88db14b0..eabd6482572c 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -415,6 +415,20 @@ private boolean validateProtocolSupportAndFetchHostsIdentifier(List host hostIdentifiers.add(host.getStorageUrl()); } break; + case NFS3: + String ip = ""; + for (HostVO host : hosts) { + if (host != null) { + ip = host.getStorageIpAddress() != null ? host.getStorageIpAddress().trim() : ""; + if (ip.isEmpty() && host.getPrivateIpAddress() != null || host.getPrivateIpAddress().trim().isEmpty()) { + return false; + } else { + ip = ip.isEmpty() ? host.getPrivateIpAddress().trim() : ip; + } + } + hostIdentifiers.add(ip); + } + break; default: throw new CloudRuntimeException("validateProtocolSupportAndFetchHostsIdentifier : Unsupported protocol: " + protocolType.name()); } @@ -471,15 +485,6 @@ public boolean deleteDataStore(DataStore store) { PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) store; primaryDataStoreInfo.setDetails(details); - // Create AccessGroup object with PrimaryDataStoreInfo - AccessGroup accessGroup = new AccessGroup(); - accessGroup.setPrimaryDataStoreInfo(primaryDataStoreInfo); - - // Call deleteAccessGroup - it will figure out scope, protocol, and all details internally - storageStrategy.deleteAccessGroup(accessGroup); - - s_logger.info("deleteDataStore: Successfully deleted access groups for storage pool '{}'", storagePool.getName()); - // Call deleteStorageVolume to delete the underlying ONTAP volume s_logger.info("deleteDataStore: Deleting ONTAP volume for storage pool '{}'", storagePool.getName()); Volume volume = new Volume(); @@ -497,6 +502,11 @@ public boolean deleteDataStore(DataStore store) { s_logger.error("deleteDataStore: Exception while retrieving volume UUID for storage pool id: {}. Error: {}", storagePoolId, e.getMessage(), e); } + AccessGroup accessGroup = new AccessGroup(); + accessGroup.setPrimaryDataStoreInfo(primaryDataStoreInfo); + // Delete access groups associated with this storage pool + storageStrategy.deleteAccessGroup(accessGroup); + s_logger.info("deleteDataStore: Successfully deleted access groups for storage pool '{}'", storagePool.getName()); } catch (Exception e) { s_logger.error("deleteDataStore: Failed to delete access groups for storage pool id: {}. Error: {}", diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index 7b20c946d6af..89ad712e9665 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -447,8 +447,19 @@ public String getNetworkInterface() { OntapResponse response = networkFeignClient.getNetworkIpInterfaces(authHeader, queryParams); if (response != null && response.getRecords() != null && !response.getRecords().isEmpty()) { - // For simplicity, return the first interface's name - IpInterface ipInterface = response.getRecords().get(0); + IpInterface ipInterface = null; + // For simplicity, return the first interface's name (Of IPv4 type for NFS3) + if (storage.getProtocol() == ProtocolType.ISCSI) { + ipInterface = response.getRecords().get(0); + } else if (storage.getProtocol() == ProtocolType.NFS3) { + for (IpInterface iface : response.getRecords()) { + if (iface.getIp().getAddress().contains(".")) { + ipInterface = iface; + break; + } + } + } + s_logger.info("Retrieved network interface: " + ipInterface.getIp().getAddress()); return ipInterface.getIp().getAddress(); } else { From dfc1ee97bf85dedf06c4005f0fa3854d26ef2d88 Mon Sep 17 00:00:00 2001 From: "Locharla, Sandeep" Date: Wed, 28 Jan 2026 20:27:55 +0530 Subject: [PATCH 027/271] CSTACKEX-46: Refactored driver class --- .../driver/OntapPrimaryDatastoreDriver.java | 460 ++++++++++-------- .../storage/service/SANStrategy.java | 21 + .../storage/service/StorageStrategy.java | 11 +- .../storage/service/UnifiedSANStrategy.java | 63 +++ 4 files changed, 359 insertions(+), 196 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index 98c34ef94b72..ea3b8a89673a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -50,10 +50,10 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.feign.model.Igroup; -import org.apache.cloudstack.storage.feign.model.Initiator; import org.apache.cloudstack.storage.feign.model.Lun; +import org.apache.cloudstack.storage.service.SANStrategy; import org.apache.cloudstack.storage.service.StorageStrategy; +import org.apache.cloudstack.storage.service.UnifiedSANStrategy; import org.apache.cloudstack.storage.service.model.AccessGroup; import org.apache.cloudstack.storage.service.model.CloudStackVolume; import org.apache.cloudstack.storage.service.model.ProtocolType; @@ -67,6 +67,19 @@ import java.util.HashMap; import java.util.Map; +/** + * Primary datastore driver for NetApp ONTAP storage systems. + * This driver handles volume lifecycle operations (create, delete, grant/revoke access) + * for both iSCSI (LUN-based) and NFS protocols against ONTAP storage backends. + * + * For iSCSI protocol: + * - Creates LUNs on ONTAP and maps them to initiator groups (igroups) + * - Manages LUN mappings for host access control + * + * For NFS protocol: + * - Delegates file operations to KVM host/libvirt + * - ONTAP volume/export management handled at storage pool creation time + */ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver { private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreDriver.class); @@ -84,7 +97,6 @@ public Map getCapabilities() { // TODO Set it to false once we start supporting snapshot feature mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.FALSE.toString()); mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.FALSE.toString()); - return mapCapabilities; } @@ -94,12 +106,31 @@ public DataTO getTO(DataObject data) { } @Override - public DataStoreTO getStoreTO(DataStore store) { return null; } + public DataStoreTO getStoreTO(DataStore store) { + return null; + } + /** + * Asynchronously creates a volume on the ONTAP storage system. + * + * For iSCSI protocol: + * - Creates a LUN on ONTAP via the SAN strategy + * - Stores LUN UUID and name in volume_details table for later reference + * - Creates a LUN mapping to the appropriate igroup (based on cluster/zone scope) + * - Sets the iSCSI path on the volume for host attachment + * + * For NFS protocol: + * - Associates the volume with the storage pool (actual file creation handled by hypervisor) + * + * @param dataStore The target data store (storage pool) + * @param dataObject The volume to create + * @param callback Callback to notify completion + */ @Override public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { CreateCmdResult createCmdResult = null; - String errMsg = null; + String errMsg; + if (dataStore == null) { throw new InvalidParameterValueException("createAsync: dataStore should not be null"); } @@ -109,54 +140,68 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet if (callback == null) { throw new InvalidParameterValueException("createAsync: callback should not be null"); } + try { - s_logger.info("createAsync: Started for data store name [{}] and data object name [{}] of type [{}]", dataStore.getName(), dataObject.getName(), dataObject.getType()); + s_logger.info("createAsync: Started for data store name [{}] and data object name [{}] of type [{}]", + dataStore.getName(), dataObject.getName(), dataObject.getType()); + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if (storagePool == null) { - s_logger.error("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); - throw new CloudRuntimeException("createCloudStackVolume : Storage Pool not found for id: " + dataStore.getId()); + s_logger.error("createAsync: Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("createAsync: Storage Pool not found for id: " + dataStore.getId()); } + Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); if (dataObject.getType() == DataObjectType.VOLUME) { VolumeInfo volInfo = (VolumeInfo) dataObject; - // Create LUN/backing for volume and record relevant details - CloudStackVolume created = createCloudStackVolume(dataStore, volInfo); - // Immediately ensure LUN-map exists and update VolumeVO path - if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { - String svmName = details.get(Constants.SVM_NAME); - String lunName = volumeDetailsDao.findDetail(volInfo.getId(), Constants.LUN_DOT_NAME) != null ? - volumeDetailsDao.findDetail(volInfo.getId(), Constants.LUN_DOT_NAME).getValue() : null; - if (lunName == null) { - // Fallback from returned LUN - lunName = created != null && created.getLun() != null ? created.getLun().getName() : null; - } - if (lunName == null) { - throw new CloudRuntimeException("createAsync: Missing LUN name for volume " + volInfo.getId()); - } - long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? storagePool.getClusterId() : storagePool.getDataCenterId(); - String lunNumber = ensureLunMapped(storagePool, svmName, lunName, scopeId); + // Create the backend storage object (LUN for iSCSI, no-op for NFS) + CloudStackVolume created = createCloudStackVolume(dataStore, volInfo, details); - VolumeVO volumeVO = volumeDao.findById(volInfo.getId()); - if (volumeVO != null) { + // Update CloudStack volume record with storage pool association and protocol-specific details + VolumeVO volumeVO = volumeDao.findById(volInfo.getId()); + if (volumeVO != null) { + volumeVO.setPoolType(storagePool.getPoolType()); + volumeVO.setPoolId(storagePool.getId()); + + if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + String svmName = details.get(Constants.SVM_NAME); + String lunName = created != null && created.getLun() != null ? created.getLun().getName() : null; + if (lunName == null) { + throw new CloudRuntimeException("createAsync: Missing LUN name for volume " + volInfo.getId()); + } + + // Determine scope ID based on storage pool scope (cluster or zone level igroup) + long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) + ? storagePool.getClusterId() + : storagePool.getDataCenterId(); + + // Persist LUN details for future operations (delete, grant/revoke access) + volumeDetailsDao.addDetail(volInfo.getId(), Constants.LUN_DOT_UUID, created.getLun().getUuid(), false); + volumeDetailsDao.addDetail(volInfo.getId(), Constants.LUN_DOT_NAME, lunName, false); + if (created.getLun().getUuid() != null) { + volumeVO.setFolder(created.getLun().getUuid()); + } + + // Create LUN-to-igroup mapping and retrieve the assigned LUN ID + UnifiedSANStrategy sanStrategy = (UnifiedSANStrategy) Utility.getStrategyByStoragePoolDetails(details); + String accessGroupName = Utility.getIgroupName(svmName, storagePool.getScope(), scopeId); + String lunNumber = sanStrategy.ensureLunMapped(svmName, lunName, accessGroupName); + + // Construct iSCSI path: // format for KVM/libvirt attachment String iscsiPath = Constants.SLASH + storagePool.getPath() + Constants.SLASH + lunNumber; volumeVO.set_iScsiName(iscsiPath); volumeVO.setPath(iscsiPath); - volumeVO.setPoolType(storagePool.getPoolType()); - volumeVO.setPoolId(storagePool.getId()); - volumeDao.update(volumeVO.getId(), volumeVO); s_logger.info("createAsync: Volume [{}] iSCSI path set to {}", volumeVO.getId(), iscsiPath); + + } else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + // For NFS, the hypervisor handles file creation; we only track pool association + s_logger.info("createAsync: Managed NFS volume [{}] associated with pool {}", + volumeVO.getId(), storagePool.getId()); } - } else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { - // Ensure pool fields are recorded for managed NFS as well - VolumeVO volumeVO = volumeDao.findById(volInfo.getId()); - if (volumeVO != null) { - volumeVO.setPoolType(storagePool.getPoolType()); - volumeVO.setPoolId(storagePool.getId()); - volumeDao.update(volumeVO.getId(), volumeVO); - s_logger.info("createAsync: Managed NFS volume [{}] associated with pool {}", volumeVO.getId(), storagePool.getId()); - } + + volumeDao.update(volumeVO.getId(), volumeVO); } createCmdResult = new CreateCmdResult(null, new Answer(null, true, null)); } else { @@ -177,82 +222,46 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet } } - private CloudStackVolume createCloudStackVolume(DataStore dataStore, DataObject dataObject) { + /** + * Creates a CloudStack volume on the ONTAP backend using the appropriate storage strategy. + * + * @param dataStore The target data store + * @param dataObject The volume to create + * @param details Storage pool configuration details + * @return CloudStackVolume containing the created backend object (LUN for iSCSI) + */ + private CloudStackVolume createCloudStackVolume(DataStore dataStore, DataObject dataObject, Map details) { StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if (storagePool == null) { s_logger.error("createCloudStackVolume: Storage Pool not found for id: {}", dataStore.getId()); throw new CloudRuntimeException("createCloudStackVolume: Storage Pool not found for id: " + dataStore.getId()); } - Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); + StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); if (dataObject.getType() == DataObjectType.VOLUME) { VolumeInfo volumeObject = (VolumeInfo) dataObject; - CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, volumeObject); - CloudStackVolume cloudStackVolume = storageStrategy.createCloudStackVolume(cloudStackVolumeRequest); - if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL)) && cloudStackVolume.getLun() != null && cloudStackVolume.getLun().getName() != null) { - s_logger.info("createCloudStackVolume: iSCSI LUN object created for volume [{}]", volumeObject.getId()); - volumeDetailsDao.addDetail(volumeObject.getId(), Constants.LUN_DOT_UUID, cloudStackVolume.getLun().getUuid(), false); - volumeDetailsDao.addDetail(volumeObject.getId(), Constants.LUN_DOT_NAME, cloudStackVolume.getLun().getName(), false); - VolumeVO volumeVO = volumeDao.findById(volumeObject.getId()); - if (volumeVO != null) { - volumeVO.setPath(null); - if (cloudStackVolume.getLun().getUuid() != null) { - volumeVO.setFolder(cloudStackVolume.getLun().getUuid()); - } - volumeVO.setPoolType(storagePool.getPoolType()); - volumeVO.setPoolId(storagePool.getId()); - volumeDao.update(volumeVO.getId(), volumeVO); - } - } else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { - s_logger.info("createCloudStackVolume: Managed NFS object created for volume [{}]", volumeObject.getId()); - // For Managed NFS, set pool fields on Volume - VolumeVO volumeVO = volumeDao.findById(volumeObject.getId()); - if (volumeVO != null) { - volumeVO.setPoolType(storagePool.getPoolType()); - volumeVO.setPoolId(storagePool.getId()); - volumeDao.update(volumeVO.getId(), volumeVO); - } - } else { - String errMsg = "createCloudStackVolume: Volume creation failed for dataObject: " + volumeObject; - s_logger.error(errMsg); - throw new CloudRuntimeException(errMsg); - } - return cloudStackVolume; + return storageStrategy.createCloudStackVolume(cloudStackVolumeRequest); } else { throw new CloudRuntimeException("createCloudStackVolume: Unsupported DataObjectType: " + dataObject.getType()); } } - private String ensureLunMapped(StoragePoolVO storagePool, String svmName, String lunName, long scopeId) { - Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); - StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); - String accessGroupName = Utility.getIgroupName(svmName, storagePool.getScope(), scopeId); - - // Check existing map first. getLogicalAccess returns null (no exception) when map doesn't exist. - Map getMap = new HashMap<>(); - getMap.put(Constants.LUN_DOT_NAME, lunName); - getMap.put(Constants.SVM_DOT_NAME, svmName); - getMap.put(Constants.IGROUP_DOT_NAME, accessGroupName); - Map mapResp = storageStrategy.getLogicalAccess(getMap); - if (mapResp != null && mapResp.containsKey(Constants.LOGICAL_UNIT_NUMBER)) { - String lunNumber = mapResp.get(Constants.LOGICAL_UNIT_NUMBER); - s_logger.info("ensureLunMapped: Existing LunMap found for LUN [{}] in igroup [{}] with LUN number [{}]", lunName, accessGroupName, lunNumber); - return lunNumber; - } - // Create if not exists - Map enableMap = new HashMap<>(); - enableMap.put(Constants.LUN_DOT_NAME, lunName); - enableMap.put(Constants.SVM_DOT_NAME, svmName); - enableMap.put(Constants.IGROUP_DOT_NAME, accessGroupName); - Map response = storageStrategy.enableLogicalAccess(enableMap); - if (response == null || !response.containsKey(Constants.LOGICAL_UNIT_NUMBER)) { - throw new CloudRuntimeException("ensureLunMapped: Failed to map LUN [" + lunName + "] to iGroup [" + accessGroupName + "]"); - } - return response.get(Constants.LOGICAL_UNIT_NUMBER); - } - + /** + * Asynchronously deletes a volume from the ONTAP storage system. + * + * For iSCSI protocol: + * - Retrieves LUN details from volume_details table + * - Deletes the LUN from ONTAP (LUN mappings are automatically removed) + * + * For NFS protocol: + * - No ONTAP operation needed; file deletion handled by KVM host/libvirt + * + * @param store The data store containing the volume + * @param data The volume to delete + * @param callback Callback to notify completion + */ @Override public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback callback) { CommandResult commandResult = new CommandResult(); @@ -260,37 +269,45 @@ public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallbac if (store == null || data == null) { throw new CloudRuntimeException("deleteAsync: store or data is null"); } + if (data.getType() == DataObjectType.VOLUME) { StoragePoolVO storagePool = storagePoolDao.findById(store.getId()); - if(storagePool == null) { - s_logger.error("deleteAsync : Storage Pool not found for id: " + store.getId()); - throw new CloudRuntimeException("deleteAsync : Storage Pool not found for id: " + store.getId()); + if (storagePool == null) { + s_logger.error("deleteAsync: Storage Pool not found for id: " + store.getId()); + throw new CloudRuntimeException("deleteAsync: Storage Pool not found for id: " + store.getId()); } + Map details = storagePoolDetailsDao.listDetailsKeyPairs(store.getId()); + if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { - // ManagedNFS qcow2 backing file deletion handled by KVM host/libvirt; nothing to do via ONTAP REST. - s_logger.info("deleteAsync: ManagedNFS volume {} no-op ONTAP deletion", data.getId()); + // NFS file deletion is handled by the hypervisor; no ONTAP REST call needed + s_logger.info("deleteAsync: ManagedNFS volume {} - file deletion handled by hypervisor", data.getId()); + } else if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); VolumeInfo volumeObject = (VolumeInfo) data; - s_logger.info("deleteAsync: Deleting volume & LUN for volume id [{}]", volumeObject.getId()); + s_logger.info("deleteAsync: Deleting LUN for volume id [{}]", volumeObject.getId()); + + // Retrieve LUN identifiers stored during volume creation String lunName = volumeDetailsDao.findDetail(volumeObject.getId(), Constants.LUN_DOT_NAME).getValue(); String lunUUID = volumeDetailsDao.findDetail(volumeObject.getId(), Constants.LUN_DOT_UUID).getValue(); if (lunName == null) { throw new CloudRuntimeException("deleteAsync: Missing LUN name for volume " + volumeObject.getId()); } + CloudStackVolume delRequest = new CloudStackVolume(); Lun lun = new Lun(); lun.setName(lunName); lun.setUuid(lunUUID); delRequest.setLun(lun); storageStrategy.deleteCloudStackVolume(delRequest); - // Set the result + commandResult.setResult(null); commandResult.setSuccess(true); - s_logger.info("deleteAsync: Volume LUN [{}] deleted successfully", lunName); + s_logger.info("deleteAsync: LUN [{}] deleted successfully", lunName); + } else { - throw new CloudRuntimeException("deleteAsync: Unsupported protocol for deletion: " + details.get(Constants.PROTOCOL)); + throw new CloudRuntimeException("deleteAsync: Unsupported protocol: " + details.get(Constants.PROTOCOL)); } } } catch (Exception e) { @@ -326,23 +343,42 @@ public ChapInfo getChapInfo(DataObject dataObject) { return null; } + /** + * Grants a host access to a volume on the ONTAP storage system. + * + * For iSCSI protocol: + * - Validates that the host's iSCSI initiator (IQN) is present in the target igroup + * - Ensures the LUN is mapped to the igroup (creates mapping if not exists) + * - Updates the volume's iSCSI path with the assigned LUN ID + * + * For NFS protocol: + * - No explicit grant needed; NFS exports are configured at storage pool level + * + * @param dataObject The volume to grant access to + * @param host The host requesting access + * @param dataStore The data store containing the volume + * @return true if access was granted successfully + */ @Override public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { - if (dataStore == null) { - throw new InvalidParameterValueException("grantAccess: dataStore should not be null"); - } - if (dataObject == null) { - throw new InvalidParameterValueException("grantAccess: dataObject should not be null"); - } - if (host == null) { - throw new InvalidParameterValueException("grantAccess: host should not be null"); - } try { + if (dataStore == null) { + throw new InvalidParameterValueException("grantAccess: dataStore should not be null"); + } + if (dataObject == null) { + throw new InvalidParameterValueException("grantAccess: dataObject should not be null"); + } + if (host == null) { + throw new InvalidParameterValueException("grantAccess: host should not be null"); + } + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if (storagePool == null) { s_logger.error("grantAccess: Storage Pool not found for id: " + dataStore.getId()); - throw new CloudRuntimeException("grantAccess : Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("grantAccess: Storage Pool not found for id: " + dataStore.getId()); } + + // ONTAP managed storage only supports cluster and zone scoped pools if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) { s_logger.error("grantAccess: Only Cluster and Zone scoped primary storage is supported for storage Pool: " + storagePool.getName()); throw new CloudRuntimeException("grantAccess: Only Cluster and Zone scoped primary storage is supported for Storage Pool: " + storagePool.getName()); @@ -351,24 +387,36 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore if (dataObject.getType() == DataObjectType.VOLUME) { VolumeVO volumeVO = volumeDao.findById(dataObject.getId()); if (volumeVO == null) { - s_logger.error("grantAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); - throw new CloudRuntimeException("grantAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); + s_logger.error("grantAccess: CloudStack Volume not found for id: " + dataObject.getId()); + throw new CloudRuntimeException("grantAccess: CloudStack Volume not found for id: " + dataObject.getId()); } + Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); String svmName = details.get(Constants.SVM_NAME); String cloudStackVolumeName = volumeDetailsDao.findDetail(volumeVO.getId(), Constants.LUN_DOT_NAME).getValue(); long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? host.getClusterId() : host.getDataCenterId(); - // Validate initiator membership - validateHostInitiatorInIgroup(storagePool, svmName, scopeId, host); - // Ensure mapping exists - String lunNumber = ensureLunMapped(storagePool, svmName, cloudStackVolumeName, scopeId); - // Update Volume path if missing or changed - String iscsiPath = Constants.SLASH + storagePool.getPath() + Constants.SLASH + lunNumber; - if (volumeVO.getPath() == null || !volumeVO.getPath().equals(iscsiPath)) { - volumeVO.set_iScsiName(iscsiPath); - volumeVO.setPath(iscsiPath); + + if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { + UnifiedSANStrategy sanStrategy = (UnifiedSANStrategy) Utility.getStrategyByStoragePoolDetails(details); + String accessGroupName = Utility.getIgroupName(svmName, storagePool.getScope(), scopeId); + + // Verify host initiator is registered in the igroup before allowing access + if (!sanStrategy.validateInitiatorInAccessGroup(host.getStorageUrl(), svmName, accessGroupName)) { + throw new CloudRuntimeException("grantAccess: Host initiator [" + host.getStorageUrl() + + "] is not present in iGroup [" + accessGroupName + "]"); + } + + // Create or retrieve existing LUN mapping + String lunNumber = sanStrategy.ensureLunMapped(svmName, cloudStackVolumeName, accessGroupName); + + // Update volume path if changed (e.g., after migration or re-mapping) + String iscsiPath = Constants.SLASH + storagePool.getPath() + Constants.SLASH + lunNumber; + if (volumeVO.getPath() == null || !volumeVO.getPath().equals(iscsiPath)) { + volumeVO.set_iScsiName(iscsiPath); + volumeVO.setPath(iscsiPath); + } } - // Ensure pool fields are set (align with SolidFire) + volumeVO.setPoolType(storagePool.getPoolType()); volumeVO.setPoolId(storagePool.getId()); volumeDao.update(volumeVO.getId(), volumeVO); @@ -376,66 +424,62 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore s_logger.error("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess"); throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess"); } - } catch(Exception e){ + return true; + } catch (Exception e) { s_logger.error("grantAccess: Failed for dataObject [{}]: {}", dataObject, e.getMessage()); - throw new CloudRuntimeException("grantAccess: Failed with error :" + e.getMessage()); - } - return true; - } - - private void validateHostInitiatorInIgroup(StoragePoolVO storagePool, String svmName, long scopeId, Host host) { - Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); - StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); - String accessGroupName = Utility.getIgroupName(svmName, storagePool.getScope(), scopeId); - AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); - if (host == null || host.getStorageUrl() == null) { - throw new CloudRuntimeException("validateHostInitiatorInIgroup: host/initiator required but not provided"); - } - if (!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { - s_logger.error("validateHostInitiatorInIgroup: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); - throw new CloudRuntimeException("validateHostInitiatorInIgroup: initiator [" + host.getStorageUrl() + "] is not present in iGroup [" + accessGroupName + "]"); - } - } - - private boolean hostInitiatorFoundInIgroup(String hostInitiator, Igroup igroup) { - if(igroup != null && igroup.getInitiators() != null && hostInitiator != null && !hostInitiator.isEmpty()) { - for(Initiator initiator : igroup.getInitiators()) { - if(initiator.getName().equalsIgnoreCase(hostInitiator)) { - return true; - } - } + throw new CloudRuntimeException("grantAccess: Failed with error: " + e.getMessage(), e); } - return false; } + /** + * Revokes a host's access to a volume on the ONTAP storage system. + * + * For iSCSI protocol: + * - Validates the volume is not attached to an active VM + * - Removes the LUN mapping from the igroup + * + * For NFS protocol: + * - No explicit revoke needed; NFS exports remain at storage pool level + * + * @param dataObject The volume to revoke access from + * @param host The host losing access + * @param dataStore The data store containing the volume + */ @Override public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { - if (dataStore == null) { - throw new InvalidParameterValueException("revokeAccess: data store should not be null"); - } - if (dataObject == null) { - throw new InvalidParameterValueException("revokeAccess: data object should not be null"); - } - if (host == null) { - throw new InvalidParameterValueException("revokeAccess: host should not be null"); - } - if (dataObject.getType() == DataObjectType.VOLUME) { - Volume volume = volumeDao.findById(dataObject.getId()); - if (volume.getInstanceId() != null) { - VirtualMachine vm = vmDao.findById(volume.getInstanceId()); - if (vm != null && !Arrays.asList(VirtualMachine.State.Destroyed, VirtualMachine.State.Expunging, VirtualMachine.State.Error).contains(vm.getState())) { - s_logger.debug("revokeAccess: Volume [{}] is still attached to VM [{}] in state [{}], skipping revokeAccess", - dataObject.getId(), vm.getInstanceName(), vm.getState()); - return; + try { + if (dataStore == null) { + throw new InvalidParameterValueException("revokeAccess: dataStore should not be null"); + } + if (dataObject == null) { + throw new InvalidParameterValueException("revokeAccess: dataObject should not be null"); + } + if (host == null) { + throw new InvalidParameterValueException("revokeAccess: host should not be null"); + } + + // Safety check: don't revoke access if volume is still attached to an active VM + if (dataObject.getType() == DataObjectType.VOLUME) { + Volume volume = volumeDao.findById(dataObject.getId()); + if (volume.getInstanceId() != null) { + VirtualMachine vm = vmDao.findById(volume.getInstanceId()); + if (vm != null && !Arrays.asList( + VirtualMachine.State.Destroyed, + VirtualMachine.State.Expunging, + VirtualMachine.State.Error).contains(vm.getState())) { + s_logger.debug("revokeAccess: Volume [{}] is still attached to VM [{}] in state [{}], skipping revokeAccess", + dataObject.getId(), vm.getInstanceName(), vm.getState()); + return; + } } } - } - try { + StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); if (storagePool == null) { s_logger.error("revokeAccess: Storage Pool not found for id: " + dataStore.getId()); - throw new CloudRuntimeException("revokeAccess : Storage Pool not found for id: " + dataStore.getId()); + throw new CloudRuntimeException("revokeAccess: Storage Pool not found for id: " + dataStore.getId()); } + if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) { s_logger.error("revokeAccess: Only Cluster and Zone scoped primary storage is supported for storage Pool: " + storagePool.getName()); throw new CloudRuntimeException("revokeAccess: Only Cluster and Zone scoped primary storage is supported for Storage Pool: " + storagePool.getName()); @@ -444,22 +488,31 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) if (dataObject.getType() == DataObjectType.VOLUME) { VolumeVO volumeVO = volumeDao.findById(dataObject.getId()); if (volumeVO == null) { - s_logger.error("revokeAccess: Cloud Stack Volume not found for id: " + dataObject.getId()); - throw new CloudRuntimeException("revokeAccess : Cloud Stack Volume not found for id: " + dataObject.getId()); + s_logger.error("revokeAccess: CloudStack Volume not found for id: " + dataObject.getId()); + throw new CloudRuntimeException("revokeAccess: CloudStack Volume not found for id: " + dataObject.getId()); } revokeAccessForVolume(storagePool, volumeVO, host); } else { s_logger.error("revokeAccess: Invalid DataObjectType (" + dataObject.getType() + ") passed to revokeAccess"); throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to revokeAccess"); } - } catch(Exception e){ + } catch (Exception e) { s_logger.error("revokeAccess: Failed for dataObject [{}]: {}", dataObject, e.getMessage()); - throw new CloudRuntimeException("revokeAccess: Failed with error :" + e.getMessage()); + throw new CloudRuntimeException("revokeAccess: Failed with error: " + e.getMessage(), e); } } + /** + * Revokes volume access by removing the LUN mapping from the igroup. + * This method handles the iSCSI-specific logic for access revocation. + * + * @param storagePool The storage pool containing the volume + * @param volumeVO The volume to revoke access from + * @param host The host losing access + */ private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, Host host) { s_logger.info("revokeAccessForVolume: Revoking access to volume [{}] for host [{}]", volumeVO.getName(), host.getName()); + Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); String svmName = details.get(Constants.SVM_NAME); @@ -468,57 +521,84 @@ private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { String accessGroupName = Utility.getIgroupName(svmName, storagePool.getScope(), scopeId); + // Retrieve LUN name from volume details; if missing, volume may not have been fully created String lunName = volumeDetailsDao.findDetail(volumeVO.getId(), Constants.LUN_DOT_NAME) != null ? volumeDetailsDao.findDetail(volumeVO.getId(), Constants.LUN_DOT_NAME).getValue() : null; if (lunName == null) { - s_logger.warn("revokeAccessForVolume: No LUN name detail found for volume [{}]; assuming no backend LUN to revoke", volumeVO.getId()); + s_logger.warn("revokeAccessForVolume: No LUN name found for volume [{}]; skipping revoke", volumeVO.getId()); return; } + // Verify LUN still exists on ONTAP (may have been manually deleted) CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, lunName); if (cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getUuid() == null) { - s_logger.warn("revokeAccessForVolume: LUN for volume [{}] not found on ONTAP, assuming already deleted", volumeVO.getId()); + s_logger.warn("revokeAccessForVolume: LUN for volume [{}] not found on ONTAP, skipping revoke", volumeVO.getId()); return; } + // Verify igroup still exists on ONTAP AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName); if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getUuid() == null) { - s_logger.warn("revokeAccessForVolume: iGroup [{}] not found on ONTAP, assuming already deleted", accessGroupName); + s_logger.warn("revokeAccessForVolume: iGroup [{}] not found on ONTAP, skipping revoke", accessGroupName); return; } - if (!hostInitiatorFoundInIgroup(host.getStorageUrl(), accessGroup.getIgroup())) { - s_logger.error("revokeAccessForVolume: initiator [{}] is not present in iGroup [{}]", host.getStorageUrl(), accessGroupName); + // Verify host initiator is in the igroup before attempting to remove mapping + SANStrategy sanStrategy = (UnifiedSANStrategy) storageStrategy; + if (!sanStrategy.validateInitiatorInAccessGroup(host.getStorageUrl(), svmName, accessGroup.getIgroup().getName())) { + s_logger.warn("revokeAccessForVolume: Initiator [{}] is not in iGroup [{}], skipping revoke", + host.getStorageUrl(), accessGroupName); return; } + // Remove the LUN mapping from the igroup Map disableLogicalAccessMap = new HashMap<>(); disableLogicalAccessMap.put(Constants.LUN_DOT_UUID, cloudStackVolume.getLun().getUuid()); disableLogicalAccessMap.put(Constants.IGROUP_DOT_UUID, accessGroup.getIgroup().getUuid()); storageStrategy.disableLogicalAccess(disableLogicalAccessMap); + + s_logger.info("revokeAccessForVolume: Successfully revoked access to LUN [{}] for host [{}]", + lunName, host.getName()); } } - + /** + * Retrieves a CloudStack volume (LUN) from ONTAP by name. + * + * @param storageStrategy The storage strategy to use for the lookup + * @param svmName The SVM name containing the LUN + * @param cloudStackVolumeName The LUN name to look up + * @return CloudStackVolume if found, null otherwise + */ private CloudStackVolume getCloudStackVolumeByName(StorageStrategy storageStrategy, String svmName, String cloudStackVolumeName) { Map getCloudStackVolumeMap = new HashMap<>(); getCloudStackVolumeMap.put(Constants.NAME, cloudStackVolumeName); getCloudStackVolumeMap.put(Constants.SVM_DOT_NAME, svmName); + CloudStackVolume cloudStackVolume = storageStrategy.getCloudStackVolume(getCloudStackVolumeMap); if (cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getName() == null) { - s_logger.error("getCloudStackVolumeByName: LUN [{}] not found on ONTAP; returning null", cloudStackVolumeName); + s_logger.warn("getCloudStackVolumeByName: LUN [{}] not found on ONTAP", cloudStackVolumeName); return null; } return cloudStackVolume; } + /** + * Retrieves an access group (igroup) from ONTAP by name. + * + * @param storageStrategy The storage strategy to use for the lookup + * @param svmName The SVM name containing the igroup + * @param accessGroupName The igroup name to look up + * @return AccessGroup if found, null otherwise + */ private AccessGroup getAccessGroupByName(StorageStrategy storageStrategy, String svmName, String accessGroupName) { Map getAccessGroupMap = new HashMap<>(); getAccessGroupMap.put(Constants.NAME, accessGroupName); getAccessGroupMap.put(Constants.SVM_DOT_NAME, svmName); + AccessGroup accessGroup = storageStrategy.getAccessGroup(getAccessGroupMap); if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getName() == null) { - s_logger.error("getAccessGroupByName: iGroup [{}] not found on ONTAP; returning null", accessGroupName); + s_logger.warn("getAccessGroupByName: iGroup [{}] not found on ONTAP", accessGroupName); return null; } return accessGroup; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/SANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/SANStrategy.java index ce3b2806ef75..6be5ecfaf3f2 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/SANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/SANStrategy.java @@ -26,4 +26,25 @@ public SANStrategy(OntapStorage ontapStorage) { super(ontapStorage); } + /** + * Ensures the LUN is mapped to the specified access group (igroup). + * If a mapping already exists, returns the existing LUN number. + * If not, creates a new mapping and returns the assigned LUN number. + * + * @param svmName the SVM name + * @param lunName the LUN name + * @param accessGroupName the igroup name + * @return the logical unit number as a String + */ + public abstract String ensureLunMapped(String svmName, String lunName, String accessGroupName); + + /** + * Validates that the host initiator is present in the access group (igroup). + * + * @param hostInitiator the host initiator IQN + * @param svmName the SVM name + * @param accessGroupName the igroup name + * @return true if the initiator is found in the igroup, false otherwise + */ + public abstract boolean validateInitiatorInAccessGroup(String hostInitiator, String svmName, String accessGroupName); } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index 89ad712e9665..5bdebc5d716f 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -569,25 +569,24 @@ public String getNetworkInterface() { /** * Method encapsulates the behavior based on the opted protocol in subclasses * lunMap for iSCSI and FC protocols + * //TODO for NFS 3.0 and NFS 4.1 protocols (e.g., export rule management) * //TODO for Nvme/TCP and Nvme/FC protocols - * @param values map including SVM name, LUN name, and igroup name - * @return map containing logical unit number for the new/existing mapping + * @param values map including SVM name, LUN name, and igroup name (for SAN) or equivalent for NAS + * @return map containing logical unit number for the new/existing mapping (SAN) or relevant info for NAS */ abstract public Map enableLogicalAccess(Map values); /** * Method encapsulates the behavior based on the opted protocol in subclasses * lunUnmap for iSCSI and FC protocols - * //TODO for Nvme/TCP and Nvme/FC protocols - * @param values map including LUN UUID and iGroup UUID + * @param values map including LUN UUID and iGroup UUID (for SAN) or equivalent for NAS */ abstract public void disableLogicalAccess(Map values); /** * Method encapsulates the behavior based on the opted protocol in subclasses * lunMap lookup for iSCSI/FC protocols (GET-only, no side-effects) - * //TODO for Nvme/TCP and Nvme/FC protocols - * @param values map with SVM name, LUN name, and igroup name + * @param values map with SVM name, LUN name, and igroup name (for SAN) or equivalent for NAS * @return map containing logical unit number if mapping exists; otherwise null */ abstract public Map getLogicalAccess(Map values); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index fd1f2bb6b84a..204249f1d16a 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -550,4 +550,67 @@ public Map getLogicalAccess(Map values) { } return null; } + + @Override + public String ensureLunMapped(String svmName, String lunName, String accessGroupName) { + s_logger.info("ensureLunMapped: Ensuring LUN [{}] is mapped to igroup [{}] on SVM [{}]", lunName, accessGroupName, svmName); + + // Check existing map first + Map getMap = Map.of( + Constants.LUN_DOT_NAME, lunName, + Constants.SVM_DOT_NAME, svmName, + Constants.IGROUP_DOT_NAME, accessGroupName + ); + Map mapResp = getLogicalAccess(getMap); + if (mapResp != null && mapResp.containsKey(Constants.LOGICAL_UNIT_NUMBER)) { + String lunNumber = mapResp.get(Constants.LOGICAL_UNIT_NUMBER); + s_logger.info("ensureLunMapped: Existing LunMap found for LUN [{}] in igroup [{}] with LUN number [{}]", lunName, accessGroupName, lunNumber); + return lunNumber; + } + + // Create if not exists + Map enableMap = Map.of( + Constants.LUN_DOT_NAME, lunName, + Constants.SVM_DOT_NAME, svmName, + Constants.IGROUP_DOT_NAME, accessGroupName + ); + Map response = enableLogicalAccess(enableMap); + if (response == null || !response.containsKey(Constants.LOGICAL_UNIT_NUMBER)) { + throw new CloudRuntimeException("ensureLunMapped: Failed to map LUN [" + lunName + "] to iGroup [" + accessGroupName + "]"); + } + s_logger.info("ensureLunMapped: Successfully mapped LUN [{}] to igroup [{}] with LUN number [{}]", lunName, accessGroupName, response.get(Constants.LOGICAL_UNIT_NUMBER)); + return response.get(Constants.LOGICAL_UNIT_NUMBER); + } + + @Override + public boolean validateInitiatorInAccessGroup(String hostInitiator, String svmName, String accessGroupName) { + s_logger.info("validateInitiatorInAccessGroup: Validating initiator [{}] is in igroup [{}] on SVM [{}]", hostInitiator, accessGroupName, svmName); + + if (hostInitiator == null || hostInitiator.isEmpty()) { + s_logger.warn("validateInitiatorInAccessGroup: host initiator is null or empty"); + return false; + } + + Map getAccessGroupMap = Map.of( + Constants.NAME, accessGroupName, + Constants.SVM_DOT_NAME, svmName + ); + AccessGroup accessGroup = getAccessGroup(getAccessGroupMap); + if (accessGroup == null || accessGroup.getIgroup() == null) { + s_logger.warn("validateInitiatorInAccessGroup: iGroup [{}] not found on SVM [{}]", accessGroupName, svmName); + return false; + } + + Igroup igroup = accessGroup.getIgroup(); + if (igroup.getInitiators() != null) { + for (Initiator initiator : igroup.getInitiators()) { + if (initiator.getName().equalsIgnoreCase(hostInitiator)) { + s_logger.info("validateInitiatorInAccessGroup: Initiator [{}] validated successfully in igroup [{}]", hostInitiator, accessGroupName); + return true; + } + } + } + s_logger.warn("validateInitiatorInAccessGroup: Initiator [{}] NOT found in igroup [{}]", hostInitiator, accessGroupName); + return false; + } } From 5895ae3feef7de9ba622e02f78bd52ec26e60c9e Mon Sep 17 00:00:00 2001 From: "Locharla, Sandeep" Date: Thu, 5 Feb 2026 11:35:10 +0530 Subject: [PATCH 028/271] CSTACKEX-46: Fixed code as per comments received --- plugins/storage/volume/ontap/README.md | 123 ++++++++++ .../driver/OntapPrimaryDatastoreDriver.java | 109 ++------- .../OntapPrimaryDatastoreLifecycle.java | 13 +- .../OntapPrimaryDatastoreProvider.java | 3 +- .../storage/service/StorageStrategy.java | 22 -- .../storage/service/UnifiedSANStrategy.java | 99 ++++---- .../cloudstack/storage/utils/Constants.java | 4 + .../cloudstack/storage/utils/Utility.java | 6 +- .../OntapPrimaryDatastoreProviderTest.java | 216 ++++++++++++++++++ 9 files changed, 434 insertions(+), 161 deletions(-) create mode 100644 plugins/storage/volume/ontap/README.md create mode 100644 plugins/storage/volume/ontap/src/test/java/provider/OntapPrimaryDatastoreProviderTest.java diff --git a/plugins/storage/volume/ontap/README.md b/plugins/storage/volume/ontap/README.md new file mode 100644 index 000000000000..e7e066aafb55 --- /dev/null +++ b/plugins/storage/volume/ontap/README.md @@ -0,0 +1,123 @@ + + +# Apache CloudStack - NetApp ONTAP Storage Plugin + +## Overview + +The NetApp ONTAP Storage Plugin provides integration between Apache CloudStack and NetApp ONTAP storage systems. This plugin enables CloudStack to provision and manage primary storage on ONTAP clusters, supporting both NAS (NFS) and SAN (iSCSI) protocols. + +## Features + +- **Primary Storage Support**: Provision and manage primary storage pools on NetApp ONTAP +- **Multiple Protocols**: Support for NFS 3.0 and iSCSI protocols +- **Unified Storage**: Integration with traditional ONTAP unified storage architecture +- **KVM Hypervisor Support**: Supports KVM hypervisor environments +- **Managed Storage**: Operates as managed storage with full lifecycle management +- **Flexible Scoping**: Support for Zone-wide and Cluster-scoped storage pools + +## Architecture + +### Component Structure + +| Package | Description | +|---------|-------------------------------------------------------| +| `driver` | Primary datastore driver implementation | +| `feign` | REST API clients and data models for ONTAP operations | +| `lifecycle` | Storage pool lifecycle management | +| `listener` | Host connection event handlers | +| `provider` | Main provider and strategy factory | +| `service` | ONTAP Storage strategy implementations (NAS/SAN) | +| `utils` | Constants and helper utilities | + +## Requirements + +### ONTAP Requirements + +- NetApp ONTAP 9.15.1 or higher +- Storage Virtual Machine (SVM) configured with appropriate protocols enabled +- Management LIF accessible from CloudStack management server +- Data LIF(s) accessible from hypervisor hosts and are of IPv4 type +- Aggregates assigned to the SVM with sufficient capacity + +### CloudStack Requirements + +- Apache CloudStack current version or higher +- KVM hypervisor hosts +- For iSCSI: Hosts must have iSCSI initiator configured with valid IQN +- For NFS: Hosts must have NFS client packages installed + +### Minimum Volume Size + +ONTAP requires a minimum volume size of **1.56 GB** (1,677,721,600 bytes). The plugin will automatically adjust requested sizes below this threshold. + +## Configuration + +### Storage Pool Creation Parameters + +When creating an ONTAP primary storage pool, provide the following details in the URL field (semicolon-separated key=value pairs): + +| Parameter | Required | Description | +|-----------|----------|-------------| +| `username` | Yes | ONTAP cluster admin username | +| `password` | Yes | ONTAP cluster admin password | +| `svmName` | Yes | Storage Virtual Machine name | +| `protocol` | Yes | Storage protocol (`NFS3` or `ISCSI`) | +| `managementLIF` | Yes | ONTAP cluster management LIF IP address | + +### Example URL Format + +``` +username=admin;password=secretpass;svmName=svm1;protocol=ISCSI;managementLIF=192.168.1.100 +``` + +## Port Configuration + +| Protocol | Default Port | +|----------|--------------| +| NFS | 2049 | +| iSCSI | 3260 | +| ONTAP Management API | 443 (HTTPS) | + +## Limitations + +- Supports only **KVM** hypervisor +- Supports only **Unified ONTAP** storage (disaggregated not supported) +- Supports only **NFS3** and **iSCSI** protocols +- IPv6 type and FQDN LIFs are not supported + +## Troubleshooting + +### Common Issues + +1. **Connection Failures** + - Verify management LIF is reachable from CloudStack management server + - Check firewall rules for port 443 + +2. **Protocol Errors** + - Ensure the protocol (NFS/iSCSI) is enabled on the SVM + - Verify Data LIFs are configured for the protocol + +3. **Capacity Errors** + - Check aggregate space availability + - Ensure requested volume size meets minimum requirements (1.56 GB) + +4. **Host Connection Issues** + - For iSCSI: Verify host IQN is properly configured in host's storage URL + - For NFS: Ensure NFS client is installed and running diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java index ea3b8a89673a..f912449f269e 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java @@ -69,16 +69,7 @@ /** * Primary datastore driver for NetApp ONTAP storage systems. - * This driver handles volume lifecycle operations (create, delete, grant/revoke access) - * for both iSCSI (LUN-based) and NFS protocols against ONTAP storage backends. - * - * For iSCSI protocol: - * - Creates LUNs on ONTAP and maps them to initiator groups (igroups) - * - Manages LUN mappings for host access control - * - * For NFS protocol: - * - Delegates file operations to KVM host/libvirt - * - ONTAP volume/export management handled at storage pool creation time + * Handles volume lifecycle operations for iSCSI and NFS protocols. */ public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver { @@ -111,26 +102,16 @@ public DataStoreTO getStoreTO(DataStore store) { } /** - * Asynchronously creates a volume on the ONTAP storage system. - * - * For iSCSI protocol: - * - Creates a LUN on ONTAP via the SAN strategy - * - Stores LUN UUID and name in volume_details table for later reference - * - Creates a LUN mapping to the appropriate igroup (based on cluster/zone scope) - * - Sets the iSCSI path on the volume for host attachment - * - * For NFS protocol: - * - Associates the volume with the storage pool (actual file creation handled by hypervisor) - * - * @param dataStore The target data store (storage pool) - * @param dataObject The volume to create - * @param callback Callback to notify completion + * Creates a volume on the ONTAP storage system. */ @Override public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { CreateCmdResult createCmdResult = null; String errMsg; + if (dataObject == null) { + throw new InvalidParameterValueException("createAsync: dataObject should not be null"); + } if (dataStore == null) { throw new InvalidParameterValueException("createAsync: dataStore should not be null"); } @@ -150,6 +131,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet s_logger.error("createAsync: Storage Pool not found for id: " + dataStore.getId()); throw new CloudRuntimeException("createAsync: Storage Pool not found for id: " + dataStore.getId()); } + String storagePoolUuid = dataStore.getUuid(); Map details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId()); @@ -186,7 +168,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet // Create LUN-to-igroup mapping and retrieve the assigned LUN ID UnifiedSANStrategy sanStrategy = (UnifiedSANStrategy) Utility.getStrategyByStoragePoolDetails(details); - String accessGroupName = Utility.getIgroupName(svmName, storagePool.getScope(), scopeId); + String accessGroupName = Utility.getIgroupName(svmName, storagePoolUuid); String lunNumber = sanStrategy.ensureLunMapped(svmName, lunName, accessGroupName); // Construct iSCSI path: // format for KVM/libvirt attachment @@ -223,12 +205,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet } /** - * Creates a CloudStack volume on the ONTAP backend using the appropriate storage strategy. - * - * @param dataStore The target data store - * @param dataObject The volume to create - * @param details Storage pool configuration details - * @return CloudStackVolume containing the created backend object (LUN for iSCSI) + * Creates a volume on the ONTAP backend. */ private CloudStackVolume createCloudStackVolume(DataStore dataStore, DataObject dataObject, Map details) { StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId()); @@ -249,18 +226,7 @@ private CloudStackVolume createCloudStackVolume(DataStore dataStore, DataObject } /** - * Asynchronously deletes a volume from the ONTAP storage system. - * - * For iSCSI protocol: - * - Retrieves LUN details from volume_details table - * - Deletes the LUN from ONTAP (LUN mappings are automatically removed) - * - * For NFS protocol: - * - No ONTAP operation needed; file deletion handled by KVM host/libvirt - * - * @param store The data store containing the volume - * @param data The volume to delete - * @param callback Callback to notify completion + * Deletes a volume from the ONTAP storage system. */ @Override public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback callback) { @@ -344,20 +310,7 @@ public ChapInfo getChapInfo(DataObject dataObject) { } /** - * Grants a host access to a volume on the ONTAP storage system. - * - * For iSCSI protocol: - * - Validates that the host's iSCSI initiator (IQN) is present in the target igroup - * - Ensures the LUN is mapped to the igroup (creates mapping if not exists) - * - Updates the volume's iSCSI path with the assigned LUN ID - * - * For NFS protocol: - * - No explicit grant needed; NFS exports are configured at storage pool level - * - * @param dataObject The volume to grant access to - * @param host The host requesting access - * @param dataStore The data store containing the volume - * @return true if access was granted successfully + * Grants a host access to a volume. */ @Override public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { @@ -377,6 +330,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore s_logger.error("grantAccess: Storage Pool not found for id: " + dataStore.getId()); throw new CloudRuntimeException("grantAccess: Storage Pool not found for id: " + dataStore.getId()); } + String storagePoolUuid = dataStore.getUuid(); // ONTAP managed storage only supports cluster and zone scoped pools if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) { @@ -398,7 +352,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { UnifiedSANStrategy sanStrategy = (UnifiedSANStrategy) Utility.getStrategyByStoragePoolDetails(details); - String accessGroupName = Utility.getIgroupName(svmName, storagePool.getScope(), scopeId); + String accessGroupName = Utility.getIgroupName(svmName, storagePoolUuid); // Verify host initiator is registered in the igroup before allowing access if (!sanStrategy.validateInitiatorInAccessGroup(host.getStorageUrl(), svmName, accessGroupName)) { @@ -432,18 +386,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore } /** - * Revokes a host's access to a volume on the ONTAP storage system. - * - * For iSCSI protocol: - * - Validates the volume is not attached to an active VM - * - Removes the LUN mapping from the igroup - * - * For NFS protocol: - * - No explicit revoke needed; NFS exports remain at storage pool level - * - * @param dataObject The volume to revoke access from - * @param host The host losing access - * @param dataStore The data store containing the volume + * Revokes a host's access to a volume. */ @Override public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { @@ -467,7 +410,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) VirtualMachine.State.Destroyed, VirtualMachine.State.Expunging, VirtualMachine.State.Error).contains(vm.getState())) { - s_logger.debug("revokeAccess: Volume [{}] is still attached to VM [{}] in state [{}], skipping revokeAccess", + s_logger.warn("revokeAccess: Volume [{}] is still attached to VM [{}] in state [{}], skipping revokeAccess", dataObject.getId(), vm.getInstanceName(), vm.getState()); return; } @@ -503,12 +446,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) } /** - * Revokes volume access by removing the LUN mapping from the igroup. - * This method handles the iSCSI-specific logic for access revocation. - * - * @param storagePool The storage pool containing the volume - * @param volumeVO The volume to revoke access from - * @param host The host losing access + * Revokes volume access for the specified host. */ private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, Host host) { s_logger.info("revokeAccessForVolume: Revoking access to volume [{}] for host [{}]", volumeVO.getName(), host.getName()); @@ -516,10 +454,11 @@ private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId()); StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details); String svmName = details.get(Constants.SVM_NAME); + String storagePoolUuid = storagePool.getUuid(); long scopeId = (storagePool.getScope() == ScopeType.CLUSTER) ? host.getClusterId() : host.getDataCenterId(); if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) { - String accessGroupName = Utility.getIgroupName(svmName, storagePool.getScope(), scopeId); + String accessGroupName = Utility.getIgroupName(svmName, storagePoolUuid); // Retrieve LUN name from volume details; if missing, volume may not have been fully created String lunName = volumeDetailsDao.findDetail(volumeVO.getId(), Constants.LUN_DOT_NAME) != null ? @@ -563,12 +502,7 @@ private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, } /** - * Retrieves a CloudStack volume (LUN) from ONTAP by name. - * - * @param storageStrategy The storage strategy to use for the lookup - * @param svmName The SVM name containing the LUN - * @param cloudStackVolumeName The LUN name to look up - * @return CloudStackVolume if found, null otherwise + * Retrieves a volume from ONTAP by name. */ private CloudStackVolume getCloudStackVolumeByName(StorageStrategy storageStrategy, String svmName, String cloudStackVolumeName) { Map getCloudStackVolumeMap = new HashMap<>(); @@ -584,12 +518,7 @@ private CloudStackVolume getCloudStackVolumeByName(StorageStrategy storageStrate } /** - * Retrieves an access group (igroup) from ONTAP by name. - * - * @param storageStrategy The storage strategy to use for the lookup - * @param svmName The SVM name containing the igroup - * @param accessGroupName The igroup name to look up - * @return AccessGroup if found, null otherwise + * Retrieves an access group from ONTAP by name. */ private AccessGroup getAccessGroupByName(StorageStrategy storageStrategy, String svmName, String accessGroupName) { Map getAccessGroupMap = new HashMap<>(); diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index eabd6482572c..ff767fb81c63 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -152,16 +152,21 @@ public DataStore initialize(Map dsInfos) { throw new CloudRuntimeException("ONTAP primary storage must be managed"); } - // Required ONTAP detail keys Set requiredKeys = Set.of( Constants.USERNAME, Constants.PASSWORD, Constants.SVM_NAME, Constants.PROTOCOL, - Constants.MANAGEMENT_LIF, + Constants.MANAGEMENT_LIF + ); + + Set optionalKeys = Set.of( Constants.IS_DISAGGREGATED ); + Set allowedKeys = new java.util.HashSet<>(requiredKeys); + allowedKeys.addAll(optionalKeys); + // Parse key=value pairs from URL into details (skip empty segments) if (url != null && !url.isEmpty()) { for (String segment : url.split(Constants.SEMICOLON)) { @@ -249,13 +254,13 @@ public DataStore initialize(Map dsInfos) { case NFS3: parameters.setType(Storage.StoragePoolType.NetworkFilesystem); path = Constants.SLASH + storagePoolName; - port = 2049; + port = Constants.NFS3_PORT; s_logger.info("Setting NFS path for storage pool: " + path + ", port: " + port); break; case ISCSI: parameters.setType(Storage.StoragePoolType.Iscsi); path = storageStrategy.getStoragePath(); - port = 3260; + port = Constants.ISCSI_PORT; s_logger.info("Setting iSCSI path for storage pool: " + path + ", port: " + port); break; default: diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java index 1aadec79b3ab..5b44c951a5fa 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java @@ -28,6 +28,7 @@ import org.apache.cloudstack.storage.driver.OntapPrimaryDatastoreDriver; import org.apache.cloudstack.storage.lifecycle.OntapPrimaryDatastoreLifecycle; import org.apache.cloudstack.storage.listener.OntapHostListener; +import org.apache.cloudstack.storage.utils.Constants; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.springframework.stereotype.Component; @@ -65,7 +66,7 @@ public HypervisorHostListener getHostListener() { @Override public String getName() { s_logger.trace("OntapPrimaryDatastoreProvider: getName: Called"); - return "ONTAP"; + return Constants.ONTAP_PLUGIN_NAME; } @Override diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java index 5bdebc5d716f..565cf0399663 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java @@ -125,33 +125,11 @@ public boolean connect() { s_logger.error("iSCSI protocol is not enabled on SVM " + svmName); return false; } - // TODO: Implement logic to select appropriate aggregate based on storage requirements List aggrs = svm.getAggregates(); if (aggrs == null || aggrs.isEmpty()) { s_logger.error("No aggregates are assigned to SVM " + svmName); return false; } - // Set the aggregates which are according to the storage requirements - for (Aggregate aggr : aggrs) { - s_logger.debug("Found aggregate: " + aggr.getName() + " with UUID: " + aggr.getUuid()); - Aggregate aggrResp = aggregateFeignClient.getAggregateByUUID(authHeader, aggr.getUuid()); - if (!Objects.equals(aggrResp.getState(), Aggregate.StateEnum.ONLINE)) { - s_logger.warn("Aggregate " + aggr.getName() + " is not in online state. Skipping this aggregate."); - continue; - } else if (aggrResp.getSpace() == null || aggrResp.getAvailableBlockStorageSpace() == null || - aggrResp.getAvailableBlockStorageSpace() <= storage.getSize().doubleValue()) { - s_logger.warn("Aggregate " + aggr.getName() + " does not have sufficient available space. Skipping this aggregate."); - continue; - } - s_logger.info("Selected aggregate: " + aggr.getName() + " for volume operations."); - this.aggregates = List.of(aggr); - break; - } - if (this.aggregates == null || this.aggregates.isEmpty()) { - s_logger.error("No suitable aggregates found on SVM " + svmName + " for volume creation."); - return false; - } - this.aggregates = aggrs; s_logger.info("Successfully connected to ONTAP cluster and validated ONTAP details provided"); } catch (Exception e) { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java index 204249f1d16a..cab598702ecb 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java @@ -22,6 +22,7 @@ import com.cloud.host.HostVO; import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.exception.CloudRuntimeException; +import feign.FeignException; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.storage.feign.FeignClientFactory; import org.apache.cloudstack.storage.feign.client.SANFeignClient; @@ -87,6 +88,10 @@ public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume CloudStackVolume createdCloudStackVolume = new CloudStackVolume(); createdCloudStackVolume.setLun(lun); return createdCloudStackVolume; + } catch (FeignException e) { + s_logger.error("FeignException occurred while creating LUN: {}, Status: {}, Exception: {}", + cloudstackVolume.getLun().getName(), e.status(), e.getMessage()); + throw new CloudRuntimeException("Failed to create Lun: " + e.getMessage()); } catch (Exception e) { s_logger.error("Exception occurred while creating LUN: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage()); throw new CloudRuntimeException("Failed to create Lun: " + e.getMessage()); @@ -111,16 +116,12 @@ public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) { Map queryParams = Map.of("allow_delete_while_mapped", "true"); try { sanFeignClient.deleteLun(authHeader, cloudstackVolume.getLun().getUuid(), queryParams); - } catch (Exception ex) { - String errMsg = ex.getMessage(); - if (errMsg != null && (errMsg.contains("entry doesn't exist") - || errMsg.contains("does not exist") - || errMsg.contains("not found") - || errMsg.contains("status 404"))) { - s_logger.warn("deleteCloudStackVolume: Lun {} does not exist ({}), skipping deletion", cloudstackVolume.getLun().getName(), errMsg); + } catch (FeignException feignEx) { + if (feignEx.status() == 404) { + s_logger.warn("deleteCloudStackVolume: Lun {} does not exist (status 404), skipping deletion", cloudstackVolume.getLun().getName()); return; } - throw ex; + throw feignEx; } s_logger.info("deleteCloudStackVolume: Lun deleted successfully. LunName: {}", cloudstackVolume.getLun().getName()); } catch (Exception e) { @@ -150,6 +151,9 @@ public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) { String lunCloneName = cloudstackVolume.getLun().getName() + "_clone"; lunCloneRequest.setName(lunCloneName); sanFeignClient.createLun(authHeader, true, lunCloneRequest); + } catch (FeignException e) { + s_logger.error("FeignException occurred while creating Lun clone: {}, Status: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.status(), e.getMessage()); + throw new CloudRuntimeException("Failed to create Lun clone: " + e.getMessage()); } catch (Exception e) { s_logger.error("Exception occurred while creating Lun clone: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage()); throw new CloudRuntimeException("Failed to create Lun clone: " + e.getMessage()); @@ -185,14 +189,16 @@ public CloudStackVolume getCloudStackVolume(Map values) { CloudStackVolume cloudStackVolume = new CloudStackVolume(); cloudStackVolume.setLun(lun); return cloudStackVolume; - } catch (Exception e) { - String errMsg = e.getMessage(); - if (errMsg != null && errMsg.contains("not found")) { - s_logger.warn("getCloudStackVolume: Lun '{}' on SVM '{}' not found ({}). Returning null.", lunName, svmName, errMsg); + } catch (FeignException e) { + if (e.status() == 404) { + s_logger.warn("getCloudStackVolume: Lun '{}' on SVM '{}' not found (status 404). Returning null.", lunName, svmName); return null; } - s_logger.error("Exception occurred while fetching Lun, Exception: {}", errMsg); - throw new CloudRuntimeException("Failed to fetch Lun details: " + errMsg); + s_logger.error("FeignException occurred while fetching Lun, Status: {}, Exception: {}", e.status(), e.getMessage()); + throw new CloudRuntimeException("Failed to fetch Lun details: " + e.getMessage()); + } catch (Exception e) { + s_logger.error("Exception occurred while fetching Lun, Exception: {}", e.getMessage()); + throw new CloudRuntimeException("Failed to fetch Lun details: " + e.getMessage()); } } @@ -221,7 +227,8 @@ public AccessGroup createAccessGroup(AccessGroup accessGroup) { Igroup igroupRequest = new Igroup(); List hostsIdentifier = new ArrayList<>(); String svmName = dataStoreDetails.get(Constants.SVM_NAME); - igroupName = Utility.getIgroupName(svmName, accessGroup.getScope().getScopeType(), accessGroup.getScope().getScopeId()); + String storagePoolUuid = accessGroup.getPrimaryDataStoreInfo().getUuid(); + igroupName = Utility.getIgroupName(svmName, storagePoolUuid); Hypervisor.HypervisorType hypervisorType = accessGroup.getPrimaryDataStoreInfo().getHypervisor(); ProtocolType protocol = ProtocolType.valueOf(dataStoreDetails.get(Constants.PROTOCOL)); @@ -261,21 +268,20 @@ public AccessGroup createAccessGroup(AccessGroup accessGroup) { } igroupRequest.setInitiators(initiators); } - igroupRequest.setProtocol(Igroup.ProtocolEnum.valueOf("iscsi")); + igroupRequest.setProtocol(Igroup.ProtocolEnum.valueOf(Constants.ISCSI)); // Create Igroup s_logger.debug("createAccessGroup: About to call sanFeignClient.createIgroup with igroupName: {}", igroupName); AccessGroup createdAccessGroup = new AccessGroup(); OntapResponse createdIgroup = null; try { createdIgroup = sanFeignClient.createIgroup(authHeader, true, igroupRequest); - } catch (Exception feignEx) { - String errMsg = feignEx.getMessage(); - if (errMsg != null && errMsg.contains(("5374023"))) { - s_logger.warn("createAccessGroup: Igroup with name {} already exists. Fetching existing Igroup.", igroupName); + } catch (FeignException feignEx) { + if (feignEx.status() == 409) { + s_logger.warn("createAccessGroup: Igroup with name {} already exists (status 409). Fetching existing Igroup.", igroupName); // TODO: Currently we aren't doing anything with the returned AccessGroup object, so, haven't added code here to fetch the existing Igroup and set it in AccessGroup. return createdAccessGroup; } - s_logger.error("createAccessGroup: Exception during Feign call: {}", feignEx.getMessage(), feignEx); + s_logger.error("createAccessGroup: FeignException during Igroup creation: Status: {}, Exception: {}", feignEx.status(), feignEx.getMessage(), feignEx); throw feignEx; } @@ -317,14 +323,16 @@ public void deleteAccessGroup(AccessGroup accessGroup) { // Extract SVM name from storage (already initialized in constructor via OntapStorage) String svmName = storage.getSvmName(); + String storagePoolUuid = primaryDataStoreInfo.getUuid(); // Determine scope and generate iGroup name - String igroupName; + String igroupName = Utility.getIgroupName(svmName, storagePoolUuid); + s_logger.info("deleteAccessGroup: Generated iGroup name '{}'", igroupName); if (primaryDataStoreInfo.getClusterId() != null) { - igroupName = Utility.getIgroupName(svmName, com.cloud.storage.ScopeType.CLUSTER, primaryDataStoreInfo.getClusterId()); + igroupName = Utility.getIgroupName(svmName, storagePoolUuid); s_logger.info("deleteAccessGroup: Deleting cluster-scoped iGroup '{}'", igroupName); } else { - igroupName = Utility.getIgroupName(svmName, com.cloud.storage.ScopeType.ZONE, primaryDataStoreInfo.getDataCenterId()); + igroupName = Utility.getIgroupName(svmName, storagePoolUuid); s_logger.info("deleteAccessGroup: Deleting zone-scoped iGroup '{}'", igroupName); } @@ -355,16 +363,21 @@ public void deleteAccessGroup(AccessGroup accessGroup) { s_logger.info("deleteAccessGroup: Successfully deleted iGroup '{}'", igroupName); - } catch (Exception e) { - String errorMsg = e.getMessage(); - // Check if iGroup doesn't exist (ONTAP error code: 5374852 - "The initiator group does not exist.") - if (errorMsg != null && (errorMsg.contains("5374852") || errorMsg.contains("not found"))) { - s_logger.warn("deleteAccessGroup: iGroup '{}' does not exist, skipping deletion", igroupName); + } catch (FeignException e) { + if (e.status() == 404) { + s_logger.warn("deleteAccessGroup: iGroup '{}' does not exist (status 404), skipping deletion", igroupName); } else { + s_logger.error("deleteAccessGroup: FeignException occurred: Status: {}, Exception: {}", e.status(), e.getMessage(), e); throw e; } + } catch (Exception e) { + s_logger.error("deleteAccessGroup: Exception occurred: {}", e.getMessage(), e); + throw e; } + } catch (FeignException e) { + s_logger.error("deleteAccessGroup: FeignException occurred while deleting iGroup. Status: {}, Exception: {}", e.status(), e.getMessage(), e); + throw new CloudRuntimeException("Failed to delete iGroup: " + e.getMessage(), e); } catch (Exception e) { s_logger.error("deleteAccessGroup: Failed to delete iGroup. Exception: {}", e.getMessage(), e); throw new CloudRuntimeException("Failed to delete iGroup: " + e.getMessage(), e); @@ -421,14 +434,16 @@ public AccessGroup getAccessGroup(Map values) { AccessGroup accessGroup = new AccessGroup(); accessGroup.setIgroup(igroup); return accessGroup; - } catch (Exception e) { - String errMsg = e.getMessage(); - if (errMsg != null && errMsg.contains("not found")) { - s_logger.warn("getAccessGroup: Igroup '{}' not found on SVM '{}' ({}). Returning null.", igroupName, svmName, errMsg); + } catch (FeignException e) { + if (e.status() == 404) { + s_logger.warn("getAccessGroup: Igroup '{}' not found on SVM '{}' (status 404). Returning null.", igroupName, svmName); return null; } - s_logger.error("Exception occurred while fetching Igroup, Exception: {}", errMsg); - throw new CloudRuntimeException("Failed to fetch Igroup details: " + errMsg); + s_logger.error("FeignException occurred while fetching Igroup, Status: {}, Exception: {}", e.status(), e.getMessage()); + throw new CloudRuntimeException("Failed to fetch Igroup details: " + e.getMessage()); + } catch (Exception e) { + s_logger.error("Exception occurred while fetching Igroup, Exception: {}", e.getMessage()); + throw new CloudRuntimeException("Failed to fetch Igroup details: " + e.getMessage()); } } @@ -509,14 +524,16 @@ public void disableLogicalAccess(Map values) { String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword()); sanFeignClient.deleteLunMap(authHeader, lunUUID, igroupUUID); s_logger.info("disableLogicalAccess: LunMap deleted successfully."); - } catch (Exception e) { - String errMsg = e.getMessage(); - if (errMsg != null && errMsg.contains("not found")) { - s_logger.warn("disableLogicalAccess: LunMap with Lun UUID: {} and igroup UUID: {} does not exist ({}), skipping deletion", lunUUID, igroupUUID, errMsg); + } catch (FeignException e) { + if (e.status() == 404) { + s_logger.warn("disableLogicalAccess: LunMap with Lun UUID: {} and igroup UUID: {} does not exist, skipping deletion", lunUUID, igroupUUID); return; } - s_logger.error("Exception occurred while deleting LunMap", e); - throw new CloudRuntimeException("Failed to delete LunMap: " + errMsg); + s_logger.error("FeignException occurred while deleting LunMap, Status: {}, Exception: {}", e.status(), e.getMessage()); + throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage()); + } catch (Exception e) { + s_logger.error("Exception occurred while deleting LunMap, Exception: {}", e.getMessage()); + throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage()); } } diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java index 43f8511967e7..920bd45fd0d7 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java @@ -22,6 +22,10 @@ public class Constants { + public static final String ONTAP_PLUGIN_NAME = "ONTAP"; + public static final int NFS3_PORT = 2049; + public static final int ISCSI_PORT = 3260; + public static final String NFS = "nfs"; public static final String ISCSI = "iscsi"; public static final String SIZE = "size"; diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index 9d5eac8b2cea..75890ae2e2cf 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -141,9 +141,9 @@ public static StorageStrategy getStrategyByStoragePoolDetails(Map types = provider.getTypes(); + assertNotNull(types); + assertEquals(1, types.size()); + assertTrue(types.contains(DataStoreProviderType.PRIMARY)); + } + + @Test + public void testGetDataStoreLifeCycle_beforeConfigure() { + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); + assertNull(lifeCycle); + } + + @Test + public void testGetDataStoreDriver_beforeConfigure() { + DataStoreDriver driver = provider.getDataStoreDriver(); + assertNull(driver); + } + + @Test + public void testGetHostListener_beforeConfigure() { + HypervisorHostListener listener = provider.getHostListener(); + assertNull(listener); + } + + @Test + public void testConfigure() { + OntapPrimaryDatastoreDriver mockDriver = mock(OntapPrimaryDatastoreDriver.class); + OntapPrimaryDatastoreLifecycle mockLifecycle = mock(OntapPrimaryDatastoreLifecycle.class); + OntapHostListener mockListener = mock(OntapHostListener.class); + + try (MockedStatic componentContext = Mockito.mockStatic(ComponentContext.class)) { + componentContext.when(() -> ComponentContext.inject(OntapPrimaryDatastoreDriver.class)) + .thenReturn(mockDriver); + componentContext.when(() -> ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class)) + .thenReturn(mockLifecycle); + componentContext.when(() -> ComponentContext.inject(OntapHostListener.class)) + .thenReturn(mockListener); + + Map params = new HashMap<>(); + boolean result = provider.configure(params); + + assertTrue(result); + } + } + + @Test + public void testGetDataStoreLifeCycle_afterConfigure() { + OntapPrimaryDatastoreDriver mockDriver = mock(OntapPrimaryDatastoreDriver.class); + OntapPrimaryDatastoreLifecycle mockLifecycle = mock(OntapPrimaryDatastoreLifecycle.class); + OntapHostListener mockListener = mock(OntapHostListener.class); + + try (MockedStatic componentContext = Mockito.mockStatic(ComponentContext.class)) { + componentContext.when(() -> ComponentContext.inject(OntapPrimaryDatastoreDriver.class)) + .thenReturn(mockDriver); + componentContext.when(() -> ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class)) + .thenReturn(mockLifecycle); + componentContext.when(() -> ComponentContext.inject(OntapHostListener.class)) + .thenReturn(mockListener); + + provider.configure(new HashMap<>()); + + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); + assertNotNull(lifeCycle); + assertEquals(mockLifecycle, lifeCycle); + } + } + + @Test + public void testGetDataStoreDriver_afterConfigure() { + OntapPrimaryDatastoreDriver mockDriver = mock(OntapPrimaryDatastoreDriver.class); + OntapPrimaryDatastoreLifecycle mockLifecycle = mock(OntapPrimaryDatastoreLifecycle.class); + OntapHostListener mockListener = mock(OntapHostListener.class); + + try (MockedStatic componentContext = Mockito.mockStatic(ComponentContext.class)) { + componentContext.when(() -> ComponentContext.inject(OntapPrimaryDatastoreDriver.class)) + .thenReturn(mockDriver); + componentContext.when(() -> ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class)) + .thenReturn(mockLifecycle); + componentContext.when(() -> ComponentContext.inject(OntapHostListener.class)) + .thenReturn(mockListener); + + provider.configure(new HashMap<>()); + + DataStoreDriver driver = provider.getDataStoreDriver(); + assertNotNull(driver); + assertEquals(mockDriver, driver); + } + } + + @Test + public void testGetHostListener_afterConfigure() { + OntapPrimaryDatastoreDriver mockDriver = mock(OntapPrimaryDatastoreDriver.class); + OntapPrimaryDatastoreLifecycle mockLifecycle = mock(OntapPrimaryDatastoreLifecycle.class); + OntapHostListener mockListener = mock(OntapHostListener.class); + + try (MockedStatic componentContext = Mockito.mockStatic(ComponentContext.class)) { + componentContext.when(() -> ComponentContext.inject(OntapPrimaryDatastoreDriver.class)) + .thenReturn(mockDriver); + componentContext.when(() -> ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class)) + .thenReturn(mockLifecycle); + componentContext.when(() -> ComponentContext.inject(OntapHostListener.class)) + .thenReturn(mockListener); + + provider.configure(new HashMap<>()); + + HypervisorHostListener listener = provider.getHostListener(); + assertNotNull(listener); + assertEquals(mockListener, listener); + } + } + + @Test + public void testConfigure_withNullParams() { + OntapPrimaryDatastoreDriver mockDriver = mock(OntapPrimaryDatastoreDriver.class); + OntapPrimaryDatastoreLifecycle mockLifecycle = mock(OntapPrimaryDatastoreLifecycle.class); + OntapHostListener mockListener = mock(OntapHostListener.class); + + try (MockedStatic componentContext = Mockito.mockStatic(ComponentContext.class)) { + componentContext.when(() -> ComponentContext.inject(OntapPrimaryDatastoreDriver.class)) + .thenReturn(mockDriver); + componentContext.when(() -> ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class)) + .thenReturn(mockLifecycle); + componentContext.when(() -> ComponentContext.inject(OntapHostListener.class)) + .thenReturn(mockListener); + + boolean result = provider.configure(null); + + assertTrue(result); + assertNotNull(provider.getDataStoreDriver()); + assertNotNull(provider.getDataStoreLifeCycle()); + assertNotNull(provider.getHostListener()); + } + } + + @Test + public void testGetTypes_returnsOnlyPrimaryType() { + Set types = provider.getTypes(); + + assertNotNull(types); + assertEquals(1, types.size()); + assertTrue(types.contains(DataStoreProviderType.PRIMARY)); + + // Verify it doesn't contain other types + for (DataStoreProviderType type : types) { + assertEquals(DataStoreProviderType.PRIMARY, type); + } + } +} From 887e5891085a897e1409b4732d84ae561a50dce3 Mon Sep 17 00:00:00 2001 From: "Locharla, Sandeep" Date: Thu, 5 Feb 2026 16:27:28 +0530 Subject: [PATCH 029/271] CSTACKEX-46: Added some code that was added to community PR for PrimaryStoragePool --- .../OntapPrimaryDatastoreLifecycle.java | 2 +- .../cloudstack/storage/utils/Utility.java | 1 - .../OntapPrimaryDatastoreLifecycleTest.java | 40 +++++++++++++++---- 3 files changed, 33 insertions(+), 10 deletions(-) diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java index ff767fb81c63..a7df490b9b95 100755 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java @@ -184,7 +184,7 @@ public DataStore initialize(Map dsInfos) { for (Map.Entry e : details.entrySet()) { String key = e.getKey(); String val = e.getValue(); - if (!requiredKeys.contains(key)) { + if (!allowedKeys.contains(key)) { throw new CloudRuntimeException("Unexpected ONTAP detail key in URL: " + key); } if (val == null || val.isEmpty()) { diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java index 75890ae2e2cf..2f805c1784d6 100644 --- a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java +++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java @@ -20,7 +20,6 @@ package org.apache.cloudstack.storage.utils; import com.cloud.exception.InvalidParameterValueException; -import com.cloud.storage.ScopeType; import com.cloud.utils.StringUtils; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; diff --git a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java index af8aac84f490..789615a9f43b 100644 --- a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java +++ b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java @@ -78,6 +78,30 @@ void setUp() { @Test public void testInitialize_positive() { + Map dsInfos = new HashMap<>(); + dsInfos.put("username", "testUser"); + dsInfos.put("password", "testPassword"); + dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); + dsInfos.put("zoneId",1L); + dsInfos.put("podId",1L); + dsInfos.put("clusterId", 1L); + dsInfos.put("name", "testStoragePool"); + dsInfos.put("providerName", "testProvider"); + dsInfos.put("capacityBytes",200000L); + dsInfos.put("managed",true); + dsInfos.put("tags", "testTag"); + dsInfos.put("isTagARule", false); + dsInfos.put("details", new HashMap()); + + try(MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) { + storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy); + ontapPrimaryDatastoreLifecycle.initialize(dsInfos); + } + } + + @Test + public void testInitialize_positiveWithIsDisaggregated() { + Map dsInfos = new HashMap<>(); dsInfos.put("username", "testUser"); dsInfos.put("password", "testPassword"); @@ -109,7 +133,7 @@ public void testInitialize_null_Arg() { @Test public void testInitialize_missingRequiredDetailKey() { Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); + dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); @@ -131,7 +155,7 @@ public void testInitialize_missingRequiredDetailKey() { @Test public void testInitialize_invalidCapacityBytes() { Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;isDisaggregated=false"); + dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); @@ -152,7 +176,7 @@ public void testInitialize_invalidCapacityBytes() { @Test public void testInitialize_unmanagedStorage() { Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;isDisaggregated=false"); + dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); @@ -176,7 +200,7 @@ public void testInitialize_unmanagedStorage() { @Test public void testInitialize_nullStoragePoolName() { Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;isDisaggregated=false"); + dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); @@ -200,7 +224,7 @@ public void testInitialize_nullStoragePoolName() { @Test public void testInitialize_nullProviderName() { Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;isDisaggregated=false"); + dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); @@ -224,7 +248,7 @@ public void testInitialize_nullProviderName() { @Test public void testInitialize_nullPodAndClusterAndZone() { Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;isDisaggregated=false"); + dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",null); dsInfos.put("podId",null); dsInfos.put("clusterId", null); @@ -252,7 +276,7 @@ public void testInitialize_clusterNotKVM() { when(_clusterDao.findById(2L)).thenReturn(clusterVO); Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;isDisaggregated=false"); + dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 2L); @@ -276,7 +300,7 @@ public void testInitialize_clusterNotKVM() { @Test public void testInitialize_unexpectedDetailKey() { Map dsInfos = new HashMap<>(); - dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;isDisaggregated=false;unexpectedKey=unexpectedValue"); + dsInfos.put("url", "username=testUser;password=testPassword;svmName=testSVM;protocol=NFS3;managementLIF=192.168.1.1;unexpectedKey=unexpectedValue"); dsInfos.put("zoneId",1L); dsInfos.put("podId",1L); dsInfos.put("clusterId", 1L); From 1d1f596376c4bfcc924c95bdeab13baa7b04f3c5 Mon Sep 17 00:00:00 2001 From: piyush5netapp <91685498+piyush5netapp@users.noreply.github.com> Date: Mon, 2 Feb 2026 10:13:18 +0530 Subject: [PATCH 030/271] UTs for NFS storage pool creation code (#29) Co-authored-by: Srivastava, Piyush --- .../OntapPrimaryDatastoreLifecycleTest.java | 467 ++++++++++ .../storage/service/StorageStrategyTest.java | 807 ++++++++++++++++++ 2 files changed, 1274 insertions(+) create mode 100644 plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/StorageStrategyTest.java diff --git a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java index 789615a9f43b..dca90ada0f94 100644 --- a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java +++ b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java @@ -32,15 +32,34 @@ import com.cloud.dc.dao.ClusterDao; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.dc.ClusterVO; +import com.cloud.host.HostVO; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.StorageManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.service.model.AccessGroup; +import com.cloud.hypervisor.Hypervisor; import java.util.Map; +import java.util.List; +import java.util.ArrayList; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.withSettings; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; import java.util.HashMap; import org.apache.cloudstack.storage.provider.StorageProviderFactory; import org.apache.cloudstack.storage.service.StorageStrategy; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.cloudstack.storage.utils.Utility; @ExtendWith(MockitoExtension.class) @@ -58,8 +77,33 @@ public class OntapPrimaryDatastoreLifecycleTest { @Mock private PrimaryDataStoreHelper _dataStoreHelper; + @Mock + private ResourceManager _resourceMgr; + + @Mock + private StorageManager _storageMgr; + + @Mock + private StoragePoolDetailsDao storagePoolDetailsDao; + + // Mock object that implements both DataStore and PrimaryDataStoreInfo + // This is needed because attachCluster(DataStore) casts DataStore to PrimaryDataStoreInfo internally + private DataStore dataStore; + + @Mock + private ClusterScope clusterScope; + + @Mock + private ZoneScope zoneScope; + + private List mockHosts; + private Map poolDetails; + @BeforeEach void setUp() { + // Create a mock that implements both DataStore and PrimaryDataStoreInfo interfaces + dataStore = Mockito.mock(DataStore.class, withSettings() + .extraInterfaces(PrimaryDataStoreInfo.class)); ClusterVO clusterVO = new ClusterVO(1L, 1L, "clusterName"); clusterVO.setHypervisorType("KVM"); @@ -73,6 +117,26 @@ void setUp() { volume.setName("testVolume"); when(storageStrategy.createStorageVolume(any(), any())).thenReturn(volume); + // Setup for attachCluster tests + // Configure dataStore mock with necessary methods (works for both DataStore and PrimaryDataStoreInfo) + when(dataStore.getId()).thenReturn(1L); + when(((PrimaryDataStoreInfo) dataStore).getClusterId()).thenReturn(1L); + mockHosts = new ArrayList<>(); + HostVO host1 = new HostVO("host1-guid"); + host1.setPrivateIpAddress("192.168.1.10"); + host1.setClusterId(1L); + HostVO host2 = new HostVO("host2-guid"); + host2.setPrivateIpAddress("192.168.1.11"); + host2.setClusterId(1L); + mockHosts.add(host1); + mockHosts.add(host2); + poolDetails = new HashMap<>(); + poolDetails.put("username", "admin"); + poolDetails.put("password", "password"); + poolDetails.put("svmName", "svm1"); + poolDetails.put("protocol", "NFS3"); + poolDetails.put("managementLIF", "192.168.1.100"); + poolDetails.put("isDisaggregated", "false"); } @Test @@ -321,4 +385,407 @@ public void testInitialize_unexpectedDetailKey() { assertTrue(ex.getMessage().contains("Unexpected ONTAP detail key in URL")); } + // ========== attachCluster Tests ========== + + @Test + public void testAttachCluster_positive() throws Exception { + // Setup + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock successful host connections + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify + assertTrue(result, "attachCluster should return true on success"); + verify(_resourceMgr, times(1)) + .getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()); + verify(storagePoolDetailsDao, times(1)).listDetailsKeyPairs(1L); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_withSingleHost() throws Exception { + // Setup - only one host in cluster + List singleHost = new ArrayList<>(); + singleHost.add(mockHosts.get(0)); + + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(singleHost); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify + assertTrue(result, "attachCluster should return true with single host"); + verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_withMultipleHosts() throws Exception { + // Setup - add more hosts + HostVO host3 = new HostVO("host3-guid"); + host3.setPrivateIpAddress("192.168.1.12"); + host3.setClusterId(1L); + mockHosts.add(host3); + + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify + assertTrue(result, "attachCluster should return true with multiple hosts"); + verify(_storageMgr, times(3)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_hostConnectionFailure() throws Exception { + // Setup + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock host connection failure for first host + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())) + .thenThrow(new CloudRuntimeException("Connection failed")); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify + assertFalse(result, "attachCluster should return false on host connection failure"); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + // _dataStoreHelper.attachCluster should NOT be called due to early return + verify(_dataStoreHelper, times(0)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_emptyHostList() throws Exception { + // Setup - no hosts in cluster + List emptyHosts = new ArrayList<>(); + + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(emptyHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify + assertTrue(result, "attachCluster should return true even with no hosts"); + verify(_storageMgr, times(0)).connectHostToSharedPool(any(HostVO.class), anyLong()); + verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_secondHostConnectionFails() throws Exception { + // Setup + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock: first host succeeds, second host fails + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())) + .thenReturn(true) + .thenThrow(new CloudRuntimeException("Connection failed")); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify + assertFalse(result, "attachCluster should return false when any host connection fails"); + verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(0)).attachCluster(any(DataStore.class)); + } + } + + @Test + public void testAttachCluster_createAccessGroupCalled() throws Exception { + // Setup + when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any())) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachCluster( + dataStore, clusterScope); + + // Verify - createAccessGroup is called with correct AccessGroup structure + assertTrue(result); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + } + } + + // ========== attachZone Tests ========== + + @Test + public void testAttachZone_positive() throws Exception { + // Setup + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock successful host connections + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertTrue(result, "attachZone should return true on success"); + verify(_resourceMgr, times(1)) + .getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM)); + verify(storagePoolDetailsDao, times(1)).listDetailsKeyPairs(1L); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_withSingleHost() throws Exception { + // Setup - only one host in zone + List singleHost = new ArrayList<>(); + singleHost.add(mockHosts.get(0)); + + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(singleHost); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertTrue(result, "attachZone should return true with single host"); + verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_withMultipleHosts() throws Exception { + // Setup - add more hosts + HostVO host3 = new HostVO("host3-guid"); + host3.setPrivateIpAddress("192.168.1.12"); + host3.setClusterId(1L); + mockHosts.add(host3); + + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertTrue(result, "attachZone should return true with multiple hosts"); + verify(_storageMgr, times(3)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_hostConnectionFailure() throws Exception { + // Setup + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock host connection failure for first host + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())) + .thenThrow(new CloudRuntimeException("Connection failed")); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertFalse(result, "attachZone should return false on host connection failure"); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + // _dataStoreHelper.attachZone should NOT be called due to early return + verify(_dataStoreHelper, times(0)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_emptyHostList() throws Exception { + // Setup - no hosts in zone + List emptyHosts = new ArrayList<>(); + + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(emptyHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertTrue(result, "attachZone should return true even with no hosts"); + verify(_storageMgr, times(0)).connectHostToSharedPool(any(HostVO.class), anyLong()); + verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_secondHostConnectionFails() throws Exception { + // Setup + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + + // Mock: first host succeeds, second host fails + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())) + .thenReturn(true) + .thenThrow(new CloudRuntimeException("Connection failed")); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify + assertFalse(result, "attachZone should return false when any host connection fails"); + verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L)); + verify(_dataStoreHelper, times(0)).attachZone(any(DataStore.class)); + } + } + + @Test + public void testAttachZone_createAccessGroupCalled() throws Exception { + // Setup + when(zoneScope.getScopeId()).thenReturn(1L); + when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM))) + .thenReturn(mockHosts); + when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails); + when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore); + + try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) { + utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any())) + .thenReturn(storageStrategy); + when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null); + when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true); + + // Execute + boolean result = ontapPrimaryDatastoreLifecycle.attachZone( + dataStore, zoneScope, Hypervisor.HypervisorType.KVM); + + // Verify - createAccessGroup is called with correct AccessGroup structure + assertTrue(result); + verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class)); + } + } + } diff --git a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/StorageStrategyTest.java b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/StorageStrategyTest.java new file mode 100644 index 000000000000..d298a6afe937 --- /dev/null +++ b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/service/StorageStrategyTest.java @@ -0,0 +1,807 @@ + /* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.service; + +import com.cloud.utils.exception.CloudRuntimeException; +import feign.FeignException; +import org.apache.cloudstack.storage.feign.FeignClientFactory; +import org.apache.cloudstack.storage.feign.client.AggregateFeignClient; +import org.apache.cloudstack.storage.feign.client.JobFeignClient; +import org.apache.cloudstack.storage.feign.client.NetworkFeignClient; +import org.apache.cloudstack.storage.feign.client.SANFeignClient; +import org.apache.cloudstack.storage.feign.client.SvmFeignClient; +import org.apache.cloudstack.storage.feign.client.VolumeFeignClient; +import org.apache.cloudstack.storage.feign.model.Aggregate; +import org.apache.cloudstack.storage.feign.model.IpInterface; +import org.apache.cloudstack.storage.feign.model.IscsiService; +import org.apache.cloudstack.storage.feign.model.Job; +import org.apache.cloudstack.storage.feign.model.OntapStorage; +import org.apache.cloudstack.storage.feign.model.Svm; +import org.apache.cloudstack.storage.feign.model.Volume; +import org.apache.cloudstack.storage.feign.model.response.JobResponse; +import org.apache.cloudstack.storage.feign.model.response.OntapResponse; +import org.apache.cloudstack.storage.service.model.ProtocolType; +import org.apache.cloudstack.storage.utils.Constants; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.mockito.junit.jupiter.MockitoSettings; +import org.mockito.quality.Strictness; + +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +@ExtendWith(MockitoExtension.class) +@MockitoSettings(strictness = Strictness.LENIENT) +public class StorageStrategyTest { + + @Mock + private FeignClientFactory feignClientFactory; + + @Mock + private AggregateFeignClient aggregateFeignClient; + + @Mock + private VolumeFeignClient volumeFeignClient; + + @Mock + private SvmFeignClient svmFeignClient; + + @Mock + private JobFeignClient jobFeignClient; + + @Mock + private NetworkFeignClient networkFeignClient; + + @Mock + private SANFeignClient sanFeignClient; + + private TestableStorageStrategy storageStrategy; + + // Concrete implementation for testing abstract class + private static class TestableStorageStrategy extends StorageStrategy { + public TestableStorageStrategy(OntapStorage ontapStorage, + AggregateFeignClient aggregateFeignClient, + VolumeFeignClient volumeFeignClient, + SvmFeignClient svmFeignClient, + JobFeignClient jobFeignClient, + NetworkFeignClient networkFeignClient, + SANFeignClient sanFeignClient) { + super(ontapStorage); + // Use reflection to replace the private Feign client fields with mocked ones + injectMockedClient("aggregateFeignClient", aggregateFeignClient); + injectMockedClient("volumeFeignClient", volumeFeignClient); + injectMockedClient("svmFeignClient", svmFeignClient); + injectMockedClient("jobFeignClient", jobFeignClient); + injectMockedClient("networkFeignClient", networkFeignClient); + injectMockedClient("sanFeignClient", sanFeignClient); + } + + private void injectMockedClient(String fieldName, Object mockedClient) { + try { + Field field = StorageStrategy.class.getDeclaredField(fieldName); + field.setAccessible(true); + field.set(this, mockedClient); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException("Failed to inject mocked client: " + fieldName, e); + } + } + + @Override + public org.apache.cloudstack.storage.service.model.CloudStackVolume createCloudStackVolume( + org.apache.cloudstack.storage.service.model.CloudStackVolume cloudstackVolume) { + return null; + } + + @Override + org.apache.cloudstack.storage.service.model.CloudStackVolume updateCloudStackVolume( + org.apache.cloudstack.storage.service.model.CloudStackVolume cloudstackVolume) { + return null; + } + + @Override + void deleteCloudStackVolume(org.apache.cloudstack.storage.service.model.CloudStackVolume cloudstackVolume) { + } + + @Override + org.apache.cloudstack.storage.service.model.CloudStackVolume getCloudStackVolume( + org.apache.cloudstack.storage.service.model.CloudStackVolume cloudstackVolume) { + return null; + } + + @Override + public org.apache.cloudstack.storage.service.model.AccessGroup createAccessGroup( + org.apache.cloudstack.storage.service.model.AccessGroup accessGroup) { + return null; + } + + @Override + public void deleteAccessGroup(org.apache.cloudstack.storage.service.model.AccessGroup accessGroup) { + } + + @Override + org.apache.cloudstack.storage.service.model.AccessGroup updateAccessGroup( + org.apache.cloudstack.storage.service.model.AccessGroup accessGroup) { + return null; + } + + @Override + org.apache.cloudstack.storage.service.model.AccessGroup getAccessGroup( + org.apache.cloudstack.storage.service.model.AccessGroup accessGroup) { + return null; + } + + @Override + void enableLogicalAccess(Map values) { + } + + @Override + void disableLogicalAccess(Map values) { + } + } + + @BeforeEach + void setUp() { + // Create OntapStorage using constructor (immutable object) + OntapStorage ontapStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", null, ProtocolType.NFS3, false); + + // Note: In real implementation, StorageStrategy constructor creates Feign clients + // For testing, we'll need to mock the FeignClientFactory behavior + storageStrategy = new TestableStorageStrategy(ontapStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + } + + // ========== connect() Tests ========== + + @Test + public void testConnect_positive() { + // Setup + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState(Constants.RUNNING); + svm.setNfsEnabled(true); + + Aggregate aggregate = new Aggregate(); + aggregate.setName("aggr1"); + aggregate.setUuid("aggr-uuid-1"); + svm.setAggregates(List.of(aggregate)); + + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + // Execute + boolean result = storageStrategy.connect(); + + // Verify + assertTrue(result, "connect() should return true on success"); + verify(svmFeignClient, times(1)).getSvmResponse(anyMap(), anyString()); + } + + @Test + public void testConnect_svmNotFound() { + // Setup + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(new ArrayList<>()); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, () -> storageStrategy.connect()); + assertTrue(ex.getMessage().contains("No SVM found")); + } + + @Test + public void testConnect_svmNotRunning() { + // Setup + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState("stopped"); + svm.setNfsEnabled(true); + + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, () -> storageStrategy.connect()); + assertTrue(ex.getMessage().contains("not in running state")); + } + + @Test + public void testConnect_nfsNotEnabled() { + // Setup + // Note: Protocol validation is currently broken in StorageStrategy (enum vs string comparison) + // so this test verifies connection succeeds even when NFS is disabled + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState(Constants.RUNNING); + svm.setNfsEnabled(false); + + Aggregate aggregate = new Aggregate(); + aggregate.setName("aggr1"); + aggregate.setUuid("aggr-uuid-1"); + svm.setAggregates(List.of(aggregate)); + + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + // Execute & Verify - connection succeeds because protocol check doesn't work + boolean result = storageStrategy.connect(); + assertTrue(result, "connect() should succeed"); + } + + @Test + public void testConnect_iscsiNotEnabled() { + // Setup - recreate with iSCSI protocol + // Note: Protocol validation is currently broken in StorageStrategy (enum vs string comparison) + // so this test verifies connection succeeds even when iSCSI is disabled + OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", null, ProtocolType.ISCSI, false); + storageStrategy = new TestableStorageStrategy(iscsiStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState(Constants.RUNNING); + svm.setIscsiEnabled(false); + + Aggregate aggregate = new Aggregate(); + aggregate.setName("aggr1"); + aggregate.setUuid("aggr-uuid-1"); + svm.setAggregates(List.of(aggregate)); + + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + // Execute & Verify - connection succeeds because protocol check doesn't work + boolean result = storageStrategy.connect(); + assertTrue(result, "connect() should succeed"); + } + + @Test + public void testConnect_noAggregates() { + // Setup + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState(Constants.RUNNING); + svm.setNfsEnabled(true); + svm.setAggregates(new ArrayList<>()); + + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, () -> storageStrategy.connect()); + assertTrue(ex.getMessage().contains("No aggregates are assigned")); + } + + @Test + public void testConnect_nullSvmResponse() { + // Setup + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(null); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, () -> storageStrategy.connect()); + assertTrue(ex.getMessage().contains("No SVM found")); + } + + // ========== createStorageVolume() Tests ========== + + @Test + public void testCreateStorageVolume_positive() throws Exception { + // Setup - First connect to populate aggregates + setupSuccessfulConnect(); + storageStrategy.connect(); + + // Setup aggregate details + Aggregate aggregateDetail = mock(Aggregate.class); + when(aggregateDetail.getName()).thenReturn("aggr1"); + when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1"); + when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE); + when(aggregateDetail.getSpace()).thenReturn(mock(Aggregate.AggregateSpace.class)); // Mock non-null space + when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(10000000000.0); + + when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))) + .thenReturn(aggregateDetail); + + // Setup job response + Job job = new Job(); + job.setUuid("job-uuid-1"); + JobResponse jobResponse = new JobResponse(); + jobResponse.setJob(job); + + when(volumeFeignClient.createVolumeWithJob(anyString(), any(Volume.class))) + .thenReturn(jobResponse); + + // Setup job polling + Job completedJob = new Job(); + completedJob.setUuid("job-uuid-1"); + completedJob.setState(Constants.JOB_SUCCESS); + when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1"))) + .thenReturn(completedJob); + + // Setup volume retrieval after creation + Volume createdVolume = new Volume(); + createdVolume.setName("test-volume"); + createdVolume.setUuid("vol-uuid-1"); + OntapResponse volumeResponse = new OntapResponse<>(); + volumeResponse.setRecords(List.of(createdVolume)); + + when(volumeFeignClient.getAllVolumes(anyString(), anyMap())) + .thenReturn(volumeResponse); + when(volumeFeignClient.getVolume(anyString(), anyMap())) + .thenReturn(volumeResponse); + + // Execute + Volume result = storageStrategy.createStorageVolume("test-volume", 5000000000L); + + // Verify + assertNotNull(result); + assertEquals("test-volume", result.getName()); + assertEquals("vol-uuid-1", result.getUuid()); + verify(volumeFeignClient, times(1)).createVolumeWithJob(anyString(), any(Volume.class)); + verify(jobFeignClient, atLeastOnce()).getJobByUUID(anyString(), eq("job-uuid-1")); + } + + @Test + public void testCreateStorageVolume_invalidSize() { + // Setup + setupSuccessfulConnect(); + storageStrategy.connect(); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", -1L)); + assertTrue(ex.getMessage().contains("Invalid volume size")); + } + + @Test + public void testCreateStorageVolume_nullSize() { + // Setup + setupSuccessfulConnect(); + storageStrategy.connect(); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", null)); + assertTrue(ex.getMessage().contains("Invalid volume size")); + } + + @Test + public void testCreateStorageVolume_noAggregates() { + // Execute & Verify - without calling connect first + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", 5000000000L)); + assertTrue(ex.getMessage().contains("No aggregates available")); + } + + @Test + public void testCreateStorageVolume_aggregateNotOnline() throws Exception { + // Setup + setupSuccessfulConnect(); + storageStrategy.connect(); + + Aggregate aggregateDetail = mock(Aggregate.class); + when(aggregateDetail.getName()).thenReturn("aggr1"); + when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1"); + when(aggregateDetail.getState()).thenReturn(null); // null state to simulate offline + + when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))) + .thenReturn(aggregateDetail); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", 5000000000L)); + assertTrue(ex.getMessage().contains("No suitable aggregates found")); + } + + @Test + public void testCreateStorageVolume_insufficientSpace() throws Exception { + // Setup + setupSuccessfulConnect(); + storageStrategy.connect(); + + Aggregate aggregateDetail = mock(Aggregate.class); + when(aggregateDetail.getName()).thenReturn("aggr1"); + when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1"); + when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE); + when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(1000000.0); // Only 1MB available + + when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))) + .thenReturn(aggregateDetail); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", 5000000000L)); // Request 5GB + assertTrue(ex.getMessage().contains("No suitable aggregates found")); + } + + @Test + public void testCreateStorageVolume_jobFailed() throws Exception { + // Setup + setupSuccessfulConnect(); + storageStrategy.connect(); + + setupAggregateForVolumeCreation(); + + Job job = new Job(); + job.setUuid("job-uuid-1"); + JobResponse jobResponse = new JobResponse(); + jobResponse.setJob(job); + + when(volumeFeignClient.createVolumeWithJob(anyString(), any(Volume.class))) + .thenReturn(jobResponse); + + // Setup failed job + Job failedJob = new Job(); + failedJob.setUuid("job-uuid-1"); + failedJob.setState(Constants.JOB_FAILURE); + failedJob.setMessage("Volume creation failed"); + when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1"))) + .thenReturn(failedJob); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", 5000000000L)); + assertTrue(ex.getMessage().contains("failed") || ex.getMessage().contains("Job failed")); + } + + @Test + public void testCreateStorageVolume_volumeNotFoundAfterCreation() throws Exception { + // Setup + setupSuccessfulConnect(); + storageStrategy.connect(); + setupAggregateForVolumeCreation(); + setupSuccessfulJobCreation(); + + // Setup empty volume response + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + when(volumeFeignClient.getAllVolumes(anyString(), anyMap())) + .thenReturn(emptyResponse); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.createStorageVolume("test-volume", 5000000000L)); + assertTrue(ex.getMessage() != null && ex.getMessage().contains("not found after creation")); + } + + // ========== deleteStorageVolume() Tests ========== + + @Test + public void testDeleteStorageVolume_positive() throws Exception { + // Setup + Volume volume = new Volume(); + volume.setName("test-volume"); + volume.setUuid("vol-uuid-1"); + + Job job = new Job(); + job.setUuid("job-uuid-1"); + JobResponse jobResponse = new JobResponse(); + jobResponse.setJob(job); + + when(volumeFeignClient.deleteVolume(anyString(), eq("vol-uuid-1"))) + .thenReturn(jobResponse); + + Job completedJob = new Job(); + completedJob.setUuid("job-uuid-1"); + completedJob.setState(Constants.JOB_SUCCESS); + when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1"))) + .thenReturn(completedJob); + + // Execute + storageStrategy.deleteStorageVolume(volume); + + // Verify + verify(volumeFeignClient, times(1)).deleteVolume(anyString(), eq("vol-uuid-1")); + verify(jobFeignClient, atLeastOnce()).getJobByUUID(anyString(), eq("job-uuid-1")); + } + + @Test + public void testDeleteStorageVolume_jobFailed() throws Exception { + // Setup + Volume volume = new Volume(); + volume.setName("test-volume"); + volume.setUuid("vol-uuid-1"); + + Job job = new Job(); + job.setUuid("job-uuid-1"); + JobResponse jobResponse = new JobResponse(); + jobResponse.setJob(job); + + when(volumeFeignClient.deleteVolume(anyString(), eq("vol-uuid-1"))) + .thenReturn(jobResponse); + + Job failedJob = new Job(); + failedJob.setUuid("job-uuid-1"); + failedJob.setState(Constants.JOB_FAILURE); + failedJob.setMessage("Deletion failed"); + when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1"))) + .thenReturn(failedJob); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.deleteStorageVolume(volume)); + assertTrue(ex.getMessage().contains("Job failed")); + } + + @Test + public void testDeleteStorageVolume_feignException() { + // Setup + Volume volume = new Volume(); + volume.setName("test-volume"); + volume.setUuid("vol-uuid-1"); + + when(volumeFeignClient.deleteVolume(anyString(), eq("vol-uuid-1"))) + .thenThrow(mock(FeignException.FeignClientException.class)); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.deleteStorageVolume(volume)); + assertTrue(ex.getMessage().contains("Failed to delete volume")); + } + + // ========== getStoragePath() Tests ========== + + @Test + public void testGetStoragePath_iscsi() { + // Setup - recreate with iSCSI protocol + OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", null, ProtocolType.ISCSI, false); + storageStrategy = new TestableStorageStrategy(iscsiStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + + IscsiService.IscsiServiceTarget target = new IscsiService.IscsiServiceTarget(); + target.setName("iqn.1992-08.com.netapp:sn.123456:vs.1"); + + IscsiService iscsiService = new IscsiService(); + iscsiService.setTarget(target); + + OntapResponse iscsiResponse = new OntapResponse<>(); + iscsiResponse.setRecords(List.of(iscsiService)); + + when(sanFeignClient.getIscsiServices(anyString(), anyMap())) + .thenReturn(iscsiResponse); + + // Execute + String result = storageStrategy.getStoragePath(); + + // Verify + assertNotNull(result); + assertEquals("iqn.1992-08.com.netapp:sn.123456:vs.1", result); + verify(sanFeignClient, times(1)).getIscsiServices(anyString(), anyMap()); + } + + @Test + public void testGetStoragePath_iscsi_noService() { + // Setup - recreate with iSCSI protocol + OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", null, ProtocolType.ISCSI, false); + storageStrategy = new TestableStorageStrategy(iscsiStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + when(sanFeignClient.getIscsiServices(anyString(), anyMap())) + .thenReturn(emptyResponse); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.getStoragePath()); + assertTrue(ex.getMessage().contains("No iSCSI service found")); + } + + @Test + public void testGetStoragePath_iscsi_noTargetIqn() { + // Setup - recreate with iSCSI protocol + OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", null, ProtocolType.ISCSI, false); + storageStrategy = new TestableStorageStrategy(iscsiStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + + IscsiService iscsiService = new IscsiService(); + iscsiService.setTarget(null); + + OntapResponse iscsiResponse = new OntapResponse<>(); + iscsiResponse.setRecords(List.of(iscsiService)); + + when(sanFeignClient.getIscsiServices(anyString(), anyMap())) + .thenReturn(iscsiResponse); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.getStoragePath()); + assertTrue(ex.getMessage().contains("iSCSI target IQN not found")); + } + + // ========== getNetworkInterface() Tests ========== + + @Test + public void testGetNetworkInterface_nfs() { + // Setup + IpInterface.IpInfo ipInfo = new IpInterface.IpInfo(); + ipInfo.setAddress("192.168.1.50"); + + IpInterface ipInterface = new IpInterface(); + ipInterface.setIp(ipInfo); + + OntapResponse interfaceResponse = new OntapResponse<>(); + interfaceResponse.setRecords(List.of(ipInterface)); + + when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap())) + .thenReturn(interfaceResponse); + + // Execute + String result = storageStrategy.getNetworkInterface(); + + // Verify + assertNotNull(result); + assertEquals("192.168.1.50", result); + verify(networkFeignClient, times(1)).getNetworkIpInterfaces(anyString(), anyMap()); + } + + @Test + public void testGetNetworkInterface_iscsi() { + // Setup - recreate with iSCSI protocol + OntapStorage iscsiStorage = new OntapStorage("admin", "password", "192.168.1.100", + "svm1", null, ProtocolType.ISCSI, false); + storageStrategy = new TestableStorageStrategy(iscsiStorage, + aggregateFeignClient, volumeFeignClient, svmFeignClient, + jobFeignClient, networkFeignClient, sanFeignClient); + + IpInterface.IpInfo ipInfo = new IpInterface.IpInfo(); + ipInfo.setAddress("192.168.1.51"); + + IpInterface ipInterface = new IpInterface(); + ipInterface.setIp(ipInfo); + + OntapResponse interfaceResponse = new OntapResponse<>(); + interfaceResponse.setRecords(List.of(ipInterface)); + + when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap())) + .thenReturn(interfaceResponse); + + // Execute + String result = storageStrategy.getNetworkInterface(); + + // Verify + assertNotNull(result); + assertEquals("192.168.1.51", result); + } + + @Test + public void testGetNetworkInterface_noInterfaces() { + // Setup + OntapResponse emptyResponse = new OntapResponse<>(); + emptyResponse.setRecords(new ArrayList<>()); + + when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap())) + .thenReturn(emptyResponse); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.getNetworkInterface()); + assertTrue(ex.getMessage().contains("No network interfaces found")); + } + + @Test + public void testGetNetworkInterface_feignException() { + // Setup + when(networkFeignClient.getNetworkIpInterfaces(anyString(), anyMap())) + .thenThrow(mock(FeignException.FeignClientException.class)); + + // Execute & Verify + Exception ex = assertThrows(CloudRuntimeException.class, + () -> storageStrategy.getNetworkInterface()); + assertTrue(ex.getMessage().contains("Failed to retrieve network interfaces")); + } + + // ========== Helper Methods ========== + + private void setupSuccessfulConnect() { + Svm svm = new Svm(); + svm.setName("svm1"); + svm.setState(Constants.RUNNING); + svm.setNfsEnabled(true); + + Aggregate aggregate = new Aggregate(); + aggregate.setName("aggr1"); + aggregate.setUuid("aggr-uuid-1"); + svm.setAggregates(List.of(aggregate)); + + OntapResponse svmResponse = new OntapResponse<>(); + svmResponse.setRecords(List.of(svm)); + + when(svmFeignClient.getSvmResponse(anyMap(), anyString())).thenReturn(svmResponse); + } + + private void setupAggregateForVolumeCreation() { + Aggregate aggregateDetail = mock(Aggregate.class); + when(aggregateDetail.getName()).thenReturn("aggr1"); + when(aggregateDetail.getUuid()).thenReturn("aggr-uuid-1"); + when(aggregateDetail.getState()).thenReturn(Aggregate.StateEnum.ONLINE); + when(aggregateDetail.getSpace()).thenReturn(mock(Aggregate.AggregateSpace.class)); // Mock non-null space + when(aggregateDetail.getAvailableBlockStorageSpace()).thenReturn(10000000000.0); + + when(aggregateFeignClient.getAggregateByUUID(anyString(), eq("aggr-uuid-1"))) + .thenReturn(aggregateDetail); + } + + private void setupSuccessfulJobCreation() throws InterruptedException { + Job job = new Job(); + job.setUuid("job-uuid-1"); + JobResponse jobResponse = new JobResponse(); + jobResponse.setJob(job); + + when(volumeFeignClient.createVolumeWithJob(anyString(), any(Volume.class))) + .thenReturn(jobResponse); + + Job completedJob = new Job(); + completedJob.setUuid("job-uuid-1"); + completedJob.setState(Constants.JOB_SUCCESS); + when(jobFeignClient.getJobByUUID(anyString(), eq("job-uuid-1"))) + .thenReturn(completedJob); + + Volume createdVolume = new Volume(); + createdVolume.setName("test-volume"); + createdVolume.setUuid("vol-uuid-1"); + OntapResponse volumeResponse = new OntapResponse<>(); + volumeResponse.setRecords(List.of(createdVolume)); + + when(volumeFeignClient.getAllVolumes(anyString(), anyMap())) + .thenReturn(volumeResponse); + when(volumeFeignClient.getVolume(anyString(), anyMap())) + .thenReturn(volumeResponse); + } +} From 0e3a51ec00b77506f99b0523027d7ea588136a3e Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Fri, 7 Nov 2025 14:01:11 +0530 Subject: [PATCH 031/271] uefi property typo (#11929) --- .../kvm/resource/LibvirtComputingResource.java | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index fbfe3ef20eb0..0c97e93b64bb 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -3293,25 +3293,25 @@ private void configureGuestIfUefiEnabled(boolean isSecureBoot, String bootMode, setGuestLoader(bootMode, SECURE, guest, GuestDef.GUEST_LOADER_SECURE); setGuestLoader(bootMode, LEGACY, guest, GuestDef.GUEST_LOADER_LEGACY); - if (isUefiPropertieNotNull(GuestDef.GUEST_NVRAM_PATH)) { + if (isUefiPropertyNotNull(GuestDef.GUEST_NVRAM_PATH)) { guest.setNvram(uefiProperties.getProperty(GuestDef.GUEST_NVRAM_PATH)); } - if (isSecureBoot && isUefiPropertieNotNull(GuestDef.GUEST_NVRAM_TEMPLATE_SECURE) && SECURE.equalsIgnoreCase(bootMode)) { + if (isSecureBoot && isUefiPropertyNotNull(GuestDef.GUEST_NVRAM_TEMPLATE_SECURE) && SECURE.equalsIgnoreCase(bootMode)) { guest.setNvramTemplate(uefiProperties.getProperty(GuestDef.GUEST_NVRAM_TEMPLATE_SECURE)); - } else if (isUefiPropertieNotNull(GuestDef.GUEST_NVRAM_TEMPLATE_LEGACY)) { + } else if (isUefiPropertyNotNull(GuestDef.GUEST_NVRAM_TEMPLATE_LEGACY)) { guest.setNvramTemplate(uefiProperties.getProperty(GuestDef.GUEST_NVRAM_TEMPLATE_LEGACY)); } } - private void setGuestLoader(String bootMode, String mode, GuestDef guest, String propertie) { - if (isUefiPropertieNotNull(propertie) && mode.equalsIgnoreCase(bootMode)) { - guest.setLoader(uefiProperties.getProperty(propertie)); + private void setGuestLoader(String bootMode, String mode, GuestDef guest, String property) { + if (isUefiPropertyNotNull(property) && mode.equalsIgnoreCase(bootMode)) { + guest.setLoader(uefiProperties.getProperty(property)); } } - private boolean isUefiPropertieNotNull(String propertie) { - return uefiProperties.getProperty(propertie) != null; + private boolean isUefiPropertyNotNull(String property) { + return uefiProperties.getProperty(property) != null; } public boolean isGuestAarch64() { From 5ef9a222fa678c7c78584b1f2f0ba318c8a432f5 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Fri, 7 Nov 2025 14:07:11 +0530 Subject: [PATCH 032/271] DB setup: support db schema creation (with --schema-only) without force recreate option (#12004) --- setup/bindir/cloud-setup-databases.in | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/setup/bindir/cloud-setup-databases.in b/setup/bindir/cloud-setup-databases.in index 8c453edda447..eb68c1e0181e 100755 --- a/setup/bindir/cloud-setup-databases.in +++ b/setup/bindir/cloud-setup-databases.in @@ -199,6 +199,10 @@ for full help self.info("No mysql root user specified, will not create Cloud DB schema\n", None) return + if self.areCloudDatabasesCreated() and not self.options.schemaonly and not self.options.forcerecreate: + self.errorAndExit("Aborting script as the databases (cloud, cloud_usage) already exist.\n" \ + "Please use the --force-recreate parameter if you want to recreate the databases and schemas, or use --schema-only if you only want to create the schemas only.") + replacements = ( ("CREATE USER cloud identified by 'cloud';", "CREATE USER %s@`localhost` identified by '%s'; CREATE USER %s@`%%` identified by '%s';"%( @@ -239,10 +243,6 @@ for full help ("DROP USER 'cloud'@'%' ;", "DO NULL;") ) - if self.areCloudDatabasesCreated() and not self.options.forcerecreate: - self.errorAndExit("Aborting script as the databases (cloud, cloud_usage) already exist.\n" \ - "Please use the --force-recreate parameter if you want to recreate the schemas.") - scriptsToRun = ["create-database","create-schema", "create-database-premium","create-schema-premium"] if self.options.schemaonly: scriptsToRun = ["create-schema", "create-schema-premium"] @@ -617,11 +617,11 @@ for example: self.parser.add_option("-d", "--deploy-as", action="store", type="string", dest="rootcreds", default="", help="Colon-separated user name and password of a MySQL user with administrative privileges") self.parser.add_option("-s", "--schema-only", action="store_true", dest="schemaonly", default=False, - help="Creates the db schema without having to pass root credentials - " \ + help="Creates the db schema only without having to pass root credentials - " \ "Please note: The databases (cloud, cloud_usage) and user (cloud) has to be configured " \ "manually prior to running this script when using this flag.") self.parser.add_option("--force-recreate", action="store_true", dest="forcerecreate", default=False, - help="Force recreation of the existing DB schemas. This option is disabled by default." \ + help="Force recreation of the existing DB databases and schemas. This option is disabled by default." \ "Please note: The databases (cloud, cloud_usage) and its tables data will be lost and recreated.") self.parser.add_option("-a", "--auto", action="store", type="string", dest="serversetup", default="", From bb71248ad9b0c82213c6aac0525890ed4b7beee5 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Fri, 7 Nov 2025 19:24:02 +0530 Subject: [PATCH 033/271] Enable UEFI on KVM hosts (by default), and configure with some default settings (#11740) --- agent/conf/uefi.properties.in | 24 +++++++++++++++++++ debian/cloudstack-agent.install | 1 + debian/cloudstack-agent.postinst | 2 +- debian/control | 2 +- packaging/debian/replace.properties | 5 ++++ packaging/el8/cloud.spec | 13 +++++++++- packaging/el8/replace.properties | 5 ++++ pom.xml | 4 ++++ .../cloud/server/ManagementServerImpl.java | 5 +++- systemvm/systemvm-agent-descriptor.xml | 1 + 10 files changed, 58 insertions(+), 4 deletions(-) create mode 100644 agent/conf/uefi.properties.in diff --git a/agent/conf/uefi.properties.in b/agent/conf/uefi.properties.in new file mode 100644 index 000000000000..3c8866f634bc --- /dev/null +++ b/agent/conf/uefi.properties.in @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# Configuration file for UEFI + +guest.nvram.template.legacy=@GUESTNVRAMTEMPLATELEGACY@ +guest.loader.legacy=@GUESTLOADERLEGACY@ +guest.nvram.template.secure=@GUESTNVRAMTEMPLATESECURE@ +guest.loader.secure=@GUESTLOADERSECURE@ +guest.nvram.path=@GUESTNVRAMPATH@ diff --git a/debian/cloudstack-agent.install b/debian/cloudstack-agent.install index 58715e0746ba..0b9e874cb423 100644 --- a/debian/cloudstack-agent.install +++ b/debian/cloudstack-agent.install @@ -16,6 +16,7 @@ # under the License. /etc/cloudstack/agent/agent.properties +/etc/cloudstack/agent/uefi.properties /etc/cloudstack/agent/environment.properties /etc/cloudstack/agent/log4j-cloud.xml /etc/default/cloudstack-agent diff --git a/debian/cloudstack-agent.postinst b/debian/cloudstack-agent.postinst index 758af6e068ff..cd070c2f7853 100755 --- a/debian/cloudstack-agent.postinst +++ b/debian/cloudstack-agent.postinst @@ -23,7 +23,7 @@ case "$1" in configure) OLDCONFDIR="/etc/cloud/agent" NEWCONFDIR="/etc/cloudstack/agent" - CONFFILES="agent.properties log4j.xml log4j-cloud.xml" + CONFFILES="agent.properties uefi.properties log4j.xml log4j-cloud.xml" mkdir -m 0755 -p /usr/share/cloudstack-agent/tmp diff --git a/debian/control b/debian/control index 1292639ef304..78842e38ed27 100644 --- a/debian/control +++ b/debian/control @@ -24,7 +24,7 @@ Description: CloudStack server library Package: cloudstack-agent Architecture: all -Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, rsync, lsb-release, ufw, apparmor, cpu-checker, libvirt-daemon-driver-storage-rbd, sysstat +Depends: ${python:Depends}, ${python3:Depends}, openjdk-17-jre-headless | java17-runtime-headless | java17-runtime | zulu-17, cloudstack-common (= ${source:Version}), lsb-base (>= 9), openssh-client, qemu-kvm (>= 2.5) | qemu-system-x86 (>= 5.2), libvirt-bin (>= 1.3) | libvirt-daemon-system (>= 3.0), iproute2, ebtables, vlan, ipset, python3-libvirt, ethtool, iptables, cryptsetup, rng-tools, rsync, ovmf, swtpm, lsb-release, ufw, apparmor, cpu-checker, libvirt-daemon-driver-storage-rbd, sysstat Recommends: init-system-helpers Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts Description: CloudStack agent diff --git a/packaging/debian/replace.properties b/packaging/debian/replace.properties index 5ea4a03b275d..bd0c14889590 100644 --- a/packaging/debian/replace.properties +++ b/packaging/debian/replace.properties @@ -59,3 +59,8 @@ USAGELOG=/var/log/cloudstack/usage/usage.log USAGESYSCONFDIR=/etc/cloudstack/usage PACKAGE=cloudstack EXTENSIONSDEPLOYMENTMODE=production +GUESTNVRAMTEMPLATELEGACY=/usr/share/OVMF/OVMF_VARS_4M.fd +GUESTLOADERLEGACY=/usr/share/OVMF/OVMF_CODE_4M.fd +GUESTNVRAMTEMPLATESECURE=/usr/share/OVMF/OVMF_VARS_4M.ms.fd +GUESTLOADERSECURE=/usr/share/OVMF/OVMF_CODE_4M.secboot.fd +GUESTNVRAMPATH=/var/lib/libvirt/qemu/nvram/ diff --git a/packaging/el8/cloud.spec b/packaging/el8/cloud.spec index 7e97957473c5..abfab23f7052 100644 --- a/packaging/el8/cloud.spec +++ b/packaging/el8/cloud.spec @@ -115,6 +115,8 @@ Requires: ipset Requires: perl Requires: rsync Requires: cifs-utils +Requires: edk2-ovmf +Requires: swtpm Requires: (python3-libvirt or python3-libvirt-python) Requires: (qemu-img or qemu-tools) Requires: qemu-kvm @@ -356,6 +358,7 @@ install -D packaging/systemd/cloudstack-agent.service ${RPM_BUILD_ROOT}%{_unitdi install -D packaging/systemd/cloudstack-rolling-maintenance@.service ${RPM_BUILD_ROOT}%{_unitdir}/%{name}-rolling-maintenance@.service install -D packaging/systemd/cloudstack-agent.default ${RPM_BUILD_ROOT}%{_sysconfdir}/default/%{name}-agent install -D agent/target/transformed/agent.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/agent.properties +install -D agent/target/transformed/uefi.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/uefi.properties install -D agent/target/transformed/environment.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/environment.properties install -D agent/target/transformed/log4j-cloud.xml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/log4j-cloud.xml install -D agent/target/transformed/cloud-setup-agent ${RPM_BUILD_ROOT}%{_bindir}/%{name}-setup-agent @@ -523,7 +526,7 @@ mkdir -m 0755 -p /usr/share/cloudstack-agent/tmp /usr/bin/systemctl enable cloudstack-rolling-maintenance@p > /dev/null 2>&1 || true /usr/bin/systemctl enable --now rngd > /dev/null 2>&1 || true -# if saved configs from upgrade exist, copy them over +# if saved agent.properties from upgrade exist, copy them over if [ -f "%{_sysconfdir}/cloud.rpmsave/agent/agent.properties" ]; then mv %{_sysconfdir}/%{name}/agent/agent.properties %{_sysconfdir}/%{name}/agent/agent.properties.rpmnew cp -p %{_sysconfdir}/cloud.rpmsave/agent/agent.properties %{_sysconfdir}/%{name}/agent @@ -531,6 +534,14 @@ if [ -f "%{_sysconfdir}/cloud.rpmsave/agent/agent.properties" ]; then mv %{_sysconfdir}/cloud.rpmsave/agent/agent.properties %{_sysconfdir}/cloud.rpmsave/agent/agent.properties.rpmsave fi +# if saved uefi.properties from upgrade exist, copy them over +if [ -f "%{_sysconfdir}/cloud.rpmsave/agent/uefi.properties" ]; then + mv %{_sysconfdir}/%{name}/agent/uefi.properties %{_sysconfdir}/%{name}/agent/uefi.properties.rpmnew + cp -p %{_sysconfdir}/cloud.rpmsave/agent/uefi.properties %{_sysconfdir}/%{name}/agent + # make sure we only do this on the first install of this RPM, don't want to overwrite on a reinstall + mv %{_sysconfdir}/cloud.rpmsave/agent/uefi.properties %{_sysconfdir}/cloud.rpmsave/agent/uefi.properties.rpmsave +fi + systemctl daemon-reload # Print help message diff --git a/packaging/el8/replace.properties b/packaging/el8/replace.properties index a6094b59c73b..a5afab94ff2b 100644 --- a/packaging/el8/replace.properties +++ b/packaging/el8/replace.properties @@ -58,3 +58,8 @@ USAGECLASSPATH= USAGELOG=/var/log/cloudstack/usage/usage.log USAGESYSCONFDIR=/etc/sysconfig EXTENSIONSDEPLOYMENTMODE=production +GUESTNVRAMTEMPLATELEGACY=/usr/share/edk2/ovmf/OVMF_VARS.fd +GUESTLOADERLEGACY=/usr/share/edk2/ovmf/OVMF_CODE.cc.fd +GUESTNVRAMTEMPLATESECURE=/usr/share/edk2/ovmf/OVMF_VARS.secboot.fd +GUESTLOADERSECURE=/usr/share/edk2/ovmf/OVMF_CODE.secboot.fd +GUESTNVRAMPATH=/var/lib/libvirt/qemu/nvram/ diff --git a/pom.xml b/pom.xml index 2a36c1cc4efa..fcf11e357d23 100644 --- a/pom.xml +++ b/pom.xml @@ -1038,15 +1038,19 @@ dist/console-proxy/js/jquery.js engine/schema/dist/** plugins/hypervisors/hyperv/conf/agent.properties + plugins/hypervisors/hyperv/conf/uefi.properties plugins/hypervisors/hyperv/DotNet/ServerResource/** scripts/installer/windows/acs_license.rtf scripts/vm/systemvm/id_rsa.cloud services/console-proxy/server/conf/agent.properties + services/console-proxy/server/conf/uefi.properties services/console-proxy/server/conf/environment.properties services/console-proxy/server/js/jquery.js services/secondary-storage/conf/agent.properties + services/secondary-storage/conf/uefi.properties services/secondary-storage/conf/environment.properties systemvm/agent/conf/agent.properties + systemvm/agent/conf/uefi.properties systemvm/agent/conf/environment.properties systemvm/agent/js/jquery.js systemvm/agent/js/jquery.flot.navigate.js diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 3f811c152f00..9e8fdb60694e 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -1410,7 +1410,7 @@ protected Pair> filterUefiHostsForMigration(List a if (vmInstanceDetailVO != null && (ApiConstants.BootMode.LEGACY.toString().equalsIgnoreCase(vmInstanceDetailVO.getValue()) || ApiConstants.BootMode.SECURE.toString().equalsIgnoreCase(vmInstanceDetailVO.getValue()))) { - logger.info(" Live Migration of UEFI enabled VM : " + vm.getInstanceName() + " is not supported"); + logger.debug("{} VM is UEFI enabled, Checking for other UEFI enabled hosts as it can be live migrated to UEFI enabled host only.", vm.getInstanceName()); if (CollectionUtils.isEmpty(filteredHosts)) { filteredHosts = new ArrayList<>(allHosts); } @@ -1420,6 +1420,9 @@ protected Pair> filterUefiHostsForMigration(List a return new Pair<>(false, null); } filteredHosts.removeIf(host -> !uefiEnabledHosts.contains(host.getId())); + if (filteredHosts.isEmpty()) { + logger.warn("No UEFI enabled hosts are available for the live migration of VM {}", vm.getInstanceName()); + } return new Pair<>(!filteredHosts.isEmpty(), filteredHosts); } return new Pair<>(true, filteredHosts); diff --git a/systemvm/systemvm-agent-descriptor.xml b/systemvm/systemvm-agent-descriptor.xml index 8cf40a162766..1d6e338eb236 100644 --- a/systemvm/systemvm-agent-descriptor.xml +++ b/systemvm/systemvm-agent-descriptor.xml @@ -60,6 +60,7 @@ log4j-cloud.xml consoleproxy.properties agent.properties + uefi.properties From ce8b1e400a6a74ed451792acd82b39ff017d7a9a Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Thu, 6 Nov 2025 11:13:53 +0100 Subject: [PATCH 034/271] CKS: update cloud.kubernetes.cluster.network.offering to dynamic (#11847) --- .../com/cloud/kubernetes/cluster/KubernetesClusterService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java index a809628a9762..6bdb4265e019 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterService.java @@ -61,7 +61,7 @@ public interface KubernetesClusterService extends PluggableService, Configurable "cloud.kubernetes.cluster.network.offering", "DefaultNetworkOfferingforKubernetesService", "Name of the network offering that will be used to create isolated network in which Kubernetes cluster VMs will be launched", - false, + true, KubernetesServiceEnabled.key()); static final ConfigKey KubernetesClusterStartTimeout = new ConfigKey("Advanced", Long.class, "cloud.kubernetes.cluster.start.timeout", From c09aebd994a8262ef2f2fe508b70b4cee7a832f3 Mon Sep 17 00:00:00 2001 From: Manoj Kumar Date: Fri, 7 Nov 2025 15:13:46 +0530 Subject: [PATCH 035/271] consider Instance in Starting state for listPodsByUserConcentration (#11845) --- engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java index d3775fe35409..2431e63f5e83 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java @@ -101,7 +101,7 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use ReservationDao reservationDao; private static final String LIST_PODS_HAVING_VMS_FOR_ACCOUNT = - "SELECT pod_id FROM cloud.vm_instance WHERE data_center_id = ? AND account_id = ? AND pod_id IS NOT NULL AND (state = 'Running' OR state = 'Stopped') " + "SELECT pod_id FROM cloud.vm_instance WHERE data_center_id = ? AND account_id = ? AND pod_id IS NOT NULL AND state IN ('Starting', 'Running', 'Stopped') " + "GROUP BY pod_id HAVING count(id) > 0 ORDER BY count(id) DESC"; private static final String VM_DETAILS = "select vm_instance.id, " From 6848e303f2d11c6c3a6ca31fb5aa97f59dfd8500 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Fri, 7 Nov 2025 11:55:27 +0100 Subject: [PATCH 036/271] Veeam: get templateId from vm instance if vm is created from ISO (#10705) --- .../main/java/com/cloud/hypervisor/guru/VMwareGuru.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java index 9e1466b7f9cc..1a8d9f7b59e2 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java @@ -695,7 +695,12 @@ private Long getImportingVMTemplate(List virtualDisks, long zoneId, updateTemplateRef(templateId, poolId, templatePath, templateSize); return templateId; } else { - return volumeVO.getTemplateId(); + Long templateId = volumeVO.getTemplateId(); + if (templateId == null && volumeVO.getInstanceId() != null) { + VMInstanceVO vmInstanceVO = vmDao.findByIdIncludingRemoved(volumeVO.getInstanceId()); + return vmInstanceVO.getTemplateId(); + } + return templateId; } } } From 8fc0c935e3d5d8c371cb4b9f1bdbfe115ec96e81 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Fri, 7 Nov 2025 16:13:10 +0100 Subject: [PATCH 037/271] Veeam: use pre-defined object mapper (#10715) --- .../org/apache/cloudstack/backup/veeam/VeeamClientTest.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java index 3485f402417e..0c70c75939ef 100644 --- a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java +++ b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java @@ -483,7 +483,9 @@ public void testListVmRestorePointsViaVeeamAPI() { " xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"\n" + " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n" + " xmlns=\"http://www.veeam.com/ent/v1.0\">\n" + - " \n" + + " \n" + " \n" + " \n" + " \n" + From da1e48b1c2552b2492a01b2f676d798854f73918 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 11 Nov 2025 03:29:54 -0500 Subject: [PATCH 038/271] UI: Update and reset domain level configuration (#11571) --- ui/src/components/view/SettingsTab.vue | 3 ++- ui/src/views/setting/ConfigurationTable.vue | 6 +++++- ui/src/views/setting/ConfigurationValue.vue | 16 ++++++++++++++++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/ui/src/components/view/SettingsTab.vue b/ui/src/components/view/SettingsTab.vue index be55d03c4b94..ab75bef83949 100644 --- a/ui/src/components/view/SettingsTab.vue +++ b/ui/src/components/view/SettingsTab.vue @@ -25,7 +25,8 @@ @search="handleSearch" /> + :config="items" + :resource="resource" /> diff --git a/ui/src/views/setting/ConfigurationTable.vue b/ui/src/views/setting/ConfigurationTable.vue index 56518d2570b1..da05b9342a0a 100644 --- a/ui/src/views/setting/ConfigurationTable.vue +++ b/ui/src/views/setting/ConfigurationTable.vue @@ -32,7 +32,7 @@ {{record.displaytext }} {{ ' (' + record.name + ')' }}
{{ record.description }} @@ -85,6 +85,10 @@ export default { pagesize: { type: Number, default: 20 + }, + resource: { + type: Object, + required: false } }, data () { diff --git a/ui/src/views/setting/ConfigurationValue.vue b/ui/src/views/setting/ConfigurationValue.vue index e438f0eb8315..662e5ef142e5 100644 --- a/ui/src/views/setting/ConfigurationValue.vue +++ b/ui/src/views/setting/ConfigurationValue.vue @@ -217,6 +217,10 @@ export default { actions: { type: Array, default: () => [] + }, + resource: { + type: Object, + required: false } }, data () { @@ -254,6 +258,12 @@ export default { this.setConfigData() }, watch: { + configrecord: { + handler () { + this.setConfigData() + }, + deep: true + } }, methods: { setConfigData () { @@ -280,6 +290,9 @@ export default { name: configrecord.name, value: newValue } + if (this.scopeKey === 'domainid' && !params[this.scopeKey]) { + params[this.scopeKey] = this.resource?.id + } postAPI('updateConfiguration', params).then(json => { this.editableValue = this.getEditableValue(json.updateconfigurationresponse.configuration) this.actualValue = this.editableValue @@ -315,6 +328,9 @@ export default { [this.scopeKey]: this.$route.params?.id, name: configrecord.name } + if (this.scopeKey === 'domainid' && !params[this.scopeKey]) { + params[this.scopeKey] = this.resource?.id + } postAPI('resetConfiguration', params).then(json => { this.editableValue = this.getEditableValue(json.resetconfigurationresponse.configuration) this.actualValue = this.editableValue From cea35ea17dca3cac1662abbe01f8edc1f006b113 Mon Sep 17 00:00:00 2001 From: Abhisar Sinha <63767682+abh1sar@users.noreply.github.com> Date: Wed, 12 Nov 2025 14:02:01 +0530 Subject: [PATCH 039/271] Track volume usage data at a vm granularity as well (#11531) Co-authored-by: Vishesh <8760112+vishesh92@users.noreply.github.com> --- .../java/com/cloud/event/UsageEventUtils.java | 12 ++ .../orchestration/VolumeOrchestrator.java | 4 +- .../java/com/cloud/event/UsageEventVO.java | 22 +++ .../cloud/event/dao/UsageEventDaoImpl.java | 8 +- .../java/com/cloud/usage/UsageVolumeVO.java | 14 +- .../cloud/usage/dao/UsageStorageDaoImpl.java | 2 + .../com/cloud/usage/dao/UsageVolumeDao.java | 6 +- .../cloud/usage/dao/UsageVolumeDaoImpl.java | 86 ++++----- .../META-INF/db/schema-42100to42200.sql | 7 + .../java/com/cloud/api/ApiResponseHelper.java | 3 + .../java/com/cloud/hypervisor/KVMGuru.java | 6 + .../cloud/storage/VolumeApiServiceImpl.java | 15 +- .../storage/listener/VolumeStateListener.java | 2 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 6 +- .../storage/VolumeApiServiceImplTest.java | 8 +- .../com/cloud/vm/UserVmManagerImplTest.java | 8 +- .../com/cloud/usage/UsageManagerImpl.java | 167 ++++++++++-------- .../cloud/usage/parser/VolumeUsageParser.java | 23 ++- 18 files changed, 243 insertions(+), 156 deletions(-) diff --git a/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java b/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java index 94fbb7a80af8..1c88c7df124c 100644 --- a/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java +++ b/engine/components-api/src/main/java/com/cloud/event/UsageEventUtils.java @@ -94,6 +94,14 @@ public static void publishUsageEvent(String usageType, long accountId, long zone } + public static void publishUsageEvent(String usageType, long accountId, long zoneId, long resourceId, String resourceName, Long offeringId, Long templateId, + Long size, String entityType, String entityUUID, Long vmId, boolean displayResource) { + if (displayResource) { + saveUsageEvent(usageType, accountId, zoneId, resourceId, offeringId, templateId, size, vmId, resourceName); + } + publishUsageEvent(usageType, accountId, zoneId, entityType, entityUUID); + } + public static void publishUsageEvent(String usageType, long accountId, long zoneId, long resourceId, String resourceName, Long offeringId, Long templateId, Long size, Long virtualSize, String entityType, String entityUUID, Map details) { saveUsageEvent(usageType, accountId, zoneId, resourceId, resourceName, offeringId, templateId, size, virtualSize, details); @@ -202,6 +210,10 @@ public static void saveUsageEvent(String usageType, long accountId, long zoneId, s_usageEventDao.persist(new UsageEventVO(usageType, accountId, zoneId, vmId, securityGroupId)); } + public static void saveUsageEvent(String usageType, long accountId, long zoneId, long resourceId, Long offeringId, Long templateId, Long size, Long vmId, String resourceName) { + s_usageEventDao.persist(new UsageEventVO(usageType, accountId, zoneId, resourceId, offeringId, templateId, size, vmId, resourceName)); + } + private static void publishUsageEvent(String usageEventType, Long accountId, Long zoneId, String resourceType, String resourceUUID) { String configKey = "publish.usage.events"; String value = s_configDao.getValue(configKey); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 2b759235ac81..430b7cbc5aa4 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -903,7 +903,7 @@ public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offeri // Save usage event and update resource count for user vm volumes if (vm.getType() == VirtualMachine.Type.User) { UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(), offering.getId(), null, size, - Volume.class.getName(), vol.getUuid(), vol.isDisplayVolume()); + Volume.class.getName(), vol.getUuid(), vol.getInstanceId(), vol.isDisplayVolume()); _resourceLimitMgr.incrementVolumeResourceCount(vm.getAccountId(), vol.isDisplayVolume(), vol.getSize(), offering); } DiskProfile diskProfile = toDiskProfile(vol, offering); @@ -981,7 +981,7 @@ private DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering } UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(), offeringId, vol.getTemplateId(), size, - Volume.class.getName(), vol.getUuid(), vol.isDisplayVolume()); + Volume.class.getName(), vol.getUuid(), vol.getInstanceId(), vol.isDisplayVolume()); _resourceLimitMgr.incrementVolumeResourceCount(vm.getAccountId(), vol.isDisplayVolume(), vol.getSize(), offering); } diff --git a/engine/schema/src/main/java/com/cloud/event/UsageEventVO.java b/engine/schema/src/main/java/com/cloud/event/UsageEventVO.java index 3fc9fda94873..41ecec0c7fb8 100644 --- a/engine/schema/src/main/java/com/cloud/event/UsageEventVO.java +++ b/engine/schema/src/main/java/com/cloud/event/UsageEventVO.java @@ -75,6 +75,9 @@ public enum DynamicParameters { @Column(name = "virtual_size") private Long virtualSize; + @Column(name = "vm_id") + private Long vmId; + public UsageEventVO() { } @@ -143,6 +146,18 @@ public UsageEventVO(String usageType, long accountId, long zoneId, long vmId, lo this.offeringId = securityGroupId; } + public UsageEventVO(String usageType, long accountId, long zoneId, long resourceId, Long offeringId, Long templateId, Long size, Long vmId, String resourceName) { + this.type = usageType; + this.accountId = accountId; + this.zoneId = zoneId; + this.resourceId = resourceId; + this.offeringId = offeringId; + this.templateId = templateId; + this.size = size; + this.vmId = vmId; + this.resourceName = resourceName; + } + @Override public long getId() { return id; @@ -248,4 +263,11 @@ public void setVirtualSize(Long virtualSize) { this.virtualSize = virtualSize; } + public Long getVmId() { + return vmId; + } + + public void setVmId(Long vmId) { + this.vmId = vmId; + } } diff --git a/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDaoImpl.java b/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDaoImpl.java index fdef509da5bd..bce9c474e2d2 100644 --- a/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/event/dao/UsageEventDaoImpl.java @@ -45,11 +45,11 @@ public class UsageEventDaoImpl extends GenericDaoBase implem private final SearchBuilder latestEventsSearch; private final SearchBuilder IpeventsSearch; private static final String COPY_EVENTS = - "INSERT INTO cloud_usage.usage_event (id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size) " - + "SELECT id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size FROM cloud.usage_event vmevt WHERE vmevt.id > ? and vmevt.id <= ? "; + "INSERT INTO cloud_usage.usage_event (id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size, vm_id) " + + "SELECT id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size, vm_id FROM cloud.usage_event vmevt WHERE vmevt.id > ? and vmevt.id <= ? "; private static final String COPY_ALL_EVENTS = - "INSERT INTO cloud_usage.usage_event (id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size) " - + "SELECT id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size FROM cloud.usage_event vmevt WHERE vmevt.id <= ?"; + "INSERT INTO cloud_usage.usage_event (id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size, vm_id) " + + "SELECT id, type, account_id, created, zone_id, resource_id, resource_name, offering_id, template_id, size, resource_type, virtual_size, vm_id FROM cloud.usage_event vmevt WHERE vmevt.id <= ?"; private static final String COPY_EVENT_DETAILS = "INSERT INTO cloud_usage.usage_event_details (id, usage_event_id, name, value) " + "SELECT id, usage_event_id, name, value FROM cloud.usage_event_details vmevtDetails WHERE vmevtDetails.usage_event_id > ? and vmevtDetails.usage_event_id <= ? "; private static final String COPY_ALL_EVENT_DETAILS = "INSERT INTO cloud_usage.usage_event_details (id, usage_event_id, name, value) " diff --git a/engine/schema/src/main/java/com/cloud/usage/UsageVolumeVO.java b/engine/schema/src/main/java/com/cloud/usage/UsageVolumeVO.java index 96abd2d69c08..6d5315e33464 100644 --- a/engine/schema/src/main/java/com/cloud/usage/UsageVolumeVO.java +++ b/engine/schema/src/main/java/com/cloud/usage/UsageVolumeVO.java @@ -59,6 +59,9 @@ public class UsageVolumeVO implements InternalIdentity { @Column(name = "size") private long size; + @Column(name = "vm_id") + private Long vmId; + @Column(name = "created") @Temporal(value = TemporalType.TIMESTAMP) private Date created = null; @@ -70,13 +73,14 @@ public class UsageVolumeVO implements InternalIdentity { protected UsageVolumeVO() { } - public UsageVolumeVO(long id, long zoneId, long accountId, long domainId, Long diskOfferingId, Long templateId, long size, Date created, Date deleted) { + public UsageVolumeVO(long id, long zoneId, long accountId, long domainId, Long diskOfferingId, Long templateId, Long vmId, long size, Date created, Date deleted) { this.volumeId = id; this.zoneId = zoneId; this.accountId = accountId; this.domainId = domainId; this.diskOfferingId = diskOfferingId; this.templateId = templateId; + this.vmId = vmId; this.size = size; this.created = created; this.deleted = deleted; @@ -126,4 +130,12 @@ public void setDeleted(Date deleted) { public long getVolumeId() { return volumeId; } + + public Long getVmId() { + return vmId; + } + + public void setVmId(Long vmId) { + this.vmId = vmId; + } } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageStorageDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageStorageDaoImpl.java index 1da533493997..f863cd1e3a35 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageStorageDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageStorageDaoImpl.java @@ -57,6 +57,7 @@ public UsageStorageDaoImpl() { IdSearch.and("accountId", IdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); IdSearch.and("id", IdSearch.entity().getEntityId(), SearchCriteria.Op.EQ); IdSearch.and("type", IdSearch.entity().getStorageType(), SearchCriteria.Op.EQ); + IdSearch.and("deleted", IdSearch.entity().getDeleted(), SearchCriteria.Op.NULL); IdSearch.done(); IdZoneSearch = createSearchBuilder(); @@ -74,6 +75,7 @@ public List listById(long accountId, long id, int type) { sc.setParameters("accountId", accountId); sc.setParameters("id", id); sc.setParameters("type", type); + sc.setParameters("deleted", null); return listBy(sc, null); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDao.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDao.java index 09590b739930..05287240f254 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDao.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDao.java @@ -23,9 +23,7 @@ import com.cloud.utils.db.GenericDao; public interface UsageVolumeDao extends GenericDao { - public void removeBy(long userId, long id); - - public void update(UsageVolumeVO usage); - public List getUsageRecords(Long accountId, Long domainId, Date startDate, Date endDate, boolean limit, int page); + + List listByVolumeId(long volumeId, long accountId); } diff --git a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDaoImpl.java b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDaoImpl.java index 4662a6f26ce8..095070feac1c 100644 --- a/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/usage/dao/UsageVolumeDaoImpl.java @@ -18,81 +18,46 @@ import java.sql.PreparedStatement; import java.sql.ResultSet; -import java.sql.SQLException; import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.TimeZone; -import com.cloud.exception.CloudException; +import javax.annotation.PostConstruct; + import org.springframework.stereotype.Component; import com.cloud.usage.UsageVolumeVO; import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.TransactionLegacy; @Component public class UsageVolumeDaoImpl extends GenericDaoBase implements UsageVolumeDao { - protected static final String REMOVE_BY_USERID_VOLID = "DELETE FROM usage_volume WHERE account_id = ? AND volume_id = ?"; - protected static final String UPDATE_DELETED = "UPDATE usage_volume SET deleted = ? WHERE account_id = ? AND volume_id = ? and deleted IS NULL"; - protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, size, created, deleted " + protected static final String GET_USAGE_RECORDS_BY_ACCOUNT = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, vm_id, size, created, deleted " + "FROM usage_volume " + "WHERE account_id = ? AND ((deleted IS NULL) OR (created BETWEEN ? AND ?) OR " + " (deleted BETWEEN ? AND ?) OR ((created <= ?) AND (deleted >= ?)))"; - protected static final String GET_USAGE_RECORDS_BY_DOMAIN = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, size, created, deleted " + protected static final String GET_USAGE_RECORDS_BY_DOMAIN = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, vm_id, size, created, deleted " + "FROM usage_volume " + "WHERE domain_id = ? AND ((deleted IS NULL) OR (created BETWEEN ? AND ?) OR " + " (deleted BETWEEN ? AND ?) OR ((created <= ?) AND (deleted >= ?)))"; - protected static final String GET_ALL_USAGE_RECORDS = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, size, created, deleted " + protected static final String GET_ALL_USAGE_RECORDS = "SELECT volume_id, zone_id, account_id, domain_id, disk_offering_id, template_id, vm_id, size, created, deleted " + "FROM usage_volume " + "WHERE (deleted IS NULL) OR (created BETWEEN ? AND ?) OR " + " (deleted BETWEEN ? AND ?) OR ((created <= ?) AND (deleted >= ?))"; + private SearchBuilder volumeSearch; public UsageVolumeDaoImpl() { } - @Override - public void removeBy(long accountId, long volId) { - TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); - try { - txn.start(); - try(PreparedStatement pstmt = txn.prepareStatement(REMOVE_BY_USERID_VOLID);) { - if (pstmt != null) { - pstmt.setLong(1, accountId); - pstmt.setLong(2, volId); - pstmt.executeUpdate(); - } - }catch (SQLException e) { - throw new CloudException("Error removing usageVolumeVO:"+e.getMessage(), e); - } - txn.commit(); - } catch (Exception e) { - txn.rollback(); - logger.warn("Error removing usageVolumeVO:"+e.getMessage(), e); - } finally { - txn.close(); - } - } - - @Override - public void update(UsageVolumeVO usage) { - TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); - PreparedStatement pstmt = null; - try { - txn.start(); - if (usage.getDeleted() != null) { - pstmt = txn.prepareAutoCloseStatement(UPDATE_DELETED); - pstmt.setString(1, DateUtil.getDateDisplayString(TimeZone.getTimeZone("GMT"), usage.getDeleted())); - pstmt.setLong(2, usage.getAccountId()); - pstmt.setLong(3, usage.getVolumeId()); - pstmt.executeUpdate(); - } - txn.commit(); - } catch (Exception e) { - txn.rollback(); - logger.warn("Error updating UsageVolumeVO", e); - } finally { - txn.close(); - } + @PostConstruct + protected void init() { + volumeSearch = createSearchBuilder(); + volumeSearch.and("accountId", volumeSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + volumeSearch.and("volumeId", volumeSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); + volumeSearch.and("deleted", volumeSearch.entity().getDeleted(), SearchCriteria.Op.NULL); + volumeSearch.done(); } @Override @@ -150,11 +115,15 @@ public List getUsageRecords(Long accountId, Long domainId, Date s if (tId == 0) { tId = null; } - long size = Long.valueOf(rs.getLong(7)); + Long vmId = Long.valueOf(rs.getLong(7)); + if (vmId == 0) { + vmId = null; + } + long size = Long.valueOf(rs.getLong(8)); Date createdDate = null; Date deletedDate = null; - String createdTS = rs.getString(8); - String deletedTS = rs.getString(9); + String createdTS = rs.getString(9); + String deletedTS = rs.getString(10); if (createdTS != null) { createdDate = DateUtil.parseDateString(s_gmtTimeZone, createdTS); @@ -163,7 +132,7 @@ public List getUsageRecords(Long accountId, Long domainId, Date s deletedDate = DateUtil.parseDateString(s_gmtTimeZone, deletedTS); } - usageRecords.add(new UsageVolumeVO(vId, zoneId, acctId, dId, doId, tId, size, createdDate, deletedDate)); + usageRecords.add(new UsageVolumeVO(vId, zoneId, acctId, dId, doId, tId, vmId, size, createdDate, deletedDate)); } } catch (Exception e) { txn.rollback(); @@ -174,4 +143,13 @@ public List getUsageRecords(Long accountId, Long domainId, Date s return usageRecords; } + + @Override + public List listByVolumeId(long volumeId, long accountId) { + SearchCriteria sc = volumeSearch.create(); + sc.setParameters("accountId", accountId); + sc.setParameters("volumeId", volumeId); + sc.setParameters("deleted", null); + return listBy(sc); + } } diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql index b523016aa3dc..ecca3482d593 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql @@ -41,6 +41,13 @@ CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.ldap_configuration', 'uuid', 'VARCHA -- Populate uuid for existing rows where uuid is NULL or empty UPDATE `cloud`.`ldap_configuration` SET uuid = UUID() WHERE uuid IS NULL OR uuid = ''; +-- Add vm_id column to usage_event table for volume usage events +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.usage_event','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with volume usage events"'); +CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_event','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with volume usage events"'); + +-- Add vm_id column to cloud_usage.usage_volume table +CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_volume','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with the volume usage"'); + -- Add the column cross_zone_instance_creation to cloud.backup_repository. if enabled it means that new Instance can be created on all Zones from Backups on this Repository. CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backup_repository', 'cross_zone_instance_creation', 'TINYINT(1) DEFAULT NULL COMMENT ''Backup Repository can be used for disaster recovery on another zone'''); diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index 1ebbd4f35b5c..8cc10ce41673 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -4302,6 +4302,9 @@ public UsageRecordResponse createUsageResponse(Usage usageRecord, Map t // For the Resize Volume Event, this publishes an event with an incorrect disk offering ID, so do nothing for now } else { UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(), vol.getDiskOfferingId(), null, vol.getSize(), - Volume.class.getName(), vol.getUuid(), vol.isDisplayVolume()); + Volume.class.getName(), vol.getUuid(), instanceId, vol.isDisplayVolume()); } } else if (transition.getToState() == State.Destroy && vol.getVolumeType() != Volume.Type.ROOT) { //Do not Publish Usage Event for ROOT Disk as it would have been published already while destroying a VM UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_DELETE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(), diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 96c87c5376d8..2e30b4ecbd8c 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -2408,6 +2408,8 @@ protected void recoverRootVolume(VolumeVO volume, Long vmId) { if (Volume.State.Destroy.equals(volume.getState())) { _volumeService.recoverVolume(volume.getId()); _volsDao.attachVolume(volume.getId(), vmId, ROOT_DEVICE_ID); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_ATTACH, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), + volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), vmId, volume.isDisplay()); } else { _volumeService.publishVolumeCreationUsageEvent(volume); } @@ -8156,7 +8158,7 @@ protected void updateVolumesOwner(final List volumes, Account oldAccou logger.trace("Generating a create volume event for volume [{}].", volume); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(), - volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.isDisplayVolume()); + volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid(), volume.getInstanceId(), volume.isDisplayVolume()); } } @@ -8959,6 +8961,8 @@ public Pair doInTransaction(final TransactionStatus status) th handleManagedStorage(vm, root); _volsDao.attachVolume(newVol.getId(), vmId, newVol.getDeviceId()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_ATTACH, newVol.getAccountId(), newVol.getDataCenterId(), newVol.getId(), newVol.getName(), + newVol.getDiskOfferingId(), newVol.getTemplateId(), newVol.getSize(), Volume.class.getName(), newVol.getUuid(), vmId, newVol.isDisplay()); // Detach, destroy and create the usage event for the old root volume. _volsDao.detachVolume(root.getId()); diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index 79be3695fbde..0575b430ef10 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -1545,7 +1545,7 @@ public void publishVolumeCreationUsageEventTestNullDiskOfferingId() { volumeApiServiceImpl.publishVolumeCreationUsageEvent(volumeVoMock); usageEventUtilsMocked.verify(() -> UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volumeVoMock.getAccountId(), volumeVoMock.getDataCenterId(), volumeVoMock.getId(), volumeVoMock.getName(), - null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.isDisplay())); + null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.getInstanceId(), volumeVoMock.isDisplay())); } } @@ -1558,7 +1558,7 @@ public void publishVolumeCreationUsageEventTestNullDiskOfferingVo() { volumeApiServiceImpl.publishVolumeCreationUsageEvent(volumeVoMock); usageEventUtilsMocked.verify(() -> UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volumeVoMock.getAccountId(), volumeVoMock.getDataCenterId(), volumeVoMock.getId(), volumeVoMock.getName(), - null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.isDisplay())); + null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.getInstanceId(), volumeVoMock.isDisplay())); } } @@ -1573,7 +1573,7 @@ public void publishVolumeCreationUsageEventTestDiskOfferingVoTypeNotDisk() { volumeApiServiceImpl.publishVolumeCreationUsageEvent(volumeVoMock); usageEventUtilsMocked.verify(() -> UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volumeVoMock.getAccountId(), volumeVoMock.getDataCenterId(), volumeVoMock.getId(), volumeVoMock.getName(), - null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.isDisplay())); + null, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.getInstanceId(), volumeVoMock.isDisplay())); } } @@ -1589,7 +1589,7 @@ public void publishVolumeCreationUsageEventTestOfferingIdNotNull() { volumeApiServiceImpl.publishVolumeCreationUsageEvent(volumeVoMock); usageEventUtilsMocked.verify(() -> UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volumeVoMock.getAccountId(), volumeVoMock.getDataCenterId(), volumeVoMock.getId(), volumeVoMock.getName(), - offeringMockId, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.isDisplay())); + offeringMockId, volumeVoMock.getTemplateId(), volumeVoMock.getSize(), Volume.class.getName(), volumeVoMock.getUuid(), volumeVoMock.getInstanceId(), volumeVoMock.isDisplay())); } } diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index a21477aeb80e..fe4ea0838f16 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -1120,10 +1120,12 @@ public void testResetVMUserDataSuccessResetWithUserdataId() { public void recoverRootVolumeTestDestroyState() { Mockito.doReturn(Volume.State.Destroy).when(volumeVOMock).getState(); - userVmManagerImpl.recoverRootVolume(volumeVOMock, vmId); + try (MockedStatic ignored = Mockito.mockStatic(UsageEventUtils.class)) { + userVmManagerImpl.recoverRootVolume(volumeVOMock, vmId); - Mockito.verify(volumeApiService).recoverVolume(volumeVOMock.getId()); - Mockito.verify(volumeDaoMock).attachVolume(volumeVOMock.getId(), vmId, UserVmManagerImpl.ROOT_DEVICE_ID); + Mockito.verify(volumeApiService).recoverVolume(volumeVOMock.getId()); + Mockito.verify(volumeDaoMock).attachVolume(volumeVOMock.getId(), vmId, UserVmManagerImpl.ROOT_DEVICE_ID); + } } @Test(expected = InvalidParameterValueException.class) diff --git a/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java b/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java index 49d799997162..864ad35c9229 100644 --- a/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java +++ b/usage/src/main/java/com/cloud/usage/UsageManagerImpl.java @@ -1008,7 +1008,12 @@ private boolean isIPEvent(String eventType) { private boolean isVolumeEvent(String eventType) { return eventType != null && - (eventType.equals(EventTypes.EVENT_VOLUME_CREATE) || eventType.equals(EventTypes.EVENT_VOLUME_DELETE) || eventType.equals(EventTypes.EVENT_VOLUME_RESIZE) || eventType.equals(EventTypes.EVENT_VOLUME_UPLOAD)); + (eventType.equals(EventTypes.EVENT_VOLUME_CREATE) || + eventType.equals(EventTypes.EVENT_VOLUME_DELETE) || + eventType.equals(EventTypes.EVENT_VOLUME_RESIZE) || + eventType.equals(EventTypes.EVENT_VOLUME_UPLOAD) || + eventType.equals(EventTypes.EVENT_VOLUME_ATTACH) || + eventType.equals(EventTypes.EVENT_VOLUME_DETACH)); } private boolean isTemplateEvent(String eventType) { @@ -1424,92 +1429,112 @@ private void createIPHelperEvent(UsageEventVO event) { } } + private void deleteExistingSecondaryStorageUsageForVolume(long volId, long accountId, Date deletedDate) { + List storageVOs = _usageStorageDao.listById(accountId, volId, StorageTypes.VOLUME); + for (UsageStorageVO storageVO : storageVOs) { + logger.debug("Setting the volume with id: {} to 'deleted' in the usage_storage table for account: {}.", volId, accountId); + storageVO.setDeleted(deletedDate); + _usageStorageDao.update(storageVO); + } + } + + private void deleteExistingInstanceVolumeUsage(long volId, long accountId, Date deletedDate) { + List volumesVOs = _usageVolumeDao.listByVolumeId(volId, accountId); + for (UsageVolumeVO volumesVO : volumesVOs) { + if (volumesVO.getVmId() != null) { + logger.debug("Setting the volume with id: {} for instance id: {} to 'deleted' in the usage_volume table for account {}.", + volumesVO.getVolumeId(), volumesVO.getVmId(), accountId); + volumesVO.setDeleted(deletedDate); + _usageVolumeDao.update(volumesVO.getId(), volumesVO); + } + } + } + + private void deleteExistingVolumeUsage(long volId, long accountId, Date deletedDate) { + List volumesVOs = _usageVolumeDao.listByVolumeId(volId, accountId); + for (UsageVolumeVO volumesVO : volumesVOs) { + logger.debug("Setting the volume with id: {} to 'deleted' in the usage_volume table for account: {}.", volId, accountId); + volumesVO.setDeleted(deletedDate); + _usageVolumeDao.update(volumesVO.getId(), volumesVO); + } + } + private void createVolumeHelperEvent(UsageEventVO event) { long volId = event.getResourceId(); + Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); + List volumesVOs; + UsageVolumeVO volumeVO; - if (EventTypes.EVENT_VOLUME_CREATE.equals(event.getType())) { - //For volumes which are 'attached' successfully, set the 'deleted' column in the usage_storage table, + switch (event.getType()) { + case EventTypes.EVENT_VOLUME_CREATE: + //For volumes which are 'attached' successfully from uploaded state, set the 'deleted' column in the usage_storage table, //so that the secondary storage should stop accounting and only primary will be accounted. - SearchCriteria sc = _usageStorageDao.createSearchCriteria(); - sc.addAnd("entityId", SearchCriteria.Op.EQ, volId); - sc.addAnd("storageType", SearchCriteria.Op.EQ, StorageTypes.VOLUME); - List volumesVOs = _usageStorageDao.search(sc, null); - if (volumesVOs != null) { - if (volumesVOs.size() == 1) { - logger.debug("Setting the volume with id: " + volId + " to 'deleted' in the usage_storage table."); - volumesVOs.get(0).setDeleted(event.getCreateDate()); - _usageStorageDao.update(volumesVOs.get(0)); - } - } - } - if (EventTypes.EVENT_VOLUME_CREATE.equals(event.getType()) || EventTypes.EVENT_VOLUME_RESIZE.equals(event.getType())) { - SearchCriteria sc = _usageVolumeDao.createSearchCriteria(); - sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId()); - sc.addAnd("volumeId", SearchCriteria.Op.EQ, volId); - sc.addAnd("deleted", SearchCriteria.Op.NULL); - List volumesVOs = _usageVolumeDao.search(sc, null); + deleteExistingSecondaryStorageUsageForVolume(volId, event.getAccountId(), event.getCreateDate()); + + volumesVOs = _usageVolumeDao.listByVolumeId(volId, event.getAccountId()); if (volumesVOs.size() > 0) { //This is a safeguard to avoid double counting of volumes. logger.error("Found duplicate usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking as deleted..."); + deleteExistingVolumeUsage(volId, event.getAccountId(), event.getCreateDate()); } - //an entry exists if it is a resize volume event. marking the existing deleted and creating a new one in the case of resize. - for (UsageVolumeVO volumesVO : volumesVOs) { - if (logger.isDebugEnabled()) { - logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId()); - } - volumesVO.setDeleted(event.getCreateDate()); - _usageVolumeDao.update(volumesVO); - } - if (logger.isDebugEnabled()) { - logger.debug("create volume with id : " + volId + " for account: " + event.getAccountId()); - } - Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); - UsageVolumeVO volumeVO = new UsageVolumeVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), event.getOfferingId(), event.getTemplateId(), event.getSize(), event.getCreateDate(), null); + + logger.debug("Creating a new entry in usage_volume for volume with id: {} for account: {}", volId, event.getAccountId()); + volumeVO = new UsageVolumeVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), event.getOfferingId(), event.getTemplateId(), null, event.getSize(), event.getCreateDate(), null); _usageVolumeDao.persist(volumeVO); - } else if (EventTypes.EVENT_VOLUME_DELETE.equals(event.getType())) { - SearchCriteria sc = _usageVolumeDao.createSearchCriteria(); - sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId()); - sc.addAnd("volumeId", SearchCriteria.Op.EQ, volId); - sc.addAnd("deleted", SearchCriteria.Op.NULL); - List volumesVOs = _usageVolumeDao.search(sc, null); - if (volumesVOs.size() > 1) { - logger.warn("More that one usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); - } - for (UsageVolumeVO volumesVO : volumesVOs) { - if (logger.isDebugEnabled()) { - logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId()); - } - volumesVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one - _usageVolumeDao.update(volumesVO); - } - } else if (EventTypes.EVENT_VOLUME_UPLOAD.equals(event.getType())) { - //For Upload event add an entry to the usage_storage table. - SearchCriteria sc = _usageStorageDao.createSearchCriteria(); - sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId()); - sc.addAnd("entityId", SearchCriteria.Op.EQ, volId); - sc.addAnd("storageType", SearchCriteria.Op.EQ, StorageTypes.VOLUME); - sc.addAnd("deleted", SearchCriteria.Op.NULL); - List volumesVOs = _usageStorageDao.search(sc, null); - if (volumesVOs.size() > 0) { - //This is a safeguard to avoid double counting of volumes. - logger.error("Found duplicate usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking as deleted..."); + if (event.getVmId() != null) { + volumeVO = new UsageVolumeVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), event.getOfferingId(), event.getTemplateId(), event.getVmId(), event.getSize(), event.getCreateDate(), null); + _usageVolumeDao.persist(volumeVO); } - for (UsageStorageVO volumesVO : volumesVOs) { - if (logger.isDebugEnabled()) { - logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId()); + break; + + case EventTypes.EVENT_VOLUME_RESIZE: + volumesVOs = _usageVolumeDao.listByVolumeId(volId, event.getAccountId()); + for (UsageVolumeVO volumesVO : volumesVOs) { + String delete_msg = String.format("Setting the volume with id: %s to 'deleted' in the usage_volume table for account: %s.", volId, event.getAccountId()); + String create_msg = String.format("Creating a new entry in usage_volume for volume with id: %s after resize for account: %s", volId, event.getAccountId()); + Long vmId = volumesVO.getVmId(); + if (vmId != null) { + delete_msg = String.format("Setting the volume with id: %s for instance id: %s to 'deleted' in the usage_volume table for account: %s.", + volId, vmId, event.getAccountId()); + create_msg = String.format("Creating a new entry in usage_volume for volume with id: %s and instance id: %s after resize for account: %s", + volId, vmId, event.getAccountId()); } + logger.debug(delete_msg); volumesVO.setDeleted(event.getCreateDate()); - _usageStorageDao.update(volumesVO); - } + _usageVolumeDao.update(volumesVO.getId(), volumesVO); - if (logger.isDebugEnabled()) { - logger.debug("create volume with id : " + volId + " for account: " + event.getAccountId()); + logger.debug(create_msg); + volumeVO = new UsageVolumeVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), event.getOfferingId(), event.getTemplateId(), vmId, event.getSize(), event.getCreateDate(), null); + _usageVolumeDao.persist(volumeVO); } - Account acct = _accountDao.findByIdIncludingRemoved(event.getAccountId()); - UsageStorageVO volumeVO = new UsageStorageVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), StorageTypes.VOLUME, event.getTemplateId(), event.getSize(), event.getCreateDate(), null); - _usageStorageDao.persist(volumeVO); + break; + + case EventTypes.EVENT_VOLUME_DELETE: + deleteExistingVolumeUsage(volId, event.getAccountId(), event.getCreateDate()); + break; + + case EventTypes.EVENT_VOLUME_ATTACH: + deleteExistingInstanceVolumeUsage(event.getResourceId(), event.getAccountId(), event.getCreateDate()); + + logger.debug("Creating a new entry in usage_volume for volume with id: {}, and instance id: {} for account: {}", + volId, event.getVmId(), event.getAccountId()); + volumeVO = new UsageVolumeVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), event.getOfferingId(), event.getTemplateId(), event.getVmId(), event.getSize(), event.getCreateDate(), null); + _usageVolumeDao.persist(volumeVO); + break; + + case EventTypes.EVENT_VOLUME_DETACH: + deleteExistingInstanceVolumeUsage(event.getResourceId(), event.getAccountId(), event.getCreateDate()); + break; + + case EventTypes.EVENT_VOLUME_UPLOAD: + deleteExistingSecondaryStorageUsageForVolume(volId, event.getAccountId(), event.getCreateDate()); + + logger.debug("Creating a new entry in usage_storage for volume with id : {} for account: {}", volId, event.getAccountId()); + UsageStorageVO storageVO = new UsageStorageVO(volId, event.getZoneId(), event.getAccountId(), acct.getDomainId(), StorageTypes.VOLUME, event.getTemplateId(), event.getSize(), event.getCreateDate(), null); + _usageStorageDao.persist(storageVO); + break; } } diff --git a/usage/src/main/java/com/cloud/usage/parser/VolumeUsageParser.java b/usage/src/main/java/com/cloud/usage/parser/VolumeUsageParser.java index e834b713d420..0210b899e8c9 100644 --- a/usage/src/main/java/com/cloud/usage/parser/VolumeUsageParser.java +++ b/usage/src/main/java/com/cloud/usage/parser/VolumeUsageParser.java @@ -73,12 +73,13 @@ protected boolean parse(AccountVO account, Date startDate, Date endDate) { for (UsageVolumeVO usageVol : usageUsageVols) { long volId = usageVol.getVolumeId(); Long doId = usageVol.getDiskOfferingId(); + Long vmId = usageVol.getVmId(); long zoneId = usageVol.getZoneId(); Long templateId = usageVol.getTemplateId(); long size = usageVol.getSize(); - String key = volId + "-" + doId + "-" + size; + String key = volId + "-" + doId + "-" + vmId + "-" + size; - diskOfferingMap.put(key, new VolInfo(volId, zoneId, doId, templateId, size)); + diskOfferingMap.put(key, new VolInfo(volId, zoneId, doId, templateId, size, vmId)); Date volCreateDate = usageVol.getCreated(); Date volDeleteDate = usageVol.getDeleted(); @@ -110,7 +111,7 @@ protected boolean parse(AccountVO account, Date startDate, Date endDate) { if (useTime > 0L) { VolInfo info = diskOfferingMap.get(volIdKey); createUsageRecord(UsageTypes.VOLUME, useTime, startDate, endDate, account, info.getVolumeId(), info.getZoneId(), info.getDiskOfferingId(), - info.getTemplateId(), info.getSize()); + info.getTemplateId(), info.getVmId(), info.getSize()); } } @@ -130,7 +131,7 @@ private void updateVolUsageData(Map> usageDataMap, Stri } private void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long volId, long zoneId, Long doId, - Long templateId, long size) { + Long templateId, Long vmId, long size) { // Our smallest increment is hourly for now logger.debug("Total running time {} ms", runningTime); @@ -152,7 +153,11 @@ private void createUsageRecord(int type, long runningTime, Date startDate, Date usageDesc += " (DiskOffering: " + doId + ")"; } - UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type, new Double(usage), null, null, doId, templateId, volId, + if (vmId != null) { + usageDesc += " (VM: " + vmId + ")"; + } + + UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type, new Double(usage), vmId, null, doId, templateId, volId, size, startDate, endDate); usageDao.persist(usageRecord); } @@ -163,13 +168,15 @@ private static class VolInfo { private Long diskOfferingId; private Long templateId; private long size; + private Long vmId; - public VolInfo(long volId, long zoneId, Long diskOfferingId, Long templateId, long size) { + public VolInfo(long volId, long zoneId, Long diskOfferingId, Long templateId, long size, Long vmId) { this.volId = volId; this.zoneId = zoneId; this.diskOfferingId = diskOfferingId; this.templateId = templateId; this.size = size; + this.vmId = vmId; } public long getZoneId() { @@ -191,5 +198,9 @@ public Long getTemplateId() { public long getSize() { return size; } + + public Long getVmId() { + return vmId; + } } } From 6fb5bdfbec50a06c54e028576b93c3c71fca7188 Mon Sep 17 00:00:00 2001 From: dahn Date: Wed, 12 Nov 2025 16:09:28 +0100 Subject: [PATCH 040/271] add isPerson check to query for AD (#11843) --- .../ldap/ADLdapUserManagerImpl.java | 18 ++- .../ldap/OpenLdapUserManagerImpl.java | 139 ++++++++---------- .../ldap/ADLdapUserManagerImplTest.java | 5 +- 3 files changed, 78 insertions(+), 84 deletions(-) diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/ADLdapUserManagerImpl.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/ADLdapUserManagerImpl.java index e96606dca2f9..bf5d503e8416 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/ADLdapUserManagerImpl.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/ADLdapUserManagerImpl.java @@ -49,7 +49,7 @@ public List getUsersInGroup(String groupName, LdapContext context, Lon searchControls.setReturningAttributes(_ldapConfiguration.getReturnAttributes(domainId)); NamingEnumeration results = context.search(basedn, generateADGroupSearchFilter(groupName, domainId), searchControls); - final List users = new ArrayList(); + final List users = new ArrayList<>(); while (results.hasMoreElements()) { final SearchResult result = results.nextElement(); users.add(createUser(result, domainId)); @@ -58,10 +58,8 @@ public List getUsersInGroup(String groupName, LdapContext context, Lon } String generateADGroupSearchFilter(String groupName, Long domainId) { - final StringBuilder userObjectFilter = new StringBuilder(); - userObjectFilter.append("(objectClass="); - userObjectFilter.append(_ldapConfiguration.getUserObject(domainId)); - userObjectFilter.append(")"); + + final StringBuilder userObjectFilter = getUserObjectFilter(domainId); final StringBuilder memberOfFilter = new StringBuilder(); String groupCnName = _ldapConfiguration.getCommonNameAttribute() + "=" +groupName + "," + _ldapConfiguration.getBaseDn(domainId); @@ -75,10 +73,18 @@ String generateADGroupSearchFilter(String groupName, Long domainId) { result.append(memberOfFilter); result.append(")"); - logger.debug("group search filter = " + result); + logger.debug("group search filter = {}", result); return result.toString(); } + StringBuilder getUserObjectFilter(Long domainId) { + final StringBuilder userObjectFilter = new StringBuilder(); + userObjectFilter.append("(&(objectCategory=person)"); + userObjectFilter.append(super.getUserObjectFilter(domainId)); + userObjectFilter.append(")"); + return userObjectFilter; + } + protected boolean isUserDisabled(SearchResult result) throws NamingException { boolean isDisabledUser = false; String userAccountControl = LdapUtils.getAttributeValue(result.getAttributes(), _ldapConfiguration.getUserAccountControlAttribute()); diff --git a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/OpenLdapUserManagerImpl.java b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/OpenLdapUserManagerImpl.java index d0b6bc4bd34d..80d394d7478c 100644 --- a/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/OpenLdapUserManagerImpl.java +++ b/plugins/user-authenticators/ldap/src/main/java/org/apache/cloudstack/ldap/OpenLdapUserManagerImpl.java @@ -75,23 +75,15 @@ protected LdapUser createUser(final SearchResult result, Long domainId) throws N } private String generateSearchFilter(final String username, Long domainId) { - final StringBuilder userObjectFilter = new StringBuilder(); - userObjectFilter.append("(objectClass="); - userObjectFilter.append(_ldapConfiguration.getUserObject(domainId)); - userObjectFilter.append(")"); + final StringBuilder userObjectFilter = getUserObjectFilter(domainId); - final StringBuilder usernameFilter = new StringBuilder(); - usernameFilter.append("("); - usernameFilter.append(_ldapConfiguration.getUsernameAttribute(domainId)); - usernameFilter.append("="); - usernameFilter.append((username == null ? "*" : LdapUtils.escapeLDAPSearchFilter(username))); - usernameFilter.append(")"); + final StringBuilder usernameFilter = getUsernameFilter(username, domainId); String memberOfAttribute = getMemberOfAttribute(domainId); StringBuilder ldapGroupsFilter = new StringBuilder(); // this should get the trustmaps for this domain List ldapGroups = getMappedLdapGroups(domainId); - if (null != ldapGroups && ldapGroups.size() > 0) { + if (!ldapGroups.isEmpty()) { ldapGroupsFilter.append("(|"); for (String ldapGroup : ldapGroups) { ldapGroupsFilter.append(getMemberOfGroupString(ldapGroup, memberOfAttribute)); @@ -104,21 +96,35 @@ private String generateSearchFilter(final String username, Long domainId) { if (null != pricipleGroup) { principleGroupFilter.append(getMemberOfGroupString(pricipleGroup, memberOfAttribute)); } - final StringBuilder result = new StringBuilder(); - result.append("(&"); - result.append(userObjectFilter); - result.append(usernameFilter); - result.append(ldapGroupsFilter); - result.append(principleGroupFilter); - result.append(")"); - - String returnString = result.toString(); - if (logger.isTraceEnabled()) { - logger.trace("constructed ldap query: " + returnString); - } + + String returnString = "(&" + + userObjectFilter + + usernameFilter + + ldapGroupsFilter + + principleGroupFilter + + ")"; + logger.trace("constructed ldap query: {}", returnString); return returnString; } + private StringBuilder getUsernameFilter(String username, Long domainId) { + final StringBuilder usernameFilter = new StringBuilder(); + usernameFilter.append("("); + usernameFilter.append(_ldapConfiguration.getUsernameAttribute(domainId)); + usernameFilter.append("="); + usernameFilter.append((username == null ? "*" : LdapUtils.escapeLDAPSearchFilter(username))); + usernameFilter.append(")"); + return usernameFilter; + } + + StringBuilder getUserObjectFilter(Long domainId) { + final StringBuilder userObjectFilter = new StringBuilder(); + userObjectFilter.append("(objectClass="); + userObjectFilter.append(_ldapConfiguration.getUserObject(domainId)); + userObjectFilter.append(")"); + return userObjectFilter; + } + private List getMappedLdapGroups(Long domainId) { List ldapGroups = new ArrayList<>(); // first get the trustmaps @@ -134,37 +140,31 @@ private List getMappedLdapGroups(Long domainId) { private String getMemberOfGroupString(String group, String memberOfAttribute) { final StringBuilder memberOfFilter = new StringBuilder(); if (null != group) { - if(logger.isDebugEnabled()) { - logger.debug("adding search filter for '" + group + - "', using '" + memberOfAttribute + "'"); - } - memberOfFilter.append("(" + memberOfAttribute + "="); - memberOfFilter.append(group); - memberOfFilter.append(")"); + logger.debug("adding search filter for '{}', using '{}'", group, memberOfAttribute); + memberOfFilter.append("(") + .append(memberOfAttribute) + .append("=") + .append(group) + .append(")"); } return memberOfFilter.toString(); } private String generateGroupSearchFilter(final String groupName, Long domainId) { - final StringBuilder groupObjectFilter = new StringBuilder(); - groupObjectFilter.append("(objectClass="); - groupObjectFilter.append(_ldapConfiguration.getGroupObject(domainId)); - groupObjectFilter.append(")"); - - final StringBuilder groupNameFilter = new StringBuilder(); - groupNameFilter.append("("); - groupNameFilter.append(_ldapConfiguration.getCommonNameAttribute()); - groupNameFilter.append("="); - groupNameFilter.append((groupName == null ? "*" : LdapUtils.escapeLDAPSearchFilter(groupName))); - groupNameFilter.append(")"); - - final StringBuilder result = new StringBuilder(); - result.append("(&"); - result.append(groupObjectFilter); - result.append(groupNameFilter); - result.append(")"); - - return result.toString(); + String groupObjectFilter = "(objectClass=" + + _ldapConfiguration.getGroupObject(domainId) + + ")"; + + String groupNameFilter = "(" + + _ldapConfiguration.getCommonNameAttribute() + + "=" + + (groupName == null ? "*" : LdapUtils.escapeLDAPSearchFilter(groupName)) + + ")"; + + return "(&" + + groupObjectFilter + + groupNameFilter + + ")"; } @Override @@ -186,17 +186,9 @@ public LdapUser getUser(final String username, final String type, final String n basedn = _ldapConfiguration.getBaseDn(domainId); } - final StringBuilder userObjectFilter = new StringBuilder(); - userObjectFilter.append("(objectClass="); - userObjectFilter.append(_ldapConfiguration.getUserObject(domainId)); - userObjectFilter.append(")"); + final StringBuilder userObjectFilter = getUserObjectFilter(domainId); - final StringBuilder usernameFilter = new StringBuilder(); - usernameFilter.append("("); - usernameFilter.append(_ldapConfiguration.getUsernameAttribute(domainId)); - usernameFilter.append("="); - usernameFilter.append((username == null ? "*" : LdapUtils.escapeLDAPSearchFilter(username))); - usernameFilter.append(")"); + final StringBuilder usernameFilter = getUsernameFilter(username, domainId); final StringBuilder memberOfFilter = new StringBuilder(); if ("GROUP".equals(type)) { @@ -205,18 +197,17 @@ public LdapUser getUser(final String username, final String type, final String n memberOfFilter.append(")"); } - final StringBuilder searchQuery = new StringBuilder(); - searchQuery.append("(&"); - searchQuery.append(userObjectFilter); - searchQuery.append(usernameFilter); - searchQuery.append(memberOfFilter); - searchQuery.append(")"); + String searchQuery = "(&" + + userObjectFilter + + usernameFilter + + memberOfFilter + + ")"; - return searchUser(basedn, searchQuery.toString(), context, domainId); + return searchUser(basedn, searchQuery, context, domainId); } protected String getMemberOfAttribute(final Long domainId) { - return _ldapConfiguration.getUserMemberOfAttribute(domainId); + return LdapConfiguration.getUserMemberOfAttribute(domainId); } @Override @@ -243,7 +234,7 @@ public List getUsersInGroup(String groupName, LdapContext context, Lon NamingEnumeration result = context.search(_ldapConfiguration.getBaseDn(domainId), generateGroupSearchFilter(groupName, domainId), controls); - final List users = new ArrayList(); + final List users = new ArrayList<>(); //Expecting only one result which has all the users if (result.hasMoreElements()) { Attribute attribute = result.nextElement().getAttributes().get(attributeName); @@ -254,7 +245,7 @@ public List getUsersInGroup(String groupName, LdapContext context, Lon try{ users.add(getUserForDn(userdn, context, domainId)); } catch (NamingException e){ - logger.info("Userdn: " + userdn + " Not Found:: Exception message: " + e.getMessage()); + logger.info("Userdn: {} Not Found:: Exception message: {}", userdn, e.getMessage()); } } } @@ -286,17 +277,15 @@ protected boolean isUserDisabled(SearchResult result) throws NamingException { return false; } - public LdapUser searchUser(final String basedn, final String searchString, final LdapContext context, Long domainId) throws NamingException, IOException { + public LdapUser searchUser(final String basedn, final String searchString, final LdapContext context, Long domainId) throws NamingException { final SearchControls searchControls = new SearchControls(); searchControls.setSearchScope(_ldapConfiguration.getScope()); searchControls.setReturningAttributes(_ldapConfiguration.getReturnAttributes(domainId)); NamingEnumeration results = context.search(basedn, searchString, searchControls); - if(logger.isDebugEnabled()) { - logger.debug("searching user(s) with filter: \"" + searchString + "\""); - } - final List users = new ArrayList(); + logger.debug("searching user(s) with filter: \"{}\"", searchString); + final List users = new ArrayList<>(); while (results.hasMoreElements()) { final SearchResult result = results.nextElement(); users.add(createUser(result, domainId)); @@ -324,7 +313,7 @@ public List searchUsers(final String username, final LdapContext conte byte[] cookie = null; int pageSize = _ldapConfiguration.getLdapPageSize(domainId); context.setRequestControls(new Control[]{new PagedResultsControl(pageSize, Control.NONCRITICAL)}); - final List users = new ArrayList(); + final List users = new ArrayList<>(); NamingEnumeration results; do { results = context.search(basedn, generateSearchFilter(username, domainId), searchControls); diff --git a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/ADLdapUserManagerImplTest.java b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/ADLdapUserManagerImplTest.java index 58b14ec3684e..f2ac1dffaf95 100644 --- a/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/ADLdapUserManagerImplTest.java +++ b/plugins/user-authenticators/ldap/src/test/java/org/apache/cloudstack/ldap/ADLdapUserManagerImplTest.java @@ -54,9 +54,8 @@ public void testGenerateADSearchFilterWithNestedGroupsEnabled() { String [] groups = {"dev", "dev-hyd"}; for (String group: groups) { String result = adLdapUserManager.generateADGroupSearchFilter(group, 1L); - assertTrue(("(&(objectClass=user)(memberOf:1.2.840.113556.1.4.1941:=CN=" + group + ",DC=cloud,DC=citrix,DC=com))").equals(result)); + assertTrue(("(&(&(objectCategory=person)(objectClass=user))(memberOf:1.2.840.113556.1.4.1941:=CN=" + group + ",DC=cloud,DC=citrix,DC=com))").equals(result)); } - } @Test @@ -69,7 +68,7 @@ public void testGenerateADSearchFilterWithNestedGroupsDisabled() { String [] groups = {"dev", "dev-hyd"}; for (String group: groups) { String result = adLdapUserManager.generateADGroupSearchFilter(group, 1L); - assertTrue(("(&(objectClass=user)(memberOf=CN=" + group + ",DC=cloud,DC=citrix,DC=com))").equals(result)); + assertTrue(("(&(&(objectCategory=person)(objectClass=user))(memberOf=CN=" + group + ",DC=cloud,DC=citrix,DC=com))").equals(result)); } } From bdd32a29f5b2a9cdd0d68cb753c706d648c4da1d Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 14 Nov 2025 15:13:42 +0530 Subject: [PATCH 041/271] ui: fix zone options for image instance deploy button (#12060) Signed-off-by: Abhishek Kumar --- .../view/ImageDeployInstanceButton.vue | 35 +++++++++++-------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/ui/src/components/view/ImageDeployInstanceButton.vue b/ui/src/components/view/ImageDeployInstanceButton.vue index b2d4b55bc6a3..2cdd5a0af460 100644 --- a/ui/src/components/view/ImageDeployInstanceButton.vue +++ b/ui/src/components/view/ImageDeployInstanceButton.vue @@ -71,9 +71,14 @@ export default { if (this.$route.meta.name === 'iso') { this.imageApi = 'listIsos' } - setTimeout(() => { - this.fetchData() - }, 100) + this.fetchData() + }, + watch: { + resource (newValue) { + if (newValue?.id) { + this.fetchData() + } + } }, computed: { allowed () { @@ -82,23 +87,22 @@ export default { } }, methods: { - arrayHasItems (array) { - return array !== null && array !== undefined && Array.isArray(array) && array.length > 0 - }, fetchData () { this.fetchResourceData() }, fetchResourceData () { - const params = {} - params.id = this.resource.id - params.templatefilter = 'executable' - params.listall = true - params.page = this.page - params.pagesize = this.pageSize + if (!this.resource || !this.resource.id) { + return + } + const params = { + id: this.resource.id, + templatefilter: 'executable', + listall: true + } this.dataSource = [] this.itemCount = 0 - this.fetchLoading = true + this.loading = true this.zones = [] getAPI(this.imageApi, params).then(json => { const imageResponse = json?.[this.imageApi.toLowerCase() + 'response']?.[this.$route.meta.name] || [] @@ -108,8 +112,8 @@ export default { })) }).catch(error => { this.$notifyError(error) - this.loading = false }).finally(() => { + this.loading = false if (this.zones.length !== 0) { this.$emit('update-zones', this.zones) } @@ -122,7 +126,8 @@ export default { } const zoneids = this.zones.map(z => z.id) this.loading = true - getAPI('listZones', { showicon: true, ids: zoneids.join(',') }).then(json => { + const params = { showicon: true, ids: zoneids.join(',') } + getAPI('listZones', params).then(json => { this.zones = json.listzonesresponse.zone || [] }).finally(() => { this.loading = false From 1fd6d84cf747dc9d8e5f0e60a4c6b5b96f7b2e71 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Mon, 24 Nov 2025 11:10:43 +0100 Subject: [PATCH 042/271] UI: fix list of zones if zone has icon (#12083) --- ui/src/views/compute/wizard/ZoneBlockRadioGroupSelect.vue | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/src/views/compute/wizard/ZoneBlockRadioGroupSelect.vue b/ui/src/views/compute/wizard/ZoneBlockRadioGroupSelect.vue index 2250fc7977d1..9c889d3aa5aa 100644 --- a/ui/src/views/compute/wizard/ZoneBlockRadioGroupSelect.vue +++ b/ui/src/views/compute/wizard/ZoneBlockRadioGroupSelect.vue @@ -29,7 +29,7 @@ + + + + + + + + + + + {{ opt.path || opt.name || opt.description }} + + + +
{{ this.$t('label.cancel') }} {{ this.$t('label.ok') }} @@ -96,6 +123,7 @@ diff --git a/ui/src/config/router.js b/ui/src/config/router.js index 08df799dd898..3e5d8677b347 100644 --- a/ui/src/config/router.js +++ b/ui/src/config/router.js @@ -81,6 +81,7 @@ function generateRouterMap (section) { filters: child.filters, params: child.params ? child.params : {}, columns: child.columns, + advisories: !vueProps.$config.advisoriesDisabled ? child.advisories : undefined, details: child.details, searchFilters: child.searchFilters, related: child.related, @@ -180,6 +181,10 @@ function generateRouterMap (section) { map.meta.columns = section.columns } + if (!vueProps.$config.advisoriesDisabled && section.advisories) { + map.meta.advisories = section.advisories + } + if (section.actions) { map.meta.actions = section.actions } diff --git a/ui/src/config/section/compute.js b/ui/src/config/section/compute.js index a03693e351d8..32e888bb53dc 100644 --- a/ui/src/config/section/compute.js +++ b/ui/src/config/section/compute.js @@ -18,6 +18,8 @@ import { shallowRef, defineAsyncComponent } from 'vue' import store from '@/store' import { isZoneCreated } from '@/utils/zone' +import { getAPI, postAPI, getBaseUrl } from '@/api' +import { getLatestKubernetesIsoParams } from '@/utils/acsrepo' import kubernetesIcon from '@/assets/icons/kubernetes.svg?inline' export default { @@ -582,6 +584,182 @@ export default { } ], resourceType: 'KubernetesCluster', + advisories: [ + { + id: 'cks-min-offering', + severity: 'warning', + message: 'message.advisory.cks.min.offering', + docsHelp: 'plugins/cloudstack-kubernetes-service.html', + dismissOnConditionFail: true, + condition: async (store) => { + if (!('listServiceOfferings' in store.getters.apis)) { + return false + } + const params = { + cpunumber: 2, + memory: 2048, + issystem: false + } + try { + const json = await getAPI('listServiceOfferings', params) + const offerings = json?.listserviceofferingsresponse?.serviceoffering || [] + return !offerings.some(o => !o.iscustomized) + } catch (error) {} + return false + }, + actions: [ + { + primary: true, + label: 'label.add.minimum.required.compute.offering', + loadingLabel: 'message.adding.minimum.required.compute.offering.kubernetes.cluster', + show: (store) => { return ('createServiceOffering' in store.getters.apis) }, + run: async () => { + const params = { + name: 'CKS Instance', + cpunumber: 2, + cpuspeed: 1000, + memory: 2048, + iscustomized: false, + issystem: false + } + try { + const json = await postAPI('createServiceOffering', params) + if (json?.createserviceofferingresponse?.serviceoffering) { + return true + } + } catch (error) {} + return false + }, + successMessage: 'message.added.minimum.required.compute.offering.kubernetes.cluster', + errorMessage: 'message.add.minimum.required.compute.offering.kubernetes.cluster.failed' + }, + { + label: 'label.go.to.compute.offerings', + show: (store) => { return ('listServiceOfferings' in store.getters.apis) }, + run: (store, router) => { + router.push({ name: 'computeoffering' }) + return false + } + } + ] + }, + { + id: 'cks-version-check', + severity: 'warning', + message: 'message.advisory.cks.version.check', + docsHelp: 'plugins/cloudstack-kubernetes-service.html', + dismissOnConditionFail: true, + condition: async (store) => { + const api = 'listKubernetesSupportedVersions' + if (!(api in store.getters.apis)) { + return false + } + try { + const json = await getAPI(api, {}) + const versions = json?.listkubernetessupportedversionsresponse?.kubernetessupportedversion || [] + return versions.length === 0 + } catch (error) {} + return false + }, + actions: [ + { + primary: true, + label: 'label.add.latest.kubernetes.iso', + loadingLabel: 'message.adding.latest.kubernetes.iso', + show: (store) => { return ('addKubernetesSupportedVersion' in store.getters.apis) }, + run: async () => { + let arch = 'x86_64' + if ('listClusters' in store.getters.apis) { + try { + const json = await getAPI('listClusters', { allocationstate: 'Enabled', page: 1, pagesize: 1 }) + const cluster = json?.listclustersresponse?.cluster?.[0] || {} + arch = cluster.architecture || 'x86_64' + } catch (error) {} + } + const params = await getLatestKubernetesIsoParams(arch) + try { + const json = await postAPI('addKubernetesSupportedVersion', params) + if (json?.addkubernetessupportedversionresponse?.kubernetessupportedversion) { + return true + } + } catch (error) {} + return false + }, + successMessage: 'message.added.latest.kubernetes.iso', + errorMessage: 'message.add.latest.kubernetes.iso.failed' + }, + { + label: 'label.go.to.kubernetes.isos', + show: true, + run: (store, router) => { + router.push({ name: 'kubernetesiso' }) + return false + } + } + ] + }, + { + id: 'cks-endpoint-url', + severity: 'warning', + message: 'message.advisory.cks.endpoint.url.not.configured', + docsHelp: 'plugins/cloudstack-kubernetes-service.html', + dismissOnConditionFail: true, + condition: async (store) => { + if (!['Admin'].includes(store.getters.userInfo.roletype)) { + return false + } + let url = '' + const baseUrl = getBaseUrl() + if (baseUrl.startsWith('/')) { + url = window.location.origin + baseUrl + } + if (!url || url.startsWith('http://localhost')) { + return false + } + const params = { + name: 'endpoint.url' + } + const json = await getAPI('listConfigurations', params) + const configuration = json?.listconfigurationsresponse?.configuration?.[0] || {} + return !configuration.value || configuration.value.startsWith('http://localhost') + }, + actions: [ + { + primary: true, + label: 'label.fix.global.setting', + show: (store) => { return ('updateConfiguration' in store.getters.apis) }, + run: async () => { + let url = '' + const baseUrl = getBaseUrl() + if (baseUrl.startsWith('/')) { + url = window.location.origin + baseUrl + } + const params = { + name: 'endpoint.url', + value: url + } + try { + const json = await postAPI('updateConfiguration', params) + if (json?.updateconfigurationresponse?.configuration) { + return true + } + } catch (error) {} + return false + }, + successMessage: 'message.global.setting.updated', + errorMessage: 'message.global.setting.update.failed' + }, + { + label: 'label.go.to.global.settings', + show: (store) => { return ('listConfigurations' in store.getters.apis) }, + run: (store, router) => { + router.push({ name: 'globalsetting' }) + return false + } + } + ] + } + ], actions: [ { api: 'createKubernetesCluster', diff --git a/ui/src/utils/acsrepo/index.js b/ui/src/utils/acsrepo/index.js new file mode 100644 index 000000000000..809bd7f17483 --- /dev/null +++ b/ui/src/utils/acsrepo/index.js @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +const BASE_KUBERNETES_ISO_URL = 'https://download.cloudstack.org/cks/' + +function getDefaultLatestKubernetesIsoParams (arch) { + return { + name: 'v1.33.1-calico-' + arch, + semanticversion: '1.33.1', + url: BASE_KUBERNETES_ISO_URL + 'setup-v1.33.1-calico-' + arch + '.iso', + arch: arch, + mincpunumber: 2, + minmemory: 2048 + } +} + +/** + * Returns the latest Kubernetes ISO info for the given architecture. + * Falls back to a hardcoded default if fetching fails. + * @param {string} arch + * @returns {Promise<{name: string, semanticversion: string, url: string, arch: string}>} + */ +export async function getLatestKubernetesIsoParams (arch) { + arch = arch || 'x86_64' + try { + const html = await fetch(BASE_KUBERNETES_ISO_URL, { cache: 'no-store' }).then(r => r.text()) + + const hrefs = [...html.matchAll(/href="([^"]+\.iso)"/gi)].map(m => m[1]) + + // Prefer files that explicitly include the arch (e.g. ...-x86_64.iso) + let isoHrefs = hrefs.filter(h => new RegExp(`${arch}\\.iso$`, 'i').test(h)) + + // Fallback: older files without arch suffix (e.g. setup-1.28.4.iso) + if (isoHrefs.length === 0) { + isoHrefs = hrefs.filter(h => /setup-\d+\.\d+\.\d+\.iso$/i.test(h)) + } + + const entries = isoHrefs.map(h => { + const m = h.match(/setup-(?:v)?(\d+\.\d+\.\d+)(?:-calico)?(?:-(x86_64|arm64))?/i) + return m + ? { + name: h.replace('.iso', ''), + semanticversion: m[1], + url: new URL(h, BASE_KUBERNETES_ISO_URL).toString(), + arch: m[2] || arch, + mincpunumber: 2, + minmemory: 2048 + } + : null + }).filter(Boolean) + + if (entries.length === 0) throw new Error('No matching ISOs found') + + entries.sort((a, b) => { + const pa = a.semanticversion.split('.').map(Number) + const pb = b.semanticversion.split('.').map(Number) + for (let i = 0; i < 3; i++) { + if ((pb[i] ?? 0) !== (pa[i] ?? 0)) return (pb[i] ?? 0) - (pa[i] ?? 0) + } + return 0 + }) + + return entries[0] + } catch { + return { ...getDefaultLatestKubernetesIsoParams(arch) } + } +} diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index e0583cd97a4e..cfbaf580507d 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -540,6 +540,9 @@ class="row-element" v-else > + Date: Thu, 29 Jan 2026 13:52:07 +0530 Subject: [PATCH 218/271] api,server: apis return their http request type (#11382) * api,server: apis return their http request type Signed-off-by: Abhishek Kumar * fix and unit test Signed-off-by: Abhishek Kumar * more test Signed-off-by: Abhishek Kumar * address copilot Signed-off-by: Abhishek Kumar * Update plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java --------- Signed-off-by: Abhishek Kumar Co-authored-by: dahn Co-authored-by: Harikrishna --- .../org/apache/cloudstack/api/APICommand.java | 2 + .../apache/cloudstack/api/ApiConstants.java | 1 + ...ntAllowedToCreateOfferingsWithTagsCmd.java | 3 +- .../api/response/ApiDiscoveryResponse.java | 13 ++ .../discovery/ApiDiscoveryServiceImpl.java | 9 +- .../ApiDiscoveryServiceImplTest.java | 123 +++++++++++++++ .../api/command/QuotaBalanceCmd.java | 3 +- .../api/command/QuotaEnabledCmd.java | 3 +- .../api/command/QuotaStatementCmd.java | 3 +- .../api/command/QuotaSummaryCmd.java | 3 +- .../api/command/QuotaTariffListCmd.java | 3 +- .../cloudian/api/CloudianIsEnabledCmd.java | 3 +- .../api/command/ReadyForShutdownCmd.java | 3 +- .../command/VerifyOAuthCodeAndGetUserCmd.java | 14 +- .../main/java/com/cloud/api/ApiServlet.java | 46 ++++-- .../java/com/cloud/api/ApiServletTest.java | 143 ++++++++++++++---- 16 files changed, 319 insertions(+), 56 deletions(-) create mode 100644 plugins/api/discovery/src/test/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImplTest.java diff --git a/api/src/main/java/org/apache/cloudstack/api/APICommand.java b/api/src/main/java/org/apache/cloudstack/api/APICommand.java index c559be081165..b77649046ca9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/APICommand.java +++ b/api/src/main/java/org/apache/cloudstack/api/APICommand.java @@ -50,4 +50,6 @@ RoleType[] authorized() default {}; Class[] entityType() default {}; + + String httpMethod() default ""; } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 2e0e843b5354..d00e339de2fd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -282,6 +282,7 @@ public class ApiConstants { public static final String HOST = "host"; public static final String HOST_CONTROL_STATE = "hostcontrolstate"; public static final String HOSTS_MAP = "hostsmap"; + public static final String HTTP_REQUEST_TYPE = "httprequesttype"; public static final String HYPERVISOR = "hypervisor"; public static final String INLINE = "inline"; public static final String INSTANCE = "instance"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java index fcd6b03d3e59..4b1cd2ff7255 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/IsAccountAllowedToCreateOfferingsWithTagsCmd.java @@ -26,7 +26,8 @@ import org.apache.cloudstack.api.response.IsAccountAllowedToCreateOfferingsWithTagsResponse; @APICommand(name = "isAccountAllowedToCreateOfferingsWithTags", description = "Return true if the specified account is allowed to create offerings with tags.", - responseObject = IsAccountAllowedToCreateOfferingsWithTagsResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) + responseObject = IsAccountAllowedToCreateOfferingsWithTagsResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class IsAccountAllowedToCreateOfferingsWithTagsCmd extends BaseCmd { @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = AccountResponse.class, description = "Account UUID", required = true) diff --git a/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java b/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java index 4b243f2e8a1a..90b3b89d3fb5 100644 --- a/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java +++ b/plugins/api/discovery/src/main/java/org/apache/cloudstack/api/response/ApiDiscoveryResponse.java @@ -59,6 +59,10 @@ public class ApiDiscoveryResponse extends BaseResponse { @Param(description = "Response field type") private String type; + @SerializedName(ApiConstants.HTTP_REQUEST_TYPE) + @Param(description = "Preferred HTTP request type for the API", since = "4.23.0") + private String httpRequestType; + public ApiDiscoveryResponse() { params = new HashSet(); apiResponse = new HashSet(); @@ -74,6 +78,7 @@ public ApiDiscoveryResponse(ApiDiscoveryResponse another) { this.params = new HashSet<>(another.getParams()); this.apiResponse = new HashSet<>(another.getApiResponse()); this.type = another.getType(); + this.httpRequestType = another.getHttpRequestType(); this.setObjectName(another.getObjectName()); } @@ -140,4 +145,12 @@ public Set getApiResponse() { public String getType() { return type; } + + public String getHttpRequestType() { + return httpRequestType; + } + + public void setHttpRequestType(String httpRequestType) { + this.httpRequestType = httpRequestType; + } } diff --git a/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java b/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java index 452b95cf2c05..d6d235162efb 100644 --- a/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java +++ b/plugins/api/discovery/src/main/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java @@ -50,6 +50,7 @@ import org.reflections.ReflectionUtils; import org.springframework.stereotype.Component; +import com.cloud.api.ApiServlet; import com.cloud.exception.PermissionDeniedException; import com.cloud.serializer.Param; import com.cloud.user.Account; @@ -189,7 +190,7 @@ private ApiResponseResponse getFieldResponseMap(Field responseField) { return responseResponse; } - private ApiDiscoveryResponse getCmdRequestMap(Class cmdClass, APICommand apiCmdAnnotation) { + protected ApiDiscoveryResponse getCmdRequestMap(Class cmdClass, APICommand apiCmdAnnotation) { String apiName = apiCmdAnnotation.name(); ApiDiscoveryResponse response = new ApiDiscoveryResponse(); response.setName(apiName); @@ -197,6 +198,12 @@ private ApiDiscoveryResponse getCmdRequestMap(Class cmdClass, APICommand apiC if (!apiCmdAnnotation.since().isEmpty()) { response.setSince(apiCmdAnnotation.since()); } + String httpRequestType = apiCmdAnnotation.httpMethod(); + if (StringUtils.isBlank(httpRequestType)) { + httpRequestType = ApiServlet.GET_REQUEST_COMMANDS.matcher(apiName.toLowerCase()).matches() ? + "GET" : "POST"; + } + response.setHttpRequestType(httpRequestType); Set fields = ReflectUtil.getAllFieldsForClass(cmdClass, new Class[] {BaseCmd.class, BaseAsyncCmd.class, BaseAsyncCreateCmd.class}); diff --git a/plugins/api/discovery/src/test/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImplTest.java b/plugins/api/discovery/src/test/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImplTest.java new file mode 100644 index 000000000000..e69b9523d449 --- /dev/null +++ b/plugins/api/discovery/src/test/java/org/apache/cloudstack/discovery/ApiDiscoveryServiceImplTest.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.discovery; + +import static org.mockito.ArgumentMatchers.any; + +import java.lang.reflect.Field; +import java.util.Set; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.command.admin.account.CreateAccountCmd; +import org.apache.cloudstack.api.command.admin.user.GetUserCmd; +import org.apache.cloudstack.api.command.user.discovery.ListApisCmd; +import org.apache.cloudstack.api.response.ApiDiscoveryResponse; +import org.apache.cloudstack.api.response.ApiParameterResponse; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; +import org.springframework.test.util.ReflectionTestUtils; + +import com.cloud.utils.ReflectUtil; + +@RunWith(MockitoJUnitRunner.class) +public class ApiDiscoveryServiceImplTest { + + @Mock + APICommand apiCommandMock; + + @Spy + @InjectMocks + ApiDiscoveryServiceImpl discoveryServiceSpy; + + @Before + public void setUp() { + Mockito.when(apiCommandMock.name()).thenReturn("listApis"); + Mockito.when(apiCommandMock.since()).thenReturn(""); + } + + @Test + public void getCmdRequestMapReturnsResponseWithCorrectApiNameAndDescription() { + Mockito.when(apiCommandMock.description()).thenReturn("Lists all APIs"); + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(ListApisCmd.class, apiCommandMock); + Assert.assertEquals("listApis", response.getName()); + Assert.assertEquals("Lists all APIs", response.getDescription()); + } + + @Test + public void getCmdRequestMapSetsHttpRequestTypeToGetWhenApiNameMatchesGetPattern() { + Mockito.when(apiCommandMock.name()).thenReturn("getUser"); + Mockito.when(apiCommandMock.httpMethod()).thenReturn(""); + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(GetUserCmd.class, apiCommandMock); + Assert.assertEquals("GET", response.getHttpRequestType()); + } + + @Test + public void getCmdRequestMapSetsHttpRequestTypeToPostWhenApiNameDoesNotMatchGetPattern() { + Mockito.when(apiCommandMock.name()).thenReturn("createAccount"); + Mockito.when(apiCommandMock.httpMethod()).thenReturn(""); + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(CreateAccountCmd.class, apiCommandMock); + Assert.assertEquals("POST", response.getHttpRequestType()); + } + + @Test + public void getCmdRequestMapSetsAsyncToTrueForAsyncCommand() { + Mockito.when(apiCommandMock.name()).thenReturn("asyncApi"); + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(BaseAsyncCmd.class, apiCommandMock); + Assert.assertTrue(response.getAsync()); + } + + @Test + public void getCmdRequestMapDoesNotAddParamsWithoutParameterAnnotation() { + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(BaseCmd.class, apiCommandMock); + Assert.assertFalse(response.getParams().isEmpty()); + Assert.assertEquals(1, response.getParams().size()); + } + + @Test + public void getCmdRequestMapAddsParamsWithExposedAndIncludedInApiDocAnnotations() { + Field fieldMock = Mockito.mock(Field.class); + Parameter parameterMock = Mockito.mock(Parameter.class); + Mockito.when(parameterMock.expose()).thenReturn(true); + Mockito.when(parameterMock.includeInApiDoc()).thenReturn(true); + Mockito.when(parameterMock.name()).thenReturn("paramName"); + Mockito.when(parameterMock.since()).thenReturn(""); + Mockito.when(parameterMock.entityType()).thenReturn(new Class[]{Object.class}); + Mockito.when(parameterMock.description()).thenReturn("paramDescription"); + Mockito.when(parameterMock.type()).thenReturn(BaseCmd.CommandType.STRING); + Mockito.when(fieldMock.getAnnotation(Parameter.class)).thenReturn(parameterMock); + try (MockedStatic reflectUtilMockedStatic = Mockito.mockStatic(ReflectUtil.class)) { + reflectUtilMockedStatic.when(() -> ReflectUtil.getAllFieldsForClass(any(Class.class), any(Class[].class))) + .thenReturn(Set.of(fieldMock)); + ApiDiscoveryResponse response = discoveryServiceSpy.getCmdRequestMap(ListApisCmd.class, apiCommandMock); + Set params = response.getParams(); + Assert.assertEquals(1, params.size()); + ApiParameterResponse paramResponse = params.iterator().next(); + Assert.assertEquals("paramName", ReflectionTestUtils.getField(paramResponse, "name")); + } + } +} diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java index cf39f802d34f..0cec0df66182 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaBalanceCmd.java @@ -35,7 +35,8 @@ import org.apache.cloudstack.quota.vo.QuotaBalanceVO; import org.apache.cloudstack.api.response.QuotaStatementItemResponse; -@APICommand(name = "quotaBalance", responseObject = QuotaStatementItemResponse.class, description = "Create a quota balance statement", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +@APICommand(name = "quotaBalance", responseObject = QuotaStatementItemResponse.class, description = "Create a quota balance statement", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class QuotaBalanceCmd extends BaseCmd { diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java index 4035a5205e6c..af1d146ea9dc 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaEnabledCmd.java @@ -26,7 +26,8 @@ import javax.inject.Inject; -@APICommand(name = "quotaIsEnabled", responseObject = QuotaEnabledResponse.class, description = "Return true if the plugin is enabled", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +@APICommand(name = "quotaIsEnabled", responseObject = QuotaEnabledResponse.class, description = "Return true if the plugin is enabled", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class QuotaEnabledCmd extends BaseCmd { diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java index 18f9bc48a6e2..d3bd3868ed16 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaStatementCmd.java @@ -35,7 +35,8 @@ import com.cloud.user.Account; -@APICommand(name = "quotaStatement", responseObject = QuotaStatementItemResponse.class, description = "Create a quota statement", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +@APICommand(name = "quotaStatement", responseObject = QuotaStatementItemResponse.class, description = "Create a quota statement", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class QuotaStatementCmd extends BaseCmd { diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java index 42a598042b39..87322b01f4d4 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaSummaryCmd.java @@ -33,7 +33,8 @@ import javax.inject.Inject; -@APICommand(name = "quotaSummary", responseObject = QuotaSummaryResponse.class, description = "Lists balance and quota usage for all Accounts", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +@APICommand(name = "quotaSummary", responseObject = QuotaSummaryResponse.class, description = "Lists balance and quota usage for all Accounts", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class QuotaSummaryCmd extends BaseListCmd { @Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, required = false, description = "Optional, Account Id for which statement needs to be generated") diff --git a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java index d054d5459313..e0bab07501b6 100644 --- a/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java +++ b/plugins/database/quota/src/main/java/org/apache/cloudstack/api/command/QuotaTariffListCmd.java @@ -38,7 +38,8 @@ import java.util.Date; import java.util.List; -@APICommand(name = "quotaTariffList", responseObject = QuotaTariffResponse.class, description = "Lists all quota tariff plans", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +@APICommand(name = "quotaTariffList", responseObject = QuotaTariffResponse.class, description = "Lists all quota tariff plans", since = "4.7.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class QuotaTariffListCmd extends BaseListCmd { @Inject diff --git a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/api/CloudianIsEnabledCmd.java b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/api/CloudianIsEnabledCmd.java index 56cb74e3cab7..3c334ba55c2e 100644 --- a/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/api/CloudianIsEnabledCmd.java +++ b/plugins/integrations/cloudian/src/main/java/org/apache/cloudstack/cloudian/api/CloudianIsEnabledCmd.java @@ -31,7 +31,8 @@ responseObject = CloudianEnabledResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.11.0", - authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}) + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, + httpMethod = "GET") public class CloudianIsEnabledCmd extends BaseCmd { @Inject diff --git a/plugins/maintenance/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java b/plugins/maintenance/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java index 782b23a04222..36ec4fff9c95 100644 --- a/plugins/maintenance/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java +++ b/plugins/maintenance/src/main/java/org/apache/cloudstack/api/command/ReadyForShutdownCmd.java @@ -26,7 +26,8 @@ description = "Returns the status of CloudStack, whether a shutdown has been triggered and if ready to shutdown", since = "4.19.0", responseObject = ManagementServerMaintenanceResponse.class, - requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, + httpMethod = "GET") public class ReadyForShutdownCmd extends BaseMSMaintenanceActionCmd { public static final String APINAME = "readyForShutdown"; diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java index bd49f87d6273..b3d2d335ba25 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/api/command/VerifyOAuthCodeAndGetUserCmd.java @@ -20,8 +20,10 @@ import java.util.List; import java.util.Map; -import com.cloud.api.response.ApiResponseSerializer; -import com.cloud.user.Account; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.HttpSession; + import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; @@ -37,13 +39,13 @@ import org.apache.cloudstack.oauth2.api.response.OauthProviderResponse; import org.apache.commons.lang.ArrayUtils; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.servlet.http.HttpSession; +import com.cloud.api.response.ApiResponseSerializer; +import com.cloud.user.Account; @APICommand(name = "verifyOAuthCodeAndGetUser", description = "Verify the OAuth Code and fetch the corresponding user from provider", responseObject = OauthProviderResponse.class, entityType = {}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, - authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, since = "4.19.0") + authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User}, since = "4.19.0", + httpMethod = "GET") public class VerifyOAuthCodeAndGetUserCmd extends BaseListCmd implements APIAuthenticator { ///////////////////////////////////////////////////// diff --git a/server/src/main/java/com/cloud/api/ApiServlet.java b/server/src/main/java/com/cloud/api/ApiServlet.java index db17daaf146b..158df2240717 100644 --- a/server/src/main/java/com/cloud/api/ApiServlet.java +++ b/server/src/main/java/com/cloud/api/ApiServlet.java @@ -25,8 +25,8 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.regex.Pattern; import java.util.Set; +import java.util.regex.Pattern; import javax.inject.Inject; import javax.servlet.ServletConfig; @@ -52,10 +52,9 @@ import org.apache.cloudstack.managed.context.ManagedContext; import org.apache.cloudstack.utils.consoleproxy.ConsoleAccessUtils; import org.apache.commons.collections.MapUtils; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; import org.apache.commons.lang3.EnumUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; import org.springframework.stereotype.Component; import org.springframework.web.context.support.SpringBeanAutowiringSupport; @@ -70,12 +69,12 @@ import com.cloud.user.AccountService; import com.cloud.user.User; import com.cloud.user.UserAccount; - import com.cloud.utils.HttpUtils; -import com.cloud.utils.HttpUtils.ApiSessionKeySameSite; import com.cloud.utils.HttpUtils.ApiSessionKeyCheckOption; +import com.cloud.utils.HttpUtils.ApiSessionKeySameSite; import com.cloud.utils.StringUtils; import com.cloud.utils.db.EntityManager; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; @Component("apiServlet") @@ -84,9 +83,7 @@ public class ApiServlet extends HttpServlet { private static final Logger ACCESSLOGGER = LogManager.getLogger("apiserver." + ApiServlet.class.getName()); private static final String REPLACEMENT = "_"; private static final String LOGGER_REPLACEMENTS = "[\n\r\t]"; - private static final Pattern GET_REQUEST_COMMANDS = Pattern.compile("^(get|list|query|find)(\\w+)+$"); - private static final HashSet GET_REQUEST_COMMANDS_LIST = new HashSet<>(Set.of("isaccountallowedtocreateofferingswithtags", - "readyforshutdown", "cloudianisenabled", "quotabalance", "quotasummary", "quotatarifflist", "quotaisenabled", "quotastatement", "verifyoauthcodeandgetuser")); + public static final Pattern GET_REQUEST_COMMANDS = Pattern.compile("^(get|list|query|find)(\\w+)+$"); private static final HashSet POST_REQUESTS_TO_DISABLE_LOGGING = new HashSet<>(Set.of( "login", "oauthlogin", @@ -367,7 +364,7 @@ void processRequestInContext(final HttpServletRequest req, final HttpServletResp } } - if (apiServer.isPostRequestsAndTimestampsEnforced() && !isStateChangingCommandUsingPOST(command, req.getMethod(), params)) { + if (apiServer.isPostRequestsAndTimestampsEnforced() && isStateChangingCommandNotUsingPOST(command, req.getMethod(), params)) { String errorText = String.format("State changing command %s needs to be sent using POST request", command); if (command.equalsIgnoreCase("updateConfiguration") && params.containsKey("name")) { errorText = String.format("Changes for configuration %s needs to be sent using POST request", params.get("name")[0]); @@ -485,13 +482,32 @@ private boolean checkIfAuthenticatorIsOf2FA(String command) { return verify2FA; } - private boolean isStateChangingCommandUsingPOST(String command, String method, Map params) { - if (command == null || (!GET_REQUEST_COMMANDS.matcher(command.toLowerCase()).matches() && !GET_REQUEST_COMMANDS_LIST.contains(command.toLowerCase()) - && !command.equalsIgnoreCase("updateConfiguration") && !method.equals("POST"))) { + protected boolean isStateChangingCommandNotUsingPOST(String command, String method, Map params) { + if (BaseCmd.HTTPMethod.POST.toString().equalsIgnoreCase(method)) { + return false; + } + if (command == null || method == null) { + return true; + } + String commandHttpMethod = null; + try { + Class cmdClass = apiServer.getCmdClass(command); + if (cmdClass != null) { + APICommand at = cmdClass.getAnnotation(APICommand.class); + if (at != null && org.apache.commons.lang3.StringUtils.isNotBlank(at.httpMethod())) { + commandHttpMethod = at.httpMethod(); + } + } + } catch (CloudRuntimeException e) { + LOGGER.trace("Command class not found for {}; falling back to pattern match", command, e); + } + if (BaseCmd.HTTPMethod.GET.toString().equalsIgnoreCase(commandHttpMethod) || + GET_REQUEST_COMMANDS.matcher(command.toLowerCase()).matches()) { return false; } - return !command.equalsIgnoreCase("updateConfiguration") || method.equals("POST") || (params.containsKey("name") - && params.get("name")[0].toString().equalsIgnoreCase(ApiServer.EnforcePostRequestsAndTimestamps.key())); + return !command.equalsIgnoreCase("updateConfiguration") || + !params.containsKey("name") || + !ApiServer.EnforcePostRequestsAndTimestamps.key().equalsIgnoreCase(params.get("name")[0].toString()); } protected boolean skip2FAcheckForAPIs(String command) { diff --git a/server/src/test/java/com/cloud/api/ApiServletTest.java b/server/src/test/java/com/cloud/api/ApiServletTest.java index 79fe4b86f859..c5ee9f58154d 100644 --- a/server/src/test/java/com/cloud/api/ApiServletTest.java +++ b/server/src/test/java/com/cloud/api/ApiServletTest.java @@ -16,22 +16,30 @@ // under the License. package com.cloud.api; -import com.cloud.api.auth.ListUserTwoFactorAuthenticatorProvidersCmd; -import com.cloud.api.auth.SetupUserTwoFactorAuthenticationCmd; -import com.cloud.api.auth.ValidateUserTwoFactorAuthenticationCodeCmd; -import com.cloud.server.ManagementServer; -import com.cloud.user.Account; -import com.cloud.user.AccountManagerImpl; -import com.cloud.user.AccountService; -import com.cloud.user.User; -import com.cloud.user.UserAccount; -import com.cloud.utils.HttpUtils; -import com.cloud.vm.UserVmManager; +import static org.mockito.ArgumentMatchers.nullable; + +import java.io.IOException; +import java.io.PrintWriter; +import java.io.StringWriter; +import java.io.UnsupportedEncodingException; +import java.lang.reflect.Field; +import java.net.InetAddress; +import java.net.URLEncoder; +import java.net.UnknownHostException; +import java.util.HashMap; +import java.util.Map; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import javax.servlet.http.HttpSession; + +import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.auth.APIAuthenticationManager; import org.apache.cloudstack.api.auth.APIAuthenticationType; import org.apache.cloudstack.api.auth.APIAuthenticator; import org.apache.cloudstack.api.command.admin.config.ListCfgsByCmd; +import org.apache.cloudstack.api.command.admin.offering.IsAccountAllowedToCreateOfferingsWithTagsCmd; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.impl.ConfigDepotImpl; import org.junit.After; @@ -43,25 +51,24 @@ import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.servlet.http.HttpSession; -import java.io.IOException; -import java.io.PrintWriter; -import java.io.StringWriter; -import java.io.UnsupportedEncodingException; -import java.lang.reflect.Field; -import java.net.InetAddress; -import java.net.URLEncoder; -import java.net.UnknownHostException; -import java.util.HashMap; -import java.util.Map; - -import static org.mockito.ArgumentMatchers.nullable; +import com.cloud.api.auth.ListUserTwoFactorAuthenticatorProvidersCmd; +import com.cloud.api.auth.SetupUserTwoFactorAuthenticationCmd; +import com.cloud.api.auth.ValidateUserTwoFactorAuthenticationCodeCmd; +import com.cloud.server.ManagementServer; +import com.cloud.user.Account; +import com.cloud.user.AccountManagerImpl; +import com.cloud.user.AccountService; +import com.cloud.user.User; +import com.cloud.user.UserAccount; +import com.cloud.utils.HttpUtils; +import com.cloud.vm.UserVmManager; @RunWith(MockitoJUnitRunner.class) public class ApiServletTest { + private static final String[] STATE_CHANGING_COMMAND_CHECK_NAME_PARAM = + {ApiServer.EnforcePostRequestsAndTimestamps.key()}; + @Mock ApiServer apiServer; @@ -461,4 +468,88 @@ public void testVerify2FAWhenExpectedCommandIsNotCalled() throws UnknownHostExce Assert.assertEquals(false, result); } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForPostMethod() { + String command = "updateConfiguration"; + String method = "POST"; + Map params = new HashMap<>(); + + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + + Assert.assertFalse(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsTrueForNullCommandAndMethod() { + String command = null; + String method = null; + Map params = new HashMap<>(); + + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + + Assert.assertTrue(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForGetHttpMethodAnnotation() { + String command = "isAccountAllowedToCreateOfferingsWithTags"; + String method = "GET"; + Map params = new HashMap<>(); + Class cmdClass = IsAccountAllowedToCreateOfferingsWithTagsCmd.class; + APICommand apiCommand = cmdClass.getAnnotation(APICommand.class); + Mockito.doReturn(cmdClass).when(apiServer).getCmdClass(command); + Assert.assertNotNull(apiCommand); + Assert.assertEquals("GET", apiCommand.httpMethod()); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertFalse(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForMatchingGetRequestPattern() { + String command = "listZones"; + String method = "GET"; + Map params = new HashMap<>(); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertFalse(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsTrueForMissingNameParameter() { + String command = "updateConfiguration"; + String method = "GET"; + Map params = new HashMap<>(); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertTrue(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForUpdateConfigurationEnforcePostRequestsKey() { + String command = "updateConfiguration"; + String method = "GET"; + Map params = new HashMap<>(); + params.put("name", STATE_CHANGING_COMMAND_CHECK_NAME_PARAM); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertFalse(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForWrongApiEnforcePostRequestsKey() { + String command = "updateSomeApi"; + String method = "GET"; + Map params = new HashMap<>(); + params.put("name", STATE_CHANGING_COMMAND_CHECK_NAME_PARAM); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertTrue(result); + } + + @Test + public void isStateChangingCommandNotUsingPOSTReturnsFalseForUpdateConfigurationNonEnforcePostRequestsKey() { + String command = "updateConfiguration"; + String method = "GET"; + Map params = new HashMap<>(); + params.put("name", new String[] { "key" }); + boolean result = servlet.isStateChangingCommandNotUsingPOST(command, method, params); + Assert.assertTrue(result); + } } From 374fe1e3f8297b13254aa6a83336bb7c4322c005 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bernardo=20De=20Marco=20Gon=C3=A7alves?= Date: Thu, 29 Jan 2026 06:01:54 -0300 Subject: [PATCH 219/271] Change `vmsnapshot.max` setting scope to the account level (#11616) --- .../com/cloud/vm/snapshot/VMSnapshotManager.java | 2 +- .../cloud/vm/snapshot/VMSnapshotManagerTest.java | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java b/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java index 6478469f190c..6831552b83db 100644 --- a/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java +++ b/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java @@ -31,7 +31,7 @@ public interface VMSnapshotManager extends VMSnapshotService, Manager { static final ConfigKey VMSnapshotExpireInterval = new ConfigKey("Advanced", Integer.class, "vmsnapshot.expire.interval", "-1", "VM Snapshot expire interval in hours", true, ConfigKey.Scope.Account); - ConfigKey VMSnapshotMax = new ConfigKey("Advanced", Integer.class, "vmsnapshot.max", "10", "Maximum vm snapshots for a single vm", true, ConfigKey.Scope.Global); + ConfigKey VMSnapshotMax = new ConfigKey("Advanced", Integer.class, "vmsnapshot.max", "10", "Maximum VM snapshots for a single VM", true, ConfigKey.Scope.Account); /** * Delete all VM snapshots belonging to one VM diff --git a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java index a0f09981a401..b696d743ac64 100644 --- a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java +++ b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java @@ -41,6 +41,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; +import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.uservm.UserVm; @@ -136,6 +137,8 @@ public class VMSnapshotManagerTest { VMSnapshotDetailsDao _vmSnapshotDetailsDao; @Mock UserVmManager _userVmManager; + @Mock + private AccountVO accountVOMock; private static final long TEST_VM_ID = 3L; private static final long SERVICE_OFFERING_ID = 1L; @@ -285,8 +288,12 @@ public void testCreateVMSnapshotF3() throws AgentUnavailableException, Operation @SuppressWarnings("unchecked") @Test(expected = CloudRuntimeException.class) public void testAllocVMSnapshotF4() throws ResourceAllocationException { + long accountId = 1L; List mockList = mock(List.class); when(mockList.size()).thenReturn(10); + when(_userVMDao.findById(TEST_VM_ID)).thenReturn(vmMock); + when(userVm.getAccountId()).thenReturn(accountId); + when(_accountMgr.getAccount(accountId)).thenReturn(accountVOMock); when(_vmSnapshotDao.findByVm(TEST_VM_ID)).thenReturn(mockList); _vmSnapshotMgr.allocVMSnapshot(TEST_VM_ID, "", "", true); } @@ -295,8 +302,12 @@ public void testAllocVMSnapshotF4() throws ResourceAllocationException { @SuppressWarnings("unchecked") @Test(expected = CloudRuntimeException.class) public void testAllocVMSnapshotF5() throws ResourceAllocationException { + long accountId = 1L; List mockList = mock(List.class); when(mockList.size()).thenReturn(1); + when(_userVMDao.findById(TEST_VM_ID)).thenReturn(vmMock); + when(userVm.getAccountId()).thenReturn(accountId); + when(_accountMgr.getAccount(accountId)).thenReturn(accountVOMock); when(_snapshotDao.listByInstanceId(TEST_VM_ID, Snapshot.State.Creating, Snapshot.State.CreatedOnPrimary, Snapshot.State.BackingUp)).thenReturn(mockList); _vmSnapshotMgr.allocVMSnapshot(TEST_VM_ID, "", "", true); } @@ -304,6 +315,10 @@ public void testAllocVMSnapshotF5() throws ResourceAllocationException { // successful creation case @Test public void testCreateVMSnapshot() throws AgentUnavailableException, OperationTimedoutException, ResourceAllocationException, NoTransitionException { + long accountId = 1L; + when(_userVMDao.findById(TEST_VM_ID)).thenReturn(vmMock); + when(userVm.getAccountId()).thenReturn(accountId); + when(_accountMgr.getAccount(accountId)).thenReturn(accountVOMock); when(vmMock.getState()).thenReturn(State.Running); _vmSnapshotMgr.allocVMSnapshot(TEST_VM_ID, "", "", true); } From ba2901a0e6b216ba515f5d4a3d1e8ce1d275452a Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 28 Jan 2026 12:40:34 +0530 Subject: [PATCH 220/271] ui: add cache for oslogo request using osId (#11422) When OsLogo component is used in the items of a list having same OS type it was causing listOsTypes API call multiple time. This change allows caching request and response value for 30 seconds. Caching behaviour is controlled using `useCache` flag. Signed-off-by: Abhishek Kumar --- ui/src/components/widgets/OsLogo.vue | 78 ++++++++++--------- .../compute/wizard/OsBasedImageRadioGroup.vue | 3 +- 2 files changed, 42 insertions(+), 39 deletions(-) diff --git a/ui/src/components/widgets/OsLogo.vue b/ui/src/components/widgets/OsLogo.vue index 643953012c18..f19aac56a1a6 100644 --- a/ui/src/components/widgets/OsLogo.vue +++ b/ui/src/components/widgets/OsLogo.vue @@ -31,6 +31,9 @@ - - diff --git a/ui/src/views/compute/wizard/OsBasedImageRadioGroup.vue b/ui/src/views/compute/wizard/OsBasedImageRadioGroup.vue index 2518ed0c0420..45ea347553c3 100644 --- a/ui/src/views/compute/wizard/OsBasedImageRadioGroup.vue +++ b/ui/src/views/compute/wizard/OsBasedImageRadioGroup.vue @@ -42,7 +42,8 @@ class="radio-group__os-logo" size="2x" :osId="item.ostypeid" - :os-name="item.osName" /> + :os-name="item.osName" + :use-cache="true" />   {{ item.displaytext }} From 3f02773024410e3f5fe65ed00ebec01b3324610e Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Wed, 28 Jan 2026 16:46:34 +0530 Subject: [PATCH 221/271] ui: bump nodejs v24 LTS usage (#12471) --- ui/README.md | 6 +++--- ui/package.json | 9 ++++++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/ui/README.md b/ui/README.md index 170232b574e5..3f7bcb8120ea 100644 --- a/ui/README.md +++ b/ui/README.md @@ -27,18 +27,18 @@ A modern role-based progressive CloudStack UI based on Vue.js and Ant Design. Install node: (Debian/Ubuntu) - curl -sL https://deb.nodesource.com/setup_20.x | sudo -E bash - + curl -sL https://deb.nodesource.com/setup_24.x | sudo -E bash - sudo apt-get install -y nodejs # Or use distro provided: sudo apt-get install npm nodejs Install node: (CentOS/Fedora/RHEL) - curl -sL https://rpm.nodesource.com/setup_20.x | sudo bash - + curl -sL https://rpm.nodesource.com/setup_24.x | sudo bash - sudo yum install nodejs Install node: (Mac OS) - brew install node@20 + brew install node@24 Optionally, you may also install system-wide dev tools: diff --git a/ui/package.json b/ui/package.json index 48f337500bda..9801c1b18153 100644 --- a/ui/package.json +++ b/ui/package.json @@ -101,15 +101,18 @@ "eslint-plugin-vue": "^7.0.0", "less": "^3.0.4", "less-loader": "^5.0.0", + "nan": "2.18.0", + "node-gyp": "10.0.1", "sass": "^1.49.9", "sass-loader": "^8.0.2", "uglifyjs-webpack-plugin": "^2.2.0", "vue-jest": "^5.0.0-0", "vue-svg-loader": "^0.17.0-beta.2", - "webpack": "^4.46.0", - "node-gyp": "10.0.1", "nan": "2.18.0" + "webpack": "^4.46.0" + }, + "resolutions": { + "nan": "2.18.0" }, - "resolutions": { "nan": "2.18.0" }, "eslintConfig": { "root": true, "env": { From c7b11c9775989db28609dd475d70dd1d6f4b98ef Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Wed, 28 Jan 2026 17:17:50 +0530 Subject: [PATCH 222/271] Fix url in password reset email (#12078) --- .../org/apache/cloudstack/ServerDaemon.java | 15 +++----- .../user/UserPasswordResetManager.java | 4 ++- .../user/UserPasswordResetManagerImpl.java | 26 +++++++++++--- .../cloud/utils/server/ServerProperties.java | 36 +++++++++++++++++-- 4 files changed, 64 insertions(+), 17 deletions(-) diff --git a/client/src/main/java/org/apache/cloudstack/ServerDaemon.java b/client/src/main/java/org/apache/cloudstack/ServerDaemon.java index 09bdb11a6b39..06477fff8986 100644 --- a/client/src/main/java/org/apache/cloudstack/ServerDaemon.java +++ b/client/src/main/java/org/apache/cloudstack/ServerDaemon.java @@ -71,11 +71,6 @@ public class ServerDaemon implements Daemon { private static final String BIND_INTERFACE = "bind.interface"; private static final String CONTEXT_PATH = "context.path"; private static final String SESSION_TIMEOUT = "session.timeout"; - private static final String HTTP_ENABLE = "http.enable"; - private static final String HTTP_PORT = "http.port"; - private static final String HTTPS_ENABLE = "https.enable"; - private static final String HTTPS_PORT = "https.port"; - private static final String KEYSTORE_FILE = "https.keystore"; private static final String KEYSTORE_PASSWORD = "https.keystore.password"; private static final String WEBAPP_DIR = "webapp.dir"; private static final String ACCESS_LOG = "access.log"; @@ -137,11 +132,11 @@ public void init(final DaemonContext context) { } setBindInterface(properties.getProperty(BIND_INTERFACE, null)); setContextPath(properties.getProperty(CONTEXT_PATH, "/client")); - setHttpEnable(Boolean.valueOf(properties.getProperty(HTTP_ENABLE, "true"))); - setHttpPort(Integer.valueOf(properties.getProperty(HTTP_PORT, "8080"))); - setHttpsEnable(Boolean.valueOf(properties.getProperty(HTTPS_ENABLE, "false"))); - setHttpsPort(Integer.valueOf(properties.getProperty(HTTPS_PORT, "8443"))); - setKeystoreFile(properties.getProperty(KEYSTORE_FILE)); + setHttpEnable(Boolean.valueOf(properties.getProperty(ServerProperties.HTTP_ENABLE, "true"))); + setHttpPort(Integer.valueOf(properties.getProperty(ServerProperties.HTTP_PORT, "8080"))); + setHttpsEnable(Boolean.valueOf(properties.getProperty(ServerProperties.HTTPS_ENABLE, "false"))); + setHttpsPort(Integer.valueOf(properties.getProperty(ServerProperties.HTTPS_PORT, "8443"))); + setKeystoreFile(properties.getProperty(ServerProperties.KEYSTORE_FILE)); setKeystorePassword(properties.getProperty(KEYSTORE_PASSWORD)); setWebAppLocation(properties.getProperty(WEBAPP_DIR)); setAccessLogFile(properties.getProperty(ACCESS_LOG, "access.log")); diff --git a/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManager.java b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManager.java index 377e57b31e93..ca14f6a1654e 100644 --- a/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManager.java +++ b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManager.java @@ -78,7 +78,9 @@ public interface UserPasswordResetManager { ConfigKey UserPasswordResetDomainURL = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, String.class, "user.password.reset.mail.domain.url", null, - "Domain URL for reset password links sent to the user via email", true, + "Domain URL (along with scheme - http:// or https:// and port as applicable) for reset password links sent to the user via email. " + + "If this is not set, CloudStack would determine the domain url based on the first management server from 'host' setting " + + "and http scheme based on the https.enabled flag from server.properties file in the management server.", true, ConfigKey.Scope.Global); void setResetTokenAndSend(UserAccount userAccount); diff --git a/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java index 618ad5c86572..c62bca8eca4b 100644 --- a/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/user/UserPasswordResetManagerImpl.java @@ -23,6 +23,7 @@ import com.cloud.user.dao.UserDao; import com.cloud.utils.StringUtils; import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.server.ServerProperties; import com.github.mustachejava.DefaultMustacheFactory; import com.github.mustachejava.Mustache; import com.github.mustachejava.MustacheFactory; @@ -48,6 +49,7 @@ import java.util.Set; import java.util.UUID; +import static org.apache.cloudstack.config.ApiServiceConfiguration.ManagementServerAddresses; import static org.apache.cloudstack.resourcedetail.UserDetailVO.PasswordResetToken; import static org.apache.cloudstack.resourcedetail.UserDetailVO.PasswordResetTokenExpiryDate; @@ -68,7 +70,7 @@ public class UserPasswordResetManagerImpl extends ManagerBase implements UserPas new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, String.class, "user.password.reset.mail.template", "Hello {{username}}!\n" + "You have requested to reset your password. Please click the following link to reset your password:\n" + - "{{{domainUrl}}}{{{resetLink}}}\n" + + "{{{resetLink}}}\n" + "If you did not request a password reset, please ignore this email.\n" + "\n" + "Regards,\n" + @@ -179,10 +181,26 @@ public void setResetTokenAndSend(UserAccount userAccount) { final String email = userAccount.getEmail(); final String username = userAccount.getUsername(); final String subject = "Password Reset Request"; - final String domainUrl = UserPasswordResetDomainURL.value(); + String domainUrl = UserPasswordResetDomainURL.value(); + if (StringUtils.isBlank(domainUrl)) { + String mgmtServerAddr = ManagementServerAddresses.value().split(",")[0]; + if (ServerProperties.isHttpsEnabled()) { + domainUrl = "https://" + mgmtServerAddr + ":" + ServerProperties.getHttpsPort(); + } else { + domainUrl = "http://" + mgmtServerAddr + ":" + ServerProperties.getHttpPort(); + } + } else if (!domainUrl.startsWith("http://") && !domainUrl.startsWith("https://")) { + if (ServerProperties.isHttpsEnabled()) { + domainUrl = "https://" + domainUrl; + } else { + domainUrl = "http://" + domainUrl; + } + } + + domainUrl = domainUrl.replaceAll("/+$", ""); - String resetLink = String.format("/client/#/user/resetPassword?username=%s&token=%s", - username, resetToken); + String resetLink = String.format("%s/client/#/user/resetPassword?username=%s&token=%s", + domainUrl, username, resetToken); String content = getMessageBody(userAccount, resetToken, resetLink); SMTPMailProperties mailProperties = new SMTPMailProperties(); diff --git a/utils/src/main/java/com/cloud/utils/server/ServerProperties.java b/utils/src/main/java/com/cloud/utils/server/ServerProperties.java index 36d8614e68f1..9e81fff90f01 100644 --- a/utils/src/main/java/com/cloud/utils/server/ServerProperties.java +++ b/utils/src/main/java/com/cloud/utils/server/ServerProperties.java @@ -17,10 +17,12 @@ package com.cloud.utils.server; import com.cloud.utils.crypt.EncryptionSecretKeyChecker; +import com.cloud.utils.StringUtils; import org.apache.commons.io.IOUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import java.io.File; import java.io.IOException; import java.io.InputStream; import java.util.Properties; @@ -28,9 +30,20 @@ public class ServerProperties { protected Logger logger = LogManager.getLogger(getClass()); + public static final String HTTP_ENABLE = "http.enable"; + public static final String HTTP_PORT = "http.port"; + public static final String HTTPS_ENABLE = "https.enable"; + public static final String HTTPS_PORT = "https.port"; + public static final String KEYSTORE_FILE = "https.keystore"; + public static final String PASSWORD_ENCRYPTION_TYPE = "password.encryption.type"; + private static Properties properties = new Properties(); private static boolean loaded = false; - public static final String passwordEncryptionType = "password.encryption.type"; + + private static int httpPort = 8080; + + private static boolean httpsEnable = false; + private static int httpsPort = 8443; public synchronized static Properties getServerProperties(InputStream inputStream) { if (!loaded) { @@ -39,7 +52,7 @@ public synchronized static Properties getServerProperties(InputStream inputStrea serverProps.load(inputStream); EncryptionSecretKeyChecker checker = new EncryptionSecretKeyChecker(); - checker.check(serverProps, passwordEncryptionType); + checker.check(serverProps, PASSWORD_ENCRYPTION_TYPE); if (EncryptionSecretKeyChecker.useEncryption()) { EncryptionSecretKeyChecker.decryptAnyProperties(serverProps); @@ -50,10 +63,29 @@ public synchronized static Properties getServerProperties(InputStream inputStrea IOUtils.closeQuietly(inputStream); } + httpPort = Integer.parseInt(serverProps.getProperty(ServerProperties.HTTP_PORT, "8080")); + + boolean httpsEnabled = Boolean.parseBoolean(serverProps.getProperty(ServerProperties.HTTPS_ENABLE, "false")); + String keystoreFile = serverProps.getProperty(KEYSTORE_FILE); + httpsEnable = httpsEnabled && StringUtils.isNotEmpty(keystoreFile) && new File(keystoreFile).exists(); + httpsPort = Integer.parseInt(serverProps.getProperty(ServerProperties.HTTPS_PORT, "8443")); + properties = serverProps; loaded = true; } return properties; } + + public static int getHttpPort() { + return httpPort; + } + + public static boolean isHttpsEnabled() { + return httpsEnable; + } + + public static int getHttpsPort() { + return httpsPort; + } } From 2a59658b290bc2827443c1798694f13b73a567e0 Mon Sep 17 00:00:00 2001 From: Wei Zhou Date: Wed, 28 Jan 2026 13:09:10 +0100 Subject: [PATCH 223/271] VR: fix dns list in redundant VPC VRs (#12161) --- systemvm/debian/opt/cloud/bin/cs/CsDhcp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py index e15714af212f..a2309067289d 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py @@ -110,7 +110,7 @@ def configure_server(self): if gn.get_dns() and device: sline = "dhcp-option=tag:interface-%s-%s,6" % (device, idx) dns_list = [x for x in gn.get_dns() if x] - if (self.config.is_vpc() or self.config.is_router()) and ('is_vr_guest_gateway' in gn.data and gn.data['is_vr_guest_gateway']): + if self.config.is_vpc() and not gn.is_vr_guest_gateway(): if gateway in dns_list: dns_list.remove(gateway) if gn.data['router_guest_ip'] != ip: From 219399ccbcad6d0f99117b8d8136a7290c2db3f8 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 28 Jan 2026 19:36:04 +0530 Subject: [PATCH 224/271] ui: allow actions for other users of root admin (#11319) Fixes #10306 Signed-off-by: Abhishek Kumar --- ui/src/config/section/user.js | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/ui/src/config/section/user.js b/ui/src/config/section/user.js index a18994fd6ce1..65c1a17f760b 100644 --- a/ui/src/config/section/user.js +++ b/ui/src/config/section/user.js @@ -105,9 +105,10 @@ export default { message: 'message.enable.user', dataView: true, show: (record, store) => { - return ['Admin', 'DomainAdmin'].includes(store.userInfo.roletype) && !record.isdefault && - !(record.domain === 'ROOT' && record.account === 'admin' && record.accounttype === 1) && - ['disabled', 'locked'].includes(record.state) + if (!['disabled', 'locked'].includes(record.state) || record.isdefault || !['Admin', 'DomainAdmin'].includes(store.userInfo.roletype)) { + return false + } + return ![1, 4].includes(record.accounttype) || store.userInfo.roletype === 'Admin' } }, { @@ -117,9 +118,10 @@ export default { message: 'message.disable.user', dataView: true, show: (record, store) => { - return ['Admin', 'DomainAdmin'].includes(store.userInfo.roletype) && !record.isdefault && - !(record.domain === 'ROOT' && record.account === 'admin' && record.accounttype === 1) && - record.state === 'enabled' + if (record.state !== 'enabled' || record.isdefault || !['Admin', 'DomainAdmin'].includes(store.userInfo.roletype)) { + return false + } + return ![1, 4].includes(record.accounttype) || (store.userInfo.roletype === 'Admin' && record.id !== store.userInfo.id) } }, { @@ -131,9 +133,10 @@ export default { dataView: true, popup: true, show: (record, store) => { - return ['Admin', 'DomainAdmin'].includes(store.userInfo.roletype) && !record.isdefault && - !(record.domain === 'ROOT' && record.account === 'admin' && record.accounttype === 1) && - record.state === 'enabled' + if (record.state !== 'enabled' || record.isdefault || !['Admin', 'DomainAdmin'].includes(store.userInfo.roletype)) { + return false + } + return ![1, 4].includes(record.accounttype) || (store.userInfo.roletype === 'Admin' && record.id !== store.userInfo.id) } }, { From ecd610484f8fd7bf5ef901780bbbb978aa0cdaf6 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Wed, 28 Jan 2026 19:39:37 +0530 Subject: [PATCH 225/271] Add the procedure files for insert extensions and update guest os category (#12482) * Add the procedure files for insert extensions and update guestos category * fixed indentation * Apply suggestions from code review Co-authored-by: Vishesh <8760112+vishesh92@users.noreply.github.com> --------- Co-authored-by: Vishesh <8760112+vishesh92@users.noreply.github.com> --- .../cloud.insert_category_if_not_exists.sql | 27 +++++++++++ ...on_custom_action_details_if_not_exists.sql | 46 +++++++++++++++++++ ..._extension_custom_action_if_not_exists.sql | 46 +++++++++++++++++++ ....insert_extension_detail_if_not_exists.sql | 39 ++++++++++++++++ .../cloud.insert_extension_if_not_exists.sql | 38 +++++++++++++++ .../cloud.update_category_for_guest_oses.sql | 33 +++++++++++++ ...w_and_delete_old_category_for_guest_os.sql | 35 ++++++++++++++ 7 files changed, 264 insertions(+) create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_category_if_not_exists.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_details_if_not_exists.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_if_not_exists.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_detail_if_not_exists.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_if_not_exists.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_category_for_guest_oses.sql create mode 100644 engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_new_and_delete_old_category_for_guest_os.sql diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_category_if_not_exists.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_category_if_not_exists.sql new file mode 100644 index 000000000000..a82dc7204c2e --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_category_if_not_exists.sql @@ -0,0 +1,27 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- Add new OS categories if not present +DROP PROCEDURE IF EXISTS `cloud`.`INSERT_CATEGORY_IF_NOT_EXIST`; +CREATE PROCEDURE `cloud`.`INSERT_CATEGORY_IF_NOT_EXIST`(IN os_name VARCHAR(255)) +BEGIN + IF NOT EXISTS ((SELECT 1 FROM `cloud`.`guest_os_category` WHERE name = os_name)) + THEN + INSERT INTO `cloud`.`guest_os_category` (name, uuid) + VALUES (os_name, UUID()) +; END IF +; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_details_if_not_exists.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_details_if_not_exists.sql new file mode 100644 index 000000000000..77b162236266 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_details_if_not_exists.sql @@ -0,0 +1,46 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +DROP PROCEDURE IF EXISTS `cloud`.`INSERT_EXTENSION_CUSTOM_ACTION_DETAILS_IF_NOT_EXISTS`; +CREATE PROCEDURE `cloud`.`INSERT_EXTENSION_CUSTOM_ACTION_DETAILS_IF_NOT_EXISTS` ( + IN ext_name VARCHAR(255), + IN action_name VARCHAR(255), + IN param_json TEXT +) +BEGIN + DECLARE action_id BIGINT UNSIGNED +; SELECT `eca`.`id` INTO action_id FROM `cloud`.`extension_custom_action` `eca` + JOIN `cloud`.`extension` `e` ON `e`.`id` = `eca`.`extension_id` + WHERE `eca`.`name` = action_name AND `e`.`name` = ext_name LIMIT 1 +; IF NOT EXISTS ( + SELECT 1 FROM `cloud`.`extension_custom_action_details` + WHERE `extension_custom_action_id` = action_id + AND `name` = 'parameters' + ) THEN + INSERT INTO `cloud`.`extension_custom_action_details` ( + `extension_custom_action_id`, + `name`, + `value`, + `display` + ) VALUES ( + action_id, + 'parameters', + param_json, + 0 + ) +; END IF +;END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_if_not_exists.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_if_not_exists.sql new file mode 100644 index 000000000000..9dbffa630f84 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_custom_action_if_not_exists.sql @@ -0,0 +1,46 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +DROP PROCEDURE IF EXISTS `cloud`.`INSERT_EXTENSION_CUSTOM_ACTION_IF_NOT_EXISTS`; +CREATE PROCEDURE `cloud`.`INSERT_EXTENSION_CUSTOM_ACTION_IF_NOT_EXISTS`( + IN ext_name VARCHAR(255), + IN action_name VARCHAR(255), + IN action_desc VARCHAR(4096), + IN resource_type VARCHAR(255), + IN allowed_roles INT UNSIGNED, + IN success_msg VARCHAR(4096), + IN error_msg VARCHAR(4096), + IN timeout_seconds INT UNSIGNED +) +BEGIN + DECLARE ext_id BIGINT +; SELECT `id` INTO ext_id FROM `cloud`.`extension` WHERE `name` = ext_name LIMIT 1 +; IF NOT EXISTS ( + SELECT 1 FROM `cloud`.`extension_custom_action` WHERE `name` = action_name AND `extension_id` = ext_id + ) THEN + INSERT INTO `cloud`.`extension_custom_action` ( + `uuid`, `name`, `description`, `extension_id`, `resource_type`, + `allowed_role_types`, `success_message`, `error_message`, + `enabled`, `timeout`, `created`, `removed` + ) + VALUES ( + UUID(), action_name, action_desc, ext_id, resource_type, + allowed_roles, success_msg, error_msg, + 1, timeout_seconds, NOW(), NULL + ) +; END IF +;END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_detail_if_not_exists.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_detail_if_not_exists.sql new file mode 100644 index 000000000000..f9d6c5da9512 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_detail_if_not_exists.sql @@ -0,0 +1,39 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +DROP PROCEDURE IF EXISTS `cloud`.`INSERT_EXTENSION_DETAIL_IF_NOT_EXISTS`; +CREATE PROCEDURE `cloud`.`INSERT_EXTENSION_DETAIL_IF_NOT_EXISTS`( + IN ext_name VARCHAR(255), + IN detail_key VARCHAR(255), + IN detail_value TEXT, + IN display TINYINT(1) +) +BEGIN + DECLARE ext_id BIGINT +; SELECT `id` INTO ext_id FROM `cloud`.`extension` WHERE `name` = ext_name LIMIT 1 +; IF NOT EXISTS ( + SELECT 1 FROM `cloud`.`extension_details` + WHERE `extension_id` = ext_id AND `name` = detail_key + ) THEN + INSERT INTO `cloud`.`extension_details` ( + `extension_id`, `name`, `value`, `display` + ) + VALUES ( + ext_id, detail_key, detail_value, display + ) +; END IF +;END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_if_not_exists.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_if_not_exists.sql new file mode 100644 index 000000000000..8d74f9b2a986 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.insert_extension_if_not_exists.sql @@ -0,0 +1,38 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +DROP PROCEDURE IF EXISTS `cloud`.`INSERT_EXTENSION_IF_NOT_EXISTS`; +CREATE PROCEDURE `cloud`.`INSERT_EXTENSION_IF_NOT_EXISTS`( + IN ext_name VARCHAR(255), + IN ext_desc VARCHAR(255), + IN ext_path VARCHAR(255) +) +BEGIN + IF NOT EXISTS ( + SELECT 1 FROM `cloud`.`extension` WHERE `name` = ext_name + ) THEN + INSERT INTO `cloud`.`extension` ( + `uuid`, `name`, `description`, `type`, + `relative_path`, `path_ready`, + `is_user_defined`, `state`, `created`, `removed` + ) + VALUES ( + UUID(), ext_name, ext_desc, 'Orchestrator', + ext_path, 1, 0, 'Enabled', NOW(), NULL + ) +; END IF +;END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_category_for_guest_oses.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_category_for_guest_oses.sql new file mode 100644 index 000000000000..87f3a85d27ef --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_category_for_guest_oses.sql @@ -0,0 +1,33 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- Move existing guest OS to new categories +DROP PROCEDURE IF EXISTS `cloud`.`UPDATE_CATEGORY_FOR_GUEST_OSES`; +CREATE PROCEDURE `cloud`.`UPDATE_CATEGORY_FOR_GUEST_OSES`(IN category_name VARCHAR(255), IN os_name VARCHAR(255)) +BEGIN + DECLARE category_id BIGINT +; SELECT `id` INTO category_id + FROM `cloud`.`guest_os_category` + WHERE `name` = category_name + LIMIT 1 +; IF category_id IS NULL THEN + SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'Category not found' +; END IF +; UPDATE `cloud`.`guest_os` + SET `category_id` = category_id + WHERE `display_name` LIKE CONCAT('%', os_name, '%') +; END; diff --git a/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_new_and_delete_old_category_for_guest_os.sql b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_new_and_delete_old_category_for_guest_os.sql new file mode 100644 index 000000000000..42f7aa738cff --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/procedures/cloud.update_new_and_delete_old_category_for_guest_os.sql @@ -0,0 +1,35 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- Move existing guest OS whose category will be deleted to Other category +DROP PROCEDURE IF EXISTS `cloud`.`UPDATE_NEW_AND_DELETE_OLD_CATEGORY_FOR_GUEST_OS`; +CREATE PROCEDURE `cloud`.`UPDATE_NEW_AND_DELETE_OLD_CATEGORY_FOR_GUEST_OS`(IN to_category_name VARCHAR(255), IN from_category_name VARCHAR(255)) +BEGIN + DECLARE done INT DEFAULT 0 +; DECLARE to_category_id BIGINT +; SELECT id INTO to_category_id + FROM `cloud`.`guest_os_category` + WHERE `name` = to_category_name + LIMIT 1 +; IF to_category_id IS NULL THEN + SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'ToCategory not found' +; END IF +; UPDATE `cloud`.`guest_os` + SET `category_id` = to_category_id + WHERE `category_id` = (SELECT `id` FROM `cloud`.`guest_os_category` WHERE `name` = from_category_name) +; UPDATE `cloud`.`guest_os_category` SET `removed`=now() WHERE `name` = from_category_name +; END; From 17769267a804341c7e5eec71f4062d0082136d52 Mon Sep 17 00:00:00 2001 From: Daman Arora <61474540+Damans227@users.noreply.github.com> Date: Wed, 28 Jan 2026 09:11:14 -0500 Subject: [PATCH 226/271] Fix delete snapshot policy expunged volume (#12474) * use findByIdIncludingRemoved for volume retrieval in snapshot policy validation * add unit tests * add cleanup for orphan snapshot policies * delete snapshot policies when expunging volumes * update orphan cleanup to remove policies for volumes that are in expunged state or null --------- Co-authored-by: Daman Arora --- .../storage/volume/VolumeServiceImpl.java | 4 +- .../storage/snapshot/SnapshotManagerImpl.java | 18 +++- .../snapshot/SnapshotManagerImplTest.java | 92 +++++++++++++++++++ 3 files changed, 112 insertions(+), 2 deletions(-) diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 78b3088415a1..7132d61ed3b0 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -387,6 +387,7 @@ public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { logger.info("Expunge volume with no data store specified"); if (canVolumeBeRemoved(volume.getId())) { logger.info("Volume {} is not referred anywhere, remove it from volumes table", volume); + snapshotMgr.deletePoliciesForVolume(volume.getId()); volDao.remove(volume.getId()); } future.complete(result); @@ -422,6 +423,7 @@ public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { } VMTemplateVO template = templateDao.findById(vol.getTemplateId()); if (template != null && !template.isDeployAsIs()) { + snapshotMgr.deletePoliciesForVolume(vol.getId()); volDao.remove(vol.getId()); future.complete(result); return future; @@ -493,6 +495,7 @@ public Void deleteVolumeCallback(AsyncCallbackDispatcher policies = _snapshotPolicyDao.listActivePolicies(); + if (CollectionUtils.isEmpty(policies)) { + return; + } + for (SnapshotPolicyVO policy : policies) { + VolumeVO volume = _volsDao.findByIdIncludingRemoved(policy.getVolumeId()); + if (volume == null || volume.getState() == Volume.State.Expunged) { + logger.info("Removing orphan snapshot policy {} for non-existent volume {}", policy.getId(), policy.getVolumeId()); + deletePolicy(policy.getId()); + } + } + } + @Override public boolean stop() { backupSnapshotExecutor.shutdown(); @@ -1926,7 +1942,7 @@ public boolean deleteSnapshotPolicies(DeleteSnapshotPoliciesCmd cmd) { if (snapshotPolicyVO == null) { throw new InvalidParameterValueException("Policy id given: " + policy + " does not exist"); } - VolumeVO volume = _volsDao.findById(snapshotPolicyVO.getVolumeId()); + VolumeVO volume = _volsDao.findByIdIncludingRemoved(snapshotPolicyVO.getVolumeId()); if (volume == null) { throw new InvalidParameterValueException("Policy id given: " + policy + " does not belong to a valid volume"); } diff --git a/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java b/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java index 367a49a801ff..2d3cb04ab962 100644 --- a/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java +++ b/server/src/test/java/com/cloud/storage/snapshot/SnapshotManagerImplTest.java @@ -30,6 +30,7 @@ import com.cloud.storage.SnapshotPolicyVO; import com.cloud.storage.SnapshotVO; import com.cloud.storage.VolumeVO; +import com.cloud.server.TaggedResourceService; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotPolicyDao; import com.cloud.storage.dao.SnapshotZoneDao; @@ -44,6 +45,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; @@ -100,6 +102,10 @@ public class SnapshotManagerImplTest { VolumeDao volumeDao; @Mock SnapshotPolicyDao snapshotPolicyDao; + @Mock + SnapshotScheduler snapshotScheduler; + @Mock + TaggedResourceService taggedResourceService; @InjectMocks SnapshotManagerImpl snapshotManager = new SnapshotManagerImpl(); @@ -108,6 +114,8 @@ public void setUp() { snapshotManager._snapshotPolicyDao = snapshotPolicyDao; snapshotManager._volsDao = volumeDao; snapshotManager._accountMgr = accountManager; + snapshotManager._snapSchedMgr = snapshotScheduler; + snapshotManager.taggedResourceService = taggedResourceService; } @After @@ -520,4 +528,88 @@ public void testListSnapshotPolicies_RootAdmin() { Assert.assertEquals(1, result.first().size()); Assert.assertEquals(Integer.valueOf(1), result.second()); } + + @Test + public void testDeleteSnapshotPoliciesForRemovedVolume() { + Long policyId = 1L; + Long volumeId = 10L; + Long accountId = 2L; + + DeleteSnapshotPoliciesCmd cmd = Mockito.mock(DeleteSnapshotPoliciesCmd.class); + Mockito.when(cmd.getId()).thenReturn(policyId); + Mockito.when(cmd.getIds()).thenReturn(null); + + Account caller = Mockito.mock(Account.class); + Mockito.when(caller.getId()).thenReturn(accountId); + CallContext.register(Mockito.mock(User.class), caller); + + SnapshotPolicyVO policyVO = Mockito.mock(SnapshotPolicyVO.class); + Mockito.when(policyVO.getId()).thenReturn(policyId); + Mockito.when(policyVO.getVolumeId()).thenReturn(volumeId); + Mockito.when(policyVO.getUuid()).thenReturn("policy-uuid"); + Mockito.when(snapshotPolicyDao.findById(policyId)).thenReturn(policyVO); + + // Volume is removed (expunged) but findByIdIncludingRemoved should still return it + VolumeVO volumeVO = Mockito.mock(VolumeVO.class); + Mockito.when(volumeDao.findByIdIncludingRemoved(volumeId)).thenReturn(volumeVO); + + Mockito.when(snapshotPolicyDao.remove(policyId)).thenReturn(true); + + boolean result = snapshotManager.deleteSnapshotPolicies(cmd); + + Assert.assertTrue(result); + Mockito.verify(volumeDao).findByIdIncludingRemoved(volumeId); + Mockito.verify(snapshotScheduler).removeSchedule(volumeId, policyId); + Mockito.verify(snapshotPolicyDao).remove(policyId); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDeleteSnapshotPoliciesNoPolicyId() { + DeleteSnapshotPoliciesCmd cmd = Mockito.mock(DeleteSnapshotPoliciesCmd.class); + Mockito.when(cmd.getId()).thenReturn(null); + Mockito.when(cmd.getIds()).thenReturn(null); + + snapshotManager.deleteSnapshotPolicies(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDeleteSnapshotPoliciesPolicyNotFound() { + Long policyId = 1L; + + DeleteSnapshotPoliciesCmd cmd = Mockito.mock(DeleteSnapshotPoliciesCmd.class); + Mockito.when(cmd.getId()).thenReturn(policyId); + Mockito.when(cmd.getIds()).thenReturn(null); + + Mockito.when(snapshotPolicyDao.findById(policyId)).thenReturn(null); + + snapshotManager.deleteSnapshotPolicies(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDeleteSnapshotPoliciesVolumeNotFound() { + Long policyId = 1L; + Long volumeId = 10L; + + DeleteSnapshotPoliciesCmd cmd = Mockito.mock(DeleteSnapshotPoliciesCmd.class); + Mockito.when(cmd.getId()).thenReturn(policyId); + Mockito.when(cmd.getIds()).thenReturn(null); + + SnapshotPolicyVO policyVO = Mockito.mock(SnapshotPolicyVO.class); + Mockito.when(policyVO.getVolumeId()).thenReturn(volumeId); + Mockito.when(snapshotPolicyDao.findById(policyId)).thenReturn(policyVO); + + // Volume doesn't exist at all (even when including removed) + Mockito.when(volumeDao.findByIdIncludingRemoved(volumeId)).thenReturn(null); + + snapshotManager.deleteSnapshotPolicies(cmd); + } + + @Test(expected = InvalidParameterValueException.class) + public void testDeleteSnapshotPoliciesManualPolicyId() { + DeleteSnapshotPoliciesCmd cmd = Mockito.mock(DeleteSnapshotPoliciesCmd.class); + Mockito.when(cmd.getId()).thenReturn(Snapshot.MANUAL_POLICY_ID); + Mockito.when(cmd.getIds()).thenReturn(null); + + snapshotManager.deleteSnapshotPolicies(cmd); + } } From 1dcf89ab8d6112731370093d4cb772bbcf1bd572 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 28 Jan 2026 09:13:56 -0500 Subject: [PATCH 227/271] Veeam: Use restore timeout as an interval as opposed to a counter (#11772) * Veeam: Use restore timeout as a time interval as opposed to a counter * fix log * fix unit test * remove unused imports * fix comment * unused import * change to while - issure refactoring --- .../org/apache/cloudstack/backup/veeam/VeeamClient.java | 7 +++++-- .../apache/cloudstack/backup/veeam/VeeamClientTest.java | 5 ++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java index 44726a37e186..c7ede2b6e71e 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/veeam/VeeamClient.java @@ -366,7 +366,9 @@ private boolean checkTaskStatus(final HttpResponse response) throws IOException * that is used to wait for the restore to complete before throwing a {@link CloudRuntimeException}. */ protected void checkIfRestoreSessionFinished(String type, String path) throws IOException { - for (int j = 0; j < restoreTimeout; j++) { + long startTime = System.currentTimeMillis(); + long timeoutMs = restoreTimeout * 1000L; + while (System.currentTimeMillis() - startTime < timeoutMs) { HttpResponse relatedResponse = get(path); RestoreSession session = parseRestoreSessionResponse(relatedResponse); if (session.getResult().equals("Success")) { @@ -380,7 +382,8 @@ protected void checkIfRestoreSessionFinished(String type, String path) throws IO getRestoreVmErrorDescription(StringUtils.substringAfterLast(sessionUid, ":")))); throw new CloudRuntimeException(String.format("Restore job [%s] failed.", sessionUid)); } - logger.debug(String.format("Waiting %s seconds, out of a total of %s seconds, for the restore backup process to finish.", j, restoreTimeout)); + logger.debug("Waiting {} seconds, out of a total of {} seconds, for the restore backup process to finish.", + (System.currentTimeMillis() - startTime) / 1000, restoreTimeout); try { Thread.sleep(1000); diff --git a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java index 0c70c75939ef..333c3e16053a 100644 --- a/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java +++ b/plugins/backup/veeam/src/test/java/org/apache/cloudstack/backup/veeam/VeeamClientTest.java @@ -25,7 +25,6 @@ import static com.github.tomakehurst.wiremock.client.WireMock.urlMatching; import static com.github.tomakehurst.wiremock.client.WireMock.verify; import static org.junit.Assert.fail; -import static org.mockito.Mockito.times; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -157,7 +156,7 @@ public void getRepositoryNameFromJobTestSuccess() throws Exception { @Test public void checkIfRestoreSessionFinishedTestTimeoutException() throws IOException { try { - ReflectionTestUtils.setField(mockClient, "restoreTimeout", 10); + ReflectionTestUtils.setField(mockClient, "restoreTimeout", 2); RestoreSession restoreSession = Mockito.mock(RestoreSession.class); HttpResponse httpResponse = Mockito.mock(HttpResponse.class); Mockito.when(mockClient.get(Mockito.anyString())).thenReturn(httpResponse); @@ -169,7 +168,7 @@ public void checkIfRestoreSessionFinishedTestTimeoutException() throws IOExcepti } catch (Exception e) { Assert.assertEquals("Related job type: RestoreTest was not successful", e.getMessage()); } - Mockito.verify(mockClient, times(10)).get(Mockito.anyString()); + Mockito.verify(mockClient, Mockito.atLeastOnce()).get(Mockito.anyString()); } @Test From 03975f1591df70fc220eed779f2453c55b82fc1e Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Thu, 29 Jan 2026 11:46:10 +0530 Subject: [PATCH 228/271] Show parent snapshot (along with the chain size) for incremental snapshots (#12468) * Show parent snapshot (along with the chain size) for incremental snapshots * review * review changes --- .../user/config/ListCapabilitiesCmd.java | 1 + .../api/response/CapabilitiesResponse.java | 8 ++++++++ .../api/response/SnapshotResponse.java | 16 ++++++++++++++++ .../api/query/dao/SnapshotJoinDaoImpl.java | 2 ++ .../com/cloud/server/ManagementServerImpl.java | 2 ++ ui/src/components/view/ListView.vue | 14 ++++++++++++++ ui/src/config/section/storage.js | 17 +++++++++++++---- 7 files changed, 56 insertions(+), 4 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java index 94b6062b6212..2cb64070950b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java @@ -63,6 +63,7 @@ public void execute() { response.setDiskOffMaxSize((Long)capabilities.get("customDiskOffMaxSize")); response.setRegionSecondaryEnabled((Boolean)capabilities.get("regionSecondaryEnabled")); response.setKVMSnapshotEnabled((Boolean)capabilities.get("KVMSnapshotEnabled")); + response.setSnapshotShowChainSize((Boolean)capabilities.get("SnapshotShowChainSize")); response.setAllowUserViewDestroyedVM((Boolean)capabilities.get("allowUserViewDestroyedVM")); response.setAllowUserExpungeRecoverVM((Boolean)capabilities.get("allowUserExpungeRecoverVM")); response.setAllowUserExpungeRecoverVolume((Boolean)capabilities.get("allowUserExpungeRecoverVolume")); diff --git a/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java index 816216962808..7ef627ec33ce 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java @@ -75,6 +75,10 @@ public class CapabilitiesResponse extends BaseResponse { @Param(description = "True if Snapshot is supported for KVM host, false otherwise") private boolean kvmSnapshotEnabled; + @SerializedName("snapshotshowchainsize") + @Param(description = "True to show the parent and chain size (sum of physical size of snapshot and all its parents) for incremental snapshots", since = "4.22.1") + private boolean snapshotShowChainSize; + @SerializedName("apilimitmax") @Param(description = "Max allowed number of api requests within the specified interval") private Integer apiLimitMax; @@ -203,6 +207,10 @@ public void setKVMSnapshotEnabled(boolean kvmSnapshotEnabled) { this.kvmSnapshotEnabled = kvmSnapshotEnabled; } + public void setSnapshotShowChainSize(boolean snapshotShowChainSize) { + this.snapshotShowChainSize = snapshotShowChainSize; + } + public void setApiLimitInterval(Integer apiLimitInterval) { this.apiLimitInterval = apiLimitInterval; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java index 827a55b18754..3db6fd87ed59 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/SnapshotResponse.java @@ -155,6 +155,14 @@ public class SnapshotResponse extends BaseResponseWithTagInformation implements @Param(description = "download progress of a snapshot", since = "4.19.0") private Map downloadDetails; + @SerializedName("parent") + @Param(description = "The parent ID of the Snapshot", since = "4.22.1") + private String parent; + + @SerializedName("parentname") + @Param(description = "The parent name of the Snapshot", since = "4.22.1") + private String parentName; + public SnapshotResponse() { tags = new LinkedHashSet(); } @@ -313,4 +321,12 @@ public void setDatastoreType(String datastoreType) { public void setDownloadDetails(Map downloadDetails) { this.downloadDetails = downloadDetails; } + + public void setParent(String parent) { + this.parent = parent; + } + + public void setParentName(String parentName) { + this.parentName = parentName; + } } diff --git a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java index 9ea14edf2b73..0b0d9b269f3e 100644 --- a/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/SnapshotJoinDaoImpl.java @@ -109,6 +109,8 @@ private void setSnapshotInfoDetailsInResponse(SnapshotJoinVO snapshot, SnapshotR if (showChainSize && snapshotInfo.getParent() != null) { long chainSize = calculateChainSize(snapshotInfo); snapshotResponse.setChainSize(chainSize); + snapshotResponse.setParent(snapshotInfo.getParent().getUuid()); + snapshotResponse.setParentName(snapshotInfo.getParent().getName()); } } } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index d77298da48d1..9f27ce9387dc 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -4721,6 +4721,7 @@ public Map listCapabilities(final ListCapabilitiesCmd cmd) { final long diskOffMinSize = VolumeOrchestrationService.CustomDiskOfferingMinSize.value(); final long diskOffMaxSize = VolumeOrchestrationService.CustomDiskOfferingMaxSize.value(); final boolean KVMSnapshotEnabled = SnapshotManager.KVMSnapshotEnabled.value(); + final boolean SnapshotShowChainSize = SnapshotManager.snapshotShowChainSize.value(); final boolean userPublicTemplateEnabled = TemplateManager.AllowPublicUserTemplates.valueIn(caller.getId()); @@ -4761,6 +4762,7 @@ public Map listCapabilities(final ListCapabilitiesCmd cmd) { capabilities.put("customDiskOffMaxSize", diskOffMaxSize); capabilities.put("regionSecondaryEnabled", regionSecondaryEnabled); capabilities.put("KVMSnapshotEnabled", KVMSnapshotEnabled); + capabilities.put("SnapshotShowChainSize", SnapshotShowChainSize); capabilities.put("allowUserViewDestroyedVM", allowUserViewDestroyedVM); capabilities.put("allowUserExpungeRecoverVM", allowUserExpungeRecoverVM); capabilities.put("allowUserExpungeRecoverVolume", allowUserExpungeRecoverVolume); diff --git a/ui/src/components/view/ListView.vue b/ui/src/components/view/ListView.vue index de34527296e1..0098211be03a 100644 --- a/ui/src/components/view/ListView.vue +++ b/ui/src/components/view/ListView.vue @@ -758,6 +758,20 @@ >{{ text }} {{ text }} + + diff --git a/ui/src/config/section/storage.js b/ui/src/config/section/storage.js index 41875ec4db53..75432314b034 100644 --- a/ui/src/config/section/storage.js +++ b/ui/src/config/section/storage.js @@ -92,7 +92,7 @@ export default { } ], searchFilters: () => { - var filters = ['name', 'zoneid', 'domainid', 'account', 'state', 'tags', 'serviceofferingid', 'diskofferingid', 'isencrypted'] + const filters = ['name', 'zoneid', 'domainid', 'account', 'state', 'tags', 'serviceofferingid', 'diskofferingid', 'isencrypted'] if (['Admin', 'DomainAdmin'].includes(store.getters.userInfo.roletype)) { filters.push('storageid') } @@ -311,7 +311,10 @@ export default { permission: ['listSnapshots'], resourceType: 'Snapshot', columns: () => { - var fields = ['name', 'state', 'volumename', 'intervaltype', 'physicalsize', 'created'] + const fields = ['name', 'state', 'volumename', 'intervaltype', 'physicalsize', 'created'] + if (store.getters.features.snapshotshowchainsize) { + fields.splice(fields.indexOf('created'), 0, 'chainsize', 'parentname') + } if (['Admin', 'DomainAdmin'].includes(store.getters.userInfo.roletype)) { fields.push('account') if (store.getters.listAllProjects) { @@ -324,7 +327,13 @@ export default { fields.push('zonename') return fields }, - details: ['name', 'id', 'volumename', 'volumetype', 'snapshottype', 'intervaltype', 'physicalsize', 'virtualsize', 'chainsize', 'account', 'domain', 'created'], + details: () => { + const fields = ['name', 'id', 'volumename', 'volumetype', 'snapshottype', 'intervaltype', 'physicalsize', 'virtualsize', 'account', 'domain', 'created'] + if (store.getters.features.snapshotshowchainsize) { + fields.splice(fields.indexOf('account'), 0, 'chainsize', 'parentname') + } + return fields + }, tabs: [ { name: 'details', @@ -346,7 +355,7 @@ export default { } ], searchFilters: () => { - var filters = ['name', 'domainid', 'account', 'tags', 'zoneid'] + const filters = ['name', 'domainid', 'account', 'tags', 'zoneid'] if (['Admin', 'DomainAdmin'].includes(store.getters.userInfo.roletype)) { filters.push('storageid') filters.push('imagestoreid') From a4dcc866919d72cdc0928f2efd3d6db6ad2b262b Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Thu, 29 Jan 2026 12:05:51 +0530 Subject: [PATCH 229/271] ui: clear selected resource when no items in listview (#11946) Fixes #11922 Signed-off-by: Abhishek Kumar --- ui/src/views/AutogenView.vue | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue index cfbaf580507d..fc17eee75914 100644 --- a/ui/src/views/AutogenView.vue +++ b/ui/src/views/AutogenView.vue @@ -1196,15 +1196,12 @@ export default { } } } - if (this.items.length > 0) { - if (!this.showAction || this.dataView) { - this.resource = this.items[0] - this.$emit('change-resource', this.resource) - } - } else { - if (this.dataView) { - this.$router.push({ path: '/exception/404' }) - } + if (this.items.length <= 0 && this.dataView) { + this.$router.push({ path: '/exception/404' }) + } + if (!this.showAction || this.dataView) { + this.resource = this.items?.[0] || {} + this.$emit('change-resource', this.resource) } }).catch(error => { if (!error || !error.message) { From ba3d6031bdb839709c18d09757a058097887395a Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Thu, 29 Jan 2026 13:30:54 +0530 Subject: [PATCH 230/271] ui: allow viewing hosts from management server connected agents (#12149) Signed-off-by: Abhishek Kumar --- ui/src/components/view/ListView.vue | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ui/src/components/view/ListView.vue b/ui/src/components/view/ListView.vue index 0098211be03a..a8a6b26ac6a7 100644 --- a/ui/src/components/view/ListView.vue +++ b/ui/src/components/view/ListView.vue @@ -887,6 +887,14 @@ + @@ -313,13 +334,15 @@ import { getAPI, postAPI } from '@/api' import draggable from 'vuedraggable' import { mixinForm } from '@/utils/mixin' import TooltipButton from '@/components/widgets/TooltipButton' +import ImportNetworkACL from './ImportNetworkACL' export default { name: 'AclListRulesTab', mixins: [mixinForm], components: { draggable, - TooltipButton + TooltipButton, + ImportNetworkACL }, props: { resource: { @@ -344,6 +367,7 @@ export default { tagsModalVisible: false, tagsLoading: false, ruleModalVisible: false, + showImportModal: false, ruleModalTitle: this.$t('label.edit.rule'), ruleFormMode: 'edit' } @@ -788,6 +812,12 @@ export default { }, capitalise (val) { return val.toUpperCase() + }, + handleImportRules () { + this.showImportModal = true + }, + closeImportModal () { + this.showImportModal = false } } } diff --git a/ui/src/views/network/ImportNetworkACL.vue b/ui/src/views/network/ImportNetworkACL.vue new file mode 100644 index 000000000000..2456a75af45d --- /dev/null +++ b/ui/src/views/network/ImportNetworkACL.vue @@ -0,0 +1,381 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + + + + + + From 46d518bf90feea4a5e30d9b1cf7272622151501d Mon Sep 17 00:00:00 2001 From: Edward-x <30854794+YLChen-007@users.noreply.github.com> Date: Wed, 28 Jan 2026 14:56:44 +0800 Subject: [PATCH 232/271] Fix that Sensitive information logged in SshHelper.sshExecute method (#12026) * Sensitive information logged in SshHelper.sshExecute method * Fix that Sensitive information logged in SshHelper.sshExecute method2 * Fix sensitive information handling in SshHelper and its tests --------- Co-authored-by: chenyoulong20g@ict.ac.cn --- .../java/com/cloud/utils/ssh/SshHelper.java | 73 ++++++++++++++++++- .../com/cloud/utils/ssh/SshHelperTest.java | 60 +++++++++++++++ 2 files changed, 129 insertions(+), 4 deletions(-) diff --git a/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java b/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java index 87221ab5ac8e..caf2b28c52ff 100644 --- a/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java +++ b/utils/src/main/java/com/cloud/utils/ssh/SshHelper.java @@ -23,6 +23,8 @@ import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.StringUtils; @@ -40,6 +42,23 @@ public class SshHelper { private static final int DEFAULT_CONNECT_TIMEOUT = 180000; private static final int DEFAULT_KEX_TIMEOUT = 60000; private static final int DEFAULT_WAIT_RESULT_TIMEOUT = 120000; + private static final String MASKED_VALUE = "*****"; + + private static final Pattern[] SENSITIVE_COMMAND_PATTERNS = new Pattern[] { + Pattern.compile("(?i)(\\s+-p\\s+['\"])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(\\s+-p\\s+)([^\\s]+)"), + Pattern.compile("(?i)(\\s+-p=['\"])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(\\s+-p=)([^\\s]+)"), + Pattern.compile("(?i)(--password=['\"])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(--password=)([^\\s]+)"), + Pattern.compile("(?i)(--password\\s+['\"])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(--password\\s+)([^\\s]+)"), + Pattern.compile("(?i)(\\s+-u\\s+['\"][^,'\":]+[,:])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(\\s+-u\\s+[^\\s,:]+[,:])([^\\s]+)"), + Pattern.compile("(?i)(\\s+-s\\s+['\"])([^'\"]*)(['\"])"), + Pattern.compile("(?i)(\\s+-s\\s+)([^\\s]+)"), + + }; protected static Logger LOGGER = LogManager.getLogger(SshHelper.class); @@ -145,7 +164,7 @@ public static void scpTo(String host, int port, String user, File pemKeyFile, St } public static void scpTo(String host, int port, String user, File pemKeyFile, String password, String remoteTargetDirectory, String[] localFiles, String fileMode, - int connectTimeoutInMs, int kexTimeoutInMs) throws Exception { + int connectTimeoutInMs, int kexTimeoutInMs) throws Exception { com.trilead.ssh2.Connection conn = null; com.trilead.ssh2.SCPClient scpClient = null; @@ -291,13 +310,16 @@ public static Pair sshExecute(String host, int port, String use } if (sess.getExitStatus() == null) { - //Exit status is NOT available. Returning failure result. - LOGGER.error(String.format("SSH execution of command %s has no exit status set. Result output: %s", command, result)); + // Exit status is NOT available. Returning failure result. + LOGGER.error(String.format("SSH execution of command %s has no exit status set. Result output: %s", + sanitizeForLogging(command), sanitizeForLogging(result))); return new Pair(false, result); } if (sess.getExitStatus() != null && sess.getExitStatus().intValue() != 0) { - LOGGER.error(String.format("SSH execution of command %s has an error status code in return. Result output: %s", command, result)); + LOGGER.error(String.format( + "SSH execution of command %s has an error status code in return. Result output: %s", + sanitizeForLogging(command), sanitizeForLogging(result))); return new Pair(false, result); } return new Pair(true, result); @@ -366,4 +388,47 @@ protected static void throwSshExceptionIfStdoutOrStdeerIsNull(InputStream stdout throw new SshException(msg); } } + + private static String sanitizeForLogging(String value) { + if (value == null) { + return null; + } + String masked = maskSensitiveValue(value); + String cleaned = com.cloud.utils.StringUtils.cleanString(masked); + if (StringUtils.isBlank(cleaned)) { + return masked; + } + return cleaned; + } + + private static String maskSensitiveValue(String value) { + String masked = value; + for (Pattern pattern : SENSITIVE_COMMAND_PATTERNS) { + masked = replaceWithMask(masked, pattern); + } + return masked; + } + + private static String replaceWithMask(String value, Pattern pattern) { + Matcher matcher = pattern.matcher(value); + if (!matcher.find()) { + return value; + } + + StringBuffer buffer = new StringBuffer(); + do { + StringBuilder replacement = new StringBuilder(); + replacement.append(matcher.group(1)); + if (matcher.groupCount() >= 3) { + replacement.append(MASKED_VALUE); + replacement.append(matcher.group(matcher.groupCount())); + } else { + replacement.append(MASKED_VALUE); + } + matcher.appendReplacement(buffer, Matcher.quoteReplacement(replacement.toString())); + } while (matcher.find()); + + matcher.appendTail(buffer); + return buffer.toString(); + } } diff --git a/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java b/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java index 61d746bc12db..8a14f60527b6 100644 --- a/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java +++ b/utils/src/test/java/com/cloud/utils/ssh/SshHelperTest.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.InputStream; +import java.lang.reflect.Method; import org.junit.Assert; import org.junit.Test; @@ -140,4 +141,63 @@ public void openConnectionSessionTest() throws IOException, InterruptedException Mockito.verify(conn).openSession(); } + + @Test + public void sanitizeForLoggingMasksShortPasswordFlag() throws Exception { + String command = "/opt/cloud/bin/script -v 10.0.0.1 -p superSecret"; + String sanitized = invokeSanitizeForLogging(command); + + Assert.assertTrue("Sanitized command should retain flag", sanitized.contains("-p *****")); + Assert.assertFalse("Sanitized command should not contain original password", sanitized.contains("superSecret")); + } + + @Test + public void sanitizeForLoggingMasksQuotedPasswordFlag() throws Exception { + String command = "/opt/cloud/bin/script -v 10.0.0.1 -p \"super Secret\""; + String sanitized = invokeSanitizeForLogging(command); + + Assert.assertTrue("Sanitized command should retain quoted flag", sanitized.contains("-p *****")); + Assert.assertFalse("Sanitized command should not contain original password", + sanitized.contains("super Secret")); + } + + @Test + public void sanitizeForLoggingMasksLongPasswordAssignments() throws Exception { + String command = "tool --password=superSecret"; + String sanitized = invokeSanitizeForLogging(command); + + Assert.assertTrue("Sanitized command should retain assignment", sanitized.contains("--password=*****")); + Assert.assertFalse("Sanitized command should not contain original password", sanitized.contains("superSecret")); + } + + @Test + public void sanitizeForLoggingMasksUsernamePasswordPairs() throws Exception { + String command = "/opt/cloud/bin/vpn_l2tp.sh -u alice,topSecret"; + String sanitized = invokeSanitizeForLogging(command); + + Assert.assertTrue("Sanitized command should retain username and mask password", + sanitized.contains("-u alice,*****")); + Assert.assertFalse("Sanitized command should not contain original password", sanitized.contains("topSecret")); + } + + @Test + public void sanitizeForLoggingMasksUsernamePasswordPairsWithColon() throws Exception { + String command = "curl -u alice:topSecret https://example.com"; + String sanitized = invokeSanitizeForLogging(command); + + Assert.assertTrue("Sanitized command should retain username and mask password", + sanitized.contains("-u alice:*****")); + Assert.assertFalse("Sanitized command should not contain original password", sanitized.contains("topSecret")); + } + + @Test + public void sanitizeForLoggingHandlesNullValues() throws Exception { + Assert.assertNull(invokeSanitizeForLogging(null)); + } + + private String invokeSanitizeForLogging(String value) throws Exception { + Method method = SshHelper.class.getDeclaredMethod("sanitizeForLogging", String.class); + method.setAccessible(true); + return (String) method.invoke(null, value); + } } From 816273606cf0518b6a88a351cd0928ca473de4f4 Mon Sep 17 00:00:00 2001 From: Harikrishna Date: Wed, 28 Jan 2026 16:00:30 +0530 Subject: [PATCH 233/271] Allow copy of templates from secondary storages of other zone when adding a new secondary storage (#12296) * Allow copy of templates from secondary storages of other zone when adding a new secondary storage * Add API param and UI changes on add secondary storage page * Make copy template across zones non blocking * Code fixes * unused imports * Add copy template flag in zone wizard and remove NFS checks * Fix UI * Label fixes * code optimizations * code refactoring * missing changes * Combine template copy and download into a single asynchronous operation * unused import and fixed conflicts * unused code * update config message * Fix configuration setting value on add secondary storage page * Removed unused code * Update unit tests --- .../admin/host/AddSecondaryStorageCmd.java | 24 ++- .../service/StorageOrchestrationService.java | 3 +- .../api/storage/TemplateService.java | 4 +- .../orchestration/StorageOrchestrator.java | 45 +++-- .../storage/image/TemplateServiceImpl.java | 157 +++++++++++++--- .../image/TemplateServiceImplTest.java | 171 +++++++++++++++++- .../cloud/storage/ImageStoreDetailsUtil.java | 11 ++ .../infra/zone/ZoneWizardAddResources.vue | 25 ++- .../views/infra/zone/ZoneWizardLaunchZone.vue | 5 + 9 files changed, 394 insertions(+), 51 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java index 9a7eff7e2e59..585fd1b87a88 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/AddSecondaryStorageCmd.java @@ -29,6 +29,11 @@ import com.cloud.exception.DiscoveryException; import com.cloud.storage.ImageStore; import com.cloud.user.Account; +import org.apache.commons.collections.MapUtils; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; @APICommand(name = "addSecondaryStorage", description = "Adds secondary storage.", responseObject = ImageStoreResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) @@ -44,6 +49,9 @@ public class AddSecondaryStorageCmd extends BaseCmd { @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "The Zone ID for the secondary storage") protected Long zoneId; + @Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, description = "Details in key/value pairs using format details[i].keyname=keyvalue. Example: details[0].copytemplatesfromothersecondarystorages=true") + protected Map details; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -56,6 +64,20 @@ public Long getZoneId() { return zoneId; } + public Map getDetails() { + Map detailsMap = new HashMap<>(); + if (MapUtils.isNotEmpty(details)) { + Collection props = details.values(); + for (Object prop : props) { + HashMap detail = (HashMap) prop; + for (Map.Entry entry: detail.entrySet()) { + detailsMap.put(entry.getKey(),entry.getValue()); + } + } + } + return detailsMap; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -68,7 +90,7 @@ public long getEntityOwnerId() { @Override public void execute(){ try{ - ImageStore result = _storageService.discoverImageStore(null, getUrl(), "NFS", getZoneId(), null); + ImageStore result = _storageService.discoverImageStore(null, getUrl(), "NFS", getZoneId(), getDetails()); ImageStoreResponse storeResponse = null; if (result != null ) { storeResponse = _responseGenerator.createImageStoreResponse(result); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java index 8be2015bfef6..4af0c806060b 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java @@ -22,7 +22,6 @@ import org.apache.cloudstack.api.response.MigrationResponse; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult; import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy; @@ -31,5 +30,5 @@ public interface StorageOrchestrationService { MigrationResponse migrateResources(Long srcImgStoreId, Long destImgStoreId, List templateIdList, List snapshotIdList); - Future orchestrateTemplateCopyToImageStore(TemplateInfo source, DataStore destStore); + Future orchestrateTemplateCopyFromSecondaryStores(long templateId, DataStore destStore); } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java index a8861d5acc68..269eb4f1c213 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java @@ -80,4 +80,6 @@ public TemplateInfo getTemplate() { List getTemplateDatadisksOnImageStore(TemplateInfo templateInfo, String configurationId); AsyncCallFuture copyTemplateToImageStore(DataObject source, DataStore destStore); -} + + void handleTemplateCopyFromSecondaryStores(long templateId, DataStore destStore); + } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 37a1f8dc196e..933b4e0c5ce6 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -36,6 +36,9 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.template.TemplateManager; import org.apache.cloudstack.api.response.MigrationResponse; import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -45,6 +48,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService.DataObjectResult; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult; @@ -103,6 +107,15 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra VolumeDataStoreDao volumeDataStoreDao; @Inject DataMigrationUtility migrationHelper; + @Inject + TemplateManager templateManager; + @Inject + VMTemplateDao templateDao; + @Inject + TemplateDataFactory templateDataFactory; + @Inject + DataCenterDao dcDao; + ConfigKey ImageStoreImbalanceThreshold = new ConfigKey<>("Advanced", Double.class, "image.store.imbalance.threshold", @@ -304,8 +317,9 @@ public MigrationResponse migrateResources(Long srcImgStoreId, Long destImgStoreI } @Override - public Future orchestrateTemplateCopyToImageStore(TemplateInfo source, DataStore destStore) { - return submit(destStore.getScope().getScopeId(), new CopyTemplateTask(source, destStore)); + public Future orchestrateTemplateCopyFromSecondaryStores(long srcTemplateId, DataStore destStore) { + Long dstZoneId = destStore.getScope().getScopeId(); + return submit(dstZoneId, new CopyTemplateFromSecondaryStorageTask(srcTemplateId, destStore)); } protected Pair migrateCompleted(Long destDatastoreId, DataStore srcDatastore, List files, MigrationPolicy migrationPolicy, int skipped) { @@ -624,13 +638,13 @@ public DataObjectResult call() { } } - private class CopyTemplateTask implements Callable { - private TemplateInfo sourceTmpl; - private DataStore destStore; - private String logid; + private class CopyTemplateFromSecondaryStorageTask implements Callable { + private final long srcTemplateId; + private final DataStore destStore; + private final String logid; - public CopyTemplateTask(TemplateInfo sourceTmpl, DataStore destStore) { - this.sourceTmpl = sourceTmpl; + CopyTemplateFromSecondaryStorageTask(long srcTemplateId, DataStore destStore) { + this.srcTemplateId = srcTemplateId; this.destStore = destStore; this.logid = ThreadContext.get(LOGCONTEXTID); } @@ -639,17 +653,16 @@ public CopyTemplateTask(TemplateInfo sourceTmpl, DataStore destStore) { public TemplateApiResult call() { ThreadContext.put(LOGCONTEXTID, logid); TemplateApiResult result; - AsyncCallFuture future = templateService.copyTemplateToImageStore(sourceTmpl, destStore); + long destZoneId = destStore.getScope().getScopeId(); + TemplateInfo sourceTmpl = templateDataFactory.getTemplate(srcTemplateId, DataStoreRole.Image); try { - result = future.get(); - } catch (ExecutionException | InterruptedException e) { - logger.warn("Exception while copying template [{}] from image store [{}] to image store [{}]: {}", - sourceTmpl.getUniqueName(), sourceTmpl.getDataStore().getName(), destStore.getName(), e.toString()); + templateService.handleTemplateCopyFromSecondaryStores(srcTemplateId, destStore); result = new TemplateApiResult(sourceTmpl); - result.setResult(e.getMessage()); + } finally { + tryCleaningUpExecutor(destZoneId); + ThreadContext.clearAll(); } - tryCleaningUpExecutor(destStore.getScope().getScopeId()); - ThreadContext.clearAll(); + return result; } } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index a59e0373c1d7..e29e89cf431c 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -31,6 +31,8 @@ import javax.inject.Inject; +import com.cloud.exception.StorageUnavailableException; +import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; @@ -67,9 +69,11 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.image.store.TemplateObject; import org.apache.cloudstack.storage.to.TemplateObjectTO; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.ThreadContext; import org.springframework.stereotype.Component; import com.cloud.agent.api.Answer; @@ -569,10 +573,7 @@ public void handleTemplateSync(DataStore store) { } if (availHypers.contains(tmplt.getHypervisorType())) { - boolean copied = isCopyFromOtherStoragesEnabled(zoneId) && tryCopyingTemplateToImageStore(tmplt, store); - if (!copied) { - tryDownloadingTemplateToImageStore(tmplt, store); - } + storageOrchestrator.orchestrateTemplateCopyFromSecondaryStores(tmplt.getId(), store); } else { logger.info("Skip downloading template {} since current data center does not have hypervisor {}", tmplt, tmplt.getHypervisorType()); } @@ -619,6 +620,16 @@ public void handleTemplateSync(DataStore store) { } + @Override + public void handleTemplateCopyFromSecondaryStores(long templateId, DataStore destStore) { + VMTemplateVO template = _templateDao.findById(templateId); + long zoneId = destStore.getScope().getScopeId(); + boolean copied = imageStoreDetailsUtil.isCopyTemplatesFromOtherStoragesEnabled(destStore.getId(), zoneId) && tryCopyingTemplateToImageStore(template, destStore); + if (!copied) { + tryDownloadingTemplateToImageStore(template, destStore); + } + } + protected void tryDownloadingTemplateToImageStore(VMTemplateVO tmplt, DataStore destStore) { if (tmplt.getUrl() == null) { logger.info("Not downloading template [{}] to image store [{}], as it has no URL.", tmplt.getUniqueName(), @@ -636,28 +647,134 @@ protected void tryDownloadingTemplateToImageStore(VMTemplateVO tmplt, DataStore } protected boolean tryCopyingTemplateToImageStore(VMTemplateVO tmplt, DataStore destStore) { - Long zoneId = destStore.getScope().getScopeId(); - List storesInZone = _storeMgr.getImageStoresByZoneIds(zoneId); - for (DataStore sourceStore : storesInZone) { - Map existingTemplatesInSourceStore = listTemplate(sourceStore); - if (existingTemplatesInSourceStore == null || !existingTemplatesInSourceStore.containsKey(tmplt.getUniqueName())) { - logger.debug("Template [{}] does not exist on image store [{}]; searching on another one.", - tmplt.getUniqueName(), sourceStore.getName()); + if (searchAndCopyWithinZone(tmplt, destStore)) { + return true; + } + + Long destZoneId = destStore.getScope().getScopeId(); + logger.debug("Template [{}] not found in any image store of zone [{}]. Checking other zones.", + tmplt.getUniqueName(), destZoneId); + + return searchAndCopyAcrossZones(tmplt, destStore, destZoneId); + } + + private boolean searchAndCopyAcrossZones(VMTemplateVO tmplt, DataStore destStore, Long destZoneId) { + List allZoneIds = _dcDao.listAllIds(); + for (Long otherZoneId : allZoneIds) { + if (otherZoneId.equals(destZoneId)) { continue; } - TemplateObject sourceTmpl = (TemplateObject) _templateFactory.getTemplate(tmplt.getId(), sourceStore); - if (sourceTmpl.getInstallPath() == null) { - logger.warn("Can not copy template [{}] from image store [{}], as it returned a null install path.", tmplt.getUniqueName(), - sourceStore.getName()); + + List storesInOtherZone = _storeMgr.getImageStoresByZoneIds(otherZoneId); + logger.debug("Checking zone [{}] for template [{}]...", otherZoneId, tmplt.getUniqueName()); + + if (CollectionUtils.isEmpty(storesInOtherZone)) { + logger.debug("Zone [{}] has no image stores. Skipping.", otherZoneId); continue; } - storageOrchestrator.orchestrateTemplateCopyToImageStore(sourceTmpl, destStore); - return true; + + TemplateObject sourceTmpl = findUsableTemplate(tmplt, storesInOtherZone); + if (sourceTmpl == null) { + logger.debug("Template [{}] not found with a valid install path in any image store of zone [{}].", + tmplt.getUniqueName(), otherZoneId); + continue; + } + + logger.info("Template [{}] found in zone [{}]. Initiating cross-zone copy to zone [{}].", + tmplt.getUniqueName(), otherZoneId, destZoneId); + + return copyTemplateAcrossZones(destStore, sourceTmpl); } - logger.debug("Can't copy template [{}] from another image store.", tmplt.getUniqueName()); + + logger.debug("Template [{}] was not found in any zone. Cannot perform zone-to-zone copy.", tmplt.getUniqueName()); return false; } + protected TemplateObject findUsableTemplate(VMTemplateVO tmplt, List imageStores) { + for (DataStore store : imageStores) { + + Map templates = listTemplate(store); + if (templates == null || !templates.containsKey(tmplt.getUniqueName())) { + continue; + } + + TemplateObject tmpl = (TemplateObject) _templateFactory.getTemplate(tmplt.getId(), store); + if (tmpl.getInstallPath() == null) { + logger.debug("Template [{}] found in image store [{}] but install path is null. Skipping.", + tmplt.getUniqueName(), store.getName()); + continue; + } + return tmpl; + } + return null; + } + + private boolean searchAndCopyWithinZone(VMTemplateVO tmplt, DataStore destStore) { + Long destZoneId = destStore.getScope().getScopeId(); + List storesInSameZone = _storeMgr.getImageStoresByZoneIds(destZoneId); + + TemplateObject sourceTmpl = findUsableTemplate(tmplt, storesInSameZone); + if (sourceTmpl == null) { + return false; + } + + TemplateApiResult result; + AsyncCallFuture future = copyTemplateToImageStore(sourceTmpl, destStore); + try { + result = future.get(); + } catch (ExecutionException | InterruptedException e) { + logger.warn("Exception while copying template [{}] from image store [{}] to image store [{}]: {}", + sourceTmpl.getUniqueName(), sourceTmpl.getDataStore().getName(), destStore.getName(), e.toString()); + result = new TemplateApiResult(sourceTmpl); + result.setResult(e.getMessage()); + } + return result.isSuccess(); + } + + private boolean copyTemplateAcrossZones(DataStore destStore, TemplateObject sourceTmpl) { + Long dstZoneId = destStore.getScope().getScopeId(); + DataCenterVO dstZone = _dcDao.findById(dstZoneId); + + if (dstZone == null) { + logger.warn("Destination zone [{}] not found for template [{}].", dstZoneId, sourceTmpl.getUniqueName()); + return false; + } + + TemplateApiResult result; + try { + VMTemplateVO template = _templateDao.findById(sourceTmpl.getId()); + try { + DataStore sourceStore = sourceTmpl.getDataStore(); + long userId = CallContext.current().getCallingUserId(); + boolean success = _tmpltMgr.copy(userId, template, sourceStore, dstZone); + + result = new TemplateApiResult(sourceTmpl); + if (!success) { + result.setResult("Cross-zone template copy failed"); + } + } catch (StorageUnavailableException | ResourceAllocationException e) { + logger.error("Exception while copying template [{}] from zone [{}] to zone [{}]", + template, + sourceTmpl.getDataStore().getScope().getScopeId(), + dstZone.getId(), + e); + result = new TemplateApiResult(sourceTmpl); + result.setResult(e.getMessage()); + } finally { + ThreadContext.clearAll(); + } + } catch (Exception e) { + logger.error("Failed to copy template [{}] from zone [{}] to zone [{}].", + sourceTmpl.getUniqueName(), + sourceTmpl.getDataStore().getScope().getScopeId(), + dstZoneId, + e); + return false; + } + + return result.isSuccess(); + } + @Override public AsyncCallFuture copyTemplateToImageStore(DataObject source, DataStore destStore) { TemplateObject sourceTmpl = (TemplateObject) source; @@ -701,10 +818,6 @@ protected Void copyTemplateToImageStoreCallback(AsyncCallbackDispatcher