adding tests and fixing bug when checking areEqual images
gerardcl committed Dec 18, 2024
1 parent 435870a commit ea5432a
Showing 3 changed files with 137 additions and 4 deletions.
10 changes: 6 additions & 4 deletions src/org/ods/component/HelmDeploymentStrategy.groovy
@@ -199,12 +199,13 @@ class HelmDeploymentStrategy extends AbstractDeploymentStrategy {

// We need to find the pod that was created as a result of the deployment.
// The previous pod may still be alive when we use a rollout strategy.
- // We can tell one from the other using their creation timestamp
+ // We can tell one from the other using their creation timestamp,
+ // the most recent one being the one we are interested in.
def latestPods = getLatestPods(podData)
// While very unlikely, it may happen that there is more than one pod with the same timestamp.
// Note that timestamp resolution is seconds.
// If that happens, we are unable to know which is the correct pod.
- // However, we it doesn't matter which pod is the right one, if they all have the same images.
+ // However, it doesn't matter which pod is the right one, if they all have the same images.
def sameImages = haveSameImages(latestPods)
if (!sameImages) {
throw new RuntimeException("Unable to determine the most recent Pod. Multiple pods running with the same latest creation timestamp and different images found for ${resourceName}")
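The helpers referenced above, getLatestPods and haveSameImages, are not part of this diff; the following is a minimal sketch of what the comments describe, assuming PodData exposes podMetaDataCreationTimestamp and a containers map. The bodies are illustrative, not the actual implementation.

// Keep only the pods carrying the most recent creation timestamp.
// ISO 8601 timestamps sort correctly as plain strings (see the PodData comment below),
// so taking the max of the string field is sufficient.
private static List getLatestPods(Iterable pods) {
    def latest = pods*.podMetaDataCreationTimestamp.max()
    return pods.findAll { it.podMetaDataCreationTimestamp == latest }
}

// True when every candidate pod runs exactly the same set of images.
private static boolean haveSameImages(Iterable pods) {
    return areEqual(pods) { a, b -> a.containers == b.containers }
}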
@@ -285,6 +286,7 @@ class HelmDeploymentStrategy extends AbstractDeploymentStrategy {
*/
@NonCPS
private static boolean areEqual(Iterable iterable, Closure equals) {
def equal = true
if (iterable) {
def first = true
def base = null
@@ -293,10 +295,10 @@
base = it
first = false
} else if (!equals(base, it)) {
- return false
+ equal = false
}
}
}
- return true
+ return equal
}
}
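The bug fixed above is a common Groovy pitfall: a return statement inside a closure passed to each only ends the current iteration, not the enclosing method, so the original return false never reached the caller and areEqual always fell through to return true. Below is a standalone sketch of the pitfall and of the flag-based fix, with made-up method names and sample values.

// Broken variant: always reports true, because "return false" only exits
// the current iteration of the each-closure, never the method itself.
boolean allEqualBroken(List items) {
    items.each { if (it != items[0]) { return false } }
    return true
}

// Fixed variant, same shape as the patched areEqual: accumulate into a flag.
boolean allEqualFixed(List items) {
    def equal = true
    items.each { if (it != items[0]) { equal = false } }
    return equal
}

assert allEqualBroken(['imageAold', 'imageAnew'])    // bug: different items reported as equal
assert !allEqualFixed(['imageAold', 'imageAnew'])    // fix: the difference is detected

An alternative shape would be Groovy's every { }, which short-circuits on the first mismatch; the commit keeps the each-based loop and only adds the flag.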
2 changes: 2 additions & 0 deletions src/org/ods/util/PodData.groovy
@@ -17,6 +17,8 @@ class PodData {

// podMetaDataCreationTimestamp equals .metadata.creationTimestamp.
// Example: 2020-11-02T10:57:35Z
// We can use String to compare timestamps in this case,
// because ISO 8601 timestamps are designed to be sortable as strings.
String podMetaDataCreationTimestamp

// deploymentId is the name of the pod manager, such as the ReplicaSet or
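To illustrate the comment above: these timestamps are fixed-width, zero-padded, UTC ISO 8601 strings (the format used by .metadata.creationTimestamp), so lexicographic order matches chronological order and plain string comparison is enough. The values below are arbitrary examples.

// Lexicographic comparison of fixed-width ISO 8601 UTC timestamps
// agrees with chronological comparison.
assert '2024-12-12T20:10:46Z' < '2024-12-12T20:10:47Z'
assert ['2024-11-11T20:10:46Z', '2024-12-12T20:10:47Z'].max() == '2024-12-12T20:10:47Z'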
129 changes: 129 additions & 0 deletions test/groovy/org/ods/component/HelmDeploymentStrategySpec.groovy
@@ -83,4 +83,133 @@ class HelmDeploymentStrategySpec extends PipelineSpockTestBase {

assert expectedDeploymentMeans == actualDeploymentMeans
}

def "rollout: check deploymentMean when multiple pods then accept only latest"() {
given:

def expectedDeploymentMeans = [
"builds": [:],
"deployments": [
"bar-deploymentMean": [
"type": "helm",
"selector": "app=foo-bar",
"chartDir": "chart",
"helmReleaseName": "bar",
"helmEnvBasedValuesFiles": [],
"helmValuesFiles": ["values.yaml"],
"helmValues": [:],
"helmDefaultFlags": ["--install", "--atomic"],
"helmAdditionalFlags": []
],
"bar":[
"podName": null,
"podNamespace": null,
"podMetaDataCreationTimestamp": "2024-12-12T20:10:47Z",
"deploymentId": "bar-124",
"podNode": null,
"podIp": null,
"podStatus": null,
"podStartupTimeStamp": null,
"containers": [
"containerA": "imageAnew",
"containerB": "imageBnew",
],
]
]
]
def config = [:]

def ctxData = contextData + [environment: 'dev', targetProject: 'foo-dev', openshiftRolloutTimeoutRetries: 5, chartDir: 'chart']
IContext context = new Context(null, ctxData, logger)
OpenShiftService openShiftService = Mock(OpenShiftService.class)
openShiftService.checkForPodData(*_) >> [
new PodData([deploymentId: "${contextData.componentId}-124", podMetaDataCreationTimestamp: "2024-12-12T20:10:46Z", containers: ["containerA": "imageAold", "containerB": "imageBold"]]),
new PodData([deploymentId: "${contextData.componentId}-124", podMetaDataCreationTimestamp: "2024-12-12T20:10:47Z", containers: ["containerA": "imageAnew", "containerB": "imageBnew"]]),
new PodData([deploymentId: "${contextData.componentId}-123", podMetaDataCreationTimestamp: "2024-11-11T20:10:46Z"])
]
ServiceRegistry.instance.add(OpenShiftService, openShiftService)

JenkinsService jenkinsService = Stub(JenkinsService.class)
jenkinsService.maybeWithPrivateKeyCredentials(*_) >> { args -> args[1]('/tmp/file') }
ServiceRegistry.instance.add(JenkinsService, jenkinsService)

HelmDeploymentStrategy strategy = Spy(HelmDeploymentStrategy, constructorArgs: [null, context, config, openShiftService, jenkinsService, logger])

when:
def deploymentResources = [Deployment: ['bar']]
def rolloutData = strategy.getRolloutData(deploymentResources)
def actualDeploymentMeans = context.getBuildArtifactURIs()


then:
printCallStack()
assertJobStatusSuccess()

assert expectedDeploymentMeans == actualDeploymentMeans
}

def "rollout: check deploymentMean when multiple pods with same timestamp but different image then pipeline fails"() {
given:

def expectedDeploymentMeans = [
"builds": [:],
"deployments": [
"bar-deploymentMean": [
"type": "helm",
"selector": "app=foo-bar",
"chartDir": "chart",
"helmReleaseName": "bar",
"helmEnvBasedValuesFiles": [],
"helmValuesFiles": ["values.yaml"],
"helmValues": [:],
"helmDefaultFlags": ["--install", "--atomic"],
"helmAdditionalFlags": []
],
"bar":[
"podName": null,
"podNamespace": null,
"podMetaDataCreationTimestamp": "2024-12-12T20:10:47Z",
"deploymentId": "bar-124",
"podNode": null,
"podIp": null,
"podStatus": null,
"podStartupTimeStamp": null,
"containers": [
"containerA": "imageAnew",
"containerB": "imageBnew",
],
]
]
]
def config = [:]

def ctxData = contextData + [environment: 'dev', targetProject: 'foo-dev', openshiftRolloutTimeoutRetries: 5, chartDir: 'chart']
IContext context = new Context(null, ctxData, logger)
OpenShiftService openShiftService = Mock(OpenShiftService.class)
openShiftService.checkForPodData(*_) >> [
new PodData([deploymentId: "${contextData.componentId}-124", podMetaDataCreationTimestamp: "2024-12-12T20:10:47Z", containers: ["containerA": "imageAnew", "containerB": "imageBnew"]]),
new PodData([deploymentId: "${contextData.componentId}-124", podMetaDataCreationTimestamp: "2024-12-12T20:10:47Z", containers: ["containerA": "imageAold", "containerB": "imageBold"]]),
]
ServiceRegistry.instance.add(OpenShiftService, openShiftService)

JenkinsService jenkinsService = Stub(JenkinsService.class)
jenkinsService.maybeWithPrivateKeyCredentials(*_) >> { args -> args[1]('/tmp/file') }
ServiceRegistry.instance.add(JenkinsService, jenkinsService)

HelmDeploymentStrategy strategy = Spy(HelmDeploymentStrategy, constructorArgs: [null, context, config, openShiftService, jenkinsService, logger])

when:
def deploymentResources = [Deployment: ['bar']]
def rolloutData = strategy.getRolloutData(deploymentResources)
def actualDeploymentMeans = context.getBuildArtifactURIs()


then:
def e = thrown(RuntimeException)
e.message == "Unable to determine the most recent Pod. Multiple pods running with the same latest creation timestamp and different images found for bar"

// TODO question: is it expected that the pipeline still succeeds?
assertJobStatusSuccess()
}

}
