chore: fix CVE (#267)
* fix cve

* chore: fix-cve

* chore: fix-cve

* fix: update spring cloud context

* fix: update POM

* fix: update values

* [IDP 1847] modified env values

---------

Co-authored-by: DanieleRanaldo <[email protected]>
Co-authored-by: stefanodel <[email protected]>
3 people authored Sep 20, 2024
1 parent 7271c50 commit 99e5438
Showing 10 changed files with 70 additions and 112 deletions.
6 changes: 2 additions & 4 deletions .grype.yaml
@@ -1,6 +1,6 @@
 ignore:
   # false positive match on reactor-netty packages due to a bug on grype: https://github.com/anchore/grype/issues/431
-  # Actually we are using netty 4.1.104
+  # Actually we are using netty 4.1.113Final
   - vulnerability: CVE-2014-3488 # solved in netty 3.9.2
   - vulnerability: CVE-2015-2156 # solved in netty 4.1.42
   - vulnerability: CVE-2019-16869 # solved in netty 4.1.42
@@ -14,6 +14,4 @@ ignore:
   - vulnerability: CVE-2021-43797 # solved in netty 4.1.71
   - vulnerability: CVE-2022-24823 # solved in netty 4.1.77
   - vulnerability: CVE-2022-41881 # solved in netty 4.1.86
-  - vulnerability: CVE-2023-34462 # solved in netty 4.1.94
-  - vulnerability: CVE-2023-44487 # solved in netty 4.1.100
-  - vulnerability: GHSA-j4r7-p9fp-w3f3 # solved in spring-cloud-function-context 4.1.2
+  - vulnerability: CVE-2023-34462 # solved in netty 4.1.94
2 changes: 1 addition & 1 deletion Dockerfile
@@ -11,7 +11,7 @@ RUN mvn clean package -DskipTests
 #
 # Docker RUNTIME
 #
-FROM amazoncorretto:17-alpine3.20@sha256:b6e5aab53c360dd5f9843b18b397dee2ceed3211ac9be6cf36a51606d6ec015e AS runtime
+FROM amazoncorretto:17-alpine3.20@sha256:1b1d0653d890ff313a1f7afadd1fd81f5ea742c9c48670d483b1bbccef98bb8b AS runtime

 RUN apk --no-cache add shadow
 RUN useradd --uid 10000 runner
10 changes: 5 additions & 5 deletions helm/values-dev.yaml
@@ -9,11 +9,11 @@ microservice-chart:

   resources:
     requests:
-      memory: "256Mi"
-      cpu: "300m"
+      memory: "768Mi"
+      cpu: "150m"
     limits:
-      memory: "4Gi"
-      cpu: "500m"
+      memory: "768Mi"
+      cpu: "600m"

   autoscaling:
     enable: true
@@ -37,7 +37,7 @@ microservice-chart:
       tenantId: "7788edaf-0346-4068-9d79-c868aed15b3d"

   envConfig:
-    JAVA_TOOL_OPTIONS: "-Xms128m -Xmx4g -Djava.util.concurrent.ForkJoinPool.common.parallelism=7 -Dio.netty.eventLoopThreads=100 -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json -agentlib:jdwp=transport=dt_socket,server=y,address=8001,suspend=n -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.port=3002 -Dcom.sun.management.jmxremote.rmi.port=3003 -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+    JAVA_TOOL_OPTIONS: "-Xms256m -Xmx512m -Djava.util.concurrent.ForkJoinPool.common.parallelism=7 -Dio.netty.eventLoopThreads=100 -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json -agentlib:jdwp=transport=dt_socket,server=y,address=8001,suspend=n -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.port=3002 -Dcom.sun.management.jmxremote.rmi.port=3003 -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
     CACHE_REFRESH_MS_RATE: "60000"
     REDIS_CACHE_ENABLED: "true"
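Note: the heap flags drop from -Xmx4g to -Xmx512m so the JVM fits inside the new 768Mi container limit; the same change lands in the prod and uat values below. A tiny, hypothetical sketch (not part of this repository) for double-checking the effective ceiling inside the pod:

// HeapCheck.java - a hypothetical helper, not part of this repository.
public class HeapCheck {
    public static void main(String[] args) {
        // maxMemory() reflects the -Xmx ceiling the JVM picked up from
        // JAVA_TOOL_OPTIONS, so it can be compared with the 768Mi pod limit.
        long maxBytes = Runtime.getRuntime().maxMemory();
        System.out.printf("Max heap: %d MiB%n", maxBytes / (1024 * 1024));
    }
}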
10 changes: 5 additions & 5 deletions helm/values-prod.yaml
@@ -9,11 +9,11 @@ microservice-chart:

   resources:
     requests:
-      memory: "256Mi"
-      cpu: "300m"
+      memory: "768Mi"
+      cpu: "150m"
     limits:
-      memory: "4Gi"
-      cpu: "500m"
+      memory: "768Mi"
+      cpu: "600m"

   autoscaling:
     enable: true
@@ -36,7 +36,7 @@ microservice-chart:
       tenantId: "7788edaf-0346-4068-9d79-c868aed15b3d"

   envConfig:
-    JAVA_TOOL_OPTIONS: "-Xms128m -Xmx4g -Djava.util.concurrent.ForkJoinPool.common.parallelism=7 -Dio.netty.eventLoopThreads=100 -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json"
+    JAVA_TOOL_OPTIONS: "-Xms256m -Xmx512m -Djava.util.concurrent.ForkJoinPool.common.parallelism=7 -Dio.netty.eventLoopThreads=100 -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json"
     CACHE_REFRESH_MS_RATE: "60000"
     REDIS_CACHE_ENABLED: "true"
10 changes: 5 additions & 5 deletions helm/values-uat.yaml
@@ -9,11 +9,11 @@ microservice-chart:

   resources:
     requests:
-      memory: "256Mi"
-      cpu: "300m"
+      memory: "768Mi"
+      cpu: "150m"
     limits:
-      memory: "4Gi"
-      cpu: "500m"
+      memory: "768Mi"
+      cpu: "600m"

   autoscaling:
     enable: true
@@ -38,7 +38,7 @@ microservice-chart:
       tenantId: "7788edaf-0346-4068-9d79-c868aed15b3d"

   envConfig:
-    JAVA_TOOL_OPTIONS: '-Xms128m -Xmx4g -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json'
+    JAVA_TOOL_OPTIONS: '-Xms256m -Xmx512m -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json'
     CACHE_REFRESH_MS_RATE: "10000"
     REDIS_CACHE_ENABLED: "true"
1 change: 1 addition & 0 deletions helm/values.yaml
@@ -64,6 +64,7 @@ microservice-chart:
     _JAVA_OPTIONS: "-Djava.util.concurrent.ForkJoinPool.common.parallelism=7 -Dio.netty.eventLoopThreads=100 -Dreactor.netty.ioWorkerCount=4 -Dreactor.netty.pool.maxConnections=16"
     DELETE_PAGINATION_SIZE: "60"
     DELETE_DELAY_TIME: "1000"
+    HEALTH_MONGO_ENABLED: "false"

   externalConfigMapValues:
     idpay-common:
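HEALTH_MONGO_ENABLED: "false" presumably switches off a Mongo health probe through Spring Boot's relaxed binding, under which the env var HEALTH_MONGO_ENABLED maps onto a property such as health.mongo.enabled. The wiring below is a hypothetical sketch of how such a flag typically gates a HealthIndicator bean; the property name and bean are assumptions, not code from this repository:

import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

// Hypothetical sketch: shows the usual pattern for an env-var-gated
// health indicator; this repository's actual wiring may differ.
@Configuration
public class MongoHealthConfig {

    @Bean
    @ConditionalOnProperty(value = "health.mongo.enabled", havingValue = "true")
    public HealthIndicator mongoHealthIndicator() {
        // A real indicator would ping Mongo; this stub just reports UP.
        return () -> Health.up().withDetail("mongo", "reachable").build();
    }
}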
31 changes: 16 additions & 15 deletions pom.xml
@@ -4,7 +4,7 @@
   <parent>
     <groupId>org.springframework.boot</groupId>
     <artifactId>spring-boot-starter-parent</artifactId>
-    <version>3.1.8</version>
+    <version>3.3.3</version>
   </parent>

   <groupId>it.gov.pagopa</groupId>
@@ -226,35 +226,36 @@
 </dependencies>
 <dependencyManagement>
     <dependencies>
+        <dependency>
+            <groupId>com.azure.spring</groupId>
+            <artifactId>spring-cloud-azure-dependencies</artifactId>
+            <version>5.16.0</version>
+            <type>pom</type>
+            <scope>import</scope>
+        </dependency>
         <dependency>
             <groupId>org.springframework.cloud</groupId>
             <artifactId>spring-cloud-dependencies</artifactId>
-            <version>2022.0.4</version>
+            <version>2023.0.2</version>
             <type>pom</type>
             <scope>import</scope>
         </dependency>

         <dependency>
             <groupId>io.netty</groupId>
             <artifactId>netty-bom</artifactId>
             <!-- Forced to >= 4.1.100 due to https://nvd.nist.gov/vuln/detail/CVE-2023-44487 -->
-            <version>4.1.104.Final</version>
+            <version>4.1.113.Final</version>
             <type>pom</type>
             <scope>import</scope>
         </dependency>

         <dependency>
-            <groupId>com.azure.spring</groupId>
-            <artifactId>spring-cloud-azure-dependencies</artifactId>
-            <version>5.8.0</version>
-            <type>pom</type>
-            <scope>import</scope>
+            <groupId>io.projectreactor.netty</groupId>
+            <artifactId>reactor-netty-core</artifactId>
+            <version>1.1.22</version>
         </dependency>

         <dependency>
-            <groupId>org.springframework.cloud</groupId>
-            <artifactId>spring-cloud-function-context</artifactId>
-            <version>4.1.2</version>
+            <groupId>io.projectreactor.netty</groupId>
+            <artifactId>reactor-netty-http</artifactId>
+            <version>1.1.22</version>
         </dependency>
     </dependencies>
 </dependencyManagement>
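Since the netty-bom pin above is what actually resolves the remaining Netty CVEs, it is worth confirming which Netty version wins dependency resolution: mvn dependency:tree -Dincludes=io.netty shows it at build time, and the sketch below (illustrative only, not part of this repository) does the same at runtime via Netty's own version metadata:

import io.netty.util.Version;
import java.util.Map;

// VersionCheck.java - illustrative only, not part of this repository.
// Version.identify() reads the version info embedded in each Netty jar,
// so it shows whether the 4.1.113.Final pin really took effect.
public class VersionCheck {
    public static void main(String[] args) {
        Map<String, Version> versions = Version.identify();
        versions.forEach((artifact, version) ->
                System.out.println(artifact + " -> " + version.artifactVersion()));
    }
}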
@@ -1,13 +1,10 @@
 package it.gov.pagopa.common.kafka;

-import ch.qos.logback.classic.LoggerContext;
-import ch.qos.logback.classic.spi.ILoggingEvent;
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import it.gov.pagopa.common.kafka.utils.KafkaConstants;
-import it.gov.pagopa.common.reactive.kafka.consumer.BaseKafkaConsumer;
-import it.gov.pagopa.common.utils.MemoryAppender;
 import it.gov.pagopa.common.utils.TestUtils;
+import org.apache.kafka.clients.admin.AdminClient;
 import org.apache.kafka.clients.admin.OffsetSpec;
 import org.apache.kafka.clients.admin.RecordsToDelete;
 import org.apache.kafka.clients.admin.TopicListing;
@@ -20,7 +17,6 @@
 import org.apache.kafka.common.serialization.ByteArraySerializer;
 import org.apache.kafka.common.serialization.StringDeserializer;
 import org.junit.jupiter.api.Assertions;
-import org.slf4j.LoggerFactory;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.beans.factory.annotation.Value;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
@@ -83,28 +79,30 @@ public KafkaTemplate<byte[], byte[]> testPublisher(ProducerFactory<byte[], byte[

     @AfterTestClass
     void clearTopics() {
-        kafkaBroker.doWithAdmin(admin -> {
-            try {
-                Collection<TopicListing> topics = admin.listTopics().listings().get();
-                admin.deleteRecords(
-                        admin.listOffsets(
-                                topics.stream()
-                                        .filter(topicListing -> !topicListing.isInternal())
-                                        .flatMap(t -> IntStream.range(0, kafkaBroker.getPartitionsPerTopic())
-                                                .boxed()
-                                                .map(p -> new TopicPartition(t.name(), p)))
-                                        .collect(Collectors.toMap(tp -> tp,
-                                                tp -> OffsetSpec.latest()))
-                        ).all().get().entrySet().stream()
-                                .filter(e -> e.getValue().offset() > 0)
-                                .collect(Collectors.toMap(Map.Entry::getKey,
-                                        e -> RecordsToDelete.beforeOffset(e.getValue().offset()))))
-                        .all().get();
-
-            } catch (InterruptedException | ExecutionException e) {
-                throw new IllegalStateException("Something gone wrong while emptying topics", e);
-            }
-        });
+        Map<String, Object> brokerConfig = Map.of(
+                "bootstrap.servers", kafkaBroker.getBrokersAsString()
+        );
+
+        try (AdminClient admin = AdminClient.create(brokerConfig)) {
+            Collection<TopicListing> topics = admin.listTopics().listings().get();
+            admin.deleteRecords(
+                    admin.listOffsets(
+                            topics.stream()
+                                    .filter(topicListing -> !topicListing.isInternal())
+                                    .flatMap(t -> IntStream.range(0, kafkaBroker.getPartitionsPerTopic())
+                                            .boxed()
+                                            .map(p -> new TopicPartition(t.name(), p)))
+                                    .collect(Collectors.toMap(tp -> tp,
+                                            tp -> OffsetSpec.latest()))
+                    ).all().get().entrySet().stream()
+                            .filter(e -> e.getValue().offset() > 0)
+                            .collect(Collectors.toMap(Map.Entry::getKey,
+                                    e -> RecordsToDelete.beforeOffset(e.getValue().offset()))))
+                    .all().get();
+
+        } catch (InterruptedException | ExecutionException e) {
+            throw new IllegalStateException("Something went wrong while clearing topics", e);
+        }
     }

     /** It will return usefull URLs related to embedded kafka */
@@ -223,10 +221,10 @@ public void publishIntoEmbeddedKafka(String topic, Integer partition, Iterable<H
             headers = Stream.concat(
                             Arrays.stream(additionalHeaders),
                             StreamSupport.stream(headers.spliterator(), false))
-                    .collect(Collectors.toList());
+                    .toList();
         }
-        ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(topic, partition, key == null ? null : key.getBytes(StandardCharsets.UTF_8), payload.getBytes(StandardCharsets.UTF_8), headers);
-        template.send(record);
+        ProducerRecord<byte[], byte[]> rec = new ProducerRecord<>(topic, partition, key == null ? null : key.getBytes(StandardCharsets.UTF_8), payload.getBytes(StandardCharsets.UTF_8), headers);
+        template.send(rec);
     }
     //endregion

@@ -279,55 +277,16 @@ public Map<TopicPartition, Long> checkPublishedOffsets(String topic, long expect
     }
     //endregion

-    //region check commit by logs
-    protected MemoryAppender commitLogMemoryAppender;
-    /** To be called before each test in order to perform the asserts on {@link #assertCommitOrder(String, int)} */
-    public void setupCommitLogMemoryAppender() {
-        ch.qos.logback.classic.Logger logger = (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(BaseKafkaConsumer.class.getName());
-        commitLogMemoryAppender = new MemoryAppender();
-        commitLogMemoryAppender.setContext((LoggerContext) LoggerFactory.getILoggerFactory());
-        logger.setLevel(ch.qos.logback.classic.Level.INFO);
-        logger.addAppender(commitLogMemoryAppender);
-        commitLogMemoryAppender.start();
-    }
-
-    private final Pattern partitionCommitsPattern = Pattern.compile("partition (\\d+): (\\d+) - (\\d+)");
-    /** It will assert the right offset commit and the total messages by the provided {@link BaseKafkaConsumer#getFlowName()}.<br />
-     * In order to be used, you have to call {@link #setupCommitLogMemoryAppender()} before each test */
-    public void assertCommitOrder(String flowName, int totalSendMessages) {
-        Map<Integer, Integer> partition2last = new HashMap<>(Map.of(0, -1, 1, -1));
-        for (ILoggingEvent loggedEvent : commitLogMemoryAppender.getLoggedEvents()) {
-            if(loggedEvent.getMessage().equals("[KAFKA_COMMIT][{}] Committing {} messages: {}") && flowName.equals(loggedEvent.getArgumentArray()[0])){
-                Arrays.stream(((String)loggedEvent.getArgumentArray()[2]).split(";"))
-                        .forEach(s -> {
-                            Matcher matcher = partitionCommitsPattern.matcher(s);
-                            Assertions.assertTrue(matcher.matches(), "Unexpected partition commit string: " + s);
-                            int partition = Integer.parseInt(matcher.group(1));
-                            int startOffset = Integer.parseInt(matcher.group(2));
-                            int endOffset = Integer.parseInt(matcher.group(3));
-                            Assertions.assertTrue(endOffset>=startOffset, "EndOffset less than StartOffset!: " + s);
-
-                            Integer lastCommittedOffset = partition2last.get(partition);
-                            Assertions.assertEquals(lastCommittedOffset, startOffset-1);
-                            partition2last.put(partition, endOffset);
-                        });
-            }
-        }
-
-        Assertions.assertEquals(totalSendMessages, partition2last.values().stream().mapToInt(x->x+1).sum());
-    }
-    //endregion
-
     //region error topic
     public void checkErrorsPublished(String topicErrors, Pattern errorUseCaseIdPatternMatch, int expectedErrorMessagesNumber, long maxWaitingMs, List<Pair<Supplier<String>, java.util.function.Consumer<ConsumerRecord<String, String>>>> errorUseCases) {
         final List<ConsumerRecord<String, String>> errors = consumeMessages(topicErrors, expectedErrorMessagesNumber, maxWaitingMs);
-        for (final ConsumerRecord<String, String> record : errors) {
-            final Matcher matcher = errorUseCaseIdPatternMatch.matcher(record.value());
+        for (final ConsumerRecord<String, String> rec : errors) {
+            final Matcher matcher = errorUseCaseIdPatternMatch.matcher(rec.value());
             int useCaseId = matcher.find() ? Integer.parseInt(matcher.group(1)) : -1;
             if (useCaseId == -1) {
-                throw new IllegalStateException("UseCaseId not recognized! " + record.value());
+                throw new IllegalStateException("UseCaseId not recognized! " + rec.value());
             }
-            errorUseCases.get(useCaseId).getSecond().accept(record);
+            errorUseCases.get(useCaseId).getSecond().accept(rec);
         }
     }

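Two side notes on the test refactor above. The clearTopics() rewrite builds its own AdminClient from kafkaBroker.getBrokersAsString() instead of going through EmbeddedKafkaBroker.doWithAdmin, presumably because that helper went away along the spring-kafka upgrade path pulled in by Spring Boot 3.3 (an assumption; the commit message does not say). The rename of record to rec sidesteps the contextual record keyword, and the switch from collect(Collectors.toList()) to toList() changes the result to an unmodifiable list, which is safe here since the list is only consumed to build the ProducerRecord. A minimal standalone sketch of that last difference:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

// Illustrative only: Stream.toList() (Java 16+) yields an unmodifiable list,
// while Collectors.toList() has historically produced a mutable ArrayList
// (though its spec makes no mutability guarantee).
public class ToListDemo {
    public static void main(String[] args) {
        List<String> mutable = Stream.of("a", "b").collect(Collectors.toList());
        mutable.add("c"); // fine in practice

        List<String> frozen = Stream.of("a", "b").toList();
        try {
            frozen.add("c"); // throws UnsupportedOperationException
        } catch (UnsupportedOperationException e) {
            System.out.println("toList() result is unmodifiable");
        }
    }
}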
@@ -2,7 +2,6 @@

 import com.google.common.cache.Cache;
 import it.gov.pagopa.common.reactive.pdv.dto.UserInfoPDV;
-import it.gov.pagopa.common.utils.TestUtils;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -63,7 +62,6 @@ void getUserInfoNotInCache(){

         // Then
         Assertions.assertNotNull(result);
-        TestUtils.checkNotNullFields(result);
         Assertions.assertEquals("FISCALCODE_RETRIEVED", result);
         Assertions.assertNotNull(inspectCache.getIfPresent(userIdTest));
         Assertions.assertEquals(initialSizeCache+1,inspectCache.size());
@@ -83,10 +81,8 @@ void getUserInfoInCache(){
         Assertions.assertEquals(initialSizeCache,inspectCache.size());

         String result = userFiscalCodeService.getUserFiscalCode(userIdTest).block();
-
         // Then
         Assertions.assertNotNull(result);
-        TestUtils.checkNotNullFields(result);
         Assertions.assertEquals("FISCALCODE_0", result);
         Assertions.assertNotNull(inspectCache.getIfPresent(userIdTest));
         Assertions.assertEquals(initialSizeCache,inspectCache.size());
3 changes: 3 additions & 0 deletions src/test/java/it/gov/pagopa/common/utils/TestUtils.java
@@ -62,6 +62,9 @@ public static void checkNullFields(Object o, String... excludedFields) {
         Set<String> excludedFieldsSet = new HashSet<>(Arrays.asList(excludedFields));
         org.springframework.util.ReflectionUtils.doWithFields(o.getClass(),
                 f -> {
+                    if (f.getType().isPrimitive()) {
+                        return;
+                    }
                     f.setAccessible(true);
                     Object value = f.get(o);
                     Assertions.assertNull(value, "The field %s of the input object of type %s is not null: %s".formatted(f.getName(), o.getClass(), value));
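The new primitive guard is needed because Field.get() boxes primitive values, so a primitive field can never appear null and checkNullFields would always fail on it. A standalone sketch of the boxing behavior (the Sample class is hypothetical, not from this repo):

import java.lang.reflect.Field;

// Illustrative only: shows why checkNullFields must skip primitive fields.
public class PrimitiveBoxingDemo {
    static class Sample {
        int count;     // primitive: Field.get() returns Integer 0, never null
        Integer boxed; // reference: genuinely null until assigned
    }

    public static void main(String[] args) throws Exception {
        Sample s = new Sample();
        for (Field f : Sample.class.getDeclaredFields()) {
            f.setAccessible(true);
            System.out.println(f.getName() + " -> " + f.get(s));
        }
        // prints: count -> 0, boxed -> null
    }
}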
