diff --git a/.grype.yaml b/.grype.yaml
index b030c206..fdbe1dcf 100644
--- a/.grype.yaml
+++ b/.grype.yaml
@@ -1,6 +1,6 @@
ignore:
# false positive match on reactor-netty packages due to a bug on grype: https://github.com/anchore/grype/issues/431
- # Actually we are using netty 4.1.104
+ # Actually we are using netty 4.1.113.Final
- vulnerability: CVE-2014-3488 # solved in netty 3.9.2
- vulnerability: CVE-2015-2156 # solved in netty 4.1.42
- vulnerability: CVE-2019-16869 # solved in netty 4.1.42
@@ -14,5 +14,4 @@ ignore:
- vulnerability: CVE-2021-43797 # solved in netty 4.1.71
- vulnerability: CVE-2022-24823 # solved in netty 4.1.77
- vulnerability: CVE-2022-41881 # solved in netty 4.1.86
- - vulnerability: CVE-2023-34462 # solved in netty 4.1.94
- - vulnerability: CVE-2023-44487 # solved in netty 4.1.100
+ - vulnerability: CVE-2023-34462 # solved in netty 4.1.94
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index 6646e666..acecd50f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -11,7 +11,7 @@ RUN mvn clean package -DskipTests
#
# Docker RUNTIME
#
-FROM amazoncorretto:17-alpine3.19@sha256:539a0a188ce5a2bed985aa311e9a26d473c6c3f37d08d4fc8b6cf6c18075b9ab AS runtime
+FROM amazoncorretto:17-alpine3.20@sha256:1b1d0653d890ff313a1f7afadd1fd81f5ea742c9c48670d483b1bbccef98bb8b AS runtime
RUN apk --no-cache add shadow
RUN useradd --uid 10000 runner
diff --git a/helm/values-dev.yaml b/helm/values-dev.yaml
index e6930a1e..062d999d 100644
--- a/helm/values-dev.yaml
+++ b/helm/values-dev.yaml
@@ -9,11 +9,11 @@ microservice-chart:
resources:
requests:
- memory: "256Mi"
- cpu: "300m"
+ memory: "768Mi"
+ cpu: "150m"
limits:
- memory: "4Gi"
- cpu: "500m"
+ memory: "768Mi"
+ cpu: "600m"
autoscaling:
enable: true
@@ -37,7 +37,7 @@ microservice-chart:
tenantId: "7788edaf-0346-4068-9d79-c868aed15b3d"
envConfig:
- JAVA_TOOL_OPTIONS: "-Xms128m -Xmx4g -Djava.util.concurrent.ForkJoinPool.common.parallelism=7 -Dio.netty.eventLoopThreads=100 -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json -agentlib:jdwp=transport=dt_socket,server=y,address=8001,suspend=n -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.port=3002 -Dcom.sun.management.jmxremote.rmi.port=3003 -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+ JAVA_TOOL_OPTIONS: "-Xms256m -Xmx512m -Djava.util.concurrent.ForkJoinPool.common.parallelism=7 -Dio.netty.eventLoopThreads=100 -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json -agentlib:jdwp=transport=dt_socket,server=y,address=8001,suspend=n -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.port=3002 -Dcom.sun.management.jmxremote.rmi.port=3003 -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
CACHE_REFRESH_MS_RATE: "60000"
REDIS_CACHE_ENABLED: "true"
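Note on the sizing above (the prod and uat values below get the same -Xms256m/-Xmx512m change): the heap cap now sits inside the 768Mi container limit, leaving headroom for metaspace, thread stacks, and Netty direct buffers, whereas the old -Xmx4g allowed the heap alone to reach the previous 4Gi limit. A minimal sketch to verify the effective heap from inside the container, using only standard JDK APIs:

    public class HeapCheck {
        public static void main(String[] args) {
            // Runtime#maxMemory reflects the -Xmx actually applied via JAVA_TOOL_OPTIONS
            System.out.println("max heap MiB: " + Runtime.getRuntime().maxMemory() / (1024 * 1024));
        }
    }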
diff --git a/helm/values-prod.yaml b/helm/values-prod.yaml
index 20fc9e60..d5ee5278 100644
--- a/helm/values-prod.yaml
+++ b/helm/values-prod.yaml
@@ -36,7 +36,7 @@ microservice-chart:
tenantId: "7788edaf-0346-4068-9d79-c868aed15b3d"
envConfig:
- JAVA_TOOL_OPTIONS: "-Xms128m -Xmx4g -Djava.util.concurrent.ForkJoinPool.common.parallelism=7 -Dio.netty.eventLoopThreads=100 -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json"
+ JAVA_TOOL_OPTIONS: "-Xms256m -Xmx512m -Djava.util.concurrent.ForkJoinPool.common.parallelism=7 -Dio.netty.eventLoopThreads=100 -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json"
CACHE_REFRESH_MS_RATE: "60000"
REDIS_CACHE_ENABLED: "true"
diff --git a/helm/values-uat.yaml b/helm/values-uat.yaml
index 6aaf1545..696c923e 100644
--- a/helm/values-uat.yaml
+++ b/helm/values-uat.yaml
@@ -38,7 +38,7 @@ microservice-chart:
tenantId: "7788edaf-0346-4068-9d79-c868aed15b3d"
envConfig:
- JAVA_TOOL_OPTIONS: '-Xms128m -Xmx4g -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json'
+ JAVA_TOOL_OPTIONS: '-Xms256m -Xmx512m -javaagent:/app/applicationinsights-agent.jar -Dapplicationinsights.configuration.file=/mnt/file-config-external/appinsights-config/applicationinsights.json'
CACHE_REFRESH_MS_RATE: "10000"
REDIS_CACHE_ENABLED: "true"
diff --git a/helm/values.yaml b/helm/values.yaml
index c01f2445..ca6af1c4 100644
--- a/helm/values.yaml
+++ b/helm/values.yaml
@@ -64,6 +64,7 @@ microservice-chart:
_JAVA_OPTIONS: "-Djava.util.concurrent.ForkJoinPool.common.parallelism=7 -Dio.netty.eventLoopThreads=100 -Dreactor.netty.ioWorkerCount=4 -Dreactor.netty.pool.maxConnections=16"
DELETE_PAGINATION_SIZE: "60"
DELETE_DELAY_TIME: "1000"
+ HEALTH_MONGO_ENABLED: "false"
externalConfigMapValues:
idpay-common:
diff --git a/pom.xml b/pom.xml
index 36670843..eb7e8268 100644
--- a/pom.xml
+++ b/pom.xml
@@ -4,7 +4,7 @@
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
- <version>3.1.8</version>
+ <version>3.3.3</version>
</parent>
<groupId>it.gov.pagopa</groupId>
@@ -226,29 +226,36 @@
+ <dependency>
+ <groupId>com.azure.spring</groupId>
+ <artifactId>spring-cloud-azure-dependencies</artifactId>
+ <version>5.16.0</version>
+ <type>pom</type>
+ <scope>import</scope>
+ </dependency>
<dependency>
<groupId>org.springframework.cloud</groupId>
<artifactId>spring-cloud-dependencies</artifactId>
- <version>2022.0.4</version>
+ <version>2023.0.2</version>
<type>pom</type>
<scope>import</scope>
</dependency>
-
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-bom</artifactId>
-
- <version>4.1.104.Final</version>
+ <version>4.1.113.Final</version>
<type>pom</type>
<scope>import</scope>
</dependency>
-
- <dependency>
- <groupId>com.azure.spring</groupId>
- <artifactId>spring-cloud-azure-dependencies</artifactId>
- <version>5.8.0</version>
- <type>pom</type>
- <scope>import</scope>
- </dependency>
+ <dependency>
+ <groupId>io.projectreactor.netty</groupId>
+ <artifactId>reactor-netty-core</artifactId>
+ <version>1.1.22</version>
+ </dependency>
+ <dependency>
+ <groupId>io.projectreactor.netty</groupId>
+ <artifactId>reactor-netty-http</artifactId>
+ <version>1.1.22</version>
+ </dependency>
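Both the Azure BOM and netty-bom are imported above, with reactor-netty-core/reactor-netty-http pinned explicitly, so it is worth confirming which Netty version actually wins on the classpath (mvn dependency:tree -Dincludes=io.netty shows the same from the build side). A small runtime sketch, assuming only that netty-common is present:

    import io.netty.util.Version;

    public class NettyVersionCheck {
        public static void main(String[] args) {
            // Version.identify() lists each Netty artifact on the classpath with its
            // resolved version, e.g. netty-common -> 4.1.113.Final
            Version.identify().forEach((artifact, v) -> System.out.println(artifact + " -> " + v));
        }
    }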
diff --git a/src/test/java/it/gov/pagopa/common/kafka/KafkaTestUtilitiesService.java b/src/test/java/it/gov/pagopa/common/kafka/KafkaTestUtilitiesService.java
index 7167b68e..99ef1cda 100644
--- a/src/test/java/it/gov/pagopa/common/kafka/KafkaTestUtilitiesService.java
+++ b/src/test/java/it/gov/pagopa/common/kafka/KafkaTestUtilitiesService.java
@@ -1,13 +1,10 @@
package it.gov.pagopa.common.kafka;
-import ch.qos.logback.classic.LoggerContext;
-import ch.qos.logback.classic.spi.ILoggingEvent;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import it.gov.pagopa.common.kafka.utils.KafkaConstants;
-import it.gov.pagopa.common.reactive.kafka.consumer.BaseKafkaConsumer;
-import it.gov.pagopa.common.utils.MemoryAppender;
import it.gov.pagopa.common.utils.TestUtils;
+import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.clients.admin.TopicListing;
@@ -20,7 +17,6 @@
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.junit.jupiter.api.Assertions;
-import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
@@ -83,28 +79,30 @@ public KafkaTemplate<byte[], byte[]> testPublisher(ProducerFactory<byte[], byte[]> producerFactory) {
- kafkaBroker.doWithAdmin(admin -> {
- try {
- Collection<TopicListing> topics = admin.listTopics().listings().get();
- admin.deleteRecords(
- admin.listOffsets(
- topics.stream()
- .filter(topicListing -> !topicListing.isInternal())
- .flatMap(t -> IntStream.range(0, kafkaBroker.getPartitionsPerTopic())
- .boxed()
- .map(p -> new TopicPartition(t.name(), p)))
- .collect(Collectors.toMap(tp -> tp,
- tp -> OffsetSpec.latest()))
- ).all().get().entrySet().stream()
- .filter(e -> e.getValue().offset() > 0)
- .collect(Collectors.toMap(Map.Entry::getKey,
- e -> RecordsToDelete.beforeOffset(e.getValue().offset()))))
- .all().get();
-
- } catch (InterruptedException | ExecutionException e) {
- throw new IllegalStateException("Something gone wrong while emptying topics", e);
- }
- });
+ Map<String, Object> brokerConfig = Map.of(
+ "bootstrap.servers", kafkaBroker.getBrokersAsString()
+ );
+
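+ // create a dedicated AdminClient; try-with-resources closes it even if a get() below throws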
+ try (AdminClient admin = AdminClient.create(brokerConfig)) {
+ Collection<TopicListing> topics = admin.listTopics().listings().get();
+ admin.deleteRecords(
+ admin.listOffsets(
+ topics.stream()
+ .filter(topicListing -> !topicListing.isInternal())
+ .flatMap(t -> IntStream.range(0, kafkaBroker.getPartitionsPerTopic())
+ .boxed()
+ .map(p -> new TopicPartition(t.name(), p)))
+ .collect(Collectors.toMap(tp -> tp,
+ tp -> OffsetSpec.latest()))
+ ).all().get().entrySet().stream()
+ .filter(e -> e.getValue().offset() > 0)
+ .collect(Collectors.toMap(Map.Entry::getKey,
+ e -> RecordsToDelete.beforeOffset(e.getValue().offset()))))
+ .all().get();
+
+ } catch (InterruptedException | ExecutionException e) {
+ throw new IllegalStateException("Something went wrong while clearing topics", e);
+ }
}
/** It will return usefull URLs related to embedded kafka */
@@ -223,10 +221,10 @@ public void publishIntoEmbeddedKafka(String topic, Integer partition, Iterable<Header> headers, String key, String payload) {
- ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(topic, partition, key == null ? null : key.getBytes(StandardCharsets.UTF_8), payload.getBytes(StandardCharsets.UTF_8), headers);
- template.send(record);
+ ProducerRecord<byte[], byte[]> rec = new ProducerRecord<>(topic, partition, key == null ? null : key.getBytes(StandardCharsets.UTF_8), payload.getBytes(StandardCharsets.UTF_8), headers);
+ template.send(rec);
}
//endregion
@@ -279,55 +277,16 @@ public Map<TopicPartition, Long> checkPublishedOffsets(String topic, long expectedPublishedMessages) {
}
//endregion
-//region check commit by logs
- protected MemoryAppender commitLogMemoryAppender;
- /** To be called before each test in order to perform the asserts on {@link #assertCommitOrder(String, int)} */
- public void setupCommitLogMemoryAppender() {
- ch.qos.logback.classic.Logger logger = (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(BaseKafkaConsumer.class.getName());
- commitLogMemoryAppender = new MemoryAppender();
- commitLogMemoryAppender.setContext((LoggerContext) LoggerFactory.getILoggerFactory());
- logger.setLevel(ch.qos.logback.classic.Level.INFO);
- logger.addAppender(commitLogMemoryAppender);
- commitLogMemoryAppender.start();
- }
-
- private final Pattern partitionCommitsPattern = Pattern.compile("partition (\\d+): (\\d+) - (\\d+)");
- /** It will assert the right offset commit and the total messages by the provided {@link BaseKafkaConsumer#getFlowName()}.
- * In order to be used, you have to call {@link #setupCommitLogMemoryAppender()} before each test */
- public void assertCommitOrder(String flowName, int totalSendMessages) {
- Map<Integer, Integer> partition2last = new HashMap<>(Map.of(0, -1, 1, -1));
- for (ILoggingEvent loggedEvent : commitLogMemoryAppender.getLoggedEvents()) {
- if(loggedEvent.getMessage().equals("[KAFKA_COMMIT][{}] Committing {} messages: {}") && flowName.equals(loggedEvent.getArgumentArray()[0])){
- Arrays.stream(((String)loggedEvent.getArgumentArray()[2]).split(";"))
- .forEach(s -> {
- Matcher matcher = partitionCommitsPattern.matcher(s);
- Assertions.assertTrue(matcher.matches(), "Unexpected partition commit string: " + s);
- int partition = Integer.parseInt(matcher.group(1));
- int startOffset = Integer.parseInt(matcher.group(2));
- int endOffset = Integer.parseInt(matcher.group(3));
- Assertions.assertTrue(endOffset>=startOffset, "EndOffset less than StartOffset!: " + s);
-
- Integer lastCommittedOffset = partition2last.get(partition);
- Assertions.assertEquals(lastCommittedOffset, startOffset-1);
- partition2last.put(partition, endOffset);
- });
- }
- }
-
- Assertions.assertEquals(totalSendMessages, partition2last.values().stream().mapToInt(x->x+1).sum());
- }
-//endregion
-
//region error topic
public void checkErrorsPublished(String topicErrors, Pattern errorUseCaseIdPatternMatch, int expectedErrorMessagesNumber, long maxWaitingMs, List<Pair<Supplier<String>, java.util.function.Consumer<ConsumerRecord<String, String>>>> errorUseCases) {
final List<ConsumerRecord<String, String>> errors = consumeMessages(topicErrors, expectedErrorMessagesNumber, maxWaitingMs);
- for (final ConsumerRecord<String, String> record : errors) {
- final Matcher matcher = errorUseCaseIdPatternMatch.matcher(record.value());
+ for (final ConsumerRecord<String, String> rec : errors) {
+ final Matcher matcher = errorUseCaseIdPatternMatch.matcher(rec.value());
int useCaseId = matcher.find() ? Integer.parseInt(matcher.group(1)) : -1;
if (useCaseId == -1) {
- throw new IllegalStateException("UseCaseId not recognized! " + record.value());
+ throw new IllegalStateException("UseCaseId not recognized! " + rec.value());
}
- errorUseCases.get(useCaseId).getSecond().accept(record);
+ errorUseCases.get(useCaseId).getSecond().accept(rec);
}
}
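The clearTopics rewrite above drops what was presumably the broker-managed doWithAdmin callback (no longer available on newer spring-kafka) in favour of a self-managed client. The core pattern, as a standalone sketch with a placeholder broker address:

    import java.util.Map;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;

    public class AdminClientSketch {
        public static void main(String[] args) throws Exception {
            // AdminClient is AutoCloseable: its internal threads are shut down
            // even when one of the KafkaFuture.get() calls throws
            try (AdminClient admin = AdminClient.create(
                    Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"))) {
                admin.listTopics().names().get().forEach(System.out::println);
            }
        }
    }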
diff --git a/src/test/java/it/gov/pagopa/common/reactive/pdv/service/UserFiscalCodeServiceImplTest.java b/src/test/java/it/gov/pagopa/common/reactive/pdv/service/UserFiscalCodeServiceImplTest.java
index a918e014..7a322ae2 100644
--- a/src/test/java/it/gov/pagopa/common/reactive/pdv/service/UserFiscalCodeServiceImplTest.java
+++ b/src/test/java/it/gov/pagopa/common/reactive/pdv/service/UserFiscalCodeServiceImplTest.java
@@ -2,7 +2,6 @@
import com.google.common.cache.Cache;
import it.gov.pagopa.common.reactive.pdv.dto.UserInfoPDV;
-import it.gov.pagopa.common.utils.TestUtils;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -63,7 +62,6 @@ void getUserInfoNotInCache(){
// Then
Assertions.assertNotNull(result);
- TestUtils.checkNotNullFields(result);
Assertions.assertEquals("FISCALCODE_RETRIEVED", result);
Assertions.assertNotNull(inspectCache.getIfPresent(userIdTest));
Assertions.assertEquals(initialSizeCache+1,inspectCache.size());
@@ -83,10 +81,8 @@ void getUserInfoInCache(){
Assertions.assertEquals(initialSizeCache,inspectCache.size());
String result = userFiscalCodeService.getUserFiscalCode(userIdTest).block();
-
// Then
Assertions.assertNotNull(result);
- TestUtils.checkNotNullFields(result);
Assertions.assertEquals("FISCALCODE_0", result);
Assertions.assertNotNull(inspectCache.getIfPresent(userIdTest));
Assertions.assertEquals(initialSizeCache,inspectCache.size());
diff --git a/src/test/java/it/gov/pagopa/common/utils/TestUtils.java b/src/test/java/it/gov/pagopa/common/utils/TestUtils.java
index e3dfa401..f5678bbc 100644
--- a/src/test/java/it/gov/pagopa/common/utils/TestUtils.java
+++ b/src/test/java/it/gov/pagopa/common/utils/TestUtils.java
@@ -62,6 +62,9 @@ public static void checkNullFields(Object o, String... excludedFields) {
Set<String> excludedFieldsSet = new HashSet<>(Arrays.asList(excludedFields));
org.springframework.util.ReflectionUtils.doWithFields(o.getClass(),
f -> {
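+ // primitive fields can never be null (Field#get would box them into non-null wrappers), so skip them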
+ if (f.getType().isPrimitive()) {
+ return;
+ }
f.setAccessible(true);
Object value = f.get(o);
Assertions.assertNull(value, "The field %s of the input object of type %s is not null: %s".formatted(f.getName(), o.getClass(), value));
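The primitive guard added above is needed because Field#get boxes primitives: an int field comes back as a non-null Integer, so the assertNull in checkNullFields would always fail on it. A self-contained illustration:

    import java.lang.reflect.Field;

    public class BoxingDemo {
        int counter; // primitive: defaults to 0 and can never be null

        public static void main(String[] args) throws Exception {
            Field f = BoxingDemo.class.getDeclaredField("counter");
            // Field#get boxes the primitive, so this prints "0", never "null"
            System.out.println(f.get(new BoxingDemo()));
        }
    }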