From e8de4c5d68267b023238cb8899198cfab3440b14 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 7 Jan 2025 10:50:23 +0900
Subject: [PATCH 1/3] Bump the dependencies group with 2 updates (#2451)

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 build.gradle | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/build.gradle b/build.gradle
index e8a69417e..069cd7e34 100644
--- a/build.gradle
+++ b/build.gradle
@@ -33,14 +33,14 @@ subprojects {
     postgresqlDriverVersion = '42.7.4'
     oracleDriverVersion = '21.16.0.0'
     sqlserverDriverVersion = '11.2.3.jre8'
-    sqliteDriverVersion = '3.47.1.0'
+    sqliteDriverVersion = '3.47.2.0'
     yugabyteDriverVersion = '42.7.3-yb-2'
     mariadDbDriverVersion = '3.5.1'
     picocliVersion = '4.7.6'
     commonsTextVersion = '1.13.0'
     junitVersion = '5.11.4'
     commonsLangVersion = '3.17.0'
-    assertjVersion = '3.27.0'
+    assertjVersion = '3.27.2'
     mockitoVersion = '4.11.0'
     spotbugsVersion = '4.8.6'
     errorproneVersion = '2.10.0'

From a2a68f9264e99617bf57db8b83251c359ab33890 Mon Sep 17 00:00:00 2001
From: inv-jishnu <31100916+inv-jishnu@users.noreply.github.com>
Date: Tue, 7 Jan 2025 10:48:52 +0530
Subject: [PATCH 2/3] Add export options validator (#2435)

Co-authored-by: Peckstadt Yves
---
 .../com/scalar/db/common/error/CoreError.java |  22 +++
 .../ExportOptionsValidationException.java     |  14 ++
 .../validation/ExportOptionsValidator.java    | 156 +++++++++++++++
 .../ExportOptionsValidatorTest.java           | 183 ++++++++++++++++++
 4 files changed, 375 insertions(+)
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidationException.java
 create mode 100644 data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidator.java
 create mode 100644 data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidatorTest.java

diff --git a/core/src/main/java/com/scalar/db/common/error/CoreError.java b/core/src/main/java/com/scalar/db/common/error/CoreError.java
index eeafaea87..397ac6ac0 100644
--- a/core/src/main/java/com/scalar/db/common/error/CoreError.java
+++ b/core/src/main/java/com/scalar/db/common/error/CoreError.java
@@ -696,6 +696,28 @@ public enum CoreError implements ScalarDbError {
       "The attribute-based access control feature is not enabled. To use this feature, you must enable it. Note that this feature is supported only in the ScalarDB Enterprise edition",
       "",
       ""),
+  DATA_LOADER_CLUSTERING_KEY_NOT_FOUND(
+      Category.USER_ERROR, "0153", "The provided clustering key %s was not found", "", ""),
+  DATA_LOADER_INVALID_PROJECTION(
+      Category.USER_ERROR, "0154", "The column '%s' was not found", "", ""),
+  DATA_LOADER_INCOMPLETE_PARTITION_KEY(
+      Category.USER_ERROR,
+      "0155",
+      "The provided partition key is incomplete. Required key: %s",
+      "",
+      ""),
+  DATA_LOADER_CLUSTERING_KEY_ORDER_MISMATCH(
+      Category.USER_ERROR,
+      "0156",
+      "The provided clustering key order does not match the table schema. Required order: %s",
+      "",
+      ""),
+  DATA_LOADER_PARTITION_KEY_ORDER_MISMATCH(
+      Category.USER_ERROR,
+      "0157",
+      "The provided partition key order does not match the table schema. Required order: %s",
+      "",
+      ""),
 
   //
   // Errors for the concurrency error category
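Each entry above reaches users through ScalarDbError's buildMessage, which formats the %s placeholders and prepends the full error code. A minimal sketch of how one of the new messages renders; the exact code prefix shown in the comment (e.g. "DB-CORE-10155" for a USER_ERROR) is an assumption, not something this patch defines:

    // Hypothetical rendering of the new incomplete-partition-key error.
    // buildMessage(Object...) interpolates the arguments into the template.
    String message =
        CoreError.DATA_LOADER_INCOMPLETE_PARTITION_KEY.buildMessage("[pk1, pk2]");
    // -> something like:
    // "DB-CORE-10155: The provided partition key is incomplete. Required key: [pk1, pk2]"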
Required order: %s", + "", + ""), // // Errors for the concurrency error category diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidationException.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidationException.java new file mode 100644 index 000000000..42e342dec --- /dev/null +++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidationException.java @@ -0,0 +1,14 @@ +package com.scalar.db.dataloader.core.dataexport.validation; + +/** A custom exception for export options validation errors */ +public class ExportOptionsValidationException extends Exception { + + /** + * Class constructor + * + * @param message error message + */ + public ExportOptionsValidationException(String message) { + super(message); + } +} diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidator.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidator.java new file mode 100644 index 000000000..7bf7645b0 --- /dev/null +++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidator.java @@ -0,0 +1,156 @@ +package com.scalar.db.dataloader.core.dataexport.validation; + +import com.scalar.db.api.Scan; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.error.CoreError; +import com.scalar.db.dataloader.core.ScanRange; +import com.scalar.db.dataloader.core.dataexport.ExportOptions; +import com.scalar.db.io.Column; +import com.scalar.db.io.Key; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.List; +import lombok.AccessLevel; +import lombok.NoArgsConstructor; + +/** + * A validator for ensuring that export options are consistent with the ScalarDB table metadata and + * follow the defined constraints. + */ +@NoArgsConstructor(access = AccessLevel.PRIVATE) +public class ExportOptionsValidator { + + /** + * Validates the export request. + * + * @param exportOptions The export options provided by the user. + * @param tableMetadata The metadata of the ScalarDB table to validate against. + * @throws ExportOptionsValidationException If the export options are invalid. 
diff --git a/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidator.java b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidator.java
new file mode 100644
index 000000000..7bf7645b0
--- /dev/null
+++ b/data-loader/core/src/main/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidator.java
@@ -0,0 +1,156 @@
+package com.scalar.db.dataloader.core.dataexport.validation;
+
+import com.scalar.db.api.Scan;
+import com.scalar.db.api.TableMetadata;
+import com.scalar.db.common.error.CoreError;
+import com.scalar.db.dataloader.core.ScanRange;
+import com.scalar.db.dataloader.core.dataexport.ExportOptions;
+import com.scalar.db.io.Column;
+import com.scalar.db.io.Key;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+
+/**
+ * A validator for ensuring that export options are consistent with the ScalarDB table metadata and
+ * follow the defined constraints.
+ */
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
+public class ExportOptionsValidator {
+
+  /**
+   * Validates the export request.
+   *
+   * @param exportOptions The export options provided by the user.
+   * @param tableMetadata The metadata of the ScalarDB table to validate against.
+   * @throws ExportOptionsValidationException If the export options are invalid.
+   */
+  public static void validate(ExportOptions exportOptions, TableMetadata tableMetadata)
+      throws ExportOptionsValidationException {
+    LinkedHashSet<String> partitionKeyNames = tableMetadata.getPartitionKeyNames();
+    LinkedHashSet<String> clusteringKeyNames = tableMetadata.getClusteringKeyNames();
+    ScanRange scanRange = exportOptions.getScanRange();
+
+    validatePartitionKey(partitionKeyNames, exportOptions.getScanPartitionKey());
+    validateProjectionColumns(tableMetadata.getColumnNames(), exportOptions.getProjectionColumns());
+    validateSortOrders(clusteringKeyNames, exportOptions.getSortOrders());
+
+    if (scanRange.getScanStartKey() != null) {
+      validateClusteringKey(clusteringKeyNames, scanRange.getScanStartKey());
+    }
+    if (scanRange.getScanEndKey() != null) {
+      validateClusteringKey(clusteringKeyNames, scanRange.getScanEndKey());
+    }
+  }
+
+  /*
+   * Checks if the provided partition key is available in the ScalarDB table.
+   * @param partitionKeyNames List of partition key names available in the ScalarDB table
+   * @param key The ScalarDB partition key to be validated
+   * @throws ExportOptionsValidationException if the key is incomplete or does not match the
+   *     partition key definition
+   */
+  private static void validatePartitionKey(LinkedHashSet<String> partitionKeyNames, Key key)
+      throws ExportOptionsValidationException {
+    if (partitionKeyNames == null || key == null) {
+      return;
+    }
+
+    // Make sure that all partition key columns are provided
+    if (partitionKeyNames.size() != key.getColumns().size()) {
+      throw new ExportOptionsValidationException(
+          CoreError.DATA_LOADER_INCOMPLETE_PARTITION_KEY.buildMessage(partitionKeyNames));
+    }
+
+    // Check if the order of columns in key.getColumns() matches the order in partitionKeyNames
+    Iterator<String> partitionKeyIterator = partitionKeyNames.iterator();
+    for (Column<?> column : key.getColumns()) {
+      // Check if the column names match in order
+      if (!partitionKeyIterator.hasNext()
+          || !partitionKeyIterator.next().equals(column.getName())) {
+        throw new ExportOptionsValidationException(
+            CoreError.DATA_LOADER_PARTITION_KEY_ORDER_MISMATCH.buildMessage(partitionKeyNames));
+      }
+    }
+  }
+
+  private static void validateSortOrders(
+      LinkedHashSet<String> clusteringKeyNames, List<Scan.Ordering> sortOrders)
+      throws ExportOptionsValidationException {
+    if (sortOrders == null || sortOrders.isEmpty()) {
+      return;
+    }
+
+    for (Scan.Ordering sortOrder : sortOrders) {
+      checkIfColumnExistsAsClusteringKey(clusteringKeyNames, sortOrder.getColumnName());
+    }
+  }
+
+  /**
+   * Validates that the clustering key columns in the given Key object match the expected order
+   * defined in the clusteringKeyNames. The Key can be a prefix of the clusteringKeyNames, but the
+   * order must remain consistent.
+   *
+   * @param clusteringKeyNames the expected ordered set of clustering key names
+   * @param key the Key object containing the actual clustering key columns
+   * @throws ExportOptionsValidationException if the order or names of clustering keys do not match
+   */
+  private static void validateClusteringKey(LinkedHashSet<String> clusteringKeyNames, Key key)
+      throws ExportOptionsValidationException {
+    // If either clusteringKeyNames or key is null, no validation is needed
+    if (clusteringKeyNames == null || key == null) {
+      return;
+    }
+
+    // Create an iterator to traverse the clusteringKeyNames in order
+    Iterator<String> clusteringKeyIterator = clusteringKeyNames.iterator();
+
+    // Iterate through the columns in the given Key
+    for (Column<?> column : key.getColumns()) {
+      // If clusteringKeyNames have been exhausted but columns still exist in the Key,
+      // it indicates a mismatch
+      if (!clusteringKeyIterator.hasNext()) {
+        throw new ExportOptionsValidationException(
+            CoreError.DATA_LOADER_CLUSTERING_KEY_ORDER_MISMATCH.buildMessage(clusteringKeyNames));
+      }
+
+      // Get the next expected clustering key name
+      String expectedKey = clusteringKeyIterator.next();
+
+      // Check if the current column name matches the expected clustering key name
+      if (!column.getName().equals(expectedKey)) {
+        throw new ExportOptionsValidationException(
+            CoreError.DATA_LOADER_CLUSTERING_KEY_ORDER_MISMATCH.buildMessage(clusteringKeyNames));
+      }
+    }
+  }
+
+  private static void checkIfColumnExistsAsClusteringKey(
+      LinkedHashSet<String> clusteringKeyNames, String columnName)
+      throws ExportOptionsValidationException {
+    if (clusteringKeyNames == null || columnName == null) {
+      return;
+    }
+
+    if (!clusteringKeyNames.contains(columnName)) {
+      throw new ExportOptionsValidationException(
+          CoreError.DATA_LOADER_CLUSTERING_KEY_NOT_FOUND.buildMessage(columnName));
+    }
+  }
+
+  private static void validateProjectionColumns(
+      LinkedHashSet<String> columnNames, List<String> columns)
+      throws ExportOptionsValidationException {
+    if (columns == null || columns.isEmpty()) {
+      return;
+    }
+
+    for (String column : columns) {
+      if (!columnNames.contains(column)) {
+        throw new ExportOptionsValidationException(
+            CoreError.DATA_LOADER_INVALID_PROJECTION.buildMessage(column));
+      }
+    }
+  }
+}
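End to end, a single validate() call covers the partition key, projections, sort orders, and both scan-range bounds. Because the key-name sets are LinkedHashSets, both key checks are order-sensitive: for a table with partition key (pk1, pk2), a key built as (pk2, pk1) fails with DATA_LOADER_PARTITION_KEY_ORDER_MISMATCH even though both columns are present. A sketch of the intended call pattern, assuming the ExportOptions builder signature used by the tests that follow (namespace, table, and column names are placeholders, and java.util.Arrays is assumed imported):

    // Hypothetical caller: validate before exporting table ns.tbl as JSON.
    Key partitionKey =
        Key.newBuilder().add(IntColumn.of("pk1", 1)).add(IntColumn.of("pk2", 2)).build();
    ExportOptions exportOptions =
        ExportOptions.builder("ns", "tbl", partitionKey, FileFormat.JSON)
            .projectionColumns(Arrays.asList("pk1", "ck1"))
            .scanRange(new ScanRange(null, null, false, false))
            .build();
    // Throws ExportOptionsValidationException if any option conflicts with the metadata.
    ExportOptionsValidator.validate(exportOptions, tableMetadata);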
diff --git a/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidatorTest.java b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidatorTest.java
new file mode 100644
index 000000000..b36522a0f
--- /dev/null
+++ b/data-loader/core/src/test/java/com/scalar/db/dataloader/core/dataexport/validation/ExportOptionsValidatorTest.java
@@ -0,0 +1,183 @@
+package com.scalar.db.dataloader.core.dataexport.validation;
+
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import com.scalar.db.api.TableMetadata;
+import com.scalar.db.common.error.CoreError;
+import com.scalar.db.dataloader.core.FileFormat;
+import com.scalar.db.dataloader.core.ScanRange;
+import com.scalar.db.dataloader.core.dataexport.ExportOptions;
+import com.scalar.db.io.DataType;
+import com.scalar.db.io.IntColumn;
+import com.scalar.db.io.Key;
+import com.scalar.db.io.TextColumn;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedHashSet;
+import java.util.List;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+class ExportOptionsValidatorTest {
+
+  private TableMetadata singlePkCkMetadata;
+  private TableMetadata multiplePkCkMetadata;
+  private List<String> projectedColumns;
+
+  @BeforeEach
+  void setup() {
+    singlePkCkMetadata = createMockMetadata(1, 1);
+    multiplePkCkMetadata = createMockMetadata(2, 2);
+    projectedColumns = createProjectedColumns();
+  }
+
+  private TableMetadata createMockMetadata(int pkCount, int ckCount) {
+    TableMetadata.Builder builder = TableMetadata.newBuilder();
+
+    // Add partition keys
+    for (int i = 1; i <= pkCount; i++) {
+      builder.addColumn("pk" + i, DataType.INT);
+      builder.addPartitionKey("pk" + i);
+    }
+
+    // Add clustering keys
+    for (int i = 1; i <= ckCount; i++) {
+      builder.addColumn("ck" + i, DataType.TEXT);
+      builder.addClusteringKey("ck" + i);
+    }
+
+    return builder.build();
+  }
+
+  private List<String> createProjectedColumns() {
+    List<String> columns = new ArrayList<>();
+    columns.add("pk1");
+    columns.add("ck1");
+    return columns;
+  }
+
+  @Test
+  void validate_withValidExportOptionsForSinglePkCk_ShouldNotThrowException()
+      throws ExportOptionsValidationException {
+
+    Key partitionKey = Key.newBuilder().add(IntColumn.of("pk1", 1)).build();
+
+    ExportOptions exportOptions =
+        ExportOptions.builder("test", "sample", partitionKey, FileFormat.JSON)
+            .projectionColumns(projectedColumns)
+            .scanRange(new ScanRange(null, null, false, false))
+            .build();
+
+    ExportOptionsValidator.validate(exportOptions, singlePkCkMetadata);
+  }
+
+  @Test
+  void validate_withValidExportOptionsForMultiplePkCk_ShouldNotThrowException()
+      throws ExportOptionsValidationException {
+
+    Key partitionKey =
+        Key.newBuilder().add(IntColumn.of("pk1", 1)).add(IntColumn.of("pk2", 2)).build();
+
+    ExportOptions exportOptions =
+        ExportOptions.builder("test", "sample", partitionKey, FileFormat.JSON)
+            .projectionColumns(projectedColumns)
+            .scanRange(new ScanRange(null, null, false, false))
+            .build();
+
+    ExportOptionsValidator.validate(exportOptions, multiplePkCkMetadata);
+  }
+
+  @Test
+  void validate_withIncompletePartitionKeyForSinglePk_ShouldThrowException() {
+    Key incompletePartitionKey = Key.newBuilder().build();
+
+    ExportOptions exportOptions =
+        ExportOptions.builder("test", "sample", incompletePartitionKey, FileFormat.JSON).build();
+
+    assertThatThrownBy(() -> ExportOptionsValidator.validate(exportOptions, singlePkCkMetadata))
+        .isInstanceOf(ExportOptionsValidationException.class)
+        .hasMessage(
+            CoreError.DATA_LOADER_INCOMPLETE_PARTITION_KEY.buildMessage(
+                singlePkCkMetadata.getPartitionKeyNames()));
+  }
+
+  @Test
+  void validate_withIncompletePartitionKeyForMultiplePks_ShouldThrowException() {
+    Key incompletePartitionKey = Key.newBuilder().add(IntColumn.of("pk1", 1)).build();
+
+    ExportOptions exportOptions =
+        ExportOptions.builder("test", "sample", incompletePartitionKey, FileFormat.JSON).build();
+
+    assertThatThrownBy(() -> ExportOptionsValidator.validate(exportOptions, multiplePkCkMetadata))
+        .isInstanceOf(ExportOptionsValidationException.class)
+        .hasMessage(
+            CoreError.DATA_LOADER_INCOMPLETE_PARTITION_KEY.buildMessage(
+                multiplePkCkMetadata.getPartitionKeyNames()));
+  }
+
+  @Test
+  void validate_withInvalidProjectionColumn_ShouldThrowException() {
+    ExportOptions exportOptions =
+        ExportOptions.builder(
+                "test",
+                "sample",
+                Key.newBuilder().add(IntColumn.of("pk1", 1)).build(),
+                FileFormat.JSON)
+            .projectionColumns(Collections.singletonList("invalid_column"))
+            .build();
+
+    assertThatThrownBy(() -> ExportOptionsValidator.validate(exportOptions, singlePkCkMetadata))
+        .isInstanceOf(ExportOptionsValidationException.class)
+        .hasMessage(CoreError.DATA_LOADER_INVALID_PROJECTION.buildMessage("invalid_column"));
+  }
+
+  @Test
+  void validate_withInvalidClusteringKeyInScanRange_ShouldThrowException() {
+    ScanRange scanRange =
+        new ScanRange(
+            Key.newBuilder().add(TextColumn.of("invalid_ck", "value")).build(),
+            Key.newBuilder().add(TextColumn.of("ck1", "value")).build(),
+            false,
+            false);
+
+    ExportOptions exportOptions =
+        ExportOptions.builder("test", "sample", createValidPartitionKey(), FileFormat.JSON)
+            .scanRange(scanRange)
+            .build();
+
+    assertThatThrownBy(() -> ExportOptionsValidator.validate(exportOptions, singlePkCkMetadata))
+        .isInstanceOf(ExportOptionsValidationException.class)
+        .hasMessage(CoreError.DATA_LOADER_CLUSTERING_KEY_ORDER_MISMATCH.buildMessage("[ck1]"));
+  }
+
+  @Test
+  void validate_withInvalidPartitionKeyOrder_ShouldThrowException() {
+    // Partition key names are expected to be "pk1", "pk2"
+    LinkedHashSet<String> partitionKeyNames = new LinkedHashSet<>();
+    partitionKeyNames.add("pk1");
+    partitionKeyNames.add("pk2");
+
+    // Create a partition key with reversed order, expecting an error
+    Key invalidPartitionKey =
+        Key.newBuilder()
+            .add(IntColumn.of("pk2", 2)) // Incorrect order
+            .add(IntColumn.of("pk1", 1)) // Incorrect order
+            .build();
+
+    ExportOptions exportOptions =
+        ExportOptions.builder("test", "sample", invalidPartitionKey, FileFormat.JSON)
+            .projectionColumns(projectedColumns)
+            .scanRange(new ScanRange(null, null, false, false))
+            .build();
+
+    // Verify that the validator throws the correct exception
+    assertThatThrownBy(() -> ExportOptionsValidator.validate(exportOptions, multiplePkCkMetadata))
+        .isInstanceOf(ExportOptionsValidationException.class)
+        .hasMessage(
+            CoreError.DATA_LOADER_PARTITION_KEY_ORDER_MISMATCH.buildMessage(partitionKeyNames));
+  }
+
+  private Key createValidPartitionKey() {
+    return Key.newBuilder().add(IntColumn.of("pk1", 1)).build();
+  }
+}

From 822db196c41c4305e33576eea350aec7e8b57661 Mon Sep 17 00:00:00 2001
From: Toshihiro Suzuki
Date: Thu, 9 Jan 2025 14:27:04 +0900
Subject: [PATCH 3/3] Add utility setter methods for ImmutableMap to AbacOperationAttributes (#2448)

---
 .../db/api/AbacOperationAttributes.java       | 11 ++++++
 .../db/api/AbacOperationAttributesTest.java   | 35 +++++++++++++++++--
 2 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java b/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java
index b558f8726..c4095f6a7 100644
--- a/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java
+++ b/core/src/main/java/com/scalar/db/api/AbacOperationAttributes.java
@@ -1,5 +1,6 @@
 package com.scalar.db.api;
 
+import com.google.common.collect.ImmutableMap;
 import java.util.Map;
 import java.util.Optional;
 
@@ -16,6 +17,11 @@ public static void setReadTag(Map<String, String> attributes, String policyName,
     attributes.put(READ_TAG_PREFIX + policyName, readTag);
   }
 
+  public static void setReadTag(
+      ImmutableMap.Builder<String, String> attributesBuilder, String policyName, String readTag) {
+    attributesBuilder.put(READ_TAG_PREFIX + policyName, readTag);
+  }
+
   public static void clearReadTag(Map<String, String> attributes, String policyName) {
     attributes.remove(READ_TAG_PREFIX + policyName);
   }
@@ -29,6 +35,11 @@ public static void setWriteTag(
     attributes.put(WRITE_TAG_PREFIX + policyName, writeTag);
   }
 
+  public static void setWriteTag(
+      ImmutableMap.Builder<String, String> attributesBuilder, String policyName, String writeTag) {
+    attributesBuilder.put(WRITE_TAG_PREFIX + policyName, writeTag);
+  }
+
   public static void clearWriteTag(Map<String, String> attributes, String policyName) {
     attributes.remove(WRITE_TAG_PREFIX + policyName);
   }
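The new overloads target call sites that assemble operation attributes into an ImmutableMap before handing them to an operation. A minimal usage sketch (policy name and tag values are placeholders):

    // Hypothetical caller: collect ABAC attributes, then freeze them.
    ImmutableMap.Builder<String, String> attributesBuilder = ImmutableMap.builder();
    AbacOperationAttributes.setReadTag(attributesBuilder, "policy1", "readTag");
    AbacOperationAttributes.setWriteTag(attributesBuilder, "policy1", "writeTag");
    Map<String, String> attributes = attributesBuilder.build(); // immutable from here on

Note that only the setters gain builder overloads: ImmutableMap.Builder has no removal API, so clearReadTag and clearWriteTag remain Map-only, as the diff above shows.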
diff --git a/core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java b/core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java
index 56ed98af0..f58351234 100644
--- a/core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java
+++ b/core/src/test/java/com/scalar/db/api/AbacOperationAttributesTest.java
@@ -2,6 +2,7 @@
 
 import static org.assertj.core.api.Assertions.assertThat;
 
+import com.google.common.collect.ImmutableMap;
 import com.scalar.db.io.Key;
 import java.util.HashMap;
 import java.util.Map;
@@ -11,7 +12,7 @@ public class AbacOperationAttributesTest {
 
   @Test
-  public void setReadTag_ShouldSetReadTag() {
+  public void setReadTag_MapGiven_ShouldSetReadTag() {
     // Arrange
     Map<String, String> attributes = new HashMap<>();
     String policyName = "policyName";
@@ -25,6 +26,21 @@ public void setReadTag_ShouldSetReadTag() {
         .containsEntry(AbacOperationAttributes.READ_TAG_PREFIX + policyName, readTag);
   }
 
+  @Test
+  public void setReadTag_ImmutableMapBuilderGiven_ShouldSetReadTag() {
+    // Arrange
+    ImmutableMap.Builder<String, String> attributesBuilder = ImmutableMap.builder();
+    String policyName = "policyName";
+    String readTag = "readTag";
+
+    // Act
+    AbacOperationAttributes.setReadTag(attributesBuilder, policyName, readTag);
+
+    // Assert
+    assertThat(attributesBuilder.build())
+        .containsEntry(AbacOperationAttributes.READ_TAG_PREFIX + policyName, readTag);
+  }
+
   @Test
   public void clearReadTag_ShouldClearReadTag() {
     // Arrange
@@ -60,7 +76,7 @@ public void clearReadTags_ShouldClearReadTags() {
   }
 
   @Test
-  public void setWriteTag_ShouldSetWriteTag() {
+  public void setWriteTag_MapGiven_ShouldSetWriteTag() {
     // Arrange
     Map<String, String> attributes = new HashMap<>();
     String policyName = "policyName";
@@ -74,6 +90,21 @@ public void setWriteTag_ShouldSetWriteTag() {
         .containsEntry(AbacOperationAttributes.WRITE_TAG_PREFIX + policyName, writeTag);
   }
 
+  @Test
+  public void setWriteTag_ImmutableMapBuilderGiven_ShouldSetWriteTag() {
+    // Arrange
+    ImmutableMap.Builder<String, String> attributesBuilder = ImmutableMap.builder();
+    String policyName = "policyName";
+    String writeTag = "writeTag";
+
+    // Act
+    AbacOperationAttributes.setWriteTag(attributesBuilder, policyName, writeTag);
+
+    // Assert
+    assertThat(attributesBuilder.build())
+        .containsEntry(AbacOperationAttributes.WRITE_TAG_PREFIX + policyName, writeTag);
+  }
+
   @Test
   public void clearWriteTag_ShouldClearWriteTag() {
     // Arrange