diff --git a/sdk/cosmosdb/cosmos/CHANGELOG.md b/sdk/cosmosdb/cosmos/CHANGELOG.md index fcf947fb92cf..3ccfee075fff 100644 --- a/sdk/cosmosdb/cosmos/CHANGELOG.md +++ b/sdk/cosmosdb/cosmos/CHANGELOG.md @@ -4,6 +4,8 @@ ### Features Added +- Partition merge support: This feature adds support for the partition merge (preview) feature. Requests from the JS SDK will not be blocked when the feature is enabled. [docs](https://learn.microsoft.com/azure/cosmos-db/merge) + ### Breaking Changes ### Bugs Fixed diff --git a/sdk/cosmosdb/cosmos/review/cosmos.api.md b/sdk/cosmosdb/cosmos/review/cosmos.api.md index 454f36a290fc..94ddebef82c8 100644 --- a/sdk/cosmosdb/cosmos/review/cosmos.api.md +++ b/sdk/cosmosdb/cosmos/review/cosmos.api.md @@ -502,6 +502,7 @@ export const Constants: { EnableCrossPartitionQuery: string; ParallelizeCrossPartitionQuery: string; ResponseContinuationTokenLimitInKB: string; + SDKSupportedCapabilities: string; PopulateQueryMetrics: string; QueryMetrics: string; PopulateIndexMetrics: string; diff --git a/sdk/cosmosdb/cosmos/src/common/constants.ts b/sdk/cosmosdb/cosmos/src/common/constants.ts index 75328fa7909a..a3a1a8132be8 100644 --- a/sdk/cosmosdb/cosmos/src/common/constants.ts +++ b/sdk/cosmosdb/cosmos/src/common/constants.ts @@ -98,6 +98,7 @@ export const Constants = { EnableCrossPartitionQuery: "x-ms-documentdb-query-enablecrosspartition", ParallelizeCrossPartitionQuery: "x-ms-documentdb-query-parallelizecrosspartitionquery", ResponseContinuationTokenLimitInKB: "x-ms-documentdb-responsecontinuationtokenlimitinkb", + SDKSupportedCapabilities: "x-ms-cosmos-sdk-supportedcapabilities", // QueryMetrics // Request header to tell backend to give you query metrics. 
@@ -494,3 +495,7 @@ export enum QueryFeature { CountIf = "CountIf", HybridSearch = "HybridSearch", } + +export enum SDKSupportedCapabilities { + PartitionMerge = 1, +} diff --git a/sdk/cosmosdb/cosmos/src/queryExecutionContext/documentProducer.ts b/sdk/cosmosdb/cosmos/src/queryExecutionContext/documentProducer.ts index 67bb7aba629d..a720062d6c4f 100644 --- a/sdk/cosmosdb/cosmos/src/queryExecutionContext/documentProducer.ts +++ b/sdk/cosmosdb/cosmos/src/queryExecutionContext/documentProducer.ts @@ -33,6 +33,9 @@ export class DocumentProducer { public generation: number = 0; private respHeaders: CosmosHeaders; private internalExecutionContext: DefaultQueryExecutionContext; + public startEpk: string; + public endEpk: string; + public populateEpkRangeHeaders: boolean; /** * Provides the Target Partition Range Query Execution Context. @@ -49,6 +52,9 @@ export class DocumentProducer { targetPartitionKeyRange: PartitionKeyRange, options: FeedOptions, correlatedActivityId: string, + startEpk?: string, + endEpk?: string, + populateEpkRangeHeaders?: boolean, ) { // TODO: any options this.collectionLink = collectionLink; @@ -68,6 +74,9 @@ export class DocumentProducer { this.fetchFunction, correlatedActivityId, ); + this.startEpk = startEpk; + this.endEpk = endEpk; + this.populateEpkRangeHeaders = populateEpkRangeHeaders; } /** * Synchronously gives the contiguous buffered results (stops at the first non result) if any @@ -101,6 +110,8 @@ export class DocumentProducer { const path = getPathFromLink(this.collectionLink, ResourceType.item); diagnosticNode.addData({ partitionKeyRangeId: this.targetPartitionKeyRange.id }); const id = getIdFromLink(this.collectionLink); + const startEpk = this.populateEpkRangeHeaders ? this.startEpk : undefined; + const endEpk = this.populateEpkRangeHeaders ? 
this.endEpk : undefined; return this.clientContext.queryFeed({ path, @@ -112,6 +123,8 @@ export class DocumentProducer { diagnosticNode, partitionKeyRangeId: this.targetPartitionKeyRange["id"], correlatedActivityId: correlatedActivityId, + startEpk: startEpk, + endEpk: endEpk, }); }; diff --git a/sdk/cosmosdb/cosmos/src/queryExecutionContext/parallelQueryExecutionContextBase.ts b/sdk/cosmosdb/cosmos/src/queryExecutionContext/parallelQueryExecutionContextBase.ts index 2ef38260fdc3..8d0dae71e145 100644 --- a/sdk/cosmosdb/cosmos/src/queryExecutionContext/parallelQueryExecutionContextBase.ts +++ b/sdk/cosmosdb/cosmos/src/queryExecutionContext/parallelQueryExecutionContextBase.ts @@ -133,8 +133,15 @@ export abstract class ParallelQueryExecutionContextBase implements ExecutionCont filteredPartitionKeyRanges.forEach((partitionTargetRange: any) => { // TODO: any partitionTargetRange // no async callback + const queryRange = QueryRange.parsePartitionKeyRange(partitionTargetRange); targetPartitionQueryExecutionContextList.push( - this._createTargetPartitionQueryExecutionContext(partitionTargetRange), + this._createTargetPartitionQueryExecutionContext( + partitionTargetRange, + undefined, + queryRange.min, + queryRange.max, + false, + ), ); }); @@ -220,7 +227,7 @@ export abstract class ParallelQueryExecutionContextBase implements ExecutionCont } /** - * Gets the replacement ranges for a partitionkeyrange that has been split + * Gets the replacement ranges for a partitionkeyrange that has been split or merged */ private async _getReplacementPartitionKeyRanges( documentProducer: DocumentProducer, @@ -254,50 +261,76 @@ export abstract class ParallelQueryExecutionContextBase implements ExecutionCont try { const replacementPartitionKeyRanges: any[] = await this._getReplacementPartitionKeyRanges(parentDocumentProducer); - const replacementDocumentProducers: DocumentProducer[] = []; - // Create the replacement documentProducers - 
replacementPartitionKeyRanges.forEach((partitionKeyRange) => { - // Create replacment document producers with the parent's continuationToken + + if (replacementPartitionKeyRanges.length === 0) { + throw new Error("PartitionKeyRangeGone error but no replacement partition key ranges"); + } + + if (replacementPartitionKeyRanges.length === 1) { + // Partition is gone due to Merge + // Create the replacement documentProducer with populateEpkRangeHeaders Flag set to true to set startEpk and endEpk headers const replacementDocumentProducer = this._createTargetPartitionQueryExecutionContext( - partitionKeyRange, + replacementPartitionKeyRanges[0], parentDocumentProducer.continuationToken, + parentDocumentProducer.startEpk, + parentDocumentProducer.endEpk, + true, ); - replacementDocumentProducers.push(replacementDocumentProducer); - }); - // We need to check if the documentProducers even has anything left to fetch from before enqueing them - const checkAndEnqueueDocumentProducer = async ( - documentProducerToCheck: DocumentProducer, - checkNextDocumentProducerCallback: any, - ): Promise => { - try { - const { result: afterItem } = await documentProducerToCheck.current(diagnosticNode); - if (afterItem === undefined) { - // no more results left in this document producer, so we don't enqueue it + // Enqueue the document producer and reexecutes the originFunction with the corrected executionContext + this.orderByPQ.enq(replacementDocumentProducer); + return originFunction(); + } else { + // Partition is gone due to Split + const replacementDocumentProducers: DocumentProducer[] = []; + // Create the replacement documentProducers with populateEpkRangeHeaders Flag set to false + replacementPartitionKeyRanges.forEach((partitionKeyRange) => { + // Create replacement document producers with the parent's continuationToken + const queryRange = QueryRange.parsePartitionKeyRange(partitionKeyRange); + const replacementDocumentProducer = this._createTargetPartitionQueryExecutionContext( + 
partitionKeyRange, + parentDocumentProducer.continuationToken, + queryRange.min, + queryRange.max, + false, + ); + replacementDocumentProducers.push(replacementDocumentProducer); + }); + + // We need to check if the documentProducers even has anything left to fetch from before enqueuing them + const checkAndEnqueueDocumentProducer = async ( + documentProducerToCheck: DocumentProducer, + checkNextDocumentProducerCallback: any, + ): Promise => { + try { + const { result: afterItem } = await documentProducerToCheck.current(diagnosticNode); + if (afterItem === undefined) { + // no more results left in this document producer, so we don't enqueue it + } else { + // Safe to put document producer back in the queue + this.orderByPQ.enq(documentProducerToCheck); + } + + await checkNextDocumentProducerCallback(); + } catch (err: any) { + this.err = err; + return; + } + }; + const checkAndEnqueueDocumentProducers = async (rdp: DocumentProducer[]): Promise => { + if (rdp.length > 0) { + // We still have a replacementDocumentProducer to check + const replacementDocumentProducer = rdp.shift(); + await checkAndEnqueueDocumentProducer(replacementDocumentProducer, async () => { + await checkAndEnqueueDocumentProducers(rdp); + }); } else { - // Safe to put document producer back in the queue - this.orderByPQ.enq(documentProducerToCheck); + // reexecutes the originFunction with the corrected executionContext + return originFunction(); } - - await checkNextDocumentProducerCallback(); - } catch (err: any) { - this.err = err; - return; - } - }; - const checkAndEnqueueDocumentProducers = async (rdp: DocumentProducer[]): Promise => { - if (rdp.length > 0) { - // We still have a replacementDocumentProducer to check - const replacementDocumentProducer = rdp.shift(); - await checkAndEnqueueDocumentProducer(replacementDocumentProducer, async () => { - await checkAndEnqueueDocumentProducers(rdp); - }); - } else { - // reexecutes the originFunction with the corrrected executionContext - return 
originFunction(); - } - }; - // Invoke the recursive function to get the ball rolling - await checkAndEnqueueDocumentProducers(replacementDocumentProducers); + }; + // Invoke the recursive function to get the ball rolling + await checkAndEnqueueDocumentProducers(replacementDocumentProducers); + } } catch (err: any) { this.err = err; throw err; @@ -324,13 +357,13 @@ export abstract class ParallelQueryExecutionContextBase implements ExecutionCont elseCallback: any, ): Promise { const documentProducer = this.orderByPQ.peek(); - // Check if split happened + // Check if split or merge happened try { await documentProducer.current(diagnosticNode); elseCallback(); } catch (err: any) { if (ParallelQueryExecutionContextBase._needPartitionKeyRangeCacheRefresh(err)) { - // Split has happened so we need to repair execution context before continueing + // Split or merge has happened so we need to repair execution context before continuing return addDignosticChild( (childNode) => this._repairExecutionContext(childNode, ifCallback), diagnosticNode, @@ -509,6 +542,9 @@ export abstract class ParallelQueryExecutionContextBase implements ExecutionCont private _createTargetPartitionQueryExecutionContext( partitionKeyTargetRange: any, continuationToken?: any, + startEpk?: string, + endEpk?: string, + populateEpkRangeHeaders?: boolean, ): DocumentProducer { // TODO: any // creates target partition range Query Execution Context @@ -539,6 +575,9 @@ export abstract class ParallelQueryExecutionContextBase implements ExecutionCont partitionKeyTargetRange, options, this.correlatedActivityId, + startEpk, + endEpk, + populateEpkRangeHeaders, ); } } diff --git a/sdk/cosmosdb/cosmos/src/request/request.ts b/sdk/cosmosdb/cosmos/src/request/request.ts index ce731aa20ef3..878041b98fb1 100644 --- a/sdk/cosmosdb/cosmos/src/request/request.ts +++ b/sdk/cosmosdb/cosmos/src/request/request.ts @@ -1,7 +1,13 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. 
import { setAuthorizationHeader } from "../auth"; -import { Constants, HTTPMethod, jsonStringifyAndEscapeNonASCII, ResourceType } from "../common"; +import { + Constants, + HTTPMethod, + jsonStringifyAndEscapeNonASCII, + ResourceType, + SDKSupportedCapabilities, +} from "../common"; import type { CosmosClientOptions } from "../CosmosClientOptions"; import type { PartitionKeyInternal } from "../documents"; import type { CosmosHeaders } from "../queryExecutionContext"; @@ -68,6 +74,9 @@ export async function getHeaders({ ...defaultHeaders, }; + // Adding SDKSupportedCapabilities header to hint that SDK supports partition merge + headers[Constants.HttpHeaders.SDKSupportedCapabilities] = SDKSupportedCapabilities.PartitionMerge; + if (useMultipleWriteLocations) { headers[Constants.HttpHeaders.ALLOW_MULTIPLE_WRITES] = true; } diff --git a/sdk/cosmosdb/cosmos/test/internal/unit/common/TestParallelQueryExecutionContext.ts b/sdk/cosmosdb/cosmos/test/internal/unit/common/TestParallelQueryExecutionContext.ts new file mode 100644 index 000000000000..b2dead9c286c --- /dev/null +++ b/sdk/cosmosdb/cosmos/test/internal/unit/common/TestParallelQueryExecutionContext.ts @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+import { + DocumentProducer, + ExecutionContext, + ParallelQueryExecutionContextBase, +} from "../../../../src/queryExecutionContext"; + +export class TestParallelQueryExecutionContext + extends ParallelQueryExecutionContextBase + implements ExecutionContext +{ + public documentProducerComparator( + docProd1: DocumentProducer, + docProd2: DocumentProducer, + ): number { + return docProd1.generation - docProd2.generation; + } +} diff --git a/sdk/cosmosdb/cosmos/test/internal/unit/partitionMerge.spec.ts b/sdk/cosmosdb/cosmos/test/internal/unit/partitionMerge.spec.ts new file mode 100644 index 000000000000..169dba1f70b7 --- /dev/null +++ b/sdk/cosmosdb/cosmos/test/internal/unit/partitionMerge.spec.ts @@ -0,0 +1,270 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +import { + ClientConfigDiagnostic, + ClientContext, + ConsistencyLevel, + Constants, + CosmosClientOptions, + CosmosDbDiagnosticLevel, + DiagnosticNodeInternal, + FeedOptions, + GlobalEndpointManager, + QueryInfo, + RequestOptions, + QueryIterator, + PartitionKeyRange, + Resource, + StatusCodes, +} from "../../../src"; +import { expect, assert } from "chai"; +import { TestParallelQueryExecutionContext } from "./common/TestParallelQueryExecutionContext"; +import sinon from "sinon"; +import { SubStatusCodes } from "../../../src/common"; + +const createMockPartitionKeyRange = (id: string, minInclusive: string, maxExclusive: string) => ({ + id, // Range ID + _rid: "range-rid", // Resource ID of the partition key range + minInclusive, // Minimum value of the partition key range + maxExclusive, // Maximum value of the partition key range + _etag: "sample-etag", // ETag for concurrency control + _self: `/dbs/sample-db/colls/sample-collection/pkranges/${id}`, // Self-link + throughputFraction: 1.0, // Throughput assigned to this partition + status: "Online", // Status of the partition +}); + +const createMockDocument = (id: string, name: string, value: string) => ({ + id, + _rid: 
"sample-rid-2", + _ts: Date.now(), + _self: "/dbs/sample-db/colls/sample-collection/docs/sample-id-2", + _etag: "sample-etag-2", + name: name, + value: value, +}); + +function createTestClientContext( + options: Partial, + diagnosticLevel: CosmosDbDiagnosticLevel, +) { + const clientOps: CosmosClientOptions = { + endpoint: "", + connectionPolicy: { + enableEndpointDiscovery: false, + preferredLocations: ["https://localhhost"], + }, + ...options, + }; + const globalEndpointManager = new GlobalEndpointManager( + clientOps, + async (diagnosticNode: DiagnosticNodeInternal, opts: RequestOptions) => { + expect(opts).to.exist; // eslint-disable-line no-unused-expressions + const dummyAccount: any = diagnosticNode; + return dummyAccount; + }, + ); + const clientConfig: ClientConfigDiagnostic = { + endpoint: "", + resourceTokensConfigured: true, + tokenProviderConfigured: true, + aadCredentialsConfigured: true, + connectionPolicyConfigured: true, + consistencyLevel: ConsistencyLevel.BoundedStaleness, + defaultHeaders: {}, + agentConfigured: true, + userAgentSuffix: "", + pluginsConfigured: true, + sDKVersion: Constants.SDKVersion, + ...options, + }; + const clientContext = new ClientContext( + clientOps, + globalEndpointManager, + clientConfig, + diagnosticLevel, + ); + return clientContext; +} + +const collectionLink = "/dbs/testDb/colls/testCollection"; // Sample collection link +const query = "SELECT * FROM c"; // Example query string or SqlQuerySpec object +const options: FeedOptions = { maxItemCount: 2, maxDegreeOfParallelism: 1 }; +const queryInfo: QueryInfo = { + orderBy: ["Ascending"], + rewrittenQuery: "SELECT * FROM c", +} as QueryInfo; +const partitionedQueryExecutionInfo = { + queryRanges: [ + { + min: "", + max: "1FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + isMinInclusive: true, // Whether the minimum value is inclusive + isMaxInclusive: false, + }, + { + min: "1FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + max: "FF", + isMinInclusive: true, // Whether the minimum value is 
inclusive + isMaxInclusive: false, + }, + ], + queryInfo: queryInfo, + partitionedQueryExecutionInfoVersion: 1, +}; +const cosmosClientOptions = { + endpoint: "https://your-cosmos-db.documents.azure.com:443/", + key: "your-cosmos-db-key", + userAgentSuffix: "MockClient", +}; +const correlatedActivityId = "sample-activity-id"; // Example correlated activity ID + +const diagnosticLevel = CosmosDbDiagnosticLevel.info; + +describe("Partition Merge", function () { + const clientContext = createTestClientContext(cosmosClientOptions, diagnosticLevel); // Mock ClientContext instance + const mockPartitionKeyRange1 = createMockPartitionKeyRange( + "parent1", + "", + "1FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + ); + const mockPartitionKeyRange2 = createMockPartitionKeyRange( + "parent2", + "1FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "FF", + ); + + const fetchAllInternalStub = sinon.stub().resolves({ + resources: [mockPartitionKeyRange1, mockPartitionKeyRange2], + headers: { "x-ms-request-charge": "1.23" }, + code: 200, + }); + sinon.stub(clientContext, "queryPartitionKeyRanges").returns({ + fetchAllInternal: fetchAllInternalStub, // Add fetchAllInternal to mimic expected structure + } as unknown as QueryIterator); + + const mockDocument1 = createMockDocument( + "sample-id-1", + "Sample Document 1", + "This is the first sample document", + ); + const mockDocument2 = createMockDocument( + "sample-id-2", + "Sample Document 2", + "This is the second sample document", + ); + + // Define a stub for queryFeed in clientContext + sinon.stub(clientContext, "queryFeed").resolves({ + result: [mockDocument1, mockDocument2] as unknown as Resource, // Add result to mimic expected structure + headers: { + "x-ms-request-charge": "3.5", // Example RU charge + "x-ms-continuation": "token-for-next-page", // Continuation token for pagination + }, + code: 200, // Optional status code + }); + + // Create a new instance of TestParallelQueryExecutionContext + const context = new TestParallelQueryExecutionContext( 
+ clientContext, + collectionLink, + query, + options, + partitionedQueryExecutionInfo, + correlatedActivityId, + ); + context["options"] = options; + + it("there should be 2 document producers in the priority queue as there are two partition key ranges", async function () { + // Assert that the priority queue has 2 document producers + assert.equal(context["orderByPQ"].size(), 2); + + // Assert that the document producers have the correct start and end EPKs and populateEpkRangeHeaders is false + context["orderByPQ"].forEach((docProd) => { + if (docProd.targetPartitionKeyRange.id === mockPartitionKeyRange1.id) { + assert.equal(docProd.startEpk, mockPartitionKeyRange1.minInclusive); + assert.equal(docProd.endEpk, mockPartitionKeyRange1.maxExclusive); + } else if (docProd.targetPartitionKeyRange.id === mockPartitionKeyRange2.id) { + assert.equal(docProd.startEpk, mockPartitionKeyRange2.minInclusive); + assert.equal(docProd.endEpk, mockPartitionKeyRange2.maxExclusive); + } + assert.equal(docProd.populateEpkRangeHeaders, false); + }); + }); + + it("Correct parent epk ranges are picked up in the newly created child document producers and _repairExecutionContext function should be called if partition is gone due to merge", async function () { + // Stub the current function of the first document producer to throw a Gone error + const parentDocProd1 = context["orderByPQ"].peek(); + sinon.stub(parentDocProd1, "current").rejects({ + code: StatusCodes.Gone, + substatus: SubStatusCodes.PartitionKeyRangeGone, + message: "Partition key range is gone", + }); + + const parentDocumentProducer1StartEpk = parentDocProd1.startEpk; + const parentDocumentProducer1EndEpk = parentDocProd1.endEpk; + + // Mocking the _getReplacementPartitionKeyRanges function to return a single partition key range + const getReplacementPartitionKeyRangesStub = sinon + .stub(context as any, "_getReplacementPartitionKeyRanges") + .resolves([createMockPartitionKeyRange("child1", "", 
"1FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")]); + + // Creating a spy on the _repairExecutionContext function + const repairSpy = sinon.spy(context as any, "_repairExecutionContext"); + + try { + // The query fails because the current function of the first document producer throws a Gone error + await context.nextItem(context["diagnosticNodeWrapper"]["diagnosticNode"]); + assert.fail("Expected query to fail"); + } catch (err) { + assert(err); + } + + // Assert that the _repairExecutionContext function was called once + assert(repairSpy.calledOnce); + repairSpy.restore(); + + // Assert that the priority queue has 2 document producers. One parent and one newly created child + assert.equal(context["orderByPQ"].size(), 2); + + // Assert that the newly created document producer has the correct start and end EPKs from Parent and populateEpkRangeHeaders is true + context["orderByPQ"].forEach((docProd) => { + if (docProd.targetPartitionKeyRange.id === "child1") { + assert.equal(docProd.startEpk, parentDocumentProducer1StartEpk); + assert.equal(docProd.endEpk, parentDocumentProducer1EndEpk); + assert.equal(docProd.populateEpkRangeHeaders, true); + } + }); + + // Removing the child document producer from the priority queue + context["orderByPQ"].deq(); + + // Assert that the priority queue has 1 document producer + assert.equal(context["orderByPQ"].size(), 1); + + const parentDocProd2 = context["orderByPQ"].peek(); + + sinon.stub(parentDocProd2, "current").rejects({ + code: StatusCodes.Gone, + substatus: SubStatusCodes.PartitionKeyRangeGone, + message: "Partition key range is gone", + }); + + const parentDocumentProducer2StartEpk = parentDocProd2.startEpk; + const parentDocumentProducer2EndEpk = parentDocProd2.endEpk; + + // Restoring and mocking again the _getReplacementPartitionKeyRanges function + getReplacementPartitionKeyRangesStub.restore(); + sinon + .stub(context as any, "_getReplacementPartitionKeyRanges") + .resolves([createMockPartitionKeyRange("child2", 
"1FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", "FF")]); + + // Assert that the newly created document producer has the correct start and end EPKs from Parent and populateEpkRangeHeaders is true + context["orderByPQ"].forEach((docProd) => { + if (docProd.targetPartitionKeyRange.id === "child2") { + assert.equal(docProd.startEpk, parentDocumentProducer2StartEpk); + assert.equal(docProd.endEpk, parentDocumentProducer2EndEpk); + assert.equal(docProd.populateEpkRangeHeaders, true); + } + }); + }); +}); diff --git a/sdk/cosmosdb/cosmos/tsconfig.strict.json b/sdk/cosmosdb/cosmos/tsconfig.strict.json index 7381b95c5f02..39aa63c4e858 100644 --- a/sdk/cosmosdb/cosmos/tsconfig.strict.json +++ b/sdk/cosmosdb/cosmos/tsconfig.strict.json @@ -180,6 +180,8 @@ "test/internal/unit/timeoutFailoverRetryPolicy.spec.ts", "test/internal/unit/nonStreamingOrderByMap.spec.ts", "test/internal/unit/utils/supportedQueryFeaturesBuilder.spec.ts", + "test/internal/unit/partitionMerge.spec.ts", + "test/internal/unit/common/TestParallelQueryExecutionContext.ts", "test/internal/unit/getHeader.spec.ts", "test/public/common/BaselineTest.PathParser.ts", "test/public/common/TestData.ts",