private/multipart: method to begin upload with metadata
Custom method to begin an upload and set object metadata at the same time. It won't be part of the public API for now; the main consumer will be the gateway.

Fixes storj/team-metainfo#106

Change-Id: Iff88a08c6a880e84c35cd9a502f3e49b01cd6dfc
Showing 7 changed files with 328 additions and 7 deletions.
@@ -0,0 +1,182 @@
// Copyright (C) 2022 Storj Labs, Inc.
// See LICENSE for copying information.

package multipart

import (
	"context"
	"crypto/rand"
	"time"
	_ "unsafe" // for go:linkname

	"github.com/spacemonkeygo/monkit/v3"
	"github.com/zeebo/errs"

	"storj.io/common/base58"
	"storj.io/common/encryption"
	"storj.io/common/paths"
	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/uplink"
	"storj.io/uplink/private/metaclient"
)

var mon = monkit.Package()

// UploadOptions contains additional options for uploading.
type UploadOptions struct {
	// When Expires is zero, there is no expiration.
	Expires time.Time

	CustomMetadata uplink.CustomMetadata
}

// BeginUpload begins a new multipart upload to bucket and key.
//
// Use project.UploadPart to upload individual parts.
//
// Use project.CommitUpload to finish the upload.
//
// Use project.AbortUpload to cancel the upload at any time.
//
// UploadObject is a convenient way to upload single part objects.
func BeginUpload(ctx context.Context, project *uplink.Project, bucket, key string, options *UploadOptions) (info uplink.UploadInfo, err error) {
	defer mon.Task()(&ctx)(&err)

	switch {
	case bucket == "":
		return uplink.UploadInfo{}, convertKnownErrors(metaclient.ErrNoBucket.New(""), bucket, key)
	case key == "":
		return uplink.UploadInfo{}, convertKnownErrors(metaclient.ErrNoPath.New(""), bucket, key)
	}

	if options == nil {
		options = &UploadOptions{}
	}

	encPath, err := encryptPath(project, bucket, key)
	if err != nil {
		return uplink.UploadInfo{}, convertKnownErrors(err, bucket, key)
	}

	metainfoClient, err := dialMetainfoClient(ctx, project)
	if err != nil {
		return uplink.UploadInfo{}, convertKnownErrors(err, bucket, key)
	}
	defer func() { err = errs.Combine(err, metainfoClient.Close()) }()

	metadata, err := encryptMetadata(project, bucket, key, options.CustomMetadata)
	if err != nil {
		return uplink.UploadInfo{}, convertKnownErrors(err, bucket, key)
	}

	response, err := metainfoClient.BeginObject(ctx, metaclient.BeginObjectParams{
		Bucket:               []byte(bucket),
		EncryptedObjectKey:   []byte(encPath.Raw()),
		ExpiresAt:            options.Expires,
		EncryptionParameters: encryptionParameters(project),

		EncryptedMetadata:             metadata.EncryptedContent,
		EncryptedMetadataEncryptedKey: metadata.EncryptedKey,
		EncryptedMetadataNonce:        metadata.EncryptedKeyNonce,
	})
	if err != nil {
		return uplink.UploadInfo{}, convertKnownErrors(err, bucket, key)
	}

	encodedStreamID := base58.CheckEncode(response.StreamID[:], 1)
	return uplink.UploadInfo{
		Key:      key,
		UploadID: encodedStreamID,
		System: uplink.SystemMetadata{
			Expires: options.Expires,
		},
		Custom: options.CustomMetadata,
	}, nil
}

type encryptedMetadata struct {
	EncryptedContent  []byte
	EncryptedKey      []byte
	EncryptedKeyNonce storj.Nonce
}

func encryptMetadata(project *uplink.Project, bucket, key string, metadata uplink.CustomMetadata) (encryptedMetadata, error) {
	if len(metadata) == 0 {
		return encryptedMetadata{}, nil
	}

	metadataBytes, err := pb.Marshal(&pb.SerializableMeta{
		UserDefined: metadata.Clone(),
	})
	if err != nil {
		return encryptedMetadata{}, errs.Wrap(err)
	}

	streamInfo, err := pb.Marshal(&pb.StreamInfo{
		Metadata: metadataBytes,
	})
	if err != nil {
		return encryptedMetadata{}, errs.Wrap(err)
	}

	derivedKey, err := deriveContentKey(project, bucket, key)
	if err != nil {
		return encryptedMetadata{}, errs.Wrap(err)
	}

	var metadataKey storj.Key
	// generate a random key for encrypting the metadata
	_, err = rand.Read(metadataKey[:])
	if err != nil {
		return encryptedMetadata{}, errs.Wrap(err)
	}

	var encryptedKeyNonce storj.Nonce
	// generate a random nonce for encrypting the metadata key
	_, err = rand.Read(encryptedKeyNonce[:])
	if err != nil {
		return encryptedMetadata{}, errs.Wrap(err)
	}

	encryptionParameters := encryptionParameters(project)
	encryptedKey, err := encryption.EncryptKey(&metadataKey, encryptionParameters.CipherSuite, derivedKey, &encryptedKeyNonce)
	if err != nil {
		return encryptedMetadata{}, errs.Wrap(err)
	}

	// encrypt metadata with the content encryption key and zero nonce.
	encryptedStreamInfo, err := encryption.Encrypt(streamInfo, encryptionParameters.CipherSuite, &metadataKey, &storj.Nonce{})
	if err != nil {
		return encryptedMetadata{}, errs.Wrap(err)
	}

	// TODO: should we commit StreamMeta, or only the encrypted StreamInfo?
	streamMetaBytes, err := pb.Marshal(&pb.StreamMeta{
		EncryptedStreamInfo: encryptedStreamInfo,
	})
	if err != nil {
		return encryptedMetadata{}, errs.Wrap(err)
	}

	return encryptedMetadata{
		EncryptedContent:  streamMetaBytes,
		EncryptedKey:      encryptedKey,
		EncryptedKeyNonce: encryptedKeyNonce,
	}, nil
}

//go:linkname convertKnownErrors storj.io/uplink.convertKnownErrors
func convertKnownErrors(err error, bucket, key string) error

//go:linkname dialMetainfoClient storj.io/uplink.dialMetainfoClient
func dialMetainfoClient(ctx context.Context, project *uplink.Project) (_ *metaclient.Client, err error)

//go:linkname encryptionParameters storj.io/uplink.encryptionParameters
func encryptionParameters(project *uplink.Project) storj.EncryptionParameters

//go:linkname encryptPath storj.io/uplink.encryptPath
func encryptPath(project *uplink.Project, bucket, key string) (paths.Encrypted, error)

//go:linkname deriveContentKey storj.io/uplink.deriveContentKey
func deriveContentKey(project *uplink.Project, bucket, key string) (*storj.Key, error)
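
For illustration, here is a minimal sketch of how a consumer such as the gateway might combine the new method with the existing public uplink API (project.UploadPart and project.CommitUpload, as referenced in the doc comment above). The bucket name, object key, part number, and payload are hypothetical; the sketch assumes an already-opened *uplink.Project, an existing bucket, and abbreviated error handling.

	// Sketch only: begin a multipart upload with custom metadata, upload a
	// single part via the public API, then commit the object. Names and
	// values below are illustrative and not part of this commit.
	info, err := multipart.BeginUpload(ctx, project, "my-bucket", "my-object", &multipart.UploadOptions{
		CustomMetadata: uplink.CustomMetadata{"content-type": "application/json"},
	})
	if err != nil {
		return err
	}

	// Parts are uploaded with the regular public API using the returned UploadID.
	part, err := project.UploadPart(ctx, "my-bucket", "my-object", info.UploadID, 1)
	if err != nil {
		return err
	}
	if _, err := part.Write([]byte(`{"hello":"world"}`)); err != nil {
		return err
	}
	if err := part.Commit(); err != nil {
		return err
	}

	// Finish the object. With this change the metadata was already sent at
	// BeginUpload, so nothing extra is passed at commit time here.
	_, err = project.CommitUpload(ctx, "my-bucket", "my-object", info.UploadID, nil)
	if err != nil {
		return err
	}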
@@ -0,0 +1,132 @@
// Copyright (C) 2022 Storj Labs, Inc.
// See LICENSE for copying information.

package multipart_test

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"storj.io/common/testcontext"
	"storj.io/storj/private/testplanet"
	"storj.io/uplink"
	"storj.io/uplink/private/multipart"
)

func TestBeginUpload(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount:   1,
		StorageNodeCount: 0,
		UplinkCount:      1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
		require.NoError(t, err)
		defer ctx.Check(project.Close)

		_, err = multipart.BeginUpload(ctx, project, "not-existing-testbucket", "multipart-object", nil)
		require.Error(t, err)
		require.True(t, errors.Is(err, uplink.ErrBucketNotFound))

		err = planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket")
		require.NoError(t, err)

		// assert there is no pending multipart upload
		assertUploadList(ctx, t, project, "testbucket", nil)

		info, err := multipart.BeginUpload(ctx, project, "testbucket", "multipart-object", nil)
		require.NoError(t, err)
		require.NotNil(t, info.UploadID)

		// assert there is only one pending multipart upload
		assertUploadList(ctx, t, project, "testbucket", nil, "multipart-object")

		// we allow starting several multipart uploads for the same key
		_, err = multipart.BeginUpload(ctx, project, "testbucket", "multipart-object", nil)
		require.NoError(t, err)
		require.NotNil(t, info.UploadID)

		info, err = multipart.BeginUpload(ctx, project, "testbucket", "multipart-object-1", nil)
		require.NoError(t, err)
		require.NotNil(t, info.UploadID)

		// assert there are two pending multipart uploads
		assertUploadList(ctx, t, project, "testbucket", nil, "multipart-object", "multipart-object-1")
	})
}

func TestBeginUploadWithMetadata(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount:   1,
		StorageNodeCount: 0,
		UplinkCount:      1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
		require.NoError(t, err)
		defer ctx.Check(project.Close)

		err = planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket")
		require.NoError(t, err)

		expectedMetadata := map[string]uplink.CustomMetadata{
			"nil":   nil,
			"empty": {},
			"not-empty": {
				"key": "value",
			},
		}

		for name, metadata := range expectedMetadata {
			t.Run(name, func(t *testing.T) {
				info, err := multipart.BeginUpload(ctx, project, "testbucket", name, &multipart.UploadOptions{
					CustomMetadata: metadata,
				})
				require.NoError(t, err)
				require.NotNil(t, info.UploadID)

				list := project.ListUploads(ctx, "testbucket", &uplink.ListUploadsOptions{
					Prefix: name,
					Custom: true,
				})
				require.True(t, list.Next())

				if metadata == nil {
					require.Empty(t, list.Item().Custom)
				} else {
					require.Equal(t, metadata, list.Item().Custom)
				}
				require.False(t, list.Next())
				require.NoError(t, list.Err())
			})
		}
	})
}

func assertUploadList(ctx context.Context, t *testing.T, project *uplink.Project, bucket string, options *uplink.ListUploadsOptions, objectKeys ...string) {
	list := project.ListUploads(ctx, bucket, options)
	require.NoError(t, list.Err())
	require.Nil(t, list.Item())

	itemKeys := make(map[string]struct{})
	for list.Next() {
		require.NoError(t, list.Err())
		require.NotNil(t, list.Item())
		require.False(t, list.Item().IsPrefix)
		itemKeys[list.Item().Key] = struct{}{}
	}

	for _, objectKey := range objectKeys {
		if assert.Contains(t, itemKeys, objectKey) {
			delete(itemKeys, objectKey)
		}
	}

	require.Empty(t, itemKeys)

	require.False(t, list.Next())
	require.NoError(t, list.Err())
	require.Nil(t, list.Item())
}