
Commit c699af0

*: Add zero part and use it as multipart uploadID
Signed-off-by: Evgenii Baidakov <evgenii@nspcc.io>
1 parent b6283bc commit c699af0

File tree

7 files changed: +177 -67 lines changed
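The change in one sentence: the gateway no longer mints a random UUID for the multipart upload ID; it stores an empty "zero" part up front and uses that object's OID, in string form, as the upload ID, which also makes the zero part the first member of the object split chain. A minimal sketch of that ID round trip, assuming the NeoFS SDK oid package and its SetSHA256 setter (the identifier values here are illustrative only, not taken from the gateway):

package main

import (
	"crypto/sha256"
	"fmt"

	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
)

func main() {
	// Stand-in for the OID of the freshly stored zero (empty) part.
	var zeroPartID oid.ID
	zeroPartID.SetSHA256(sha256.Sum256([]byte("placeholder payload")))

	// The gateway hands this string to the S3 client as the UploadID.
	uploadID := zeroPartID.EncodeToString()

	// Any later code that only has the UploadID can recover the OID of the
	// first split-chain member, so it no longer needs a stored FirstSplitOID.
	var splitFirstID oid.ID
	if err := splitFirstID.DecodeString(uploadID); err != nil {
		fmt.Println("decode upload ID:", err)
		return
	}

	fmt.Println(splitFirstID.Equals(zeroPartID)) // true
}

Because the ID decodes back into an oid.ID, each part no longer needs to carry FirstSplitOID, which is exactly what the api/data/tree.go change below removes.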

api/data/tree.go

-2
@@ -132,8 +132,6 @@ type PartInfo struct {
     HomoHash []byte
     // Elements contain [oid.ID] and size for each element for the current part.
     Elements []LinkObjectPayload
-    // FirstSplitOID contains first object part in the split chain.
-    FirstSplitOID oid.ID
 }

 // ToHeaderString form short part representation to use in S3-Completed-Parts header.

api/handler/multipart_upload.go

+6 -8
@@ -7,7 +7,6 @@ import (
     "strconv"
     "time"

-    "github.com/google/uuid"
     "github.com/nspcc-dev/neofs-s3-gw/api"
     "github.com/nspcc-dev/neofs-s3-gw/api/data"
     "github.com/nspcc-dev/neofs-s3-gw/api/layer"
@@ -101,17 +100,14 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
         return
     }

-    uploadID := uuid.New()
     additional := []zap.Field{
-        zap.String("uploadID", uploadID.String()),
         zap.String("Key", reqInfo.ObjectName),
     }

     p := &layer.CreateMultipartParams{
         Info: &layer.UploadInfoParams{
-            UploadID: uploadID.String(),
-            Bkt:      bktInfo,
-            Key:      reqInfo.ObjectName,
+            Bkt: bktInfo,
+            Key: reqInfo.ObjectName,
         },
         Data: &layer.UploadData{},
     }
@@ -154,7 +150,8 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
         return
     }

-    if err = h.obj.CreateMultipartUpload(r.Context(), p); err != nil {
+    uploadID, err := h.obj.CreateMultipartUpload(r.Context(), p)
+    if err != nil {
         h.logAndSendError(w, "could create multipart upload", reqInfo, err, additional...)
         return
     }
@@ -166,9 +163,10 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
     resp := InitiateMultipartUploadResponse{
         Bucket:   reqInfo.BucketName,
         Key:      reqInfo.ObjectName,
-        UploadID: uploadID.String(),
+        UploadID: uploadID,
     }

+    additional = append(additional, zap.String("uploadID", uploadID))
     if err = api.EncodeToResponse(w, resp); err != nil {
         h.logAndSendError(w, "could not encode InitiateMultipartUploadResponse to response", reqInfo, err, additional...)
         return
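On the handler side the net effect is that github.com/google/uuid drops out entirely: the upload ID is only known after the layer has stored the zero part, so the uploadID log field can only be appended after the CreateMultipartUpload call. A hedged sketch of the new call shape, with illustrative stand-in types rather than the gateway's real handler and layer:

package main

import (
	"context"
	"fmt"
)

// multipartCreator mirrors the relevant slice of the layer interface after
// this commit: CreateMultipartUpload now returns the upload ID instead of
// the handler minting a UUID itself.
type multipartCreator interface {
	CreateMultipartUpload(ctx context.Context, bucket, key string) (string, error)
}

// initiateUpload is an illustrative stand-in for CreateMultipartUploadHandler.
func initiateUpload(ctx context.Context, layer multipartCreator, bucket, key string) (string, error) {
	uploadID, err := layer.CreateMultipartUpload(ctx, bucket, key)
	if err != nil {
		return "", fmt.Errorf("could not create multipart upload: %w", err)
	}
	// The ID exists only after the zero part is stored, so any logging
	// fields that mention it have to be added here, not before the call.
	return uploadID, nil
}

type fakeLayer struct{}

func (fakeLayer) CreateMultipartUpload(context.Context, string, string) (string, error) {
	return "example-zero-part-oid", nil // stand-in for the zero part's OID string
}

func main() {
	id, _ := initiateUpload(context.Background(), fakeLayer{}, "bkt", "obj")
	fmt.Println("UploadID:", id)
}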

api/layer/layer.go

+1 -1
@@ -222,7 +222,7 @@ type (

         DeleteObjects(ctx context.Context, p *DeleteObjectParams) []*VersionedObject

-        CreateMultipartUpload(ctx context.Context, p *CreateMultipartParams) error
+        CreateMultipartUpload(ctx context.Context, p *CreateMultipartParams) (string, error)
         CompleteMultipartUpload(ctx context.Context, p *CompleteMultipartParams) (*UploadData, *data.ExtendedObjectInfo, error)
         UploadPart(ctx context.Context, p *UploadPartParams) (string, error)
         UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.ObjectInfo, error)

api/layer/multipart_upload.go

+162 -40
@@ -141,7 +141,7 @@ type (
     }
 )

-func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartParams) error {
+func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartParams) (string, error) {
     metaSize := len(p.Header)
     if p.Data != nil {
         metaSize += len(p.Data.ACLHeaders)
@@ -150,12 +150,11 @@ func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar

     ownerPubKey, err := n.OwnerPublicKey(ctx)
     if err != nil {
-        return fmt.Errorf("owner pub key: %w", err)
+        return "", fmt.Errorf("owner pub key: %w", err)
     }

     info := &data.MultipartInfo{
         Key:         p.Info.Key,
-        UploadID:    p.Info.UploadID,
         Owner:       n.Owner(ctx),
         OwnerPubKey: *ownerPubKey,
         Created:     TimeNow(ctx),
@@ -179,11 +178,27 @@ func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartPar

     if p.Info.Encryption.Enabled() {
         if err := addEncryptionHeaders(info.Meta, p.Info.Encryption); err != nil {
-            return fmt.Errorf("add encryption header: %w", err)
+            return "", fmt.Errorf("add encryption header: %w", err)
         }
     }

-    return n.treeService.CreateMultipartUpload(ctx, p.Info.Bkt, info)
+    zeroPartInfo, err := n.uploadZeroPart(ctx, info, p.Info)
+    if err != nil {
+        return "", fmt.Errorf("upload zero part: %w", err)
+    }
+
+    info.UploadID = zeroPartInfo.UploadID
+
+    nodeID, err := n.treeService.CreateMultipartUpload(ctx, p.Info.Bkt, info)
+    if err != nil {
+        return "", fmt.Errorf("create multipart upload: %w", err)
+    }
+
+    if err = n.finalizeZeroPart(ctx, p.Info.Bkt, nodeID, zeroPartInfo); err != nil {
+        return "", fmt.Errorf("finalize zero part: %w", err)
+    }
+
+    return zeroPartInfo.UploadID, nil
 }

 func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, error) {
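The new CreateMultipartUpload body has a fixed ordering: the zero part is stored first (its OID string becomes info.UploadID), then the tree node for the upload is created, and only then can the zero part record be attached to that node, which is why the tree service now returns the node ID. A condensed sketch of that sequencing, using hypothetical stand-in interfaces rather than the gateway's real treeService and layer types:

package main

import (
	"context"
	"fmt"
)

// zeroPart is a stand-in for *data.PartInfo of the hidden part 0.
type zeroPart struct {
	UploadID string // string form of the zero part's OID
}

type treeService interface {
	// CreateMultipartUpload returns the ID of the freshly created multipart node.
	CreateMultipartUpload(ctx context.Context, uploadID string) (nodeID uint64, err error)
	// AddPart attaches the zero part record to that node.
	AddPart(ctx context.Context, nodeID uint64, part zeroPart) error
}

type uploader interface {
	// UploadZeroPart stores an empty object; its OID string is the future upload ID.
	UploadZeroPart(ctx context.Context) (zeroPart, error)
}

func createMultipartUpload(ctx context.Context, up uploader, tree treeService) (string, error) {
	zp, err := up.UploadZeroPart(ctx) // 1. store the empty "part 0"
	if err != nil {
		return "", fmt.Errorf("upload zero part: %w", err)
	}

	nodeID, err := tree.CreateMultipartUpload(ctx, zp.UploadID) // 2. create the tree node carrying that ID
	if err != nil {
		return "", fmt.Errorf("create multipart upload: %w", err)
	}

	if err = tree.AddPart(ctx, nodeID, zp); err != nil { // 3. record part 0 under the node
		return "", fmt.Errorf("finalize zero part: %w", err)
	}

	return zp.UploadID, nil // the caller (handler) reports this as UploadID
}

type fakeUploader struct{}

func (fakeUploader) UploadZeroPart(context.Context) (zeroPart, error) {
	return zeroPart{UploadID: "ZeroPartOIDString"}, nil
}

type fakeTree struct{}

func (fakeTree) CreateMultipartUpload(context.Context, string) (uint64, error) { return 42, nil }
func (fakeTree) AddPart(context.Context, uint64, zeroPart) error               { return nil }

func main() {
	id, err := createMultipartUpload(context.Background(), fakeUploader{}, fakeTree{})
	fmt.Println(id, err)
}

In the real code the third step is finalizeZeroPart, which also cleans up a previously registered zero part; it appears further down in this file.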
@@ -249,28 +264,28 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf

     lastPart, err := n.treeService.GetPartByNumber(ctx, bktInfo, multipartInfo.ID, p.PartNumber-1)
     if err != nil {
-        // if ErrPartListIsEmpty, there is the first part of multipart.
-        if !errors.Is(err, ErrPartListIsEmpty) {
-            return nil, fmt.Errorf("getLastPart: %w", err)
-        }
-    } else {
-        // try to restore hash state from the last part.
-        // the required interface is guaranteed according to the docs, so just cast without checks.
-        binaryUnmarshaler := multipartHash.(encoding.BinaryUnmarshaler)
-        if err = binaryUnmarshaler.UnmarshalBinary(lastPart.MultipartHash); err != nil {
-            return nil, fmt.Errorf("unmarshal previous part hash: %w", err)
-        }
-
-        if tzHash != nil {
-            binaryUnmarshaler = tzHash.(encoding.BinaryUnmarshaler)
-            if err = binaryUnmarshaler.UnmarshalBinary(lastPart.HomoHash); err != nil {
-                return nil, fmt.Errorf("unmarshal previous part homo hash: %w", err)
-            }
+        return nil, fmt.Errorf("getLastPart: %w", err)
+    }
+
+    // try to restore hash state from the last part.
+    // the required interface is guaranteed according to the docs, so just cast without checks.
+    binaryUnmarshaler := multipartHash.(encoding.BinaryUnmarshaler)
+    if err = binaryUnmarshaler.UnmarshalBinary(lastPart.MultipartHash); err != nil {
+        return nil, fmt.Errorf("unmarshal previous part hash: %w", err)
+    }
+
+    if tzHash != nil {
+        binaryUnmarshaler = tzHash.(encoding.BinaryUnmarshaler)
+        if err = binaryUnmarshaler.UnmarshalBinary(lastPart.HomoHash); err != nil {
+            return nil, fmt.Errorf("unmarshal previous part homo hash: %w", err)
         }
+    }

-        isSetSplitPreviousID = true
-        splitPreviousID = lastPart.OID
-        splitFirstID = lastPart.FirstSplitOID
+    isSetSplitPreviousID = true
+    splitPreviousID = lastPart.OID
+
+    if err = splitFirstID.DecodeString(multipartInfo.UploadID); err != nil {
+        return nil, fmt.Errorf("failed to decode multipart upload ID: %w", err)
     }

     var (
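With part 0 always present, the ErrPartListIsEmpty branch disappears: GetPartByNumber for p.PartNumber-1 is now expected to succeed for every part, and the running multipart hash is always restored from the previous part's persisted state. That persistence works because the standard-library hashes implement encoding.BinaryMarshaler and encoding.BinaryUnmarshaler; a self-contained illustration of the save/restore round trip (independent of the gateway's types):

package main

import (
	"crypto/sha256"
	"encoding"
	"encoding/hex"
	"fmt"
)

func main() {
	// Part 0 (the zero part): hash nothing, then persist the hash state,
	// the way the gateway stores MultipartHash in the tree service.
	h := sha256.New()
	state, err := h.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Part 1: a new hash instance restores the saved state and continues.
	h2 := sha256.New()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}
	h2.Write([]byte("payload of part 1"))

	// Equivalent to hashing the whole stream in one go, since part 0 is empty.
	want := sha256.Sum256([]byte("payload of part 1"))
	fmt.Println(hex.EncodeToString(h2.Sum(nil)) == hex.EncodeToString(want[:])) // true
}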
@@ -298,10 +313,6 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
         },
     }

-    if lastPart != nil {
-        splitFirstID = lastPart.FirstSplitOID
-    }
-
     chunk := n.buffers.Get().(*[]byte)
     var totalBytes int
     // slice part manually. Simultaneously considering the part is a single object for user.
@@ -326,10 +337,6 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
             return nil, err
         }

-        if splitFirstID.Equals(oid.ID{}) {
-            splitFirstID = id
-        }
-
         isSetSplitPreviousID = true
         splitPreviousID = id
         elements = append(elements, data.LinkObjectPayload{OID: id, Size: uint32(nBts)})
@@ -367,10 +374,6 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
         Elements: elements,
     }

-    if !splitFirstID.Equals(oid.ID{}) {
-        partInfo.FirstSplitOID = splitFirstID
-    }
-
     // encoding hash.Hash state to save it in tree service.
     // the required interface is guaranteed according to the docs, so just cast without checks.
     binaryMarshaler := multipartHash.(encoding.BinaryMarshaler)
@@ -417,6 +420,114 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
     return objInfo, nil
 }

+func (n *layer) uploadZeroPart(ctx context.Context, multipartInfo *data.MultipartInfo, p *UploadInfoParams) (*data.PartInfo, error) {
+    encInfo := FormEncryptionInfo(multipartInfo.Meta)
+    if err := p.Encryption.MatchObjectEncryption(encInfo); err != nil {
+        n.log.Warn("mismatched obj encryptionInfo", zap.Error(err))
+        return nil, s3errors.GetAPIError(s3errors.ErrInvalidEncryptionParameters)
+    }
+
+    var (
+        bktInfo         = p.Bkt
+        attributes      [][2]string
+        multipartHash   = sha256.New()
+        tzHash          hash.Hash
+        id              oid.ID
+        elements        []data.LinkObjectPayload
+        creationTime    = TimeNow(ctx)
+        currentPartHash = sha256.New()
+    )
+
+    if p.Encryption.Enabled() {
+        attributes = append(attributes, [2]string{AttributeDecryptedSize, "0"})
+    }
+
+    if n.neoFS.IsHomomorphicHashingEnabled() {
+        tzHash = tz.New()
+    }
+
+    objHashes := []hash.Hash{multipartHash, currentPartHash}
+    if tzHash != nil {
+        objHashes = append(objHashes, tzHash)
+    }
+
+    prm := PrmObjectCreate{
+        Container:    bktInfo.CID,
+        Creator:      bktInfo.Owner,
+        Attributes:   attributes,
+        CreationTime: creationTime,
+        CopiesNumber: multipartInfo.CopiesNumber,
+        Multipart: &Multipart{
+            MultipartHashes: objHashes,
+        },
+        Payload: bytes.NewBuffer(nil),
+    }
+
+    id, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
+    if err != nil {
+        return nil, err
+    }
+
+    elements = append(elements, data.LinkObjectPayload{OID: id, Size: 0})
+
+    reqInfo := api.GetReqInfo(ctx)
+    n.log.Debug("upload zero part",
+        zap.String("reqId", reqInfo.RequestID),
+        zap.String("bucket", bktInfo.Name), zap.Stringer("cid", bktInfo.CID),
+        zap.String("multipart upload", id.String()),
+        zap.Int("part number", 0), zap.String("object", p.Key), zap.Stringer("oid", id))
+
+    partInfo := &data.PartInfo{
+        Key: p.Key,
+        // UploadID equals zero part ID intentionally.
+        UploadID: id.String(),
+        Number:   0,
+        OID:      id,
+        Size:     0,
+        ETag:     hex.EncodeToString(currentPartHash.Sum(nil)),
+        Created:  prm.CreationTime,
+        Elements: elements,
+    }
+
+    // encoding hash.Hash state to save it in tree service.
+    // the required interface is guaranteed according to the docs, so just cast without checks.
+    binaryMarshaler := multipartHash.(encoding.BinaryMarshaler)
+    partInfo.MultipartHash, err = binaryMarshaler.MarshalBinary()
+    if err != nil {
+        return nil, fmt.Errorf("marshalBinary: %w", err)
+    }
+
+    if tzHash != nil {
+        binaryMarshaler = tzHash.(encoding.BinaryMarshaler)
+        partInfo.HomoHash, err = binaryMarshaler.MarshalBinary()
+
+        if err != nil {
+            return nil, fmt.Errorf("marshalBinary: %w", err)
+        }
+    }
+
+    return partInfo, nil
+}
+
+func (n *layer) finalizeZeroPart(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64, partInfo *data.PartInfo) error {
+    oldPartID, err := n.treeService.AddPart(ctx, bktInfo, nodeID, partInfo)
+    oldPartIDNotFound := errors.Is(err, ErrNoNodeToRemove)
+    if err != nil && !oldPartIDNotFound {
+        return err
+    }
+
+    if !oldPartIDNotFound {
+        if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
+            n.log.Error("couldn't delete old part object", zap.Error(err),
+                zap.String("cnrID", bktInfo.CID.EncodeToString()),
+                zap.String("bucket name", bktInfo.Name),
+                zap.String("objID", oldPartID.EncodeToString()))
+        }
+    }
+
+    return nil
+}
+
 func (n *layer) reUploadFollowingParts(ctx context.Context, uploadParams UploadPartParams, partID int, bktInfo *data.BucketInfo, multipartInfo *data.MultipartInfo) error {
     parts, err := n.treeService.GetPartsAfter(ctx, bktInfo, multipartInfo.ID, partID)
     if err != nil {
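finalizeZeroPart tolerates re-registration of part 0: AddPart either stores a fresh record (reported via ErrNoNodeToRemove, meaning nothing was replaced) or replaces an existing one, and in the latter case the displaced object is deleted on a best-effort basis, with a failure only logged. A sketch of that replace-then-cleanup pattern, using illustrative interfaces and an error value standing in for the gateway's tree service and ErrNoNodeToRemove:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
)

// errNoNodeToRemove stands in for the tree service's ErrNoNodeToRemove:
// AddPart created a fresh record and nothing was replaced.
var errNoNodeToRemove = errors.New("no node to remove")

type partStore interface {
	// AddPart stores the part record and returns the ID of the object it
	// displaced, or errNoNodeToRemove if there was none.
	AddPart(ctx context.Context, nodeID uint64, uploadID string) (oldObjectID string, err error)
	DeleteObject(ctx context.Context, objectID string) error
}

func finalizeZeroPart(ctx context.Context, store partStore, nodeID uint64, uploadID string) error {
	oldID, err := store.AddPart(ctx, nodeID, uploadID)
	notFound := errors.Is(err, errNoNodeToRemove)
	if err != nil && !notFound {
		return err // real failure
	}

	if !notFound {
		// Best effort: the stale zero-part object is garbage, but failing to
		// delete it must not fail the whole CreateMultipartUpload call.
		if err := store.DeleteObject(ctx, oldID); err != nil {
			log.Printf("couldn't delete old part object %s: %v", oldID, err)
		}
	}
	return nil
}

type memStore struct{ existing string }

func (m *memStore) AddPart(_ context.Context, _ uint64, uploadID string) (string, error) {
	if m.existing == "" {
		m.existing = uploadID
		return "", errNoNodeToRemove
	}
	old := m.existing
	m.existing = uploadID
	return old, nil
}

func (m *memStore) DeleteObject(context.Context, string) error { return nil }

func main() {
	s := &memStore{}
	fmt.Println(finalizeZeroPart(context.Background(), s, 1, "zero-part-A")) // nothing replaced
	fmt.Println(finalizeZeroPart(context.Background(), s, 1, "zero-part-B")) // old object cleaned up
}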
@@ -535,8 +646,21 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
     var encMultipartObjectSize uint64
     var lastPartID int
     var completedPartsHeader strings.Builder
+    var splitFirstID oid.ID
+
+    if err = splitFirstID.DecodeString(multipartInfo.UploadID); err != nil {
+        return nil, nil, fmt.Errorf("decode splitFirstID from UploadID :%w", err)
+    }
+
+    // +1 is the zero part, it equals to the uploadID.
     // +1 is the last part, it will be created later in the code.
-    var measuredObjects = make([]object.MeasuredObject, 0, len(p.Parts)+1)
+    var measuredObjects = make([]object.MeasuredObject, 0, len(p.Parts)+2)
+
+    // user know nothing about zero part, we have to add this part manually.
+    var zeroObject object.MeasuredObject
+    zeroObject.SetObjectID(splitFirstID)
+    measuredObjects = append(measuredObjects, zeroObject)
+
     for i, part := range p.Parts {
         partInfo := partsInfo[part.PartNumber]
         if partInfo == nil || part.ETag != partInfo.ETag {
@@ -580,15 +704,13 @@ func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipar
     multipartHash := sha256.New()
     var homoHash hash.Hash
     var splitPreviousID oid.ID
-    var splitFirstID oid.ID

     if lastPartID > 0 {
         lastPart := partsInfo[lastPartID]

         if lastPart != nil {
             if len(lastPart.MultipartHash) > 0 {
                 splitPreviousID = lastPart.OID
-                splitFirstID = lastPart.FirstSplitOID

                 if len(lastPart.MultipartHash) > 0 {
                     binaryUnmarshaler := multipartHash.(encoding.BinaryUnmarshaler)
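CompleteMultipartUpload now also splices in the part the client never uploaded: the zero part's OID is decoded from the upload ID and prepended to the measured-object list, hence the capacity of len(p.Parts)+2, and the local splitFirstID no longer comes from the last part's record. A small hedged sketch of building such a list with the NeoFS SDK types used above (object.MeasuredObject, oid.ID); the helper name and the SetSHA256 call are illustrative assumptions, not gateway code:

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/nspcc-dev/neofs-sdk-go/object"
	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
)

// buildMeasuredObjects prepends the hidden zero part (identified by the
// upload ID) to the user-visible parts of the upload.
func buildMeasuredObjects(uploadID string, partIDs []oid.ID) ([]object.MeasuredObject, error) {
	var splitFirstID oid.ID
	if err := splitFirstID.DecodeString(uploadID); err != nil {
		return nil, fmt.Errorf("decode splitFirstID from UploadID: %w", err)
	}

	// +1 for the zero part, +1 for the trailing part the gateway creates itself.
	measured := make([]object.MeasuredObject, 0, len(partIDs)+2)

	var zeroObject object.MeasuredObject
	zeroObject.SetObjectID(splitFirstID)
	measured = append(measured, zeroObject)

	for _, id := range partIDs {
		var m object.MeasuredObject
		m.SetObjectID(id)
		measured = append(measured, m)
	}
	return measured, nil
}

func main() {
	// Fabricate an ID only to exercise the round trip.
	var zeroPartID oid.ID
	zeroPartID.SetSHA256(sha256.Sum256([]byte("zero part")))

	measured, err := buildMeasuredObjects(zeroPartID.EncodeToString(), nil)
	fmt.Println(len(measured), err) // expect: 1 <nil>
}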

api/layer/tree_mock.go

+3 -3
@@ -276,13 +276,13 @@ func (t *TreeServiceMock) GetAllVersionsByPrefix(_ context.Context, bktInfo *dat
     return result, nil
 }

-func (t *TreeServiceMock) CreateMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, info *data.MultipartInfo) error {
+func (t *TreeServiceMock) CreateMultipartUpload(_ context.Context, bktInfo *data.BucketInfo, info *data.MultipartInfo) (uint64, error) {
     cnrMultipartsMap, ok := t.multiparts[bktInfo.CID.EncodeToString()]
     if !ok {
         t.multiparts[bktInfo.CID.EncodeToString()] = map[string][]*data.MultipartInfo{
             info.Key: {info},
         }
-        return nil
+        return 0, nil
     }

     multiparts := cnrMultipartsMap[info.Key]
@@ -291,7 +291,7 @@ func (t *TreeServiceMock) CreateMultipartUpload(_ context.Context, bktInfo *data
     }
     cnrMultipartsMap[info.Key] = append(multiparts, info)

-    return nil
+    return info.ID, nil
 }

 func (t *TreeServiceMock) GetMultipartUploadsByPrefix(_ context.Context, _ *data.BucketInfo, _ string) ([]*data.MultipartInfo, error) {

api/layer/tree_service.go

+1 -1
@@ -63,7 +63,7 @@ type TreeService interface {
     PutLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64, lock *data.LockInfo) error
     GetLock(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64) (*data.LockInfo, error)

-    CreateMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, info *data.MultipartInfo) error
+    CreateMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, info *data.MultipartInfo) (uint64, error)
     DeleteMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64) error
     GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.MultipartInfo, error)
     GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error)
