Skip to content

Commit

Permalink
s3: use the s3manager to write the lock file.
Browse files Browse the repository at this point in the history
When S3 Object Lock is enabled on a bucket with a retention period, S3 requires the Content-MD5 or x-amz-sdk-checksum-algorithm header for object uploads (via PutObject) to ensure data integrity during the upload process.

Terraform’s state writes to the S3 bucket already relied on the “uploader” from aws-sdk-go-v2, which automatically adds these required headers. However, the lock file implementation did not use the “uploader,” so its PutObject requests were missing the headers and conflicted with Object Lock requirements.

This commit updates the lock file implementation to use the “uploader,” ensuring the necessary headers are included in the requests, maintaining compatibility with Object Lock-enabled buckets.
  • Loading branch information
bschaatsbergen committed Nov 27, 2024
1 parent 31fdb1f commit b667207
Show file tree
Hide file tree
Showing 2 changed files with 47 additions and 3 deletions.
10 changes: 7 additions & 3 deletions internal/backend/remote-state/s3/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -346,18 +346,21 @@ func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) {
// exist, the operation will succeed, acquiring the lock. If the lock file already exists, the operation
// will fail due to a conditional write, indicating that the lock is already held by another Terraform client.
func (c *RemoteClient) lockWithFile(ctx context.Context, info *statemgr.LockInfo, log hclog.Logger) error {
lockFileJson, err := json.Marshal(info)
data, err := json.Marshal(info)
if err != nil {
return err
}

input := &s3.PutObjectInput{
ContentType: aws.String("application/json"),
Body: bytes.NewReader(lockFileJson),
Body: bytes.NewReader(data),
Bucket: aws.String(c.bucketName),
Key: aws.String(c.lockFilePath),
IfNoneMatch: aws.String("*"),
}
if !c.skipS3Checksum {
input.ChecksumAlgorithm = s3types.ChecksumAlgorithmSha256
}

if c.serverSideEncryption {
if c.kmsKeyID != "" {
Expand All @@ -378,7 +381,8 @@ func (c *RemoteClient) lockWithFile(ctx context.Context, info *statemgr.LockInfo

log.Debug("Uploading lock file")

_, err = c.s3Client.PutObject(ctx, input)
uploader := manager.NewUploader(c.s3Client, func(u *manager.Uploader) {})
_, err = uploader.Upload(ctx, input)
if err != nil {
// Attempt to retrieve lock info from the file, and merge errors if it fails.
lockInfo, infoErr := c.getLockInfoWithFile(ctx)
Expand Down
40 changes: 40 additions & 0 deletions internal/backend/remote-state/s3/client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -382,6 +382,46 @@ func TestRemoteClientPutLargeUploadWithObjectLock(t *testing.T) {
}
}

// TestRemoteClientObjectLockAndLockFile is an acceptance test verifying that
// state can be written to an Object Lock-enabled bucket while the S3 lock
// file mechanism (use_lockfile) is active.
func TestRemoteClientObjectLockAndLockFile(t *testing.T) {
	testACC(t)
	objectLockPreCheck(t)

	ctx := context.TODO()

	// Unique bucket name per run to avoid collisions between test executions.
	bucket := fmt.Sprintf("terraform-remote-s3-test-%x", time.Now().Unix())
	key := "testState"

	cfg := map[string]interface{}{
		"bucket":       bucket,
		"key":          key,
		"use_lockfile": true,
	}
	b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(cfg)).(*Backend)

	// Object Lock requires versioning; use COMPLIANCE retention mode so the
	// checksum-header requirement is exercised.
	createS3Bucket(ctx, t, b.s3Client, bucket, b.awsConfig.Region,
		s3BucketWithVersioning,
		s3BucketWithObjectLock(s3types.ObjectLockRetentionModeCompliance),
	)
	defer deleteS3Bucket(ctx, t, b.s3Client, bucket, b.awsConfig.Region)

	mgr, err := b.StateMgr(backend.DefaultStateName)
	if err != nil {
		t.Fatal(err)
	}
	client := mgr.(*remote.State).Client

	// Fill a buffer with exactly one upload part's worth of dummy bytes.
	var state bytes.Buffer
	if _, err = state.ReadFrom(io.LimitReader(neverEnding('x'), manager.DefaultUploadPartSize)); err != nil {
		t.Fatalf("writing dummy data: %s", err)
	}

	if err = client.Put(state.Bytes()); err != nil {
		t.Fatalf("putting data: %s", err)
	}
}

type neverEnding byte

func (b neverEnding) Read(p []byte) (n int, err error) {
Expand Down

0 comments on commit b667207

Please sign in to comment.