Skip to content

Commit 87ed960

Browse files
committed
[bulk] Fix being able to Decompress large payloads
Unlike Decompress, bulk.Decompress does not fall back to streaming decompression when plain decompression fails. #115 introduced a guard against allocating overly large buffers when the input zstd header is malicious. This had the side effect that for highly compressible payloads (compression ratio > 10x), the dst buffer was still resized too small and decompression would fail. This fixes the issue and adds a test.
1 parent ea68dca commit 87ed960

File tree

2 files changed

+28
-3
lines changed

2 files changed

+28
-3
lines changed

zstd_bulk.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -112,20 +112,20 @@ func (p *BulkProcessor) Decompress(dst, src []byte) ([]byte, error) {
112112

113113
contentSize := decompressSizeHint(src)
114114
if cap(dst) >= contentSize {
115-
dst = dst[0:contentSize]
115+
dst = dst[0:cap(dst)]
116116
} else {
117117
dst = make([]byte, contentSize)
118118
}
119119

120-
if contentSize == 0 {
120+
if len(dst) == 0 {
121121
return dst, nil
122122
}
123123

124124
dctx := C.ZSTD_createDCtx()
125125
cWritten := C.ZSTD_decompress_usingDDict(
126126
dctx,
127127
unsafe.Pointer(&dst[0]),
128-
C.size_t(contentSize),
128+
C.size_t(len(dst)),
129129
unsafe.Pointer(&src[0]),
130130
C.size_t(len(src)),
131131
p.dDict,

zstd_bullk_test.go

+25
Original file line numberDiff line numberDiff line change
@@ -216,6 +216,31 @@ func TestBulkCompressAndDecompressInReverseOrder(t *testing.T) {
216216
}
217217
}
218218

219+
func TestDecompressHighlyCompressable(t *testing.T) {
220+
p := newBulkProcessor(t, dict, BestSpeed)
221+
222+
// Generate a big payload
223+
msgSize := 10 * 1000 * 1000 // 10 MiB
224+
msg := make([]byte, msgSize)
225+
compressed, err := Compress(nil, msg)
226+
if err != nil {
227+
t.Error("failed to compress")
228+
}
229+
230+
// Regular decompression would trigger zipbomb prevention
231+
_, err = p.Decompress(nil, compressed)
232+
if !IsDstSizeTooSmallError(err) {
233+
t.Error("expected too small error")
234+
}
235+
236+
// Passing an output should suceed the decompression
237+
dst := make([]byte, 10*msgSize)
238+
_, err = p.Decompress(dst, compressed)
239+
if err != nil {
240+
t.Errorf("failed to decompress: %s", err)
241+
}
242+
}
243+
219244
// BenchmarkBulkCompress-8 780148 1505 ns/op 61.14 MB/s 208 B/op 5 allocs/op
220245
func BenchmarkBulkCompress(b *testing.B) {
221246
p := newBulkProcessor(b, dict, BestSpeed)

0 commit comments

Comments
 (0)