package bbolt

import (
+	"bytes"
+	crand "crypto/rand"
+	"encoding/binary"
+	"fmt"
+	"math/rand"
	"path/filepath"
	"testing"
+	"unsafe"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"go.etcd.io/bbolt/errors"
+	"go.etcd.io/bbolt/internal/common"
+	"go.etcd.io/bbolt/internal/guts_cli"
)

func TestOpenWithPreLoadFreelist(t *testing.T) {
@@ -112,6 +120,168 @@ func TestMethodPage(t *testing.T) {
	}
}

+func TestTx_Check_CorruptPage_ViolateBtreeInvariant(t *testing.T) {
+	bucketKey := "testBucket"
+	pageSize := 4096
+
+	t.Log("Creating db file.")
+	db, err := Open(filepath.Join(t.TempDir(), "db"), 0600, &Options{PageSize: pageSize})
+	require.NoError(t, err)
+	defer func() {
+		require.NoError(t, db.Close())
+	}()
+
+	uErr := db.Update(func(tx *Tx) error {
+		t.Logf("Creating bucket '%v'.", bucketKey)
+		b, bErr := tx.CreateBucketIfNotExists([]byte(bucketKey))
+		require.NoError(t, bErr)
+		t.Logf("Generating random data in bucket '%v'.", bucketKey)
+		generateSampleDataInBucket(t, b, pageSize, 3)
+		return nil
+	})
+	require.NoError(t, uErr)
+
+	t.Logf("Corrupting random leaf page in bucket '%v'.", bucketKey)
+	victimPageId, validPageIds := corruptLeafPage(t, db, pageSize, false)
+
+	t.Log("Running consistency check.")
+	vErr := db.View(func(tx *Tx) error {
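+		// A non-zero checkConfig.pageId restricts tx.check to the subtree rooted
+		// at that page rather than checking the whole database.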
+		chkConfig := checkConfig{
+			kvStringer: HexKVStringer(),
+		}
+
+		t.Log("Check corrupted page.")
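+		// tx.check streams any inconsistencies onto ch and returns without closing it,
+		// so it runs in a goroutine that closes ch to terminate the range loop below.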
+		ch := make(chan error)
+		chkConfig.pageId = uint(victimPageId)
+		go func() {
+			defer close(ch)
+			tx.check(chkConfig, ch)
+		}()
+
+		var cErrs []error
+		for cErr := range ch {
+			cErrs = append(cErrs, cErr)
+		}
+		require.Greater(t, len(cErrs), 0)
+
+		t.Log("Check valid pages.")
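+		// Every untouched sibling page must still pass the check individually.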
+		cErrs = cErrs[:0]
+		for _, pgId := range validPageIds {
+			ch = make(chan error)
+			chkConfig.pageId = uint(pgId)
+			go func() {
+				defer close(ch)
+				tx.check(chkConfig, ch)
+			}()
+
+			for cErr := range ch {
+				cErrs = append(cErrs, cErr)
+			}
+			require.Equal(t, 0, len(cErrs))
+		}
+		return nil
+	})
+	require.NoError(t, vErr)
+}
+
+// corruptLeafPage writes an invalid leafPageElement into the victim page.
+func corruptLeafPage(t testing.TB, db *DB, pageSize int, expectPanic bool) (victimPageId common.Pgid, validPageIds []common.Pgid) {
+	t.Helper()
+
+	victimPageId, validPageIds = findVictimPageId(t, db)
+
+	victimPage, victimBuf, err := guts_cli.ReadPage(db.Path(), uint64(victimPageId))
+	require.NoError(t, err)
+	require.True(t, victimPage.IsLeafPage())
+	require.True(t, victimPage.Count() > 0)
+
+	// Overwrite part of the victim page with corruption bytes.
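+	// Offset 32 = the 16-byte page header plus the first 16-byte leafPageElement,
+	// so the corruption lands on the second element slot of the leaf page.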
+	copy(victimBuf[32:], generateCorruptionBytes(t, pageSize, expectPanic))
+	// Write the corrupt page back to the db file.
+	err = guts_cli.WritePage(db.Path(), victimBuf)
+	require.NoError(t, err)
+
+	return victimPageId, validPageIds
+}
+
+// findVictimPageId finds all the leaf pages of a bucket and picks a random leaf page as a victim to be corrupted.
+func findVictimPageId(t testing.TB, db *DB) (victimPageId common.Pgid, validPageIds []common.Pgid) {
+	t.Helper()
+	// Read DB's RootPage.
+	rootPageId, _, err := guts_cli.GetRootPage(db.Path())
+	require.NoError(t, err)
+	rootPage, _, err := guts_cli.ReadPage(db.Path(), uint64(rootPageId))
+	require.NoError(t, err)
+	require.True(t, rootPage.IsLeafPage())
+	require.Equal(t, 1, len(rootPage.LeafPageElements()))
+	// Find Bucket's RootPage.
+	lpe := rootPage.LeafPageElement(uint16(0))
+	require.Equal(t, uint32(common.BranchPageFlag), lpe.Flags())
+	k := lpe.Key()
+	require.Equal(t, "testBucket", string(k))
+	bucketRootPageId := lpe.Bucket().RootPage()
+	// Read Bucket's RootPage.
+	bucketRootPage, _, err := guts_cli.ReadPage(db.Path(), uint64(bucketRootPageId))
+	require.NoError(t, err)
+	require.Equal(t, uint16(common.BranchPageFlag), bucketRootPage.Flags())
+	// Retrieve Bucket's PageIds.
+	var bucketPageIds []common.Pgid
+	for _, bpe := range bucketRootPage.BranchPageElements() {
+		bucketPageIds = append(bucketPageIds, bpe.Pgid())
+	}
+
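+	// Pick one child page at random as the victim; the rest stay valid. The append
+	// below reuses bucketPageIds' backing array, which is safe because victimPageId
+	// is copied out first and bucketPageIds is not used afterwards.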
+	randomIdx := rand.Intn(len(bucketPageIds))
+	victimPageId = bucketPageIds[randomIdx]
+	validPageIds = append(bucketPageIds[:randomIdx], bucketPageIds[randomIdx+1:]...)
+	return victimPageId, validPageIds
+}
+
+// generateSampleDataInBucket fills the given bucket with sample data to create the given
+// number of leaf pages. To control the number of leaf pages, the sample data is generated in key order.
+func generateSampleDataInBucket(t testing.TB, bk *Bucket, pageSize int, lPages int) {
+	t.Helper()
+
+	maxBytesInPage := int(DefaultFillPercent * float32(pageSize))
+
+	currentKey := 1
+	currentVal := 100
+	for i := 0; i < lPages; i++ {
+		currentSize := common.PageHeaderSize
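+		// unsafe.Sizeof on the int counters only approximates the size of the short
+		// key/value strings; the aim is roughly one leaf page worth of Puts per outer
+		// iteration, not an exact byte count.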
+		for {
+			err := bk.Put([]byte(fmt.Sprintf("key_%d", currentKey)), []byte(fmt.Sprintf("val_%d", currentVal)))
+			require.NoError(t, err)
+			currentSize += common.LeafPageElementSize + unsafe.Sizeof(currentKey) + unsafe.Sizeof(currentVal)
+			if int(currentSize) >= maxBytesInPage {
+				break
+			}
+			currentKey++
+			currentVal++
+		}
+	}
+}
+
+// generateCorruptionBytes returns bytes used to corrupt a page.
+// If no panic is expected, it returns a page element which violates the btree key order.
+// Otherwise, it returns random bytes which cause a panic when the corrupted page is read.
+func generateCorruptionBytes(t testing.TB, pageSize int, expectPanic bool) []byte {
+	if expectPanic {
+		// The generated data length falls between minLen and the page body size
+		// (pageSize minus the page header).
+		maxLen := pageSize - int(common.PageHeaderSize)
+		minLen := 16
+		corruptDataLength := rand.Intn(maxLen-minLen) + minLen
+		corruptData := make([]byte, corruptDataLength)
+		_, err := crand.Read(corruptData)
+		require.NoError(t, err)
+		return corruptData
+	}
+	// Insert a LeafPageElement which violates the BTree key range.
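+	// An all-zero element resolves to an empty key, which breaks the ascending key
+	// order the consistency check expects within a leaf page.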
+	invalidLPE := common.NewLeafPageElement(0, 0, 0, 0)
+	var buf bytes.Buffer
+	err := binary.Write(&buf, binary.BigEndian, invalidLPE)
+	require.NoError(t, err)
+	return buf.Bytes()
+}
+
func prepareData(t *testing.T) (string, error) {
	fileName := filepath.Join(t.TempDir(), "db")
	db, err := Open(fileName, 0666, nil)