@@ -1,18 +1,7 @@
 'use strict';
 
-// Most platforms don't allow reads or writes >= 2 GB.
-// See https://github.com/libuv/libuv/pull/1501.
-const kIoMaxLength = 2 ** 31 - 1;
-
-// Note: This is different from kReadFileBufferLength used for non-promisified
-// fs.readFile.
-const kReadFileMaxChunkSize = 2 ** 14;
-const kWriteFileMaxChunkSize = 2 ** 14;
-
-// 2 ** 32 - 1
-const kMaxUserId = 4294967295;
-
 const {
+  ArrayPrototypePush,
   Error,
   MathMax,
   MathMin,
@@ -44,6 +33,13 @@ const {
 const { isArrayBufferView } = require('internal/util/types');
 const { rimrafPromises } = require('internal/fs/rimraf');
 const {
+  constants: {
+    kIoMaxLength,
+    kMaxUserId,
+    kReadFileBufferLength,
+    kReadFileUnknownBufferLength,
+    kWriteFileMaxChunkSize,
+  },
   copyObject,
   getDirents,
   getOptions,
@@ -296,24 +292,46 @@ async function readFileHandle(filehandle, options) {
   if (size > kIoMaxLength)
     throw new ERR_FS_FILE_TOO_LARGE(size);
 
-  const chunks = [];
-  const chunkSize = size === 0 ?
-    kReadFileMaxChunkSize :
-    MathMin(size, kReadFileMaxChunkSize);
   let endOfFile = false;
+  let totalRead = 0;
+  const noSize = size === 0;
+  const buffers = [];
+  const fullBuffer = noSize ? undefined : Buffer.allocUnsafeSlow(size);
   do {
     if (signal && signal.aborted) {
       throw lazyDOMException('The operation was aborted', 'AbortError');
     }
-    const buf = Buffer.alloc(chunkSize);
-    const { bytesRead, buffer } =
-      await read(filehandle, buf, 0, chunkSize, -1);
-    endOfFile = bytesRead === 0;
-    if (bytesRead > 0)
-      chunks.push(buffer.slice(0, bytesRead));
+    let buffer;
+    let offset;
+    let length;
+    if (noSize) {
+      buffer = Buffer.allocUnsafeSlow(kReadFileUnknownBufferLength);
+      offset = 0;
+      length = kReadFileUnknownBufferLength;
+    } else {
+      buffer = fullBuffer;
+      offset = totalRead;
+      length = MathMin(size - totalRead, kReadFileBufferLength);
+    }
+
+    const bytesRead = (await binding.read(filehandle.fd, buffer, offset,
+                                          length, -1, kUsePromises)) || 0;
+    totalRead += bytesRead;
+    endOfFile = bytesRead === 0 || totalRead === size;
+    if (noSize && bytesRead > 0) {
+      const isBufferFull = bytesRead === kReadFileUnknownBufferLength;
+      const chunkBuffer = isBufferFull ? buffer : buffer.slice(0, bytesRead);
+      ArrayPrototypePush(buffers, chunkBuffer);
+    }
   } while (!endOfFile);
 
-  const result = chunks.length === 1 ? chunks[0] : Buffer.concat(chunks);
+  let result;
+  if (size > 0) {
+    result = totalRead === size ? fullBuffer : fullBuffer.slice(0, totalRead);
+  } else {
+    result = buffers.length === 1 ? buffers[0] : Buffer.concat(buffers,
+                                                               totalRead);
+  }
 
   return options.encoding ? result.toString(options.encoding) : result;
 }
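The reshaped loop is easier to follow when approximated with the public FileHandle API: when fstat reports a nonzero size, a single buffer of exactly that size is allocated up front and filled in place, so the common case needs no chunk list and no final Buffer.concat copy; when the size is unknown (size === 0, as for pipes or procfs files), fixed-size chunks are collected and concatenated once at the end. The sketch below is a minimal approximation of that strategy, not the internal implementation; readWholeFile and the two chunk-size constants are illustrative stand-ins rather than the real kReadFileBufferLength / kReadFileUnknownBufferLength values imported from internal/fs/utils above.

'use strict';
const { open } = require('fs/promises');

// Illustrative chunk sizes only; the actual values come from the
// internal/fs/utils constants imported in the diff above.
const CHUNK_KNOWN_SIZE = 512 * 1024;
const CHUNK_UNKNOWN_SIZE = 64 * 1024;

// Hypothetical helper mirroring the strategy of the new readFileHandle.
async function readWholeFile(path) {
  const handle = await open(path, 'r');
  try {
    const { size } = await handle.stat();
    if (size > 0) {
      // Size known up front: allocate once and read directly into place.
      const result = Buffer.allocUnsafeSlow(size);
      let totalRead = 0;
      while (totalRead < size) {
        const length = Math.min(size - totalRead, CHUNK_KNOWN_SIZE);
        const { bytesRead } = await handle.read(result, totalRead, length, null);
        if (bytesRead === 0) break;  // file shrank while reading
        totalRead += bytesRead;
      }
      return totalRead === size ? result : result.slice(0, totalRead);
    }
    // Size unknown (pipes, /proc files, ...): collect chunks, concat once.
    const chunks = [];
    let totalRead = 0;
    for (;;) {
      const chunk = Buffer.allocUnsafeSlow(CHUNK_UNKNOWN_SIZE);
      const { bytesRead } = await handle.read(chunk, 0, CHUNK_UNKNOWN_SIZE, null);
      if (bytesRead === 0) break;
      totalRead += bytesRead;
      chunks.push(bytesRead === CHUNK_UNKNOWN_SIZE ? chunk : chunk.slice(0, bytesRead));
    }
    return Buffer.concat(chunks, totalRead);
  } finally {
    await handle.close();
  }
}

The design point mirrored here is that reading directly into one preallocated buffer removes the per-chunk allocations and the extra copy in Buffer.concat for files whose size is known in advance; the chunked path survives only for stat sizes of zero, where the final length cannot be predicted.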