Commit 03d7e3f

http2: prevent large writes from timing out
When writing a large chunk of data in http2, once the data is handed off to C++, the JS session & stream lose all track of the write and will time out if the write doesn't complete within the timeout window. Fix this issue by tracking whether a write request is ongoing and also tracking how many chunks have been sent since the most recent write started. (Since each write call resets the timer.)
1 parent a051ccc commit 03d7e3f

6 files changed (+262 -4 lines)
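At a high level, the fix pairs a JS-side byte counter (writeQueueSize, kept on both the stream and session state) with a native chunk counter (chunksSentSinceLastWrite) that the timeout handler can refresh on demand: a timeout is deferred only while bytes are queued and chunks have actually gone out since the last check. A minimal standalone sketch of that pattern follows; the handle object is a stand-in for the C++ session, and everything except the two names taken from the diff is illustrative:

'use strict';

// Stand-in for the C++ session handle (illustrative). The real native
// side increments an internal counter in Send() each time a chunk is
// handed to libuv; updateChunksSent() latches that counter onto the JS
// object and returns it.
const handle = {
  chunksSentSinceLastWrite: 0, // value latched at the previous check
  _internalCounter: 0,         // bumped by the (simulated) native Send()
  updateChunksSent() {
    this.chunksSentSinceLastWrite = this._internalCounter;
    return this._internalCounter;
  }
};

let writeQueueSize = 0;        // bytes handed to C++ but not yet flushed

function onTimeout() {
  // Defer the timeout only while a write is outstanding AND chunks have
  // gone out since the last check; the left operand is read before
  // updateChunksSent() overwrites it.
  if (writeQueueSize > 0 &&
      handle.chunksSentSinceLastWrite !== handle.updateChunksSent()) {
    console.log('write in progress, deferring timeout');
    return;
  }
  console.log('no progress made, emitting timeout');
}

writeQueueSize = 3000000;      // a large write is submitted
handle._internalCounter = 25;  // native side has sent 25 chunks since then
onTimeout();                   // -> deferring (0 !== 25)
onTimeout();                   // -> timeout (25 === 25, stalled)

The same comparison appears in both _onTimeout implementations in the diff below.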

lib/internal/http2/core.js (+60 -4)
@@ -746,7 +746,8 @@ class Http2Session extends EventEmitter {
       shutdown: false,
       shuttingDown: false,
       pendingAck: 0,
-      maxPendingAck: Math.max(1, (options.maxPendingAck | 0) || 10)
+      maxPendingAck: Math.max(1, (options.maxPendingAck | 0) || 10),
+      writeQueueSize: 0
     };
 
     this[kType] = type;
@@ -1080,6 +1081,20 @@
   }
 
   _onTimeout() {
+    // This checks whether a write is currently in progress and also whether
+    // that write is actually sending data across the wire. The
+    // `chunksSentSinceLastWrite` value stored on the kHandle is only updated
+    // when a timeout event fires, meaning that if a write is ongoing it
+    // should never equal the newly fetched, updated value.
+    if (this[kState].writeQueueSize > 0) {
+      const handle = this[kHandle];
+      if (handle !== undefined &&
+          handle.chunksSentSinceLastWrite !== handle.updateChunksSent()) {
+        _unrefActive(this);
+        return;
+      }
+    }
     process.nextTick(emit, this, 'timeout');
   }
 }
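A subtlety worth noting in the check above: handle.chunksSentSinceLastWrite !== handle.updateChunksSent() works because JavaScript evaluates the left operand before the call on the right, so the value latched at the previous check is read before updateChunksSent() overwrites it. A small self-contained demonstration, using a mock with the same observable contract (illustrative, not the real binding):

'use strict';
const assert = require('assert');

// Mock with the same observable contract as the native handle.
const handle = {
  chunksSentSinceLastWrite: 0,
  _counter: 0,
  updateChunksSent() {
    this.chunksSentSinceLastWrite = this._counter;
    return this._counter;
  }
};

handle._counter = 37; // chunks flushed since the write began

// The left argument reads the stale 0 before the call latches 37:
assert.notStrictEqual(handle.chunksSentSinceLastWrite,  // 0 (stale)
                      handle.updateChunksSent());       // 37 (fresh)

// With no further sends, the next check sees equal values -> stalled:
assert.strictEqual(handle.chunksSentSinceLastWrite,     // 37
                   handle.updateChunksSent());          // 37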
@@ -1199,12 +1214,26 @@ function createWriteReq(req, handle, data, encoding) {
   }
 }
 
+function trackWriteState(stream, bytes) {
+  const session = stream[kSession];
+  stream[kState].writeQueueSize += bytes;
+  session[kState].writeQueueSize += bytes;
+  session[kHandle].chunksSentSinceLastWrite = 0;
+}
+
 function afterDoStreamWrite(status, handle, req) {
   const session = handle[kOwner];
-  const stream = session[kState].streams.get(req.stream);
   _unrefActive(session);
-  if (stream !== undefined)
+
+  const state = session[kState];
+  const { bytes } = req;
+  state.writeQueueSize -= bytes;
+
+  const stream = state.streams.get(req.stream);
+  if (stream !== undefined) {
     _unrefActive(stream);
+    stream[kState].writeQueueSize -= bytes;
+  }
 
   if (typeof req.callback === 'function')
     req.callback();
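trackWriteState and afterDoStreamWrite form a matched pair: bytes are counted against both the stream's and the session's writeQueueSize when a write is submitted, and released when the native write completes, so a non-zero queue reliably means a write is outstanding. A sketch of that invariant (standalone; the state objects are illustrative stand-ins for stream[kState] and session[kState]):

'use strict';
const assert = require('assert');

// Illustrative stand-ins for session[kState] and stream[kState].
const sessionState = { writeQueueSize: 0 };
const streamState = { writeQueueSize: 0 };

// Mirrors trackWriteState(): count bytes in both places on submission.
function trackWriteState(bytes) {
  streamState.writeQueueSize += bytes;
  sessionState.writeQueueSize += bytes;
  // (the real helper also resets handle.chunksSentSinceLastWrite to 0)
}

// Mirrors afterDoStreamWrite(): release the same byte count on completion.
function afterWrite(bytes) {
  sessionState.writeQueueSize -= bytes;
  streamState.writeQueueSize -= bytes;
}

trackWriteState(3000000);
assert.strictEqual(sessionState.writeQueueSize, 3000000); // write pending
afterWrite(3000000);
assert.strictEqual(sessionState.writeQueueSize, 0);
// Back to zero: _onTimeout stops deferring, so an idle connection
// still times out exactly as before the patch.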
@@ -1317,7 +1346,8 @@ class Http2Stream extends Duplex {
       headersSent: false,
       headRequest: false,
       aborted: false,
-      closeHandler: onSessionClose.bind(this)
+      closeHandler: onSessionClose.bind(this),
+      writeQueueSize: 0
     };
 
     this.once('ready', streamOnceReady);
@@ -1364,6 +1394,21 @@
   }
 
   _onTimeout() {
+    // This checks whether a write is currently in progress and also whether
+    // that write is actually sending data across the wire. The
+    // `chunksSentSinceLastWrite` value stored on the kHandle is only updated
+    // when a timeout event fires, meaning that if a write is ongoing it
+    // should never equal the newly fetched, updated value.
+    if (this[kState].writeQueueSize > 0) {
+      const handle = this[kSession][kHandle];
+      if (handle !== undefined &&
+          handle.chunksSentSinceLastWrite !== handle.updateChunksSent()) {
+        _unrefActive(this);
+        _unrefActive(this[kSession]);
+        return;
+      }
+    }
     process.nextTick(emit, this, 'timeout');
   }

@@ -1416,6 +1461,7 @@
     const err = createWriteReq(req, handle, data, encoding);
     if (err)
       throw util._errnoException(err, 'write', req.error);
+    trackWriteState(this, req.bytes);
   }
 
   _writev(data, cb) {
@@ -1444,6 +1490,7 @@
     const err = handle.writev(req, chunks);
     if (err)
       throw util._errnoException(err, 'write', req.error);
+    trackWriteState(this, req.bytes);
   }
 
   _read(nread) {
@@ -1537,6 +1584,10 @@
       return;
     }
 
+    const state = this[kState];
+    session[kState].writeQueueSize -= state.writeQueueSize;
+    state.writeQueueSize = 0;
+
     const server = session[kServer];
     if (server !== undefined && err) {
       server.emit('streamError', err, this);
@@ -1631,7 +1682,12 @@ function processRespondWithFD(fd, headers, offset = 0, length = -1,
     if (ret < 0) {
       err = new NghttpError(ret);
       process.nextTick(emit, this, 'error', err);
+      break;
     }
+    // exact length of the file doesn't matter here, since the
+    // stream is closing anyway - just use 1 to signify that
+    // a write does exist
+    trackWriteState(this, 1);
   }
 }
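The trackWriteState(this, 1) placeholder above works because _onTimeout only ever tests writeQueueSize > 0, never the magnitude, and the destroy hunk earlier in this file subtracts the stream's entire writeQueueSize from the session when the stream goes away. A tiny illustration of that reasoning (illustrative state object):

// _onTimeout's gate is boolean in nature: any positive writeQueueSize
// defers to the chunk-progress check.
const state = { writeQueueSize: 0 };

state.writeQueueSize += 1;              // trackWriteState(this, 1)
console.log(state.writeQueueSize > 0);  // true: chunk check now applies

// On stream close, the destroy path subtracts the stream's whole
// writeQueueSize from the session, so the placeholder never leaks.
state.writeQueueSize = 0;
console.log(state.writeQueueSize > 0);  // false: timeouts behave normally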

src/env.h (+1)
@@ -111,6 +111,7 @@ class ModuleWrap;
   V(callback_string, "callback") \
   V(change_string, "change") \
   V(channel_string, "channel") \
+  V(chunks_sent_since_last_write_string, "chunksSentSinceLastWrite") \
   V(constants_string, "constants") \
   V(oncertcb_string, "oncertcb") \
   V(onclose_string, "_onclose") \

src/node_http2.cc (+24)
@@ -603,6 +603,8 @@ void Http2Session::SubmitFile(const FunctionCallbackInfo<Value>& args) {
     return args.GetReturnValue().Set(NGHTTP2_ERR_INVALID_STREAM_ID);
   }
 
+  session->chunks_sent_since_last_write_ = 0;
+
   Headers list(isolate, context, headers);
 
   args.GetReturnValue().Set(stream->SubmitFile(fd, *list, list.length(),
@@ -757,6 +759,23 @@ void Http2Session::FlushData(const FunctionCallbackInfo<Value>& args) {
   stream->FlushDataChunks();
 }
 
+void Http2Session::UpdateChunksSent(const FunctionCallbackInfo<Value>& args) {
+  Http2Session* session;
+  Environment* env = Environment::GetCurrent(args);
+  Isolate* isolate = env->isolate();
+  ASSIGN_OR_RETURN_UNWRAP(&session, args.Holder());
+
+  HandleScope scope(isolate);
+
+  uint32_t length = session->chunks_sent_since_last_write_;
+
+  session->object()->Set(env->context(),
+                         env->chunks_sent_since_last_write_string(),
+                         Integer::NewFromUnsigned(isolate, length)).FromJust();
+
+  args.GetReturnValue().Set(length);
+}
+
 void Http2Session::SubmitPushPromise(const FunctionCallbackInfo<Value>& args) {
   Http2Session* session;
   Environment* env = Environment::GetCurrent(args);
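Together with the two hunks below, the native counter ends up with three touch points: DoWrite() and SubmitFile() reset it when a new write begins, Send() increments it for every chunk pushed toward the socket, and UpdateChunksSent() publishes the current value onto the JS wrapper and returns it. A compact JavaScript simulation of that lifecycle (illustrative only, not the real native code):

'use strict';

// Simulated lifecycle of chunks_sent_since_last_write_.
class NativeSession {
  constructor() {
    this.counter = 0;    // chunks_sent_since_last_write_
    this.published = 0;  // JS-visible chunksSentSinceLastWrite
  }
  doWrite() { this.counter = 0; }  // DoWrite()/SubmitFile(): new write
  send() { this.counter++; }       // Send(): one chunk toward the socket
  updateChunksSent() {             // UpdateChunksSent(): publish + return
    this.published = this.counter;
    return this.counter;
  }
}

const s = new NativeSession();
s.doWrite();
s.send(); s.send(); s.send();
console.log(s.updateChunksSent()); // 3: progress since the write began
s.doWrite();                       // a new write resets the count
console.log(s.counter);            // 0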
@@ -811,6 +830,8 @@ int Http2Session::DoWrite(WriteWrap* req_wrap,
     }
   }
 
+  chunks_sent_since_last_write_ = 0;
+
   nghttp2_stream_write_t* req = new nghttp2_stream_write_t;
   req->data = req_wrap;

@@ -846,6 +867,7 @@ void Http2Session::Send(uv_buf_t* buf, size_t length) {
                      this,
                      AfterWrite);
 
+  chunks_sent_since_last_write_++;
   uv_buf_t actual = uv_buf_init(buf->base, length);
   if (stream_->DoWrite(write_req, &actual, 1, nullptr)) {
     write_req->Dispose();
@@ -1255,6 +1277,8 @@ void Initialize(Local<Object> target,
                       Http2Session::DestroyStream);
   env->SetProtoMethod(session, "flushData",
                       Http2Session::FlushData);
+  env->SetProtoMethod(session, "updateChunksSent",
+                      Http2Session::UpdateChunksSent);
   StreamBase::AddMethods<Http2Session>(env, session,
                                        StreamBase::kFlagHasWritev |
                                        StreamBase::kFlagNoShutdown);

src/node_http2.h (+4)
@@ -474,6 +474,7 @@ class Http2Session : public AsyncWrap,
   static void SubmitGoaway(const FunctionCallbackInfo<Value>& args);
   static void DestroyStream(const FunctionCallbackInfo<Value>& args);
   static void FlushData(const FunctionCallbackInfo<Value>& args);
+  static void UpdateChunksSent(const FunctionCallbackInfo<Value>& args);
 
   template <get_setting fn>
   static void GetSettings(const FunctionCallbackInfo<Value>& args);
@@ -492,6 +493,9 @@
   StreamResource::Callback<StreamResource::ReadCb> prev_read_cb_;
   padding_strategy_type padding_strategy_ = PADDING_STRATEGY_NONE;
 
+  // use this to allow timeout tracking during long-lasting writes
+  uint32_t chunks_sent_since_last_write_ = 0;
+
   char stream_buf_[kAllocBufferSize];
 };
test/sequential/test-http2-timeout-large-write-file.js (new file, +89)

@@ -0,0 +1,89 @@
+'use strict';
+const common = require('../common');
+if (!common.hasCrypto)
+  common.skip('missing crypto');
+const assert = require('assert');
+const fixtures = require('../common/fixtures');
+const fs = require('fs');
+const http2 = require('http2');
+const path = require('path');
+
+common.refreshTmpDir();
+
+// This test assesses whether long-running writes can complete
+// or time out because the session or stream are not aware that the
+// backing stream is still writing.
+// To simulate a slow client, we write a really large chunk and
+// then proceed through the following cycle:
+// 1) Receive first 'data' event and record currently written size
+// 2) Once we've read up to currently written size recorded above,
+//    we pause the stream and wait longer than the server timeout
+// 3) Socket.prototype._onTimeout triggers and should confirm
+//    that the backing stream is still active and writing
+// 4) Our timer fires, we resume the socket and start at 1)
+
+const writeSize = 3000000;
+const minReadSize = 500000;
+const serverTimeout = common.platformTimeout(500);
+let offsetTimeout = common.platformTimeout(100);
+let didReceiveData = false;
+
+const content = Buffer.alloc(writeSize, 0x44);
+const filepath = path.join(common.tmpDir, 'http2-large-write.tmp');
+fs.writeFileSync(filepath, content, 'binary');
+const fd = fs.openSync(filepath, 'r');
+
+const server = http2.createSecureServer({
+  key: fixtures.readKey('agent1-key.pem'),
+  cert: fixtures.readKey('agent1-cert.pem')
+});
+server.on('stream', common.mustCall((stream) => {
+  stream.respondWithFD(fd, {
+    'Content-Type': 'application/octet-stream',
+    'Content-Length': content.length.toString(),
+    'Vary': 'Accept-Encoding'
+  });
+  stream.setTimeout(serverTimeout);
+  stream.on('timeout', () => {
+    assert.strictEqual(didReceiveData, false, 'Should not timeout');
+  });
+  stream.end();
+}));
+server.setTimeout(serverTimeout);
+server.on('timeout', () => {
+  assert.strictEqual(didReceiveData, false, 'Should not timeout');
+});
+
+server.listen(0, common.mustCall(() => {
+  const client = http2.connect(`https://localhost:${server.address().port}`,
+                               { rejectUnauthorized: false });
+
+  const req = client.request({ ':path': '/' });
+  req.end();
+
+  const resume = () => req.resume();
+  let receivedBufferLength = 0;
+  let firstReceivedAt;
+  req.on('data', common.mustCallAtLeast((buf) => {
+    if (receivedBufferLength === 0) {
+      didReceiveData = false;
+      firstReceivedAt = Date.now();
+    }
+    receivedBufferLength += buf.length;
+    if (receivedBufferLength >= minReadSize &&
+        receivedBufferLength < writeSize) {
+      didReceiveData = true;
+      receivedBufferLength = 0;
+      req.pause();
+      setTimeout(
+        resume,
+        serverTimeout + offsetTimeout - (Date.now() - firstReceivedAt)
+      );
+      offsetTimeout = 0;
+    }
+  }, 1));
+  req.on('end', common.mustCall(() => {
+    client.destroy();
+    server.close();
+  }));
+}));
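The setTimeout delay in this test is computed so that each pause, measured from the first 'data' event of the cycle, spans at least one full server timeout window; offsetTimeout adds slack on the first cycle only and is then zeroed. A worked example with the test's own constants and illustrative timestamps:

// Resume-delay arithmetic from the test above (illustrative values):
const serverTimeout = 500;   // common.platformTimeout(500)
const offsetTimeout = 100;   // extra slack, zeroed after the first cycle
const firstReceivedAt = 0;   // ms: first 'data' event of the cycle
const pausedAt = 150;        // ms: enough data read, stream paused

// Pause for 500 + 100 - 150 = 450ms, i.e. until 600ms after the first
// byte -- past the 500ms window, so the server would time out here
// unless it notices the in-flight write and defers.
const delay = serverTimeout + offsetTimeout - (pausedAt - firstReceivedAt);
console.log(delay); // 450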
test/sequential/test-http2-timeout-large-write.js (new file, +84)

@@ -0,0 +1,84 @@
+'use strict';
+const common = require('../common');
+if (!common.hasCrypto)
+  common.skip('missing crypto');
+const assert = require('assert');
+const fixtures = require('../common/fixtures');
+const http2 = require('http2');
+
+// This test assesses whether long-running writes can complete
+// or time out because the session or stream are not aware that the
+// backing stream is still writing.
+// To simulate a slow client, we write a really large chunk and
+// then proceed through the following cycle:
+// 1) Receive first 'data' event and record currently written size
+// 2) Once we've read up to currently written size recorded above,
+//    we pause the stream and wait longer than the server timeout
+// 3) Socket.prototype._onTimeout triggers and should confirm
+//    that the backing stream is still active and writing
+// 4) Our timer fires, we resume the socket and start at 1)
+
+const writeSize = 3000000;
+const minReadSize = 500000;
+const serverTimeout = common.platformTimeout(500);
+let offsetTimeout = common.platformTimeout(100);
+let didReceiveData = false;
+
+const server = http2.createSecureServer({
+  key: fixtures.readKey('agent1-key.pem'),
+  cert: fixtures.readKey('agent1-cert.pem')
+});
+server.on('stream', common.mustCall((stream) => {
+  const content = Buffer.alloc(writeSize, 0x44);
+
+  stream.respond({
+    'Content-Type': 'application/octet-stream',
+    'Content-Length': content.length.toString(),
+    'Vary': 'Accept-Encoding'
+  });
+
+  stream.write(content);
+  stream.setTimeout(serverTimeout);
+  stream.on('timeout', () => {
+    assert.strictEqual(didReceiveData, false, 'Should not timeout');
+  });
+  stream.end();
+}));
+server.setTimeout(serverTimeout);
+server.on('timeout', () => {
+  assert.strictEqual(didReceiveData, false, 'Should not timeout');
+});
+
+server.listen(0, common.mustCall(() => {
+  const client = http2.connect(`https://localhost:${server.address().port}`,
+                               { rejectUnauthorized: false });
+
+  const req = client.request({ ':path': '/' });
+  req.end();
+
+  const resume = () => req.resume();
+  let receivedBufferLength = 0;
+  let firstReceivedAt;
+  req.on('data', common.mustCallAtLeast((buf) => {
+    if (receivedBufferLength === 0) {
+      didReceiveData = false;
+      firstReceivedAt = Date.now();
+    }
+    receivedBufferLength += buf.length;
+    if (receivedBufferLength >= minReadSize &&
+        receivedBufferLength < writeSize) {
+      didReceiveData = true;
+      receivedBufferLength = 0;
+      req.pause();
+      setTimeout(
+        resume,
+        serverTimeout + offsetTimeout - (Date.now() - firstReceivedAt)
+      );
+      offsetTimeout = 0;
+    }
+  }, 1));
+  req.on('end', common.mustCall(() => {
+    client.destroy();
+    server.close();
+  }));
+}));
