@@ -158,38 +158,57 @@ void NodeTraceWriter::Flush(bool blocking) {
 
 void NodeTraceWriter::WriteToFile(std::string&& str, int highest_request_id) {
   if (fd_ == -1) return;
-  WriteRequest* write_req = new WriteRequest();
-  write_req->str = std::move(str);
-  write_req->writer = this;
-  write_req->highest_request_id = highest_request_id;
-  uv_buf_t uv_buf = uv_buf_init(const_cast<char*>(write_req->str.c_str()),
-      write_req->str.length());
-  request_mutex_.Lock();
-  // Manage a queue of WriteRequest objects because the behavior of uv_write is
-  // undefined if the same WriteRequest object is used more than once
-  // between WriteCb calls. In addition, this allows us to keep track of the id
-  // of the latest write request that actually been completed.
-  write_req_queue_.push(write_req);
-  request_mutex_.Unlock();
-  int err = uv_fs_write(tracing_loop_, reinterpret_cast<uv_fs_t*>(write_req),
-      fd_, &uv_buf, 1, -1, WriteCb);
+
+  uv_buf_t buf = uv_buf_init(nullptr, 0);
+  {
+    Mutex::ScopedLock lock(request_mutex_);
+    write_req_queue_.emplace(WriteRequest {
+      std::move(str), highest_request_id
+    });
+    if (write_req_queue_.size() == 1) {
+      buf = uv_buf_init(
+          const_cast<char*>(write_req_queue_.front().str.c_str()),
+          write_req_queue_.front().str.length());
+    }
+  }
+  // Only one write request for the same file descriptor should be active at
+  // a time.
+  if (buf.base != nullptr && fd_ != -1) {
+    StartWrite(buf);
+  }
+}
+
+void NodeTraceWriter::StartWrite(uv_buf_t buf) {
+  int err = uv_fs_write(
+      tracing_loop_, &write_req_, fd_, &buf, 1, -1,
+      [](uv_fs_t* req) {
+        NodeTraceWriter* writer =
+            ContainerOf(&NodeTraceWriter::write_req_, req);
+        writer->AfterWrite();
+      });
   CHECK_EQ(err, 0);
 }
 
-void NodeTraceWriter::WriteCb(uv_fs_t* req) {
-  WriteRequest* write_req = ContainerOf(&WriteRequest::req, req);
-  CHECK_GE(write_req->req.result, 0);
+void NodeTraceWriter::AfterWrite() {
+  CHECK_GE(write_req_.result, 0);
+  uv_fs_req_cleanup(&write_req_);
 
-  NodeTraceWriter* writer = write_req->writer;
-  int highest_request_id = write_req->highest_request_id;
+  uv_buf_t buf = uv_buf_init(nullptr, 0);
   {
-    Mutex::ScopedLock scoped_lock(writer->request_mutex_);
-    CHECK_EQ(write_req, writer->write_req_queue_.front());
-    writer->write_req_queue_.pop();
-    writer->highest_request_id_completed_ = highest_request_id;
-    writer->request_cond_.Broadcast(scoped_lock);
+    Mutex::ScopedLock scoped_lock(request_mutex_);
+    int highest_request_id = write_req_queue_.front().highest_request_id;
+    write_req_queue_.pop();
+    highest_request_id_completed_ = highest_request_id;
+    request_cond_.Broadcast(scoped_lock);
+    if (!write_req_queue_.empty()) {
+      buf = uv_buf_init(
+          const_cast<char*>(write_req_queue_.front().str.c_str()),
+          write_req_queue_.front().str.length());
+    }
+  }
+  if (buf.base != nullptr && fd_ != -1) {
+    StartWrite(buf);
   }
-  delete write_req;
 }
 
 // static
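
The change replaces per-write heap-allocated WriteRequest objects with a mutex-guarded queue and guarantees that at most one uv_fs_write() is in flight per file descriptor: a write is started only when the queue goes from empty to non-empty, and each completion chains the next queued request. Below is a minimal standalone sketch of that pattern, with std::mutex/std::condition_variable standing in for node's Mutex and ConditionVariable, a synchronous loop standing in for the uv_fs_write()/AfterWrite() completion chain, and hypothetical names (QueuedWriter, Write, WaitFor) throughout:

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <queue>
#include <string>

class QueuedWriter {
 public:
  void Write(std::string&& str, int highest_request_id) {
    bool start = false;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      queue_.push({std::move(str), highest_request_id});
      // Start a write only if none is in flight; otherwise the completion
      // handler of the in-flight write will pick this request up, as
      // AfterWrite() does above.
      start = queue_.size() == 1;
    }
    if (start) StartWrite();
  }

  // Block until the request carrying `request_id` has been written out,
  // mirroring what Flush() waits for via request_cond_.
  void WaitFor(int request_id) {
    std::unique_lock<std::mutex> lock(mutex_);
    cond_.wait(lock, [&] { return completed_ >= request_id; });
  }

 private:
  struct Request {
    std::string str;
    int highest_request_id;
  };

  // Stands in for uv_fs_write() plus the chained completions; here the
  // "asynchronous" write completes immediately and drains the queue.
  void StartWrite() {
    for (;;) {
      Request req;
      {
        std::lock_guard<std::mutex> lock(mutex_);
        if (queue_.empty()) return;
        // The front element stays queued while "in flight", so a concurrent
        // Write() sees size() > 1 and does not start a second writer. (The
        // real code keeps the front's string alive for the same reason: the
        // uv_buf_t points into it until the write completes.)
        req = std::move(queue_.front());
      }
      std::cout << "write: " << req.str << '\n';  // the actual I/O
      {
        std::lock_guard<std::mutex> lock(mutex_);
        completed_ = req.highest_request_id;
        queue_.pop();
      }
      cond_.notify_all();
    }
  }

  std::mutex mutex_;
  std::condition_variable cond_;
  std::queue<Request> queue_;
  int completed_ = -1;
};

int main() {
  QueuedWriter w;
  w.Write("first chunk", 1);
  w.Write("second chunk", 2);
  w.WaitFor(2);
}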
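
StartWrite()'s completion lambda receives only the uv_fs_t* and uses ContainerOf() (from node's src/util.h) to get back to the NodeTraceWriter that embeds write_req_ as a member; the lambda can serve as a uv_fs_cb because it is captureless and so converts to a plain function pointer. A simplified stand-in for that helper, with hypothetical FsRequest/TraceWriter types in place of uv_fs_t and NodeTraceWriter:

#include <cassert>
#include <cstdint>

// Simplified stand-in for node's ContainerOf(): given a pointer-to-member
// and a pointer to that member inside some object, recover a pointer to the
// containing object.
template <typename Inner, typename Outer>
Outer* ContainerOf(Inner Outer::*field, Inner* pointer) {
  // Byte offset of `field` within Outer, computed by "dereferencing" the
  // pointer-to-member against a null Outer*.
  const auto offset = reinterpret_cast<std::uintptr_t>(
      &(static_cast<Outer*>(nullptr)->*field));
  return reinterpret_cast<Outer*>(
      reinterpret_cast<std::uintptr_t>(pointer) - offset);
}

struct FsRequest { long result; };  // stands in for uv_fs_t

struct TraceWriter {                // stands in for NodeTraceWriter
  int fd = -1;
  FsRequest write_req;              // embedded, like write_req_ above
};

int main() {
  TraceWriter writer;
  // A completion callback is handed only the request pointer, as in
  // StartWrite()'s lambda.
  FsRequest* req = &writer.write_req;
  assert(ContainerOf(&TraceWriter::write_req, req) == &writer);
}

This only works because write_req_ is now a member of the writer rather than a per-write heap allocation, which is also what lets the old `delete write_req` disappear.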