@@ -241,68 +241,111 @@ void uv_mutex_unlock(uv_mutex_t* mutex) {
int uv_rwlock_init(uv_rwlock_t* rwlock) {
-  uv__once_init();
+  /* Initialize the semaphore that acts as the write lock. */
+  HANDLE handle = CreateSemaphoreW(NULL, 1, 1, NULL);
+  if (handle == NULL)
+    return uv_translate_sys_error(GetLastError());
+  rwlock->state_.write_semaphore_ = handle;

-  if (HAVE_SRWLOCK_API())
-    return uv__rwlock_srwlock_init(rwlock);
-  else
-    return uv__rwlock_fallback_init(rwlock);
+  /* Initialize the critical section protecting the reader count. */
+  InitializeCriticalSection(&rwlock->state_.num_readers_lock_);
+
+  /* Initialize the reader count. */
+  rwlock->state_.num_readers_ = 0;
+
+  return 0;
}


void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
-  if (HAVE_SRWLOCK_API())
-    uv__rwlock_srwlock_destroy(rwlock);
-  else
-    uv__rwlock_fallback_destroy(rwlock);
+  DeleteCriticalSection(&rwlock->state_.num_readers_lock_);
+  CloseHandle(rwlock->state_.write_semaphore_);
}


void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
-  if (HAVE_SRWLOCK_API())
-    uv__rwlock_srwlock_rdlock(rwlock);
-  else
-    uv__rwlock_fallback_rdlock(rwlock);
+  /* Acquire the lock that protects the reader count. */
+  EnterCriticalSection(&rwlock->state_.num_readers_lock_);
+
+  /* Increase the reader count, and lock for write if this is the first
+   * reader.
+   */
+  if (++rwlock->state_.num_readers_ == 1) {
+    DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
+    if (r != WAIT_OBJECT_0)
+      uv_fatal_error(GetLastError(), "WaitForSingleObject");
+  }
+
+  /* Release the lock that protects the reader count. */
+  LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
}


int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
-  if (HAVE_SRWLOCK_API())
-    return uv__rwlock_srwlock_tryrdlock(rwlock);
-  else
-    return uv__rwlock_fallback_tryrdlock(rwlock);
+  int err;
+
+  if (!TryEnterCriticalSection(&rwlock->state_.num_readers_lock_))
+    return UV_EBUSY;
+
+  err = 0;
+
+  if (rwlock->state_.num_readers_ == 0) {
+    /* Currently there are no other readers, which means that the write lock
+     * needs to be acquired.
+     */
+    DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
+    if (r == WAIT_OBJECT_0)
+      rwlock->state_.num_readers_++;
+    else if (r == WAIT_TIMEOUT)
+      err = UV_EBUSY;
+    else if (r == WAIT_FAILED)
+      uv_fatal_error(GetLastError(), "WaitForSingleObject");
+
+  } else {
+    /* The write lock has already been acquired because there are other
+     * active readers.
+     */
+    rwlock->state_.num_readers_++;
+  }
+
+  LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
+  return err;
}


void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
-  if (HAVE_SRWLOCK_API())
-    uv__rwlock_srwlock_rdunlock(rwlock);
-  else
-    uv__rwlock_fallback_rdunlock(rwlock);
+  EnterCriticalSection(&rwlock->state_.num_readers_lock_);
+
+  if (--rwlock->state_.num_readers_ == 0) {
+    if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
+      uv_fatal_error(GetLastError(), "ReleaseSemaphore");
+  }
+
+  LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
}


void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
-  if (HAVE_SRWLOCK_API())
-    uv__rwlock_srwlock_wrlock(rwlock);
-  else
-    uv__rwlock_fallback_wrlock(rwlock);
+  DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
+  if (r != WAIT_OBJECT_0)
+    uv_fatal_error(GetLastError(), "WaitForSingleObject");
}


int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
-  if (HAVE_SRWLOCK_API())
-    return uv__rwlock_srwlock_trywrlock(rwlock);
+  DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
+  if (r == WAIT_OBJECT_0)
+    return 0;
+  else if (r == WAIT_TIMEOUT)
+    return UV_EBUSY;
  else
-    return uv__rwlock_fallback_trywrlock(rwlock);
+    uv_fatal_error(GetLastError(), "WaitForSingleObject");
}


void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
-  if (HAVE_SRWLOCK_API())
-    uv__rwlock_srwlock_wrunlock(rwlock);
-  else
-    uv__rwlock_fallback_wrunlock(rwlock);
+  if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
+    uv_fatal_error(GetLastError(), "ReleaseSemaphore");
}
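
Not part of the patch: the functions above implement the classic one-writer/many-readers pattern, with a binary semaphore as the write lock and a critical section guarding the reader count. Below is a minimal caller-side sketch of the resulting behaviour using only public libuv API; the reader/writer bodies and the shared_value counter are hypothetical illustrations.

#include <stdio.h>
#include <uv.h>

/* Illustrative shared state; not part of libuv. */
static uv_rwlock_t lock;
static int shared_value;

static void reader(void* arg) {
  (void) arg;
  uv_rwlock_rdlock(&lock);           /* many readers may hold this at once */
  printf("read %d\n", shared_value);
  uv_rwlock_rdunlock(&lock);
}

static void writer(void* arg) {
  (void) arg;
  uv_rwlock_wrlock(&lock);           /* waits until every reader has left */
  shared_value++;
  uv_rwlock_wrunlock(&lock);
}

int main(void) {
  uv_thread_t r1, r2, w;
  if (uv_rwlock_init(&lock) != 0)    /* 0 on success, a UV_E* code on failure */
    return 1;
  uv_thread_create(&r1, reader, NULL);
  uv_thread_create(&w, writer, NULL);
  uv_thread_create(&r2, reader, NULL);
  uv_thread_join(&r1);
  uv_thread_join(&w);
  uv_thread_join(&r2);
  uv_rwlock_destroy(&lock);
  return 0;
}

In the rewritten code the first rdlock acquires the write semaphore on behalf of all readers and the last rdunlock releases it, which is what lets the writer proceed.
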
@@ -347,157 +390,6 @@ int uv_sem_trywait(uv_sem_t* sem) {
}


-static int uv__rwlock_srwlock_init(uv_rwlock_t* rwlock) {
-  pInitializeSRWLock(&rwlock->srwlock_);
-  return 0;
-}
-
-
-static void uv__rwlock_srwlock_destroy(uv_rwlock_t* rwlock) {
-  (void) rwlock;
-}
-
-
-static void uv__rwlock_srwlock_rdlock(uv_rwlock_t* rwlock) {
-  pAcquireSRWLockShared(&rwlock->srwlock_);
-}
-
-
-static int uv__rwlock_srwlock_tryrdlock(uv_rwlock_t* rwlock) {
-  if (pTryAcquireSRWLockShared(&rwlock->srwlock_))
-    return 0;
-  else
-    return UV_EBUSY;  /* TODO(bnoordhuis) EAGAIN when owned by this thread. */
-}
-
-
-static void uv__rwlock_srwlock_rdunlock(uv_rwlock_t* rwlock) {
-  pReleaseSRWLockShared(&rwlock->srwlock_);
-}
-
-
-static void uv__rwlock_srwlock_wrlock(uv_rwlock_t* rwlock) {
-  pAcquireSRWLockExclusive(&rwlock->srwlock_);
-}
-
-
-static int uv__rwlock_srwlock_trywrlock(uv_rwlock_t* rwlock) {
-  if (pTryAcquireSRWLockExclusive(&rwlock->srwlock_))
-    return 0;
-  else
-    return UV_EBUSY;  /* TODO(bnoordhuis) EAGAIN when owned by this thread. */
-}
-
-
-static void uv__rwlock_srwlock_wrunlock(uv_rwlock_t* rwlock) {
-  pReleaseSRWLockExclusive(&rwlock->srwlock_);
-}
-
-
-static int uv__rwlock_fallback_init(uv_rwlock_t* rwlock) {
-  /* Initialize the semaphore that acts as the write lock. */
-  HANDLE handle = CreateSemaphoreW(NULL, 1, 1, NULL);
-  if (handle == NULL)
-    return uv_translate_sys_error(GetLastError());
-  rwlock->fallback_.write_lock_.sem = handle;
-
-  /* Initialize the critical section protecting the reader count. */
-  InitializeCriticalSection(&rwlock->fallback_.read_lock_.cs);
-
-  /* Initialize the reader count. */
-  rwlock->fallback_.num_readers_ = 0;
-
-  return 0;
-}
-
-
-static void uv__rwlock_fallback_destroy(uv_rwlock_t* rwlock) {
-  DeleteCriticalSection(&rwlock->fallback_.read_lock_.cs);
-  CloseHandle(rwlock->fallback_.write_lock_.sem);
-}
-
-
-static void uv__rwlock_fallback_rdlock(uv_rwlock_t* rwlock) {
-  /* Acquire the lock that protects the reader count. */
-  EnterCriticalSection(&rwlock->fallback_.read_lock_.cs);
-
-  /* Increase the reader count, and lock for write if this is the first
-   * reader.
-   */
-  if (++rwlock->fallback_.num_readers_ == 1) {
-    DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, INFINITE);
-    if (r != WAIT_OBJECT_0)
-      uv_fatal_error(GetLastError(), "WaitForSingleObject");
-  }
-
-  /* Release the lock that protects the reader count. */
-  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);
-}
-
-
-static int uv__rwlock_fallback_tryrdlock(uv_rwlock_t* rwlock) {
-  int err;
-
-  if (!TryEnterCriticalSection(&rwlock->fallback_.read_lock_.cs))
-    return UV_EAGAIN;
-
-  err = 0;
-  if (rwlock->fallback_.num_readers_ == 0) {
-    DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, 0);
-    if (r == WAIT_OBJECT_0)
-      rwlock->fallback_.num_readers_++;
-    else if (r == WAIT_TIMEOUT)
-      err = UV_EAGAIN;
-    else if (r == WAIT_FAILED)
-      err = uv_translate_sys_error(GetLastError());
-    else
-      err = UV_EIO;
-  }
-
-  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);
-  return err;
-}
-
-
-static void uv__rwlock_fallback_rdunlock(uv_rwlock_t* rwlock) {
-  EnterCriticalSection(&rwlock->fallback_.read_lock_.cs);
-
-  if (--rwlock->fallback_.num_readers_ == 0) {
-    if (!ReleaseSemaphore(rwlock->fallback_.write_lock_.sem, 1, NULL))
-      uv_fatal_error(GetLastError(), "ReleaseSemaphore");
-  }
-
-  LeaveCriticalSection(&rwlock->fallback_.read_lock_.cs);
-}
-
-
-static void uv__rwlock_fallback_wrlock(uv_rwlock_t* rwlock) {
-  DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, INFINITE);
-  if (r != WAIT_OBJECT_0)
-    uv_fatal_error(GetLastError(), "WaitForSingleObject");
-}
-
-
-static int uv__rwlock_fallback_trywrlock(uv_rwlock_t* rwlock) {
-  DWORD r = WaitForSingleObject(rwlock->fallback_.write_lock_.sem, 0);
-  if (r == WAIT_OBJECT_0)
-    return 0;
-  else if (r == WAIT_TIMEOUT)
-    return UV_EAGAIN;
-  else if (r == WAIT_FAILED)
-    return uv_translate_sys_error(GetLastError());
-  else
-    return UV_EIO;
-}
-
-
-static void uv__rwlock_fallback_wrunlock(uv_rwlock_t* rwlock) {
-  if (!ReleaseSemaphore(rwlock->fallback_.write_lock_.sem, 1, NULL))
-    uv_fatal_error(GetLastError(), "ReleaseSemaphore");
-}
-
-
-
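
For reference only, and also not part of the patch: the deleted uv__rwlock_srwlock_* wrappers above were thin shims over the Win32 slim reader/writer lock, reached through run-time-resolved pointers (pInitializeSRWLock and friends) because SRW locks do not exist on older Windows releases. A sketch of the same operations made directly against the documented Win32 SRWLOCK API follows; everything besides those Win32 calls is illustrative.

#include <windows.h>

/* Sketch only: direct SRWLOCK equivalents of the removed wrappers. */
static SRWLOCK srwlock = SRWLOCK_INIT;

static void srwlock_example(void) {
  AcquireSRWLockShared(&srwlock);            /* rdlock */
  ReleaseSRWLockShared(&srwlock);            /* rdunlock */

  if (TryAcquireSRWLockExclusive(&srwlock))  /* trywrlock: non-blocking */
    ReleaseSRWLockExclusive(&srwlock);       /* wrunlock */

  AcquireSRWLockExclusive(&srwlock);         /* wrlock */
  ReleaseSRWLockExclusive(&srwlock);         /* wrunlock */
}
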
/* This condition variable implementation is based on the SetEvent solution
 * (section 3.2) at http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
 * We could not use the SignalObjectAndWait solution (section 3.4) because
0 commit comments