@@ -52,7 +52,6 @@
        /* pool flags */
        POOL_MANAGE_WORKERS     = 1 << 0,       /* need to manage workers */
        POOL_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
-       POOL_HIGHPRI_PENDING    = 1 << 2,       /* highpri works on queue */

        /* worker flags */
        WORKER_STARTED          = 1 << 0,       /* started */
@@ -74,7 +73,7 @@
        TRUSTEE_RELEASE         = 3,            /* release workers */
        TRUSTEE_DONE            = 4,            /* trustee is done */

-       NR_WORKER_POOLS         = 1,            /* # worker pools per gcwq */
+       NR_WORKER_POOLS         = 2,            /* # worker pools per gcwq */

        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
        BUSY_WORKER_HASH_SIZE   = 1 << BUSY_WORKER_HASH_ORDER,
@@ -95,6 +94,7 @@
         * all cpus.  Give -20.
         */
        RESCUER_NICE_LEVEL      = -20,
+       HIGHPRI_NICE_LEVEL      = -20,
};

/*
@@ -174,7 +174,7 @@ struct global_cwq {
        struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
                                                /* L: hash of busy workers */

-       struct worker_pool      pool;           /* the worker pools */
+       struct worker_pool      pools[2];       /* normal and highpri pools */

        struct task_struct      *trustee;       /* L: for gcwq shutdown */
        unsigned int            trustee_state;  /* L: trustee state */
@@ -277,7 +277,8 @@ EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
#include <trace/events/workqueue.h>

#define for_each_worker_pool(pool, gcwq)                                \
-       for ((pool) = &(gcwq)->pool; (pool); (pool) = NULL)
+       for ((pool) = &(gcwq)->pools[0];                                \
+            (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)

#define for_each_busy_worker(worker, i, pos, gcwq)                      \
        for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
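The rewritten macro walks the gcwq's fixed-size pools[] array by pointer comparison instead of the old single-iteration/NULL trick. Below is a minimal user-space sketch of the same loop; the struct layouts are simplified stand-ins for illustration, not the kernel's real definitions.

#include <stdio.h>

enum { NR_WORKER_POOLS = 2 };                   /* mirrors the new enum value */

struct worker_pool {
        const char *name;                       /* stand-in for the real fields */
};

struct global_cwq {
        struct worker_pool pools[NR_WORKER_POOLS];      /* normal and highpri */
};

#define for_each_worker_pool(pool, gcwq)                                \
        for ((pool) = &(gcwq)->pools[0];                                \
             (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)

int main(void)
{
        struct global_cwq gcwq = { .pools = { { "normal" }, { "highpri" } } };
        struct worker_pool *pool;

        /* visits pools[0] then pools[1], stopping at the end of the array */
        for_each_worker_pool(pool, &gcwq)
                printf("pools[%td]: %s\n", pool - gcwq.pools, pool->name);
        return 0;
}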
@@ -473,6 +474,11 @@ static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {

static int worker_thread(void *__worker);

+static int worker_pool_pri(struct worker_pool *pool)
+{
+       return pool - pool->gcwq->pools;
+}
+
static struct global_cwq *get_gcwq(unsigned int cpu)
{
        if (cpu != WORK_CPU_UNBOUND)
@@ -484,7 +490,7 @@ static struct global_cwq *get_gcwq(unsigned int cpu)
static atomic_t *get_pool_nr_running(struct worker_pool *pool)
{
        int cpu = pool->gcwq->cpu;
-       int idx = 0;
+       int idx = worker_pool_pri(pool);

        if (cpu != WORK_CPU_UNBOUND)
                return &per_cpu(pool_nr_running, cpu)[idx];
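worker_pool_pri() relies on each worker_pool being embedded in its gcwq's pools[] array, so plain pointer subtraction yields 0 for the normal pool and 1 for the highpri pool; get_pool_nr_running() then reuses that index to select the matching nr_running counter. A standalone sketch of just the pointer arithmetic, again with simplified structs rather than the kernel types:

#include <assert.h>

struct global_cwq;

struct worker_pool {
        struct global_cwq *gcwq;                /* back-pointer to owning gcwq */
};

struct global_cwq {
        struct worker_pool pools[2];            /* [0] normal, [1] highpri */
};

static int worker_pool_pri(struct worker_pool *pool)
{
        return pool - pool->gcwq->pools;        /* index of pool within pools[] */
}

int main(void)
{
        struct global_cwq gcwq;

        gcwq.pools[0].gcwq = &gcwq;
        gcwq.pools[1].gcwq = &gcwq;

        assert(worker_pool_pri(&gcwq.pools[0]) == 0);
        assert(worker_pool_pri(&gcwq.pools[1]) == 1);
        return 0;
}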
@@ -586,15 +592,14 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
}

/*
- * Policy functions.  These define the policies on how the global
- * worker pool is managed.  Unless noted otherwise, these functions
- * assume that they're being called with gcwq->lock held.
+ * Policy functions.  These define the policies on how the global worker
+ * pools are managed.  Unless noted otherwise, these functions assume that
+ * they're being called with gcwq->lock held.
 */

static bool __need_more_worker(struct worker_pool *pool)
{
-       return !atomic_read(get_pool_nr_running(pool)) ||
-               (pool->flags & POOL_HIGHPRI_PENDING);
+       return !atomic_read(get_pool_nr_running(pool));
}

/*
@@ -621,9 +626,7 @@ static bool keep_working(struct worker_pool *pool)
{
        atomic_t *nr_running = get_pool_nr_running(pool);

-       return !list_empty(&pool->worklist) &&
-               (atomic_read(nr_running) <= 1 ||
-                (pool->flags & POOL_HIGHPRI_PENDING));
+       return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
@@ -891,43 +894,6 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
                                                 work);
}

-/**
- * pool_determine_ins_pos - find insertion position
- * @pool: pool of interest
- * @cwq: cwq a work is being queued for
- *
- * A work for @cwq is about to be queued on @pool, determine insertion
- * position for the work.  If @cwq is for HIGHPRI wq, the work is
- * queued at the head of the queue but in FIFO order with respect to
- * other HIGHPRI works; otherwise, at the end of the queue.  This
- * function also sets POOL_HIGHPRI_PENDING flag to hint @pool that
- * there are HIGHPRI works pending.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to insertion position.
- */
-static inline struct list_head *pool_determine_ins_pos(struct worker_pool *pool,
-                                       struct cpu_workqueue_struct *cwq)
-{
-       struct work_struct *twork;
-
-       if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
-               return &pool->worklist;
-
-       list_for_each_entry(twork, &pool->worklist, entry) {
-               struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
-
-               if (!(tcwq->wq->flags & WQ_HIGHPRI))
-                       break;
-       }
-
-       pool->flags |= POOL_HIGHPRI_PENDING;
-       return &twork->entry;
-}
-
/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
@@ -1068,7 +1034,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        if (likely(cwq->nr_active < cwq->max_active)) {
                trace_workqueue_activate_work(work);
                cwq->nr_active++;
-               worklist = pool_determine_ins_pos(cwq->pool, cwq);
+               worklist = &cwq->pool->worklist;
        } else {
                work_flags |= WORK_STRUCT_DELAYED;
                worklist = &cwq->delayed_works;
@@ -1385,6 +1351,7 @@ static struct worker *create_worker(struct worker_pool *pool, bool bind)
{
        struct global_cwq *gcwq = pool->gcwq;
        bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
+       const char *pri = worker_pool_pri(pool) ? "H" : "";
        struct worker *worker = NULL;
        int id = -1;

@@ -1406,15 +1373,17 @@ static struct worker *create_worker(struct worker_pool *pool, bool bind)

        if (!on_unbound_cpu)
                worker->task = kthread_create_on_node(worker_thread,
-                                       worker,
-                                       cpu_to_node(gcwq->cpu),
-                                       "kworker/%u:%d", gcwq->cpu, id);
+                                       worker, cpu_to_node(gcwq->cpu),
+                                       "kworker/%u:%d%s", gcwq->cpu, id, pri);
        else
                worker->task = kthread_create(worker_thread, worker,
-                                             "kworker/u:%d", id);
+                                             "kworker/u:%d%s", id, pri);
        if (IS_ERR(worker->task))
                goto fail;

+       if (worker_pool_pri(pool))
+               set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
+
        /*
         * A rogue worker will become a regular one if CPU comes
         * online later on.  Make sure every worker has
@@ -1761,10 +1730,9 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
        struct work_struct *work = list_first_entry(&cwq->delayed_works,
                                                    struct work_struct, entry);
-       struct list_head *pos = pool_determine_ins_pos(cwq->pool, cwq);

        trace_workqueue_activate_work(work);
-       move_linked_works(work, pos, NULL);
+       move_linked_works(work, &cwq->pool->worklist, NULL);
        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        cwq->nr_active++;
}
@@ -1879,21 +1847,6 @@ __acquires(&gcwq->lock)
        set_work_cpu(work, gcwq->cpu);
        list_del_init(&work->entry);

-       /*
-        * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
-        * wake up another worker; otherwise, clear HIGHPRI_PENDING.
-        */
-       if (unlikely(pool->flags & POOL_HIGHPRI_PENDING)) {
-               struct work_struct *nwork = list_first_entry(&pool->worklist,
-                                       struct work_struct, entry);
-
-               if (!list_empty(&pool->worklist) &&
-                   get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
-                       wake_up_worker(pool);
-               else
-                       pool->flags &= ~POOL_HIGHPRI_PENDING;
-       }
-
        /*
         * CPU intensive works don't participate in concurrency
         * management.  They're the scheduler's responsibility.
@@ -3047,9 +3000,10 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        for_each_cwq_cpu(cpu, wq) {
                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
                struct global_cwq *gcwq = get_gcwq(cpu);
+               int pool_idx = (bool)(flags & WQ_HIGHPRI);

                BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
-               cwq->pool = &gcwq->pool;
+               cwq->pool = &gcwq->pools[pool_idx];
                cwq->wq = wq;
                cwq->flush_color = -1;
                cwq->max_active = max_active;
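The net effect of the __alloc_workqueue_key() change above is that a workqueue created with WQ_HIGHPRI binds its per-CPU cwqs to pools[1], whose workers run at HIGHPRI_NICE_LEVEL (nice -20), while everything else keeps using pools[0]. A small standalone sketch of the index computation; the WQ_HIGHPRI value below is only a placeholder for illustration, not the kernel's definition:

#include <stdio.h>

#define WQ_HIGHPRI      (1 << 4)        /* placeholder bit for this sketch */

/* 0 selects the normal pool, 1 the highpri pool */
static int pool_index(unsigned int flags)
{
        return !!(flags & WQ_HIGHPRI);
}

int main(void)
{
        printf("plain wq   -> pools[%d]\n", pool_index(0));
        printf("highpri wq -> pools[%d]\n", pool_index(WQ_HIGHPRI));
        return 0;
}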