  * Further notes from the original documentation:
  *
  * 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
+ * The global cache-chain is protected by the mutex 'slab_mutex'.
  * The sem is only needed when accessing/extending the cache-chain, which
  * can never happen inside an interrupt (kmem_cache_create(),
  * kmem_cache_shrink() and kmem_cache_reap()).
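
A minimal, illustrative sketch (not part of the patch) of the access pattern the
comment above describes: every walk of the global cache list takes the renamed
slab_mutex first and is never done from interrupt context. The names demo_cache,
demo_slab_mutex, demo_slab_caches and demo_walk_caches are stand-ins invented for
this sketch, not symbols from mm/slab.c.

#include <linux/list.h>
#include <linux/mutex.h>

struct demo_cache {                             /* stand-in for struct kmem_cache */
        const char *name;
        struct list_head list;                  /* linked into the global cache list */
};

static DEFINE_MUTEX(demo_slab_mutex);           /* plays the role of slab_mutex */
static LIST_HEAD(demo_slab_caches);             /* plays the role of slab_caches */

/* Walk the cache list under the mutex, as every chain walk in this file does. */
static void demo_walk_caches(void (*fn)(struct demo_cache *))
{
        struct demo_cache *cachep;

        mutex_lock(&demo_slab_mutex);
        list_for_each_entry(cachep, &demo_slab_caches, list)
                fn(cachep);
        mutex_unlock(&demo_slab_mutex);
}

The hunks below are instances of this pattern applied to the real kmem_cache list,
with only the mutex and list renamed.
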
@@ -671,12 +671,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 }
 #endif

-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1100,15 +1094,15 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
  * When hotplugging memory or a cpu, existing nodelists are not replaced if
  * already in use.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int init_cache_nodelists_node(int node)
 {
         struct kmem_cache *cachep;
         struct kmem_list3 *l3;
         const int memsize = sizeof(struct kmem_list3);

-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 /*
                  * Set up the size64 kmemlist for cpu before we can
                  * begin anything. Make sure some other cpu on this
@@ -1124,7 +1118,7 @@ static int init_cache_nodelists_node(int node)

                         /*
                          * The l3s don't come and go as CPUs come and
-                         * go. cache_chain_mutex is sufficient
+                         * go. slab_mutex is sufficient
                          * protection here.
                          */
                         cachep->nodelists[node] = l3;
@@ -1146,7 +1140,7 @@ static void __cpuinit cpuup_canceled(long cpu)
         int node = cpu_to_mem(cpu);
         const struct cpumask *mask = cpumask_of_node(node);

-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 struct array_cache *nc;
                 struct array_cache *shared;
                 struct array_cache **alien;
@@ -1196,7 +1190,7 @@ static void __cpuinit cpuup_canceled(long cpu)
          * the respective cache's slabs, now we can go ahead and
          * shrink each nodelist to its limit.
          */
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 l3 = cachep->nodelists[node];
                 if (!l3)
                         continue;
@@ -1225,7 +1219,7 @@ static int __cpuinit cpuup_prepare(long cpu)
          * Now we can go ahead with allocating the shared arrays and
          * array caches
          */
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 struct array_cache *nc;
                 struct array_cache *shared = NULL;
                 struct array_cache **alien = NULL;
@@ -1293,9 +1287,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
         switch (action) {
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 err = cpuup_prepare(cpu);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         case CPU_ONLINE:
         case CPU_ONLINE_FROZEN:
@@ -1305,7 +1299,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
         case CPU_DOWN_PREPARE:
         case CPU_DOWN_PREPARE_FROZEN:
                 /*
-                 * Shutdown cache reaper. Note that the cache_chain_mutex is
+                 * Shutdown cache reaper. Note that the slab_mutex is
                  * held so that if cache_reap() is invoked it cannot do
                  * anything expensive but will only modify reap_work
                  * and reschedule the timer.
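
The comment in this hunk relies on cache_reap() only ever taking the mutex with
mutex_trylock() (see the cache_reap() hunks further below): while the hotplug
notifier holds it, the reaper backs off and merely rearms itself. A hedged sketch
of that interplay, reusing the illustrative demo_slab_mutex from the earlier
sketch; demo_reap() and its one-second rearm interval are assumptions, not the
patch's code.

#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Reaper side of the bargain: if someone holds the mutex, do nothing costly. */
static void demo_reap(struct work_struct *w)
{
        if (!mutex_trylock(&demo_slab_mutex))
                goto out;                       /* notifier holds it: give up */

        /* ... drain per-cpu arrays of each cache on the list ... */

        mutex_unlock(&demo_slab_mutex);
out:
        /* Cheap work only: reschedule ourselves and return. */
        schedule_delayed_work(to_delayed_work(w), HZ);
}
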
@@ -1332,9 +1326,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
         case CPU_UP_CANCELED:
         case CPU_UP_CANCELED_FROZEN:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 cpuup_canceled(cpu);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         }
         return notifier_from_errno(err);
@@ -1350,14 +1344,14 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
  * Returns -EBUSY if all objects cannot be drained so that the node is not
  * removed.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int __meminit drain_cache_nodelists_node(int node)
 {
         struct kmem_cache *cachep;
         int ret = 0;

-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 struct kmem_list3 *l3;

                 l3 = cachep->nodelists[node];
@@ -1388,14 +1382,14 @@ static int __meminit slab_memory_callback(struct notifier_block *self,

         switch (action) {
         case MEM_GOING_ONLINE:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 ret = init_cache_nodelists_node(nid);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         case MEM_GOING_OFFLINE:
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 ret = drain_cache_nodelists_node(nid);
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 break;
         case MEM_ONLINE:
         case MEM_OFFLINE:
@@ -1499,8 +1493,8 @@ void __init kmem_cache_init(void)
         node = numa_mem_id();

         /* 1) create the cache_cache */
-        INIT_LIST_HEAD(&cache_chain);
-        list_add(&cache_cache.list, &cache_chain);
+        INIT_LIST_HEAD(&slab_caches);
+        list_add(&cache_cache.list, &slab_caches);
         cache_cache.colour_off = cache_line_size();
         cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
         cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1642,11 +1636,11 @@ void __init kmem_cache_init_late(void)
         init_lock_keys();

         /* 6) resize the head arrays to their final sizes */
-        mutex_lock(&cache_chain_mutex);
-        list_for_each_entry(cachep, &cache_chain, list)
+        mutex_lock(&slab_mutex);
+        list_for_each_entry(cachep, &slab_caches, list)
                 if (enable_cpucache(cachep, GFP_NOWAIT))
                         BUG();
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);

         /* Done! */
         slab_state = FULL;
@@ -2253,10 +2247,10 @@ __kmem_cache_create(const char *name, size_t size, size_t align,
          */
         if (slab_is_available()) {
                 get_online_cpus();
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
         }

-        list_for_each_entry(pc, &cache_chain, list) {
+        list_for_each_entry(pc, &slab_caches, list) {
                 char tmp;
                 int res;

@@ -2500,10 +2494,10 @@ __kmem_cache_create(const char *name, size_t size, size_t align,
         }

         /* cache setup completed, link it into the list */
-        list_add(&cachep->list, &cache_chain);
+        list_add(&cachep->list, &slab_caches);
 oops:
         if (slab_is_available()) {
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 put_online_cpus();
         }
         return cachep;
@@ -2622,7 +2616,7 @@ static int drain_freelist(struct kmem_cache *cache,
         return nr_freed;
 }

-/* Called with cache_chain_mutex held to protect against cpu hotplug */
+/* Called with slab_mutex held to protect against cpu hotplug */
 static int __cache_shrink(struct kmem_cache *cachep)
 {
         int ret = 0, i = 0;
@@ -2657,9 +2651,9 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
         BUG_ON(!cachep || in_interrupt());

         get_online_cpus();
-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         ret = __cache_shrink(cachep);
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         put_online_cpus();
         return ret;
 }
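
kmem_cache_shrink() above, like the create and destroy paths elsewhere in the
patch, takes the cpu-hotplug lock before slab_mutex and releases them in reverse
order. A hedged sketch of that ordering, again with the illustrative
demo_slab_mutex; demo_shrink_all() is a made-up name.

#include <linux/cpu.h>
#include <linux/mutex.h>

/* Lock ordering used by the shrink/create/destroy paths: hotplug lock, then mutex. */
static void demo_shrink_all(void)
{
        get_online_cpus();                      /* block cpu hotplug first ... */
        mutex_lock(&demo_slab_mutex);           /* ... then take the cache-list mutex */

        /* ... shrink every cache on the list ... */

        mutex_unlock(&demo_slab_mutex);
        put_online_cpus();                      /* release in reverse order */
}
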
@@ -2687,15 +2681,15 @@ void kmem_cache_destroy(struct kmem_cache *cachep)

         /* Find the cache in the chain of caches. */
         get_online_cpus();
-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         /*
          * the chain is never empty, cache_cache is never destroyed
          */
         list_del(&cachep->list);
         if (__cache_shrink(cachep)) {
                 slab_error(cachep, "Can't free all objects");
-                list_add(&cachep->list, &cache_chain);
-                mutex_unlock(&cache_chain_mutex);
+                list_add(&cachep->list, &slab_caches);
+                mutex_unlock(&slab_mutex);
                 put_online_cpus();
                 return;
         }
@@ -2704,7 +2698,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
         rcu_barrier();

         __kmem_cache_destroy(cachep);
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -4017,7 +4011,7 @@ static void do_ccupdate_local(void *info)
         new->new[smp_processor_id()] = old;
 }

-/* Always called with the cache_chain_mutex held */
+/* Always called with the slab_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
                                 int batchcount, int shared, gfp_t gfp)
 {
@@ -4061,7 +4055,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
         return alloc_kmemlist(cachep, gfp);
 }

-/* Called with cache_chain_mutex held always */
+/* Called with slab_mutex held always */
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
         int err;
@@ -4163,11 +4157,11 @@ static void cache_reap(struct work_struct *w)
         int node = numa_mem_id();
         struct delayed_work *work = to_delayed_work(w);

-        if (!mutex_trylock(&cache_chain_mutex))
+        if (!mutex_trylock(&slab_mutex))
                 /* Give up. Setup the next iteration. */
                 goto out;

-        list_for_each_entry(searchp, &cache_chain, list) {
+        list_for_each_entry(searchp, &slab_caches, list) {
                 check_irq_on();

                 /*
@@ -4205,7 +4199,7 @@ static void cache_reap(struct work_struct *w)
                 cond_resched();
         }
         check_irq_on();
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         next_reap_node();
 out:
         /* Set up the next iteration */
@@ -4241,21 +4235,21 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 {
         loff_t n = *pos;

-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         if (!n)
                 print_slabinfo_header(m);

-        return seq_list_start(&cache_chain, *pos);
+        return seq_list_start(&slab_caches, *pos);
 }

 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-        return seq_list_next(p, &cache_chain, pos);
+        return seq_list_next(p, &slab_caches, pos);
 }

 static void s_stop(struct seq_file *m, void *p)
 {
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
 }

 static int s_show(struct seq_file *m, void *p)
@@ -4406,9 +4400,9 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                 return -EINVAL;

         /* Find the cache in the chain of caches. */
-        mutex_lock(&cache_chain_mutex);
+        mutex_lock(&slab_mutex);
         res = -EINVAL;
-        list_for_each_entry(cachep, &cache_chain, list) {
+        list_for_each_entry(cachep, &slab_caches, list) {
                 if (!strcmp(cachep->name, kbuf)) {
                         if (limit < 1 || batchcount < 1 ||
                                         batchcount > limit || shared < 0) {
@@ -4421,7 +4415,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                         break;
                 }
         }
-        mutex_unlock(&cache_chain_mutex);
+        mutex_unlock(&slab_mutex);
         if (res >= 0)
                 res = count;
         return res;
@@ -4444,8 +4438,8 @@ static const struct file_operations proc_slabinfo_operations = {

 static void *leaks_start(struct seq_file *m, loff_t *pos)
 {
-        mutex_lock(&cache_chain_mutex);
-        return seq_list_start(&cache_chain, *pos);
+        mutex_lock(&slab_mutex);
+        return seq_list_start(&slab_caches, *pos);
 }

 static inline int add_caller(unsigned long *n, unsigned long v)
@@ -4544,17 +4538,17 @@ static int leaks_show(struct seq_file *m, void *p)
         name = cachep->name;
         if (n[0] == n[1]) {
                 /* Increase the buffer size */
-                mutex_unlock(&cache_chain_mutex);
+                mutex_unlock(&slab_mutex);
                 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
                 if (!m->private) {
                         /* Too bad, we are really out */
                         m->private = n;
-                        mutex_lock(&cache_chain_mutex);
+                        mutex_lock(&slab_mutex);
                         return -ENOMEM;
                 }
                 *(unsigned long *)m->private = n[0] * 2;
                 kfree(n);
-                mutex_lock(&cache_chain_mutex);
+                mutex_lock(&slab_mutex);
                 /* Now make sure this entry will be retried */
                 m->count = m->size;
                 return 0;
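
This last hunk is the one place where the mutex is dropped mid-iteration:
leaks_show() releases slab_mutex around a sleeping kzalloc() of a larger buffer,
re-acquires it, and sets m->count = m->size so that seq_file retries the entry.
A hedged sketch of that pattern in isolation, with the illustrative
demo_slab_mutex; demo_grow_buffer() and the buffer layout comments are
assumptions for illustration.

#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

/* Drop the mutex around a sleeping allocation, retake it, and ask for a retry. */
static int demo_grow_buffer(struct seq_file *m, unsigned long *n)
{
        mutex_unlock(&demo_slab_mutex);
        m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
        if (!m->private) {
                m->private = n;                 /* keep the old, smaller buffer */
                mutex_lock(&demo_slab_mutex);
                return -ENOMEM;
        }
        *(unsigned long *)m->private = n[0] * 2;        /* record the doubled capacity */
        kfree(n);
        mutex_lock(&demo_slab_mutex);
        m->count = m->size;                     /* force seq_file to retry this entry */
        return 0;
}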