
Commit 18004c5

Christoph Lameter authored and Pekka Enberg (penberg) committed
mm, sl[aou]b: Use a common mutex definition
Use the mutex definition from SLAB and make it the common way to take a sleeping lock.

This has the effect of using a mutex instead of a rw semaphore for SLUB.

SLOB gains the use of a mutex for kmem_cache_create serialization. Not needed now, but SLOB may acquire some more features later (like slabinfo / sysfs support) through the expansion of the common code that will need this.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
1 parent 97d0660 commit 18004c5
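The change reduces to one shared cache list and one shared sleeping lock that every allocator uses in place of its private equivalent (SLAB's cache_chain_mutex, SLUB's rw semaphore). The sketch below is illustrative only, not part of the patch: the two definitions mirror the mm/slab_common.c hunk further down, while find_cache_by_name() is a hypothetical helper showing the locking pattern the mm/slab.c hunks follow.

/*
 * Illustrative sketch -- not taken verbatim from the patch.
 * Shared state as defined in mm/slab_common.c and declared in mm/slab.h:
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

LIST_HEAD(slab_caches);		/* every kmem_cache is linked on this list */
DEFINE_MUTEX(slab_mutex);	/* sleeping lock guarding that list        */

/*
 * find_cache_by_name() is a hypothetical helper: any walk of slab_caches,
 * in whichever allocator, now takes the common mutex instead of a private
 * lock such as cache_chain_mutex or SLUB's rw semaphore.
 */
static struct kmem_cache *find_cache_by_name(const char *name)
{
	struct kmem_cache *s, *found = NULL;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		if (!strcmp(s->name, name)) {
			found = s;
			break;
		}
	}
	mutex_unlock(&slab_mutex);
	return found;
}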

File tree: 4 files changed, +82 −86 lines changed


mm/slab.c

+51 −57

@@ -68,7 +68,7 @@
  *	Further notes from the original documentation:
  *
  *	11 April '97.  Started multi-threading - markhe
- *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
+ *	The global cache-chain is protected by the mutex 'slab_mutex'.
  *	The sem is only needed when accessing/extending the cache-chain, which
  *	can never happen inside an interrupt (kmem_cache_create(),
  *	kmem_cache_shrink() and kmem_cache_reap()).
@@ -671,12 +671,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
 }
 #endif
 
-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1100,15 +1094,15 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
  * When hotplugging memory or a cpu, existing nodelists are not replaced if
  * already in use.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int init_cache_nodelists_node(int node)
 {
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3;
 	const int memsize = sizeof(struct kmem_list3);
 
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		/*
 		 * Set up the size64 kmemlist for cpu before we can
 		 * begin anything. Make sure some other cpu on this
@@ -1124,7 +1118,7 @@ static int init_cache_nodelists_node(int node)
 
 			/*
 			 * The l3s don't come and go as CPUs come and
-			 * go.  cache_chain_mutex is sufficient
+			 * go.  slab_mutex is sufficient
 			 * protection here.
 			 */
 			cachep->nodelists[node] = l3;
@@ -1146,7 +1140,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 	int node = cpu_to_mem(cpu);
 	const struct cpumask *mask = cpumask_of_node(node);
 
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		struct array_cache *nc;
 		struct array_cache *shared;
 		struct array_cache **alien;
@@ -1196,7 +1190,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 	 * the respective cache's slabs, now we can go ahead and
 	 * shrink each nodelist to its limit.
 	 */
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		l3 = cachep->nodelists[node];
 		if (!l3)
 			continue;
@@ -1225,7 +1219,7 @@ static int __cpuinit cpuup_prepare(long cpu)
 	 * Now we can go ahead with allocating the shared arrays and
 	 * array caches
 	 */
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		struct array_cache *nc;
 		struct array_cache *shared = NULL;
 		struct array_cache **alien = NULL;
@@ -1293,9 +1287,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 		err = cpuup_prepare(cpu);
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
@@ -1305,7 +1299,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		/*
-		 * Shutdown cache reaper. Note that the cache_chain_mutex is
+		 * Shutdown cache reaper. Note that the slab_mutex is
 		 * held so that if cache_reap() is invoked it cannot do
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
@@ -1332,9 +1326,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 #endif
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 		cpuup_canceled(cpu);
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		break;
 	}
 	return notifier_from_errno(err);
@@ -1350,14 +1344,14 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
  * Returns -EBUSY if all objects cannot be drained so that the node is not
  * removed.
  *
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
  */
 static int __meminit drain_cache_nodelists_node(int node)
 {
 	struct kmem_cache *cachep;
 	int ret = 0;
 
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		struct kmem_list3 *l3;
 
 		l3 = cachep->nodelists[node];
@@ -1388,14 +1382,14 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
 
 	switch (action) {
 	case MEM_GOING_ONLINE:
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 		ret = init_cache_nodelists_node(nid);
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		break;
 	case MEM_GOING_OFFLINE:
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 		ret = drain_cache_nodelists_node(nid);
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		break;
 	case MEM_ONLINE:
 	case MEM_OFFLINE:
@@ -1499,8 +1493,8 @@ void __init kmem_cache_init(void)
 	node = numa_mem_id();
 
 	/* 1) create the cache_cache */
-	INIT_LIST_HEAD(&cache_chain);
-	list_add(&cache_cache.list, &cache_chain);
+	INIT_LIST_HEAD(&slab_caches);
+	list_add(&cache_cache.list, &slab_caches);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1642,11 +1636,11 @@ void __init kmem_cache_init_late(void)
 	init_lock_keys();
 
 	/* 6) resize the head arrays to their final sizes */
-	mutex_lock(&cache_chain_mutex);
-	list_for_each_entry(cachep, &cache_chain, list)
+	mutex_lock(&slab_mutex);
+	list_for_each_entry(cachep, &slab_caches, list)
 		if (enable_cpucache(cachep, GFP_NOWAIT))
 			BUG();
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 
 	/* Done! */
 	slab_state = FULL;
@@ -2253,10 +2247,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	if (slab_is_available()) {
 		get_online_cpus();
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 	}
 
-	list_for_each_entry(pc, &cache_chain, list) {
+	list_for_each_entry(pc, &slab_caches, list) {
 		char tmp;
 		int res;
 
@@ -2500,10 +2494,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 
 	/* cache setup completed, link it into the list */
-	list_add(&cachep->list, &cache_chain);
+	list_add(&cachep->list, &slab_caches);
 oops:
 	if (slab_is_available()) {
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		put_online_cpus();
 	}
 	return cachep;
@@ -2622,7 +2616,7 @@ static int drain_freelist(struct kmem_cache *cache,
 	return nr_freed;
 }
 
-/* Called with cache_chain_mutex held to protect against cpu hotplug */
+/* Called with slab_mutex held to protect against cpu hotplug */
 static int __cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0, i = 0;
@@ -2657,9 +2651,9 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 	BUG_ON(!cachep || in_interrupt());
 
 	get_online_cpus();
-	mutex_lock(&cache_chain_mutex);
+	mutex_lock(&slab_mutex);
 	ret = __cache_shrink(cachep);
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 	return ret;
 }
@@ -2687,15 +2681,15 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 
 	/* Find the cache in the chain of caches. */
 	get_online_cpus();
-	mutex_lock(&cache_chain_mutex);
+	mutex_lock(&slab_mutex);
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
 	 */
 	list_del(&cachep->list);
 	if (__cache_shrink(cachep)) {
 		slab_error(cachep, "Can't free all objects");
-		list_add(&cachep->list, &cache_chain);
-		mutex_unlock(&cache_chain_mutex);
+		list_add(&cachep->list, &slab_caches);
+		mutex_unlock(&slab_mutex);
 		put_online_cpus();
 		return;
 	}
@@ -2704,7 +2698,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 		rcu_barrier();
 
 	__kmem_cache_destroy(cachep);
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 	put_online_cpus();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -4017,7 +4011,7 @@ static void do_ccupdate_local(void *info)
 	new->new[smp_processor_id()] = old;
 }
 
-/* Always called with the cache_chain_mutex held */
+/* Always called with the slab_mutex held */
 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 				int batchcount, int shared, gfp_t gfp)
 {
@@ -4061,7 +4055,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
 	return alloc_kmemlist(cachep, gfp);
 }
 
-/* Called with cache_chain_mutex held always */
+/* Called with slab_mutex held always */
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int err;
@@ -4163,11 +4157,11 @@ static void cache_reap(struct work_struct *w)
 	int node = numa_mem_id();
 	struct delayed_work *work = to_delayed_work(w);
 
-	if (!mutex_trylock(&cache_chain_mutex))
+	if (!mutex_trylock(&slab_mutex))
 		/* Give up. Setup the next iteration. */
 		goto out;
 
-	list_for_each_entry(searchp, &cache_chain, list) {
+	list_for_each_entry(searchp, &slab_caches, list) {
 		check_irq_on();
 
 		/*
@@ -4205,7 +4199,7 @@ static void cache_reap(struct work_struct *w)
 		cond_resched();
 	}
 	check_irq_on();
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 	next_reap_node();
 out:
 	/* Set up the next iteration */
@@ -4241,21 +4235,21 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 {
 	loff_t n = *pos;
 
-	mutex_lock(&cache_chain_mutex);
+	mutex_lock(&slab_mutex);
 	if (!n)
 		print_slabinfo_header(m);
 
-	return seq_list_start(&cache_chain, *pos);
+	return seq_list_start(&slab_caches, *pos);
 }
 
 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 {
-	return seq_list_next(p, &cache_chain, pos);
+	return seq_list_next(p, &slab_caches, pos);
 }
 
 static void s_stop(struct seq_file *m, void *p)
 {
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
@@ -4406,9 +4400,9 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 		return -EINVAL;
 
 	/* Find the cache in the chain of caches. */
-	mutex_lock(&cache_chain_mutex);
+	mutex_lock(&slab_mutex);
 	res = -EINVAL;
-	list_for_each_entry(cachep, &cache_chain, list) {
+	list_for_each_entry(cachep, &slab_caches, list) {
 		if (!strcmp(cachep->name, kbuf)) {
 			if (limit < 1 || batchcount < 1 ||
 					batchcount > limit || shared < 0) {
@@ -4421,7 +4415,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 			break;
 		}
 	}
-	mutex_unlock(&cache_chain_mutex);
+	mutex_unlock(&slab_mutex);
 	if (res >= 0)
 		res = count;
 	return res;
@@ -4444,8 +4438,8 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static void *leaks_start(struct seq_file *m, loff_t *pos)
 {
-	mutex_lock(&cache_chain_mutex);
-	return seq_list_start(&cache_chain, *pos);
+	mutex_lock(&slab_mutex);
+	return seq_list_start(&slab_caches, *pos);
 }
 
 static inline int add_caller(unsigned long *n, unsigned long v)
@@ -4544,17 +4538,17 @@ static int leaks_show(struct seq_file *m, void *p)
 	name = cachep->name;
 	if (n[0] == n[1]) {
 		/* Increase the buffer size */
-		mutex_unlock(&cache_chain_mutex);
+		mutex_unlock(&slab_mutex);
 		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
 		if (!m->private) {
 			/* Too bad, we are really out */
 			m->private = n;
-			mutex_lock(&cache_chain_mutex);
+			mutex_lock(&slab_mutex);
 			return -ENOMEM;
 		}
 		*(unsigned long *)m->private = n[0] * 2;
 		kfree(n);
-		mutex_lock(&cache_chain_mutex);
+		mutex_lock(&slab_mutex);
 		/* Now make sure this entry will be retried */
 		m->count = m->size;
 		return 0;
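One property of the mm/slab.c hunks above worth calling out: only the identity of the lock changes. Wherever the list lock interacts with CPU hotplug (kmem_cache_create, kmem_cache_shrink, kmem_cache_destroy), the existing ordering of get_online_cpus() before the list lock is kept. A minimal sketch of that ordering follows, with with_slab_mutex() as a hypothetical wrapper rather than anything in the patch:

#include <linux/cpu.h>		/* get_online_cpus()/put_online_cpus() */
#include <linux/mutex.h>
#include <linux/slab.h>
#include "slab.h"		/* extern slab_mutex */

/*
 * Hypothetical wrapper mirroring the ordering used by kmem_cache_shrink()
 * and kmem_cache_destroy() above: pin CPU hotplug first, then take
 * slab_mutex, and release in the reverse order.
 */
static int with_slab_mutex(int (*op)(struct kmem_cache *), struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	mutex_lock(&slab_mutex);
	ret = op(cachep);
	mutex_unlock(&slab_mutex);
	put_online_cpus();
	return ret;
}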

mm/slab.h

+4 −0

@@ -23,6 +23,10 @@ enum slab_state {
 
 extern enum slab_state slab_state;
 
+/* The slab cache mutex protects the management structures during changes */
+extern struct mutex slab_mutex;
+extern struct list_head slab_caches;
+
 struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *));
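With these declarations visible through mm/slab.h, the SLUB side of this commit (mm/slub.c, whose hunks are not reproduced in this excerpt) can drop its private rw semaphore and take the same mutex. A hedged sketch of the resulting pattern, assuming the pre-patch lock was the rw semaphore slub_lock and using a hypothetical for_each_slab_cache() helper:

/*
 * Illustrative only -- not the actual mm/slub.c diff.
 * Pre-patch, SLUB serialized list walks roughly like this:
 *
 *	down_read(&slub_lock);
 *	list_for_each_entry(s, &slab_caches, list)
 *		...;
 *	up_read(&slub_lock);
 *
 * Post-patch, readers and writers alike sleep on the one common mutex:
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include "slab.h"		/* extern slab_mutex, slab_caches */

static void for_each_slab_cache(void (*fn)(struct kmem_cache *s))
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list)
		fn(s);
	mutex_unlock(&slab_mutex);
}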

mm/slab_common.c

+2 −0

@@ -19,6 +19,8 @@
 #include "slab.h"
 
 enum slab_state slab_state;
+LIST_HEAD(slab_caches);
+DEFINE_MUTEX(slab_mutex);
 
 /*
  * kmem_cache_create - Create a cache.
0 commit comments
