Commit b140513

hygoni authored and tehcaster committed
mm/sl[au]b: generalize kmalloc subsystem
Now everything in kmalloc subsystem can be generalized.
Let's do it!

Generalize __do_kmalloc_node(), __kmalloc_node_track_caller(),
kfree(), __ksize(), __kmalloc(), __kmalloc_node() and move them
to slab_common.c.

In the meantime, rename kmalloc_large_node_notrace()
to __kmalloc_large_node() and make it static as it's now only
called in slab_common.c.

[ feng.tang@intel.com: adjust kfence skip list to include
  __kmem_cache_free so that kfence kunit tests do not fail ]

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
1 parent ed4cd17 commit b140513

File tree: 5 files changed, +107 -200 lines

    mm/kfence/report.c
    mm/slab.c
    mm/slab.h
    mm/slab_common.c
    mm/slub.c
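For orientation before the per-file diffs: this commit only moves and renames the implementation, so callers of the kmalloc family are unaffected. The sketch below is a hypothetical demo module, not part of the commit; the names and sizes are illustrative, and which internal path a request takes depends on KMALLOC_MAX_CACHE_SIZE for the configured allocator.

/* Hypothetical demo module (not from this commit): the call sites stay the
 * same, only the implementation now lives in mm/slab_common.c. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/numa.h>

static int __init kmalloc_path_demo_init(void)
{
        /* Small request: routed by __do_kmalloc_node() to a kmalloc slab cache. */
        void *small = kmalloc_node(64, GFP_KERNEL, NUMA_NO_NODE);

        /*
         * Request above KMALLOC_MAX_CACHE_SIZE (the exact threshold depends on
         * the configured allocator): falls through to __kmalloc_large_node(),
         * i.e. straight to the page allocator.
         */
        void *large = kmalloc(1 << 20, GFP_KERNEL);

        kfree(small);   /* kfree(NULL) is a no-op, so no NULL checks needed here */
        kfree(large);
        return 0;
}

static void __exit kmalloc_path_demo_exit(void) { }

module_init(kmalloc_path_demo_init);
module_exit(kmalloc_path_demo_exit);
MODULE_LICENSE("GPL");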

mm/kfence/report.c (+1)

@@ -86,6 +86,7 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
                /* Also the *_bulk() variants by only checking prefixes. */
                if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
+                   str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
                    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc"))
                        goto found;
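For context: get_stack_skipnr() walks a KFENCE report's stack trace and skips allocator-internal frames by symbol prefix, so the report blames the real call site. Since the generalized kfree() now ends in __kmem_cache_free(), that symbol has to be skipped as well, otherwise the kfence kunit tests see the wrong frame. Below is a standalone userspace sketch of the prefix-skip idea with made-up frame names; it is not the KFENCE implementation.

/*
 * Userspace sketch of the skip-list idea only, with made-up frame data --
 * not the KFENCE code. The report should blame the first frame that is not
 * an allocator-internal function.
 */
#include <stdio.h>
#include <string.h>

static int has_prefix(const char *s, const char *prefix)
{
        return strncmp(s, prefix, strlen(prefix)) == 0;
}

int main(void)
{
        const char *stack[] = { "kfree", "__kmem_cache_free", "my_driver_release" };
        const char *skip[]  = { "kfree", "kmem_cache_free", "__kmem_cache_free", "__kmalloc" };
        size_t i, j;

        for (i = 0; i < sizeof(stack) / sizeof(stack[0]); i++) {
                int internal = 0;

                for (j = 0; j < sizeof(skip) / sizeof(skip[0]); j++)
                        if (has_prefix(stack[i], skip[j]))
                                internal = 1;
                if (!internal) {
                        /* prints "my_driver_release"; without the new
                         * "__kmem_cache_free" entry it would blame that frame */
                        printf("report call site: %s\n", stack[i]);
                        return 0;
                }
        }
        return 0;
}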

mm/slab.c (-108)

@@ -3587,44 +3587,6 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif

-static __always_inline void *
-__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
-{
-        struct kmem_cache *cachep;
-        void *ret;
-
-        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
-                ret = kmalloc_large_node_notrace(size, flags, node);
-
-                trace_kmalloc_node(caller, ret, NULL, size,
-                                   PAGE_SIZE << get_order(size),
-                                   flags, node);
-                return ret;
-        }
-
-        cachep = kmalloc_slab(size, flags);
-        if (unlikely(ZERO_OR_NULL_PTR(cachep)))
-                return cachep;
-
-        ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
-        ret = kasan_kmalloc(cachep, ret, size, flags);
-
-        return ret;
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-        return __do_kmalloc_node(size, flags, node, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc_node);
-
-void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-                        int node, unsigned long caller)
-{
-        return __do_kmalloc_node(size, flags, node, caller);
-}
-EXPORT_SYMBOL(__kmalloc_node_track_caller);
-
 #ifdef CONFIG_PRINTK
 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
@@ -3647,12 +3609,6 @@ void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 }
 #endif

-void *__kmalloc(size_t size, gfp_t flags)
-{
-        return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc);
-
 static __always_inline
 void __do_kmem_cache_free(struct kmem_cache *cachep, void *objp,
                           unsigned long caller)
@@ -3730,43 +3686,6 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);

-/**
- * kfree - free previously allocated memory
- * @objp: pointer returned by kmalloc.
- *
- * If @objp is NULL, no operation is performed.
- *
- * Don't free memory not originally allocated by kmalloc()
- * or you will run into trouble.
- */
-void kfree(const void *objp)
-{
-        struct kmem_cache *c;
-        unsigned long flags;
-        struct folio *folio;
-
-        trace_kfree(_RET_IP_, objp);
-
-        if (unlikely(ZERO_OR_NULL_PTR(objp)))
-                return;
-
-        folio = virt_to_folio(objp);
-        if (!folio_test_slab(folio)) {
-                free_large_kmalloc(folio, (void *)objp);
-                return;
-        }
-
-        c = folio_slab(folio)->slab_cache;
-
-        local_irq_save(flags);
-        kfree_debugcheck(objp);
-        debug_check_no_locks_freed(objp, c->object_size);
-        debug_check_no_obj_freed(objp, c->object_size);
-        __cache_free(c, (void *)objp, _RET_IP_);
-        local_irq_restore(flags);
-}
-EXPORT_SYMBOL(kfree);
-
 /*
  * This initializes kmem_cache_node or resizes various caches for all nodes.
  */
@@ -4169,30 +4088,3 @@ void __check_heap_object(const void *ptr, unsigned long n,
         usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
-
-/**
- * __ksize -- Uninstrumented ksize.
- * @objp: pointer to the object
- *
- * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
- * safety checks as ksize() with KASAN instrumentation enabled.
- *
- * Return: size of the actual memory used by @objp in bytes
- */
-size_t __ksize(const void *objp)
-{
-        struct kmem_cache *c;
-        struct folio *folio;
-
-        BUG_ON(!objp);
-        if (unlikely(objp == ZERO_SIZE_PTR))
-                return 0;
-
-        folio = virt_to_folio(objp);
-        if (!folio_test_slab(folio))
-                return folio_size(folio);
-
-        c = folio_slab(folio)->slab_cache;
-        return c->object_size;
-}
-EXPORT_SYMBOL(__ksize);

mm/slab.h (-2)

@@ -280,8 +280,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
 void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
 #endif

-void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node);
-
 gfp_t kmalloc_fix_flags(gfp_t flags);

 /* Functions provided by the slab allocators */

mm/slab_common.c (+106, -3)

@@ -897,6 +897,109 @@ void free_large_kmalloc(struct folio *folio, void *object)
                               -(PAGE_SIZE << order));
         __free_pages(folio_page(folio, 0), order);
 }
+
+static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
+static __always_inline
+void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
+{
+        struct kmem_cache *s;
+        void *ret;
+
+        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
+                ret = __kmalloc_large_node(size, flags, node);
+                trace_kmalloc_node(caller, ret, NULL,
+                                   size, PAGE_SIZE << get_order(size),
+                                   flags, node);
+                return ret;
+        }
+
+        s = kmalloc_slab(size, flags);
+
+        if (unlikely(ZERO_OR_NULL_PTR(s)))
+                return s;
+
+        ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
+        ret = kasan_kmalloc(s, ret, size, flags);
+        trace_kmalloc_node(caller, ret, s, size,
+                           s->size, flags, node);
+        return ret;
+}
+
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+        return __do_kmalloc_node(size, flags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+        return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc);
+
+void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
+                                  int node, unsigned long caller)
+{
+        return __do_kmalloc_node(size, flags, node, caller);
+}
+EXPORT_SYMBOL(__kmalloc_node_track_caller);
+
+/**
+ * kfree - free previously allocated memory
+ * @object: pointer returned by kmalloc.
+ *
+ * If @object is NULL, no operation is performed.
+ *
+ * Don't free memory not originally allocated by kmalloc()
+ * or you will run into trouble.
+ */
+void kfree(const void *object)
+{
+        struct folio *folio;
+        struct slab *slab;
+        struct kmem_cache *s;
+
+        trace_kfree(_RET_IP_, object);
+
+        if (unlikely(ZERO_OR_NULL_PTR(object)))
+                return;
+
+        folio = virt_to_folio(object);
+        if (unlikely(!folio_test_slab(folio))) {
+                free_large_kmalloc(folio, (void *)object);
+                return;
+        }
+
+        slab = folio_slab(folio);
+        s = slab->slab_cache;
+        __kmem_cache_free(s, (void *)object, _RET_IP_);
+}
+EXPORT_SYMBOL(kfree);
+
+/**
+ * __ksize -- Uninstrumented ksize.
+ * @object: pointer to the object
+ *
+ * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
+ * safety checks as ksize() with KASAN instrumentation enabled.
+ *
+ * Return: size of the actual memory used by @object in bytes
+ */
+size_t __ksize(const void *object)
+{
+        struct folio *folio;
+
+        if (unlikely(object == ZERO_SIZE_PTR))
+                return 0;
+
+        folio = virt_to_folio(object);
+
+        if (unlikely(!folio_test_slab(folio)))
+                return folio_size(folio);
+
+        return slab_ksize(folio_slab(folio)->slab_cache);
+}
+EXPORT_SYMBOL(__ksize);
 #endif /* !CONFIG_SLOB */

 gfp_t kmalloc_fix_flags(gfp_t flags)
@@ -917,7 +1020,7 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
  * know the allocation order to free the pages properly in kfree.
  */

-void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
+static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
         struct page *page;
         void *ptr = NULL;
@@ -943,7 +1046,7 @@ void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)

 void *kmalloc_large(size_t size, gfp_t flags)
 {
-        void *ret = kmalloc_large_node_notrace(size, flags, NUMA_NO_NODE);
+        void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);

         trace_kmalloc(_RET_IP_, ret, NULL, size,
                       PAGE_SIZE << get_order(size), flags);
@@ -953,7 +1056,7 @@ EXPORT_SYMBOL(kmalloc_large);

 void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
-        void *ret = kmalloc_large_node_notrace(size, flags, node);
+        void *ret = __kmalloc_large_node(size, flags, node);

         trace_kmalloc_node(_RET_IP_, ret, NULL, size,
                            PAGE_SIZE << get_order(size), flags, node);
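A note on the design choice visible above: the untraced page-allocation core becomes a static helper, __kmalloc_large_node(), reachable only from this file — __do_kmalloc_node() and the exported kmalloc_large()/kmalloc_large_node() wrap it and add the tracepoint. Below is a standalone sketch of that "static core plus traced wrappers" pattern in plain C with hypothetical names; it is not kernel code.

/*
 * Standalone illustration of the wrapper pattern: one static, untraced core
 * plus a thin public wrapper that adds the trace/log, so the core is never
 * reachable from other translation units.
 */
#include <stdio.h>
#include <stdlib.h>

static void *alloc_large_core(size_t size)      /* cf. __kmalloc_large_node() */
{
        return malloc(size);
}

void *alloc_large_traced(size_t size)           /* cf. kmalloc_large() */
{
        void *ret = alloc_large_core(size);

        printf("trace: alloc_large size=%zu ret=%p\n", size, ret);
        return ret;
}

int main(void)
{
        void *p = alloc_large_traced(1 << 20);

        free(p);
        return 0;
}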

mm/slub.c (-87)

@@ -4388,49 +4388,6 @@ static int __init setup_slub_min_objects(char *str)

 __setup("slub_min_objects=", setup_slub_min_objects);

-static __always_inline
-void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
-{
-        struct kmem_cache *s;
-        void *ret;
-
-        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
-                ret = kmalloc_large_node_notrace(size, flags, node);
-
-                trace_kmalloc_node(caller, ret, NULL,
-                                   size, PAGE_SIZE << get_order(size),
-                                   flags, node);
-
-                return ret;
-        }
-
-        s = kmalloc_slab(size, flags);
-
-        if (unlikely(ZERO_OR_NULL_PTR(s)))
-                return s;
-
-        ret = slab_alloc_node(s, NULL, flags, node, caller, size);
-
-        trace_kmalloc_node(caller, ret, s, size, s->size, flags, node);
-
-        ret = kasan_kmalloc(s, ret, size, flags);
-
-        return ret;
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-        return __do_kmalloc_node(size, flags, node, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc_node);
-
-void *__kmalloc(size_t size, gfp_t flags)
-{
-        return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
-}
-EXPORT_SYMBOL(__kmalloc);
-
-
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
  * Rejects incorrectly sized objects and objects that are to be copied
@@ -4481,43 +4438,6 @@ void __check_heap_object(const void *ptr, unsigned long n,
 }
 #endif /* CONFIG_HARDENED_USERCOPY */

-size_t __ksize(const void *object)
-{
-        struct folio *folio;
-
-        if (unlikely(object == ZERO_SIZE_PTR))
-                return 0;
-
-        folio = virt_to_folio(object);
-
-        if (unlikely(!folio_test_slab(folio)))
-                return folio_size(folio);
-
-        return slab_ksize(folio_slab(folio)->slab_cache);
-}
-EXPORT_SYMBOL(__ksize);
-
-void kfree(const void *x)
-{
-        struct folio *folio;
-        struct slab *slab;
-        void *object = (void *)x;
-
-        trace_kfree(_RET_IP_, x);
-
-        if (unlikely(ZERO_OR_NULL_PTR(x)))
-                return;
-
-        folio = virt_to_folio(x);
-        if (unlikely(!folio_test_slab(folio))) {
-                free_large_kmalloc(folio, object);
-                return;
-        }
-        slab = folio_slab(folio);
-        slab_free(slab->slab_cache, slab, object, NULL, &object, 1, _RET_IP_);
-}
-EXPORT_SYMBOL(kfree);
-
 #define SHRINK_PROMOTE_MAX 32

 /*
@@ -4863,13 +4783,6 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
         return 0;
 }

-void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-                                  int node, unsigned long caller)
-{
-        return __do_kmalloc_node(size, gfpflags, node, caller);
-}
-EXPORT_SYMBOL(__kmalloc_node_track_caller);
-
 #ifdef CONFIG_SYSFS
 static int count_inuse(struct slab *slab)
 {
