@@ -74,12 +74,7 @@
  */
 #define ZS_ALIGN		8
 
-/*
- * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
- * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
- */
-#define ZS_MAX_ZSPAGE_ORDER 2
-#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(1, UL) << ZS_MAX_PAGE_ORDER)
 
 #define ZS_HANDLE_SIZE (sizeof(unsigned long))
 
@@ -124,10 +119,8 @@
 #define ISOLATED_BITS	3
 #define MAGIC_VAL_BITS	8
 
-#define MAX(a, b) ((a) >= (b) ? (a) : (b))
-/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
-#define ZS_MIN_ALLOC_SIZE \
-	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
+#define ZS_MIN_ALLOC_SIZE	32U
+
 /* each chunk includes extra space to keep handle */
 #define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
 
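Aside on the removed MAX(): it encoded a layout constraint, not a tuning knob. A handle reserves OBJ_INDEX_BITS for the object's index inside a zspage, so a zspage of `max_pages << PAGE_SHIFT` bytes cannot support classes smaller than `zspage_bytes >> OBJ_INDEX_BITS`, and the result is clamped to at least 32 bytes. A standalone sketch of that arithmetic (PAGE_SHIFT 12 and OBJ_INDEX_BITS 12 are assumed values here, not the kernel's config-derived ones):

/* Standalone sketch of the min_alloc_size lower bound; the constants
 * below are assumptions, the kernel derives them from its config. */
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed: 4 KiB pages */
#define OBJ_INDEX_BITS	12	/* assumed handle index width */

int main(void)
{
	for (unsigned int order = 1; order <= 4; order++) {
		unsigned int max_pages = 1U << order;
		/* zspage bytes divided by the number of indexable objects */
		unsigned int min = (max_pages << PAGE_SHIFT) >> OBJ_INDEX_BITS;

		if (min < 32U)	/* ZS_MIN_ALLOC_SIZE */
			min = 32U;
		printf("order %u -> min_alloc_size %u\n", order, min);
	}
	return 0;
}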
@@ -141,12 +134,10 @@
  * determined). NOTE: all those class sizes must be set as multiple of
  * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
  *
- * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
- * (reason above)
+ * pool->min_alloc_size (ZS_MIN_ALLOC_SIZE) and ZS_SIZE_CLASS_DELTA must
+ * be multiple of ZS_ALIGN (reason above)
  */
 #define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
-#define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
-				      ZS_SIZE_CLASS_DELTA) + 1)
 
 enum fullness_group {
 	ZS_EMPTY,
@@ -230,12 +221,15 @@ struct link_free {
 struct zs_pool {
 	const char *name;
 
-	struct size_class *size_class[ZS_SIZE_CLASSES];
+	struct size_class **size_class;
 	struct kmem_cache *handle_cachep;
 	struct kmem_cache *zspage_cachep;
 
 	atomic_long_t pages_allocated;
 
+	u32 num_size_classes;
+	u32 min_alloc_size;
+
 	struct zs_pool_stats stats;
 
 	/* Compact classes */
@@ -523,15 +517,15 @@ static void set_zspage_mapping(struct zspage *zspage,
  * classes depending on its size. This function returns index of the
  * size class which has chunk size big enough to hold the given size.
  */
-static int get_size_class_index(int size)
+static int get_size_class_index(struct zs_pool *pool, int size)
 {
 	int idx = 0;
 
-	if (likely(size > ZS_MIN_ALLOC_SIZE))
-		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
+	if (likely(size > pool->min_alloc_size))
+		idx = DIV_ROUND_UP(size - pool->min_alloc_size,
 				ZS_SIZE_CLASS_DELTA);
 
-	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
+	return min_t(int, pool->num_size_classes - 1, idx);
 }
 
 /* type can be of enum type class_stat_type or fullness_group */
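Each class thus covers one ZS_SIZE_CLASS_DELTA-wide bucket starting at pool->min_alloc_size, with oversized requests clamped to the last class. A userspace sketch of the same mapping (PAGE_SIZE 4096 and CLASS_BITS 8 are assumed defaults; the macros stand in for the kernel's DIV_ROUND_UP() and min_t()):

/* Userspace sketch of the size -> class-index mapping; PAGE_SIZE and
 * CLASS_BITS are assumed, not read from a real kernel config. */
#include <stdio.h>

#define PAGE_SIZE		4096U	/* assumed */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)	/* CLASS_BITS = 8 */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int size_class_index(unsigned int min_alloc_size,
				     unsigned int num_size_classes,
				     unsigned int size)
{
	unsigned int idx = 0;

	if (size > min_alloc_size)
		idx = DIV_ROUND_UP(size - min_alloc_size, ZS_SIZE_CLASS_DELTA);
	/* clamp to the last class, like the kernel's min_t() */
	return idx < num_size_classes - 1 ? idx : num_size_classes - 1;
}

int main(void)
{
	unsigned int nclasses =
		DIV_ROUND_UP(PAGE_SIZE - 32U, ZS_SIZE_CLASS_DELTA) + 1; /* 255 */

	printf("size  32 -> class %u\n", size_class_index(32, nclasses, 32));
	printf("size  33 -> class %u\n", size_class_index(32, nclasses, 33));
	printf("size 400 -> class %u\n", size_class_index(32, nclasses, 400));
	return 0;
}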
@@ -591,7 +585,7 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
 			"obj_allocated", "obj_used", "pages_used",
 			"pages_per_zspage", "freeable");
 
-	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+	for (i = 0; i < pool->num_size_classes; i++) {
 		class = pool->size_class[i];
 
 		if (class->index != i)
@@ -777,13 +771,13 @@ static enum fullness_group fix_fullness_group(struct size_class *class,
  * link together 3 PAGE_SIZE sized pages to form a zspage
  * since then we can perfectly fit in 8 such objects.
  */
-static int get_pages_per_zspage(int class_size)
+static int get_pages_per_zspage(u32 class_size, u32 max_pages_per_zspage)
 {
 	int i, max_usedpc = 0;
 	/* zspage order which gives maximum used size per KB */
 	int max_usedpc_order = 1;
 
-	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
+	for (i = 1; i <= max_pages_per_zspage; i++) {
 		int zspage_size;
 		int waste, usedpc;
 
@@ -1220,7 +1214,7 @@ unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
 {
 	struct size_class *class;
 
-	class = pool->size_class[get_size_class_index(size)];
+	class = pool->size_class[get_size_class_index(pool, size)];
 
 	return class->index;
 }
@@ -1431,7 +1425,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 
 	/* extra space in chunk to keep the handle */
 	size += ZS_HANDLE_SIZE;
-	class = pool->size_class[get_size_class_index(size)];
+	class = pool->size_class[get_size_class_index(pool, size)];
 
 	/* class->lock effectively protects the zpage migration */
 	spin_lock(&class->lock);
@@ -1980,7 +1974,7 @@ static void async_free_zspage(struct work_struct *work)
 	struct zs_pool *pool = container_of(work, struct zs_pool,
 					free_work);
 
-	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+	for (i = 0; i < pool->num_size_classes; i++) {
 		class = pool->size_class[i];
 		if (class->index != i)
 			continue;
@@ -2129,7 +2123,7 @@ unsigned long zs_compact(struct zs_pool *pool)
 	struct size_class *class;
 	unsigned long pages_freed = 0;
 
-	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
+	for (i = pool->num_size_classes - 1; i >= 0; i--) {
 		class = pool->size_class[i];
 		if (class->index != i)
 			continue;
@@ -2173,7 +2167,7 @@ static unsigned long zs_shrinker_count(struct shrinker *shrinker,
 	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
 			shrinker);
 
-	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
+	for (i = pool->num_size_classes - 1; i >= 0; i--) {
 		class = pool->size_class[i];
 		if (class->index != i)
 			continue;
@@ -2215,11 +2209,28 @@ struct zs_pool *zs_create_pool(const char *name)
 	int i;
 	struct zs_pool *pool;
 	struct size_class *prev_class = NULL;
+	u32 max_pages_per_zspage;
 
 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 	if (!pool)
 		return NULL;
 
+	max_pages_per_zspage = 1U << ZS_DEFAULT_PAGE_ORDER;
+	/* min_alloc_size must be multiple of ZS_ALIGN */
+	pool->min_alloc_size = (max_pages_per_zspage << PAGE_SHIFT) >>
+				OBJ_INDEX_BITS;
+	pool->min_alloc_size = max(pool->min_alloc_size, ZS_MIN_ALLOC_SIZE);
+
+	pool->num_size_classes =
+		DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - pool->min_alloc_size,
+			     ZS_SIZE_CLASS_DELTA) + 1;
+
+	pool->size_class = kmalloc_array(pool->num_size_classes,
+					 sizeof(struct size_class *),
+					 GFP_KERNEL | __GFP_ZERO);
+	if (!pool->size_class)
+		goto err;
+
 	init_deferred_free(pool);
 	rwlock_init(&pool->migrate_lock);
 
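With the assumed defaults used in the earlier sketches (4 KiB pages, CLASS_BITS 8, min_alloc_size 32), this works out to 255 classes, so the fixed-size class array becomes a table sized at pool creation. A small sketch of that sizing, with calloc() standing in for kmalloc_array() plus __GFP_ZERO:

/* Sketch: how many size classes a pool gets for a given min_alloc_size.
 * PAGE_SIZE and CLASS_BITS are assumed values, not config-derived. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE		4096U	/* assumed */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)	/* CLASS_BITS = 8 */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int min_alloc_size = 32U;
	unsigned int num = DIV_ROUND_UP(PAGE_SIZE - min_alloc_size,
					ZS_SIZE_CLASS_DELTA) + 1;
	/* the class table is now allocated at run time */
	void **size_class = calloc(num, sizeof(*size_class));

	if (!size_class)
		return 1;
	printf("%u classes, table of %zu bytes\n",
	       num, num * sizeof(*size_class));
	free(size_class);
	return 0;
}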
@@ -2234,17 +2245,18 @@ struct zs_pool *zs_create_pool(const char *name)
 	 * Iterate reversely, because, size of size_class that we want to use
 	 * for merging should be larger or equal to current size.
 	 */
-	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
+	for (i = pool->num_size_classes - 1; i >= 0; i--) {
 		int size;
 		int pages_per_zspage;
 		int objs_per_zspage;
 		struct size_class *class;
 		int fullness = 0;
 
-		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
+		size = pool->min_alloc_size + i * ZS_SIZE_CLASS_DELTA;
 		if (size > ZS_MAX_ALLOC_SIZE)
 			size = ZS_MAX_ALLOC_SIZE;
-		pages_per_zspage = get_pages_per_zspage(size);
+		pages_per_zspage = get_pages_per_zspage(size,
+							max_pages_per_zspage);
 		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
 
 		/*
@@ -2328,7 +2340,7 @@ void zs_destroy_pool(struct zs_pool *pool)
 	zs_flush_migration(pool);
 	zs_pool_stat_destroy(pool);
 
-	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+	for (i = 0; i < pool->num_size_classes; i++) {
 		int fg;
 		struct size_class *class = pool->size_class[i];
 
@@ -2348,6 +2360,7 @@ void zs_destroy_pool(struct zs_pool *pool)
 	}
 
 	destroy_cache(pool);
+	kfree(pool->size_class);
 	kfree(pool->name);
 	kfree(pool);
 }