Skip to content

Commit 29f6738

Browse files
Yinghai Lu authored and torvalds committed
memblock: free allocated memblock_reserved_regions later
memblock_free_reserved_regions() calls memblock_free(), but memblock_free() would double reserved.regions too, so we could free the old range for reserved.regions. Also tj said there is another bug which could be related to this. | I don't think we're saving any noticeable | amount by doing this "free - give it to page allocator - reserve | again" dancing. We should just allocate regions aligned to page | boundaries and free them later when memblock is no longer in use. in that case, when DEBUG_PAGEALLOC, will get panic: memblock_free: [0x0000102febc080-0x0000102febf080] memblock_free_reserved_regions+0x37/0x39 BUG: unable to handle kernel paging request at ffff88102febd948 IP: [<ffffffff836a5774>] __next_free_mem_range+0x9b/0x155 PGD 4826063 PUD cf67a067 PMD cf7fa067 PTE 800000102febd160 Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC CPU 0 Pid: 0, comm: swapper Not tainted 3.5.0-rc2-next-20120614-sasha #447 RIP: 0010:[<ffffffff836a5774>] [<ffffffff836a5774>] __next_free_mem_range+0x9b/0x155 See the discussion at https://lkml.org/lkml/2012/6/13/469 So try to allocate with PAGE_SIZE alignment and free it later. Reported-by: Sasha Levin <levinsasha928@gmail.com> Acked-by: Tejun Heo <tj@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Yinghai Lu <yinghai@kernel.org> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 99ab7b1 commit 29f6738

File tree

3 files changed

+47
-46
lines changed

3 files changed

+47
-46
lines changed

include/linux/memblock.h

+1-3
Original file line numberDiff line numberDiff line change
@@ -50,9 +50,7 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
5050
phys_addr_t size, phys_addr_t align, int nid);
5151
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
5252
phys_addr_t size, phys_addr_t align);
53-
int memblock_free_reserved_regions(void);
54-
int memblock_reserve_reserved_regions(void);
55-
53+
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
5654
void memblock_allow_resize(void);
5755
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
5856
int memblock_add(phys_addr_t base, phys_addr_t size);

mm/memblock.c

+23-28
Original file line numberDiff line numberDiff line change
@@ -143,30 +143,6 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
143143
MAX_NUMNODES);
144144
}
145145

146-
/*
147-
* Free memblock.reserved.regions
148-
*/
149-
int __init_memblock memblock_free_reserved_regions(void)
150-
{
151-
if (memblock.reserved.regions == memblock_reserved_init_regions)
152-
return 0;
153-
154-
return memblock_free(__pa(memblock.reserved.regions),
155-
sizeof(struct memblock_region) * memblock.reserved.max);
156-
}
157-
158-
/*
159-
* Reserve memblock.reserved.regions
160-
*/
161-
int __init_memblock memblock_reserve_reserved_regions(void)
162-
{
163-
if (memblock.reserved.regions == memblock_reserved_init_regions)
164-
return 0;
165-
166-
return memblock_reserve(__pa(memblock.reserved.regions),
167-
sizeof(struct memblock_region) * memblock.reserved.max);
168-
}
169-
170146
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
171147
{
172148
type->total_size -= type->regions[r].size;
@@ -184,6 +160,18 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
184160
}
185161
}
186162

163+
phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
164+
phys_addr_t *addr)
165+
{
166+
if (memblock.reserved.regions == memblock_reserved_init_regions)
167+
return 0;
168+
169+
*addr = __pa(memblock.reserved.regions);
170+
171+
return PAGE_ALIGN(sizeof(struct memblock_region) *
172+
memblock.reserved.max);
173+
}
174+
187175
/**
188176
* memblock_double_array - double the size of the memblock regions array
189177
* @type: memblock type of the regions array being doubled
@@ -204,6 +192,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
204192
phys_addr_t new_area_size)
205193
{
206194
struct memblock_region *new_array, *old_array;
195+
phys_addr_t old_alloc_size, new_alloc_size;
207196
phys_addr_t old_size, new_size, addr;
208197
int use_slab = slab_is_available();
209198
int *in_slab;
@@ -217,6 +206,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
217206
/* Calculate new doubled size */
218207
old_size = type->max * sizeof(struct memblock_region);
219208
new_size = old_size << 1;
209+
/*
210+
* We need to allocated new one align to PAGE_SIZE,
211+
* so we can free them completely later.
212+
*/
213+
old_alloc_size = PAGE_ALIGN(old_size);
214+
new_alloc_size = PAGE_ALIGN(new_size);
220215

221216
/* Retrieve the slab flag */
222217
if (type == &memblock.memory)
@@ -245,11 +240,11 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
245240

246241
addr = memblock_find_in_range(new_area_start + new_area_size,
247242
memblock.current_limit,
248-
new_size, sizeof(phys_addr_t));
243+
new_alloc_size, PAGE_SIZE);
249244
if (!addr && new_area_size)
250245
addr = memblock_find_in_range(0,
251246
min(new_area_start, memblock.current_limit),
252-
new_size, sizeof(phys_addr_t));
247+
new_alloc_size, PAGE_SIZE);
253248

254249
new_array = addr ? __va(addr) : 0;
255250
}
@@ -279,13 +274,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
279274
kfree(old_array);
280275
else if (old_array != memblock_memory_init_regions &&
281276
old_array != memblock_reserved_init_regions)
282-
memblock_free(__pa(old_array), old_size);
277+
memblock_free(__pa(old_array), old_alloc_size);
283278

284279
/* Reserve the new array if that comes from the memblock.
285280
* Otherwise, we needn't do it
286281
*/
287282
if (!use_slab)
288-
BUG_ON(memblock_reserve(addr, new_size));
283+
BUG_ON(memblock_reserve(addr, new_alloc_size));
289284

290285
/* Update slab flag */
291286
*in_slab = use_slab;

mm/nobootmem.c

+23-15
Original file line numberDiff line numberDiff line change
@@ -105,27 +105,35 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
105105
__free_pages_bootmem(pfn_to_page(i), 0);
106106
}
107107

108+
static unsigned long __init __free_memory_core(phys_addr_t start,
109+
phys_addr_t end)
110+
{
111+
unsigned long start_pfn = PFN_UP(start);
112+
unsigned long end_pfn = min_t(unsigned long,
113+
PFN_DOWN(end), max_low_pfn);
114+
115+
if (start_pfn > end_pfn)
116+
return 0;
117+
118+
__free_pages_memory(start_pfn, end_pfn);
119+
120+
return end_pfn - start_pfn;
121+
}
122+
108123
unsigned long __init free_low_memory_core_early(int nodeid)
109124
{
110125
unsigned long count = 0;
111-
phys_addr_t start, end;
126+
phys_addr_t start, end, size;
112127
u64 i;
113128

114-
/* free reserved array temporarily so that it's treated as free area */
115-
memblock_free_reserved_regions();
116-
117-
for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
118-
unsigned long start_pfn = PFN_UP(start);
119-
unsigned long end_pfn = min_t(unsigned long,
120-
PFN_DOWN(end), max_low_pfn);
121-
if (start_pfn < end_pfn) {
122-
__free_pages_memory(start_pfn, end_pfn);
123-
count += end_pfn - start_pfn;
124-
}
125-
}
129+
for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
130+
count += __free_memory_core(start, end);
131+
132+
/* free range that is used for reserved array if we allocate it */
133+
size = get_allocated_memblock_reserved_regions_info(&start);
134+
if (size)
135+
count += __free_memory_core(start, start + size);
126136

127-
/* put region array back? */
128-
memblock_reserve_reserved_regions();
129137
return count;
130138
}
131139

0 commit comments

Comments
 (0)