@@ -731,10 +731,6 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 static DEFINE_SPINLOCK(free_vmap_area_lock);
 static bool vmap_initialized __read_mostly;
 
-static struct rb_root purge_vmap_area_root = RB_ROOT;
-static LIST_HEAD(purge_vmap_area_list);
-static DEFINE_SPINLOCK(purge_vmap_area_lock);
-
 /*
  * This kmem_cache is used for vmap_area objects. Instead of
  * allocating from slab we reuse an object from this cache to
@@ -782,6 +778,12 @@ struct rb_list {
 static struct vmap_node {
 	/* Bookkeeping data of this node. */
 	struct rb_list busy;
+	struct rb_list lazy;
+
+	/*
+	 * Ready-to-free areas.
+	 */
+	struct list_head purge_list;
 } single;
 
 static struct vmap_node *vmap_nodes = &single;
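The hunks below lean on two things that sit just outside this diff: the rb_list bundle named in the hunk header above, and the addr_to_node() helper used by free_vmap_area_noflush() further down. A rough sketch of both, inferred from how the fields and the helper are used in this commit; the zone-hashing detail in particular is an assumption, not part of the diff:

/* Inferred from usage in this diff: an rb-tree plus its mirror list,
 * guarded by one lock ("busy" and "lazy" above are instances). */
struct rb_list {
	struct rb_root root;
	struct list_head head;
	spinlock_t lock;
};

/* Assumed node lookup: hash an address to its vmap_node. The exact
 * scheme (vmap_zone_size, modulo nr_vmap_nodes) is hypothetical. */
static struct vmap_node *addr_to_node(unsigned long addr)
{
	return &vmap_nodes[(addr / vmap_zone_size) % nr_vmap_nodes];
}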
@@ -1766,40 +1768,22 @@ static DEFINE_MUTEX(vmap_purge_lock);
 
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
+static cpumask_t purge_nodes;
 
 /*
  * Purges all lazily-freed vmap areas.
  */
-static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
+static unsigned long
+purge_vmap_node(struct vmap_node *vn)
 {
-	unsigned long resched_threshold;
-	unsigned int num_purged_areas = 0;
-	struct list_head local_purge_list;
+	unsigned long num_purged_areas = 0;
 	struct vmap_area *va, *n_va;
 
-	lockdep_assert_held(&vmap_purge_lock);
-
-	spin_lock(&purge_vmap_area_lock);
-	purge_vmap_area_root = RB_ROOT;
-	list_replace_init(&purge_vmap_area_list, &local_purge_list);
-	spin_unlock(&purge_vmap_area_lock);
-
-	if (unlikely(list_empty(&local_purge_list)))
-		goto out;
-
-	start = min(start,
-		list_first_entry(&local_purge_list,
-			struct vmap_area, list)->va_start);
-
-	end = max(end,
-		list_last_entry(&local_purge_list,
-			struct vmap_area, list)->va_end);
-
-	flush_tlb_kernel_range(start, end);
-	resched_threshold = lazy_max_pages() << 1;
+	if (list_empty(&vn->purge_list))
+		return 0;
 
 	spin_lock(&free_vmap_area_lock);
-	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
+	list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
 		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 		unsigned long orig_start = va->va_start;
 		unsigned long orig_end = va->va_end;
@@ -1821,13 +1805,55 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
 
 		atomic_long_sub(nr, &vmap_lazy_nr);
 		num_purged_areas++;
-
-		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
-			cond_resched_lock(&free_vmap_area_lock);
 	}
 	spin_unlock(&free_vmap_area_lock);
 
-out:
+	return num_purged_areas;
+}
+
+/*
+ * Purges all lazily-freed vmap areas.
+ */
+static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
+{
+	unsigned long num_purged_areas = 0;
+	struct vmap_node *vn;
+	int i;
+
+	lockdep_assert_held(&vmap_purge_lock);
+	purge_nodes = CPU_MASK_NONE;
+
+	for (i = 0; i < nr_vmap_nodes; i++) {
+		vn = &vmap_nodes[i];
+
+		INIT_LIST_HEAD(&vn->purge_list);
+
+		if (RB_EMPTY_ROOT(&vn->lazy.root))
+			continue;
+
+		spin_lock(&vn->lazy.lock);
+		WRITE_ONCE(vn->lazy.root.rb_node, NULL);
+		list_replace_init(&vn->lazy.head, &vn->purge_list);
+		spin_unlock(&vn->lazy.lock);
+
+		start = min(start, list_first_entry(&vn->purge_list,
+			struct vmap_area, list)->va_start);
+
+		end = max(end, list_last_entry(&vn->purge_list,
+			struct vmap_area, list)->va_end);
+
+		cpumask_set_cpu(i, &purge_nodes);
+	}
+
+	if (cpumask_weight(&purge_nodes) > 0) {
+		flush_tlb_kernel_range(start, end);
+
+		for_each_cpu(i, &purge_nodes) {
+			vn = &vmap_nodes[i];
+			num_purged_areas += purge_vmap_node(vn);
+		}
+	}
+
 	trace_purge_vmap_area_lazy(start, end, num_purged_areas);
 	return num_purged_areas > 0;
 }
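The core of the rework is a detach-then-drain idiom: each node's lazy set is emptied in O(1) under vn->lazy.lock (a WRITE_ONCE() of the root plus a list_replace_init()), and the detached snapshot is then reclaimed holding only free_vmap_area_lock. A minimal sketch of the idiom in isolation, using the assumed rb_list layout sketched earlier:

/* Detach-then-drain: empty the shared structure in O(1) under its
 * lock; the caller walks the detached entries afterwards without
 * blocking concurrent producers. */
static void detach_all(struct rb_list *rbl, struct list_head *out)
{
	spin_lock(&rbl->lock);
	/* Clear the root with WRITE_ONCE() because the purge path
	 * tests RB_EMPTY_ROOT() without taking this lock. */
	WRITE_ONCE(rbl->root.rb_node, NULL);
	list_replace_init(&rbl->head, out);
	spin_unlock(&rbl->lock);
}

Producers (free_vmap_area_noflush() below) can start refilling the lazy tree the moment the lock drops; the purge pass only ever consumes the snapshot it detached.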
@@ -1846,16 +1872,9 @@ static void reclaim_and_purge_vmap_areas(void)
 
 static void drain_vmap_area_work(struct work_struct *work)
 {
-	unsigned long nr_lazy;
-
-	do {
-		mutex_lock(&vmap_purge_lock);
-		__purge_vmap_area_lazy(ULONG_MAX, 0);
-		mutex_unlock(&vmap_purge_lock);
-
-		/* Recheck if further work is required. */
-		nr_lazy = atomic_long_read(&vmap_lazy_nr);
-	} while (nr_lazy > lazy_max_pages());
+	mutex_lock(&vmap_purge_lock);
+	__purge_vmap_area_lazy(ULONG_MAX, 0);
+	mutex_unlock(&vmap_purge_lock);
 }
 
 /*
@@ -1865,6 +1884,7 @@ static void drain_vmap_area_work(struct work_struct *work)
  */
 static void free_vmap_area_noflush(struct vmap_area *va)
 {
+	struct vmap_node *vn = addr_to_node(va->va_start);
 	unsigned long nr_lazy_max = lazy_max_pages();
 	unsigned long va_start = va->va_start;
 	unsigned long nr_lazy;
@@ -1878,10 +1898,9 @@ static void free_vmap_area_noflush(struct vmap_area *va)
 	/*
 	 * Merge or place it to the purge tree/list.
 	 */
-	spin_lock(&purge_vmap_area_lock);
-	merge_or_add_vmap_area(va,
-		&purge_vmap_area_root, &purge_vmap_area_list);
-	spin_unlock(&purge_vmap_area_lock);
+	spin_lock(&vn->lazy.lock);
+	merge_or_add_vmap_area(va, &vn->lazy.root, &vn->lazy.head);
+	spin_unlock(&vn->lazy.lock);
 
 	trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max);
 
@@ -4411,15 +4430,21 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
 
 static void show_purge_info(struct seq_file *m)
 {
+	struct vmap_node *vn;
 	struct vmap_area *va;
+	int i;
 
-	spin_lock(&purge_vmap_area_lock);
-	list_for_each_entry(va, &purge_vmap_area_list, list) {
-		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
-			(void *)va->va_start, (void *)va->va_end,
-			va->va_end - va->va_start);
+	for (i = 0; i < nr_vmap_nodes; i++) {
+		vn = &vmap_nodes[i];
+
+		spin_lock(&vn->lazy.lock);
+		list_for_each_entry(va, &vn->lazy.head, list) {
+			seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
+				(void *)va->va_start, (void *)va->va_end,
+				va->va_end - va->va_start);
+		}
+		spin_unlock(&vn->lazy.lock);
 	}
-	spin_unlock(&purge_vmap_area_lock);
 }
 
 static int s_show(struct seq_file *m, void *p)
@@ -4558,6 +4583,10 @@ static void vmap_init_nodes(void)
 		vn->busy.root = RB_ROOT;
 		INIT_LIST_HEAD(&vn->busy.head);
 		spin_lock_init(&vn->busy.lock);
+
+		vn->lazy.root = RB_ROOT;
+		INIT_LIST_HEAD(&vn->lazy.head);
+		spin_lock_init(&vn->lazy.lock);
 	}
 }
 
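With the global list gone, any walker has to visit every node, as show_purge_info() above now does. For illustration only, the same per-node traversal pattern as a hypothetical helper, not part of this commit, that totals the bytes still parked on the lazy lists:

/* Hypothetical example of the per-node traversal pattern: sum the
 * unpurged bytes across all nodes, taking each lazy.lock in turn. */
static unsigned long unpurged_bytes(void)
{
	unsigned long bytes = 0;
	struct vmap_area *va;
	int i;

	for (i = 0; i < nr_vmap_nodes; i++) {
		struct vmap_node *vn = &vmap_nodes[i];

		spin_lock(&vn->lazy.lock);
		list_for_each_entry(va, &vn->lazy.head, list)
			bytes += va->va_end - va->va_start;
		spin_unlock(&vn->lazy.lock);
	}

	return bytes;
}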