@@ -90,6 +90,12 @@ static LIST_HEAD(mlx5_ib_dev_list);
90
90
*/
91
91
static DEFINE_MUTEX (mlx5_ib_multiport_mutex );
92
92
93
/*
 * Fallback page for mlx5_ib_update_xlt() when a regular allocation fails.
 * We can't use a static array for xlt_emergency_page because
 * dma_map_single() doesn't work on kernel-module (vmalloc'd image)
 * memory; the page is allocated at module init instead.
 */
static unsigned long xlt_emergency_page;
/* Serializes users of xlt_emergency_page: held from get() until put(). */
static struct mutex xlt_emergency_page_mutex;
93
99
struct mlx5_ib_dev * mlx5_ib_get_ibdev_from_mpi (struct mlx5_ib_multiport_info * mpi )
94
100
{
95
101
struct mlx5_ib_dev * dev ;
@@ -1671,17 +1677,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1671
1677
context -> ibucontext .invalidate_range = & mlx5_ib_invalidate_range ;
1672
1678
#endif
1673
1679
1674
- context -> upd_xlt_page = __get_free_page (GFP_KERNEL );
1675
- if (!context -> upd_xlt_page ) {
1676
- err = - ENOMEM ;
1677
- goto out_uars ;
1678
- }
1679
- mutex_init (& context -> upd_xlt_page_mutex );
1680
-
1681
1680
if (MLX5_CAP_GEN (dev -> mdev , log_max_transport_domain )) {
1682
1681
err = mlx5_ib_alloc_transport_domain (dev , & context -> tdn );
1683
1682
if (err )
1684
- goto out_page ;
1683
+ goto out_uars ;
1685
1684
}
1686
1685
1687
1686
INIT_LIST_HEAD (& context -> vma_private_list );
@@ -1758,9 +1757,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1758
1757
if (MLX5_CAP_GEN (dev -> mdev , log_max_transport_domain ))
1759
1758
mlx5_ib_dealloc_transport_domain (dev , context -> tdn );
1760
1759
1761
- out_page :
1762
- free_page (context -> upd_xlt_page );
1763
-
1764
1760
out_uars :
1765
1761
deallocate_uars (dev , context );
1766
1762
@@ -1786,7 +1782,6 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1786
1782
if (MLX5_CAP_GEN (dev -> mdev , log_max_transport_domain ))
1787
1783
mlx5_ib_dealloc_transport_domain (dev , context -> tdn );
1788
1784
1789
- free_page (context -> upd_xlt_page );
1790
1785
deallocate_uars (dev , context );
1791
1786
kfree (bfregi -> sys_pages );
1792
1787
kfree (bfregi -> count );
@@ -5065,13 +5060,32 @@ static struct mlx5_interface mlx5_ib_interface = {
5065
5060
.protocol = MLX5_INTERFACE_PROTOCOL_IB ,
5066
5061
};
5067
5062
5063
/*
 * Grant exclusive use of the module-wide emergency XLT page.
 *
 * NOTE: returns with xlt_emergency_page_mutex HELD; the caller owns the
 * page until it calls mlx5_ib_put_xlt_emergency_page(), which releases
 * the lock. The lock/unlock pair is intentionally split across the two
 * functions.
 */
unsigned long mlx5_ib_get_xlt_emergency_page(void)
{
	mutex_lock(&xlt_emergency_page_mutex);
	return xlt_emergency_page;
}
5068
+
5069
/*
 * Release the emergency XLT page acquired by
 * mlx5_ib_get_xlt_emergency_page(); drops the mutex taken there.
 */
void mlx5_ib_put_xlt_emergency_page(void)
{
	mutex_unlock(&xlt_emergency_page_mutex);
}
5073
+
5068
5074
static int __init mlx5_ib_init (void )
5069
5075
{
5070
5076
int err ;
5071
5077
5078
+ xlt_emergency_page = __get_free_page (GFP_KERNEL );
5079
+ if (!xlt_emergency_page )
5080
+ return - ENOMEM ;
5081
+
5082
+ mutex_init (& xlt_emergency_page_mutex );
5083
+
5072
5084
mlx5_ib_event_wq = alloc_ordered_workqueue ("mlx5_ib_event_wq" , 0 );
5073
- if (!mlx5_ib_event_wq )
5085
+ if (!mlx5_ib_event_wq ) {
5086
+ free_page (xlt_emergency_page );
5074
5087
return - ENOMEM ;
5088
+ }
5075
5089
5076
5090
mlx5_ib_odp_init ();
5077
5091
/* Module exit: undo mlx5_ib_init() in reverse order of setup. */
static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	destroy_workqueue(mlx5_ib_event_wq);
	/* No users remain once the interface is unregistered, so the
	 * emergency page and its mutex can be torn down safely. */
	mutex_destroy(&xlt_emergency_page_mutex);
	free_page(xlt_emergency_page);
}
5088
5104
5089
5105
module_init (mlx5_ib_init );
0 commit comments