@@ -109,8 +109,6 @@ MALLOC_DEFINE(M_LCINT, "linuxint", "Linux compat internal");
109
109
#undef file
110
110
#undef cdev
111
111
112
- static void * linux_cdev_handle_find (void * handle );
113
-
114
112
struct cpuinfo_x86 boot_cpu_data ;
115
113
116
114
struct kobject linux_class_root ;
@@ -677,31 +675,47 @@ linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_pag
677
675
}
678
676
#endif
679
677
678
+ struct lcdev_handle {
679
+ unsigned ref_count ;
680
+ void * vma_private_data ;
681
+ vm_offset_t vma_pgoff ;
682
+ struct linux_file * vma_file ;
683
+ const struct vm_operations_struct * vma_ops ;
684
+
685
+ TAILQ_ENTRY (lcdev_handle ) link ;
686
+ };
687
+
680
688
static int
681
689
linux_cdev_pager_populate (vm_object_t vm_obj , vm_pindex_t pidx , int fault_type ,
682
690
vm_prot_t max_prot , vm_pindex_t * first , vm_pindex_t * last )
683
691
{
684
692
struct vm_fault vmf ;
685
- struct vm_area_struct cvma , * vmap ;
693
+ struct vm_area_struct cvma ;
694
+ struct lcdev_handle * hndl ;
686
695
int rc , err ;
687
696
688
697
linux_set_current ();
689
-
690
- vmap = linux_cdev_handle_find ( vm_obj -> handle ) ;
698
+
699
+ hndl = vm_obj -> handle ;
691
700
vmf .virtual_address = (void * )(pidx << PAGE_SHIFT );
692
701
vmf .flags = (fault_type & VM_PROT_WRITE ) ? FAULT_FLAG_WRITE : 0 ;
693
- memcpy (& cvma , vmap , sizeof (* vmap ));
694
- MPASS (cvma .vm_private_data == vm_obj -> handle );
695
-
702
+
703
+ bzero (& cvma , sizeof (struct vm_area_struct ));
704
+ cvma .vm_private_data = hndl -> vma_private_data ;
705
+ cvma .vm_start = 0 ;
706
+ cvma .vm_end = ( vm_obj -> size - hndl -> vma_pgoff ) * PAGE_SIZE ;
707
+ cvma .vm_pgoff = hndl -> vma_pgoff ;
708
+ cvma .vm_pfn = 0 ;
709
+ cvma .vm_file = hndl -> vma_file ;
696
710
cvma .vm_pfn_count = 0 ;
697
711
cvma .vm_pfn_pcount = & cvma .vm_pfn_count ;
698
712
cvma .vm_obj = vm_obj ;
699
-
713
+
700
714
VM_OBJECT_WUNLOCK (vm_obj );
701
- err = vmap -> vm_ops -> fault (& cvma , & vmf );
715
+ err = hndl -> vma_ops -> fault (& cvma , & vmf );
702
716
while (cvma .vm_pfn_count == 0 && err == VM_FAULT_NOPAGE ) {
703
717
kern_yield (0 );
704
- err = vmap -> vm_ops -> fault (& cvma , & vmf );
718
+ err = hndl -> vma_ops -> fault (& cvma , & vmf );
705
719
}
706
720
atomic_add_int (& cdev_pfn_found_count , cvma .vm_pfn_count );
707
721
VM_OBJECT_WLOCK (vm_obj );
@@ -735,105 +749,120 @@ linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
735
749
return (rc );
736
750
}
737
751
738
- struct list_head lcdev_handle_list ;
739
752
740
- struct lcdev_handle_ref {
741
- void * handle ;
742
- void * data ;
743
- struct list_head list ;
744
- };
753
+ TAILQ_HEAD (, lcdev_handle ) lcdev_handle_list = TAILQ_HEAD_INITIALIZER (lcdev_handle_list );
745
754
746
- static void
747
- linux_cdev_handle_insert (void * handle , void * data , int size )
748
- {
749
- struct list_head * h ;
750
- struct lcdev_handle_ref * r ;
751
- void * datap ;
752
755
753
- rw_rlock (& linux_global_rw );
754
- list_for_each (h , & lcdev_handle_list ) {
755
- r = __containerof (h , struct lcdev_handle_ref , list );
756
- if (r -> handle == handle ) {
757
- rw_runlock (& linux_global_rw );
758
- return ;
756
+
757
+ static struct lcdev_handle *
758
+ linux_cdev_handle_insert (struct vm_area_struct * vma )
759
+ {
760
+ struct lcdev_handle * hndl , * list_hndl ;
761
+
762
+ hndl = malloc (sizeof (struct lcdev_handle ), M_KMALLOC , M_WAITOK );
763
+ hndl -> vma_private_data = vma -> vm_private_data ;
764
+ hndl -> ref_count = 1 ;
765
+ hndl -> vma_pgoff = vma -> vm_pgoff ;
766
+ hndl -> vma_file = vma -> vm_file ;
767
+ hndl -> vma_ops = vma -> vm_ops ;
768
+
769
+ rw_wlock (& linux_global_rw );
770
+ TAILQ_FOREACH (list_hndl , & lcdev_handle_list , link ) {
771
+ if ( list_hndl -> vma_private_data == hndl -> vma_private_data ) {
772
+ list_hndl -> ref_count ++ ;
773
+
774
+ rw_wunlock (& linux_global_rw );
775
+ MPASS ( hndl -> vma_pgoff == list_hndl -> vma_pgoff );
776
+ if ( 0 )
777
+ MPASS ( hndl -> vma_file == list_hndl -> vma_file );
778
+ else if ( hndl -> vma_file != list_hndl -> vma_file )
779
+ {
780
+ /*
781
+ Same object accessible through different files.
782
+ Let's hope the Linux code doesn't really care about the file .
783
+ If it does, we will need to track the different mappings,
784
+ and probably use the largest one
785
+ */
786
+
787
+ list_hndl -> vma_file = NULL ;
788
+ }
789
+
790
+ MPASS ( hndl -> vma_ops == list_hndl -> vma_ops );
791
+ free (hndl , M_KMALLOC );
792
+ return list_hndl ;
759
793
}
760
794
}
761
- rw_runlock (& linux_global_rw );
762
- r = malloc (sizeof (struct lcdev_handle_ref ), M_KMALLOC , M_WAITOK );
763
- r -> handle = handle ;
764
- datap = malloc (size , M_KMALLOC , M_WAITOK );
765
- memcpy (datap , data , size );
766
- r -> data = datap ;
767
- INIT_LIST_HEAD (& r -> list ); /* XXX why _HEAD? */
795
+
796
+ TAILQ_INSERT_TAIL ( & lcdev_handle_list , hndl , link );
797
+ rw_wunlock (& linux_global_rw );
798
+
799
+ return hndl ;
800
+ }
801
+
802
+ static void linux_cdesv_handle_ref (struct lcdev_handle * hndl )
803
+ {
768
804
rw_wlock (& linux_global_rw );
769
- /* XXX need to re-lookup */
770
- list_add_tail ( & r -> list , & lcdev_handle_list ) ;
805
+ MPASS ( hndl -> ref_count );
806
+ hndl -> ref_count ++ ;
771
807
rw_wunlock (& linux_global_rw );
772
808
}
773
809
774
810
static void
775
- linux_cdev_handle_remove (void * handle )
811
+ linux_cdev_handle_remove (struct lcdev_handle * hndl )
776
812
{
777
- struct list_head * h ;
778
- struct lcdev_handle_ref * r ;
779
-
780
- rw_wlock (& linux_global_rw );
781
- list_for_each (h , & lcdev_handle_list ) {
782
- r = __containerof (h , struct lcdev_handle_ref , list );
783
- if (r -> handle == handle )
784
- break ;
785
- }
786
- MPASS (r && r -> handle == handle );
787
- list_del (& r -> list );
813
+ rw_wlock (& linux_global_rw );
814
+ hndl -> ref_count -- ;
815
+ if ( hndl -> ref_count == 0 ) {
816
+ TAILQ_REMOVE ( & lcdev_handle_list , hndl , link );
817
+ } else
818
+ hndl = NULL ;
788
819
rw_wunlock (& linux_global_rw );
789
- free (r -> data , M_KMALLOC );
790
- free (r , M_KMALLOC );
820
+
821
+ if ( hndl )
822
+ free (hndl , M_KMALLOC );
791
823
}
792
824
793
- static void *
794
- linux_cdev_handle_find (void * handle )
795
- {
796
- struct list_head * h ;
797
- struct lcdev_handle_ref * r ;
798
- void * data ;
799
825
826
+ void *
827
+ linux_cdev_handle_find_by_pd (void * pd )
828
+ {
829
+ struct lcdev_handle * hndl ;
830
+
800
831
rw_rlock (& linux_global_rw );
801
- list_for_each (h , & lcdev_handle_list ) {
802
- r = __containerof (h , struct lcdev_handle_ref , list );
803
- if (r -> handle == handle )
804
- break ;
832
+ TAILQ_FOREACH (hndl , & lcdev_handle_list , link ) {
833
+ if ( hndl -> vma_private_data == pd ) {
834
+ rw_runlock (& linux_global_rw );
835
+ return hndl ;
836
+ }
805
837
}
806
- MPASS (r && r -> handle == handle );
807
- data = r -> data ;
808
838
rw_runlock (& linux_global_rw );
809
- return (data );
839
+
840
+
841
+ return NULL ;
810
842
}
811
843
844
+
812
845
static int
813
846
linux_cdev_pager_ctor (void * handle , vm_ooffset_t size , vm_prot_t prot ,
814
847
vm_ooffset_t foff , struct ucred * cred , u_short * color )
815
848
{
816
- struct vm_area_struct * vmap ;
817
-
818
- vmap = linux_cdev_handle_find (handle );
819
- MPASS (vmap != NULL );
820
- vmap -> vm_private_data = handle ;
821
-
849
+ linux_cdesv_handle_ref (handle );
822
850
* color = 0 ;
823
851
return (0 );
824
852
}
825
853
826
854
static void
827
855
linux_cdev_pager_dtor (void * handle )
828
856
{
829
- struct vm_area_struct * vmap ;
830
-
831
- vmap = linux_cdev_handle_find (handle );
832
- MPASS (vmap != NULL );
857
+ struct lcdev_handle * hndl = handle ;
858
+ struct vm_area_struct cvma ;
859
+
860
+ cvma .vm_private_data = hndl -> vma_private_data ;
861
+
862
+ const struct vm_operations_struct * vma_ops = hndl -> vma_ops ;
863
+ linux_cdev_handle_remove (hndl );
833
864
834
- vmap -> vm_ops -> close (vmap );
835
- vmap -> vm_private_data = handle ;
836
- linux_cdev_handle_remove (handle );
865
+ vma_ops -> close (& cvma );
837
866
}
838
867
839
868
static struct cdev_pager_ops linux_cdev_pager_ops = {
@@ -1223,6 +1252,7 @@ linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
1223
1252
struct file * file ;
1224
1253
struct vm_area_struct vma ;
1225
1254
vm_memattr_t attr ;
1255
+ struct lcdev_handle * cdev_hndl ;
1226
1256
int error ;
1227
1257
1228
1258
td = curthread ;
@@ -1252,13 +1282,12 @@ linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
1252
1282
MPASS (vma .vm_ops -> open != NULL );
1253
1283
MPASS (vma .vm_ops -> close != NULL );
1254
1284
1255
- linux_cdev_handle_insert (vma . vm_private_data , & vma , sizeof ( struct vm_area_struct ) );
1256
- * object = cdev_pager_allocate (vma . vm_private_data , OBJT_MGTDEVICE ,
1285
+ cdev_hndl = linux_cdev_handle_insert (& vma );
1286
+ * object = cdev_pager_allocate (cdev_hndl , OBJT_MGTDEVICE ,
1257
1287
& linux_cdev_pager_ops , size , nprot ,
1258
1288
* offset , curthread -> td_ucred );
1259
-
1260
- if (* object == NULL )
1261
- linux_cdev_handle_remove (vma .vm_private_data );
1289
+
1290
+ linux_cdev_handle_remove (cdev_hndl );
1262
1291
} else {
1263
1292
sg = sglist_alloc (1 , M_WAITOK );
1264
1293
sglist_append_phys (sg ,
@@ -2042,7 +2071,6 @@ linux_compat_init(void *arg)
2042
2071
boot_cpu_data .x86_clflush_size = cpu_clflush_line_size ;
2043
2072
boot_cpu_data .x86 = ((cpu_id & 0xF0000 ) >> 12 ) | ((cpu_id & 0xF0 ) >> 4 );
2044
2073
2045
- INIT_LIST_HEAD (& lcdev_handle_list );
2046
2074
rootoid = SYSCTL_ADD_ROOT_NODE (NULL ,
2047
2075
OID_AUTO , "sys" , CTLFLAG_RD |CTLFLAG_MPSAFE , NULL , "sys" );
2048
2076
kobject_init (& linux_class_root , & linux_class_ktype );
0 commit comments