@@ -109,8 +109,6 @@ MALLOC_DEFINE(M_LCINT, "linuxint", "Linux compat internal");
109
109
#undef file
110
110
#undef cdev
111
111
112
- static void * linux_cdev_handle_find (void * handle );
113
-
114
112
struct cpuinfo_x86 boot_cpu_data ;
115
113
116
114
struct kobject linux_class_root ;
@@ -655,31 +653,47 @@ linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_pag
655
653
}
656
654
#endif
657
655
656
/*
 * Shared, reference-counted record describing one Linux device mapping.
 * It captures enough of the original struct vm_area_struct that the cdev
 * pager callbacks can synthesize a fresh vma on each fault/teardown.
 */
struct lcdev_handle {
	atomic_t	ref_count;		/* live users; freed when it drops to zero */
	void		*vma_private_data;	/* lookup key: original vma->vm_private_data */
	vm_offset_t	vma_pgoff;		/* page offset of the mapping */
	struct linux_file *vma_file;		/* backing file; set to NULL when mappings disagree */
	const struct vm_operations_struct *vma_ops; /* driver callbacks (fault, close, ...) */

	TAILQ_ENTRY(lcdev_handle) link;		/* entry on the global lcdev_handle_list */
};
665
+
658
666
static int
659
667
linux_cdev_pager_populate (vm_object_t vm_obj , vm_pindex_t pidx , int fault_type ,
660
668
vm_prot_t max_prot , vm_pindex_t * first , vm_pindex_t * last )
661
669
{
662
670
struct vm_fault vmf ;
663
- struct vm_area_struct cvma , * vmap ;
671
+ struct vm_area_struct cvma ;
672
+ struct lcdev_handle * hndl ;
664
673
int rc , err ;
665
674
666
675
linux_set_current (curthread );
667
-
668
- vmap = linux_cdev_handle_find ( vm_obj -> handle ) ;
676
+
677
+ hndl = vm_obj -> handle ;
669
678
vmf .virtual_address = (void * )(pidx << PAGE_SHIFT );
670
679
vmf .flags = (fault_type & VM_PROT_WRITE ) ? FAULT_FLAG_WRITE : 0 ;
671
- memcpy (& cvma , vmap , sizeof (* vmap ));
672
- MPASS (cvma .vm_private_data == vm_obj -> handle );
673
-
680
+
681
+ bzero (& cvma , sizeof (struct vm_area_struct ));
682
+ cvma .vm_private_data = hndl -> vma_private_data ;
683
+ cvma .vm_start = 0 ;
684
+ cvma .vm_end = ( vm_obj -> size - hndl -> vma_pgoff ) * PAGE_SIZE ;
685
+ cvma .vm_pgoff = hndl -> vma_pgoff ;
686
+ cvma .vm_pfn = 0 ;
687
+ cvma .vm_file = hndl -> vma_file ;
674
688
cvma .vm_pfn_count = 0 ;
675
689
cvma .vm_pfn_pcount = & cvma .vm_pfn_count ;
676
690
cvma .vm_obj = vm_obj ;
677
-
691
+
678
692
VM_OBJECT_WUNLOCK (vm_obj );
679
- err = vmap -> vm_ops -> fault (& cvma , & vmf );
693
+ err = hndl -> vma_ops -> fault (& cvma , & vmf );
680
694
while (cvma .vm_pfn_count == 0 && err == VM_FAULT_NOPAGE ) {
681
695
kern_yield (0 );
682
- err = vmap -> vm_ops -> fault (& cvma , & vmf );
696
+ err = hndl -> vma_ops -> fault (& cvma , & vmf );
683
697
}
684
698
atomic_add_int (& cdev_pfn_found_count , cvma .vm_pfn_count );
685
699
VM_OBJECT_WLOCK (vm_obj );
@@ -713,105 +727,112 @@ linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
713
727
return (rc );
714
728
}
715
729
716
- struct list_head lcdev_handle_list ;
717
730
718
- struct lcdev_handle_ref {
719
- void * handle ;
720
- void * data ;
721
- struct list_head list ;
722
- };
731
+ TAILQ_HEAD (, lcdev_handle ) lcdev_handle_list = TAILQ_HEAD_INITIALIZER (lcdev_handle_list );
723
732
724
- static void
725
- linux_cdev_handle_insert (void * handle , void * data , int size )
726
- {
727
- struct list_head * h ;
728
- struct lcdev_handle_ref * r ;
729
- void * datap ;
730
733
731
- rw_rlock (& linux_global_rw );
732
- list_for_each (h , & lcdev_handle_list ) {
733
- r = __containerof (h , struct lcdev_handle_ref , list );
734
- if (r -> handle == handle ) {
735
- rw_runlock (& linux_global_rw );
736
- return ;
734
+
735
+ static struct lcdev_handle *
736
+ linux_cdev_handle_insert (struct vm_area_struct * vma )
737
+ {
738
+ struct lcdev_handle * hndl , * list_hndl ;
739
+
740
+ hndl = malloc (sizeof (struct lcdev_handle ), M_KMALLOC , M_WAITOK );
741
+ hndl -> vma_private_data = vma -> vm_private_data ;
742
+ atomic_set ( & hndl -> ref_count , 1 );
743
+ hndl -> vma_pgoff = vma -> vm_pgoff ;
744
+ hndl -> vma_file = vma -> vm_file ;
745
+ hndl -> vma_ops = vma -> vm_ops ;
746
+
747
+ rw_wlock (& linux_global_rw );
748
+ TAILQ_FOREACH (list_hndl , & lcdev_handle_list , link ) {
749
+ if ( list_hndl -> vma_private_data == hndl -> vma_private_data && atomic_inc_not_zero ( & list_hndl -> ref_count ) ) {
750
+
751
+ rw_wunlock (& linux_global_rw );
752
+ MPASS ( hndl -> vma_pgoff == list_hndl -> vma_pgoff );
753
+ if ( hndl -> vma_file != list_hndl -> vma_file )
754
+ {
755
+ /*
756
+ Same object accessible through different files.
757
+ Let's hope the Linux code don't really care about the file.
758
+ If it diess, we will need to track the different mappings,
759
+ and probably use the largest one
760
+ */
761
+
762
+ list_hndl -> vma_file = NULL ;
763
+ }
764
+
765
+ MPASS ( hndl -> vma_ops == list_hndl -> vma_ops );
766
+ free (hndl , M_KMALLOC );
767
+ return list_hndl ;
737
768
}
738
769
}
739
- rw_runlock (& linux_global_rw );
740
- r = malloc (sizeof (struct lcdev_handle_ref ), M_KMALLOC , M_WAITOK );
741
- r -> handle = handle ;
742
- datap = malloc (size , M_KMALLOC , M_WAITOK );
743
- memcpy (datap , data , size );
744
- r -> data = datap ;
745
- INIT_LIST_HEAD (& r -> list ); /* XXX why _HEAD? */
746
- rw_wlock (& linux_global_rw );
747
- /* XXX need to re-lookup */
748
- list_add_tail (& r -> list , & lcdev_handle_list );
770
+
771
+ TAILQ_INSERT_TAIL ( & lcdev_handle_list , hndl , link );
749
772
rw_wunlock (& linux_global_rw );
773
+
774
+ return hndl ;
750
775
}
751
776
752
- static void
753
- linux_cdev_handle_remove (void * handle )
777
+ static void linux_cdesv_handle_ref (struct lcdev_handle * hndl )
754
778
{
755
- struct list_head * h ;
756
- struct lcdev_handle_ref * r ;
779
+ MPASS ( atomic_read (& hndl -> ref_count ) );
780
+ atomic_inc ( & hndl -> ref_count );
781
+ }
757
782
758
- rw_wlock (& linux_global_rw );
759
- list_for_each (h , & lcdev_handle_list ) {
760
- r = __containerof (h , struct lcdev_handle_ref , list );
761
- if (r -> handle == handle )
762
- break ;
783
+ static void
784
+ linux_cdev_handle_remove (struct lcdev_handle * hndl )
785
+ {
786
+ if ( atomic_dec_and_test ( & hndl -> ref_count ) ) {
787
+ rw_wlock (& linux_global_rw );
788
+ TAILQ_REMOVE ( & lcdev_handle_list , hndl , link );
789
+ rw_wunlock (& linux_global_rw );
790
+
791
+ free (hndl , M_KMALLOC );
763
792
}
764
- MPASS (r && r -> handle == handle );
765
- list_del (& r -> list );
766
- rw_wunlock (& linux_global_rw );
767
- free (r -> data , M_KMALLOC );
768
- free (r , M_KMALLOC );
769
793
}
770
794
771
- static void *
772
- linux_cdev_handle_find (void * handle )
773
- {
774
- struct list_head * h ;
775
- struct lcdev_handle_ref * r ;
776
- void * data ;
777
795
796
+ void *
797
+ linux_cdev_handle_find_by_pd (void * pd )
798
+ {
799
+ struct lcdev_handle * hndl ;
800
+
778
801
rw_rlock (& linux_global_rw );
779
- list_for_each (h , & lcdev_handle_list ) {
780
- r = __containerof (h , struct lcdev_handle_ref , list );
781
- if (r -> handle == handle )
782
- break ;
802
+ TAILQ_FOREACH (hndl , & lcdev_handle_list , link ) {
803
+ if ( hndl -> vma_private_data == pd ) {
804
+ rw_runlock (& linux_global_rw );
805
+ return hndl ;
806
+ }
783
807
}
784
- MPASS (r && r -> handle == handle );
785
- data = r -> data ;
786
808
rw_runlock (& linux_global_rw );
787
- return (data );
809
+
810
+
811
+ return NULL ;
788
812
}
789
813
814
+
790
815
static int
791
816
linux_cdev_pager_ctor (void * handle , vm_ooffset_t size , vm_prot_t prot ,
792
817
vm_ooffset_t foff , struct ucred * cred , u_short * color )
793
818
{
794
- struct vm_area_struct * vmap ;
795
-
796
- vmap = linux_cdev_handle_find (handle );
797
- MPASS (vmap != NULL );
798
- vmap -> vm_private_data = handle ;
799
-
819
+ linux_cdesv_handle_ref (handle );
800
820
* color = 0 ;
801
821
return (0 );
802
822
}
803
823
804
824
static void
805
825
linux_cdev_pager_dtor (void * handle )
806
826
{
807
- struct vm_area_struct * vmap ;
808
-
809
- vmap = linux_cdev_handle_find (handle );
810
- MPASS (vmap != NULL );
827
+ struct lcdev_handle * hndl = handle ;
828
+ struct vm_area_struct cvma ;
829
+
830
+ cvma .vm_private_data = hndl -> vma_private_data ;
831
+
832
+ const struct vm_operations_struct * vma_ops = hndl -> vma_ops ;
833
+ linux_cdev_handle_remove (hndl );
811
834
812
- vmap -> vm_ops -> close (vmap );
813
- vmap -> vm_private_data = handle ;
814
- linux_cdev_handle_remove (handle );
835
+ vma_ops -> close (& cvma );
815
836
}
816
837
817
838
static struct cdev_pager_ops linux_cdev_pager_ops = {
@@ -1201,6 +1222,7 @@ linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
1201
1222
struct file * file ;
1202
1223
struct vm_area_struct vma ;
1203
1224
vm_memattr_t attr ;
1225
+ struct lcdev_handle * cdev_hndl ;
1204
1226
int error ;
1205
1227
1206
1228
td = curthread ;
@@ -1230,13 +1252,12 @@ linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
1230
1252
MPASS (vma .vm_ops -> open != NULL );
1231
1253
MPASS (vma .vm_ops -> close != NULL );
1232
1254
1233
- linux_cdev_handle_insert (vma . vm_private_data , & vma , sizeof ( struct vm_area_struct ) );
1234
- * object = cdev_pager_allocate (vma . vm_private_data , OBJT_MGTDEVICE ,
1255
+ cdev_hndl = linux_cdev_handle_insert (& vma );
1256
+ * object = cdev_pager_allocate (cdev_hndl , OBJT_MGTDEVICE ,
1235
1257
& linux_cdev_pager_ops , size , nprot ,
1236
1258
* offset , curthread -> td_ucred );
1237
-
1238
- if (* object == NULL )
1239
- linux_cdev_handle_remove (vma .vm_private_data );
1259
+
1260
+ linux_cdev_handle_remove (cdev_hndl );
1240
1261
} else {
1241
1262
sg = sglist_alloc (1 , M_WAITOK );
1242
1263
sglist_append_phys (sg ,
@@ -2019,7 +2040,6 @@ linux_compat_init(void *arg)
2019
2040
boot_cpu_data .x86_clflush_size = cpu_clflush_line_size ;
2020
2041
boot_cpu_data .x86 = ((cpu_id & 0xF0000 ) >> 12 ) | ((cpu_id & 0xF0 ) >> 4 );
2021
2042
2022
- INIT_LIST_HEAD (& lcdev_handle_list );
2023
2043
rootoid = SYSCTL_ADD_ROOT_NODE (NULL ,
2024
2044
OID_AUTO , "sys" , CTLFLAG_RD |CTLFLAG_MPSAFE , NULL , "sys" );
2025
2045
kobject_init (& linux_class_root , & linux_class_ktype );
0 commit comments