
Commit 9577ef9

rework linux_cdev_handle
Fixes a free-while-in-use case that occurs with deferred deallocation of vm_objects. Also allows multiple mmaps with different sizes. (The new reference-count lifecycle is sketched below the commit header.)
1 parent a846ccf commit 9577ef9
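
The crux of the rework is that struct lcdev_handle is reference counted and is itself the cdev pager handle: linux_cdev_handle_insert() creates it holding one reference, linux_cdev_pager_ctor() takes a second reference for the vm_object, linux_dev_mmap_single() drops its own reference right after cdev_pager_allocate(), and linux_cdev_pager_dtor() drops the last one. The handle is therefore freed only when the vm_object itself finally goes away, however long its deallocation is deferred. Below is a minimal userspace model of that lifecycle, using C11 atomics and illustrative names (handle_insert/handle_ref/handle_remove are stand-ins, not the kernel functions):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct lcdev_handle; only the reference count matters here. */
struct handle {
        atomic_int ref_count;
};

static struct handle *
handle_insert(void)             /* mmap path: allocate with one reference */
{
        struct handle *h = malloc(sizeof(*h));

        atomic_init(&h->ref_count, 1);
        return (h);
}

static void
handle_ref(struct handle *h)    /* pager ctor: the vm_object takes a reference */
{
        atomic_fetch_add(&h->ref_count, 1);
}

static void
handle_remove(struct handle *h) /* drop one reference; free only on the last */
{
        if (atomic_fetch_sub(&h->ref_count, 1) == 1) {
                printf("last reference gone, handle freed\n");
                free(h);
        }
}

int
main(void)
{
        struct handle *h = handle_insert();     /* refcount 1: mmap path          */

        handle_ref(h);                          /* refcount 2: pager ctor         */
        handle_remove(h);                       /* refcount 1: mmap path is done  */
        /* The mapping keeps faulting pages through the handle here ...           */
        handle_remove(h);                       /* refcount 0: pager dtor, freed  */
        return (0);
}

This is also why linux_dev_mmap_single() in the diff calls linux_cdev_handle_remove() unconditionally after cdev_pager_allocate(): on success the pager constructor already holds its own reference, and on failure the drop to zero frees the handle.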

3 files changed (+111, -85)


sys/compat/linuxkpi/common/include/linux/mm.h (+2)

@@ -337,4 +337,6 @@ struct vm_operations_struct {
 
 };
 
+void *linux_cdev_handle_find_by_pd(void *pd);
+
 #endif /* _LINUX_MM_H_ */

sys/compat/linuxkpi/common/src/linux_compat.c (+105, -85)
@@ -109,8 +109,6 @@ MALLOC_DEFINE(M_LCINT, "linuxint", "Linux compat internal");
 #undef file
 #undef cdev
 
-static void *linux_cdev_handle_find(void *handle);
-
 struct cpuinfo_x86 boot_cpu_data;
 
 struct kobject linux_class_root;
@@ -655,31 +653,47 @@ linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_pag
 }
 #endif
 
+struct lcdev_handle {
+        atomic_t ref_count;
+        void *vma_private_data;
+        vm_offset_t vma_pgoff;
+        struct linux_file *vma_file;
+        const struct vm_operations_struct *vma_ops;
+
+        TAILQ_ENTRY(lcdev_handle) link;
+};
+
 static int
 linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
     vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
 {
         struct vm_fault vmf;
-        struct vm_area_struct cvma, *vmap;
+        struct vm_area_struct cvma;
+        struct lcdev_handle *hndl;
         int rc, err;
 
         linux_set_current(curthread);
-
-        vmap = linux_cdev_handle_find(vm_obj->handle);
+
+        hndl = vm_obj->handle;
         vmf.virtual_address = (void *)(pidx << PAGE_SHIFT);
         vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
-        memcpy(&cvma, vmap, sizeof(*vmap));
-        MPASS(cvma.vm_private_data == vm_obj->handle);
-
+
+        bzero(&cvma, sizeof(struct vm_area_struct));
+        cvma.vm_private_data = hndl->vma_private_data;
+        cvma.vm_start = 0;
+        cvma.vm_end = ( vm_obj->size - hndl->vma_pgoff ) * PAGE_SIZE;
+        cvma.vm_pgoff = hndl->vma_pgoff;
+        cvma.vm_pfn = 0;
+        cvma.vm_file = hndl->vma_file;
         cvma.vm_pfn_count = 0;
         cvma.vm_pfn_pcount = &cvma.vm_pfn_count;
         cvma.vm_obj = vm_obj;
-
+
         VM_OBJECT_WUNLOCK(vm_obj);
-        err = vmap->vm_ops->fault(&cvma, &vmf);
+        err = hndl->vma_ops->fault(&cvma, &vmf);
         while (cvma.vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
                 kern_yield(0);
-                err = vmap->vm_ops->fault(&cvma, &vmf);
+                err = hndl->vma_ops->fault(&cvma, &vmf);
         }
         atomic_add_int(&cdev_pfn_found_count, cvma.vm_pfn_count);
         VM_OBJECT_WLOCK(vm_obj);
@@ -713,105 +727,112 @@ linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
         return (rc);
 }
 
-struct list_head lcdev_handle_list;
 
-struct lcdev_handle_ref {
-        void *handle;
-        void *data;
-        struct list_head list;
-};
+TAILQ_HEAD(, lcdev_handle) lcdev_handle_list = TAILQ_HEAD_INITIALIZER(lcdev_handle_list);
 
-static void
-linux_cdev_handle_insert(void *handle, void *data, int size)
-{
-        struct list_head *h;
-        struct lcdev_handle_ref *r;
-        void *datap;
 
-        rw_rlock(&linux_global_rw);
-        list_for_each(h, &lcdev_handle_list) {
-                r = __containerof(h, struct lcdev_handle_ref, list);
-                if (r->handle == handle) {
-                        rw_runlock(&linux_global_rw);
-                        return;
+
+static struct lcdev_handle *
+linux_cdev_handle_insert(struct vm_area_struct *vma)
+{
+        struct lcdev_handle *hndl, *list_hndl;
+
+        hndl = malloc(sizeof(struct lcdev_handle), M_KMALLOC, M_WAITOK);
+        hndl->vma_private_data = vma->vm_private_data;
+        atomic_set( &hndl->ref_count, 1 );
+        hndl->vma_pgoff = vma->vm_pgoff;
+        hndl->vma_file = vma->vm_file;
+        hndl->vma_ops = vma->vm_ops;
+
+        rw_wlock(&linux_global_rw);
+        TAILQ_FOREACH(list_hndl, &lcdev_handle_list, link) {
+                if( list_hndl->vma_private_data == hndl->vma_private_data && atomic_inc_not_zero( &list_hndl->ref_count ) ) {
+
+                        rw_wunlock(&linux_global_rw);
+                        MPASS( hndl->vma_pgoff == list_hndl->vma_pgoff );
+                        if( hndl->vma_file != list_hndl->vma_file )
+                        {
+                                /*
+                                 * Same object accessible through different files.
+                                 * Let's hope the Linux code doesn't really care about the file.
+                                 * If it does, we will need to track the different mappings,
+                                 * and probably use the largest one.
+                                 */
+
+                                list_hndl->vma_file = NULL;
+                        }
+
+                        MPASS( hndl->vma_ops == list_hndl->vma_ops );
+                        free(hndl, M_KMALLOC);
+                        return list_hndl;
                 }
         }
-        rw_runlock(&linux_global_rw);
-        r = malloc(sizeof(struct lcdev_handle_ref), M_KMALLOC, M_WAITOK);
-        r->handle = handle;
-        datap = malloc(size, M_KMALLOC, M_WAITOK);
-        memcpy(datap, data, size);
-        r->data = datap;
-        INIT_LIST_HEAD(&r->list); /* XXX why _HEAD? */
-        rw_wlock(&linux_global_rw);
-        /* XXX need to re-lookup */
-        list_add_tail(&r->list, &lcdev_handle_list);
+
+        TAILQ_INSERT_TAIL( &lcdev_handle_list, hndl, link );
         rw_wunlock(&linux_global_rw);
+
+        return hndl;
 }
 
-static void
-linux_cdev_handle_remove(void *handle)
+static void linux_cdesv_handle_ref(struct lcdev_handle *hndl)
 {
-        struct list_head *h;
-        struct lcdev_handle_ref *r;
+        MPASS( atomic_read(&hndl->ref_count) );
+        atomic_inc( &hndl->ref_count );
+}
 
-        rw_wlock(&linux_global_rw);
-        list_for_each(h, &lcdev_handle_list) {
-                r = __containerof(h, struct lcdev_handle_ref, list);
-                if (r->handle == handle)
-                        break;
+static void
+linux_cdev_handle_remove(struct lcdev_handle *hndl)
+{
+        if( atomic_dec_and_test( &hndl->ref_count ) ) {
+                rw_wlock(&linux_global_rw);
+                TAILQ_REMOVE( &lcdev_handle_list, hndl, link );
+                rw_wunlock(&linux_global_rw);
+
+                free(hndl, M_KMALLOC);
         }
-        MPASS (r && r->handle == handle);
-        list_del(&r->list);
-        rw_wunlock(&linux_global_rw);
-        free(r->data, M_KMALLOC);
-        free(r, M_KMALLOC);
 }
 
-static void *
-linux_cdev_handle_find(void *handle)
-{
-        struct list_head *h;
-        struct lcdev_handle_ref *r;
-        void *data;
 
+void *
+linux_cdev_handle_find_by_pd(void *pd)
+{
+        struct lcdev_handle *hndl;
+
         rw_rlock(&linux_global_rw);
-        list_for_each(h, &lcdev_handle_list) {
-                r = __containerof(h, struct lcdev_handle_ref, list);
-                if (r->handle == handle)
-                        break;
+        TAILQ_FOREACH(hndl, &lcdev_handle_list, link) {
+                if( hndl->vma_private_data == pd ) {
+                        rw_runlock(&linux_global_rw);
+                        return hndl;
+                }
         }
-        MPASS (r && r->handle == handle);
-        data = r->data;
         rw_runlock(&linux_global_rw);
-        return (data);
+
+
+        return NULL;
 }
 
+
 static int
 linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
     vm_ooffset_t foff, struct ucred *cred, u_short *color)
 {
-        struct vm_area_struct *vmap;
-
-        vmap = linux_cdev_handle_find(handle);
-        MPASS(vmap != NULL);
-        vmap->vm_private_data = handle;
-
+        linux_cdesv_handle_ref(handle);
         *color = 0;
         return (0);
 }
 
 static void
 linux_cdev_pager_dtor(void *handle)
 {
-        struct vm_area_struct *vmap;
-
-        vmap = linux_cdev_handle_find(handle);
-        MPASS(vmap != NULL);
+        struct lcdev_handle *hndl = handle;
+        struct vm_area_struct cvma;
+
+        cvma.vm_private_data = hndl->vma_private_data;
+
+        const struct vm_operations_struct *vma_ops = hndl->vma_ops;
+        linux_cdev_handle_remove(hndl);
 
-        vmap->vm_ops->close(vmap);
-        vmap->vm_private_data = handle;
-        linux_cdev_handle_remove(handle);
+        vma_ops->close(&cvma);
 }
 
 static struct cdev_pager_ops linux_cdev_pager_ops = {
@@ -1201,6 +1222,7 @@ linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
         struct file *file;
         struct vm_area_struct vma;
         vm_memattr_t attr;
+        struct lcdev_handle *cdev_hndl;
         int error;
 
         td = curthread;
@@ -1230,13 +1252,12 @@ linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset,
                 MPASS(vma.vm_ops->open != NULL);
                 MPASS(vma.vm_ops->close != NULL);
 
-                linux_cdev_handle_insert(vma.vm_private_data, &vma, sizeof(struct vm_area_struct));
-                *object = cdev_pager_allocate(vma.vm_private_data, OBJT_MGTDEVICE,
+                cdev_hndl = linux_cdev_handle_insert(&vma);
+                *object = cdev_pager_allocate(cdev_hndl, OBJT_MGTDEVICE,
                     &linux_cdev_pager_ops, size, nprot,
                     *offset, curthread->td_ucred);
-
-                if (*object == NULL)
-                        linux_cdev_handle_remove(vma.vm_private_data);
+
+                linux_cdev_handle_remove(cdev_hndl);
         } else {
                 sg = sglist_alloc(1, M_WAITOK);
                 sglist_append_phys(sg,
@@ -2019,7 +2040,6 @@ linux_compat_init(void *arg)
         boot_cpu_data.x86_clflush_size = cpu_clflush_line_size;
         boot_cpu_data.x86 = ((cpu_id & 0xF0000) >> 12) | ((cpu_id & 0xF0) >> 4);
 
-        INIT_LIST_HEAD(&lcdev_handle_list);
         rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
             OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
         kobject_init(&linux_class_root, &linux_class_ktype);
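
One detail worth noting in linux_cdev_handle_insert() above: an existing handle for the same vm_private_data is reused only when atomic_inc_not_zero() succeeds, so a handle whose last reference has already been dropped (and which is merely waiting to be unlinked and freed) cannot be resurrected; the walk falls through and a fresh handle is inserted instead. A hedged sketch of just that guard with C11 atomics (ref_get_unless_zero is an illustrative name, not the LinuxKPI primitive):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Take a reference only while the count is still non-zero.  A count of zero
 * means teardown has begun, so the caller must not start using the object.
 */
static bool
ref_get_unless_zero(atomic_int *ref)
{
        int old = atomic_load(ref);

        while (old != 0) {
                /* On failure 'old' is reloaded with the current value. */
                if (atomic_compare_exchange_weak(ref, &old, old + 1))
                        return (true);
        }
        return (false);
}

int
main(void)
{
        atomic_int live = 1, dying = 0;

        printf("live:  %d\n", ref_get_unless_zero(&live));   /* 1: reused  */
        printf("dying: %d\n", ref_get_unless_zero(&dying));  /* 0: skipped */
        return (0);
}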

sys/compat/linuxkpi/common/src/linux_page.c (+4, -1)
@@ -578,7 +578,10 @@ unmap_mapping_range(void *obj, loff_t const holebegin, loff_t const holelen, int
         printf("unmap_mapping_range: obj: %p holebegin %zu, holelen: %zu, even_cows: %d\n",
             obj, holebegin, holelen, even_cows);
 #endif
-        devobj = cdev_pager_lookup(obj);
+        void *hndl = linux_cdev_handle_find_by_pd(obj);
+        if( hndl == NULL )
+                return;
+        devobj = cdev_pager_lookup(hndl);
         if (devobj != NULL) {
                 page_count = OFF_TO_IDX(holelen);
 