
Commit a52b8d4

Merge pull request torvalds#190 from wedsonaf/pages
Move page management to `kernel` crate.
2 parents b6f20e7 + 53b02d8 commit a52b8d4

5 files changed: +181 -110

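In practice the change is a relocation: the `Pages` type that previously lived inside the Binder driver (`drivers/android/rust_binder.rs`) is now provided by the shared `kernel` crate as `kernel::pages::Pages`, with its fallible operations returning `KernelResult`. A minimal sketch of the new import path and constructor, assuming the `kernel` prelude exports `KernelResult` as the driver imports suggest; the function name is illustrative, not part of the commit:

    use kernel::pages::Pages;
    use kernel::prelude::*;

    /// Allocates a single zeroed page (order 0) through the relocated API.
    fn alloc_one_page() -> KernelResult<Pages<0>> {
        // `Pages::new` picks GFP_KERNEL | __GFP_ZERO | __GFP_HIGHMEM internally.
        Pages::<0>::new()
    }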

drivers/android/allocation.rs

+3 -4

@@ -2,13 +2,12 @@
 
 use alloc::sync::Arc;
 use core::mem::{size_of, MaybeUninit};
-use kernel::{bindings, prelude::*, user_ptr::UserSlicePtrReader, Error};
+use kernel::{bindings, pages::Pages, prelude::*, user_ptr::UserSlicePtrReader, Error};
 
 use crate::{
     node::NodeRef,
     process::{AllocationInfo, Process},
     thread::{BinderError, BinderResult},
-    Pages,
 };
 
 pub(crate) struct Allocation<'a> {
@@ -83,7 +82,7 @@ impl<'a> Allocation<'a> {
                     offset,
                     to_copy,
                 )
-            };
+            }?;
             out_offset += to_copy;
             Ok(())
         })?;
@@ -97,7 +96,7 @@ impl<'a> Allocation<'a> {
             // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
             let obj_ptr = unsafe { (obj as *const T as *const u8).add(obj_offset) };
             // SAFETY: We have a reference to the object, so the pointer is valid.
-            unsafe { page.write(obj_ptr, offset, to_copy) };
+            unsafe { page.write(obj_ptr, offset, to_copy) }?;
             obj_offset += to_copy;
             Ok(())
         })
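The trailing `?` added at these call sites reflects a signature change: the driver-local `write` removed from `rust_binder.rs` below returned nothing, while the relocated `Pages::write` returns `KernelResult` because it now bounds-checks `offset + len` against `PAGE_SIZE`. A sketch of the resulting call-site pattern; `fill_first_page` and its arguments are hypothetical, only `Pages::write` and its contract come from this commit:

    use kernel::pages::Pages;
    use kernel::prelude::*;

    /// Copies `src` into the start of the first page, propagating EINVAL on out-of-range writes.
    fn fill_first_page(pages: &Pages<0>, src: &[u8]) -> KernelResult {
        // SAFETY: `src` is a valid slice, so the pointer is valid for `src.len()` bytes.
        unsafe { pages.write(src.as_ptr(), 0, src.len()) }?;
        Ok(())
    }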

drivers/android/process.rs

+2 -1

@@ -9,6 +9,7 @@ use core::{
 use kernel::{
     bindings, c_types,
     file_operations::{File, FileOpener, FileOperations, IoctlCommand, IoctlHandler, PollTable},
+    pages::Pages,
     prelude::*,
     sync::{Guard, Mutex, Ref, RefCount, RefCounted},
     user_ptr::{UserSlicePtr, UserSlicePtrReader},
@@ -23,7 +24,7 @@ use crate::{
     node::{Node, NodeDeath, NodeRef},
     range_alloc::RangeAllocator,
     thread::{BinderError, BinderResult, Thread},
-    DeliverToRead, Either, Pages,
+    DeliverToRead, Either,
 };
 
 // TODO: Review this:

drivers/android/rust_binder.rs

+2 -105

@@ -8,14 +8,8 @@
 #![feature(global_asm, try_reserve, allocator_api, concat_idents)]
 
 use alloc::{boxed::Box, sync::Arc};
-use core::{marker::PhantomData, pin::Pin, ptr};
-use kernel::{
-    bindings, c_types, cstr,
-    miscdev::Registration,
-    prelude::*,
-    user_ptr::{UserSlicePtrReader, UserSlicePtrWriter},
-    Error,
-};
+use core::pin::Pin;
+use kernel::{cstr, miscdev::Registration, prelude::*, user_ptr::UserSlicePtrWriter};
 
 mod allocation;
 mod context;
@@ -98,103 +92,6 @@ impl DeliverToRead for DeliverCode {
     }
 }
 
-extern "C" {
-    #[allow(improper_ctypes)]
-    fn rust_helper_alloc_pages(
-        gfp_mask: bindings::gfp_t,
-        order: c_types::c_uint,
-    ) -> *mut bindings::page;
-
-    #[allow(improper_ctypes)]
-    fn rust_helper_kmap(page: *mut bindings::page) -> *mut c_types::c_void;
-
-    #[allow(improper_ctypes)]
-    fn rust_helper_kunmap(page: *mut bindings::page);
-}
-
-/// Pages holds a reference to a set of pages of order `ORDER`. Having the order as a generic const
-/// allows the struct to have the same size as pointer.
-struct Pages<const ORDER: u32> {
-    pages: *mut bindings::page,
-}
-
-impl<const ORDER: u32> Pages<ORDER> {
-    fn new() -> KernelResult<Self> {
-        // TODO: Consider whether we want to allow callers to specify flags.
-        let pages = unsafe {
-            rust_helper_alloc_pages(
-                bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::__GFP_HIGHMEM,
-                ORDER,
-            )
-        };
-        if pages.is_null() {
-            return Err(Error::ENOMEM);
-        }
-        Ok(Self { pages })
-    }
-
-    fn insert_page(&self, vma: &mut bindings::vm_area_struct, address: usize) -> KernelResult {
-        let ret = unsafe { bindings::vm_insert_page(vma, address as _, self.pages) };
-        if ret != 0 {
-            Err(Error::from_kernel_errno(ret))
-        } else {
-            Ok(())
-        }
-    }
-
-    fn copy_into_page(
-        &self,
-        reader: &mut UserSlicePtrReader,
-        offset: usize,
-        len: usize,
-    ) -> KernelResult {
-        let mapping = self.kmap(0).unwrap();
-        unsafe { reader.read_raw((mapping.ptr as usize + offset) as _, len) }?;
-        Ok(())
-    }
-
-    unsafe fn read(&self, dest: *mut u8, offset: usize, len: usize) {
-        let mapping = self.kmap(0).unwrap();
-        ptr::copy((mapping.ptr as *mut u8).add(offset), dest, len);
-    }
-
-    unsafe fn write(&self, src: *const u8, offset: usize, len: usize) {
-        let mapping = self.kmap(0).unwrap();
-        ptr::copy(src, (mapping.ptr as *mut u8).add(offset), len);
-    }
-
-    fn kmap(&self, index: usize) -> Option<PageMapping> {
-        if index >= 1usize << ORDER {
-            return None;
-        }
-        let page = unsafe { self.pages.add(index) };
-        let ptr = unsafe { rust_helper_kmap(page) };
-        Some(PageMapping {
-            page,
-            ptr,
-            _phantom: PhantomData,
-        })
-    }
-}
-
-impl<const ORDER: u32> Drop for Pages<ORDER> {
-    fn drop(&mut self) {
-        unsafe { bindings::__free_pages(self.pages, ORDER) };
-    }
-}
-
-struct PageMapping<'a> {
-    page: *mut bindings::page,
-    ptr: *mut c_types::c_void,
-    _phantom: PhantomData<&'a i32>,
-}
-
-impl Drop for PageMapping<'_> {
-    fn drop(&mut self) {
-        unsafe { rust_helper_kunmap(self.page) };
-    }
-}
-
 const fn ptr_align(value: usize) -> usize {
     let size = core::mem::size_of::<usize>() - 1;
     (value + size) & !size
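For context, the unchanged `ptr_align` helper at the end of this hunk rounds a value up to the native pointer size; a small worked example, assuming an 8-byte `usize`:

    // With size_of::<usize>() == 8, the mask constant `size` is 7 (0b111), so:
    //   ptr_align(13) = (13 + 7) & !7 = 20 & !7 = 16
    //   ptr_align(16) = (16 + 7) & !7 = 23 & !7 = 16
    const fn ptr_align(value: usize) -> usize {
        let size = core::mem::size_of::<usize>() - 1;
        (value + size) & !size
    }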

rust/kernel/lib.rs

+1

@@ -41,6 +41,7 @@ pub mod chrdev;
 mod error;
 pub mod file_operations;
 pub mod miscdev;
+pub mod pages;
 
 #[doc(hidden)]
 pub mod module_param;

rust/kernel/pages.rs

+173

@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel page allocation and management.
+//!
+//! TODO: This module is a work in progress.
+
+use crate::{bindings, c_types, user_ptr::UserSlicePtrReader, Error, KernelResult, PAGE_SIZE};
+use core::{marker::PhantomData, ptr};
+
+extern "C" {
+    #[allow(improper_ctypes)]
+    fn rust_helper_alloc_pages(
+        gfp_mask: bindings::gfp_t,
+        order: c_types::c_uint,
+    ) -> *mut bindings::page;
+
+    #[allow(improper_ctypes)]
+    fn rust_helper_kmap(page: *mut bindings::page) -> *mut c_types::c_void;
+
+    #[allow(improper_ctypes)]
+    fn rust_helper_kunmap(page: *mut bindings::page);
+}
+
+/// A set of physical pages.
+///
+/// `Pages` holds a reference to a set of pages of order `ORDER`. Having the order as a generic
+/// const allows the struct to have the same size as a pointer.
+///
+/// # Invariants
+///
+/// The pointer [`Pages::pages`] is valid and points to 2^ORDER pages.
+pub struct Pages<const ORDER: u32> {
+    pages: *mut bindings::page,
+}
+
+impl<const ORDER: u32> Pages<ORDER> {
+    /// Allocates a new set of contiguous pages.
+    pub fn new() -> KernelResult<Self> {
+        // TODO: Consider whether we want to allow callers to specify flags.
+        // SAFETY: This only allocates pages. We check that it succeeds in the next statement.
+        let pages = unsafe {
+            rust_helper_alloc_pages(
+                bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::__GFP_HIGHMEM,
+                ORDER,
+            )
+        };
+        if pages.is_null() {
+            return Err(Error::ENOMEM);
+        }
+        // INVARIANTS: We checked that the allocation above succeeded.
+        Ok(Self { pages })
+    }
+
+    /// Maps a single page at the given address in the given VM area.
+    ///
+    /// This is only meant to be used by pages of order 0.
+    pub fn insert_page(&self, vma: &mut bindings::vm_area_struct, address: usize) -> KernelResult {
+        if ORDER != 0 {
+            return Err(Error::EINVAL);
+        }
+
+        // SAFETY: We check above that the allocation is of order 0. The range of `address` is
+        // already checked by `vm_insert_page`.
+        let ret = unsafe { bindings::vm_insert_page(vma, address as _, self.pages) };
+        if ret != 0 {
+            Err(Error::from_kernel_errno(ret))
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Copies data from the given [`UserSlicePtrReader`] into the pages.
+    pub fn copy_into_page(
+        &self,
+        reader: &mut UserSlicePtrReader,
+        offset: usize,
+        len: usize,
+    ) -> KernelResult {
+        // TODO: For now this only works on the first page.
+        let end = offset.checked_add(len).ok_or(Error::EINVAL)?;
+        if end > PAGE_SIZE {
+            return Err(Error::EINVAL);
+        }
+
+        let mapping = self.kmap(0).ok_or(Error::EINVAL)?;
+
+        // SAFETY: We ensured that the buffer was valid with the check above.
+        unsafe { reader.read_raw((mapping.ptr as usize + offset) as _, len) }?;
+        Ok(())
+    }
+
+    /// Maps the pages and reads from them into the given buffer.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that the destination buffer is valid for the given length.
+    /// Additionally, if the raw buffer is intended to be recast, they must ensure that the data
+    /// can be safely cast; [`crate::user_ptr::ReadableFromBytes`] has more details about it.
+    pub unsafe fn read(&self, dest: *mut u8, offset: usize, len: usize) -> KernelResult {
+        // TODO: For now this only works on the first page.
+        let end = offset.checked_add(len).ok_or(Error::EINVAL)?;
+        if end > PAGE_SIZE {
+            return Err(Error::EINVAL);
+        }
+
+        let mapping = self.kmap(0).ok_or(Error::EINVAL)?;
+        ptr::copy((mapping.ptr as *mut u8).add(offset), dest, len);
+        Ok(())
+    }
+
+    /// Maps the pages and writes into them from the given buffer.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that the buffer is valid for the given length. Additionally, if the
+    /// page is (or will be) mapped by userspace, they must ensure that no kernel data is leaked
+    /// through padding if it was cast from another type; [`crate::user_ptr::WritableToBytes`] has
+    /// more details about it.
+    pub unsafe fn write(&self, src: *const u8, offset: usize, len: usize) -> KernelResult {
+        // TODO: For now this only works on the first page.
+        let end = offset.checked_add(len).ok_or(Error::EINVAL)?;
+        if end > PAGE_SIZE {
+            return Err(Error::EINVAL);
+        }
+
+        let mapping = self.kmap(0).ok_or(Error::EINVAL)?;
+        ptr::copy(src, (mapping.ptr as *mut u8).add(offset), len);
+        Ok(())
+    }
+
+    /// Maps the page at index `index`.
+    fn kmap(&self, index: usize) -> Option<PageMapping> {
+        if index >= 1usize << ORDER {
+            return None;
+        }
+
+        // SAFETY: We checked above that `index` is within range.
+        let page = unsafe { self.pages.add(index) };
+
+        // SAFETY: `page` is valid based on the checks above.
+        let ptr = unsafe { rust_helper_kmap(page) };
+        if ptr.is_null() {
+            return None;
+        }
+
+        Some(PageMapping {
+            page,
+            ptr,
+            _phantom: PhantomData,
+        })
+    }
+}
+
+impl<const ORDER: u32> Drop for Pages<ORDER> {
+    fn drop(&mut self) {
+        // SAFETY: By the type invariants, we know the pages are allocated with the given order.
+        unsafe { bindings::__free_pages(self.pages, ORDER) };
+    }
+}
+
+struct PageMapping<'a> {
+    page: *mut bindings::page,
+    ptr: *mut c_types::c_void,
+    _phantom: PhantomData<&'a i32>,
+}
+
+impl Drop for PageMapping<'_> {
+    fn drop(&mut self) {
+        // SAFETY: An instance of `PageMapping` is created only when `kmap` succeeded for the given
+        // page, so it is safe to unmap it here.
+        unsafe { rust_helper_kunmap(self.page) };
+    }
+}
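Taken together, the new module wraps `alloc_pages`, `vm_insert_page`, `kmap`, and `kunmap` behind a type whose `Drop` implementation frees the pages. A hedged end-to-end sketch of how a driver might use it, assuming an order-0 allocation and a caller that already holds a `UserSlicePtrReader`; the function and variable names are illustrative, not part of the commit:

    use kernel::pages::Pages;
    use kernel::prelude::*;
    use kernel::user_ptr::UserSlicePtrReader;

    /// Allocates one zeroed page and fills its first `len` bytes from userspace.
    fn page_from_user(reader: &mut UserSlicePtrReader, len: usize) -> KernelResult<Pages<0>> {
        let pages = Pages::<0>::new()?;
        // Fails with EINVAL if `len` would run past PAGE_SIZE on the first page.
        pages.copy_into_page(reader, 0, len)?;
        // The backing pages are freed automatically when the returned value is dropped.
        Ok(pages)
    }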
