// wdk_mutex/kmutex.rs
//! A Rust idiomatic Windows Kernel Driver KMUTEX type which protects the inner type T
2
3use alloc::boxed::Box;
4use core::{
5 ffi::c_void, fmt::Display, mem::ManuallyDrop, ops::{Deref, DerefMut}, ptr::{self, drop_in_place, null_mut}
6};
7use wdk_sys::{
8 ntddk::{
9 ExAllocatePool2, ExFreePool, KeGetCurrentIrql, KeInitializeMutex, KeReleaseMutex,
10 KeWaitForSingleObject,
11 },
12 APC_LEVEL, DISPATCH_LEVEL, FALSE, KMUTEX, POOL_FLAG_NON_PAGED,
13 _KWAIT_REASON::Executive,
14 _MODE::KernelMode,
15};
16
17extern crate alloc;
18
19use crate::errors::DriverMutexError;
/// A thread safe mutex implemented through acquiring a KMUTEX in the Windows kernel.
///
/// The type `KMutex<T>` provides mutually exclusive access to the inner type T allocated through
/// this crate in the non-paged pool. All data required to initialise the KMutex is allocated in the
/// non-paged pool and as such is safe to pass stack data into the type as it will not go out of scope.
///
/// `KMutex` holds an inner value which is a pointer to a `KMutexInner` type which is the actual type
/// allocated in the non-paged pool, and this holds information relating to the mutex.
///
/// Access to the `T` within the `KMutex` can be done through calling [`Self::lock`].
///
/// # Lifetimes
///
/// As the `KMutex` is designed to be used in the Windows Kernel, with the Windows `wdk` crate, the lifetimes of
/// the `KMutex` must be considered by the caller. See examples below for usage.
///
/// The `KMutex` can exist in a locally scoped function with little additional configuration. To use the mutex across
/// thread boundaries, or to use it in callback functions, you can use the `Grt` module found in this crate. See below for
/// details.
///
/// # Deallocation
///
/// KMutex handles the deallocation of resources at the point the KMutex is dropped.
///
/// # Examples
///
/// ## Locally scoped mutex:
///
/// ```
/// {
///     let mtx = KMutex::new(0u32).unwrap();
///     let lock = mtx.lock().unwrap();
///
///     // If T implements Display, you do not need to dereference the lock to print.
///     println!("The value is: {}", lock);
/// } // Mutex will become unlocked as it is managed via RAII
/// ```
///
/// ## Global scope via the `Grt` module in `wdk-mutex`:
///
/// ```
/// // Initialise the mutex on DriverEntry
/// #[export_name = "DriverEntry"]
/// pub unsafe extern "system" fn driver_entry(
///     driver: &mut DRIVER_OBJECT,
///     registry_path: PCUNICODE_STRING,
/// ) -> NTSTATUS {
///     if let Err(e) = Grt::init() {
///         println!("Error creating Grt!: {:?}", e);
///         return STATUS_UNSUCCESSFUL;
///     }
///
///     // ...
///     my_function();
/// }
///
/// // Register a new Mutex in the `Grt` of value 0u32:
/// pub fn my_function() {
///     Grt::register_kmutex("my_test_mutex", 0u32);
/// }
///
/// unsafe extern "C" fn my_thread_fn_pointer(_: *mut c_void) {
///     let my_mutex = Grt::get_kmutex::<u32>("my_test_mutex");
///     if let Err(e) = my_mutex {
///         println!("Error in thread: {:?}", e);
///         return;
///     }
///
///     let mut lock = my_mutex.unwrap().lock().unwrap();
///     *lock += 1;
/// }
///
/// // Destroy the Grt to prevent memory leak on DriverExit
/// extern "C" fn driver_exit(driver: *mut DRIVER_OBJECT) {
///     unsafe { Grt::destroy() };
/// }
/// ```
pub struct KMutex<T> {
    // Pointer to the single non-paged-pool allocation that holds both the
    // KMUTEX object and the protected `T` (see `KMutexInner`). Allocated in
    // `new`, freed in `Drop` (or in `to_owned`/`to_owned_box`).
    inner: *mut KMutexInner<T>,
}
105
/// The underlying data, allocated in the non-paged pool, which is pointed to by the `KMutex`.
struct KMutexInner<T> {
    /// The kernel KMUTEX object; initialised via `KeInitializeMutex` and mutated by
    /// the kernel during wait/release calls.
    mutex: KMUTEX,
    /// The data which the mutex is protecting.
    data: T,
}
113
114unsafe impl<T> Sync for KMutex<T> {}
115unsafe impl<T> Send for KMutex<T> {}
116
117impl<T> KMutex<T> {
118 /// Creates a new KMUTEX Windows Kernel Driver Mutex in a signaled (free) state.
119 ///
120 /// # IRQL
121 ///
122 /// This can be called at any IRQL.
123 ///
124 /// # Examples
125 ///
126 /// ```
127 /// use wdk_mutex::Mutex;
128 ///
129 /// let my_mutex = wdk_mutex::KMutex::new(0u32);
130 /// ```
131 pub fn new(data: T) -> Result<Self, DriverMutexError> {
132 //
133 // Non-Paged heap alloc for all struct data required for KMutexInner
134 //
135 let total_sz_required = size_of::<KMutexInner<T>>();
136 let inner_heap_ptr: *mut c_void = unsafe {
137 ExAllocatePool2(
138 POOL_FLAG_NON_PAGED,
139 total_sz_required as u64,
140 u32::from_be_bytes(*b"kmtx"),
141 )
142 };
143 if inner_heap_ptr.is_null() {
144 return Err(DriverMutexError::PagedPoolAllocFailed);
145 }
146
147 // Cast the memory allocation to a pointer to the inner
148 let kmutex_inner_ptr = inner_heap_ptr as *mut KMutexInner<T>;
149
150 // SAFETY: This raw write is safe as the pointer validity is checked above.
151 unsafe {
152 ptr::write(
153 kmutex_inner_ptr,
154 KMutexInner {
155 mutex: KMUTEX::default(),
156 data,
157 },
158 );
159
160 // Initialise the KMUTEX object via the kernel
161 KeInitializeMutex(&(*kmutex_inner_ptr).mutex as *const _ as *mut _, 0);
162 }
163
164 Ok(Self {
165 inner: kmutex_inner_ptr,
166 })
167 }
168
169 /// Acquires a mutex in a non-alertable manner.
170 ///
171 /// Once the thread has acquired the mutex, it will return a `KMutexGuard` which is a RAII scoped
172 /// guard allowing exclusive access to the inner T.
173 ///
174 /// # Errors
175 ///
176 /// If the IRQL is too high, this function will return an error and will not acquire a lock. To prevent
177 /// a kernel panic, the caller should match the return value rather than just unwrapping the value.
178 ///
179 /// # IRQL
180 ///
181 /// This function must be called at IRQL `<= APC_LEVEL`, if the IRQL is higher than this,
182 /// the function will return an error.
183 ///
184 /// It is the callers responsibility to ensure the IRQL is sufficient to call this function and it
185 /// will not alter the IRQL for the caller, as this may introduce undefined behaviour elsewhere in the
186 /// driver / kernel.
187 ///
188 /// # Examples
189 ///
190 /// ```
191 /// let mtx = KMutex::new(0u32).unwrap();
192 /// let lock = mtx.lock().unwrap();
193 /// ```
194 pub fn lock(&self) -> Result<KMutexGuard<'_, T>, DriverMutexError> {
195 // Check the IRQL is <= APC_LEVEL as per remarks at
196 // https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/nf-wdm-kewaitforsingleobject
197 let irql = unsafe { KeGetCurrentIrql() };
198 if irql > APC_LEVEL as u8 {
199 return Err(DriverMutexError::IrqlTooHigh);
200 }
201
202 // Discard the return value; the status code does not represent an error or contain information
203 // relevant to the context of no timeout.
204 let _ = unsafe {
205 // SAFETY: The IRQL is sufficient for the operation as checked above, and we know our pointer
206 // is valid as RAII manages the lifetime of the heap allocation, ensuring it will only be deallocated
207 // once Self gets dropped.
208 KeWaitForSingleObject(
209 &mut (*self.inner).mutex as *mut _ as *mut _,
210 Executive,
211 KernelMode as i8,
212 FALSE as u8,
213 null_mut(),
214 )
215 };
216
217 Ok(KMutexGuard { kmutex: self })
218 }
219
220 /// Consumes the mutex and returns an owned copy of the protected data (`T`).
221 ///
222 /// This method performs a deep copy of the data (`T`) guarded by the mutex before
223 /// deallocating the internal memory. Be cautious when using this method with large
224 /// data types, as it may lead to inefficiencies or stack overflows.
225 ///
226 /// For scenarios involving large data that you prefer not to allocate on the stack,
227 /// consider using [`Self::to_owned_box`] instead.
228 ///
229 /// # Safety
230 ///
231 /// This function moves `T` out of the mutex without running `Drop` on the original
232 /// in-place value. The returned `T` remains fully owned by the caller and will be
233 /// dropped normally.
234 ///
235 /// - **Single Ownership Guarantee:** After calling [`Self::to_owned`], ensure that
236 /// no other references (especially static or global ones) attempt to access the
237 /// underlying mutex. This is because the mutexes memory is deallocated once this
238 /// method is invoked.
239 /// - **Exclusive Access:** This function should only be called when you can guarantee
240 /// that there will be no further access to the protected `T`. Violating this can
241 /// lead to undefined behavior since the memory is freed after the call.
242 ///
243 /// # Example
244 ///
245 /// ```
246 /// unsafe {
247 /// let owned_data: T = mutex.to_owned();
248 /// // Use `owned_data` safely here
249 /// }
250 /// ```
251 pub unsafe fn to_owned(self) -> T {
252 let manually_dropped = ManuallyDrop::new(self);
253 let data_read = unsafe { ptr::read(&(*manually_dropped.inner).data) };
254
255 // Free the mutex allocation without using drop semantics which could cause an
256 // accidental double drop of the underlying `T`.
257 unsafe { ExFreePool(manually_dropped.inner as _) };
258
259 data_read
260 }
261
262 /// Consumes the mutex and returns an owned `Box<T>` containing the protected data (`T`).
263 ///
264 /// This method is an alternative to [`Self::to_owned`] and is particularly useful when
265 /// dealing with large data types. By returning a `Box<T>`, the data is pool-allocated,
266 /// avoiding potential stack overflows associated with large stack allocations.
267 ///
268 /// # Safety
269 ///
270 /// This function moves `T` out of the mutex without running `Drop` on the original
271 /// in-place value. The returned `T` remains fully owned by the caller and will be
272 /// dropped normally.
273 ///
274 /// - **Single Ownership Guarantee:** After calling [`Self::to_owned_box`], ensure that
275 /// no other references (especially static or global ones) attempt to access the
276 /// underlying mutex. This is because the mutexes memory is deallocated once this
277 /// method is invoked.
278 /// - **Exclusive Access:** This function should only be called when you can guarantee
279 /// that there will be no further access to the protected `T`. Violating this can
280 /// lead to undefined behavior since the memory is freed after the call.
281 ///
282 /// # Example
283 ///
284 /// ```rust
285 /// unsafe {
286 /// let boxed_data: Box<T> = mutex.to_owned_box();
287 /// // Use `boxed_data` safely here
288 /// }
289 /// ```
290 pub unsafe fn to_owned_box(self) -> Box<T> {
291 let manually_dropped = ManuallyDrop::new(self);
292 let data_read = unsafe { ptr::read(&(*manually_dropped.inner).data) };
293
294 // Free the mutex allocation without using drop semantics which could cause an
295 // accidental double drop of the underlying `T`.
296 unsafe { ExFreePool(manually_dropped.inner as _) };
297
298 Box::new(data_read)
299 }
300}
301
302impl<T> Drop for KMutex<T> {
303 fn drop(&mut self) {
304 unsafe {
305 // Drop the underlying data and run destructors for the data, this would be relevant in the
306 // case where Self contains other heap allocated types which have their own deallocation
307 // methods.
308 drop_in_place(&mut (*self.inner).data);
309
310 // Free the memory we allocated
311 ExFreePool(self.inner as *mut _);
312 }
313 }
314}
315
/// A RAII scoped guard for the inner data protected by the mutex. Once this guard is given out, the protected data
/// may be safely mutated by the caller as we guarantee exclusive access via Windows Kernel Mutex primitives.
///
/// When this structure is dropped (falls out of scope), the lock will be unlocked.
///
/// # IRQL
///
/// Access to the data within this guard must be done at <= APC_LEVEL if a non-alertable lock was acquired, or <=
/// DISPATCH_LEVEL if an alertable lock was acquired. It is the caller's responsibility to manage APC levels whilst
/// using the KMutex.
///
/// If you wish to manually drop the lock with a safety check, call the function [`Self::drop_safe`].
///
/// # Kernel panic
///
/// Raising the IRQL above safe limits whilst using the mutex will cause a kernel panic if not appropriately handled.
/// When RAII drops this type, the mutex is released; if the guard goes out of scope whilst you hold an IRQL that
/// is too high, you will receive a kernel panic.
///
pub struct KMutexGuard<'a, T> {
    // The mutex this guard was issued by; the guard releases it on drop. The
    // borrow ties the guard's lifetime to the KMutex, keeping `inner` valid.
    kmutex: &'a KMutex<T>,
}
338
339impl<T> Display for KMutexGuard<'_, T>
340where
341 T: Display,
342{
343 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
344 // SAFETY: Dereferencing the inner data is safe as RAII controls the memory allocations.
345 write!(f, "{}", unsafe { &(*self.kmutex.inner).data })
346 }
347}
348
349impl<T> Deref for KMutexGuard<'_, T> {
350 type Target = T;
351
352 fn deref(&self) -> &Self::Target {
353 // SAFETY: Dereferencing the inner data is safe as RAII controls the memory allocations.
354 unsafe { &(*self.kmutex.inner).data }
355 }
356}
357
358impl<T> DerefMut for KMutexGuard<'_, T> {
359 fn deref_mut(&mut self) -> &mut Self::Target {
360 // SAFETY: Dereferencing the inner data is safe as RAII controls the memory allocations.
361 // Mutable access is safe due to Self only being given out whilst a mutex is held from the
362 // kernel.
363 unsafe { &mut (*self.kmutex.inner).data }
364 }
365}
366
367impl<T> Drop for KMutexGuard<'_, T> {
368 fn drop(&mut self) {
369 // NOT SAFE AT A IRQL TOO HIGH
370 unsafe { KeReleaseMutex(&mut (*self.kmutex.inner).mutex, FALSE as u8) };
371 }
372}
373
374impl<T> KMutexGuard<'_, T> {
375 /// Safely drop the KMutexGuard, an alternative to RAII.
376 ///
377 /// This function checks the IRQL before attempting to drop the guard.
378 ///
379 /// # Errors
380 ///
381 /// If the IRQL > DISPATCH_LEVEL, no unlock will occur and a DriverMutexError will be returned to the
382 /// caller.
383 ///
384 /// # IRQL
385 ///
386 /// This function is safe to call at any IRQL, but it will not release the mutex if IRQL > DISPATCH_LEVEL
387 pub fn drop_safe(&mut self) -> Result<(), DriverMutexError> {
388 let irql = unsafe { KeGetCurrentIrql() };
389 if irql > DISPATCH_LEVEL as u8 {
390 return Err(DriverMutexError::IrqlTooHigh);
391 }
392
393 unsafe { KeReleaseMutex(&mut (*self.kmutex.inner).mutex, FALSE as u8) };
394
395 Ok(())
396 }
397}