From 8089b18d34a0bed895ae4d1e16f4c892fb22d807 Mon Sep 17 00:00:00 2001 From: Romain Malmain Date: Mon, 20 Jan 2025 17:25:55 +0100 Subject: [PATCH] Remove ShMemProvider bound from struct definitions (#2861) * No more ShMemProvider bound constraint in struct definition whenever possible * Introduce StdShMem * Update CONTRIBUTING.md --- CONTRIBUTING.md | 47 ++++- .../frida_executable_libpng/src/fuzzer.rs | 8 +- .../binary_only/frida_libpng/src/fuzzer.rs | 4 +- .../frida_windows_gdiplus/src/fuzzer.rs | 8 +- .../binary_only/qemu_coverage/src/fuzzer.rs | 2 +- .../binary_only/qemu_launcher/src/instance.rs | 6 +- .../libafl-fuzz/src/feedback/filepath.rs | 12 +- fuzzers/forkserver/libafl-fuzz/src/fuzzer.rs | 5 +- .../full_system/nyx_launcher/src/instance.rs | 4 +- .../libfuzzer_libpng_centralized/src/lib.rs | 2 +- .../libfuzzer_libpng_cmin/src/lib.rs | 4 +- .../libfuzzer_libpng_norestart/src/lib.rs | 2 +- .../src/lib.rs | 2 +- libafl/src/events/broker_hooks/centralized.rs | 6 +- .../broker_hooks/centralized_multi_machine.rs | 11 +- libafl/src/events/broker_hooks/mod.rs | 6 +- libafl/src/events/centralized.rs | 98 ++++++---- libafl/src/events/launcher.rs | 32 +++- libafl/src/events/llmp/mgr.rs | 144 ++++++++------ libafl/src/events/llmp/mod.rs | 52 +++--- libafl/src/events/llmp/restarting.rs | 131 +++++++------ libafl/src/events/simple.rs | 55 +++--- libafl/src/events/tcp.rs | 84 ++++----- libafl/src/executors/forkserver.rs | 61 +++--- libafl/src/executors/inprocess/inner.rs | 1 + libafl/src/executors/inprocess/stateful.rs | 20 +- libafl/src/executors/inprocess_fork/inner.rs | 16 +- libafl/src/executors/inprocess_fork/mod.rs | 22 +-- .../src/executors/inprocess_fork/stateful.rs | 22 +-- .../concolic/serialization_format.rs | 16 +- libafl/src/stages/sync.rs | 27 +-- libafl_bolts/examples/llmp_test/main.rs | 6 +- libafl_bolts/src/core_affinity.rs | 1 + libafl_bolts/src/llmp.rs | 176 ++++++++---------- libafl_bolts/src/os/unix_shmem_server.rs | 80 +++----- libafl_bolts/src/ownedref.rs | 4 +- libafl_bolts/src/shmem.rs | 128 ++++++++----- libafl_bolts/src/staterestore.rs | 20 +- libafl_concolic/symcc_runtime/src/lib.rs | 1 + libafl_concolic/symcc_runtime/src/tracing.rs | 25 ++- libafl_concolic/test/runtime_test/src/lib.rs | 6 +- libafl_libfuzzer/runtime/src/fuzz.rs | 2 +- libafl_libfuzzer/runtime/src/merge.rs | 2 +- libafl_qemu/src/executor.rs | 11 +- libafl_sugar/src/forkserver.rs | 2 +- libafl_sugar/src/inmemory.rs | 2 +- libafl_sugar/src/qemu.rs | 2 +- libafl_tinyinst/src/executor.rs | 38 ++-- 48 files changed, 753 insertions(+), 663 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f4510882dc..a8cc60e368 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -75,7 +75,7 @@ pub trait X } ``` -- __Ideally__ the types used in the the arguments of methods in traits should have the same as the types defined on the traits. +- __Ideally__ the types used in the arguments of methods in traits should have the same as the types defined on the traits. ```rust pub trait X // <- this trait have 3 generics, A, B, and C { @@ -84,6 +84,22 @@ pub trait X // <- this trait have 3 generics, A, B, and C fn do_other_stuff(&self, a: A, b: B); // <- this is not ideal because it does not have C. } ``` +- Generic naming should be consistent. Do NOT use multiple name for the same generic, it just makes things more confusing. 
Do: +```rust +pub struct X { + phantom: PhanomData, +} + +impl X {} +``` +But not: +```rust +pub struct X { + phantom: PhanomData, +} + +impl X {} // <- Do NOT do that, use A instead of B +``` - Always alphabetically order the type generics. Therefore, ```rust pub struct X {}; // <- Generics are alphabetically ordered @@ -91,4 +107,31 @@ pub struct X {}; // <- Generics are alphabetically ordered But not, ```rust pub struct X {}; // <- Generics are not ordered -``` \ No newline at end of file +``` +- Similarly, generic bounds in `where` clauses should be alphabetically sorted. Prefer: +```rust +pub trait FooA {} +pub trait FooB {} + +pub struct X; + +impl X +where + A: FooA, + B: FooB, +{} +``` +Over: +```rust +pub trait FooA {} +pub trait FooB {} + +pub struct X; + +impl X +where + B: FooB, // <-| + // | Generic bounds are not alphabetically ordered. + A: FooA, // <-| +{} +``` diff --git a/fuzzers/binary_only/frida_executable_libpng/src/fuzzer.rs b/fuzzers/binary_only/frida_executable_libpng/src/fuzzer.rs index bd3e2202a4..d4988a97b0 100644 --- a/fuzzers/binary_only/frida_executable_libpng/src/fuzzer.rs +++ b/fuzzers/binary_only/frida_executable_libpng/src/fuzzer.rs @@ -96,7 +96,7 @@ unsafe fn fuzz( let shmem_provider = StdShMemProvider::new()?; let mut run_client = |state: Option<_>, - mgr: LlmpRestartingEventManager<_, _, _, _>, + mgr: LlmpRestartingEventManager<_, _, _, _, _>, client_description: ClientDescription| { // The restarting state will spawn the same process again as child, then restarted it each time it crashes. @@ -104,7 +104,7 @@ unsafe fn fuzz( if options.asan && options.asan_cores.contains(client_description.core_id()) { (|state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _client_description| { let gum = Gum::obtain(); @@ -231,7 +231,7 @@ unsafe fn fuzz( })(state, mgr, client_description) } else if options.cmplog && options.cmplog_cores.contains(client_description.core_id()) { (|state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _client_description| { let gum = Gum::obtain(); @@ -367,7 +367,7 @@ unsafe fn fuzz( })(state, mgr, client_description) } else { (|state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _client_description| { let gum = Gum::obtain(); diff --git a/fuzzers/binary_only/frida_libpng/src/fuzzer.rs b/fuzzers/binary_only/frida_libpng/src/fuzzer.rs index adc03b5212..6d7ee9441e 100644 --- a/fuzzers/binary_only/frida_libpng/src/fuzzer.rs +++ b/fuzzers/binary_only/frida_libpng/src/fuzzer.rs @@ -81,7 +81,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { }; let mut run_client = |state: Option<_>, - mgr: LlmpRestartingEventManager<_, _, _, _>, + mgr: LlmpRestartingEventManager<_, _, _, _, _>, client_description: ClientDescription| { // The restarting state will spawn the same process again as child, then restarted it each time it crashes. 
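The CONTRIBUTING.md rules added above (one name per generic, generics and `where` bounds kept in alphabetical order) and the extra `_` these hunks add to `LlmpRestartingEventManager<_, _, _, _, _>` are two faces of the same change: bounds such as `SP: ShMemProvider` leave the struct definition and move onto the impl blocks that need them. A minimal sketch of that pattern, using hypothetical `Mem`/`MemProvider` traits rather than the real `libafl_bolts` ones:

```rust
use core::marker::PhantomData;

// Hypothetical stand-ins for ShMem / ShMemProvider; not the real libafl_bolts traits.
pub trait Mem {}
pub trait MemProvider {
    type Mem: Mem;
}

// The struct definition carries no trait bounds; generics are listed alphabetically.
pub struct EventMgr<I, S, SHM, SP> {
    shmem: SHM,
    phantom: PhantomData<(I, S, SP)>,
}

// Bounds appear only on the impl blocks that actually need them,
// alphabetically sorted in the `where` clause.
impl<I, S, SHM, SP> EventMgr<I, S, SHM, SP>
where
    SHM: Mem,
    SP: MemProvider<Mem = SHM>,
{
    pub fn new(shmem: SHM) -> Self {
        Self {
            shmem,
            phantom: PhantomData,
        }
    }
}
```

Because the struct itself is bound-free, code that merely names the type (type aliases, fields, other wrapper structs) no longer has to repeat the provider bound.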
@@ -101,7 +101,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { // if options.asan && options.asan_cores.contains(client_description.core_id()) { (|state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _client_description| { let gum = Gum::obtain(); diff --git a/fuzzers/binary_only/frida_windows_gdiplus/src/fuzzer.rs b/fuzzers/binary_only/frida_windows_gdiplus/src/fuzzer.rs index 7e613bf238..5e09527f1a 100644 --- a/fuzzers/binary_only/frida_windows_gdiplus/src/fuzzer.rs +++ b/fuzzers/binary_only/frida_windows_gdiplus/src/fuzzer.rs @@ -78,7 +78,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { let shmem_provider = StdShMemProvider::new()?; let mut run_client = |state: Option<_>, - mgr: LlmpRestartingEventManager<_, _, _, _>, + mgr: LlmpRestartingEventManager<_, _, _, _, _>, client_description: ClientDescription| { // The restarting state will spawn the same process again as child, then restarted it each time it crashes. @@ -98,7 +98,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { if options.asan && options.asan_cores.contains(client_description.core_id()) { (|state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _client_description| { let gum = Gum::obtain(); @@ -214,7 +214,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { })(state, mgr, client_description) } else if options.cmplog && options.cmplog_cores.contains(client_description.core_id()) { (|state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _client_description| { let gum = Gum::obtain(); @@ -344,7 +344,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { })(state, mgr, client_description) } else { (|state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _client_description| { let gum = Gum::obtain(); diff --git a/fuzzers/binary_only/qemu_coverage/src/fuzzer.rs b/fuzzers/binary_only/qemu_coverage/src/fuzzer.rs index 9bb14f600c..5bb670362e 100644 --- a/fuzzers/binary_only/qemu_coverage/src/fuzzer.rs +++ b/fuzzers/binary_only/qemu_coverage/src/fuzzer.rs @@ -124,7 +124,7 @@ pub fn fuzz() { env::remove_var("LD_LIBRARY_PATH"); let mut run_client = |state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, client_description: ClientDescription| { let mut cov_path = options.coverage_path.clone(); diff --git a/fuzzers/binary_only/qemu_launcher/src/instance.rs b/fuzzers/binary_only/qemu_launcher/src/instance.rs index cb23e17640..5a6a8584bf 100644 --- a/fuzzers/binary_only/qemu_launcher/src/instance.rs +++ b/fuzzers/binary_only/qemu_launcher/src/instance.rs @@ -7,7 +7,7 @@ use libafl::events::SimpleEventManager; use libafl::events::{LlmpRestartingEventManager, MonitorTypedEventManager}; use libafl::{ corpus::{Corpus, InMemoryOnDiskCorpus, OnDiskCorpus}, - events::{ClientDescription, EventRestarter, NopEventManager}, + events::{ClientDescription, EventRestarter}, executors::{Executor, ShadowExecutor}, feedback_or, feedback_or_fast, feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, @@ -30,7 +30,7 @@ use libafl::{ Error, HasMetadata, NopFuzzer, }; #[cfg(not(feature = "simplemgr"))] -use libafl_bolts::shmem::StdShMemProvider; +use libafl_bolts::shmem::{StdShMem, StdShMemProvider}; use 
libafl_bolts::{ ownedref::OwnedMutSlice, rands::StdRand, @@ -58,7 +58,7 @@ pub type ClientState = pub type ClientMgr = SimpleEventManager; #[cfg(not(feature = "simplemgr"))] pub type ClientMgr = MonitorTypedEventManager< - LlmpRestartingEventManager<(), BytesInput, ClientState, StdShMemProvider>, + LlmpRestartingEventManager<(), BytesInput, ClientState, StdShMem, StdShMemProvider>, M, >; diff --git a/fuzzers/forkserver/libafl-fuzz/src/feedback/filepath.rs b/fuzzers/forkserver/libafl-fuzz/src/feedback/filepath.rs index 28fc3d2009..20abd783c7 100644 --- a/fuzzers/forkserver/libafl-fuzz/src/feedback/filepath.rs +++ b/fuzzers/forkserver/libafl-fuzz/src/feedback/filepath.rs @@ -68,6 +68,12 @@ where Ok(false) } + #[cfg(feature = "track_hit_feedbacks")] + #[inline] + fn last_result(&self) -> Result { + Ok(false) + } + fn append_metadata( &mut self, state: &mut S, @@ -78,10 +84,4 @@ where (self.func)(state, testcase, &self.out_dir)?; Ok(()) } - - #[cfg(feature = "track_hit_feedbacks")] - #[inline] - fn last_result(&self) -> Result { - Ok(false) - } } diff --git a/fuzzers/forkserver/libafl-fuzz/src/fuzzer.rs b/fuzzers/forkserver/libafl-fuzz/src/fuzzer.rs index 5d545207ad..633f4cc212 100644 --- a/fuzzers/forkserver/libafl-fuzz/src/fuzzer.rs +++ b/fuzzers/forkserver/libafl-fuzz/src/fuzzer.rs @@ -42,7 +42,7 @@ use libafl::{ Error, Fuzzer, HasFeedback, HasMetadata, SerdeAny, }; #[cfg(not(feature = "fuzzbench"))] -use libafl_bolts::shmem::StdShMemProvider; +use libafl_bolts::shmem::{StdShMem, StdShMemProvider}; use libafl_bolts::{ core_affinity::CoreId, current_nanos, current_time, @@ -77,10 +77,11 @@ pub type LibaflFuzzState = #[cfg(not(feature = "fuzzbench"))] type LibaflFuzzManager = CentralizedEventManager< - LlmpRestartingEventManager<(), BytesInput, LibaflFuzzState, StdShMemProvider>, + LlmpRestartingEventManager<(), BytesInput, LibaflFuzzState, StdShMem, StdShMemProvider>, (), BytesInput, LibaflFuzzState, + StdShMem, StdShMemProvider, >; #[cfg(feature = "fuzzbench")] diff --git a/fuzzers/full_system/nyx_launcher/src/instance.rs b/fuzzers/full_system/nyx_launcher/src/instance.rs index c20b3839d8..568297844b 100644 --- a/fuzzers/full_system/nyx_launcher/src/instance.rs +++ b/fuzzers/full_system/nyx_launcher/src/instance.rs @@ -30,7 +30,7 @@ use libafl::{ use libafl_bolts::{ current_nanos, rands::StdRand, - shmem::StdShMemProvider, + shmem::{StdShMem, StdShMemProvider}, tuples::{tuple_list, Merge}, }; use libafl_nyx::{ @@ -44,7 +44,7 @@ pub type ClientState = StdState, BytesInput, StdRand, OnDiskCorpus>; pub type ClientMgr = MonitorTypedEventManager< - LlmpRestartingEventManager<(), BytesInput, ClientState, StdShMemProvider>, + LlmpRestartingEventManager<(), BytesInput, ClientState, StdShMem, StdShMemProvider>, M, >; diff --git a/fuzzers/inprocess/libfuzzer_libpng_centralized/src/lib.rs b/fuzzers/inprocess/libfuzzer_libpng_centralized/src/lib.rs index 8eb53dafe8..f4508a1bb0 100644 --- a/fuzzers/inprocess/libfuzzer_libpng_centralized/src/lib.rs +++ b/fuzzers/inprocess/libfuzzer_libpng_centralized/src/lib.rs @@ -141,7 +141,7 @@ pub extern "C" fn libafl_main() { let mut secondary_run_client = |state: Option<_>, - mut mgr: CentralizedEventManager<_, _, _, _, _>, + mut mgr: CentralizedEventManager<_, _, _, _, _, _>, _client_description: ClientDescription| { // Create an observation channel using the coverage map let edges_observer = diff --git a/fuzzers/inprocess/libfuzzer_libpng_cmin/src/lib.rs b/fuzzers/inprocess/libfuzzer_libpng_cmin/src/lib.rs index 88e552b437..850d15ed27 100644 --- 
a/fuzzers/inprocess/libfuzzer_libpng_cmin/src/lib.rs +++ b/fuzzers/inprocess/libfuzzer_libpng_cmin/src/lib.rs @@ -219,7 +219,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re let orig_size = state.corpus().count(); let msg = "Started distillation...".to_string(); - as EventFirer>::log( + as EventFirer>::log( &mut restarting_mgr, &mut state, LogSeverity::Info, @@ -227,7 +227,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re )?; minimizer.minimize(&mut fuzzer, &mut executor, &mut restarting_mgr, &mut state)?; let msg = format!("Distilled out {} cases", orig_size - state.corpus().count()); - as EventFirer>::log( + as EventFirer>::log( &mut restarting_mgr, &mut state, LogSeverity::Info, diff --git a/fuzzers/inprocess/libfuzzer_libpng_norestart/src/lib.rs b/fuzzers/inprocess/libfuzzer_libpng_norestart/src/lib.rs index 8b299b1517..5a195930c9 100644 --- a/fuzzers/inprocess/libfuzzer_libpng_norestart/src/lib.rs +++ b/fuzzers/inprocess/libfuzzer_libpng_norestart/src/lib.rs @@ -162,7 +162,7 @@ pub extern "C" fn libafl_main() { ); let mut run_client = |state: Option<_>, - mut restarting_mgr: LlmpRestartingEventManager<_, _, _, _>, + mut restarting_mgr: LlmpRestartingEventManager<_, _, _, _, _>, client_description: ClientDescription| { // Create an observation channel using the coverage map let edges_observer = diff --git a/fuzzers/inprocess/sqlite_centralized_multi_machine/src/lib.rs b/fuzzers/inprocess/sqlite_centralized_multi_machine/src/lib.rs index 66d9963f07..096f06e5ad 100644 --- a/fuzzers/inprocess/sqlite_centralized_multi_machine/src/lib.rs +++ b/fuzzers/inprocess/sqlite_centralized_multi_machine/src/lib.rs @@ -157,7 +157,7 @@ pub extern "C" fn libafl_main() { let mut secondary_run_client = |state: Option<_>, - mut mgr: CentralizedEventManager<_, _, _, _>, + mut mgr: CentralizedEventManager<_, _, _, _, _, _>, _client_description: ClientDescription| { // Create an observation channel using the coverage map let edges_observer = diff --git a/libafl/src/events/broker_hooks/centralized.rs b/libafl/src/events/broker_hooks/centralized.rs index 5fbb437719..28381279b0 100644 --- a/libafl/src/events/broker_hooks/centralized.rs +++ b/libafl/src/events/broker_hooks/centralized.rs @@ -5,7 +5,6 @@ use core::{fmt::Debug, marker::PhantomData}; use libafl_bolts::{compress::GzipCompressor, llmp::LLMP_FLAG_COMPRESSED}; use libafl_bolts::{ llmp::{Flags, LlmpBrokerInner, LlmpHook, LlmpMsgHookResult, Tag}, - shmem::ShMemProvider, ClientId, Error, }; use serde::de::DeserializeOwned; @@ -21,14 +20,13 @@ pub struct CentralizedLlmpHook { phantom: PhantomData, } -impl LlmpHook for CentralizedLlmpHook +impl LlmpHook for CentralizedLlmpHook where I: DeserializeOwned, - SP: ShMemProvider, { fn on_new_message( &mut self, - _broker_inner: &mut LlmpBrokerInner, + _broker_inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, _msg_flags: &mut Flags, diff --git a/libafl/src/events/broker_hooks/centralized_multi_machine.rs b/libafl/src/events/broker_hooks/centralized_multi_machine.rs index e522c62831..08fdf2f7bb 100644 --- a/libafl/src/events/broker_hooks/centralized_multi_machine.rs +++ b/libafl/src/events/broker_hooks/centralized_multi_machine.rs @@ -11,7 +11,6 @@ use libafl_bolts::llmp::LLMP_FLAG_COMPRESSED; use libafl_bolts::{ llmp::{Flags, LlmpBrokerInner, LlmpHook, LlmpMsgHookResult, Tag, LLMP_FLAG_FROM_MM}, ownedref::OwnedRef, - shmem::ShMemProvider, ClientId, Error, }; use serde::Serialize; @@ -149,16 +148,15 @@ where } } -impl 
LlmpHook for TcpMultiMachineLlmpSenderHook +impl LlmpHook for TcpMultiMachineLlmpSenderHook where I: Input, A: Clone + Display + ToSocketAddrs + Send + Sync + 'static, - SP: ShMemProvider, { /// check for received messages, and forward them alongside the incoming message to inner. fn on_new_message( &mut self, - _broker_inner: &mut LlmpBrokerInner, + _broker_inner: &mut LlmpBrokerInner, _client_id: ClientId, _msg_tag: &mut Tag, _msg_flags: &mut Flags, @@ -211,16 +209,15 @@ where } } -impl LlmpHook for TcpMultiMachineLlmpReceiverHook +impl LlmpHook for TcpMultiMachineLlmpReceiverHook where I: Input, A: Clone + Display + ToSocketAddrs + Send + Sync + 'static, - SP: ShMemProvider, { /// check for received messages, and forward them alongside the incoming message to inner. fn on_new_message( &mut self, - _broker_inner: &mut LlmpBrokerInner, + _broker_inner: &mut LlmpBrokerInner, _client_id: ClientId, _msg_tag: &mut Tag, _msg_flags: &mut Flags, diff --git a/libafl/src/events/broker_hooks/mod.rs b/libafl/src/events/broker_hooks/mod.rs index baac20f950..f65f1c131d 100644 --- a/libafl/src/events/broker_hooks/mod.rs +++ b/libafl/src/events/broker_hooks/mod.rs @@ -6,7 +6,6 @@ use core::marker::PhantomData; use libafl_bolts::{compress::GzipCompressor, llmp::LLMP_FLAG_COMPRESSED}; use libafl_bolts::{ llmp::{Flags, LlmpBrokerInner, LlmpHook, LlmpMsgHookResult, Tag}, - shmem::ShMemProvider, ClientId, }; use serde::de::DeserializeOwned; @@ -40,15 +39,14 @@ pub struct StdLlmpEventHook { phantom: PhantomData, } -impl LlmpHook for StdLlmpEventHook +impl LlmpHook for StdLlmpEventHook where I: DeserializeOwned, - SP: ShMemProvider, MT: Monitor, { fn on_new_message( &mut self, - _broker_inner: &mut LlmpBrokerInner, + _broker_inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, #[cfg(feature = "llmp_compression")] msg_flags: &mut Flags, diff --git a/libafl/src/events/centralized.rs b/libafl/src/events/centralized.rs index 0dbede7466..63e80359d7 100644 --- a/libafl/src/events/centralized.rs +++ b/libafl/src/events/centralized.rs @@ -18,7 +18,7 @@ use libafl_bolts::{ }; use libafl_bolts::{ llmp::{LlmpClient, LlmpClientDescription, Tag}, - shmem::{NopShMemProvider, ShMemProvider}, + shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider}, tuples::{Handle, MatchNameRef}, ClientId, }; @@ -46,13 +46,10 @@ pub(crate) const _LLMP_TAG_TO_MAIN: Tag = Tag(0x3453453); /// A wrapper manager to implement a main-secondary architecture with another broker #[derive(Debug)] -pub struct CentralizedEventManager -where - SP: ShMemProvider, -{ +pub struct CentralizedEventManager { inner: EM, /// The centralized LLMP client for inter process communication - client: LlmpClient, + client: LlmpClient, #[cfg(feature = "llmp_compression")] compressor: GzipCompressor, time_ref: Option>, @@ -61,7 +58,16 @@ where phantom: PhantomData<(I, S)>, } -impl CentralizedEventManager, NopShMemProvider> { +impl + CentralizedEventManager< + NopEventManager, + (), + NopInput, + NopState, + NopShMem, + NopShMemProvider, + > +{ /// Creates a builder for [`CentralizedEventManager`] #[must_use] pub fn builder() -> CentralizedEventManagerBuilder { @@ -95,13 +101,14 @@ impl CentralizedEventManagerBuilder { } /// Creates a new [`CentralizedEventManager`]. 
+ #[expect(clippy::type_complexity)] pub fn build_from_client( self, inner: EM, hooks: EMH, - client: LlmpClient, + client: LlmpClient, time_obs: Option>, - ) -> Result, Error> + ) -> Result, Error> where SP: ShMemProvider, { @@ -121,16 +128,17 @@ impl CentralizedEventManagerBuilder { /// /// If the port is not yet bound, it will act as a broker; otherwise, it /// will act as a client. - pub fn build_on_port( + pub fn build_on_port( self, inner: EM, hooks: EMH, shmem_provider: SP, port: u16, time_obs: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let client = LlmpClient::create_attach_to_tcp(shmem_provider, port)?; Self::build_from_client(self, inner, hooks, client, time_obs) @@ -138,42 +146,43 @@ impl CentralizedEventManagerBuilder { /// If a client respawns, it may reuse the existing connection, previously /// stored by [`LlmpClient::to_env()`]. - pub fn build_existing_client_from_env( + pub fn build_existing_client_from_env( self, inner: EM, hooks: EMH, shmem_provider: SP, env_name: &str, time_obs: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let client = LlmpClient::on_existing_from_env(shmem_provider, env_name)?; Self::build_from_client(self, inner, hooks, client, time_obs) } /// Create an existing client from description - pub fn existing_client_from_description( + pub fn existing_client_from_description( self, inner: EM, hooks: EMH, shmem_provider: SP, description: &LlmpClientDescription, time_obs: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let client = LlmpClient::existing_client_from_description(shmem_provider, description)?; Self::build_from_client(self, inner, hooks, client, time_obs) } } -impl AdaptiveSerializer for CentralizedEventManager +impl AdaptiveSerializer for CentralizedEventManager where EM: AdaptiveSerializer, - SP: ShMemProvider, { fn serialization_time(&self) -> Duration { self.inner.serialization_time() @@ -206,13 +215,14 @@ where } } -impl EventFirer for CentralizedEventManager +impl EventFirer for CentralizedEventManager where EM: HasEventManagerId + EventFirer, EMH: EventManagerHooksTuple, - SP: ShMemProvider, S: Stoppable, I: Input, + SHM: ShMem, + SP: ShMemProvider, { fn should_send(&self) -> bool { self.inner.should_send() @@ -263,10 +273,11 @@ where } } -impl EventRestarter for CentralizedEventManager +impl EventRestarter for CentralizedEventManager where - SP: ShMemProvider, EM: EventRestarter, + SHM: ShMem, + SP: ShMemProvider, { #[inline] fn on_restart(&mut self, state: &mut S) -> Result<(), Error> { @@ -276,10 +287,10 @@ where } } -impl CanSerializeObserver for CentralizedEventManager +impl CanSerializeObserver + for CentralizedEventManager where EM: AdaptiveSerializer, - SP: ShMemProvider, OT: Serialize + MatchNameRef, { fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> { @@ -292,10 +303,11 @@ where } } -impl ManagerExit for CentralizedEventManager +impl ManagerExit for CentralizedEventManager where EM: ManagerExit, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn send_exiting(&mut self) -> Result<(), Error> { self.client.sender_mut().send_exiting()?; @@ -309,15 +321,17 @@ where } } -impl EventProcessor for CentralizedEventManager +impl EventProcessor + for CentralizedEventManager where E: HasObservers, E::Observers: DeserializeOwned, EM: EventProcessor + HasEventManagerId + EventFirer, EMH: 
EventManagerHooksTuple, - S: Stoppable, I: Input, - SP: ShMemProvider, + S: Stoppable, + SHM: ShMem, + SP: ShMemProvider, Z: ExecutionProcessor + EvaluatorObservers, { fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result { @@ -337,13 +351,14 @@ where } } -impl ProgressReporter for CentralizedEventManager +impl ProgressReporter for CentralizedEventManager where EM: EventFirer + HasEventManagerId, EMH: EventManagerHooksTuple, - S: HasExecutions + HasMetadata + HasLastReportTime + Stoppable + MaybeHasClientPerfMonitor, I: Input, - SP: ShMemProvider, + S: HasExecutions + HasMetadata + HasLastReportTime + Stoppable + MaybeHasClientPerfMonitor, + SHM: ShMem, + SP: ShMemProvider, { fn maybe_report_progress( &mut self, @@ -358,19 +373,19 @@ where } } -impl HasEventManagerId for CentralizedEventManager +impl HasEventManagerId for CentralizedEventManager where EM: HasEventManagerId, - SP: ShMemProvider, { fn mgr_id(&self) -> EventManagerId { self.inner.mgr_id() } } -impl CentralizedEventManager +impl CentralizedEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Describe the client event manager's LLMP parts in a restorable fashion pub fn describe(&self) -> Result { @@ -389,13 +404,14 @@ where } } -impl CentralizedEventManager +impl CentralizedEventManager where EM: HasEventManagerId + EventFirer, EMH: EventManagerHooksTuple, - S: Stoppable, I: Input, - SP: ShMemProvider, + S: Stoppable, + SHM: ShMem, + SP: ShMemProvider, { #[cfg(feature = "llmp_compression")] fn forward_to_main(&mut self, event: &Event) -> Result<(), Error> { diff --git a/libafl/src/events/launcher.rs b/libafl/src/events/launcher.rs index 5fb4695bfa..12394d768f 100644 --- a/libafl/src/events/launcher.rs +++ b/libafl/src/events/launcher.rs @@ -208,7 +208,6 @@ impl Debug for Launcher<'_, CF, MT, SP> { impl Launcher<'_, CF, MT, SP> where MT: Monitor + Clone, - SP: ShMemProvider, { /// Launch the broker and the clients and fuzz #[cfg(any(windows, not(feature = "fork"), all(unix, feature = "fork")))] @@ -216,11 +215,12 @@ where where CF: FnOnce( Option, - LlmpRestartingEventManager<(), I, S, SP>, + LlmpRestartingEventManager<(), I, S, SP::ShMem, SP>, ClientDescription, ) -> Result<(), Error>, I: DeserializeOwned, S: DeserializeOwned + Serialize, + SP: ShMemProvider, { Self::launch_with_hooks(self, tuple_list!()) } @@ -240,7 +240,7 @@ where EMH: EventManagerHooksTuple + Clone + Copy, CF: FnOnce( Option, - LlmpRestartingEventManager, + LlmpRestartingEventManager, ClientDescription, ) -> Result<(), Error>, { @@ -385,7 +385,7 @@ where where CF: FnOnce( Option, - LlmpRestartingEventManager, + LlmpRestartingEventManager, ClientDescription, ) -> Result<(), Error>, EMH: EventManagerHooksTuple + Clone + Copy, @@ -620,7 +620,7 @@ impl Debug for CentralizedLauncher<'_, CF, MF, MT, SP> { } /// The standard inner manager of centralized -pub type StdCentralizedInnerMgr = LlmpRestartingEventManager<(), I, S, SP>; +pub type StdCentralizedInnerMgr = LlmpRestartingEventManager<(), I, S, SHM, SP>; #[cfg(all(unix, feature = "fork"))] impl CentralizedLauncher<'_, CF, MF, MT, SP> @@ -635,12 +635,26 @@ where I: DeserializeOwned + Input + Send + Sync + 'static, CF: FnOnce( Option, - CentralizedEventManager, (), I, S, SP>, + CentralizedEventManager< + StdCentralizedInnerMgr, + (), + I, + S, + SP::ShMem, + SP, + >, ClientDescription, ) -> Result<(), Error>, MF: FnOnce( Option, - CentralizedEventManager, (), I, S, SP>, + CentralizedEventManager< + StdCentralizedInnerMgr, + (), + I, + S, + SP::ShMem, + SP, + 
>, ClientDescription, ) -> Result<(), Error>, { @@ -682,13 +696,13 @@ where I: Input + Send + Sync + 'static, CF: FnOnce( Option, - CentralizedEventManager, + CentralizedEventManager, ClientDescription, ) -> Result<(), Error>, EMB: FnOnce(&Self, ClientDescription) -> Result<(Option, EM), Error>, MF: FnOnce( Option, - CentralizedEventManager, // No broker_hooks for centralized EM + CentralizedEventManager, // No broker_hooks for centralized EM ClientDescription, ) -> Result<(), Error>, { diff --git a/libafl/src/events/llmp/mgr.rs b/libafl/src/events/llmp/mgr.rs index 34b0619432..de348130f9 100644 --- a/libafl/src/events/llmp/mgr.rs +++ b/libafl/src/events/llmp/mgr.rs @@ -4,7 +4,7 @@ #[cfg(feature = "std")] use alloc::string::ToString; use alloc::vec::Vec; -use core::{marker::PhantomData, time::Duration}; +use core::{fmt::Debug, marker::PhantomData, time::Duration}; #[cfg(feature = "std")] use std::net::TcpStream; @@ -18,7 +18,7 @@ use libafl_bolts::{ use libafl_bolts::{ current_time, llmp::{LlmpClient, LlmpClientDescription, LLMP_FLAG_FROM_MM}, - shmem::{NopShMemProvider, ShMemProvider}, + shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider}, tuples::Handle, ClientId, }; @@ -59,9 +59,10 @@ const INITIAL_EVENT_BUFFER_SIZE: usize = 1024 * 4; /// An `EventManager` that forwards all events to other attached fuzzers on shared maps or via tcp, /// using low-level message passing, `llmp`. -pub struct LlmpEventManager +pub struct LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// We only send 1 testcase for every `throttle` second pub(crate) throttle: Option, @@ -69,7 +70,7 @@ where last_sent: Duration, hooks: EMH, /// The LLMP client for inter process communication - llmp: LlmpClient, + llmp: LlmpClient, #[cfg(feature = "llmp_compression")] compressor: GzipCompressor, /// The configuration defines this specific fuzzer. @@ -85,7 +86,7 @@ where event_buffer: Vec, } -impl LlmpEventManager<(), NopState, NopInput, NopShMemProvider> { +impl LlmpEventManager<(), NopState, NopInput, NopShMem, NopShMemProvider> { /// Creates a builder for [`LlmpEventManager`] #[must_use] pub fn builder() -> LlmpEventManagerBuilder<()> { @@ -134,14 +135,15 @@ impl LlmpEventManagerBuilder { } /// Create a manager from a raw LLMP client - pub fn build_from_client( + pub fn build_from_client( self, - llmp: LlmpClient, + llmp: LlmpClient, configuration: EventConfig, time_ref: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { Ok(LlmpEventManager { throttle: self.throttle, @@ -164,15 +166,16 @@ impl LlmpEventManagerBuilder { /// Create an LLMP event manager on a port. /// It expects a broker to exist on this port. #[cfg(feature = "std")] - pub fn build_on_port( + pub fn build_on_port( self, shmem_provider: SP, port: u16, configuration: EventConfig, time_ref: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let llmp = LlmpClient::create_attach_to_tcp(shmem_provider, port)?; Self::build_from_client(self, llmp, configuration, time_ref) @@ -181,30 +184,32 @@ impl LlmpEventManagerBuilder { /// If a client respawns, it may reuse the existing connection, previously /// stored by [`LlmpClient::to_env()`]. 
#[cfg(feature = "std")] - pub fn build_existing_client_from_env( + pub fn build_existing_client_from_env( self, shmem_provider: SP, env_name: &str, configuration: EventConfig, time_ref: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let llmp = LlmpClient::on_existing_from_env(shmem_provider, env_name)?; Self::build_from_client(self, llmp, configuration, time_ref) } /// Create an existing client from description - pub fn build_existing_client_from_description( + pub fn build_existing_client_from_description( self, shmem_provider: SP, description: &LlmpClientDescription, configuration: EventConfig, time_ref: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let llmp = LlmpClient::existing_client_from_description(shmem_provider, description)?; Self::build_from_client(self, llmp, configuration, time_ref) @@ -212,19 +217,21 @@ impl LlmpEventManagerBuilder { } #[cfg(feature = "std")] -impl CanSerializeObserver for LlmpEventManager +impl CanSerializeObserver for LlmpEventManager where - SP: ShMemProvider, OT: Serialize + MatchNameRef, + SHM: ShMem, + SP: ShMemProvider, { fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> { serialize_observers_adaptive::(self, observers, 2, 80) } } -impl AdaptiveSerializer for LlmpEventManager +impl AdaptiveSerializer for LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn serialization_time(&self) -> Duration { self.serialization_time @@ -257,9 +264,10 @@ where } } -impl core::fmt::Debug for LlmpEventManager +impl Debug for LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let mut debug_struct = f.debug_struct("LlmpEventManager"); @@ -274,9 +282,10 @@ where } } -impl Drop for LlmpEventManager +impl Drop for LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// LLMP clients will have to wait until their pages are mapped by somebody. fn drop(&mut self) { @@ -284,9 +293,10 @@ where } } -impl LlmpEventManager +impl LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Calling this function will tell the llmp broker that this client is exiting /// This should be called from the restarter not from the actual fuzzer client @@ -334,9 +344,10 @@ where } } -impl LlmpEventManager +impl LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { // Handle arriving events in the client fn handle_in_client( @@ -423,7 +434,11 @@ where } } -impl LlmpEventManager { +impl LlmpEventManager +where + SHM: ShMem, + SP: ShMemProvider, +{ /// Send information that this client is exiting. /// The other side may free up all allocated memory. /// We are no longer allowed to send anything afterwards. 
@@ -432,18 +447,12 @@ impl LlmpEventManager { } } -impl EventFirer for LlmpEventManager +impl EventFirer for LlmpEventManager where I: Serialize, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { - fn should_send(&self) -> bool { - if let Some(throttle) = self.throttle { - current_time() - self.last_sent > throttle - } else { - true - } - } fn fire(&mut self, _state: &mut S, event: Event) -> Result<(), Error> { #[cfg(feature = "llmp_compression")] let flags = LLMP_FLAG_INITIALIZED; @@ -490,46 +499,57 @@ where self.last_sent = current_time(); Ok(()) } - fn configuration(&self) -> EventConfig { self.configuration } + + fn should_send(&self) -> bool { + if let Some(throttle) = self.throttle { + current_time() - self.last_sent > throttle + } else { + true + } + } } -impl EventRestarter for LlmpEventManager +impl EventRestarter for LlmpEventManager where - SP: ShMemProvider, S: HasCurrentStageId, + SHM: ShMem, + SP: ShMemProvider, { fn on_restart(&mut self, state: &mut S) -> Result<(), Error> { std_on_restart(self, state) } } -impl ManagerExit for LlmpEventManager +impl ManagerExit for LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SHM: ShMem, + SP: ShMemProvider, { + fn send_exiting(&mut self) -> Result<(), Error> { + self.llmp.sender_mut().send_exiting() + } + /// The LLMP client needs to wait until a broker has mapped all pages before shutting down. /// Otherwise, the OS may already have removed the shared maps. fn await_restart_safe(&mut self) { // wait until we can drop the message safely. self.llmp.await_safe_to_unmap_blocking(); } - - fn send_exiting(&mut self) -> Result<(), Error> { - self.llmp.sender_mut().send_exiting() - } } -impl EventProcessor for LlmpEventManager +impl EventProcessor for LlmpEventManager where E: HasObservers, E::Observers: DeserializeOwned, - S: HasImported + HasSolutions + HasCurrentTestcase + Stoppable, EMH: EventManagerHooksTuple, I: DeserializeOwned + Input, - SP: ShMemProvider, + S: HasImported + HasSolutions + HasCurrentTestcase + Stoppable, + SHM: ShMem, + SP: ShMemProvider, Z: ExecutionProcessor + EvaluatorObservers, { fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result { @@ -537,14 +557,15 @@ where let self_id = self.llmp.sender().id(); let mut count = 0; while let Some((client_id, tag, flags, msg)) = self.llmp.recv_buf_with_flags()? { - assert!( - tag != _LLMP_TAG_EVENT_TO_BROKER, + assert_ne!( + tag, _LLMP_TAG_EVENT_TO_BROKER, "EVENT_TO_BROKER parcel should not have arrived in the client!" ); if client_id == self_id { continue; } + #[cfg(not(feature = "llmp_compression"))] let event_bytes = msg; #[cfg(feature = "llmp_compression")] @@ -556,6 +577,7 @@ where } else { msg }; + let event: Event = postcard::from_bytes(event_bytes)?; log::debug!("Received event in normal llmp {}", event.name_detailed()); @@ -576,11 +598,12 @@ where } } -impl ProgressReporter for LlmpEventManager +impl ProgressReporter for LlmpEventManager where - S: HasExecutions + HasLastReportTime + HasMetadata + MaybeHasClientPerfMonitor, - SP: ShMemProvider, I: Serialize, + S: HasExecutions + HasLastReportTime + HasMetadata + MaybeHasClientPerfMonitor, + SHM: ShMem, + SP: ShMemProvider, { fn maybe_report_progress( &mut self, @@ -595,9 +618,10 @@ where } } -impl HasEventManagerId for LlmpEventManager +impl HasEventManagerId for LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Gets the id assigned to this staterestorer. 
fn mgr_id(&self) -> EventManagerId { diff --git a/libafl/src/events/llmp/mod.rs b/libafl/src/events/llmp/mod.rs index ffcbb2d06d..e9f651548c 100644 --- a/libafl/src/events/llmp/mod.rs +++ b/libafl/src/events/llmp/mod.rs @@ -9,7 +9,7 @@ use libafl_bolts::{ }; use libafl_bolts::{ llmp::{LlmpClient, LlmpClientDescription, Tag}, - shmem::{NopShMemProvider, ShMemProvider}, + shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider}, ClientId, }; use serde::{de::DeserializeOwned, Serialize}; @@ -84,12 +84,9 @@ impl LlmpShouldSaveState { } /// A manager-like llmp client that converts between input types -pub struct LlmpEventConverter -where - SP: ShMemProvider, -{ +pub struct LlmpEventConverter { throttle: Option, - llmp: LlmpClient, + llmp: LlmpClient, last_sent: Duration, #[cfg(feature = "llmp_compression")] compressor: GzipCompressor, @@ -104,6 +101,7 @@ impl NopInputConverter, NopInputConverter, NopState, + NopShMem, NopShMemProvider, > { @@ -136,15 +134,12 @@ impl LlmpEventConverterBuilder { } /// Create a event converter from a raw llmp client - pub fn build_from_client( + pub fn build_from_client( self, - llmp: LlmpClient, + llmp: LlmpClient, converter: Option, converter_back: Option, - ) -> Result, Error> - where - SP: ShMemProvider, - { + ) -> Result, Error> { Ok(LlmpEventConverter { throttle: self.throttle, last_sent: Duration::from_secs(0), @@ -159,15 +154,16 @@ impl LlmpEventConverterBuilder { /// Create a client from port and the input converters #[cfg(feature = "std")] - pub fn build_on_port( + pub fn build_on_port( self, shmem_provider: SP, port: u16, converter: Option, converter_back: Option, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let llmp = LlmpClient::create_attach_to_tcp(shmem_provider, port)?; Ok(LlmpEventConverter { @@ -184,15 +180,16 @@ impl LlmpEventConverterBuilder { /// If a client respawns, it may reuse the existing connection, previously stored by [`LlmpClient::to_env()`]. #[cfg(feature = "std")] - pub fn build_existing_client_from_env( + pub fn build_existing_client_from_env( self, shmem_provider: SP, env_name: &str, converter: Option, converter_back: Option, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let llmp = LlmpClient::on_existing_from_env(shmem_provider, env_name)?; Ok(LlmpEventConverter { @@ -208,11 +205,12 @@ impl LlmpEventConverterBuilder { } } -impl Debug for LlmpEventConverter +impl Debug for LlmpEventConverter where - SP: ShMemProvider, IC: Debug, ICB: Debug, + SHM: Debug, + SP: Debug, { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let mut debug_struct = f.debug_struct("LlmpEventConverter"); @@ -228,9 +226,10 @@ where } } -impl LlmpEventConverter +impl LlmpEventConverter where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { // TODO other new_* routines @@ -341,8 +340,8 @@ where let self_id = self.llmp.sender().id(); let mut count = 0; while let Some((client_id, tag, _flags, msg)) = self.llmp.recv_buf_with_flags()? { - assert!( - tag != _LLMP_TAG_EVENT_TO_BROKER, + assert_ne!( + tag, _LLMP_TAG_EVENT_TO_BROKER, "EVENT_TO_BROKER parcel should not have arrived in the client!" 
); @@ -370,11 +369,12 @@ where } } -impl EventFirer for LlmpEventConverter +impl EventFirer for LlmpEventConverter where IC: InputConverter, - SP: ShMemProvider, IC::To: Serialize, + SHM: ShMem, + SP: ShMemProvider, { fn should_send(&self) -> bool { if let Some(throttle) = self.throttle { diff --git a/libafl/src/events/llmp/restarting.rs b/libafl/src/events/llmp/restarting.rs index bea9ca2789..bbdc971263 100644 --- a/libafl/src/events/llmp/restarting.rs +++ b/libafl/src/events/llmp/restarting.rs @@ -22,7 +22,7 @@ use libafl_bolts::{ core_affinity::CoreId, llmp::{Broker, LlmpBroker, LlmpConnection}, os::CTRL_C_EXIT, - shmem::{ShMemProvider, StdShMemProvider}, + shmem::{ShMem, ShMemProvider, StdShMem, StdShMemProvider}, staterestore::StateRestorer, tuples::{tuple_list, Handle, MatchNameRef}, }; @@ -55,21 +55,23 @@ use crate::{ /// A manager that can restart on the fly, storing states in-between (in `on_restart`) #[derive(Debug)] -pub struct LlmpRestartingEventManager +pub struct LlmpRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// The embedded LLMP event manager - llmp_mgr: LlmpEventManager, + llmp_mgr: LlmpEventManager, /// The staterestorer to serialize the state for the next runner - staterestorer: StateRestorer, + staterestorer: StateRestorer, /// Decide if the state restorer must save the serialized state save_state: LlmpShouldSaveState, } -impl AdaptiveSerializer for LlmpRestartingEventManager +impl AdaptiveSerializer for LlmpRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn serialization_time(&self) -> Duration { self.llmp_mgr.serialization_time() @@ -102,11 +104,12 @@ where } } -impl ProgressReporter for LlmpRestartingEventManager +impl ProgressReporter for LlmpRestartingEventManager where - S: HasExecutions + HasLastReportTime + HasMetadata + Serialize + MaybeHasClientPerfMonitor, - SP: ShMemProvider, I: Serialize, + S: HasExecutions + HasLastReportTime + HasMetadata + Serialize + MaybeHasClientPerfMonitor, + SHM: ShMem, + SP: ShMemProvider, { fn maybe_report_progress( &mut self, @@ -121,16 +124,13 @@ where } } -impl EventFirer for LlmpRestartingEventManager +impl EventFirer for LlmpRestartingEventManager where I: Serialize, S: Serialize, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { - fn should_send(&self) -> bool { - as EventFirer>::should_send(&self.llmp_mgr) - } - fn fire(&mut self, state: &mut S, event: Event) -> Result<(), Error> { // Check if we are going to crash in the event, in which case we store our current state for the next runner self.llmp_mgr.fire(state, event)?; @@ -139,25 +139,32 @@ where } fn configuration(&self) -> EventConfig { - as EventFirer>::configuration(&self.llmp_mgr) + as EventFirer>::configuration(&self.llmp_mgr) + } + + fn should_send(&self) -> bool { + as EventFirer>::should_send(&self.llmp_mgr) } } #[cfg(feature = "std")] -impl CanSerializeObserver for LlmpRestartingEventManager +impl CanSerializeObserver + for LlmpRestartingEventManager where - SP: ShMemProvider, OT: Serialize + MatchNameRef, + SHM: ShMem, + SP: ShMemProvider, { fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> { serialize_observers_adaptive::(self, observers, 2, 80) } } -impl EventRestarter for LlmpRestartingEventManager +impl EventRestarter for LlmpRestartingEventManager where - SP: ShMemProvider, S: Serialize + HasCurrentStageId, + SHM: ShMem, + SP: ShMemProvider, { /// Reset the single page (we reuse it over and over from pos 0), then send the current state to the 
next runner. fn on_restart(&mut self, state: &mut S) -> Result<(), Error> { @@ -180,9 +187,10 @@ where } } -impl ManagerExit for LlmpRestartingEventManager +impl ManagerExit for LlmpRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn send_exiting(&mut self) -> Result<(), Error> { self.staterestorer.send_exiting(); @@ -199,16 +207,18 @@ where } } -impl EventProcessor for LlmpRestartingEventManager +impl EventProcessor + for LlmpRestartingEventManager where - EMH: EventManagerHooksTuple, E: HasObservers, E::Observers: DeserializeOwned, - S: HasImported + HasCurrentTestcase + HasSolutions + Stoppable + Serialize, + EMH: EventManagerHooksTuple, I: DeserializeOwned + Input, - SP: ShMemProvider, - Z: ExecutionProcessor, I, E::Observers, S> - + EvaluatorObservers, I, S>, + S: HasImported + HasCurrentTestcase + HasSolutions + Stoppable + Serialize, + SHM: ShMem, + SP: ShMemProvider, + Z: ExecutionProcessor, I, E::Observers, S> + + EvaluatorObservers, I, S>, { fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result { let res = self.llmp_mgr.process(fuzzer, state, executor)?; @@ -221,9 +231,10 @@ where } } -impl HasEventManagerId for LlmpRestartingEventManager +impl HasEventManagerId for LlmpRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn mgr_id(&self) -> EventManagerId { self.llmp_mgr.mgr_id() @@ -236,15 +247,16 @@ const _ENV_FUZZER_RECEIVER: &str = "_AFL_ENV_FUZZER_RECEIVER"; /// The llmp (2 way) connection from a fuzzer to the broker (broadcasting all other fuzzer messages) const _ENV_FUZZER_BROKER_CLIENT_INITIAL: &str = "_AFL_ENV_FUZZER_BROKER_CLIENT"; -impl LlmpRestartingEventManager +impl LlmpRestartingEventManager where - SP: ShMemProvider, S: Serialize, + SHM: ShMem, + SP: ShMemProvider, { /// Create a new runner, the executed child doing the actual fuzzing. pub fn new( - llmp_mgr: LlmpEventManager, - staterestorer: StateRestorer, + llmp_mgr: LlmpEventManager, + staterestorer: StateRestorer, ) -> Self { Self { llmp_mgr, @@ -255,8 +267,8 @@ where /// Create a new runner specifying if it must save the serialized state on restart. 
pub fn with_save_state( - llmp_mgr: LlmpEventManager, - staterestorer: StateRestorer, + llmp_mgr: LlmpEventManager, + staterestorer: StateRestorer, save_state: LlmpShouldSaveState, ) -> Self { Self { @@ -267,12 +279,12 @@ where } /// Get the staterestorer - pub fn staterestorer(&self) -> &StateRestorer { + pub fn staterestorer(&self) -> &StateRestorer { &self.staterestorer } /// Get the staterestorer (mutable) - pub fn staterestorer_mut(&mut self) -> &mut StateRestorer { + pub fn staterestorer_mut(&mut self) -> &mut StateRestorer { &mut self.staterestorer } @@ -314,7 +326,7 @@ pub fn setup_restarting_mgr_std( ) -> Result< ( Option, - LlmpRestartingEventManager<(), I, S, StdShMemProvider>, + LlmpRestartingEventManager<(), I, S, StdShMem, StdShMemProvider>, ), Error, > @@ -347,7 +359,7 @@ pub fn setup_restarting_mgr_std_adaptive( ) -> Result< ( Option, - LlmpRestartingEventManager<(), I, S, StdShMemProvider>, + LlmpRestartingEventManager<(), I, S, StdShMem, StdShMemProvider>, ), Error, > @@ -414,20 +426,26 @@ pub struct RestartingMgr { impl RestartingMgr where EMH: EventManagerHooksTuple + Copy + Clone, - SP: ShMemProvider, - S: Serialize + DeserializeOwned, I: DeserializeOwned, MT: Monitor + Clone, + S: Serialize + DeserializeOwned, + SP: ShMemProvider, { /// Launch the broker and the clients and fuzz pub fn launch( &mut self, - ) -> Result<(Option, LlmpRestartingEventManager), Error> { + ) -> Result< + ( + Option, + LlmpRestartingEventManager, + ), + Error, + > { // We start ourselves as child process to actually fuzz let (staterestorer, new_shmem_provider, core_id) = if std::env::var(_ENV_FUZZER_SENDER) .is_err() { - let broker_things = |mut broker: LlmpBroker<_, SP>, remote_broker_addr| { + let broker_things = |mut broker: LlmpBroker<_, SP::ShMem, SP>, remote_broker_addr| { if let Some(remote_broker_addr) = remote_broker_addr { log::info!("B2b: Connecting to {:?}", &remote_broker_addr); broker.inner_mut().connect_b2b(remote_broker_addr)?; @@ -467,13 +485,14 @@ where return Err(Error::shutting_down()); } LlmpConnection::IsClient { client } => { - let mgr: LlmpEventManager = LlmpEventManager::builder() - .hooks(self.hooks) - .build_from_client( - client, - self.configuration, - self.time_ref.clone(), - )?; + let mgr: LlmpEventManager = + LlmpEventManager::builder() + .hooks(self.hooks) + .build_from_client( + client, + self.configuration, + self.time_ref.clone(), + )?; (mgr, None) } } @@ -516,11 +535,11 @@ where // First, create a channel from the current fuzzer to the next to store state between restarts. #[cfg(unix)] - let staterestorer: StateRestorer = + let staterestorer: StateRestorer = StateRestorer::new(self.shmem_provider.new_shmem(256 * 1024 * 1024)?); #[cfg(not(unix))] - let staterestorer: StateRestorer = + let staterestorer: StateRestorer = StateRestorer::new(self.shmem_provider.new_shmem(256 * 1024 * 1024)?); // Store the information to a map. staterestorer.write_to_env(_ENV_FUZZER_SENDER)?; @@ -683,7 +702,7 @@ mod tests { use libafl_bolts::{ llmp::{LlmpClient, LlmpSharedMap}, rands::StdRand, - shmem::{ShMemProvider, StdShMemProvider}, + shmem::{ShMemProvider, StdShMem, StdShMemProvider}, staterestore::StateRestorer, tuples::{tuple_list, Handled}, ClientId, @@ -772,7 +791,7 @@ mod tests { let mut stages = tuple_list!(StdMutationalStage::new(mutator)); // First, create a channel from the current fuzzer to the next to store state between restarts. 
- let mut staterestorer = StateRestorer::::new( + let mut staterestorer = StateRestorer::::new( shmem_provider.new_shmem(256 * 1024 * 1024).unwrap(), ); diff --git a/libafl/src/events/simple.rs b/libafl/src/events/simple.rs index 475517be4b..35e2cdb8de 100644 --- a/libafl/src/events/simple.rs +++ b/libafl/src/events/simple.rs @@ -13,7 +13,11 @@ use libafl_bolts::os::unix_signals::setup_signal_handler; use libafl_bolts::os::{fork, ForkResult}; use libafl_bolts::ClientId; #[cfg(feature = "std")] -use libafl_bolts::{os::CTRL_C_EXIT, shmem::ShMemProvider, staterestore::StateRestorer}; +use libafl_bolts::{ + os::CTRL_C_EXIT, + shmem::{ShMem, ShMemProvider}, + staterestore::StateRestorer, +}; #[cfg(feature = "std")] use serde::de::DeserializeOwned; use serde::Serialize; @@ -280,23 +284,19 @@ where /// `restarter` will start a new process each time the child crashes or times out. #[cfg(feature = "std")] #[derive(Debug)] -pub struct SimpleRestartingEventManager -where - SP: ShMemProvider, -{ +pub struct SimpleRestartingEventManager { /// The actual simple event mgr inner: SimpleEventManager, /// [`StateRestorer`] for restarts - staterestorer: StateRestorer, + staterestorer: StateRestorer, } #[cfg(feature = "std")] -impl EventFirer for SimpleRestartingEventManager +impl EventFirer for SimpleRestartingEventManager where I: Debug, MT: Monitor, S: Stoppable, - SP: ShMemProvider, { fn should_send(&self) -> bool { true @@ -308,9 +308,10 @@ where } #[cfg(feature = "std")] -impl EventRestarter for SimpleRestartingEventManager +impl EventRestarter for SimpleRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, S: HasCurrentStageId + Serialize, MT: Monitor, { @@ -329,9 +330,9 @@ where } #[cfg(feature = "std")] -impl CanSerializeObserver for SimpleRestartingEventManager +impl CanSerializeObserver + for SimpleRestartingEventManager where - SP: ShMemProvider, OT: Serialize, { fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> { @@ -340,9 +341,10 @@ where } #[cfg(feature = "std")] -impl ManagerExit for SimpleRestartingEventManager +impl ManagerExit for SimpleRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn send_exiting(&mut self) -> Result<(), Error> { self.staterestorer.send_exiting(); @@ -354,12 +356,14 @@ where } #[cfg(feature = "std")] -impl EventProcessor for SimpleRestartingEventManager +impl EventProcessor + for SimpleRestartingEventManager where I: Debug, MT: Monitor, - SP: ShMemProvider, S: Stoppable, + SHM: ShMem, + SP: ShMemProvider, { fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result { self.inner.process(fuzzer, state, executor) @@ -371,11 +375,10 @@ where } #[cfg(feature = "std")] -impl ProgressReporter for SimpleRestartingEventManager +impl ProgressReporter for SimpleRestartingEventManager where I: Debug, MT: Monitor, - SP: ShMemProvider, S: HasExecutions + HasMetadata + HasLastReportTime + Stoppable + MaybeHasClientPerfMonitor, { fn maybe_report_progress( @@ -392,25 +395,23 @@ where } #[cfg(feature = "std")] -impl HasEventManagerId for SimpleRestartingEventManager -where - SP: ShMemProvider, -{ +impl HasEventManagerId for SimpleRestartingEventManager { fn mgr_id(&self) -> EventManagerId { self.inner.mgr_id() } } #[cfg(feature = "std")] -impl SimpleRestartingEventManager +impl SimpleRestartingEventManager where I: Debug, MT: Monitor, S: Stoppable, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Creates a new [`SimpleEventManager`]. 
- fn launched(monitor: MT, staterestorer: StateRestorer) -> Self { + fn launched(monitor: MT, staterestorer: StateRestorer) -> Self { Self { staterestorer, inner: SimpleEventManager::new(monitor), @@ -429,10 +430,10 @@ where let mut staterestorer = if std::env::var(_ENV_FUZZER_SENDER).is_err() { // First, create a place to store state in, for restarts. #[cfg(unix)] - let staterestorer: StateRestorer = + let staterestorer: StateRestorer = StateRestorer::new(shmem_provider.new_shmem(256 * 1024 * 1024)?); #[cfg(not(unix))] - let staterestorer: StateRestorer = + let staterestorer: StateRestorer = StateRestorer::new(shmem_provider.new_shmem(256 * 1024 * 1024)?); //let staterestorer = { LlmpSender::new(shmem_provider.clone(), 0, false)? }; diff --git a/libafl/src/events/tcp.rs b/libafl/src/events/tcp.rs index dcb90fd5cb..fb53d7bd87 100644 --- a/libafl/src/events/tcp.rs +++ b/libafl/src/events/tcp.rs @@ -25,7 +25,7 @@ use libafl_bolts::os::{fork, ForkResult}; use libafl_bolts::{ core_affinity::CoreId, os::CTRL_C_EXIT, - shmem::{ShMemProvider, StdShMemProvider}, + shmem::{ShMem, ShMemProvider, StdShMem, StdShMemProvider}, staterestore::StateRestorer, tuples::tuple_list, ClientId, @@ -819,24 +819,20 @@ impl HasEventManagerId for TcpEventManager { /// A manager that can restart on the fly, storing states in-between (in `on_restart`) #[derive(Debug)] -pub struct TcpRestartingEventManager -where - SP: ShMemProvider, -{ +pub struct TcpRestartingEventManager { /// The embedded TCP event manager tcp_mgr: TcpEventManager, /// The staterestorer to serialize the state for the next runner - staterestorer: StateRestorer, + staterestorer: StateRestorer, /// Decide if the state restorer must save the serialized state save_state: bool, } -impl ProgressReporter for TcpRestartingEventManager +impl ProgressReporter for TcpRestartingEventManager where EMH: EventManagerHooksTuple, S: HasMetadata + HasExecutions + HasLastReportTime + MaybeHasClientPerfMonitor, I: Serialize, - SP: ShMemProvider, { fn maybe_report_progress( &mut self, @@ -851,11 +847,10 @@ where } } -impl EventFirer for TcpRestartingEventManager +impl EventFirer for TcpRestartingEventManager where EMH: EventManagerHooksTuple, I: Serialize, - SP: ShMemProvider, { fn should_send(&self) -> bool { self.tcp_mgr.should_send() @@ -871,30 +866,32 @@ where } } -impl ManagerExit for TcpRestartingEventManager +impl ManagerExit for TcpRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { - /// The tcp client needs to wait until a broker mapped all pages, before shutting down. - /// Otherwise, the OS may already have removed the shared maps, - #[inline] - fn await_restart_safe(&mut self) { - self.tcp_mgr.await_restart_safe(); - } - fn send_exiting(&mut self) -> Result<(), Error> { self.staterestorer.send_exiting(); // Also inform the broker that we are about to exit. // This way, the broker can clean up the pages, and eventually exit. self.tcp_mgr.send_exiting() } + + /// The tcp client needs to wait until a broker mapped all pages, before shutting down. 
+ /// Otherwise, the OS may already have removed the shared maps, + #[inline] + fn await_restart_safe(&mut self) { + self.tcp_mgr.await_restart_safe(); + } } -impl EventRestarter for TcpRestartingEventManager +impl EventRestarter for TcpRestartingEventManager where EMH: EventManagerHooksTuple, S: HasExecutions + HasCurrentStageId + Serialize, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Reset the single page (we reuse it over and over from pos 0), then send the current state to the next runner. fn on_restart(&mut self, state: &mut S) -> Result<(), Error> { @@ -913,7 +910,8 @@ where } } -impl EventProcessor for TcpRestartingEventManager +impl EventProcessor + for TcpRestartingEventManager where E: HasObservers + Executor, I, S, Z>, for<'a> E::Observers: Deserialize<'a>, @@ -926,7 +924,8 @@ where + HasSolutions + HasCurrentTestcase + Stoppable, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, Z: ExecutionProcessor, I, E::Observers, S> + EvaluatorObservers, I, S>, { @@ -939,10 +938,7 @@ where } } -impl HasEventManagerId for TcpRestartingEventManager -where - SP: ShMemProvider, -{ +impl HasEventManagerId for TcpRestartingEventManager { fn mgr_id(&self) -> EventManagerId { self.tcp_mgr.mgr_id() } @@ -954,13 +950,12 @@ const _ENV_FUZZER_RECEIVER: &str = "_AFL_ENV_FUZZER_RECEIVER"; /// The tcp (2 way) connection from a fuzzer to the broker (broadcasting all other fuzzer messages) const _ENV_FUZZER_BROKER_CLIENT_INITIAL: &str = "_AFL_ENV_FUZZER_BROKER_CLIENT"; -impl TcpRestartingEventManager +impl TcpRestartingEventManager where EMH: EventManagerHooksTuple, - SP: ShMemProvider, { /// Create a new runner, the executed child doing the actual fuzzing. - pub fn new(tcp_mgr: TcpEventManager, staterestorer: StateRestorer) -> Self { + pub fn new(tcp_mgr: TcpEventManager, staterestorer: StateRestorer) -> Self { Self { tcp_mgr, staterestorer, @@ -971,7 +966,7 @@ where /// Create a new runner specifying if it must save the serialized state on restart. pub fn with_save_state( tcp_mgr: TcpEventManager, - staterestorer: StateRestorer, + staterestorer: StateRestorer, save_state: bool, ) -> Self { Self { @@ -982,12 +977,12 @@ where } /// Get the staterestorer - pub fn staterestorer(&self) -> &StateRestorer { + pub fn staterestorer(&self) -> &StateRestorer { &self.staterestorer } /// Get the staterestorer (mutable) - pub fn staterestorer_mut(&mut self) -> &mut StateRestorer { + pub fn staterestorer_mut(&mut self) -> &mut StateRestorer { &mut self.staterestorer } } @@ -1018,7 +1013,7 @@ pub fn setup_restarting_mgr_tcp( ) -> Result< ( Option, - TcpRestartingEventManager<(), I, S, StdShMemProvider>, + TcpRestartingEventManager<(), I, S, StdShMem, StdShMemProvider>, ), Error, > @@ -1049,12 +1044,7 @@ where /// `restarter` and `runner`, that can be used on systems both with and without `fork` support. The /// `restarter` will start a new process each time the child crashes or times out. #[derive(TypedBuilder, Debug)] -pub struct TcpRestartingMgr -where - MT: Monitor, - S: DeserializeOwned, - SP: ShMemProvider + 'static, -{ +pub struct TcpRestartingMgr { /// The shared memory provider to use for the broker or client spawned by the restarting /// manager. 
shmem_provider: SP, @@ -1095,7 +1085,6 @@ where EMH: EventManagerHooksTuple + Copy + Clone, I: Input, MT: Monitor + Clone, - SP: ShMemProvider, S: HasExecutions + HasMetadata + HasImported @@ -1103,11 +1092,18 @@ where + HasCurrentTestcase + DeserializeOwned + Stoppable, + SP: ShMemProvider, { /// Launch the restarting manager pub fn launch( &mut self, - ) -> Result<(Option, TcpRestartingEventManager), Error> { + ) -> Result< + ( + Option, + TcpRestartingEventManager, + ), + Error, + > { // We start ourself as child process to actually fuzz let (staterestorer, _new_shmem_provider, core_id) = if env::var(_ENV_FUZZER_SENDER).is_err() { @@ -1185,11 +1181,11 @@ where // First, create a channel from the current fuzzer to the next to store state between restarts. #[cfg(unix)] - let staterestorer: StateRestorer = + let staterestorer: StateRestorer = StateRestorer::new(self.shmem_provider.new_shmem(256 * 1024 * 1024)?); #[cfg(not(unix))] - let staterestorer: StateRestorer = + let staterestorer: StateRestorer = StateRestorer::new(self.shmem_provider.new_shmem(256 * 1024 * 1024)?); // Store the information to a map. staterestorer.write_to_env(_ENV_FUZZER_SENDER)?; diff --git a/libafl/src/executors/forkserver.rs b/libafl/src/executors/forkserver.rs index 825990a940..7aa726188d 100644 --- a/libafl/src/executors/forkserver.rs +++ b/libafl/src/executors/forkserver.rs @@ -22,7 +22,7 @@ use libafl_bolts::{ fs::{get_unique_std_input_file, InputFile}, os::{dup2, pipes::Pipe}, ownedref::OwnedSlice, - shmem::{ShMem, ShMemProvider, UnixShMemProvider}, + shmem::{ShMem, ShMemProvider, UnixShMem, UnixShMemProvider}, tuples::{Handle, Handled, MatchNameRef, Prepend, RefIndexable}, AsSlice, AsSliceMut, Truncate, }; @@ -606,10 +606,7 @@ impl Forkserver { /// /// Shared memory feature is also available, but you have to set things up in your code. /// Please refer to AFL++'s docs. -pub struct ForkserverExecutor -where - SP: ShMemProvider, -{ +pub struct ForkserverExecutor { target: OsString, args: Vec, input_file: InputFile, @@ -617,7 +614,7 @@ where uses_shmem_testcase: bool, forkserver: Forkserver, observers: OT, - map: Option, + map: Option, phantom: PhantomData<(I, S)>, map_size: Option, min_input_size: usize, @@ -628,11 +625,11 @@ where crash_exitcode: Option, } -impl Debug for ForkserverExecutor +impl Debug for ForkserverExecutor where TC: Debug, OT: Debug, - SP: ShMemProvider, + SHM: Debug, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("ForkserverExecutor") @@ -648,7 +645,7 @@ where } } -impl ForkserverExecutor<(), (), (), UnixShMemProvider, ()> { +impl ForkserverExecutor<(), (), (), UnixShMem, ()> { /// Builder for `ForkserverExecutor` #[must_use] pub fn builder( @@ -658,11 +655,11 @@ impl ForkserverExecutor<(), (), (), UnixShMemProvider, ()> { } } -impl ForkserverExecutor +impl ForkserverExecutor where OT: ObserversTuple, - SP: ShMemProvider, TC: TargetBytesConverter, + SHM: ShMem, { /// The `target` binary that's going to run. pub fn target(&self) -> &OsString { @@ -827,9 +824,10 @@ pub struct ForkserverExecutorBuilder<'a, TC, SP> { target_bytes_converter: TC, } -impl<'a, TC, SP> ForkserverExecutorBuilder<'a, TC, SP> +impl<'a, TC, SHM, SP> ForkserverExecutorBuilder<'a, TC, SP> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Builds `ForkserverExecutor`. /// This Forkserver will attempt to provide inputs over shared mem when `shmem_provider` is given. 
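The hunks above make `ForkserverExecutor` generic over the concrete `ShMem` mapping rather than over the provider, as the default impl switching from `UnixShMemProvider` to `UnixShMem` shows. A minimal sketch of how a fully spelled-out downstream type could read after this change; the alias name is invented for illustration, and the generic order is taken from the `impl ForkserverExecutor<(), (), (), UnixShMem, ()>` line above:
```rust
use libafl::executors::forkserver::ForkserverExecutor;
use libafl_bolts::shmem::UnixShMem;

// The fourth type parameter now names the concrete shared-memory mapping
// (`UnixShMem`) where it used to name the provider (`UnixShMemProvider`).
type AflForkserverExecutor<I, OT, S, TC> = ForkserverExecutor<I, OT, S, UnixShMem, TC>;
```
The builder side is unchanged in shape: `ForkserverExecutor::builder()` still receives the provider through `shmem_provider()`, and only the resulting executor type names the mapping.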
@@ -840,10 +838,9 @@ where pub fn build( mut self, observers: OT, - ) -> Result, Error> + ) -> Result, Error> where OT: ObserversTuple, - SP: ShMemProvider, TC: TargetBytesConverter, { let (forkserver, input_file, map) = self.build_helper()?; @@ -905,13 +902,12 @@ where mut self, mut map_observer: A, other_observers: OT, - ) -> Result, Error> + ) -> Result, Error> where A: Observer + AsMut, I: Input + HasTargetBytes, MO: MapObserver + Truncate, // TODO maybe enforce Entry = u8 for the cov map OT: ObserversTuple + Prepend, - SP: ShMemProvider, { let (forkserver, input_file, map) = self.build_helper()?; @@ -965,10 +961,7 @@ where } #[expect(clippy::pedantic)] - fn build_helper(&mut self) -> Result<(Forkserver, InputFile, Option), Error> - where - SP: ShMemProvider, - { + fn build_helper(&mut self) -> Result<(Forkserver, InputFile, Option), Error> { let input_filename = match &self.input_filename { Some(name) => name.clone(), None => { @@ -1042,7 +1035,7 @@ where fn initialize_forkserver( &mut self, status: i32, - map: Option<&SP::ShMem>, + map: Option<&SHM>, forkserver: &mut Forkserver, ) -> Result<(), Error> { let keep = status; @@ -1140,7 +1133,7 @@ where fn initialize_old_forkserver( &mut self, status: i32, - map: Option<&SP::ShMem>, + map: Option<&SHM>, forkserver: &mut Forkserver, ) -> Result<(), Error> { if status & FS_OPT_ENABLED == FS_OPT_ENABLED && status & FS_OPT_MAPSIZE == FS_OPT_MAPSIZE { @@ -1505,7 +1498,7 @@ impl<'a> ForkserverExecutorBuilder<'a, NopTargetBytesConverter, Unix impl<'a, TC> ForkserverExecutorBuilder<'a, TC, UnixShMemProvider> { /// Shmem provider for forkserver's shared memory testcase feature. - pub fn shmem_provider( + pub fn shmem_provider( self, shmem_provider: &'a mut SP, ) -> ForkserverExecutorBuilder<'a, TC, SP> { @@ -1577,12 +1570,12 @@ impl Default } } -impl Executor for ForkserverExecutor +impl Executor for ForkserverExecutor where OT: ObserversTuple, - SP: ShMemProvider, S: HasExecutions, TC: TargetBytesConverter, + SHM: ShMem, { #[inline] fn run_target( @@ -1596,25 +1589,21 @@ where } } -impl HasTimeout for ForkserverExecutor -where - SP: ShMemProvider, -{ +impl HasTimeout for ForkserverExecutor { #[inline] - fn set_timeout(&mut self, timeout: Duration) { - self.timeout = TimeSpec::from_duration(timeout); + fn timeout(&self) -> Duration { + self.timeout.into() } #[inline] - fn timeout(&self) -> Duration { - self.timeout.into() + fn set_timeout(&mut self, timeout: Duration) { + self.timeout = TimeSpec::from_duration(timeout); } } -impl HasObservers for ForkserverExecutor +impl HasObservers for ForkserverExecutor where OT: ObserversTuple, - SP: ShMemProvider, { type Observers = OT; diff --git a/libafl/src/executors/inprocess/inner.rs b/libafl/src/executors/inprocess/inner.rs index f5297ba65a..f94317e0cd 100644 --- a/libafl/src/executors/inprocess/inner.rs +++ b/libafl/src/executors/inprocess/inner.rs @@ -74,6 +74,7 @@ where /// # Safety /// This function sets a bunch of raw pointers in global variables, reused in other parts of /// the code. 
+ // TODO: Remove EM and Z from function bound and add it to struct instead to avoid possible type confusion #[inline] pub unsafe fn enter_target( &mut self, diff --git a/libafl/src/executors/inprocess/stateful.rs b/libafl/src/executors/inprocess/stateful.rs index c2f347952a..943afffcae 100644 --- a/libafl/src/executors/inprocess/stateful.rs +++ b/libafl/src/executors/inprocess/stateful.rs @@ -27,8 +27,8 @@ use crate::{ /// The process executor simply calls a target function, as mutable reference to a closure /// The internal state of the executor is made available to the harness. -pub type StatefulInProcessExecutor<'a, H, I, OT, S, ES> = - StatefulGenericInProcessExecutor; +pub type StatefulInProcessExecutor<'a, ES, H, I, OT, S> = + StatefulGenericInProcessExecutor; /// The process executor simply calls a target function, as boxed `FnMut` trait object /// The internal state of the executor is made available to the harness. @@ -44,7 +44,7 @@ pub type OwnedInProcessExecutor = StatefulGenericInProcessExecutor /// The inmem executor simply calls a target function, then returns afterwards. /// The harness can access the internal state of the executor. -pub struct StatefulGenericInProcessExecutor { +pub struct StatefulGenericInProcessExecutor { /// The harness function, being executed for each fuzzing loop execution harness_fn: HB, /// The state used as argument of the harness @@ -54,7 +54,7 @@ pub struct StatefulGenericInProcessExecutor { phantom: PhantomData<(ES, *const H)>, } -impl Debug for StatefulGenericInProcessExecutor +impl Debug for StatefulGenericInProcessExecutor where OT: Debug, { @@ -67,7 +67,7 @@ where } impl Executor - for StatefulGenericInProcessExecutor + for StatefulGenericInProcessExecutor where H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized, HB: BorrowMut, @@ -99,7 +99,7 @@ where } impl HasObservers - for StatefulGenericInProcessExecutor + for StatefulGenericInProcessExecutor where H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized, HB: BorrowMut, @@ -118,7 +118,7 @@ where } } -impl<'a, H, I, OT, S, ES> StatefulInProcessExecutor<'a, H, I, OT, S, ES> +impl<'a, H, I, OT, S, ES> StatefulInProcessExecutor<'a, ES, H, I, OT, S> where H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized, OT: ObserversTuple, @@ -224,7 +224,7 @@ where } } -impl StatefulGenericInProcessExecutor { +impl StatefulGenericInProcessExecutor { /// The executor state given to the harness pub fn exposed_executor_state(&self) -> &ES { &self.exposed_executor_state @@ -236,7 +236,7 @@ impl StatefulGenericInProcessExecutor StatefulGenericInProcessExecutor +impl StatefulGenericInProcessExecutor where H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized, HB: BorrowMut, @@ -364,7 +364,7 @@ where } impl HasInProcessHooks - for StatefulGenericInProcessExecutor + for StatefulGenericInProcessExecutor { /// the timeout handler #[inline] diff --git a/libafl/src/executors/inprocess_fork/inner.rs b/libafl/src/executors/inprocess_fork/inner.rs index b585bac18b..7a9bb569fa 100644 --- a/libafl/src/executors/inprocess_fork/inner.rs +++ b/libafl/src/executors/inprocess_fork/inner.rs @@ -32,7 +32,7 @@ use crate::{ }; /// Inner state of GenericInProcessExecutor-like structures. 
-pub struct GenericInProcessForkExecutorInner { +pub struct GenericInProcessForkExecutorInner { pub(super) hooks: (InChildProcessHooks, HT), pub(super) shmem_provider: SP, pub(super) observers: OT, @@ -40,10 +40,10 @@ pub struct GenericInProcessForkExecutorInner { pub(super) itimerspec: libc::itimerspec, #[cfg(all(unix, not(target_os = "linux")))] pub(super) itimerval: Itimerval, - pub(super) phantom: PhantomData<(I, S, EM, Z)>, + pub(super) phantom: PhantomData<(EM, I, S, Z)>, } -impl Debug for GenericInProcessForkExecutorInner +impl Debug for GenericInProcessForkExecutorInner where HT: Debug, OT: Debug, @@ -104,11 +104,11 @@ fn parse_itimerval(timeout: Duration) -> Itimerval { } } -impl GenericInProcessForkExecutorInner +impl GenericInProcessForkExecutorInner where HT: ExecutorHooksTuple, - SP: ShMemProvider, OT: ObserversTuple, + SP: ShMemProvider, { pub(super) unsafe fn pre_run_target_child( &mut self, @@ -195,7 +195,7 @@ where } } -impl GenericInProcessForkExecutorInner +impl GenericInProcessForkExecutorInner where HT: ExecutorHooksTuple, OT: ObserversTuple, @@ -284,8 +284,8 @@ where } } -impl HasObservers - for GenericInProcessForkExecutorInner +impl HasObservers + for GenericInProcessForkExecutorInner { type Observers = OT; diff --git a/libafl/src/executors/inprocess_fork/mod.rs b/libafl/src/executors/inprocess_fork/mod.rs index e0968334af..9144a0d14a 100644 --- a/libafl/src/executors/inprocess_fork/mod.rs +++ b/libafl/src/executors/inprocess_fork/mod.rs @@ -39,10 +39,10 @@ pub mod stateful; /// /// On Linux, when fuzzing a Rust target, set `panic = "abort"` in your `Cargo.toml` (see [Cargo documentation](https://doc.rust-lang.org/cargo/reference/profiles.html#panic)). /// Else panics can not be caught by `LibAFL`. -pub type InProcessForkExecutor<'a, H, I, OT, S, SP, EM, Z> = - GenericInProcessForkExecutor<'a, H, (), I, OT, S, SP, EM, Z>; +pub type InProcessForkExecutor<'a, EM, H, I, OT, S, SP, Z> = + GenericInProcessForkExecutor<'a, EM, H, (), I, OT, S, SP, Z>; -impl<'a, H, I, OT, S, SP, EM, Z> InProcessForkExecutor<'a, H, I, OT, S, SP, EM, Z> +impl<'a, H, I, OT, S, SP, EM, Z> InProcessForkExecutor<'a, EM, H, I, OT, S, SP, Z> where OT: ObserversTuple, { @@ -73,13 +73,13 @@ where /// /// On Linux, when fuzzing a Rust target, set `panic = "abort"` in your `Cargo.toml` (see [Cargo documentation](https://doc.rust-lang.org/cargo/reference/profiles.html#panic)). /// Else panics can not be caught by `LibAFL`. 
-pub struct GenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, EM, Z> { +pub struct GenericInProcessForkExecutor<'a, EM, H, HT, I, OT, S, SP, Z> { harness_fn: &'a mut H, - inner: GenericInProcessForkExecutorInner, + inner: GenericInProcessForkExecutorInner, } impl Debug - for GenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, EM, Z> + for GenericInProcessForkExecutor<'_, EM, H, HT, I, OT, S, SP, Z> where HT: Debug, OT: Debug, @@ -103,13 +103,13 @@ where } impl Executor - for GenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, EM, Z> + for GenericInProcessForkExecutor<'_, EM, H, HT, I, OT, S, SP, Z> where H: FnMut(&I) -> ExitKind + Sized, - S: HasExecutions, - SP: ShMemProvider, HT: ExecutorHooksTuple, OT: ObserversTuple, + S: HasExecutions, + SP: ShMemProvider, { #[inline] fn run_target( @@ -141,7 +141,7 @@ where } } -impl<'a, H, HT, I, OT, S, SP, EM, Z> GenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, EM, Z> +impl<'a, H, HT, I, OT, S, SP, EM, Z> GenericInProcessForkExecutor<'a, EM, H, HT, I, OT, S, SP, Z> where HT: ExecutorHooksTuple, OT: ObserversTuple, @@ -187,7 +187,7 @@ where { } impl HasObservers - for GenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, EM, Z> + for GenericInProcessForkExecutor<'_, EM, H, HT, I, OT, S, SP, Z> { type Observers = OT; #[inline] diff --git a/libafl/src/executors/inprocess_fork/stateful.rs b/libafl/src/executors/inprocess_fork/stateful.rs index 0a1f1df141..f6a206bf20 100644 --- a/libafl/src/executors/inprocess_fork/stateful.rs +++ b/libafl/src/executors/inprocess_fork/stateful.rs @@ -4,7 +4,6 @@ //! The harness can access internal state. use core::{ fmt::{self, Debug, Formatter}, - marker::PhantomData, time::Duration, }; @@ -25,12 +24,13 @@ use crate::{ }; /// The `StatefulInProcessForkExecutor` with no user hooks -pub type StatefulInProcessForkExecutor<'a, H, I, OT, S, SP, ES, EM, Z> = - StatefulGenericInProcessForkExecutor<'a, H, (), I, OT, S, SP, ES, EM, Z>; +pub type StatefulInProcessForkExecutor<'a, EM, ES, H, I, OT, S, SP, Z> = + StatefulGenericInProcessForkExecutor<'a, EM, ES, H, (), I, OT, S, SP, Z>; -impl<'a, H, I, OT, S, SP, ES, EM, Z> StatefulInProcessForkExecutor<'a, H, I, OT, S, SP, ES, EM, Z> +impl<'a, H, I, OT, S, SP, ES, EM, Z> StatefulInProcessForkExecutor<'a, EM, ES, H, I, OT, S, SP, Z> where OT: ObserversTuple, + SP: ShMemProvider, { #[expect(clippy::too_many_arguments)] /// The constructor for `InProcessForkExecutor` @@ -59,18 +59,17 @@ where } /// [`StatefulGenericInProcessForkExecutor`] is an executor that forks the current process before each execution. Harness can access some internal state. 
-pub struct StatefulGenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, ES, EM, Z> { +pub struct StatefulGenericInProcessForkExecutor<'a, EM, ES, H, HT, I, OT, S, SP, Z> { /// The harness function, being executed for each fuzzing loop execution harness_fn: &'a mut H, /// The state used as argument of the harness pub exposed_executor_state: ES, /// Inner state of the executor - pub inner: GenericInProcessForkExecutorInner, - phantom: PhantomData, + pub inner: GenericInProcessForkExecutorInner, } impl Debug - for StatefulGenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, ES, EM, Z> + for StatefulGenericInProcessForkExecutor<'_, EM, ES, H, HT, I, OT, S, SP, Z> where HT: Debug, OT: Debug, @@ -94,7 +93,7 @@ where } impl Executor - for StatefulGenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, ES, EM, Z> + for StatefulGenericInProcessForkExecutor<'_, EM, ES, H, HT, I, OT, S, SP, Z> where H: FnMut(&mut ES, &I) -> ExitKind + Sized, HT: ExecutorHooksTuple, @@ -133,7 +132,7 @@ where } impl<'a, H, HT, I, OT, S, SP, ES, EM, Z> - StatefulGenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, ES, EM, Z> + StatefulGenericInProcessForkExecutor<'a, EM, ES, H, HT, I, OT, S, SP, Z> where HT: ExecutorHooksTuple, OT: ObserversTuple, @@ -163,7 +162,6 @@ where timeout, shmem_provider, )?, - phantom: PhantomData, }) } @@ -181,7 +179,7 @@ where } impl HasObservers - for StatefulGenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, ES, EM, Z> + for StatefulGenericInProcessForkExecutor<'_, EM, ES, H, HT, I, OT, S, SP, Z> { type Observers = OT; diff --git a/libafl/src/observers/concolic/serialization_format.rs b/libafl/src/observers/concolic/serialization_format.rs index 9c7bcba2ad..3718d4eb4c 100644 --- a/libafl/src/observers/concolic/serialization_format.rs +++ b/libafl/src/observers/concolic/serialization_format.rs @@ -219,7 +219,7 @@ impl MessageFileReader { /// A `MessageFileWriter` writes a stream of [`SymExpr`] to any [`Write`]. For each written expression, it returns /// a [`SymExprRef`] which should be used to refer back to it. -pub struct MessageFileWriter { +pub struct MessageFileWriter { id_counter: usize, writer: W, writer_start_position: u64, @@ -396,7 +396,7 @@ impl MessageFileWriter { } } -use libafl_bolts::shmem::{ShMem, ShMemCursor, ShMemProvider, StdShMemProvider}; +use libafl_bolts::shmem::{ShMem, ShMemCursor, ShMemProvider, StdShMem, StdShMemProvider}; /// The default environment variable name to use for the shared memory used by the concolic tracing pub const DEFAULT_ENV_NAME: &str = "SHARED_MEMORY_MESSAGES"; @@ -439,14 +439,17 @@ impl<'buffer> MessageFileReader> { } } -impl MessageFileWriter> { +impl MessageFileWriter> +where + SHM: ShMem, +{ /// Creates a new `MessageFileWriter` from the given [`ShMemCursor`]. - pub fn from_shmem(shmem: T) -> io::Result { + pub fn from_shmem(shmem: SHM) -> io::Result { Self::from_writer(ShMemCursor::new(shmem)) } } -impl MessageFileWriter::ShMem>> { +impl MessageFileWriter> { /// Creates a new `MessageFileWriter` by reading a [`ShMem`] from the given environment variable. pub fn from_stdshmem_env_with_name(env_name: impl AsRef) -> io::Result { Self::from_shmem( @@ -464,8 +467,7 @@ impl MessageFileWriter::ShMem>> } /// A writer that will write messages to a shared memory buffer. 
-pub type StdShMemMessageFileWriter = - MessageFileWriter::ShMem>>; +pub type StdShMemMessageFileWriter = MessageFileWriter>; #[cfg(test)] mod serialization_tests { diff --git a/libafl/src/stages/sync.rs b/libafl/src/stages/sync.rs index 9c3c759927..72fcd5527e 100644 --- a/libafl/src/stages/sync.rs +++ b/libafl/src/stages/sync.rs @@ -7,7 +7,12 @@ use alloc::{ use core::{marker::PhantomData, time::Duration}; use std::path::{Path, PathBuf}; -use libafl_bolts::{current_time, fs::find_new_files_rec, shmem::ShMemProvider, Named}; +use libafl_bolts::{ + current_time, + fs::find_new_files_rec, + shmem::{ShMem, ShMemProvider}, + Named, +}; use serde::{Deserialize, Serialize}; use crate::{ @@ -219,14 +224,12 @@ impl SyncFromBrokerMetadata { /// A stage that loads testcases from disk to sync with other fuzzers such as AFL++ #[derive(Debug)] -pub struct SyncFromBrokerStage -where - SP: ShMemProvider, -{ - client: LlmpEventConverter, +pub struct SyncFromBrokerStage { + client: LlmpEventConverter, } -impl Stage for SyncFromBrokerStage +impl Stage + for SyncFromBrokerStage where DI: Input, EM: EventFirer, @@ -242,7 +245,8 @@ where + HasCurrentTestcase + Stoppable + MaybeHasClientPerfMonitor, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, Z: EvaluatorObservers + ExecutionProcessor, { #[inline] @@ -316,13 +320,10 @@ where } } -impl SyncFromBrokerStage -where - SP: ShMemProvider, -{ +impl SyncFromBrokerStage { /// Creates a new [`SyncFromBrokerStage`] #[must_use] - pub fn new(client: LlmpEventConverter) -> Self { + pub fn new(client: LlmpEventConverter) -> Self { Self { client } } } diff --git a/libafl_bolts/examples/llmp_test/main.rs b/libafl_bolts/examples/llmp_test/main.rs index 50762a536c..96a9667002 100644 --- a/libafl_bolts/examples/llmp_test/main.rs +++ b/libafl_bolts/examples/llmp_test/main.rs @@ -113,13 +113,13 @@ impl Default for LlmpExampleHook { } #[cfg(all(feature = "std", not(target_os = "haiku")))] -impl LlmpHook for LlmpExampleHook +impl LlmpHook for LlmpExampleHook where - SP: ShMemProvider + 'static, + SP: ShMemProvider + 'static, { fn on_new_message( &mut self, - _broker_inner: &mut LlmpBrokerInner, + _broker_inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, _msg_flags: &mut Flags, diff --git a/libafl_bolts/src/core_affinity.rs b/libafl_bolts/src/core_affinity.rs index dcb4de85c1..4408065c7c 100644 --- a/libafl_bolts/src/core_affinity.rs +++ b/libafl_bolts/src/core_affinity.rs @@ -251,6 +251,7 @@ mod linux { use super::CoreId; use crate::Error; + #[allow(trivial_numeric_casts)] pub fn get_core_ids() -> Result, Error> { let full_set = get_affinity_mask()?; let mut core_ids: Vec = Vec::new(); diff --git a/libafl_bolts/src/llmp.rs b/libafl_bolts/src/llmp.rs index acf3ef28ac..c820fda748 100644 --- a/libafl_bolts/src/llmp.rs +++ b/libafl_bolts/src/llmp.rs @@ -707,25 +707,23 @@ impl LlmpMsg { /// An Llmp instance #[derive(Debug)] -pub enum LlmpConnection -where - SP: ShMemProvider, -{ +pub enum LlmpConnection { /// A broker and a thread using this tcp background thread IsBroker { /// The [`LlmpBroker`] of this [`LlmpConnection`]. - broker: LlmpBroker, + broker: LlmpBroker, }, /// A client, connected to the port IsClient { /// The [`LlmpClient`] of this [`LlmpConnection`]. - client: LlmpClient, + client: LlmpClient, }, } -impl LlmpConnection<(), SP> +impl LlmpConnection<(), SHM, SP> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { #[cfg(feature = "std")] /// Creates either a broker, if the tcp port is not bound, or a client, connected to this port. 
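With the `ShMemProvider` bound gone from the struct definitions, `LlmpConnection`, `LlmpBroker`, `LlmpSender`, `LlmpReceiver` and `LlmpClient` carry the concrete `ShMem` type alongside its provider. A minimal sketch of a fully named, hook-less broker over the standard backend; the alias name is hypothetical, and the parameter order follows the `impl LlmpBroker<(), SHM, SP>` form appearing further down in this file's hunks:
```rust
use libafl_bolts::{
    llmp::LlmpBroker,
    shmem::{StdShMem, StdShMemProvider},
};

// A broker with no hooks (`()`) over the platform's standard shared memory:
// the second parameter is the mapping type, the third is its provider.
type StdLlmpBroker = LlmpBroker<(), StdShMem, StdShMemProvider>;
```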
@@ -776,10 +774,11 @@ where } } -impl LlmpConnection +impl LlmpConnection where - MT: LlmpHookTuple, - SP: ShMemProvider, + MT: LlmpHookTuple, + SHM: ShMem, + SP: ShMemProvider, { /// Describe this in a reproducible fashion, if it's a client pub fn describe(&self) -> Result { @@ -793,7 +792,7 @@ where pub fn existing_client_from_description( shmem_provider: SP, description: &LlmpClientDescription, - ) -> Result, Error> { + ) -> Result, Error> { Ok(LlmpConnection::IsClient { client: LlmpClient::existing_client_from_description(shmem_provider, description)?, }) @@ -891,23 +890,20 @@ struct LlmpClientExitInfo { /// Sending end on a (unidirectional) sharedmap channel #[derive(Debug)] -pub struct LlmpSender -where - SP: ShMemProvider, -{ +pub struct LlmpSender { /// ID of this sender. id: ClientId, /// Ref to the last message this sender sent on the last page. /// If null, a new page (just) started. last_msg_sent: *const LlmpMsg, /// A vec of page wrappers, each containing an initialized [`ShMem`] - out_shmems: Vec>, + out_shmems: Vec>, /// A vec of pages that we previously used, but that have served its purpose /// (no potential receivers are left). /// Instead of freeing them, we keep them around to potentially reuse them later, /// if they are still large enough. /// This way, the OS doesn't have to spend time zeroing pages, and getting rid of our old pages - unused_shmem_cache: Vec>, + unused_shmem_cache: Vec>, /// If true, pages will never be pruned. /// The broker uses this feature. /// By keeping the message history around, @@ -920,9 +916,10 @@ where } /// An actor on the sending part of the shared map -impl LlmpSender +impl LlmpSender where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Create a new [`LlmpSender`] using a given [`ShMemProvider`], and `id`. /// If `keep_pages_forever` is `true`, `ShMem` will never be freed. @@ -1068,7 +1065,7 @@ where /// else reattach will get a new, empty page, from the OS, or fail. pub fn on_existing_shmem( shmem_provider: SP, - current_out_shmem: SP::ShMem, + current_out_shmem: SHM, last_msg_sent_offset: Option, ) -> Result { let mut out_shmem = LlmpSharedMap::existing(current_out_shmem); @@ -1307,7 +1304,7 @@ where &mut self, sender_id: ClientId, next_min_shmem_size: usize, - ) -> Result::ShMem>, Error> { + ) -> Result, Error> { // Find a shared map that has been released to reuse, from which all receivers left / finished reading. let cached_shmem = self .unused_shmem_cache @@ -1586,10 +1583,7 @@ where /// Receiving end on a (unidirectional) sharedmap channel #[derive(Debug)] -pub struct LlmpReceiver -where - SP: ShMemProvider, -{ +pub struct LlmpReceiver { /// Client Id of this receiver id: ClientId, /// Pointer to the last message received @@ -1600,15 +1594,16 @@ where /// The shmem provider shmem_provider: SP, /// current page. After EOP, this gets replaced with the new one - current_recv_shmem: LlmpSharedMap, + current_recv_shmem: LlmpSharedMap, /// Caches the highest msg id we've seen so far highest_msg_id: MessageId, } /// Receiving end of an llmp channel -impl LlmpReceiver +impl LlmpReceiver where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Reattach to a vacant `recv_shmem`, to with a previous sender stored the information in an env before. #[cfg(feature = "std")] @@ -1634,7 +1629,7 @@ where /// else reattach will get a new, empty page, from the OS, or fail. 
pub fn on_existing_shmem( shmem_provider: SP, - current_sender_shmem: SP::ShMem, + current_sender_shmem: SHM, last_msg_recvd_offset: Option, ) -> Result { let mut current_recv_shmem = LlmpSharedMap::existing(current_sender_shmem); @@ -1897,10 +1892,7 @@ where /// A page wrapper #[derive(Clone, Debug)] -pub struct LlmpSharedMap -where - SHM: ShMem, -{ +pub struct LlmpSharedMap { /// Shmem containg the actual (unsafe) page, /// shared between one `LlmpSender` and one `LlmpReceiver` shmem: SHM, @@ -2050,18 +2042,15 @@ where /// The inner state of [`LlmpBroker`] #[derive(Debug)] -pub struct LlmpBrokerInner -where - SP: ShMemProvider, -{ +pub struct LlmpBrokerInner { /// Broadcast map from broker to all clients - llmp_out: LlmpSender, + llmp_out: LlmpSender, /// Users of Llmp can add message handlers in the broker. /// This allows us to intercept messages right in the broker. /// This keeps the out map clean. /// The backing values of `llmp_clients` [`ClientId`]s will always be sorted (but not gapless) /// Make sure to always increase `num_clients_seen` when pushing a new [`LlmpReceiver`] to `llmp_clients`! - llmp_clients: Vec>, + llmp_clients: Vec>, /// The own listeners we spawned via `launch_listener` or `crate_attach_to_tcp`. /// Listeners will be ignored for `exit_cleanly_after` and they are never considered to have timed out. listeners: Vec, @@ -2078,12 +2067,9 @@ where /// The broker (node 0) #[derive(Debug)] -pub struct LlmpBroker -where - SP: ShMemProvider, -{ +pub struct LlmpBroker { /// The broker - inner: LlmpBrokerInner, + inner: LlmpBrokerInner, /// Llmp hooks hooks: HT, } @@ -2118,10 +2104,11 @@ pub trait Broker { fn nb_listeners(&self) -> usize; } -impl Broker for LlmpBroker +impl Broker for LlmpBroker where - HT: LlmpHookTuple, - SP: ShMemProvider, + HT: LlmpHookTuple, + SHM: ShMem, + SP: ShMemProvider, { fn is_shutting_down(&self) -> bool { self.inner.is_shutting_down() @@ -2215,15 +2202,12 @@ impl CtrlHandler for LlmpShutdownSignalHandler { } /// Llmp hooks -pub trait LlmpHook -where - SP: ShMemProvider, -{ +pub trait LlmpHook { /// Hook called whenever a new message is received. It receives an llmp message as input, does /// something with it (read, transform, forward, etc...) and decides to discard it or not. fn on_new_message( &mut self, - broker_inner: &mut LlmpBrokerInner, + broker_inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, msg_flags: &mut Flags, @@ -2238,14 +2222,11 @@ where } /// A tuple of Llmp hooks. They are evaluated sequentially, and returns if one decides to filter out the evaluated message. -pub trait LlmpHookTuple -where - SP: ShMemProvider, -{ +pub trait LlmpHookTuple { /// Call all hook callbacks on new message. 
fn on_new_message_all( &mut self, - inner: &mut LlmpBrokerInner, + inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, msg_flags: &mut Flags, @@ -2257,13 +2238,10 @@ where fn on_timeout_all(&mut self) -> Result<(), Error>; } -impl LlmpHookTuple for () -where - SP: ShMemProvider, -{ +impl LlmpHookTuple for () { fn on_new_message_all( &mut self, - _inner: &mut LlmpBrokerInner, + _inner: &mut LlmpBrokerInner, _client_id: ClientId, _msg_tag: &mut Tag, _msg_flags: &mut Flags, @@ -2278,15 +2256,14 @@ where } } -impl LlmpHookTuple for (Head, Tail) +impl LlmpHookTuple for (Head, Tail) where - Head: LlmpHook, - Tail: LlmpHookTuple, - SP: ShMemProvider, + Head: LlmpHook, + Tail: LlmpHookTuple, { fn on_new_message_all( &mut self, - inner: &mut LlmpBrokerInner, + inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, msg_flags: &mut Flags, @@ -2315,15 +2292,12 @@ where } } -impl LlmpBroker<(), SP> -where - SP: ShMemProvider, -{ +impl LlmpBroker<(), SHM, SP> { /// Add hooks to a hookless [`LlmpBroker`]. /// We do not support replacing hooks for now. - pub fn add_hooks(self, hooks: HT) -> LlmpBroker + pub fn add_hooks(self, hooks: HT) -> LlmpBroker where - HT: LlmpHookTuple, + HT: LlmpHookTuple, { LlmpBroker { inner: self.inner, @@ -2446,10 +2420,11 @@ impl Brokers { } } -impl LlmpBroker +impl LlmpBroker where - HT: LlmpHookTuple, - SP: ShMemProvider, + HT: LlmpHookTuple, + SHM: ShMem, + SP: ShMemProvider, { /// Create and initialize a new [`LlmpBroker`], associated with some hooks. pub fn new(shmem_provider: SP, hooks: HT) -> Result { @@ -2496,12 +2471,12 @@ where } /// Get the inner state of the broker - pub fn inner(&self) -> &LlmpBrokerInner { + pub fn inner(&self) -> &LlmpBrokerInner { &self.inner } /// Get the inner mutable state of the broker - pub fn inner_mut(&mut self) -> &mut LlmpBrokerInner { + pub fn inner_mut(&mut self) -> &mut LlmpBrokerInner { &mut self.inner } @@ -2829,9 +2804,10 @@ where /// The broker forwards all messages to its own bus-like broadcast map. /// It may intercept messages passing through. -impl LlmpBrokerInner +impl LlmpBrokerInner where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Create and initialize a new [`LlmpBrokerInner`], associated with some hooks. pub fn new(shmem_provider: SP) -> Result { @@ -2917,7 +2893,7 @@ where /// Will increase `num_clients_seen`. /// The backing values of `llmp_clients` [`ClientId`]s will always be sorted (but not gapless) /// returns the [`ClientId`] of the new client. - pub fn add_client(&mut self, mut client_receiver: LlmpReceiver) -> ClientId { + pub fn add_client(&mut self, mut client_receiver: LlmpReceiver) -> ClientId { let id = self.peek_next_client_id(); client_receiver.id = id; self.llmp_clients.push(client_receiver); @@ -2932,7 +2908,7 @@ where /// Registers a new client for the given sharedmap str and size. /// Returns the id of the new client in [`broker.client_shmem`] - pub fn register_client(&mut self, mut client_page: LlmpSharedMap) -> ClientId { + pub fn register_client(&mut self, mut client_page: LlmpSharedMap) -> ClientId { // Tell the client it may unmap its initial allocated shmem page now. // Since we now have a handle to it, it won't be umapped too early (only after we also unmap it) client_page.mark_safe_to_unmap(); @@ -3090,7 +3066,7 @@ where /// Upon receiving this message, the broker should map the announced page and start tracking it for new messages. 
#[cfg(feature = "std")] fn announce_new_client( - sender: &mut LlmpSender, + sender: &mut LlmpSender, shmem_description: &ShMemDescription, ) -> Result<(), Error> { unsafe { @@ -3108,7 +3084,7 @@ where /// Tell the broker to disconnect this client from it. #[cfg(feature = "std")] - fn announce_client_exit(sender: &mut LlmpSender, client_id: u32) -> Result<(), Error> { + fn announce_client_exit(sender: &mut LlmpSender, client_id: u32) -> Result<(), Error> { // # Safety // No user-provided potentially unsafe parameters. unsafe { @@ -3280,7 +3256,7 @@ where mut stream: TcpStream, request: &TcpRequest, current_client_id: &mut ClientId, - sender: &mut LlmpSender, + sender: &mut LlmpSender, broker_shmem_description: &ShMemDescription, ) { match request { @@ -3451,21 +3427,19 @@ pub struct LlmpClientDescription { /// Client side of LLMP #[derive(Debug)] -pub struct LlmpClient -where - SP: ShMemProvider, -{ +pub struct LlmpClient { /// Outgoing channel to the broker - sender: LlmpSender, + sender: LlmpSender, /// Incoming (broker) broadcast map - receiver: LlmpReceiver, + receiver: LlmpReceiver, } /// `n` clients connect to a broker. They share an outgoing map with the broker, /// and get incoming messages from the shared broker bus -impl LlmpClient +impl LlmpClient where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Reattach to a vacant client map. /// It is essential, that the broker (or someone else) kept a pointer to the `out_shmem` @@ -3473,9 +3447,9 @@ where #[allow(clippy::needless_pass_by_value)] // no longer necessary on nightly pub fn on_existing_shmem( shmem_provider: SP, - _current_out_shmem: SP::ShMem, + _current_out_shmem: SHM, _last_msg_sent_offset: Option, - current_broker_shmem: SP::ShMem, + current_broker_shmem: SHM, last_msg_recvd_offset: Option, ) -> Result { Ok(Self { @@ -3542,25 +3516,25 @@ where /// Outgoing channel to the broker #[must_use] - pub fn sender(&self) -> &LlmpSender { + pub fn sender(&self) -> &LlmpSender { &self.sender } /// Outgoing channel to the broker (mut) #[must_use] - pub fn sender_mut(&mut self) -> &mut LlmpSender { + pub fn sender_mut(&mut self) -> &mut LlmpSender { &mut self.sender } /// Incoming (broker) broadcast map #[must_use] - pub fn receiver(&self) -> &LlmpReceiver { + pub fn receiver(&self) -> &LlmpReceiver { &self.receiver } /// Incoming (broker) broadcast map (mut) #[must_use] - pub fn receiver_mut(&mut self) -> &mut LlmpReceiver { + pub fn receiver_mut(&mut self) -> &mut LlmpReceiver { &mut self.receiver } @@ -3588,7 +3562,7 @@ where /// Creates a new [`LlmpClient`] pub fn new( mut shmem_provider: SP, - initial_broker_shmem: LlmpSharedMap, + initial_broker_shmem: LlmpSharedMap, sender_id: ClientId, ) -> Result { Ok(Self { diff --git a/libafl_bolts/src/os/unix_shmem_server.rs b/libafl_bolts/src/os/unix_shmem_server.rs index 2fef01ab84..d0b74e6268 100644 --- a/libafl_bolts/src/os/unix_shmem_server.rs +++ b/libafl_bolts/src/os/unix_shmem_server.rs @@ -11,6 +11,7 @@ use alloc::{ vec::Vec, }; use core::{ + fmt::Debug, mem::ManuallyDrop, ops::{Deref, DerefMut}, }; @@ -60,10 +61,7 @@ const AFL_SHMEM_SERVICE_STARTED: &str = "AFL_SHMEM_SERVICE_STARTED"; /// s out served shared maps, as used on Android. #[derive(Debug)] -pub struct ServedShMemProvider -where - SP: ShMemProvider, -{ +pub struct ServedShMemProvider { stream: UnixStream, inner: SP, id: i32, @@ -76,17 +74,14 @@ where /// [`ShMem`] that got served from a [`ShMemService`] via domain sockets and can now be used in this program. 
/// It works around Android's lack of "proper" shared maps. #[derive(Clone, Debug)] -pub struct ServedShMem -where - SH: ShMem, -{ - inner: ManuallyDrop, +pub struct ServedShMem { + inner: ManuallyDrop, server_fd: i32, } -impl Deref for ServedShMem +impl Deref for ServedShMem where - SH: ShMem, + SHM: Deref, { type Target = [u8]; @@ -95,18 +90,18 @@ where } } -impl DerefMut for ServedShMem +impl DerefMut for ServedShMem where - SH: ShMem, + SHM: DerefMut, { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl ShMem for ServedShMem +impl ShMem for ServedShMem where - SH: ShMem, + SHM: ShMem, { fn id(&self) -> ShMemId { let client_id = self.inner.id(); @@ -114,10 +109,7 @@ where } } -impl ServedShMemProvider -where - SP: ShMemProvider, -{ +impl ServedShMemProvider { /// Send a request to the server, and wait for a response #[expect(clippy::similar_names)] // id and fd fn send_receive(&mut self, request: ServedShMemRequest) -> Result<(i32, i32), Error> { @@ -290,18 +282,12 @@ pub enum ServedShMemRequest { /// Client side communicating with the [`ShMemServer`] #[derive(Debug)] -struct SharedShMemClient -where - SH: ShMem, -{ +struct SharedShMemClient { stream: UnixStream, - maps: HashMap>>>, + maps: HashMap>>>, } -impl SharedShMemClient -where - SH: ShMem, -{ +impl SharedShMemClient { fn new(stream: UnixStream) -> Self { Self { stream, @@ -312,11 +298,8 @@ where /// Response from Server to Client #[derive(Debug)] -enum ServedShMemResponse -where - SP: ShMemProvider, -{ - Mapping(Rc>), +enum ServedShMemResponse { + Mapping(Rc>), Id(i32), RefCount(u32), } @@ -332,22 +315,19 @@ enum ShMemServiceStatus { /// The [`ShMemService`] is a service handing out [`ShMem`] pages via unix domain sockets. /// It is mainly used and needed on Android. #[derive(Debug, Clone)] -pub enum ShMemService -where - SP: ShMemProvider, -{ +pub enum ShMemService { /// A started service Started { /// The background thread bg_thread: Arc>, - /// The pantom data + /// The phantom data phantom: PhantomData, }, /// A failed service Failed { /// The error message err_msg: String, - /// The pantom data + /// The phantom data phantom: PhantomData, }, } @@ -412,7 +392,7 @@ where let syncpair = Arc::new((Mutex::new(ShMemServiceStatus::Starting), Condvar::new())); let childsyncpair = Arc::clone(&syncpair); let join_handle = thread::spawn(move || { - let mut worker = match ServedShMemServiceWorker::::new() { + let mut worker = match ServedShMemServiceWorker::::new() { Ok(worker) => worker, Err(e) => { // Make sure the parent processes can continue @@ -472,20 +452,18 @@ where /// The struct for the worker, handling incoming requests for [`ShMem`]. #[expect(clippy::type_complexity)] -struct ServedShMemServiceWorker -where - SP: ShMemProvider, -{ +struct ServedShMemServiceWorker { provider: SP, - clients: HashMap>, + clients: HashMap>, /// Maps from a pre-fork (parent) client id to its cloned maps. - forking_clients: HashMap>>>>, - all_shmems: HashMap>>, + forking_clients: HashMap>>>>, + all_shmems: HashMap>>, } -impl ServedShMemServiceWorker +impl ServedShMemServiceWorker where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Create a new [`ShMemService`] fn new() -> Result { @@ -497,7 +475,7 @@ where }) } - fn upgrade_shmem_with_id(&mut self, description_id: i32) -> Rc> { + fn upgrade_shmem_with_id(&mut self, description_id: i32) -> Rc> { self.all_shmems .get_mut(&description_id) .unwrap() @@ -507,7 +485,7 @@ where } /// Read and handle the client request, send the answer over unix fd. 
- fn handle_request(&mut self, client_id: RawFd) -> Result, Error> { + fn handle_request(&mut self, client_id: RawFd) -> Result, Error> { let request = self.read_request(client_id)?; // log::trace!("got ashmem client: {}, request:{:?}", client_id, request); diff --git a/libafl_bolts/src/ownedref.rs b/libafl_bolts/src/ownedref.rs index c271dba53e..d15046d352 100644 --- a/libafl_bolts/src/ownedref.rs +++ b/libafl_bolts/src/ownedref.rs @@ -190,7 +190,7 @@ where /// # Safety /// The shared memory needs to start with a valid object of type `T`. /// Any use of this [`OwnedRef`] will dereference a pointer to the shared memory accordingly. - pub unsafe fn from_shmem(shmem: &mut S) -> Self { + pub unsafe fn from_shmem(shmem: &mut SHM) -> Self { Self::from_ptr(shmem.as_mut_ptr_of().unwrap()) } @@ -325,7 +325,7 @@ where /// # Safety /// The shared memory needs to start with a valid object of type `T`. /// Any use of this [`OwnedRefMut`] will dereference a pointer to the shared memory accordingly. - pub unsafe fn from_shmem(shmem: &mut S) -> Self { + pub unsafe fn from_shmem(shmem: &mut SHM) -> Self { Self::from_mut_ptr(shmem.as_mut_ptr_of().unwrap()) } diff --git a/libafl_bolts/src/shmem.rs b/libafl_bolts/src/shmem.rs index 55a587b0ab..902c6f57be 100644 --- a/libafl_bolts/src/shmem.rs +++ b/libafl_bolts/src/shmem.rs @@ -32,25 +32,50 @@ pub use win32_shmem::{Win32ShMem, Win32ShMemProvider}; #[cfg(all(unix, feature = "std", not(target_os = "haiku")))] use crate::os::pipes::Pipe; #[cfg(all(feature = "std", unix, not(target_os = "haiku")))] -pub use crate::os::unix_shmem_server::{ServedShMemProvider, ShMemService}; +pub use crate::os::unix_shmem_server::{ServedShMem, ServedShMemProvider, ShMemService}; use crate::Error; /// The standard sharedmem provider #[cfg(all(windows, feature = "std"))] pub type StdShMemProvider = Win32ShMemProvider; +/// The standard sharedmem +#[cfg(all(windows, feature = "std"))] +pub type StdShMem = Win32ShMem; + +/// The standard sharedmem +#[cfg(all(target_os = "android", feature = "std"))] +pub type StdShMem = RcShMem< + ServedShMem, + ServedShMemProvider, +>; + /// The standard sharedmem provider #[cfg(all(target_os = "android", feature = "std"))] pub type StdShMemProvider = RcShMemProvider>; + /// The standard sharedmem service #[cfg(all(target_os = "android", feature = "std"))] pub type StdShMemService = ShMemService; + +/// The standard sharedmem +#[cfg(all(feature = "std", target_vendor = "apple"))] +pub type StdShMem = RcShMem, ServedShMemProvider>; + /// The standard sharedmem provider #[cfg(all(feature = "std", target_vendor = "apple"))] pub type StdShMemProvider = RcShMemProvider>; #[cfg(all(feature = "std", target_vendor = "apple"))] /// The standard sharedmem service pub type StdShMemService = ShMemService; + +/// The default [`ShMem`]. +#[cfg(all( + feature = "std", + unix, + not(any(target_os = "android", target_vendor = "apple", target_os = "haiku")) +))] +pub type StdShMem = UnixShMem; /// The default [`ShMemProvider`] for this os. #[cfg(all( feature = "std", @@ -392,15 +417,19 @@ impl Deref for NopShMem { /// Useful if the `ShMemProvider` needs to keep local state. 
#[cfg(feature = "alloc")] #[derive(Debug, Clone, Default)] -pub struct RcShMem { - internal: ManuallyDrop, - provider: Rc>, +pub struct RcShMem +where + SHM: ShMem, + SP: ShMemProvider, +{ + internal: ManuallyDrop, + provider: Rc>, } #[cfg(feature = "alloc")] -impl ShMem for RcShMem +impl ShMem for RcShMem where - T: ShMemProvider + Debug, + SP: ShMemProvider, { fn id(&self) -> ShMemId { self.internal.id() @@ -408,9 +437,10 @@ where } #[cfg(feature = "alloc")] -impl Deref for RcShMem +impl Deref for RcShMem where - T: ShMemProvider + Debug, + SHM: ShMem, + SP: ShMemProvider, { type Target = [u8]; @@ -420,9 +450,10 @@ where } #[cfg(feature = "alloc")] -impl DerefMut for RcShMem +impl DerefMut for RcShMem where - T: ShMemProvider + Debug, + SHM: ShMem, + SP: ShMemProvider, { fn deref_mut(&mut self) -> &mut [u8] { &mut self.internal @@ -430,7 +461,11 @@ where } #[cfg(feature = "alloc")] -impl Drop for RcShMem { +impl Drop for RcShMem +where + SHM: ShMem, + SP: ShMemProvider, +{ fn drop(&mut self) { self.provider.borrow_mut().release_shmem(&mut self.internal); } @@ -441,10 +476,7 @@ impl Drop for RcShMem { /// Useful if the `ShMemProvider` needs to keep local state. #[derive(Debug, Clone)] #[cfg(all(unix, feature = "std", not(target_os = "haiku")))] -pub struct RcShMemProvider -where - SP: ShMemProvider, -{ +pub struct RcShMemProvider { /// The wrapped [`ShMemProvider`]. internal: Rc>, /// A pipe the child uses to communicate progress to the parent after fork. @@ -457,15 +489,12 @@ where parent_child_pipe: Option, } -//#[cfg(all(unix, feature = "std"))] -//unsafe impl Send for RcShMemProvider {} - #[cfg(all(unix, feature = "std", not(target_os = "haiku")))] impl ShMemProvider for RcShMemProvider where SP: ShMemProvider + Debug, { - type ShMem = RcShMem; + type ShMem = RcShMem; fn new() -> Result { Ok(Self { @@ -535,10 +564,7 @@ where } #[cfg(all(unix, feature = "std", not(target_os = "haiku")))] -impl RcShMemProvider -where - SP: ShMemProvider, -{ +impl RcShMemProvider { /// "set" the "latch" /// (we abuse `pipes` as `semaphores`, as they don't need an additional shared mem region.) 
fn pipe_set(pipe: &mut Option) -> Result<(), Error> { @@ -599,7 +625,7 @@ where #[cfg(all(unix, feature = "std", not(target_os = "haiku")))] impl Default for RcShMemProvider where - SP: ShMemProvider + Debug, + SP: ShMemProvider, { fn default() -> Self { Self::new().unwrap() @@ -607,10 +633,7 @@ where } #[cfg(all(unix, feature = "std", not(target_os = "haiku")))] -impl RcShMemProvider> -where - SP: ShMemProvider + Debug, -{ +impl RcShMemProvider> { /// Forward to `ServedShMemProvider::on_restart` pub fn on_restart(&mut self) { self.internal.borrow_mut().on_restart(); @@ -1010,16 +1033,15 @@ pub mod unix_shmem { impl CommonUnixShMem { /// Create a new shared memory mapping, using shmget/shmat - #[expect(unused_qualifications)] pub fn new(map_size: usize) -> Result { #[cfg(any(target_os = "solaris", target_os = "illumos"))] - const SHM_R: libc::c_int = 0o400; + const SHM_R: c_int = 0o400; #[cfg(not(any(target_os = "solaris", target_os = "illumos")))] - const SHM_R: libc::c_int = libc::SHM_R; + const SHM_R: c_int = libc::SHM_R; #[cfg(any(target_os = "solaris", target_os = "illumos"))] - const SHM_W: libc::c_int = 0o200; + const SHM_W: c_int = 0o200; #[cfg(not(any(target_os = "solaris", target_os = "illumos")))] - const SHM_W: libc::c_int = libc::SHM_W; + const SHM_W: c_int = libc::SHM_W; unsafe { let os_id = shmget( @@ -1206,7 +1228,7 @@ pub mod unix_shmem { //return Err(Error::unknown("Failed to set the ashmem mapping's name".to_string())); //}; - #[expect(trivial_numeric_casts)] + #[allow(trivial_numeric_casts)] if ioctl(fd, ASHMEM_SET_SIZE as _, map_size) != 0 { close(fd); return Err(Error::unknown( @@ -1241,7 +1263,8 @@ pub mod unix_shmem { pub fn shmem_from_id_and_size(id: ShMemId, map_size: usize) -> Result { unsafe { let fd: i32 = id.to_string().parse().unwrap(); - #[expect(trivial_numeric_casts, clippy::cast_sign_loss)] + #[allow(trivial_numeric_casts)] + #[expect(clippy::cast_sign_loss)] if ioctl(fd, ASHMEM_GET_SIZE as _) as u32 as usize != map_size { return Err(Error::unknown( "The mapping's size differs from the requested size".to_string(), @@ -1294,12 +1317,12 @@ pub mod unix_shmem { /// [`Drop`] implementation for [`AshmemShMem`], which cleans up the mapping. impl Drop for AshmemShMem { - #[expect(trivial_numeric_casts)] + #[allow(trivial_numeric_casts)] fn drop(&mut self) { unsafe { let fd: i32 = self.id.to_string().parse().unwrap(); - #[expect(trivial_numeric_casts)] + #[allow(trivial_numeric_casts)] #[expect(clippy::cast_sign_loss)] let length = ioctl(fd, ASHMEM_GET_SIZE as _) as u32; @@ -1729,15 +1752,15 @@ impl DummyShMemService { /// A cursor around [`ShMem`] that immitates [`std::io::Cursor`]. Notably, this implements [`Write`] for [`ShMem`] in std environments. 
#[cfg(feature = "std")] #[derive(Debug)] -pub struct ShMemCursor { - inner: T, +pub struct ShMemCursor { + inner: SHM, pos: usize, } #[cfg(all(feature = "std", not(target_os = "haiku")))] -impl ShMemCursor { +impl ShMemCursor { /// Create a new [`ShMemCursor`] around [`ShMem`] - pub fn new(shmem: T) -> Self { + pub fn new(shmem: SHM) -> Self { Self { inner: shmem, pos: 0, @@ -1745,14 +1768,20 @@ impl ShMemCursor { } /// Slice from the current location on this map to the end, mutable - fn empty_slice_mut(&mut self) -> &mut [u8] { + fn empty_slice_mut(&mut self) -> &mut [u8] + where + SHM: DerefMut, + { use crate::AsSliceMut; &mut (self.inner.as_slice_mut()[self.pos..]) } } #[cfg(all(feature = "std", not(target_os = "haiku")))] -impl Write for ShMemCursor { +impl Write for ShMemCursor +where + SHM: DerefMut, +{ fn write(&mut self, buf: &[u8]) -> std::io::Result { match self.empty_slice_mut().write(buf) { Ok(w) => { @@ -1763,10 +1792,6 @@ impl Write for ShMemCursor { } } - fn flush(&mut self) -> std::io::Result<()> { - Ok(()) - } - fn write_vectored(&mut self, bufs: &[std::io::IoSlice<'_>]) -> std::io::Result { match self.empty_slice_mut().write_vectored(bufs) { Ok(w) => { @@ -1777,6 +1802,10 @@ impl Write for ShMemCursor { } } + fn flush(&mut self) -> std::io::Result<()> { + Ok(()) + } + fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> { match self.empty_slice_mut().write_all(buf) { Ok(w) => { @@ -1789,7 +1818,10 @@ impl Write for ShMemCursor { } #[cfg(feature = "std")] -impl std::io::Seek for ShMemCursor { +impl std::io::Seek for ShMemCursor +where + SHM: DerefMut, +{ fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { let effective_new_pos = match pos { std::io::SeekFrom::Start(s) => s, diff --git a/libafl_bolts/src/staterestore.rs b/libafl_bolts/src/staterestore.rs index fbe27c492e..eabaa59579 100644 --- a/libafl_bolts/src/staterestore.rs +++ b/libafl_bolts/src/staterestore.rs @@ -65,17 +65,15 @@ impl StateShMemContent { /// it will instead write to disk, and store the file name into the map. /// Writing to [`StateRestorer`] multiple times is not allowed. #[derive(Debug, Clone)] -pub struct StateRestorer -where - SP: ShMemProvider, -{ - shmem: SP::ShMem, +pub struct StateRestorer { + shmem: SHM, phantom: PhantomData<*const SP>, } -impl StateRestorer +impl StateRestorer where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Get the map size backing this [`StateRestorer`]. pub fn mapsize(&self) -> usize { @@ -96,7 +94,7 @@ where } /// Create a new [`StateRestorer`]. 
- pub fn new(shmem: SP::ShMem) -> Self { + pub fn new(shmem: SHM) -> Self { let mut ret = Self { shmem, phantom: PhantomData, @@ -268,7 +266,7 @@ where File::open(tmpfile)?.read_to_end(&mut file_content)?; if file_content.is_empty() { return Err(Error::illegal_state(format!( - "Colud not restore state from file {}", + "Could not restore state from file {}", &filename ))); } @@ -296,7 +294,7 @@ mod tests { }; use crate::{ - shmem::{ShMemProvider, StdShMemProvider}, + shmem::{ShMemProvider, StdShMem, StdShMemProvider}, staterestore::StateRestorer, }; @@ -304,7 +302,7 @@ mod tests { let mut shmem_provider = StdShMemProvider::new().unwrap(); let shmem = shmem_provider.new_shmem(TESTMAP_SIZE).unwrap(); - let mut state_restorer = StateRestorer::::new(shmem); + let mut state_restorer = StateRestorer::::new(shmem); let state = "hello world".to_string(); diff --git a/libafl_concolic/symcc_runtime/src/lib.rs b/libafl_concolic/symcc_runtime/src/lib.rs index 7746364cb6..00c02e94b4 100644 --- a/libafl_concolic/symcc_runtime/src/lib.rs +++ b/libafl_concolic/symcc_runtime/src/lib.rs @@ -46,6 +46,7 @@ pub mod cpp_runtime { #[doc(hidden)] pub use ctor::ctor; use libafl::observers::concolic; +pub use libafl_bolts::shmem::StdShMem; #[doc(hidden)] pub use libc::atexit; #[doc(hidden)] diff --git a/libafl_concolic/symcc_runtime/src/tracing.rs b/libafl_concolic/symcc_runtime/src/tracing.rs index d456407340..5b0cdf3113 100644 --- a/libafl_concolic/symcc_runtime/src/tracing.rs +++ b/libafl_concolic/symcc_runtime/src/tracing.rs @@ -2,23 +2,30 @@ pub use libafl::observers::concolic::serialization_format::StdShMemMessageFileWriter; use libafl::observers::concolic::SymExpr; +use libafl_bolts::shmem::ShMem; use crate::{RSymExpr, Runtime}; /// Traces the expressions according to the format described in [`libafl::observers::concolic::serialization_format`]. /// /// The format can be read from elsewhere to perform processing of the expressions outside of the runtime. -pub struct TracingRuntime { - writer: StdShMemMessageFileWriter, +pub struct TracingRuntime +where + SHM: ShMem, +{ + writer: StdShMemMessageFileWriter, trace_locations: bool, } -impl TracingRuntime { +impl TracingRuntime +where + SHM: ShMem, +{ /// Creates the runtime, tracing using the given writer. /// When `trace_locations` is true, location information for calls, returns and basic blocks will also be part of the trace. /// Tracing location information can drastically increase trace size. It is therefore recommended to not active this if not needed. #[must_use] - pub fn new(writer: StdShMemMessageFileWriter, trace_locations: bool) -> Self { + pub fn new(writer: StdShMemMessageFileWriter, trace_locations: bool) -> Self { Self { writer, trace_locations, @@ -62,7 +69,10 @@ macro_rules! 
binary_expression_builder { }; } -impl Runtime for TracingRuntime { +impl Runtime for TracingRuntime +where + SHM: ShMem, +{ #[no_mangle] fn build_integer_from_buffer( &mut self, @@ -201,7 +211,10 @@ impl Runtime for TracingRuntime { } } -impl Drop for TracingRuntime { +impl Drop for TracingRuntime +where + SHM: ShMem, +{ fn drop(&mut self) { // manually end the writer to update the length prefix self.writer diff --git a/libafl_concolic/test/runtime_test/src/lib.rs b/libafl_concolic/test/runtime_test/src/lib.rs index 56c1ce003c..d6ec636d97 100644 --- a/libafl_concolic/test/runtime_test/src/lib.rs +++ b/libafl_concolic/test/runtime_test/src/lib.rs @@ -7,9 +7,11 @@ use symcc_runtime::{ export_runtime, filter::NoFloat, tracing::{self, StdShMemMessageFileWriter}, - Runtime, + Runtime, StdShMem, }; +// use libafl_bolts::StdShmem; + export_runtime!( NoFloat => NoFloat; tracing::TracingRuntime::new( @@ -17,5 +19,5 @@ export_runtime!( .expect("unable to construct tracing runtime writer. (missing env?)"), false ) - => tracing::TracingRuntime + => tracing::TracingRuntime ); diff --git a/libafl_libfuzzer/runtime/src/fuzz.rs b/libafl_libfuzzer/runtime/src/fuzz.rs index 3950cd8eb5..9d698afb87 100644 --- a/libafl_libfuzzer/runtime/src/fuzz.rs +++ b/libafl_libfuzzer/runtime/src/fuzz.rs @@ -111,7 +111,7 @@ where fuzz_with!(options, harness, do_fuzz, |fuzz_single| { let (state, mgr): ( Option>, - SimpleRestartingEventManager<_, _, StdState<_, _, _, _>, _>, + SimpleRestartingEventManager<_, _, StdState<_, _, _, _>, _, _>, ) = match SimpleRestartingEventManager::launch(monitor, &mut shmem_provider) { // The restarting state will spawn the same process again as child, then restarted it each time it crashes. Ok(res) => res, diff --git a/libafl_libfuzzer/runtime/src/merge.rs b/libafl_libfuzzer/runtime/src/merge.rs index 31c42f1b2b..d7917a6039 100644 --- a/libafl_libfuzzer/runtime/src/merge.rs +++ b/libafl_libfuzzer/runtime/src/merge.rs @@ -69,7 +69,7 @@ pub fn merge( let (state, mut mgr): ( Option>, - SimpleRestartingEventManager<_, _, StdState<_, _, _, _>, _>, + SimpleRestartingEventManager<_, _, StdState<_, _, _, _>, _, _>, ) = match SimpleRestartingEventManager::launch(monitor, &mut shmem_provider) { // The restarting state will spawn the same process again as child, then restarted it each time it crashes. 
Ok(res) => res, diff --git a/libafl_qemu/src/executor.rs b/libafl_qemu/src/executor.rs index 08f133f279..6f3bd4abf3 100644 --- a/libafl_qemu/src/executor.rs +++ b/libafl_qemu/src/executor.rs @@ -43,7 +43,7 @@ use crate::Qemu; use crate::{command::CommandManager, modules::EmulatorModuleTuple, Emulator, EmulatorDriver}; type EmulatorInProcessExecutor<'a, C, CM, ED, ET, H, I, OT, S, SM> = - StatefulInProcessExecutor<'a, H, I, OT, S, Emulator>; + StatefulInProcessExecutor<'a, Emulator, H, I, OT, S>; pub struct QemuExecutor<'a, C, CM, ED, ET, H, I, OT, S, SM> { inner: EmulatorInProcessExecutor<'a, C, CM, ED, ET, H, I, OT, S, SM>, @@ -207,7 +207,7 @@ where } inner.inprocess_hooks_mut().timeout_handler = inproc_qemu_timeout_handler::< - StatefulInProcessExecutor<'a, H, I, OT, S, Emulator>, + StatefulInProcessExecutor<'a, Emulator, H, I, OT, S>, EM, ET, I, @@ -299,9 +299,10 @@ where } pub type QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> = - StatefulInProcessForkExecutor<'a, H, I, OT, S, SP, Emulator, EM, Z>; + StatefulInProcessForkExecutor<'a, EM, Emulator, H, I, OT, S, SP, Z>; #[cfg(feature = "fork")] +#[expect(clippy::type_complexity)] pub struct QemuForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> { inner: QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z>, } @@ -313,12 +314,13 @@ where C: Debug, CM: Debug, ED: Debug, + EM: Debug, ET: EmulatorModuleTuple + Debug, OT: ObserversTuple + Debug, I: Debug, S: Debug, SM: Debug, - SP: ShMemProvider, + SP: Debug, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("QemuForkExecutor") @@ -437,7 +439,6 @@ impl HasObservers where ET: EmulatorModuleTuple, OT: ObserversTuple, - SP: ShMemProvider, { type Observers = OT; #[inline] diff --git a/libafl_sugar/src/forkserver.rs b/libafl_sugar/src/forkserver.rs index e21a78d9c5..2e285897ee 100644 --- a/libafl_sugar/src/forkserver.rs +++ b/libafl_sugar/src/forkserver.rs @@ -119,7 +119,7 @@ impl ForkserverBytesCoverageSugar<'_> { let time_ref = time_observer.handle(); let mut run_client = |state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _core_id| { let time_observer = time_observer.clone(); diff --git a/libafl_sugar/src/inmemory.rs b/libafl_sugar/src/inmemory.rs index 2a9b41ad7b..e26765a26d 100644 --- a/libafl_sugar/src/inmemory.rs +++ b/libafl_sugar/src/inmemory.rs @@ -147,7 +147,7 @@ where let time_ref = time_observer.handle(); let mut run_client = |state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _core_id| { let time_observer = time_observer.clone(); diff --git a/libafl_sugar/src/qemu.rs b/libafl_sugar/src/qemu.rs index fa4be32f8a..50c8d3a284 100644 --- a/libafl_sugar/src/qemu.rs +++ b/libafl_sugar/src/qemu.rs @@ -150,7 +150,7 @@ where let time_ref = time_observer.handle(); let mut run_client = |state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _core_id| { let time_observer = time_observer.clone(); diff --git a/libafl_tinyinst/src/executor.rs b/libafl_tinyinst/src/executor.rs index 03381bd172..871f7d258b 100644 --- a/libafl_tinyinst/src/executor.rs +++ b/libafl_tinyinst/src/executor.rs @@ -1,4 +1,5 @@ use core::{marker::PhantomData, ptr, time::Duration}; +use std::fmt::{Debug, Formatter}; use libafl::{ executors::{Executor, ExitKind, HasObservers}, @@ -8,27 +9,24 @@ use libafl::{ }; use libafl_bolts::{ 
fs::{InputFile, INPUTFILE_STD}, - shmem::{NopShMemProvider, ShMem, ShMemProvider}, + shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider}, tuples::RefIndexable, AsSlice, AsSliceMut, }; use tinyinst::tinyinst::{litecov::RunResult, TinyInst}; /// [`TinyInst`](https://github.com/googleprojectzero/TinyInst) executor -pub struct TinyInstExecutor -where - SP: ShMemProvider, -{ +pub struct TinyInstExecutor { tinyinst: TinyInst, coverage_ptr: *mut Vec, timeout: Duration, observers: OT, phantom: PhantomData, cur_input: InputFile, - map: Option<::ShMem>, + map: Option, } -impl TinyInstExecutor<(), NopShMemProvider, ()> { +impl TinyInstExecutor<(), NopShMem, ()> { /// Create a builder for [`TinyInstExecutor`] #[must_use] pub fn builder<'a>() -> TinyInstExecutorBuilder<'a, NopShMemProvider> { @@ -36,22 +34,19 @@ impl TinyInstExecutor<(), NopShMemProvider, ()> { } } -impl std::fmt::Debug for TinyInstExecutor -where - SP: ShMemProvider, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl Debug for TinyInstExecutor { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { f.debug_struct("TinyInstExecutor") .field("timeout", &self.timeout) .finish_non_exhaustive() } } -impl Executor for TinyInstExecutor +impl Executor for TinyInstExecutor where S: HasExecutions, I: HasTargetBytes, - SP: ShMemProvider, + SHM: ShMem, { #[inline] fn run_target( @@ -133,10 +128,7 @@ impl<'a> TinyInstExecutorBuilder<'a, NopShMemProvider> { /// Use this to enable shmem testcase passing. #[must_use] - pub fn shmem_provider( - self, - shmem_provider: &'a mut SP, - ) -> TinyInstExecutorBuilder<'a, SP> { + pub fn shmem_provider(self, shmem_provider: &'a mut SP) -> TinyInstExecutorBuilder<'a, SP> { TinyInstExecutorBuilder { tinyinst_args: self.tinyinst_args, program_args: self.program_args, @@ -246,7 +238,10 @@ where } /// Build [`TinyInst`](https://github.com/googleprojectzero/TinyInst) executor - pub fn build(&mut self, observers: OT) -> Result, Error> { + pub fn build( + &mut self, + observers: OT, + ) -> Result, Error> { if self.coverage_ptr.is_null() { return Err(Error::illegal_argument("Coverage pointer may not be null.")); } @@ -313,10 +308,7 @@ where } } -impl HasObservers for TinyInstExecutor -where - SP: ShMemProvider, -{ +impl HasObservers for TinyInstExecutor { type Observers = OT; fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> {
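Taken together, the pattern this patch applies is that structs store the concrete `SHM` mapping, while only the impl blocks that actually allocate shared memory still require a `ShMemProvider`. A minimal sketch of downstream construction code using the newly introduced `StdShMem` alias; the function name and map size are invented for illustration, and the `StateRestorer` parameter order follows the updated test in `staterestore.rs` above:
```rust
use libafl_bolts::{
    shmem::{ShMemProvider, StdShMem, StdShMemProvider},
    staterestore::StateRestorer,
    Error,
};

// Allocate a map with the standard provider and hand the concrete `StdShMem`
// to the restorer; the provider only remains as a type parameter, matching
// the `PhantomData<*const SP>` field kept in the struct.
fn make_restorer() -> Result<StateRestorer<StdShMem, StdShMemProvider>, Error> {
    let mut provider = StdShMemProvider::new()?;
    let shmem = provider.new_shmem(0x1000)?;
    Ok(StateRestorer::new(shmem))
}
```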