diff --git a/Cargo.lock b/Cargo.lock index bdf81f9e5..239d3e16e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1710,6 +1710,7 @@ dependencies = [ "prost", "prost-build", "rand", + "rusty_ulid", "serde", "serde_json", "sha2", diff --git a/bin/config.toml b/bin/config.toml index 0dfced877..1ed014095 100644 --- a/bin/config.toml +++ b/bin/config.toml @@ -33,7 +33,12 @@ log_target = "stdout" # optional different target for access logs (IP addresses, domains, URI, HTTP status, etc) # It supports the same options as log_target -# log_access_target = "file:///var/logs/sozu-access.log" +# access_logs_target = "file:///var/logs/sozu-access.log" + +# format of the access logs. Defaults to ascii. +# - ascii +# - protobuf (defined in [sozu_command_lib::proto::command::ProtobufAccessLog]) +# access_logs_format = "ascii" # path to the unix socket file used to send commands to sozu # default value points to "sozu.sock" file in the current directory diff --git a/bin/src/cli.rs b/bin/src/cli.rs index 3b100eb46..6e06784e6 100644 --- a/bin/src/cli.rs +++ b/bin/src/cli.rs @@ -130,8 +130,8 @@ pub enum SubCmd { }, #[clap(name = "logging", about = "change logging level")] Logging { - #[clap(subcommand)] - level: LoggingLevel, + #[clap(name = "filter")] + filter: String, }, #[clap(name = "state", about = "state management")] State { @@ -231,28 +231,6 @@ pub enum MetricsCmd { }, } -#[derive(Subcommand, PartialEq, Eq, Clone, Debug)] -pub enum LoggingLevel { - #[clap(name = "trace", about = "Displays a LOT of logs")] - Trace, - #[clap( - name = "debug", - about = "Displays more logs about the inner workings of Sōzu" - )] - Debug, - #[clap(name = "error", about = "Displays occurring errors")] - Error, - #[clap(name = "warn", about = "Displays warnings about non-critical errors")] - Warn, - #[clap(name = "info", about = "Displays logs about normal behaviour of Sōzu")] - Info, -} -impl std::fmt::Display for LoggingLevel { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{self:?}") - } -} - #[derive(Subcommand, PartialEq, Eq, Clone, Debug)] pub enum StateCmd { #[clap(name = "save", about = "Save state to that file")] diff --git a/bin/src/command/requests.rs b/bin/src/command/requests.rs index 4dc839885..c9fbe3939 100644 --- a/bin/src/command/requests.rs +++ b/bin/src/command/requests.rs @@ -1,5 +1,6 @@ use std::{ collections::{BTreeMap, HashMap}, + env, fs::File, io::{ErrorKind, Read}, }; @@ -216,17 +217,28 @@ fn save_state(server: &mut Server, client: &mut ClientSession, path: &str) { /// change logging level on the main process, and on all workers fn set_logging_level(server: &mut Server, client: &mut ClientSession, logging_filter: String) { debug!("Changing main process log level to {}", logging_filter); - logging::LOGGER.with(|l| { - let directives = logging::parse_logging_spec(&logging_filter); - l.borrow_mut().set_directives(directives); + let (directives, errors) = logging::parse_logging_spec(&logging_filter); + if !errors.is_empty() { + client.finish_failure(format!( + "Error parsing logging filter:\n- {}", + errors + .iter() + .map(logging::LogSpecParseError::to_string) + .collect::>() + .join("\n- ") + )); + return; + } + logging::LOGGER.with(|logger| { + logger.borrow_mut().set_directives(directives); }); // also change / set the content of RUST_LOG so future workers / main thread // will have the new logging filter value - ::std::env::set_var("RUST_LOG", &logging_filter); + env::set_var("RUST_LOG", &logging_filter); debug!( "Logging level now: {}", - 
::std::env::var("RUST_LOG").unwrap_or("could get RUST_LOG from env".to_string()) + env::var("RUST_LOG").unwrap_or("could get RUST_LOG from env".to_string()) ); worker_request(server, client, RequestType::Logging(logging_filter)); diff --git a/bin/src/command/server.rs b/bin/src/command/server.rs index a0d648c14..a20f70c34 100644 --- a/bin/src/command/server.rs +++ b/bin/src/command/server.rs @@ -1,6 +1,6 @@ use std::{ collections::{HashMap, HashSet}, - fmt::Debug, + fmt::{self, Debug}, io::Error as IoError, ops::{Deref, DerefMut}, os::fd::{AsRawFd, FromRawFd}, @@ -223,8 +223,8 @@ impl CommandHub { let channel = Channel::new(stream, 4096, u64::MAX); let id = self.next_client_id(); let session = ClientSession::new(channel, id, token); - info!("register new client: {}", id); - debug!("new client: {:?}", session); + info!("Register new client: {}", id); + debug!("{:#?}", session); self.clients.insert(token, session); } @@ -295,7 +295,7 @@ impl CommandHub { /// - manage timeouts of tasks pub fn run(&mut self) { let mut events = Events::with_capacity(100); - debug!("running the command hub: {:?}", self); + debug!("running the command hub: {:#?}", self); loop { let run_state = self.run_state; @@ -400,7 +400,8 @@ impl CommandHub { server.handle_client_request(client, request); } ClientResult::CloseSession => { - info!("Closing client {:#?}", client); + info!("Closing client {}", client.id); + debug!("{:#?}", client); self.event_subscribers.remove(&token); self.clients.remove(&token); } @@ -511,7 +512,6 @@ pub enum ServerState { /// - scatter to workers /// - gather worker responses /// - trigger a finishing function when all responses are gathered -#[derive(Debug)] pub struct Server { pub config: Config, /// Sōzu clients that subscribed to events @@ -857,3 +857,23 @@ impl Server { } } } + +impl Debug for Server { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Server") + .field("config", &self.config) + .field("event_subscribers", &self.event_subscribers) + .field("executable_path", &self.executable_path) + .field("in_flight", &self.in_flight) + .field("next_client_id", &self.next_client_id) + .field("next_session_id", &self.next_session_id) + .field("next_task_id", &self.next_task_id) + .field("next_worker_id", &self.next_worker_id) + .field("poll", &self.poll) + .field("queued_tasks", &self.queued_tasks) + .field("run_state", &self.run_state) + .field("unix_listener", &self.unix_listener) + .field("workers", &self.workers) + .finish() + } +} diff --git a/bin/src/ctl/mod.rs b/bin/src/ctl/mod.rs index 148f9081f..d89808266 100644 --- a/bin/src/ctl/mod.rs +++ b/bin/src/ctl/mod.rs @@ -126,7 +126,7 @@ impl CommandManager { } => self.get_metrics(list, refresh, names, clusters, backends, no_clusters), _ => self.configure_metrics(cmd), }, - SubCmd::Logging { level } => self.logging_filter(&level), + SubCmd::Logging { filter } => self.logging_filter(filter), SubCmd::State { cmd } => match cmd { StateCmd::Save { file } => self.save_state(file), StateCmd::Load { file } => self.load_state(file), diff --git a/bin/src/ctl/request_builder.rs b/bin/src/ctl/request_builder.rs index 1affeb0aa..eb07929d7 100644 --- a/bin/src/ctl/request_builder.rs +++ b/bin/src/ctl/request_builder.rs @@ -18,8 +18,8 @@ use sozu_command_lib::{ use crate::{ cli::{ - BackendCmd, ClusterCmd, HttpFrontendCmd, HttpListenerCmd, HttpsListenerCmd, LoggingLevel, - MetricsCmd, TcpFrontendCmd, TcpListenerCmd, + BackendCmd, ClusterCmd, HttpFrontendCmd, HttpListenerCmd, HttpsListenerCmd, MetricsCmd, + 
TcpFrontendCmd, TcpListenerCmd, }, ctl::CommandManager, }; @@ -192,7 +192,7 @@ impl CommandManager { } let query_domain = QueryClusterByDomain { - hostname: splitted.get(0).ok_or(CtlError::NeedClusterDomain)?.clone(), + hostname: splitted.first().ok_or(CtlError::NeedClusterDomain)?.clone(), path: splitted.get(1).cloned().map(|path| format!("/{path}")), // We add the / again because of the splitn removing it }; @@ -484,8 +484,8 @@ impl CommandManager { ) } - pub fn logging_filter(&mut self, filter: &LoggingLevel) -> Result<(), CtlError> { - self.send_request(RequestType::Logging(filter.to_string().to_lowercase()).into()) + pub fn logging_filter(&mut self, filter: String) -> Result<(), CtlError> { + self.send_request(RequestType::Logging(filter).into()) } pub fn add_certificate( diff --git a/bin/src/upgrade.rs b/bin/src/upgrade.rs index 1188e54d0..aeefee09b 100644 --- a/bin/src/upgrade.rs +++ b/bin/src/upgrade.rs @@ -7,7 +7,7 @@ use std::{ process::Command, }; -use libc::{self, pid_t}; +use libc::pid_t; use mio::net::UnixStream; use nix::{ errno::Errno, diff --git a/bin/src/util.rs b/bin/src/util.rs index b888bd970..a3ae645bd 100644 --- a/bin/src/util.rs +++ b/bin/src/util.rs @@ -10,7 +10,6 @@ use nix::{ errno::Errno, fcntl::{fcntl, FcntlArg, FdFlag}, }; -use thiserror; use sozu_command_lib::config::Config; use sozu_lib::metrics::{self, MetricError}; @@ -163,9 +162,10 @@ pub unsafe fn get_executable_path() -> Result { Ok(path_str) } + #[cfg(target_os = "macos")] extern "C" { - pub fn _NSGetExecutablePath(buf: *mut c_char, size: *mut u32) -> i32; + pub fn _NSGetExecutablePath(buf: *mut libc::c_char, size: *mut u32) -> i32; } #[cfg(target_os = "macos")] diff --git a/bin/src/worker.rs b/bin/src/worker.rs index c6025b1b9..7d5b0394d 100644 --- a/bin/src/worker.rs +++ b/bin/src/worker.rs @@ -10,14 +10,10 @@ use std::{ process::Command, }; -#[cfg(target_os = "macos")] -use libc::c_char; -use libc::{self, pid_t}; -#[cfg(target_os = "freebsd")] -use libc::{sysctl, CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, PATH_MAX}; +use libc::pid_t; + use mio::net::UnixStream; use nix::{ - self, errno::Errno, unistd::{fork, ForkResult}, }; @@ -26,7 +22,7 @@ use tempfile::tempfile; use sozu_command_lib::{ channel::{Channel, ChannelError}, config::Config, - logging::setup_logging, + logging::{setup_logging, AccessLogFormat}, proto::command::{ServerConfig, WorkerRequest, WorkerResponse}, ready::Ready, request::{read_initial_state_from_file, RequestError}, @@ -110,10 +106,15 @@ pub fn begin_worker_process( let worker_id = format!("{}-{:02}", "WRK", id); + let access_log_format = AccessLogFormat::from(&worker_config.access_log_format()); + // do not try to log anything before this, or the logger will panic setup_logging( &worker_config.log_target, - worker_config.log_access_target.as_deref(), + worker_config.log_colored, + worker_config.access_logs_target.as_deref(), + Some(access_log_format), + Some(worker_config.log_colored), &worker_config.log_level, &worker_id, ); diff --git a/command/Cargo.toml b/command/Cargo.toml index 9e18c6bc5..898c5696e 100644 --- a/command/Cargo.toml +++ b/command/Cargo.toml @@ -38,6 +38,7 @@ nix = { version = "^0.28.0", features = ["socket", "uio"] } nom = "^7.1.3" prost = "^0.12.3" rand = "^0.8.5" +rusty_ulid = "^2.0.0" serde = { version = "^1.0.195", features = ["derive"] } serde_json = "^1.0.111" sha2 = "^0.10.8" @@ -52,6 +53,7 @@ x509-parser = "^0.16.0" unstable = [] logs-debug = [] logs-trace = [] +logs-cache = [] [badges] travis-ci = { repository = "sozu-proxy/sozu" } diff --git 
a/command/examples/bench_logger.rs b/command/examples/bench_logger.rs new file mode 100644 index 000000000..0519ab329 --- /dev/null +++ b/command/examples/bench_logger.rs @@ -0,0 +1,106 @@ +//! This file is used to bench the logger of Sozu by logging random lines. +//! You can change the number of logs, log target, log filter and if they are colored with env +//! variables. + +#[macro_use] +extern crate sozu_command_lib; + +use std::time::Instant; + +use rand::{rngs::StdRng, Rng, RngCore, SeedableRng}; + +use sozu_command_lib::logging::setup_logging; + +struct LogLine { + arg0: u32, + arg1: u32, + arg2: u32, + log_level: u32, +} + +#[allow(dead_code)] +fn random_string(rng: &mut StdRng) -> String { + (0..10) + .map(|_| rng.sample(rand::distributions::Alphanumeric) as char) + .collect() +} + +fn random_log_line(rng: &mut StdRng) -> LogLine { + LogLine { + arg0: rng.next_u32(), + arg1: rng.next_u32(), + arg2: rng.next_u32(), + log_level: rng.next_u32(), + } +} + +fn main() { + let mut rng: StdRng = SeedableRng::seed_from_u64(54321); + + let n: usize = std::env::var("BENCH_LOG_ITERS") + .ok() + .and_then(|n| n.parse().ok()) + .unwrap_or(1000); + let colored = std::env::var("BENCH_LOG_COLOR") + .map(|v| v == "true") + .unwrap_or(false); + let pre_generate = std::env::var("BENCH_LOG_PREGEN") + .map(|v| v == "true") + .unwrap_or(false); + let target = std::env::var("BENCH_LOG_TARGET").unwrap_or("stdout".to_string()); + let filter = std::env::var("BENCH_LOG_FILTER").unwrap_or("trace".to_string()); + + eprintln!( + "n={n}, pre_generate={pre_generate}, target={target}, colored={colored}, filter={filter}" + ); + setup_logging(&target, colored, None, None, None, &filter, "WRK-01"); + + let mut pre_generated_log_iterator; + let mut log_iterator = std::iter::repeat(()) + .take(n) + .map(|_| random_log_line(&mut rng)); + + let log_iterator = if pre_generate { + // the pre generated iterator allocates all LogLines to avoid generating them on the fly + // generation takes between 15% and 20% of the time + pre_generated_log_iterator = log_iterator.collect::>().into_iter(); + &mut pre_generated_log_iterator as &mut dyn Iterator + } else { + &mut log_iterator as &mut dyn Iterator + }; + + let start = Instant::now(); + for LogLine { + arg0, + arg1, + arg2, + log_level, + } in log_iterator.into_iter() + { + match log_level % 5 { + 0 => error!( + "first argument: {}, second: {}, third: {}", + arg0, arg1, arg2 + ), + 1 => warn!( + "first argument: {}, second: {}, third: {}", + arg0, arg1, arg2 + ), + 2 => info!( + "first argument: {}, second: {}, third: {}", + arg0, arg1, arg2 + ), + 3 => debug!( + "first argument: {}, second: {}, third: {}", + arg0, arg1, arg2 + ), + _ => trace!( + "first argument: {}, second: {}, third: {}", + arg0, + arg1, + arg2 + ), + } + } + eprintln!("logs took {:?}", start.elapsed()); +} diff --git a/command/src/channel.rs b/command/src/channel.rs index c4285499d..63fec51af 100644 --- a/command/src/channel.rs +++ b/command/src/channel.rs @@ -2,7 +2,6 @@ use std::{ cmp::min, fmt::Debug, io::{self, ErrorKind, Read, Write}, - iter::Iterator, marker::PhantomData, os::unix::{ io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}, diff --git a/command/src/command.proto b/command/src/command.proto index a3b4c2910..83d4d2d07 100644 --- a/command/src/command.proto +++ b/command/src/command.proto @@ -151,7 +151,7 @@ message HttpsListenerConfig { required uint32 request_timeout = 10 [default = 10]; // wether the listener is actively listening on its socket required bool active = 11 [default = false]; - // TLS 
versions + // TLS versions repeated TlsVersion versions = 12; repeated string cipher_list = 13; repeated string cipher_suites = 14; @@ -306,7 +306,7 @@ message CertificateAndKey { message QueryCertificatesFilters { // a domain name to filter certificate results optional string domain = 1; - // a hex-encoded fingerprint of the TLS certificate to find + // a hex-encoded fingerprint of the TLS certificate to find optional string fingerprint = 2; } @@ -612,7 +612,7 @@ message RequestCounts { } // matches std::net::SocketAddr in the Rust library -// beware that the ports are expressed with uint32 here, +// beware that the ports are expressed with uint32 here, // but they should NOT exceed uint16 value message SocketAddress { required IpAddress ip = 1; @@ -621,7 +621,7 @@ message SocketAddress { message IpAddress { oneof inner { - uint32 v4 = 1; + fixed32 v4 = 1; Uint128 v6 = 2; } } @@ -632,7 +632,6 @@ message Uint128 { required uint64 low = 1; // lower value, last 8 bytes of the ip required uint64 high = 2; - } // This is sent only from Sōzu to Sōzu @@ -671,10 +670,17 @@ message ServerConfig { required uint64 buffer_size = 9 [default = 16393]; required string log_level = 10 [default = "info"]; required string log_target = 11 [default = "stdout"]; - optional string log_access_target = 12; + optional string access_logs_target = 12; required uint64 command_buffer_size = 13 [default = 1000000]; required uint64 max_command_buffer_size = 14 [default = 2000000]; optional ServerMetricsConfig metrics = 15; + required ProtobufAccessLogFormat access_log_format = 16; + required bool log_colored = 17; +} + +enum ProtobufAccessLogFormat { + Ascii = 1; + Protobuf = 2; } // Addresses of listeners, passed to new workers @@ -691,4 +697,63 @@ message ListenersCount { // Consists in a collection of worker requests message InitialState { repeated WorkerRequest requests = 1; -} \ No newline at end of file +} + +// An access log, meant to be passed to another agent +message ProtobufAccessLog { + // error message if any + optional string message = 1; + // LogContext = request_id + cluster_id + backend_id + required Uint128 request_id = 2; + // id of the cluster (set of frontend, backend, routing rules) + optional string cluster_id = 3; + // id of the backend (the server to which the traffic is redirected) + optional string backend_id = 4; + // ip and port of the client + optional SocketAddress session_address = 5; + // socket address of the backend server + optional SocketAddress backend_address = 6; + // the protocol, with SSL/TLS version, for instance "HTTPS-TLS1.1" + required string protocol = 7; + // TCP or HTTP endpoint (method, path, context...) 
+ required ProtobufEndpoint endpoint = 8; + // round trip time for the client (microseconds) + optional uint64 client_rtt = 9; + // round trip time for the backend (microseconds) + optional uint64 server_rtt = 10; + // time for the backend to respond (microseconds) + required uint64 response_time = 12; + // time spent on a session (microseconds) + required uint64 service_time = 13; + // number of bytes received from the client + required uint64 bytes_in = 14; + // number of bytes written to the client + required uint64 bytes_out = 15; + // value of the User-Agent header, if any + optional string user_agent = 16; + // custom tags as key-values, for instance owner_id: MyOrganisation + map tags = 17; + // short description of which process sends the log, for instance: "WRK-02" + required string tag = 18; + // POSIX timestamp, nanoseconds + required Uint128 time = 19; +} + +message ProtobufEndpoint { + oneof inner { + HttpEndpoint http = 1; + TcpEndpoint tcp = 2; + } +} + +message HttpEndpoint { + optional string method = 1; + optional string authority = 2; + optional string path = 3; + // warning: this should be a u16 but protobuf only has uint32. + // Make sure the value never exceeds u16 bounds. + optional uint32 status = 4; + optional string reason = 5; +} + +message TcpEndpoint {} diff --git a/command/src/config.rs b/command/src/config.rs index 7e019f8f3..be6fdbfce 100644 --- a/command/src/config.rs +++ b/command/src/config.rs @@ -49,7 +49,7 @@ //! 3. if a variable has not been set in the TOML file, it will be set to a default defined here use std::{ collections::{BTreeMap, HashMap, HashSet}, - env, + env, fmt, fs::{create_dir_all, metadata, File}, io::{ErrorKind, Read}, net::SocketAddr, @@ -57,16 +57,16 @@ use std::{ path::PathBuf, }; -use toml; - use crate::{ certificate::split_certificate_chain, + logging::AccessLogFormat, proto::command::{ request::RequestType, ActivateListener, AddBackend, AddCertificate, CertificateAndKey, Cluster, HttpListenerConfig, HttpsListenerConfig, ListenerType, LoadBalancingAlgorithms, - LoadBalancingParams, LoadMetric, MetricsConfiguration, PathRule, ProxyProtocolConfig, - Request, RequestHttpFrontend, RequestTcpFrontend, RulePosition, ServerConfig, - ServerMetricsConfig, SocketAddress, TcpListenerConfig, TlsVersion, WorkerRequest, + LoadBalancingParams, LoadMetric, MetricsConfiguration, PathRule, ProtobufAccessLogFormat, + ProxyProtocolConfig, Request, RequestHttpFrontend, RequestTcpFrontend, RulePosition, + ServerConfig, ServerMetricsConfig, SocketAddress, TcpListenerConfig, TlsVersion, + WorkerRequest, }, ObjectKind, }; @@ -515,7 +515,7 @@ impl ListenerBuilder { .ok() }) .map(split_certificate_chain) - .unwrap_or_else(Vec::new); + .unwrap_or_default(); let (answer_404, answer_503) = self.get_404_503_answers()?; @@ -1089,7 +1089,13 @@ pub struct FileConfig { pub log_level: Option, pub log_target: Option, #[serde(default)] - pub log_access_target: Option, + pub log_colored: bool, + #[serde(default)] + pub access_logs_target: Option, + #[serde(default)] + pub access_logs_format: Option, + #[serde(default)] + pub access_logs_colored: Option, pub worker_count: Option, pub worker_automatic_restart: Option, pub metrics: Option, @@ -1200,7 +1206,9 @@ impl ConfigBuilder { ctl_command_timeout: file_config.ctl_command_timeout.unwrap_or(1_000), front_timeout: file_config.front_timeout.unwrap_or(DEFAULT_FRONT_TIMEOUT), handle_process_affinity: file_config.handle_process_affinity.unwrap_or(false), - log_access_target: file_config.log_access_target.clone(), + 
access_logs_target: file_config.access_logs_target.clone(), + access_logs_format: file_config.access_logs_format.clone(), + access_logs_colored: file_config.access_logs_colored, log_level: file_config .log_level .clone() @@ -1209,6 +1217,7 @@ impl ConfigBuilder { .log_target .clone() .unwrap_or_else(|| String::from("stdout")), + log_colored: file_config.log_colored, max_buffers: file_config.max_buffers.unwrap_or(DEFAULT_MAX_BUFFERS), max_command_buffer_size: file_config .max_command_buffer_size @@ -1430,7 +1439,7 @@ impl ConfigBuilder { /// Sōzu configuration, populated with clusters and listeners. /// /// This struct is used on startup to generate `WorkerRequest`s -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Default, Deserialize)] +#[derive(Clone, PartialEq, Eq, Serialize, Default, Deserialize)] pub struct Config { pub config_path: String, pub command_socket: String, @@ -1445,8 +1454,11 @@ pub struct Config { pub automatic_state_save: bool, pub log_level: String, pub log_target: String, + pub log_colored: bool, #[serde(default)] - pub log_access_target: Option, + pub access_logs_target: Option, + pub access_logs_format: Option, + pub access_logs_colored: Option, pub worker_count: u16, pub worker_automatic_restart: bool, pub metrics: Option, @@ -1739,6 +1751,42 @@ impl Config { } } +impl fmt::Debug for Config { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Config") + .field("config_path", &self.config_path) + .field("command_socket", &self.command_socket) + .field("command_buffer_size", &self.command_buffer_size) + .field("max_command_buffer_size", &self.max_command_buffer_size) + .field("max_connections", &self.max_connections) + .field("min_buffers", &self.min_buffers) + .field("max_buffers", &self.max_buffers) + .field("buffer_size", &self.buffer_size) + .field("saved_state", &self.saved_state) + .field("automatic_state_save", &self.automatic_state_save) + .field("log_level", &self.log_level) + .field("log_target", &self.log_target) + .field("access_logs_target", &self.access_logs_target) + .field("access_logs_format", &self.access_logs_format) + .field("worker_count", &self.worker_count) + .field("worker_automatic_restart", &self.worker_automatic_restart) + .field("metrics", &self.metrics) + .field("disable_cluster_metrics", &self.disable_cluster_metrics) + .field("handle_process_affinity", &self.handle_process_affinity) + .field("ctl_command_timeout", &self.ctl_command_timeout) + .field("pid_file_path", &self.pid_file_path) + .field("activate_listeners", &self.activate_listeners) + .field("front_timeout", &self.front_timeout) + .field("back_timeout", &self.back_timeout) + .field("connect_timeout", &self.connect_timeout) + .field("zombie_check_interval", &self.zombie_check_interval) + .field("accept_queue_timeout", &self.accept_queue_timeout) + .field("request_timeout", &self.request_timeout) + .field("worker_timeout", &self.worker_timeout) + .finish() + } +} + fn display_toml_error(file: &str, error: &toml::de::Error) { println!("error parsing the configuration file '{file}': {error}"); if let Some(Range { start, end }) = error.span() { @@ -1773,10 +1821,12 @@ impl From<&Config> for ServerConfig { buffer_size: config.buffer_size, log_level: config.log_level.clone(), log_target: config.log_target.clone(), - log_access_target: config.log_access_target.clone(), + access_logs_target: config.access_logs_target.clone(), command_buffer_size: config.command_buffer_size, max_command_buffer_size: config.max_command_buffer_size, metrics, + access_log_format: 
ProtobufAccessLogFormat::from(&config.access_logs_format) as i32, + log_colored: config.log_colored, } } } diff --git a/command/src/lib.rs b/command/src/lib.rs index d7422ec4e..0e5ea044c 100644 --- a/command/src/lib.rs +++ b/command/src/lib.rs @@ -87,3 +87,29 @@ pub enum ObjectKind { TcpListener, TcpFrontend, } + +pub trait AsString { + fn as_string_or(&self, default: &'static str) -> String; +} + +impl AsString for Option { + fn as_string_or(&self, default: &'static str) -> String { + match self { + None => default.to_string(), + Some(t) => t.to_string(), + } + } +} + +pub trait AsStr { + fn as_str_or(&self, default: &'static str) -> &str; +} + +impl> AsStr for Option { + fn as_str_or(&self, default: &'static str) -> &str { + match self { + None => default, + Some(s) => s.as_ref(), + } + } +} diff --git a/command/src/logging.rs b/command/src/logging.rs deleted file mode 100644 index 0d6c3d5aa..000000000 --- a/command/src/logging.rs +++ /dev/null @@ -1,841 +0,0 @@ -use std::{ - cell::RefCell, - cmp::{self, Ord}, - env, - fmt::{format, Arguments}, - fs::{File, OpenOptions}, - io::{stdout, Stdout, Write}, - net::{SocketAddr, TcpStream, ToSocketAddrs, UdpSocket}, - path::Path, - str::FromStr, -}; - -use libc; -use mio::net::UnixDatagram; -use rand::{distributions::Alphanumeric, thread_rng, Rng}; - -use crate::config::Config; - -thread_local! { - pub static LOGGER: RefCell = RefCell::new(Logger::new()); - pub static TAG: String = LOGGER.with(|logger| {logger.borrow().tag.clone()}); -} - -// TODO: check if this error is critical: -// could not register compat logger: SetLoggerError(()) -// The CompatLogger may need a variable that tells wether it has been initiated already -pub static COMPAT_LOGGER: CompatLogger = CompatLogger; - -pub struct Logger { - pub directives: Vec, - pub backend: LoggerBackend, - pub access_backend: Option, - /// is displayed in each log, for instance "MAIN" or worker_id - pub tag: String, - /// the pid of the current process (main or worker) - pub pid: i32, - pub initialized: bool, -} - -impl Default for Logger { - fn default() -> Self { - Self { - directives: vec![LogDirective { - name: None, - level: LogLevelFilter::Error, - }], - backend: LoggerBackend::Stdout(stdout()), - access_backend: None, - tag: "SOZU".to_string(), - pid: 0, - initialized: false, - } - } -} - -impl Logger { - pub fn new() -> Self { - Self::default() - } - - pub fn init( - tag: String, - spec: &str, - backend: LoggerBackend, - access_backend: Option, - ) { - let directives = parse_logging_spec(spec); - LOGGER.with(|l| { - let logger = &mut (*l.borrow_mut()); - if !logger.initialized { - logger.set_directives(directives); - logger.backend = backend; - logger.access_backend = access_backend; - logger.tag = tag; - logger.pid = unsafe { libc::getpid() }; - logger.initialized = true; - - let _ = log::set_logger(&COMPAT_LOGGER) - .map_err(|e| println!("could not register compat logger: {e:?}")); - - log::set_max_level(log::LevelFilter::Info); - } - }); - } - - pub fn log(&mut self, meta: &Metadata, args: Arguments) { - if self.enabled(meta) { - match self.backend { - LoggerBackend::Stdout(ref mut stdout) => { - let _ = stdout.write_fmt(args); - } - //FIXME: should have a buffer to write to instead of allocating a string - LoggerBackend::Unix(ref mut socket) => { - let _ = socket.send(format(args).as_bytes()).map_err(|e| { - println!("cannot write logs to Unix socket: {e:?}"); - }); - } - //FIXME: should have a buffer to write to instead of allocating a string - LoggerBackend::Udp(ref mut socket, ref 
address) => { - let _ = socket - .send_to(format(args).as_bytes(), address) - .map_err(|e| { - println!("cannot write logs to UDP socket: {e:?}"); - }); - } - LoggerBackend::Tcp(ref mut socket) => { - let _ = socket.write_fmt(args).map_err(|e| { - println!("cannot write logs to TCP socket: {e:?}"); - }); - } - LoggerBackend::File(ref mut file) => { - let _ = file.write_fmt(args).map_err(|e| { - println!("cannot write logs to file: {e:?}"); - }); - } - } - } - } - - pub fn log_access(&mut self, meta: &Metadata, args: Arguments) { - if self.enabled(meta) { - let backend = self.access_backend.as_mut().unwrap_or(&mut self.backend); - match *backend { - LoggerBackend::Stdout(ref mut stdout) => { - let _ = stdout.write_fmt(args); - } - //FIXME: should have a buffer to write to instead of allocating a string - LoggerBackend::Unix(ref mut socket) => { - let _ = socket.send(format(args).as_bytes()).map_err(|e| { - println!("cannot write logs to Unix socket: {e:?}"); - }); - } - //FIXME: should have a buffer to write to instead of allocating a string - LoggerBackend::Udp(ref mut socket, ref address) => { - let _ = socket - .send_to(format(args).as_bytes(), address) - .map_err(|e| { - println!("cannot write logs to UDP socket: {e:?}"); - }); - } - LoggerBackend::Tcp(ref mut socket) => { - let _ = socket.write_fmt(args).map_err(|e| { - println!("cannot write logs to TCP socket: {e:?}"); - }); - } - LoggerBackend::File(ref mut file) => { - let _ = file.write_fmt(args).map_err(|e| { - println!("cannot write logs to file: {e:?}"); - }); - } - } - } - } - - pub fn compat_log(&mut self, meta: &log::Metadata, args: Arguments) { - if self.compat_enabled(meta) { - match self.backend { - LoggerBackend::Stdout(ref mut stdout) => { - let _ = stdout.write_fmt(args); - } - //FIXME: should have a buffer to write to instead of allocating a string - LoggerBackend::Unix(ref mut socket) => { - let _ = socket.send(format(args).as_bytes()).map_err(|e| { - println!("cannot write logs to Unix socket: {e:?}"); - }); - } - //FIXME: should have a buffer to write to instead of allocating a string - LoggerBackend::Udp(ref mut socket, ref address) => { - let _ = socket - .send_to(format(args).as_bytes(), address) - .map_err(|e| { - println!("cannot write logs to UDP socket: {e:?}"); - }); - } - LoggerBackend::Tcp(ref mut socket) => { - let _ = socket.write_fmt(args).map_err(|e| { - println!("cannot write logs to TCP socket: {e:?}"); - }); - } - LoggerBackend::File(ref mut file) => { - let _ = file.write_fmt(args).map_err(|e| { - println!("cannot write logs to file: {e:?}"); - }); - } - } - } - } - - pub fn set_directives(&mut self, directives: Vec) { - self.directives = directives; - } - - fn enabled(&self, meta: &Metadata) -> bool { - // Search for the longest match, the vector is assumed to be pre-sorted. - for directive in self.directives.iter().rev() { - match directive.name { - Some(ref name) if !meta.target.starts_with(&**name) => {} - Some(..) | None => return meta.level <= directive.level, - } - } - false - } - - fn compat_enabled(&self, meta: &log::Metadata) -> bool { - // Search for the longest match, the vector is assumed to be pre-sorted. - for directive in self.directives.iter().rev() { - match directive.name { - Some(ref name) if !meta.target().starts_with(&**name) => {} - Some(..) 
| None => { - let lvl: LogLevel = meta.level().into(); - return lvl <= directive.level; - } - } - } - false - } -} - -pub enum LoggerBackend { - Stdout(Stdout), - Unix(UnixDatagram), - Udp(UdpSocket, SocketAddr), - Tcp(TcpStream), - File(crate::writer::MultiLineWriter), -} - -#[repr(usize)] -#[derive(Copy, Eq, Debug)] -pub enum LogLevel { - /// The "error" level. - /// - /// Designates very serious errors. - Error = 1, // This way these line up with the discriminants for LogLevelFilter below - /// The "warn" level. - /// - /// Designates hazardous situations. - Warn, - /// The "info" level. - /// - /// Designates useful information. - Info, - /// The "debug" level. - /// - /// Designates lower priority information. - Debug, - /// The "trace" level. - /// - /// Designates very low priority, often extremely verbose, information. - Trace, -} - -static LOG_LEVEL_NAMES: [&str; 6] = ["OFF", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"]; - -impl Clone for LogLevel { - #[inline] - fn clone(&self) -> LogLevel { - *self - } -} - -impl PartialEq for LogLevel { - #[inline] - fn eq(&self, other: &LogLevel) -> bool { - *self as usize == *other as usize - } -} - -impl PartialEq for LogLevel { - #[inline] - fn eq(&self, other: &LogLevelFilter) -> bool { - *self as usize == *other as usize - } -} - -impl PartialOrd for LogLevel { - #[inline] - fn partial_cmp(&self, other: &LogLevel) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialOrd for LogLevel { - #[inline] - fn partial_cmp(&self, other: &LogLevelFilter) -> Option { - Some((*self as usize).cmp(&(*other as usize))) - } -} - -impl Ord for LogLevel { - #[inline] - fn cmp(&self, other: &LogLevel) -> cmp::Ordering { - (*self as usize).cmp(&(*other as usize)) - } -} - -impl LogLevel { - fn from_usize(u: usize) -> Option { - match u { - 1 => Some(LogLevel::Error), - 2 => Some(LogLevel::Warn), - 3 => Some(LogLevel::Info), - 4 => Some(LogLevel::Debug), - 5 => Some(LogLevel::Trace), - _ => None, - } - } - - /// Returns the most verbose logging level. - #[inline] - pub fn max() -> LogLevel { - LogLevel::Trace - } - - /// Converts the `LogLevel` to the equivalent `LogLevelFilter`. 
- #[inline] - pub fn to_log_level_filter(self) -> LogLevelFilter { - LogLevelFilter::from_usize(self as usize).unwrap() - } -} - -#[repr(usize)] -#[derive(Copy, Eq, Debug)] -pub enum LogLevelFilter { - Off, - Error, - Warn, - Info, - Debug, - Trace, -} - -impl Clone for LogLevelFilter { - #[inline] - fn clone(&self) -> LogLevelFilter { - *self - } -} - -impl PartialEq for LogLevelFilter { - #[inline] - fn eq(&self, other: &LogLevelFilter) -> bool { - *self as usize == *other as usize - } -} - -impl PartialEq for LogLevelFilter { - #[inline] - fn eq(&self, other: &LogLevel) -> bool { - other.eq(self) - } -} - -impl PartialOrd for LogLevelFilter { - #[inline] - fn partial_cmp(&self, other: &LogLevelFilter) -> Option { - Some(self.cmp(other)) - } -} - -impl PartialOrd for LogLevelFilter { - #[inline] - fn partial_cmp(&self, other: &LogLevel) -> Option { - other.partial_cmp(self).map(|x| x.reverse()) - } -} - -impl Ord for LogLevelFilter { - #[inline] - fn cmp(&self, other: &LogLevelFilter) -> cmp::Ordering { - (*self as usize).cmp(&(*other as usize)) - } -} - -impl FromStr for LogLevelFilter { - type Err = (); - fn from_str(level: &str) -> Result { - ok_or( - LOG_LEVEL_NAMES - .iter() - .position(|&name| name.eq_ignore_ascii_case(level)) - .map(|p| LogLevelFilter::from_usize(p).unwrap()), - (), - ) - } -} - -impl LogLevelFilter { - fn from_usize(u: usize) -> Option { - match u { - 0 => Some(LogLevelFilter::Off), - 1 => Some(LogLevelFilter::Error), - 2 => Some(LogLevelFilter::Warn), - 3 => Some(LogLevelFilter::Info), - 4 => Some(LogLevelFilter::Debug), - 5 => Some(LogLevelFilter::Trace), - _ => None, - } - } - /// Returns the most verbose logging level filter. - #[inline] - pub fn max() -> LogLevelFilter { - LogLevelFilter::Trace - } - - /// Converts `self` to the equivalent `LogLevel`. - /// - /// Returns `None` if `self` is `LogLevelFilter::Off`. - #[inline] - pub fn to_log_level(self) -> Option { - LogLevel::from_usize(self as usize) - } -} - -/// Metadata about a log message. 
-#[derive(Debug)] -pub struct Metadata { - pub level: LogLevel, - pub target: &'static str, -} - -#[derive(Debug)] -pub struct LogDirective { - name: Option, - level: LogLevelFilter, -} - -fn ok_or(t: Option, e: E) -> Result { - match t { - Some(t) => Ok(t), - None => Err(e), - } -} - -pub fn parse_logging_spec(spec: &str) -> Vec { - let mut dirs = Vec::new(); - - let mut parts = spec.split('/'); - let mods = parts.next(); - let _ = parts.next(); - if parts.next().is_some() { - println!( - "warning: invalid logging spec '{spec}', \ - ignoring it (too many '/'s)" - ); - return dirs; - } - if let Some(m) = mods { - for s in m.split(',') { - if s.is_empty() { - continue; - } - let mut parts = s.split('='); - let (log_level, name) = - match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) { - (Some(part0), None, None) => { - // if the single argument is a log-level string or number, - // treat that as a global fallback - match part0.parse() { - Ok(num) => (num, None), - Err(_) => (LogLevelFilter::max(), Some(part0)), - } - } - (Some(part0), Some(""), None) => (LogLevelFilter::max(), Some(part0)), - (Some(part0), Some(part1), None) => match part1.parse() { - Ok(num) => (num, Some(part0)), - _ => { - println!( - "warning: invalid logging spec '{part1}', \ - ignoring it" - ); - continue; - } - }, - _ => { - println!( - "warning: invalid logging spec '{s}', \ - ignoring it" - ); - continue; - } - }; - dirs.push(LogDirective { - name: name.map(|s| s.to_string()), - level: log_level, - }); - } - } - - dirs -} - -/// start the logger from config (takes RUST_LOG into account) -pub fn setup_logging_with_config(config: &Config, tag: &str) { - setup_logging( - &config.log_target, - config.log_access_target.as_deref(), - &config.log_level, - tag, - ) -} - -/// start the logger, after: -/// -/// - determining logging backends -/// - taking RUST_LOG into account -pub fn setup_logging( - log_target: &str, - log_access_target: Option<&str>, - log_level: &str, - tag: &str, -) { - let backend = target_to_backend(log_target); - let access_backend = log_access_target.map(target_to_backend); - - if let Ok(env_log_level) = env::var("RUST_LOG") { - Logger::init(tag.to_string(), &env_log_level, backend, access_backend); - } else { - Logger::init(tag.to_string(), log_level, backend, access_backend); - } -} - -pub fn target_to_backend(target: &str) -> LoggerBackend { - if target == "stdout" { - LoggerBackend::Stdout(stdout()) - } else if let Some(addr) = target.strip_prefix("udp://") { - match addr.to_socket_addrs() { - Err(e) => { - println!("invalid log target configuration ({e:?}): {target}"); - LoggerBackend::Stdout(stdout()) - } - Ok(mut addrs) => { - let socket = UdpSocket::bind(("0.0.0.0", 0)).unwrap(); - LoggerBackend::Udp(socket, addrs.next().unwrap()) - } - } - } else if let Some(addr) = target.strip_prefix("tcp://") { - match addr.to_socket_addrs() { - Err(e) => { - println!("invalid log target configuration ({e:?}): {target}"); - LoggerBackend::Stdout(stdout()) - } - Ok(mut addrs) => LoggerBackend::Tcp(TcpStream::connect(addrs.next().unwrap()).unwrap()), - } - } else if let Some(addr) = target.strip_prefix("unix://") { - let path = Path::new(addr); - if !path.exists() { - println!("invalid log target configuration: {addr} is not a file"); - LoggerBackend::Stdout(stdout()) - } else { - let mut dir = env::temp_dir(); - let s: String = thread_rng() - .sample_iter(&Alphanumeric) - .take(12) - .map(|c| c as char) - .collect(); - dir.push(s); - let socket = UnixDatagram::bind(dir).unwrap(); - 
socket.connect(path).unwrap(); - LoggerBackend::Unix(socket) - } - } else if let Some(addr) = target.strip_prefix("file://") { - let path = Path::new(addr); - match OpenOptions::new().create(true).append(true).open(path) { - Ok(file) => LoggerBackend::File(crate::writer::MultiLineWriter::new(file)), - Err(e) => { - println!( - "invalid log target configuration: could not open file at {addr} (error: {e:?})" - ); - LoggerBackend::Stdout(stdout()) - } - } - } else { - println!("invalid log target configuration: {target}"); - LoggerBackend::Stdout(stdout()) - } -} - -/// write a log with the custom logger (used in other macros, do not use directly) -#[macro_export] -macro_rules! log { - (__inner__ $target:expr, $lvl:expr, $format:expr, $level_tag:expr, - [$($transformed_args:ident),*], [$first_ident:ident $(, $other_idents:ident)*], $first_arg:expr $(, $other_args:expr)*) => ({ - let $first_ident = &$first_arg; - log!(__inner__ $target, $lvl, $format, $level_tag, [$($transformed_args,)* $first_ident], [$($other_idents),*] $(, $other_args)*); - }); - - (__inner__ $target:expr, $lvl:expr, $format:expr, $level_tag:expr, - [$($final_args:ident),*], [$($idents:ident),*]) => ({ - static _META: $crate::logging::Metadata = $crate::logging::Metadata { - level: $lvl, - target: module_path!(), - }; - { - $crate::logging::TAG.with(|tag| { - //let tag = t.borrow().tag; - $crate::logging::LOGGER.with(|l| { - let pid = l.borrow().pid; - - let (now, precise_time) = $crate::logging::now(); - l.borrow_mut().log( - &_META, - format_args!( - concat!("{} {} {} {} {}\t", $format, '\n'), - now, precise_time, pid, tag, - $level_tag $(, $final_args)*) - ); - }) - }); - } - }); - ($lvl:expr, $format:expr, $level_tag:expr $(, $args:expr)+) => { - log!(__inner__ module_path!(), $lvl, $format, $level_tag, [], [a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v] - $(, $args)+) - }; - ($lvl:expr, $format:expr, $level_tag:expr) => { - log!(__inner__ module_path!(), $lvl, $format, $level_tag, [], [a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v]) - }; -} - -/// log a failure concerning an HTTP or TCP request -#[macro_export] -macro_rules! 
log_access { - (__inner__ $target:expr, $lvl:expr, $format:expr, $level_tag:expr, - [$($transformed_args:ident),*], [$first_ident:ident $(, $other_idents:ident)*], $first_arg:expr $(, $other_args:expr)*) => ({ - let $first_ident = &$first_arg; - log_access!(__inner__ $target, $lvl, $format, $level_tag, [$($transformed_args,)* $first_ident], [$($other_idents),*] $(, $other_args)*); - }); - - (__inner__ $target:expr, $lvl:expr, $format:expr, $level_tag:expr, - [$($final_args:ident),*], [$($idents:ident),*]) => ({ - static _META: $crate::logging::Metadata = $crate::logging::Metadata { - level: $lvl, - target: module_path!(), - }; - { - $crate::logging::TAG.with(|tag| { - //let tag = t.borrow().tag; - $crate::logging::LOGGER.with(|l| { - let pid = l.borrow().pid; - - let (now, precise_time) = $crate::logging::now(); - l.borrow_mut().log_access( - &_META, - format_args!( - concat!("{} {} {} {} {}\t", $format, '\n'), - now, precise_time, pid, tag, - $level_tag $(, $final_args)*) - ); - }) - }); - } - }); - ($lvl:expr, $format:expr, $level_tag:expr $(, $args:expr)+) => { - log_access!(__inner__ module_path!(), $lvl, $format, $level_tag, [], [a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v] - $(, $args)+) - }; - ($lvl:expr, $format:expr, $level_tag:expr) => { - log_access!(__inner__ module_path!(), $lvl, $format, $level_tag, [], [a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v]) - }; -} - -/// log an error with Sōzu's custom log stack -#[macro_export] -macro_rules! error { - ($format:expr, $($arg:tt)*) => { - log!($crate::logging::LogLevel::Error, $format, "ERROR", $($arg)*) - }; - ($format:expr) => { - log!($crate::logging::LogLevel::Error, $format, "ERROR") - }; -} - -/// log a failure concerning an HTTP or TCP request -#[macro_export] -macro_rules! error_access { - ($format:expr, $($arg:tt)*) => { - log_access!($crate::logging::LogLevel::Error, $format, "ERROR", $($arg)*); - }; - ($format:expr) => { - log_access!($crate::logging::LogLevel::Error, $format, "ERROR"); - }; -} - -/// log a warning with Sōzu’s custom log stack -#[macro_export] -macro_rules! warn { - ($format:expr, $($arg:tt)*) => { - log!($crate::logging::LogLevel::Warn, $format, "WARN", $($arg)*); - }; - ($format:expr) => { - log!($crate::logging::LogLevel::Warn, $format, "WARN"); - } -} - -/// log an info with Sōzu’s custom log stack -#[macro_export] -macro_rules! info { - ($format:expr, $($arg:tt)*) => { - log!($crate::logging::LogLevel::Info, $format, "INFO", $($arg)*); - }; - ($format:expr) => { - log!($crate::logging::LogLevel::Info, $format, "INFO"); - } -} - -/// log the success of an HTTP or TCP request -#[macro_export] -macro_rules! info_access { - ($format:expr, $($arg:tt)*) => { - log_access!($crate::logging::LogLevel::Info, $format, "INFO", $($arg)*); - }; - ($format:expr) => { - log_access!($crate::logging::LogLevel::Info, $format, "INFO"); - } -} - -/// log a debug with Sōzu’s custom log stack -#[macro_export] -macro_rules! debug { - ($format:expr, $($arg:tt)*) => { - #[cfg(any(debug_assertions, feature = "logs-debug", feature = "logs-trace"))] - log!($crate::logging::LogLevel::Debug, concat!("{}\t", $format), - "DEBUG", {module_path!()}, $($arg)*); - }; - ($format:expr) => { - #[cfg(any(debug_assertions, feature = "logs-debug", feature = "logs-trace"))] - log!($crate::logging::LogLevel::Debug, concat!("{}\t", $format), - "DEBUG", {module_path!()}); - } -} - -/// log a trace with Sōzu’s custom log stack -#[macro_export] -macro_rules! 
trace { - ($format:expr, $($arg:tt)*) => ( - #[cfg(any(debug_assertions, feature = "logs-trace"))] - log!($crate::logging::LogLevel::Trace, concat!("{}\t", $format), - "TRACE", module_path!(), $($arg)*); - ); - ($format:expr) => ( - #[cfg(any(debug_assertions, feature = "logs-trace"))] - log!($crate::logging::LogLevel::Trace, concat!("{}\t", $format), - "TRACE", module_path!()); - ) -} - -/// write a log with a "FIXME" prefix on an info level -#[macro_export] -macro_rules! fixme { - () => { - log!($crate::logging::LogLevel::Info, "FIXME: {}:{} in {}", "INFO", file!(), line!(), module_path!()); - }; - ($($arg:tt)*) => { - log!($crate::logging::LogLevel::Info, "FIXME: {}:{} in {}: {}", "INFO", file!(), line!(), module_path!(), $($arg)*); - }; -} - -pub struct CompatLogger; - -impl From for LogLevel { - fn from(lvl: log::Level) -> Self { - match lvl { - log::Level::Error => LogLevel::Error, - log::Level::Warn => LogLevel::Warn, - log::Level::Info => LogLevel::Info, - log::Level::Debug => LogLevel::Debug, - log::Level::Trace => LogLevel::Trace, - } - } -} - -impl log::Log for CompatLogger { - fn enabled(&self, _: &log::Metadata) -> bool { - true - } - - fn log(&self, record: &log::Record) { - TAG.with(|tag| { - LOGGER.with(|l| { - let pid = l.borrow().pid; - let (now, precise_time) = now(); - l.borrow_mut().compat_log( - record.metadata(), - format_args!( - concat!("{} {} {} {} {}\t{}\n"), - now, - precise_time, - pid, - tag, - record.level(), - record.args() - ), - ); - }) - }); - } - - fn flush(&self) {} -} - -/// start a logger used in test environment -#[macro_export] -macro_rules! setup_test_logger { - () => { - $crate::logging::Logger::init( - module_path!().to_string(), - "error", - $crate::logging::LoggerBackend::Stdout(::std::io::stdout()), - None, - ); - }; -} - -pub struct Rfc3339Time { - inner: ::time::OffsetDateTime, -} - -impl std::fmt::Display for Rfc3339Time { - fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { - let t = self.inner; - write!( - f, - "{}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}Z", - t.year(), - t.month() as u8, - t.day(), - t.hour(), - t.minute(), - t.second(), - t.microsecond() - ) - } -} - -pub fn now() -> (Rfc3339Time, i128) { - let t = time::OffsetDateTime::now_utc(); - ( - Rfc3339Time { inner: t }, - (t - time::OffsetDateTime::UNIX_EPOCH).whole_nanoseconds(), - ) -} diff --git a/command/src/logging/access_logs.rs b/command/src/logging/access_logs.rs new file mode 100644 index 000000000..42ad85b4c --- /dev/null +++ b/command/src/logging/access_logs.rs @@ -0,0 +1,185 @@ +use std::{collections::BTreeMap, mem::ManuallyDrop, net::SocketAddr}; + +use rusty_ulid::Ulid; +use time::Duration; + +use crate::{ + logging::{LogLevel, Rfc3339Time}, + proto::command::{ + protobuf_endpoint, HttpEndpoint, ProtobufAccessLog, ProtobufEndpoint, TcpEndpoint, + }, +}; + +/// This uses unsafe to creates a "fake" owner of the underlying data. +/// Beware that for the compiler it is as legitimate as the original owner. +/// So you have to elide one of them (with std::mem::forget or ManuallyDrop) +/// before it is drop to avoid a double free. +/// +/// This trait works on &T and Option<&T> types +/// +/// After performance review, it seems not any more efficient than calling `clone()`, +/// probably because the cache of malloc is so well optimized these days. 
+trait DuplicateOwnership { + type Target; + /// Don't forget to use std::mem::forget or ManuallyDrop over one of your owners + unsafe fn duplicate(self) -> Self::Target; +} + +impl DuplicateOwnership for &T { + type Target = T; + unsafe fn duplicate(self) -> T { + std::ptr::read(self as *const T) + } +} +impl<'a, T> DuplicateOwnership for Option<&'a T> +where + T: ?Sized, + &'a T: DuplicateOwnership, +{ + type Target = Option<<&'a T as DuplicateOwnership>::Target>; + unsafe fn duplicate(self) -> Self::Target { + self.map(|t| t.duplicate()) + } +} +impl DuplicateOwnership for &str { + type Target = String; + unsafe fn duplicate(self) -> Self::Target { + String::from_raw_parts(self.as_ptr() as *mut _, self.len(), self.len()) + } +} +impl DuplicateOwnership for &[T] { + type Target = Vec; + unsafe fn duplicate(self) -> Self::Target { + Vec::from_raw_parts(self.as_ptr() as *mut _, self.len(), self.len()) + } +} + +pub struct LogMessage<'a>(pub Option<&'a str>); +pub struct LogDuration(pub Option); + +#[derive(Debug)] +pub struct LogContext<'a> { + pub request_id: Ulid, + pub cluster_id: Option<&'a str>, + pub backend_id: Option<&'a str>, +} + +pub enum EndpointRecord<'a> { + Http { + method: Option<&'a str>, + authority: Option<&'a str>, + path: Option<&'a str>, + status: Option, + reason: Option<&'a str>, + }, + Tcp, +} + +/// used to aggregate tags in a session +#[derive(Debug)] +pub struct CachedTags { + pub tags: BTreeMap, + pub concatenated: String, +} + +impl CachedTags { + pub fn new(tags: BTreeMap) -> Self { + let concatenated = tags + .iter() + .map(|(k, v)| format!("{k}={v}")) + .collect::>() + .join(", "); + Self { tags, concatenated } + } +} + +#[derive(Debug)] +pub struct FullTags<'a> { + pub concatenated: Option<&'a str>, + pub user_agent: Option<&'a str>, +} + +/// Intermediate representation of an access log agnostic of the final format. +/// Every field is a reference to avoid capturing ownership (as a logger should). +pub struct RequestRecord<'a> { + pub message: Option<&'a str>, + pub context: LogContext<'a>, + pub session_address: Option, + pub backend_address: Option, + pub protocol: &'a str, + pub endpoint: EndpointRecord<'a>, + pub tags: Option<&'a CachedTags>, + pub client_rtt: Option, + pub server_rtt: Option, + pub user_agent: Option<&'a str>, + pub service_time: Duration, + pub response_time: Duration, + pub bytes_in: usize, + pub bytes_out: usize, + + // added by the logger itself + pub pid: i32, + pub tag: &'a str, + pub level: LogLevel, + pub now: Rfc3339Time, + pub precise_time: i128, +} + +impl RequestRecord<'_> { + pub fn full_tags(&self) -> FullTags { + FullTags { + concatenated: self.tags.as_ref().map(|t| t.concatenated.as_str()), + user_agent: self.user_agent, + } + } + + /// Converts the RequestRecord in its protobuf representation. + /// Prost needs ownership over all the fields but we don't want to take it from the user + /// or clone them, so we use the unsafe DuplicateOwnership. 
+ pub fn into_binary_access_log(self) -> ManuallyDrop { + unsafe { + let endpoint = match self.endpoint { + EndpointRecord::Http { + method, + authority, + path, + status, + reason, + } => protobuf_endpoint::Inner::Http(HttpEndpoint { + method: method.duplicate(), + authority: authority.duplicate(), + path: path.duplicate(), + status: status.map(|s| s as u32), + reason: reason.duplicate(), + }), + EndpointRecord::Tcp => protobuf_endpoint::Inner::Tcp(TcpEndpoint {}), + }; + + ManuallyDrop::new(ProtobufAccessLog { + backend_address: self.backend_address.map(Into::into), + backend_id: self.context.backend_id.duplicate(), + bytes_in: self.bytes_in as u64, + bytes_out: self.bytes_out as u64, + client_rtt: self.client_rtt.map(|t| t.whole_microseconds() as u64), + cluster_id: self.context.cluster_id.duplicate(), + endpoint: ProtobufEndpoint { + inner: Some(endpoint), + }, + message: self.message.duplicate(), + protocol: self.protocol.duplicate(), + request_id: self.context.request_id.into(), + response_time: self.response_time.whole_microseconds() as u64, + server_rtt: self.server_rtt.map(|t| t.whole_microseconds() as u64), + service_time: self.service_time.whole_microseconds() as u64, + session_address: self.session_address.map(Into::into), + tags: self + .tags + .map(|tags| tags.tags.duplicate()) + .unwrap_or_default(), + user_agent: self.user_agent.duplicate(), + tag: self.tag.duplicate(), + time: self.precise_time.into(), + }) + } + } +} diff --git a/command/src/logging/display.rs b/command/src/logging/display.rs new file mode 100644 index 000000000..287df6669 --- /dev/null +++ b/command/src/logging/display.rs @@ -0,0 +1,167 @@ +use std::fmt; + +use crate::{ + logging::{ + EndpointRecord, FullTags, LogContext, LogDuration, LogLevel, LogMessage, LoggerBackend, + Rfc3339Time, + }, + AsStr, +}; + +impl LogLevel { + pub const fn as_str(&self, access: bool, colored: bool) -> &'static str { + match (self, access, colored) { + (LogLevel::Error, false, false) => "ERROR", + (LogLevel::Warn, false, false) => "WARN ", + (LogLevel::Info, false, false) => "INFO ", + (LogLevel::Debug, false, false) => "DEBUG", + (LogLevel::Trace, false, false) => "TRACE", + + (LogLevel::Error, false, true) => "\x1b[;31;1mERROR", + (LogLevel::Warn, false, true) => "\x1b[;33;1mWARN ", + (LogLevel::Info, false, true) => "\x1b[;32;1mINFO ", + (LogLevel::Debug, false, true) => "\x1b[;34mDEBUG", + (LogLevel::Trace, false, true) => "\x1b[;90mTRACE", + + (LogLevel::Error, true, false) => "ERROR-ACCESS", + (LogLevel::Info, true, false) => "INFO-ACCESS ", + (_, true, false) => "???", + + (LogLevel::Error, true, true) => "\x1b[;35;1mERROR-ACCESS", + (LogLevel::Info, true, true) => "\x1b[;35;1mINFO-ACCESS ", + (_, true, true) => "\x1b[;35;1m???", + } + } +} + +impl AsRef for LoggerBackend { + fn as_ref(&self) -> &str { + match self { + LoggerBackend::Stdout(_) => "stdout", + LoggerBackend::Unix(_) => "UNIX socket", + LoggerBackend::Udp(_, _) => "UDP socket", + LoggerBackend::Tcp(_) => "TCP socket", + LoggerBackend::File(_) => "file", + } + } +} + +impl fmt::Display for Rfc3339Time { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + let t = self.inner; + write!( + f, + "{}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}Z", + t.year(), + t.month() as u8, + t.day(), + t.hour(), + t.minute(), + t.second(), + t.microsecond() + ) + } +} + +impl fmt::Display for LogMessage<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.0 { + Some(message) => write!(f, " | {message}"), + None => Ok(()), + } + } +} + 
+impl fmt::Display for LogDuration { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.0 { + None => write!(f, "-"), + Some(duration) => { + let secs = duration.whole_seconds(); + if secs >= 10 { + return write!(f, "{secs}s"); + } + + let ms = duration.whole_milliseconds(); + if ms < 10 { + let us = duration.whole_microseconds(); + if us >= 10 { + return write!(f, "{us}μs"); + } + + let ns = duration.whole_nanoseconds(); + return write!(f, "{ns}ns"); + } + + write!(f, "{ms}ms") + } + } + } +} + +impl fmt::Display for LogContext<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "[{} {} {}]", + self.request_id, + self.cluster_id.unwrap_or("-"), + self.backend_id.unwrap_or("-") + ) + } +} + +impl fmt::Display for EndpointRecord<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Http { + method, + authority, + path, + status, + .. + } => write!( + f, + "{} {} {} {}", + authority.as_str_or("-"), + method.as_str_or("-"), + path.as_str_or("-"), + display_status(*status, f.alternate()), + ), + Self::Tcp => { + write!(f, "-") + } + } + } +} + +fn display_status(status: Option, pretty: bool) -> String { + match (status, pretty) { + (Some(s @ 200..=299), true) => format!("\x1b[32m{s}"), + (Some(s @ 300..=399), true) => format!("\x1b[34m{s}"), + (Some(s @ 400..=499), true) => format!("\x1b[33m{s}"), + (Some(s @ 500..=599), true) => format!("\x1b[31m{s}"), + (Some(s), _) => s.to_string(), + (None, _) => "-".to_string(), + } +} + +impl<'a> fmt::Display for FullTags<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match (self.concatenated, self.user_agent) { + (None, None) => Ok(()), + (Some(tags), None) => write!(f, "{tags}"), + (Some(tags), Some(ua)) if !tags.is_empty() => { + write!(f, "{tags}, user-agent={}", prepare_user_agent(ua)) + } + (_, Some(ua)) => write!(f, "user-agent={}", prepare_user_agent(ua)), + } + } +} + +fn prepare_user_agent(user_agent: &str) -> String { + user_agent + .replace(' ', "_") + .replace('[', "{") + .replace(']', "}") +} diff --git a/command/src/logging/logs.rs b/command/src/logging/logs.rs new file mode 100644 index 000000000..074166206 --- /dev/null +++ b/command/src/logging/logs.rs @@ -0,0 +1,1017 @@ +use std::{ + cell::RefCell, + cmp, + env, + fmt::Arguments, + fs::{File, OpenOptions}, + io::{stdout, Error as IoError, ErrorKind as IoErrorKind, Stdout, Write}, + net::{SocketAddr, TcpStream, ToSocketAddrs, UdpSocket}, + ops::{Deref, DerefMut}, + path::Path, + str::FromStr, +}; + +use mio::net::UnixDatagram; +use prost::{encoding::encoded_len_varint, Message}; +use rand::{distributions::Alphanumeric, thread_rng, Rng}; + +use crate::{ + config::Config, + logging::{LogDuration, LogMessage, RequestRecord}, + proto::command::ProtobufAccessLogFormat, + AsString, +}; + +thread_local! 
{ + pub static LOGGER: RefCell = RefCell::new(Logger::new()); +} + +// TODO: check if this error is critical: +// could not register compat logger: SetLoggerError(()) +// The CompatLogger may need a variable that tells wether it has been initiated already +pub static COMPAT_LOGGER: CompatLogger = CompatLogger; + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(deny_unknown_fields, rename_all = "lowercase")] +pub enum AccessLogFormat { + Ascii, + Protobuf, +} + +impl From<&ProtobufAccessLogFormat> for AccessLogFormat { + fn from(value: &ProtobufAccessLogFormat) -> Self { + match value { + ProtobufAccessLogFormat::Ascii => Self::Ascii, + ProtobufAccessLogFormat::Protobuf => Self::Protobuf, + } + } +} + +impl From<&Option> for ProtobufAccessLogFormat { + fn from(value: &Option) -> Self { + match value { + Some(AccessLogFormat::Ascii) | None => Self::Ascii, + Some(AccessLogFormat::Protobuf) => Self::Protobuf, + } + } +} + +pub struct InnerLogger { + version: u8, + directives: Vec, + backend: LoggerBackend, + pub colored: bool, + /// target of the access logs + access_backend: Option, + /// how to format the access logs + access_format: AccessLogFormat, + access_colored: bool, + buffer: LoggerBuffer, +} + +pub struct Logger { + inner: InnerLogger, + /// is displayed in each log, for instance "MAIN" or worker_id + tag: String, + /// the pid of the current process (main or worker) + pid: i32, + initialized: bool, +} + +impl std::ops::Deref for Logger { + type Target = InnerLogger; + fn deref(&self) -> &Self::Target { + &self.inner + } +} +impl std::ops::DerefMut for Logger { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl Default for Logger { + fn default() -> Self { + Self { + inner: InnerLogger { + version: 1, // all call site start with a version of 0 + directives: vec![LogDirective { + name: None, + level: LogLevelFilter::Error, + }], + backend: LoggerBackend::Stdout(stdout()), + colored: false, + access_backend: None, + access_format: AccessLogFormat::Ascii, + access_colored: false, + buffer: LoggerBuffer(Vec::with_capacity(4096)), + }, + tag: "UNINITIALIZED".to_string(), + pid: 0, + initialized: false, + } + } +} + +impl Logger { + pub fn new() -> Self { + Self::default() + } + + pub fn init( + tag: String, + spec: &str, + backend: LoggerBackend, + colored: bool, + access_backend: Option, + access_format: Option, + access_colored: Option, + ) { + let (directives, _errors) = parse_logging_spec(spec); + LOGGER.with(|logger| { + let mut logger = logger.borrow_mut(); + if !logger.initialized { + logger.set_directives(directives); + logger.colored = match backend { + LoggerBackend::Stdout(_) => colored, + _ => false, + }; + logger.access_colored = match (&access_backend, &backend) { + (Some(LoggerBackend::Stdout(_)), _) | (None, LoggerBackend::Stdout(_)) => { + access_colored.unwrap_or(colored) + } + _ => false, + }; + logger.backend = backend; + logger.access_backend = access_backend; + logger.access_format = access_format.unwrap_or(AccessLogFormat::Ascii); + logger.tag = tag; + logger.pid = unsafe { libc::getpid() }; + logger.initialized = true; + + let _ = log::set_logger(&COMPAT_LOGGER) + .map_err(|e| println!("could not register compat logger: {e:?}")); + + log::set_max_level(log::LevelFilter::Info); + } + }); + } + + pub fn set_directives(&mut self, directives: Vec) { + self.version += 1; + if self.version >= LOG_LINE_ENABLED { + self.version = 0; + } + self.directives = directives; + } + + pub fn split(&mut self) -> (i32, 
&str, &mut InnerLogger) { + (self.pid, &self.tag, &mut self.inner) + } +} + +struct LoggerBuffer(Vec); + +impl Deref for LoggerBuffer { + type Target = Vec; + fn deref(&self) -> &Self::Target { + &self.0 + } +} +impl DerefMut for LoggerBuffer { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl LoggerBuffer { + fn fmt Result>( + &mut self, + args: Arguments, + flush: F, + ) -> Result<(), IoError> { + self.clear(); + self.write_fmt(args)?; + flush(self.as_slice())?; + Ok(()) + } +} + +fn log_arguments( + args: Arguments, + backend: &mut LoggerBackend, + buffer: &mut LoggerBuffer, +) -> Result<(), IoError> { + match backend { + LoggerBackend::Stdout(stdout) => { + let _ = stdout.write_fmt(args); + Ok(()) + } + LoggerBackend::Tcp(socket) => socket.write_fmt(args), + LoggerBackend::File(file) => file.write_fmt(args), + LoggerBackend::Unix(socket) => buffer.fmt(args, |bytes| socket.send(bytes)), + LoggerBackend::Udp(sock, addr) => buffer.fmt(args, |b| sock.send_to(b, *addr)), + } +} + +impl InnerLogger { + pub fn log(&mut self, args: Arguments) { + if let Err(e) = log_arguments(args, &mut self.backend, &mut self.buffer) { + println!("Could not write log to {}: {e:?}", self.backend.as_ref()); + } + } + + /// write an access log to the proper logging target + /// + /// Protobuf access logs are written with a prost length delimiter before, and 2 empty bytes after + pub fn log_access(&mut self, log: RequestRecord) { + let backend = self.access_backend.as_mut().unwrap_or(&mut self.backend); + + let io_result = match self.access_format { + AccessLogFormat::Protobuf => { + let binary_log = log.into_binary_access_log(); + let log_length = binary_log.encoded_len(); + let total_length = log_length + encoded_len_varint(log_length as u64); + self.buffer.clear(); + let current_capacity = self.buffer.capacity(); + if current_capacity < total_length { + self.buffer.reserve(total_length - current_capacity); + } + + if let Err(e) = binary_log.encode_length_delimited(&mut self.buffer.0) { + Err(IoError::new(IoErrorKind::InvalidData, e)) + } else { + self.buffer.extend_from_slice(&[0, 0]); // add two empty bytes after each protobuf access log + let bytes = &self.buffer; + match backend { + LoggerBackend::Stdout(stdout) => { + let _ = stdout.write(bytes); + return; + } + LoggerBackend::Tcp(socket) => socket.write(bytes), + LoggerBackend::File(file) => file.write(bytes), + LoggerBackend::Unix(socket) => socket.send(bytes), + LoggerBackend::Udp(socket, address) => socket.send_to(bytes, *address), + } + .map(|_| ()) + } + } + AccessLogFormat::Ascii => crate::_prompt_log! 
{ + logger: |args| log_arguments(args, backend, &mut self.buffer), + is_access: true, + condition: self.access_colored, + prompt: [ + log.now, + log.precise_time, + log.pid, + log.level, + log.tag, + ], + standard: { + formats: ["{} {} {} {}/{}/{}/{} {} {} [{}] {} {}{}\n"], + args: [ + log.context, + log.session_address.as_string_or("-"), + log.backend_address.as_string_or("-"), + LogDuration(Some(log.response_time)), + LogDuration(Some(log.service_time)), + LogDuration(log.client_rtt), + LogDuration(log.server_rtt), + log.bytes_in, + log.bytes_out, + log.full_tags(), + log.protocol, + log.endpoint, + LogMessage(log.message), + ] + }, + colored: { + formats: ["\x1b[;1m{}\x1b[m {} {} {}/{}/{}/{} {} {} \x1b[2m[{}] \x1b[;1m{} {:#}\x1b[m{}\n"], + args: @, + } + }, + }; + + if let Err(e) = io_result { + println!("Could not write access log to {}: {e:?}", backend.as_ref()); + } + } + + pub fn enabled(&self, meta: Metadata) -> bool { + // Search for the longest match, the vector is assumed to be pre-sorted. + for directive in self.directives.iter().rev() { + match &directive.name { + Some(name) if !meta.target.starts_with(name) => {} + Some(_) | None => return meta.level <= directive.level, + } + } + false + } + + pub fn cached_enabled(&self, call_site_state: &mut LogLineCachedState, meta: Metadata) -> bool { + if call_site_state.version() == self.version { + call_site_state.enabled() + } else { + let enabled = self.enabled(meta); + call_site_state.set(self.version, enabled); + enabled + } + } + + fn compat_enabled(&self, meta: &log::Metadata) -> bool { + // Search for the longest match, the vector is assumed to be pre-sorted. + for directive in self.directives.iter().rev() { + match &directive.name { + Some(name) if !meta.target().starts_with(name) => {} + Some(_) | None => return LogLevel::from(meta.level()) <= directive.level, + } + } + false + } +} + +pub enum LoggerBackend { + Stdout(Stdout), + Unix(UnixDatagram), + Udp(UdpSocket, SocketAddr), + Tcp(TcpStream), + File(crate::writer::MultiLineWriter), +} + +#[repr(usize)] +#[derive(Clone, Copy, Eq, Debug)] +pub enum LogLevel { + /// The "error" level. + /// + /// Designates very serious errors. + Error = 1, // This way these line up with the discriminants for LogLevelFilter below + /// The "warn" level. + /// + /// Designates hazardous situations. + Warn, + /// The "info" level. + /// + /// Designates useful information. + Info, + /// The "debug" level. + /// + /// Designates lower priority information. + Debug, + /// The "trace" level. + /// + /// Designates very low priority, often extremely verbose, information. 
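(Editor's aside, not part of the changeset: a minimal sketch of how a consumer might decode the protobuf access logs framed by `log_access` above, i.e. one prost length delimiter before each record and two empty bytes after. It assumes one record per datagram, as produced for the UDP/Unix targets, and leaves the concrete generated message type generic.)

```rust
use prost::Message;

/// Decode one length-delimited access log from a datagram, ignoring the
/// two empty trailing bytes the writer appends after each record.
fn read_one_access_log<M: Message + Default>(datagram: &[u8]) -> Result<M, prost::DecodeError> {
    // strip the trailing [0, 0] separator, then let prost consume the varint length prefix
    let framed = datagram.strip_suffix(&[0, 0]).unwrap_or(datagram);
    M::decode_length_delimited(framed)
}
```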
+ Trace, +} + +static LOG_LEVEL_NAMES: [&str; 6] = ["OFF", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"]; + +impl PartialEq for LogLevel { + #[inline] + fn eq(&self, other: &LogLevel) -> bool { + *self as usize == *other as usize + } +} + +impl PartialEq for LogLevel { + #[inline] + fn eq(&self, other: &LogLevelFilter) -> bool { + *self as usize == *other as usize + } +} + +impl PartialOrd for LogLevel { + #[inline] + fn partial_cmp(&self, other: &LogLevel) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialOrd for LogLevel { + #[inline] + fn partial_cmp(&self, other: &LogLevelFilter) -> Option { + Some((*self as usize).cmp(&(*other as usize))) + } +} + +impl Ord for LogLevel { + #[inline] + fn cmp(&self, other: &LogLevel) -> cmp::Ordering { + (*self as usize).cmp(&(*other as usize)) + } +} + +impl LogLevel { + fn from_usize(u: usize) -> Option { + match u { + 1 => Some(LogLevel::Error), + 2 => Some(LogLevel::Warn), + 3 => Some(LogLevel::Info), + 4 => Some(LogLevel::Debug), + 5 => Some(LogLevel::Trace), + _ => None, + } + } + + /// Returns the most verbose logging level. + #[inline] + pub fn max() -> LogLevel { + LogLevel::Trace + } + + /// Converts the `LogLevel` to the equivalent `LogLevelFilter`. + #[inline] + pub fn to_log_level_filter(self) -> LogLevelFilter { + LogLevelFilter::from_usize(self as usize).unwrap() + } +} + +#[repr(usize)] +#[derive(Clone, Copy, Eq, Debug)] +pub enum LogLevelFilter { + Off, + Error, + Warn, + Info, + Debug, + Trace, +} + +impl PartialEq for LogLevelFilter { + #[inline] + fn eq(&self, other: &LogLevelFilter) -> bool { + *self as usize == *other as usize + } +} + +impl PartialEq for LogLevelFilter { + #[inline] + fn eq(&self, other: &LogLevel) -> bool { + other.eq(self) + } +} + +impl PartialOrd for LogLevelFilter { + #[inline] + fn partial_cmp(&self, other: &LogLevelFilter) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialOrd for LogLevelFilter { + #[inline] + fn partial_cmp(&self, other: &LogLevel) -> Option { + other.partial_cmp(self).map(|x| x.reverse()) + } +} + +impl Ord for LogLevelFilter { + #[inline] + fn cmp(&self, other: &LogLevelFilter) -> cmp::Ordering { + (*self as usize).cmp(&(*other as usize)) + } +} + +impl FromStr for LogLevelFilter { + type Err = (); + fn from_str(level: &str) -> Result { + LOG_LEVEL_NAMES + .iter() + .position(|&name| name.eq_ignore_ascii_case(level)) + .map(|p| LogLevelFilter::from_usize(p).unwrap()) + .ok_or(()) + } +} + +impl LogLevelFilter { + fn from_usize(u: usize) -> Option { + match u { + 0 => Some(LogLevelFilter::Off), + 1 => Some(LogLevelFilter::Error), + 2 => Some(LogLevelFilter::Warn), + 3 => Some(LogLevelFilter::Info), + 4 => Some(LogLevelFilter::Debug), + 5 => Some(LogLevelFilter::Trace), + _ => None, + } + } + /// Returns the most verbose logging level filter. + #[inline] + pub fn max() -> LogLevelFilter { + LogLevelFilter::Trace + } + + /// Converts `self` to the equivalent `LogLevel`. + /// + /// Returns `None` if `self` is `LogLevelFilter::Off`. + #[inline] + pub fn to_log_level(self) -> Option { + LogLevel::from_usize(self as usize) + } +} + +/// Metadata about a log message. 
+#[derive(Debug)] +pub struct Metadata { + pub level: LogLevel, + pub target: &'static str, +} + +#[derive(Debug)] +pub struct LogDirective { + name: Option, + level: LogLevelFilter, +} + +#[derive(thiserror::Error, Debug)] +pub enum LogSpecParseError { + #[error("Too many '/'s: {0}")] + TooManySlashes(String), + #[error("Too many '='s: {0}")] + TooManyEquals(String), + #[error("Invalid log level: {0}")] + InvalidLogLevel(String), +} + +pub fn parse_logging_spec(spec: &str) -> (Vec, Vec) { + let mut dirs = Vec::new(); + let mut errors = Vec::new(); + + let mut parts = spec.split('/'); + let mods = parts.next(); + let _ = parts.next(); + if parts.next().is_some() { + errors.push(LogSpecParseError::TooManySlashes(spec.to_string())); + } + if let Some(m) = mods { + for s in m.split(',') { + if s.is_empty() { + continue; + } + let mut parts = s.split('='); + let (log_level, name) = + match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) { + (Some(part0), None, None) => { + // if the single argument is a log-level string or number, + // treat that as a global fallback + match part0.parse() { + Ok(num) => (num, None), + Err(_) => (LogLevelFilter::max(), Some(part0)), + } + } + (Some(part0), Some(""), None) => (LogLevelFilter::max(), Some(part0)), + (Some(part0), Some(part1), None) => match part1.parse() { + Ok(num) => (num, Some(part0)), + _ => { + errors.push(LogSpecParseError::InvalidLogLevel(s.to_string())); + continue; + } + }, + _ => { + errors.push(LogSpecParseError::TooManyEquals(s.to_string())); + continue; + } + }; + dirs.push(LogDirective { + name: name.map(|s| s.to_string()), + level: log_level, + }); + } + } + + for error in &errors { + println!("{error:?}"); + } + (dirs, errors) +} + +/// start the logger with all logs and access logs on stdout +pub fn setup_default_logging(log_colored: bool, log_level: &str, tag: &str) { + setup_logging("stdout", log_colored, None, None, None, log_level, tag) +} + +/// start the logger from config (takes RUST_LOG into account) +pub fn setup_logging_with_config(config: &Config, tag: &str) { + setup_logging( + &config.log_target, + config.log_colored, + config.access_logs_target.as_deref(), + config.access_logs_format.clone(), + config.access_logs_colored, + &config.log_level, + tag, + ) +} + +/// start the logger, after: +/// +/// - determining logging backends +/// - taking RUST_LOG into account +pub fn setup_logging( + log_target: &str, + log_colored: bool, + access_logs_target: Option<&str>, + access_logs_format: Option, + access_logs_colored: Option, + log_level: &str, + tag: &str, +) { + let backend = target_to_backend(log_target); + let access_backend = access_logs_target.map(target_to_backend); + + Logger::init( + tag.to_string(), + env::var("RUST_LOG").as_deref().unwrap_or(log_level), + backend, + log_colored, + access_backend, + access_logs_format, + access_logs_colored, + ); +} + +pub fn target_to_backend(target: &str) -> LoggerBackend { + if target == "stdout" { + LoggerBackend::Stdout(stdout()) + } else if let Some(addr) = target.strip_prefix("udp://") { + match addr.to_socket_addrs() { + Err(e) => { + println!("invalid log target configuration ({e:?}): {target}"); + LoggerBackend::Stdout(stdout()) + } + Ok(mut addrs) => { + let socket = UdpSocket::bind(("0.0.0.0", 0)).unwrap(); + LoggerBackend::Udp(socket, addrs.next().unwrap()) + } + } + } else if let Some(addr) = target.strip_prefix("tcp://") { + match addr.to_socket_addrs() { + Err(e) => { + println!("invalid log target configuration ({e:?}): {target}"); + 
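(Editor's aside, not part of the changeset: a minimal usage sketch for `parse_logging_spec` and `target_to_backend` above, assuming the re-exports from `command/src/logging/mod.rs`; the filter string and file path are illustrative.)

```rust
use sozu_command_lib::logging::{parse_logging_spec, target_to_backend, LOGGER};

fn apply_new_filter(filter: &str) {
    // A bare level acts as a global fallback; `module=level` pairs narrow it per module
    // path prefix, e.g. "warn,sozu_lib::http=debug".
    let (directives, errors) = parse_logging_spec(filter);
    if errors.is_empty() {
        LOGGER.with(|logger| logger.borrow_mut().set_directives(directives));
    }

    // Targets understood by `target_to_backend`: "stdout", "udp://host:port",
    // "tcp://host:port", "unix:///path/to/socket", "file:///path/to/file";
    // anything else prints an error and falls back to stdout.
    let _backend = target_to_backend("file:///var/logs/sozu.log");
}
```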
LoggerBackend::Stdout(stdout()) + } + Ok(mut addrs) => LoggerBackend::Tcp(TcpStream::connect(addrs.next().unwrap()).unwrap()), + } + } else if let Some(addr) = target.strip_prefix("unix://") { + let path = Path::new(addr); + if !path.exists() { + println!("invalid log target configuration: {addr} is not a file"); + LoggerBackend::Stdout(stdout()) + } else { + let mut dir = env::temp_dir(); + let s: String = thread_rng() + .sample_iter(&Alphanumeric) + .take(12) + .map(|c| c as char) + .collect(); + dir.push(s); + let socket = UnixDatagram::bind(dir).unwrap(); + socket.connect(path).unwrap(); + LoggerBackend::Unix(socket) + } + } else if let Some(addr) = target.strip_prefix("file://") { + let path = Path::new(addr); + match OpenOptions::new().create(true).append(true).open(path) { + Ok(file) => LoggerBackend::File(crate::writer::MultiLineWriter::new(file)), + Err(e) => { + println!( + "invalid log target configuration: could not open file at {addr} (error: {e:?})" + ); + LoggerBackend::Stdout(stdout()) + } + } + } else { + println!("invalid log target configuration: {target}"); + LoggerBackend::Stdout(stdout()) + } +} + +#[macro_export] +macro_rules! _prompt_log { + { + logger: $logger:expr, + is_access: $access:expr, + condition: $cond:expr, + prompt: [$($p:tt)*], + standard: {$($std:tt)*}$(,)? + } => { + $crate::_prompt_log!{ + logger: $logger, + is_access: $access, + condition: $cond, + prompt: [$($p)*], + standard: {$($std)*}, + colored: {$($std)*}, + } + }; + { + logger: $logger:expr, + is_access: $access:expr, + condition: $cond:expr, + prompt: [$($p:tt)*], + standard: { + formats: [$($std_fmt:tt)*], + args: [$($std_args:expr),*$(,)?]$(,)? + }, + colored: { + formats: [$($col_fmt:tt)*], + args: @$(,)? + }$(,)? + } => { + $crate::_prompt_log!{ + logger: $logger, + is_access: $access, + condition: $cond, + prompt: [$($p)*], + standard: { + formats: [$($std_fmt)*], + args: [$($std_args),*], + }, + colored: { + formats: [$($col_fmt)*], + args: [$($std_args),*], + }, + } + }; + { + logger: $logger:expr, + is_access: $access:expr, + condition: $cond:expr, + prompt: [$now:expr, $precise_time:expr, $pid:expr, $lvl:expr, $tag:expr$(,)?], + standard: { + formats: [$($std_fmt:tt)*], + args: [$($std_args:expr),*$(,)?]$(,)? + }, + colored: { + formats: [$($col_fmt:tt)*], + args: [$($col_args:expr),*$(,)?]$(,)? + }$(,)? 
+ } => { + if $cond { + $crate::_prompt_log!(@bind [$logger, concat!("{} \x1b[2m{} \x1b[;2;1m{} {} \x1b[0;1m{}\x1b[m\t", $($col_fmt)*)] [$now, $precise_time, $pid, $lvl.as_str($access, true), $tag] $($col_args),*) + } else { + $crate::_prompt_log!(@bind [$logger, concat!("{} {} {} {} {}\t", $($std_fmt)*)] [$now, $precise_time, $pid, $lvl.as_str($access, false), $tag] $($std_args),*) + } + }; + (@bind [$logger:expr, $fmt:expr] [$($bindings:expr),*] $arg:expr $(, $args:expr)*) => {{ + let binding = &$arg; + $crate::_prompt_log!(@bind [$logger, $fmt] [$($bindings),* , binding] $($args),*) + }}; + (@bind [$logger:expr, $fmt:expr] [$($bindings:expr),*]) => { + $logger(format_args!($fmt, $($bindings),*)) + }; +} + +#[derive(Clone, Copy, Debug)] +pub struct LogLineCachedState(u8); +const LOG_LINE_ENABLED: u8 = 1 << 7; + +impl LogLineCachedState { + pub const fn new() -> Self { + Self(0) + } + #[inline(always)] + pub fn version(&self) -> u8 { + self.0 & !LOG_LINE_ENABLED + } + #[inline(always)] + pub fn enabled(&self) -> bool { + self.0 & LOG_LINE_ENABLED != 0 + } + #[inline(always)] + pub fn set(&mut self, version: u8, enabled: bool) { + self.0 = version; + if enabled { + self.0 |= LOG_LINE_ENABLED + } + } +} + +#[macro_export] +macro_rules! _log_enabled { + ($logger:expr, $lvl:expr) => {{ + let logger = $logger.borrow_mut(); + let enable = if cfg!(feature = "logs-cache") { + static mut LOG_LINE_CACHED_STATE: $crate::logging::LogLineCachedState = + $crate::logging::LogLineCachedState::new(); + logger.cached_enabled( + unsafe { &mut LOG_LINE_CACHED_STATE }, + $crate::logging::Metadata { + level: $lvl, + target: module_path!(), + }, + ) + } else { + logger.enabled($crate::logging::Metadata { + level: $lvl, + target: module_path!(), + }) + }; + if !enable { + return; + } + logger + }}; +} + +#[macro_export] +macro_rules! _log { + ($lvl:expr, $format:expr $(, $args:expr)*) => {{ + $crate::logging::LOGGER.with(|logger| { + let mut logger = $crate::_log_enabled!(logger, $lvl); + let (pid, tag, inner) = logger.split(); + let (now, precise_time) = $crate::logging::now(); + + $crate::_prompt_log!{ + logger: |args| inner.log(args), + is_access: false, + condition: inner.colored, + prompt: [now, precise_time, pid, $lvl, tag], + standard: { + formats: [$format, '\n'], + args: [$($args),*] + } + }; + }) + }}; +} + +#[macro_export] +macro_rules! _log_access { + ($lvl:expr, $($request_record_fields:tt)*) => {{ + $crate::logging::LOGGER.with(|logger| { + let mut logger = $crate::_log_enabled!(logger, $lvl); + let (pid, tag, inner) = logger.split(); + let (now, precise_time) = $crate::logging::now(); + + inner.log_access( + $crate::_structured_access_log!([$crate::logging::RequestRecord] + pid, tag, now, precise_time, level: $lvl, $($request_record_fields)* + )); + }) + }}; +} + +#[macro_export] +macro_rules! _structured_access_log { + ([$($struct_name:tt)+] $($fields:tt)*) => {{ + $($struct_name)+ {$( + $fields + )*} + }}; +} + +#[macro_export] +/// dynamically chose between info_access and error_access +macro_rules! log_access { + ($error:expr, $($request_record_fields:tt)*) => { + let lvl = if $error { + $crate::logging::LogLevel::Error + } else { + $crate::logging::LogLevel::Info + }; + _log_access!(lvl, $($request_record_fields)*); + }; +} + +/// log a failure concerning an HTTP or TCP request +#[macro_export] +macro_rules! 
error_access { + ($($request_record_fields:tt)*) => { + $crate::_log_access!($crate::logging::LogLevel::Error, $($request_record_fields)*); + }; +} + +/// log the success of an HTTP or TCP request +#[macro_export] +macro_rules! info_access { + ($($request_record_fields:tt)*) => { + $crate::_log_access!($crate::logging::LogLevel::Info, $($request_record_fields)*); + }; +} + +/// log an error with Sōzu's custom log stack +#[macro_export] +macro_rules! error { + ($format:expr $(, $args:expr)* $(,)?) => { + $crate::_log!($crate::logging::LogLevel::Error, $format $(, $args)*) + }; +} + +/// log a warning with Sōzu’s custom log stack +#[macro_export] +macro_rules! warn { + ($format:expr $(, $args:expr)* $(,)?) => { + $crate::_log!($crate::logging::LogLevel::Warn, $format $(, $args)*) + }; +} + +/// log an info with Sōzu’s custom log stack +#[macro_export] +macro_rules! info { + ($format:expr $(, $args:expr)* $(,)?) => { + $crate::_log!($crate::logging::LogLevel::Info, $format $(, $args)*) + }; +} + +/// log a debug with Sōzu’s custom log stack +#[macro_export] +macro_rules! debug { + ($format:expr $(, $args:expr)* $(,)?) => {{ + #[cfg(any(debug_assertions, feature = "logs-debug", feature = "logs-trace"))] + $crate::_log!($crate::logging::LogLevel::Debug, concat!("{}\t", $format), module_path!() $(, $args)*); + #[cfg(not(any(debug_assertions, feature = "logs-trace")))] + {$( let _ = $args; )*} + }}; +} + +/// log a trace with Sōzu’s custom log stack +#[macro_export] +macro_rules! trace { + ($format:expr $(, $args:expr)* $(,)?) => {{ + #[cfg(any(debug_assertions, feature = "logs-trace"))] + $crate::_log!($crate::logging::LogLevel::Trace, concat!("{}\t", $format), module_path!() $(, $args)*); + #[cfg(not(any(debug_assertions, feature = "logs-trace")))] + {$( let _ = $args; )*} + }}; +} + +/// write a log with a "FIXME" prefix on an info level +#[macro_export] +macro_rules! fixme { + ($(, $args:expr)* $(,)?) => { + $crate::_log!($crate::logging::LogLevel::Info, "FIXME: {}:{} in {}: {}", file!(), line!(), module_path!() $(, $args)*) + }; +} + +pub struct CompatLogger; + +impl From for LogLevel { + fn from(lvl: log::Level) -> Self { + match lvl { + log::Level::Error => LogLevel::Error, + log::Level::Warn => LogLevel::Warn, + log::Level::Info => LogLevel::Info, + log::Level::Debug => LogLevel::Debug, + log::Level::Trace => LogLevel::Trace, + } + } +} + +impl log::Log for CompatLogger { + fn enabled(&self, _: &log::Metadata) -> bool { + true + } + + fn log(&self, record: &log::Record) { + LOGGER.with(|logger| { + let mut logger = logger.borrow_mut(); + if !logger.compat_enabled(record.metadata()) { + return; + } + let (pid, tag, inner) = logger.split(); + let (now, precise_time) = now(); + crate::_prompt_log! { + logger: |args| inner.log(args), + is_access: false, + condition: inner.colored, + prompt: [ + now, precise_time, pid, LogLevel::from(record.level()), tag + ], + standard: { + formats: ["{}\n"], + args: [record.args()] + } + }; + }) + } + + fn flush(&self) {} +} + +/// start a logger used in test environment +#[macro_export] +macro_rules! 
setup_test_logger { + () => { + $crate::logging::Logger::init( + module_path!().to_string(), + "error", + $crate::logging::LoggerBackend::Stdout(::std::io::stdout()), + false, + None, + None, + None, + ); + }; +} + +pub struct Rfc3339Time { + pub inner: ::time::OffsetDateTime, +} + +pub fn now() -> (Rfc3339Time, i128) { + let t = time::OffsetDateTime::now_utc(); + ( + Rfc3339Time { inner: t }, + (t - time::OffsetDateTime::UNIX_EPOCH).whole_nanoseconds(), + ) +} diff --git a/command/src/logging/mod.rs b/command/src/logging/mod.rs new file mode 100644 index 000000000..d38804c4a --- /dev/null +++ b/command/src/logging/mod.rs @@ -0,0 +1,17 @@ +//! Sōzu logs, optimized for performance +//! +//! Instead of relying on well-known logging or tracing solutions, +//! Sōzu has its own logging stack that prioritizes CPU performance +//! +//! The `logs-cache` flag, on of that, saves lookup time by storing +//! the ENABLED status of each log call-site, in a `static mut`. +//! The gain in performance is measurable with a lot of log directives, +//! but mostly negligible, since CPUs are clever enough to recognize such patterns. + +pub mod access_logs; +pub mod display; +#[macro_use] +pub mod logs; + +pub use crate::logging::access_logs::*; +pub use crate::logging::logs::*; diff --git a/command/src/parser.rs b/command/src/parser.rs index fa4ecd0fe..1cdc13f4c 100644 --- a/command/src/parser.rs +++ b/command/src/parser.rs @@ -1,5 +1,4 @@ use nom::{ - self, bytes::streaming::is_not, character::complete::char, combinator::map_res, diff --git a/command/src/proto/display.rs b/command/src/proto/display.rs index b123a0437..c4cfd7185 100644 --- a/command/src/proto/display.rs +++ b/command/src/proto/display.rs @@ -1,6 +1,6 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, - fmt::{Display, Formatter}, + fmt::{self, Display, Formatter}, net::SocketAddr, }; @@ -8,21 +8,24 @@ use prettytable::{cell, row, Row, Table}; use time::format_description; use x509_parser::time::ASN1Time; -use crate::proto::{ - command::{ - filtered_metrics, request::RequestType, response_content::ContentType, AggregatedMetrics, - AvailableMetrics, CertificateAndKey, CertificateSummary, CertificatesWithFingerprints, - ClusterMetrics, FilteredMetrics, ListOfCertificatesByAddress, ListedFrontends, - ListenersList, QueryCertificatesFilters, RequestCounts, Response, ResponseContent, - ResponseStatus, RunState, TlsVersion, WorkerInfos, WorkerMetrics, WorkerResponses, +use crate::{ + proto::{ + command::{ + filtered_metrics, protobuf_endpoint, request::RequestType, + response_content::ContentType, AggregatedMetrics, AvailableMetrics, CertificateAndKey, + CertificateSummary, CertificatesWithFingerprints, ClusterMetrics, FilteredMetrics, + HttpEndpoint, ListOfCertificatesByAddress, ListedFrontends, ListenersList, + ProtobufEndpoint, QueryCertificatesFilters, RequestCounts, Response, ResponseContent, + ResponseStatus, RunState, SocketAddress, TlsVersion, WorkerInfos, WorkerMetrics, + WorkerResponses, + }, + DisplayError, }, - DisplayError, + AsString, }; -use super::command::SocketAddress; - impl Display for CertificateAndKey { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let versions = self.versions.iter().fold(String::new(), |acc, tls_v| { acc + " " + match TlsVersion::try_from(*tls_v) { @@ -40,13 +43,13 @@ impl Display for CertificateAndKey { } impl Display for CertificateSummary { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut 
Formatter<'_>) -> fmt::Result { write!(f, "{}:\t{}", self.fingerprint, self.domain) } } impl Display for QueryCertificatesFilters { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { if let Some(d) = self.domain.clone() { write!(f, "domain:{}", d) } else if let Some(fp) = self.fingerprint.clone() { @@ -945,7 +948,32 @@ fn create_cluster_table(headers: Vec<&str>, data: &BTreeMap) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { write!(f, "{}", SocketAddr::from(self.clone())) } } + +impl Display for ProtobufEndpoint { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match &self.inner { + Some(protobuf_endpoint::Inner::Http(HttpEndpoint { + method, + authority, + path, + status, + .. + })) => write!( + f, + "{} {} {} -> {}", + authority.as_string_or("-"), + method.as_string_or("-"), + path.as_string_or("-"), + status.as_string_or("-"), + ), + Some(protobuf_endpoint::Inner::Tcp(_)) => { + write!(f, "-") + } + None => Ok(()), + } + } +} diff --git a/command/src/request.rs b/command/src/request.rs index 63826b095..f43e7c621 100644 --- a/command/src/request.rs +++ b/command/src/request.rs @@ -8,6 +8,7 @@ use std::{ }; use prost::{DecodeError, Message}; +use rusty_ulid::Ulid; use crate::{ proto::{ @@ -66,17 +67,20 @@ impl Request { | RequestType::RemoveBackend(_) | RequestType::SoftStop(_) | RequestType::HardStop(_) - | RequestType::Status(_) - | RequestType::QueryClusterById(_) - | RequestType::QueryClustersByDomain(_) - | RequestType::QueryClustersHashes(_) - | RequestType::QueryMetrics(_) - | RequestType::Logging(_) => { + | RequestType::Status(_) => { proxy_destination.to_http_proxy = true; proxy_destination.to_https_proxy = true; proxy_destination.to_tcp_proxy = true; } + // handled at worker level prior to this call + RequestType::ConfigureMetrics(_) + | RequestType::QueryMetrics(_) + | RequestType::Logging(_) + | RequestType::QueryClustersHashes(_) + | RequestType::QueryClusterById(_) + | RequestType::QueryClustersByDomain(_) => {} + // the Add***Listener and other Listener orders will be handled separately // by the notify_proxys function, so we don't give them destinations RequestType::AddHttpsListener(_) @@ -85,7 +89,6 @@ impl Request { | RequestType::RemoveListener(_) | RequestType::ActivateListener(_) | RequestType::DeactivateListener(_) - | RequestType::ConfigureMetrics(_) | RequestType::ReturnListenSockets(_) => {} // These won't ever reach a worker anyway @@ -283,3 +286,23 @@ impl From for Uint128 { Uint128 { low, high } } } + +impl From for Uint128 { + fn from(value: i128) -> Self { + Uint128::from(value as u128) + } +} + +impl From for Uint128 { + fn from(value: Ulid) -> Self { + let (low, high) = value.into(); + Uint128 { low, high } + } +} + +impl From for Ulid { + fn from(value: Uint128) -> Self { + let Uint128 { low, high } = value; + Ulid::from((low, high)) + } +} diff --git a/command/src/response.rs b/command/src/response.rs index 04677817c..341dbc7ae 100644 --- a/command/src/response.rs +++ b/command/src/response.rs @@ -1,4 +1,4 @@ -use std::{cmp::Ordering, collections::BTreeMap, default::Default, fmt, net::SocketAddr}; +use std::{cmp::Ordering, collections::BTreeMap, fmt, net::SocketAddr}; use crate::{ proto::command::{ @@ -130,7 +130,7 @@ pub fn is_default_path_rule(p: &PathRule) -> bool { PathRuleKind::try_from(p.kind) == Ok(PathRuleKind::Prefix) && p.value.is_empty() } -impl std::fmt::Display for PathRule { +impl fmt::Display for PathRule { fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { match PathRuleKind::try_from(self.kind) { Ok(PathRuleKind::Prefix) => write!(f, "prefix '{}'", self.value), diff --git a/command/src/scm_socket.rs b/command/src/scm_socket.rs index 9218a5644..85c373073 100644 --- a/command/src/scm_socket.rs +++ b/command/src/scm_socket.rs @@ -23,9 +23,9 @@ pub enum ScmSocketError { blocking: bool, error: std::io::Error, }, - #[error("could not send message per SCM socket: {0}")] + #[error("could not send message for SCM socket: {0}")] Send(String), - #[error("could not send message per SCM socket: {0}")] + #[error("could not receive message for SCM socket: {0}")] Receive(String), #[error("invalid char set: {0}")] InvalidCharSet(String), @@ -264,7 +264,7 @@ impl Listeners { } } -fn parse_addresses(addresses: &Vec) -> Result, ScmSocketError> { +fn parse_addresses(addresses: &[String]) -> Result, ScmSocketError> { let mut parsed_addresses = Vec::new(); for address in addresses { parsed_addresses.push(address.parse::().map_err(|error| { diff --git a/command/src/state.rs b/command/src/state.rs index 30cc1dddb..86ab6237c 100644 --- a/command/src/state.rs +++ b/command/src/state.rs @@ -6,7 +6,7 @@ use std::{ fs::File, hash::{Hash, Hasher}, io::Write, - iter::{repeat, FromIterator}, + iter::repeat, net::SocketAddr, }; @@ -130,18 +130,18 @@ impl ConfigState { RequestType::RemoveBackend(backend) => self.remove_backend(backend), // This is to avoid the error message - &RequestType::Logging(_) - | &RequestType::CountRequests(_) - | &RequestType::Status(_) - | &RequestType::SoftStop(_) - | &RequestType::QueryCertificatesFromWorkers(_) - | &RequestType::QueryClusterById(_) - | &RequestType::QueryClustersByDomain(_) - | &RequestType::QueryMetrics(_) - | &RequestType::QueryClustersHashes(_) - | &RequestType::ConfigureMetrics(_) - | &RequestType::ReturnListenSockets(_) - | &RequestType::HardStop(_) => Ok(()), + RequestType::Logging(_) + | RequestType::CountRequests(_) + | RequestType::Status(_) + | RequestType::SoftStop(_) + | RequestType::QueryCertificatesFromWorkers(_) + | RequestType::QueryClusterById(_) + | RequestType::QueryClustersByDomain(_) + | RequestType::QueryMetrics(_) + | RequestType::QueryClustersHashes(_) + | RequestType::ConfigureMetrics(_) + | RequestType::ReturnListenSockets(_) + | RequestType::HardStop(_) => Ok(()), _other_request => Err(StateError::UndispatchableRequest), } @@ -382,7 +382,7 @@ impl ConfigState { let entry = self .certificates .entry(add.address.clone().into()) - .or_insert_with(HashMap::new); + .or_default(); let mut add = add.clone(); add.certificate @@ -464,7 +464,7 @@ impl ConfigState { let tcp_frontends = self .tcp_fronts .entry(front.cluster_id.clone()) - .or_insert_with(Vec::new); + .or_default(); let tcp_frontend = TcpFrontend { cluster_id: front.cluster_id.clone(), @@ -514,7 +514,7 @@ impl ConfigState { let backends = self .backends .entry(backend.cluster_id.clone()) - .or_insert_with(Vec::new); + .or_default(); // we might be modifying the sticky id or load balancing parameters backends.retain(|b| b.backend_id != backend.backend_id || b.address != backend.address); diff --git a/doc/benchmark.md b/doc/benchmark.md index fa21ad56f..9eed9ff97 100644 --- a/doc/benchmark.md +++ b/doc/benchmark.md @@ -14,7 +14,7 @@ worker only seems ideal: worker_count = 1 ``` -The HTTP and HTTPS listeners should be identical. +The HTTP and HTTPS listeners should be identical. Chose a TLS version and a cipher list for the HTTPS listener, and stick to it. 
```toml diff --git a/doc/configure.md b/doc/configure.md index f02188e92..da7272a00 100644 --- a/doc/configure.md +++ b/doc/configure.md @@ -21,7 +21,7 @@ Parameters in the global section allow you to define the global settings shared | `saved_state` | path from which sozu tries to load its state at startup | | | `log_level` | possible values are | `debug`, `trace`, `error`, `warn`, `info`| | `log_target` | possible values are | `stdout, tcp or udp address` | -| `log_access_target` | possible values are (if activated, sends access logs to a separate target) | `stdout`, `tcp` or `udp address` | +| `access_logs_target` | possible values are (if activated, sends access logs to a separate target) | `stdout`, `tcp` or `udp address` | | `command_socket` | path to the unix socket command | | | `command_buffer_size` | size, in bytes, of the buffer used by the main process to handle commands. | | | `max_command_buffer_size` | maximum size of the buffer used by the main process to handle commands. | | diff --git a/doc/debugging_strategies.md b/doc/debugging_strategies.md index 699756b46..ba0bed062 100644 --- a/doc/debugging_strategies.md +++ b/doc/debugging_strategies.md @@ -27,7 +27,7 @@ There are three configuration options related to logging: * `tcp://127.0.0.1:9876` * `unix:///var/sozu/logs` * `file:///var/logs/sozu.log` -* `log_access_target`: if activated, sends the access logs to a separate destination +* `access_logs_target`: if activated, sends the access logs to a separate destination `log_level` follows [env_logger's level directives](https://docs.rs/env_logger/0.5.13/env_logger/). Moreover, the `RUST_LOG` environment variable can be used to override the log level. diff --git a/e2e/src/mock/client.rs b/e2e/src/mock/client.rs index 2a1adae60..da0e1f242 100644 --- a/e2e/src/mock/client.rs +++ b/e2e/src/mock/client.rs @@ -36,8 +36,8 @@ impl Client { } } - /// Establish a TCP connection with its address, - /// register the yielded TCP stream, apply timeouts + /// Establish a TCP connection with its address, + /// register the yielded TCP stream, apply timeouts pub fn connect(&mut self) { let stream = TcpStream::connect(self.address).expect("could not connect"); stream diff --git a/e2e/src/mock/https_client.rs b/e2e/src/mock/https_client.rs index e46c11714..c931afaac 100644 --- a/e2e/src/mock/https_client.rs +++ b/e2e/src/mock/https_client.rs @@ -1,7 +1,6 @@ use std::{sync::Arc, time::SystemTime}; use hyper::{ - self, client::{connect::dns::GaiResolver, HttpConnector, ResponseFuture}, StatusCode, }; diff --git a/e2e/src/mock/sync_backend.rs b/e2e/src/mock/sync_backend.rs index 14712bb7f..b97d58384 100644 --- a/e2e/src/mock/sync_backend.rs +++ b/e2e/src/mock/sync_backend.rs @@ -42,13 +42,13 @@ impl Backend { } } - /// Binds itself to its address, stores the yielded TCP listener + /// Binds itself to its address, stores the yielded TCP listener pub fn connect(&mut self) { let listener = TcpListener::bind(self.address).expect("could not bind"); let timeout = Duration::from_millis(100); let timeout = libc::timeval { tv_sec: 0, - tv_usec: timeout.subsec_micros().try_into().unwrap(), + tv_usec: timeout.subsec_micros().into(), }; let listener = unsafe { let fd = listener.into_raw_fd(); diff --git a/e2e/src/sozu/worker.rs b/e2e/src/sozu/worker.rs index 7a15919b4..b3bc6605b 100644 --- a/e2e/src/sozu/worker.rs +++ b/e2e/src/sozu/worker.rs @@ -13,7 +13,7 @@ use sozu::server::Server; use sozu_command::{ channel::Channel, config::{ConfigBuilder, FileConfig}, - logging::setup_logging, + 
logging::setup_default_logging, proto::command::{ request::RequestType, AddBackend, Cluster, HardStop, LoadBalancingParams, PathRule, Request, RequestHttpFrontend, RequestTcpFrontend, ReturnListenSockets, RulePosition, @@ -133,7 +133,7 @@ impl Worker { println!("Setting up logging"); let server_job = thread::spawn(move || { - setup_logging("stdout", None, "error", &thread_name); + setup_default_logging(false, "error", &thread_name); let mut server = Server::try_new_from_config( cmd_worker_to_main, thread_scm_worker_to_main, diff --git a/e2e/src/tests/tests.rs b/e2e/src/tests/tests.rs index 36f2a171f..97618da9c 100644 --- a/e2e/src/tests/tests.rs +++ b/e2e/src/tests/tests.rs @@ -6,8 +6,8 @@ use std::{ use sozu_command_lib::{ config::{FileConfig, ListenerBuilder}, - info, log, - logging::setup_logging, + info, + logging::setup_default_logging, proto::command::{ request::RequestType, ActivateListener, AddCertificate, CertificateAndKey, ListenerType, RemoveBackend, RequestHttpFrontend, SocketAddress, @@ -631,7 +631,7 @@ pub fn try_hard_or_soft_stop(soft: bool) -> State { } fn try_http_behaviors() -> State { - setup_logging("stdout", None, "debug", "BEHAVE-OUT"); + setup_default_logging(false, "debug", "BEHAVE-OUT"); info!("starting up"); @@ -1069,7 +1069,7 @@ pub fn try_blue_geen() -> State { } pub fn try_keep_alive() -> State { - setup_logging("stdout", None, "debug", "KA-OUT"); + setup_default_logging(false, "debug", "KA-OUT"); let front_address = create_local_address(); diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 2b550e37f..5bc000fef 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -51,7 +51,6 @@ socket2 = { version = "^0.5.5", features = ["all"] } thiserror = "^1.0.56" time = "^0.3.31" once_cell = "1.19.0" - sozu-command-lib = { path = "../command", version = "^0.15.19" } [dev-dependencies] diff --git a/lib/assets/certificate-dominum.csr b/lib/assets/certificate-dominum.csr new file mode 100644 index 000000000..368d1b92f --- /dev/null +++ b/lib/assets/certificate-dominum.csr @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIEzDCCArQCAQAwXjELMAkGA1UEBhMCRlIxETAPBgNVBAgMCEhlaWxpZ2VzMQ8w +DQYDVQQHDAZOYW50ZXMxFTATBgNVBAoMDENsZXZlci1DbG91ZDEUMBIGA1UEAwwL +ZG9taW51bS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDEXCAq +ud46VcSFp+2hs6zoXCjCYUewmggq/0t7Hu5G3niCTyDu07UJambvN528/SudJF6A +mguECPS/vt4jN8ced8hln+Lg54HczNwUk+3a+kbj1A4xfmKNwRIb3p+r07DmhbsY +FcqTaKeC/mRTobxH+HC01Zvc0K48Hf+W84kuBNp2PSH3BxiuxjoFbxGAKCHkDTvW +IJfq/mYiFV/8bwNNCBesKPwTxcEd1TCS34ChTRtBxs/cYJmMb0/LRcy8/lb8EvUB +GztD6qfpoXhM3FbHBz9UD3Pi1AFLfKbaN0mPfMFYoQQvgTU8rmplVLOYPI3Sc7LC +W77tnUqAz9ojhRAvYohJA0P4oto4YiunwDfhSgh/HlxqrLH5jAWYN9yiNzOpTuAE +ivpVt/pF8l6nzR/nlVWPrJ0HR42QVnpWhtSUbsOrmUgr5gXTswN+VD5Osju7/gEv +NoW4hLNm3ozxH8O8U0nUMuJs5CRqhZCGWpvQq5PSm+ROsxe5Wb29nDh3RR3dUuun +nh0kLzDuPKvpnmEwt5fcUzoLEJWuh7q3bKwkkke+qE6c56+n9VQ5P+Kdr7a5Isn4 +YDdGSLwjVqKW89CFeXUA3RL2Spf9LBtcYrzMBSK/h3CKMx8jORR9VVoxrve99YBU +Fum3QfvQ+Uc2zMX2VVB90BUJNQqR3xUjMcmv4QIDAQABoCkwJwYJKoZIhvcNAQkO +MRowGDAWBgNVHREEDzANggtkb21pbnVtLmNvbTANBgkqhkiG9w0BAQsFAAOCAgEA +TbfwbQD9zP7fS9vpkBr3oK+CRWcyEar0vkxd/bK92+sIEZmaMnivxLet7A7VX0Mq +8rzqi+Ynvc5HP+O51XZ9TnXKpjmOgDPpIObKyirt3g/fHoE9J3vVfLiOzJ7nzw6H +94jPZrZOXKY9ERjDYldGRpDqGPyxmkJY7WvhR5p2dFZGcsR7V05Et+WqZMI0xLbW +wQ1FVh5/qCAZZM+y90TLO3OxRPsqihDSfugLxhWhibQHM9ozoatzP+bH2IskT4S3 +tV6zq9ZrvTWpLzEWK8QWWyPeu6VlnT1vVUqml536RGtGWjEVOFYULc4tVuh7Ymjl +1Z8Csz44G9/GXWkz/KTapHiCJhh0JmkkxDUGdxpaLLKvaJP0vNxxYmmX2lkbcNpF +hxB8b9biqZms7sWzHUZ84OWGPsgSwv7eoOte5PfP8PNGYXfWR2YHg3z0r60JjFe2 
+7/nOozg/nNwzj4QX6Y0aDRT7cE014o8XEOLYIykuZ+2THH2juCwXqlZyGbY2anjn +Hr/tj/GcQPq30ozytzkZwGUUcghgaoszXAN7udF2zlLPADA79xzKhjmJgt/dm4JA +QOQ1caSskALLUrdvxLFN6NhrzM7b58jwl0i0XQ7tbH15uGHXct/E1VzLWISwksVd +URMs4x1xCZ3YVJGfqt3ZJpaahaEZVzcu01qewjs6+kc= +-----END CERTIFICATE REQUEST----- diff --git a/lib/assets/certificate-dominum.pem b/lib/assets/certificate-dominum.pem new file mode 100644 index 000000000..bcd4ccbc8 --- /dev/null +++ b/lib/assets/certificate-dominum.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIULMhT/P5RZKIbSQsKusAFPKmE+I8wDQYJKoZIhvcNAQEL +BQAwXjELMAkGA1UEBhMCRlIxETAPBgNVBAgMCEhlaWxpZ2VzMQ8wDQYDVQQHDAZO +YW50ZXMxFTATBgNVBAoMDENsZXZlci1DbG91ZDEUMBIGA1UEAwwLZG9taW51bS5j +b20wHhcNMjQwMjEzMTUyODIwWhcNMjYwMjEyMTUyODIwWjBeMQswCQYDVQQGEwJG +UjERMA8GA1UECAwISGVpbGlnZXMxDzANBgNVBAcMBk5hbnRlczEVMBMGA1UECgwM +Q2xldmVyLUNsb3VkMRQwEgYDVQQDDAtkb21pbnVtLmNvbTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAMRcICq53jpVxIWn7aGzrOhcKMJhR7CaCCr/S3se +7kbeeIJPIO7TtQlqZu83nbz9K50kXoCaC4QI9L++3iM3xx53yGWf4uDngdzM3BST +7dr6RuPUDjF+Yo3BEhven6vTsOaFuxgVypNop4L+ZFOhvEf4cLTVm9zQrjwd/5bz +iS4E2nY9IfcHGK7GOgVvEYAoIeQNO9Ygl+r+ZiIVX/xvA00IF6wo/BPFwR3VMJLf +gKFNG0HGz9xgmYxvT8tFzLz+VvwS9QEbO0Pqp+mheEzcVscHP1QPc+LUAUt8pto3 +SY98wVihBC+BNTyuamVUs5g8jdJzssJbvu2dSoDP2iOFEC9iiEkDQ/ii2jhiK6fA +N+FKCH8eXGqssfmMBZg33KI3M6lO4ASK+lW3+kXyXqfNH+eVVY+snQdHjZBWelaG +1JRuw6uZSCvmBdOzA35UPk6yO7v+AS82hbiEs2bejPEfw7xTSdQy4mzkJGqFkIZa +m9Crk9Kb5E6zF7lZvb2cOHdFHd1S66eeHSQvMO48q+meYTC3l9xTOgsQla6Hurds +rCSSR76oTpznr6f1VDk/4p2vtrkiyfhgN0ZIvCNWopbz0IV5dQDdEvZKl/0sG1xi +vMwFIr+HcIozHyM5FH1VWjGu9731gFQW6bdB+9D5RzbMxfZVUH3QFQk1CpHfFSMx +ya/hAgMBAAGjOTA3MBYGA1UdEQQPMA2CC2RvbWludW0uY29tMB0GA1UdDgQWBBSC +LZzCFrfpEi3e4cX25wi9Z6osbDANBgkqhkiG9w0BAQsFAAOCAgEAe2DKKvolPqY0 +Ptswq8CgRzoJX2ITZnR2keNilse573q9rzi1sVamd6hX+b+A1qbJL9UVC+EtFkx1 +QZh1BuTuXMNoma2wHPFYP9uZ9UQmg7BDlBPoTvG8UkHdMc5I4YsM2loUcr9pm/WN +ziiiD0AqZ2tapo/AX9ez4Zrkm9y2R2ZE5SvArLY2pBooluA3gMlE2IfbxskhZ2kk +SpT6xrmkvG1Qi9NxQQnUREKf10DNtw+gaFog+Mfap5fxAlfhBu24/AAed2zVdaZx +cEdqgdONr8QeYw7a5Vlo+PJGo1Vf/iMd1kXy4VZ7gS3bwcYDxXGogBaIQWxrzWNt +Mx+zOXjLEzgfKMID59ReU3d0w5wYqz1lLdAmPFhqXNdOeJRu7TwNJ1M73HjxXirg +LI8ZZGaoY9ClJQik7bozvMUEBhOu2uZ6iUVnME9xy0vEXLfRb4LxXmI809lDeWjj +0+FWQN37rC4mIXmxl0yTFv1W1rlQ0QJCtAIJkOj2B7jssjTfpzPZFjAr2oJuja1E ++hMJCn+uwqCPTdrO2DyzztKl1PG1jVA8kedS0yNTtycFyRdPUoa622YyWmz3QOVy +QnRGdETy5KRXbW1UUstC26Eoz+FCVy5r9AA7oZEfny5xRuhYYReEk/bzba9U5aFo +TWVXb6P4rHpcTaCSqL0PWViO55c+p2g= +-----END CERTIFICATE----- diff --git a/lib/assets/certificate-dominum2.csr b/lib/assets/certificate-dominum2.csr new file mode 100644 index 000000000..06c25fdca --- /dev/null +++ b/lib/assets/certificate-dominum2.csr @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIE2zCCAsMCAQAwXDELMAkGA1UEBhMCRlIxDzANBgNVBAgMBkZyYW5jZTEPMA0G +A1UEBwwGTmFudGVzMRUwEwYDVQQKDAxDbGV2ZXItQ2xvdWQxFDASBgNVBAMMC2Rv +bWludW0uY29tMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxFwgKrne +OlXEhaftobOs6FwowmFHsJoIKv9Lex7uRt54gk8g7tO1CWpm7zedvP0rnSRegJoL +hAj0v77eIzfHHnfIZZ/i4OeB3MzcFJPt2vpG49QOMX5ijcESG96fq9Ow5oW7GBXK +k2ingv5kU6G8R/hwtNWb3NCuPB3/lvOJLgTadj0h9wcYrsY6BW8RgCgh5A071iCX +6v5mIhVf/G8DTQgXrCj8E8XBHdUwkt+AoU0bQcbP3GCZjG9Py0XMvP5W/BL1ARs7 +Q+qn6aF4TNxWxwc/VA9z4tQBS3ym2jdJj3zBWKEEL4E1PK5qZVSzmDyN0nOywlu+ +7Z1KgM/aI4UQL2KISQND+KLaOGIrp8A34UoIfx5caqyx+YwFmDfcojczqU7gBIr6 +Vbf6RfJep80f55VVj6ydB0eNkFZ6VobUlG7Dq5lIK+YF07MDflQ+TrI7u/4BLzaF +uISzZt6M8R/DvFNJ1DLibOQkaoWQhlqb0KuT0pvkTrMXuVm9vZw4d0Ud3VLrp54d +JC8w7jyr6Z5hMLeX3FM6CxCVroe6t2ysJJJHvqhOnOevp/VUOT/ina+2uSLJ+GA3 
+Rki8I1ailvPQhXl1AN0S9kqX/SwbXGK8zAUiv4dwijMfIzkUfVVaMa73vfWAVBbp +t0H70PlHNszF9lVQfdAVCTUKkd8VIzHJr+ECAwEAAaA6MDgGCSqGSIb3DQEJDjEr +MCkwJwYDVR0RBCAwHoILZG9taW51bS5jb22CD3d3dy5kb21pbnVtLmNvbTANBgkq +hkiG9w0BAQsFAAOCAgEAadwTKf3FZ2F7idKQj1xU/2B09mCLQd3ZjbcPfESVg8bb +plyP13777y0j8/QGxIQFheVr5M3RxwQ7b77QYLHZxN5CdXUOKD4fCLC9xd7BEM0Q +RjYivsDB1G6rbf+czrkYiJBpZ28VePl6QkiD1x+sVWY9OgZMB+oKuRTiMktot3kc +PFerzhP7Dyr8EO9DiZHOW3LaN9O9HM9B+4YGyAuIKSCupNXq6318pQSi01RkSe1E +So/GXmrD9bhTfj6n62Lb3kQhahipWbUZW9D61kCOE+6WVxJGEvaE8USZRwrW5gYn +JvA9D22nybFEtK4hnOYriCK0ARVLZj89vJGw78ueZFSGeO6CXcRtxZJfUkSxp6o4 +/q10XhWRX4av4Lg8Dvk/EwXI3K97BDI8NXcjJpLAURs82DP01LT2ojeItvi/634E +uEvKzxUdXePSEaJmWWSzI1FzOoyPTlgAllFBV4b73y3t3pTKV+cqCihzXR1xbzA8 +tmZw6UuveICrfddjXPLW8Rfn12W1a3e/xlcjEpx8voEzrOsRNfqO7e1JIvesYE9h ++LJ4ht4Z6AAYX/Hdys5hCRatz3UTsea68ZKGmN1mHYhPqgMuPIauprirVuCwHZgv +8CIEs0+LEso8npTeo1kGqEroihUD7fUAouEs9w6HD11z4s3plSV9zR+HyhaBtoo= +-----END CERTIFICATE REQUEST----- diff --git a/lib/assets/certificate-dominum2.pem b/lib/assets/certificate-dominum2.pem new file mode 100644 index 000000000..66f33a177 --- /dev/null +++ b/lib/assets/certificate-dominum2.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIUVsVc5NHYJGg1F/o1em6KbFts7KcwDQYJKoZIhvcNAQEL +BQAwXDELMAkGA1UEBhMCRlIxDzANBgNVBAgMBkZyYW5jZTEPMA0GA1UEBwwGTmFu +dGVzMRUwEwYDVQQKDAxDbGV2ZXItQ2xvdWQxFDASBgNVBAMMC2RvbWludW0uY29t +MB4XDTI0MDIxMzE1MjkyM1oXDTI2MDIxMjE1MjkyM1owXDELMAkGA1UEBhMCRlIx +DzANBgNVBAgMBkZyYW5jZTEPMA0GA1UEBwwGTmFudGVzMRUwEwYDVQQKDAxDbGV2 +ZXItQ2xvdWQxFDASBgNVBAMMC2RvbWludW0uY29tMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEAxFwgKrneOlXEhaftobOs6FwowmFHsJoIKv9Lex7uRt54 +gk8g7tO1CWpm7zedvP0rnSRegJoLhAj0v77eIzfHHnfIZZ/i4OeB3MzcFJPt2vpG +49QOMX5ijcESG96fq9Ow5oW7GBXKk2ingv5kU6G8R/hwtNWb3NCuPB3/lvOJLgTa +dj0h9wcYrsY6BW8RgCgh5A071iCX6v5mIhVf/G8DTQgXrCj8E8XBHdUwkt+AoU0b +QcbP3GCZjG9Py0XMvP5W/BL1ARs7Q+qn6aF4TNxWxwc/VA9z4tQBS3ym2jdJj3zB +WKEEL4E1PK5qZVSzmDyN0nOywlu+7Z1KgM/aI4UQL2KISQND+KLaOGIrp8A34UoI +fx5caqyx+YwFmDfcojczqU7gBIr6Vbf6RfJep80f55VVj6ydB0eNkFZ6VobUlG7D +q5lIK+YF07MDflQ+TrI7u/4BLzaFuISzZt6M8R/DvFNJ1DLibOQkaoWQhlqb0KuT +0pvkTrMXuVm9vZw4d0Ud3VLrp54dJC8w7jyr6Z5hMLeX3FM6CxCVroe6t2ysJJJH +vqhOnOevp/VUOT/ina+2uSLJ+GA3Rki8I1ailvPQhXl1AN0S9kqX/SwbXGK8zAUi +v4dwijMfIzkUfVVaMa73vfWAVBbpt0H70PlHNszF9lVQfdAVCTUKkd8VIzHJr+EC +AwEAAaNKMEgwJwYDVR0RBCAwHoILZG9taW51bS5jb22CD3d3dy5kb21pbnVtLmNv +bTAdBgNVHQ4EFgQUgi2cwha36RIt3uHF9ucIvWeqLGwwDQYJKoZIhvcNAQELBQAD +ggIBAEbtoqWD7xyE/+8MtA6cAbO/pDbvSipleCjmXvjSw6+5QKxzFO2iX/Ipx/9w +k11F2UF10FehWE3NRXt0xVN/NwWcmVb4LHg7DDL0gvLjgryLkqv8ncD8DwYMxZEj +x5zKTZX6aEjVbAJ4z0y3UpgzMoaFOlpNAImx6FbbfTr6WL7GDln5VG81rOSSzzL+ +9ded8e5OQxW/Q7Mgdhcm/9wa/HOEzB/2GpzFw9TlJtgWGT9SRLeXSdnrwicbWF36 +KcUZWYbidtyCxzj4YR1oXGevYW5HsgtvwxO5mqPt/pHKlIwhctJ9yKKTMMida/iT +kzJ0EyxnLUcZA7wiLZVJvjNIj+kVO4pi2e5lwyKkgl9KAilIQKwbugPvksYSlG4e +lXZf5q6CfVOawpTo6Af3uJYblJNTDbdBQB+Krt6JUHFhCPaOqJB28gxbt67wLI5j +UQ+EqEfgQ1PFuR6kDa5avj9wd/BSYBbCoiSK7wjGkTJkzUBSmR7lUENAj4KLOlc0 +RkpbGTA3+o5B08svTF1qMVVcr/XKqAKLpl8OoQsIDoiLobhZT901oiN/IVmqMLLV +3T2MAMX193fT/C07+rFBT8Z/PmHJ1FXadkesgAlSOlCzxHIXVMF2zz9+b7639O3k +6Ng7gkIBTHnI1AWXsMBWISp3wiWUhF3cwg5IFYcgl8NLqHBz +-----END CERTIFICATE----- diff --git a/lib/assets/dominum.conf b/lib/assets/dominum.conf new file mode 100644 index 000000000..1280ad3e1 --- /dev/null +++ b/lib/assets/dominum.conf @@ -0,0 +1,23 @@ +# based on https://gist.github.com/croxton/ebfb5f3ac143cd86542788f972434c96 +[ req ] +default_bits = 4096 +distinguished_name = req_distinguished_name +req_extensions = req_ext + +[ 
req_distinguished_name ] +countryName = Country Name (2 letter code) +countryName_default = FR +stateOrProvinceName = State or Province Name (full name) +stateOrProvinceName_default = Heiliges +localityName = Locality Name (eg, city) +localityName_default = Nantes +organizationName = Organization Name (eg, company) +organizationName_default = Clever-Cloud +commonName = Common Name (eg, server fqdn) +commonName_default = dominum.com + +[ req_ext ] +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = dominum.com diff --git a/lib/assets/dominum2.conf b/lib/assets/dominum2.conf new file mode 100644 index 000000000..dfbb7f069 --- /dev/null +++ b/lib/assets/dominum2.conf @@ -0,0 +1,25 @@ +# based on https://gist.github.com/croxton/ebfb5f3ac143cd86542788f972434c96 +[ req ] +default_bits = 4096 +distinguished_name = req_distinguished_name +req_extensions = req_ext + +[ req_distinguished_name ] +countryName = Country Name (2 letter code) +countryName_default = FR +stateOrProvinceName = State or Province Name (full name) +stateOrProvinceName_default = France +localityName = Locality Name (eg, city) +localityName_default = Nantes +organizationName = Organization Name (eg, company) +organizationName_default = Clever-Cloud +commonName = Common Name (eg, server fqdn) +commonName_default = dominum.com + +[ req_ext ] +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = dominum.com +DNS.2 = www.dominum.com + diff --git a/lib/examples/http.rs b/lib/examples/http.rs index edb3f8a18..460f6c4b4 100644 --- a/lib/examples/http.rs +++ b/lib/examples/http.rs @@ -1,17 +1,13 @@ -extern crate time; - #[macro_use] extern crate sozu_command_lib; use std::{collections::BTreeMap, thread}; use anyhow::Context; - use sozu_command_lib::{ channel::Channel, config::ListenerBuilder, - info, - logging::setup_logging, + logging::setup_default_logging, proto::command::{ request::RequestType, AddBackend, Cluster, LoadBalancingAlgorithms, LoadBalancingParams, PathRule, RequestHttpFrontend, RulePosition, SocketAddress, WorkerRequest, WorkerResponse, @@ -19,7 +15,7 @@ use sozu_command_lib::{ }; fn main() -> anyhow::Result<()> { - setup_logging("stdout", None, "info", "EXAMPLE"); + setup_default_logging(true, "info", "EXAMPLE"); info!("starting up"); diff --git a/lib/examples/https.rs b/lib/examples/https.rs index 0db656841..281de4723 100644 --- a/lib/examples/https.rs +++ b/lib/examples/https.rs @@ -1,18 +1,16 @@ -#![allow(unused_variables, unused_must_use)] +#![allow(unused_must_use)] #[macro_use] extern crate sozu_lib; #[macro_use] extern crate sozu_command_lib; -extern crate time; use std::thread; use anyhow::Context; - use sozu_command_lib::{ channel::Channel, config::ListenerBuilder, - logging::setup_logging, + logging::setup_default_logging, proto::command::{ request::RequestType, AddBackend, AddCertificate, CertificateAndKey, LoadBalancingParams, PathRule, RequestHttpFrontend, SocketAddress, WorkerRequest, @@ -20,7 +18,7 @@ use sozu_command_lib::{ }; fn main() -> anyhow::Result<()> { - setup_logging("stdout", None, "info", "EXAMPLE"); + setup_default_logging(true, "info", "EXAMPLE"); info!("MAIN\tstarting up"); @@ -194,6 +192,7 @@ fn main() -> anyhow::Result<()> { info!("MAIN\tTLS -> {:?}", command2.read_message()); let _ = jg.join(); + let _ = jg2.join(); info!("MAIN\tgood bye"); Ok(()) } diff --git a/lib/examples/tcp.rs b/lib/examples/tcp.rs index 18bd99392..acad9ec04 100644 --- a/lib/examples/tcp.rs +++ b/lib/examples/tcp.rs @@ -1,15 +1,14 @@ -#![allow(unused_variables, unused_must_use)] +#![allow(unused_must_use)] 
extern crate sozu_lib; #[macro_use] extern crate sozu_command_lib; -extern crate time; use std::thread; use anyhow::Context; use sozu_command_lib::{ channel::Channel, - logging::setup_logging, + logging::setup_default_logging, proto::command::{ request::RequestType, AddBackend, LoadBalancingParams, RequestTcpFrontend, SocketAddress, TcpListenerConfig, WorkerRequest, @@ -17,7 +16,7 @@ use sozu_command_lib::{ }; fn main() -> anyhow::Result<()> { - setup_logging("stdout", None, "info", "EXAMPLE"); + setup_default_logging(true, "info", "EXAMPLE"); info!("starting up"); @@ -25,14 +24,13 @@ fn main() -> anyhow::Result<()> { Channel::generate(1000, 10000).with_context(|| "should create a channel")?; let jg = thread::spawn(move || { - let max_listeners = 500; let max_buffers = 500; let buffer_size = 16384; let listener = TcpListenerConfig { address: SocketAddress::new_v4(127, 0, 0, 1, 8080), ..Default::default() }; - setup_logging("stdout", None, "debug", "TCP"); + setup_default_logging(true, "debug", "TCP"); sozu_lib::tcp::testing::start_tcp_worker(listener, max_buffers, buffer_size, channel); }); diff --git a/lib/src/backends.rs b/lib/src/backends.rs index 1d054b162..dcc0678cb 100644 --- a/lib/src/backends.rs +++ b/lib/src/backends.rs @@ -213,7 +213,7 @@ impl BackendMap { pub fn add_backend(&mut self, cluster_id: &str, backend: Backend) { self.backends .entry(cluster_id.to_string()) - .or_insert_with(BackendList::new) + .or_default() .add_backend(backend); } @@ -351,7 +351,7 @@ impl BackendMap { pub fn get_or_create_backend_list_for_cluster(&mut self, cluster_id: &str) -> &mut BackendList { self.backends .entry(cluster_id.to_string()) - .or_insert_with(BackendList::new) + .or_default() } } diff --git a/lib/src/http.rs b/lib/src/http.rs index 666754ce5..8df5b4fa4 100644 --- a/lib/src/http.rs +++ b/lib/src/http.rs @@ -17,7 +17,7 @@ use rusty_ulid::Ulid; use time::{Duration, Instant}; use sozu_command::{ - logging, + logging::CachedTags, proto::command::{ request::RequestType, Cluster, HttpListenerConfig, ListenerType, RemoveListener, RequestHttpFrontend, WorkerRequest, WorkerResponse, @@ -42,7 +42,7 @@ use crate::{ server::{ListenToken, SessionManager}, socket::server_bind, timer::TimeoutContainer, - AcceptError, CachedTags, FrontendFromRequestError, L7ListenerHandler, L7Proxy, ListenerError, + AcceptError, FrontendFromRequestError, L7ListenerHandler, L7Proxy, ListenerError, ListenerHandler, Protocol, ProxyConfiguration, ProxyError, ProxySession, SessionIsToBeClosed, SessionMetrics, SessionResult, StateMachineBuilder, StateResult, }; @@ -245,7 +245,7 @@ impl HttpSession { Protocol::HTTP, http.context.id, http.context.session_address, - Some(ws_context), + ws_context, ); pipe.frontend_readiness.event = http.frontend_readiness.event; @@ -517,7 +517,7 @@ impl HttpProxy { } pub fn get_listener(&self, token: &Token) -> Option>> { - self.listeners.get(token).map(Clone::clone) + self.listeners.get(token).cloned() } pub fn remove_listener(&mut self, remove: RemoveListener) -> Result<(), ProxyError> { @@ -708,14 +708,6 @@ impl HttpProxy { Ok(()) } - - pub fn logging(&mut self, logging_filter: String) -> Result<(), ProxyError> { - logging::LOGGER.with(|l| { - let directives = logging::parse_logging_spec(&logging_filter); - l.borrow_mut().set_directives(directives); - }); - Ok(()) - } } impl HttpListener { @@ -844,13 +836,6 @@ impl ProxyConfiguration for HttpProxy { debug!("{} status", request_id); Ok(()) } - Some(RequestType::Logging(logging_filter)) => { - debug!( - "{} changing logging filter to {}", - 
request_id, logging_filter - ); - self.logging(logging_filter) - } other_command => { debug!( "{} unsupported message for HTTP proxy, ignoring: {:?}", @@ -890,7 +875,7 @@ impl ProxyConfiguration for HttpProxy { let listener = self .listeners .get(&Token(listener_token.0)) - .map(Clone::clone) + .cloned() .ok_or(AcceptError::IoError)?; if let Err(e) = frontend_sock.set_nodelay(true) { diff --git a/lib/src/https.rs b/lib/src/https.rs index 82dd18154..e1dfe01d3 100644 --- a/lib/src/https.rs +++ b/lib/src/https.rs @@ -37,7 +37,6 @@ use time::{Duration, Instant}; use sozu_command::{ certificate::Fingerprint, config::DEFAULT_CIPHER_SUITES, - logging, proto::command::{ request::RequestType, response_content::ContentType, AddCertificate, CertificateSummary, CertificatesByAddress, Cluster, HttpsListenerConfig, ListOfCertificatesByAddress, @@ -372,7 +371,7 @@ impl HttpsSession { Protocol::HTTP, http.context.id, http.context.session_address, - Some(ws_context), + ws_context, ); pipe.frontend_readiness.event = http.frontend_readiness.event; @@ -871,17 +870,6 @@ impl HttpsProxy { Ok(()) } - pub fn logging( - &mut self, - logging_filter: String, - ) -> Result, ProxyError> { - logging::LOGGER.with(|l| { - let directives = logging::parse_logging_spec(&logging_filter); - l.borrow_mut().set_directives(directives); - }); - Ok(None) - } - pub fn query_all_certificates(&mut self) -> Result, ProxyError> { let certificates = self .listeners @@ -1322,13 +1310,6 @@ impl ProxyConfiguration for HttpsProxy { debug!("{} status", request_id); Ok(None) } - RequestType::Logging(logging_filter) => { - debug!( - "{} changing logging filter to {}", - request_id, logging_filter - ); - self.logging(logging_filter) - } RequestType::QueryCertificatesFromWorkers(filters) => { if let Some(domain) = filters.domain { debug!("{} query certificate for domain {}", request_id, domain); diff --git a/lib/src/lib.rs b/lib/src/lib.rs index dfbd5ef7f..ae0d3635e 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -115,7 +115,7 @@ //! //! We can now define a frontend. A frontend is a way to recognize a request and match //! it to a `cluster_id`, depending on the hostname and the beginning of the URL path. -//! The `address` field must match the one of the HTTP listener we defined before: +//! The `address` field must match the one of the HTTP listener we defined before: //! //! ``` //! use std::collections::BTreeMap; @@ -209,20 +209,16 @@ //! Here is the complete example for reference, it matches the `examples/http.rs` example: //! //! ``` -//! extern crate time; -//! //! #[macro_use] //! extern crate sozu_command_lib; //! //! use std::{collections::BTreeMap, env, io::stdout, thread}; //! //! use anyhow::Context; -//! //! use sozu_command_lib::{ //! channel::Channel, //! config::ListenerBuilder, -//! info, -//! logging::setup_logging, +//! logging::setup_default_logging, //! proto::command::{ //! request::RequestType, AddBackend, Cluster, LoadBalancingAlgorithms, LoadBalancingParams, //! PathRule, Request, RequestHttpFrontend, RulePosition, SocketAddress,WorkerRequest, @@ -230,7 +226,7 @@ //! }; //! //! fn main() -> anyhow::Result<()> { -//! setup_logging("stdout", None, "info", "EXAMPLE"); +//! setup_default_logging(true, "info", "EXAMPLE"); //! //! info!("starting up"); //! 
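(Editor's aside, not part of the changeset: a minimal sketch contrasting the two setup helpers used in the examples above, based on the signatures introduced in `command/src/logging/logs.rs`; the UDP address and option values are illustrative.)

```rust
use sozu_command_lib::logging::{setup_default_logging, setup_logging, AccessLogFormat};

fn main() {
    // Simplest form: all logs and access logs on stdout, colored, "info" level, "EXAMPLE" tag.
    setup_default_logging(true, "info", "EXAMPLE");

    // Full form (pick one: the logger only initializes once): regular logs on stdout,
    // access logs shipped to a separate target in the protobuf format.
    setup_logging(
        "stdout",                        // log_target
        true,                            // log_colored
        Some("udp://127.0.0.1:9876"),    // access_logs_target
        Some(AccessLogFormat::Protobuf), // access_logs_format
        Some(false),                     // access_logs_colored
        "info",                          // log_level; RUST_LOG overrides it when set
        "EXAMPLE",                       // tag
    );
}
```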
@@ -318,7 +314,6 @@ extern crate quickcheck; pub mod util; #[macro_use] pub mod metrics; -mod logs; pub mod backends; pub mod features; @@ -360,10 +355,11 @@ use time::{Duration, Instant}; use tls::CertificateResolverError; use sozu_command::{ + logging::{CachedTags, LogContext}, proto::command::{Cluster, ListenerType, RequestHttpFrontend, WorkerRequest, WorkerResponse}, ready::Ready, state::ClusterId, - ObjectKind, + AsStr, ObjectKind, }; use crate::{backends::BackendMap, router::Route}; @@ -525,21 +521,6 @@ macro_rules! StateMachineBuilder { } } -pub struct CachedTags { - pub tags: BTreeMap, - pub concatenated: String, -} -impl CachedTags { - fn new(tags: BTreeMap) -> Self { - let concatenated = tags - .iter() - .map(|(k, v)| format!("{k}={v}")) - .collect::>() - .join(", "); - Self { tags, concatenated } - } -} - pub trait ListenerHandler { fn get_addr(&self) -> &SocketAddr; @@ -655,7 +636,7 @@ pub enum ListenerError { RemoveFrontend(RouterError), } -/// Returned by the HTTP, HTTPS and TCP proxies +/// Returned by the HTTP, HTTPS and TCP proxies #[derive(thiserror::Error, Debug)] pub enum ProxyError { #[error("error while soft stopping {proxy_protocol} proxy: {error}")] @@ -1054,6 +1035,41 @@ impl SessionMetrics { _ => None, } } + + pub fn register_end_of_session(&self, context: &LogContext) { + let response_time = self.response_time(); + let service_time = self.service_time(); + + if let Some(cluster_id) = context.cluster_id { + time!( + "response_time", + cluster_id, + response_time.whole_milliseconds() + ); + time!( + "service_time", + cluster_id, + service_time.whole_milliseconds() + ); + } + time!("response_time", response_time.whole_milliseconds()); + time!("service_time", service_time.whole_milliseconds()); + + if let Some(backend_id) = self.backend_id.as_ref() { + if let Some(backend_response_time) = self.backend_response_time() { + record_backend_metrics!( + context.cluster_id.as_str_or("-"), + backend_id, + backend_response_time.whole_milliseconds(), + self.backend_connection_time(), + self.backend_bin, + self.backend_bout + ); + } + } + + incr!("access_logs.count", context.cluster_id, context.backend_id); + } } /// exponentially weighted moving average with high sensibility to latency bursts diff --git a/lib/src/logs.rs b/lib/src/logs.rs deleted file mode 100644 index f28bfce04..000000000 --- a/lib/src/logs.rs +++ /dev/null @@ -1,271 +0,0 @@ -use std::{fmt, net::SocketAddr}; - -use rusty_ulid::Ulid; -use time::Duration; - -use crate::{protocol::http::parser::Method, SessionMetrics}; - -#[derive(Debug)] -pub struct LogContext<'a> { - pub request_id: Ulid, - pub cluster_id: Option<&'a str>, - pub backend_id: Option<&'a str>, -} - -impl fmt::Display for LogContext<'_> { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!( - f, - "{} {} {} \t", - self.request_id, - self.cluster_id.unwrap_or("-"), - self.backend_id.unwrap_or("-") - ) - } -} - -pub trait AsStr { - fn as_str_or(&self, default: &'static str) -> &str; -} -pub trait AsString { - fn as_str_or(&self, default: &'static str) -> String; -} - -impl AsStr for Option<&str> { - fn as_str_or(&self, default: &'static str) -> &str { - match self { - None => default, - Some(s) => s, - } - } -} -impl AsString for Option { - fn as_str_or(&self, default: &'static str) -> String { - match self { - None => default.to_string(), - Some(s) => s.to_string(), - } - } -} -impl AsString for Option { - fn as_str_or(&self, default: &'static str) -> String { - match self { - None => default.to_string(), - Some(s) => 
s.to_string(), - } - } -} -impl AsString for Option<&Method> { - fn as_str_or(&self, default: &'static str) -> String { - match self { - None => default.to_string(), - Some(s) => s.to_string(), - } - } -} - -pub struct LogDuration(pub Option); - -impl fmt::Display for LogDuration { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.0 { - None => write!(f, "-"), - Some(duration) => { - let secs = duration.whole_seconds(); - if secs >= 10 { - return write!(f, "{secs}s"); - } - - let ms = duration.whole_milliseconds(); - if ms < 10 { - let us = duration.whole_microseconds(); - if us >= 10 { - return write!(f, "{us}μs"); - } - - let ns = duration.whole_nanoseconds(); - return write!(f, "{ns}ns"); - } - - write!(f, "{ms}ms") - } - } - } -} - -#[derive(Debug)] -pub enum Endpoint<'a> { - Http { - method: Option<&'a Method>, - authority: Option<&'a str>, - path: Option<&'a str>, - status: Option, - reason: Option<&'a str>, - }, - Tcp { - context: Option<&'a str>, - }, -} - -impl fmt::Display for Endpoint<'_> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Endpoint::Http { - authority, - method, - path, - status, - .. - } => write!( - f, - "{} {} {} -> {}", - authority.as_str_or("-"), - method.as_str_or("-"), - path.as_str_or("-"), - status.as_str_or("-"), - ), - Endpoint::Tcp { context } => write!(f, "{}", context.as_str_or("-")), - } - } -} - -#[derive(Debug)] -pub struct RequestRecord<'a> { - pub error: Option<&'a str>, - pub context: LogContext<'a>, - pub session_address: Option, - pub backend_address: Option, - pub protocol: &'a str, - pub endpoint: Endpoint<'a>, - pub tags: Option<&'a str>, - pub client_rtt: Option, - pub server_rtt: Option, - pub metrics: &'a SessionMetrics, - pub user_agent: Option, -} - -impl RequestRecord<'_> { - pub fn log(self) { - let context = &self.context; - let cluster_id = context.cluster_id; - let tags = self.tags; - - let protocol = self.protocol; - let session_address = self.session_address; - let backend_address = self.backend_address; - let endpoint = &self.endpoint; - let mut user_agent = self.user_agent; - - let metrics = self.metrics; - // let backend_response_time = metrics.backend_response_time(); - // let backend_connection_time = metrics.backend_connection_time(); - // let backend_bin = metrics.backend_bin; - // let backend_bout = metrics.backend_bout; - let response_time = metrics.response_time(); - let service_time = metrics.service_time(); - // let wait_time = metrics.wait_time; - let client_rtt = self.client_rtt; - let server_rtt = self.server_rtt; - - if let Some(cluster_id) = cluster_id { - time!( - "response_time", - cluster_id, - response_time.whole_milliseconds() - ); - time!( - "service_time", - cluster_id, - service_time.whole_milliseconds() - ); - } - time!("response_time", response_time.whole_milliseconds()); - time!("service_time", service_time.whole_milliseconds()); - - if let Some(backend_id) = metrics.backend_id.as_ref() { - if let Some(backend_response_time) = metrics.backend_response_time() { - record_backend_metrics!( - cluster_id.as_str_or("-"), - backend_id, - backend_response_time.whole_milliseconds(), - metrics.backend_connection_time(), - metrics.backend_bin, - metrics.backend_bout - ); - } - } - - let (tags, ua_sep, user_agent) = match (tags, &mut user_agent) { - (None, None) => ("-", "", ""), - (Some(tags), None) => (tags, "", ""), - (None, Some(ua)) => { - prepare_user_agent(ua); - ("", "user-agent=", ua.as_str()) - } - (Some(tags), Some(ua)) => { - prepare_user_agent(ua); - 
(tags, ", user-agent=", ua.as_str()) - } - }; - - match self.error { - None => { - info_access!( - "{}{} -> {} \t{}/{}/{}/{} \t{} -> {} \t {}{}{} {} {}", - context, - session_address.as_str_or("X"), - backend_address.as_str_or("X"), - LogDuration(Some(response_time)), - LogDuration(Some(service_time)), - LogDuration(client_rtt), - LogDuration(server_rtt), - metrics.bin, - metrics.bout, - tags, - ua_sep, - user_agent, - protocol, - endpoint - ); - incr!( - "access_logs.count", - self.context.cluster_id, - self.context.backend_id - ); - } - Some(message) => error_access!( - "{}{} -> {} \t{}/{}/{}/{} \t{} -> {} \t {}{}{} {} {} | {}", - context, - session_address.as_str_or("X"), - backend_address.as_str_or("X"), - LogDuration(Some(response_time)), - LogDuration(Some(service_time)), - LogDuration(client_rtt), - LogDuration(server_rtt), - metrics.bin, - metrics.bout, - tags, - ua_sep, - user_agent, - protocol, - endpoint, - message - ), - } - } -} - -fn prepare_user_agent(ua: &mut String) { - let mut ua_bytes = std::mem::take(ua).into_bytes(); - for c in &mut ua_bytes { - if *c == b' ' { - *c = b'_'; - } - } - if let Some(last) = ua_bytes.last_mut() { - if *last == b',' { - *last = b'!' - } - } - *ua = unsafe { String::from_utf8_unchecked(ua_bytes) }; -} diff --git a/lib/src/metrics/local_drain.rs b/lib/src/metrics/local_drain.rs index 7a8c4e113..061bd8608 100644 --- a/lib/src/metrics/local_drain.rs +++ b/lib/src/metrics/local_drain.rs @@ -121,14 +121,13 @@ enum MetricMeta { pub struct LocalClusterMetrics { /// metric_name -> metric value cluster: BTreeMap, - /// backends: Vec, } impl LocalClusterMetrics { fn to_filtered_metrics( &self, - metric_names: &Vec, + metric_names: &[String], ) -> Result { let cluster = self .cluster @@ -182,7 +181,7 @@ pub struct LocalBackendMetrics { impl LocalBackendMetrics { fn to_filtered_metrics( &self, - metric_names: &Vec, + metric_names: &[String], ) -> Result { let filtered_backend_metrics = self .metrics @@ -306,7 +305,7 @@ impl LocalDrain { pub fn dump_all_metrics( &mut self, - metric_names: &Vec, + metric_names: &[String], ) -> Result { Ok(WorkerMetrics { proxy: self.dump_proxy_metrics(metric_names), @@ -316,7 +315,7 @@ impl LocalDrain { pub fn dump_proxy_metrics( &mut self, - metric_names: &Vec, + metric_names: &[String], ) -> BTreeMap { self.proxy_metrics .iter() @@ -333,7 +332,7 @@ impl LocalDrain { pub fn dump_cluster_metrics( &mut self, - metric_names: &Vec, + metric_names: &[String], ) -> Result, MetricError> { let mut cluster_data = BTreeMap::new(); @@ -348,7 +347,7 @@ impl LocalDrain { fn metrics_of_one_cluster( &self, cluster_id: &str, - metric_names: &Vec, + metric_names: &[String], ) -> Result { let aggregated = self .cluster_metrics @@ -363,7 +362,7 @@ impl LocalDrain { fn metrics_of_one_backend( &self, backend_id: &str, - metric_names: &Vec, + metric_names: &[String], ) -> Result { for cluster_metrics in self.cluster_metrics.values() { if let Some(backend_metrics) = cluster_metrics @@ -383,8 +382,8 @@ impl LocalDrain { fn query_clusters( &mut self, - cluster_ids: &Vec, - metric_names: &Vec, + cluster_ids: &[String], + metric_names: &[String], ) -> Result { debug!("Querying cluster with ids: {:?}", cluster_ids); let mut clusters: BTreeMap = BTreeMap::new(); @@ -405,8 +404,8 @@ impl LocalDrain { fn query_backends( &mut self, - backend_ids: &Vec, - metric_names: &Vec, + backend_ids: &[String], + metric_names: &[String], ) -> Result { let mut clusters: BTreeMap = BTreeMap::new(); diff --git a/lib/src/pool.rs b/lib/src/pool.rs index 
0429e823b..9e95641f4 100644 --- a/lib/src/pool.rs +++ b/lib/src/pool.rs @@ -13,8 +13,6 @@ use std::{ sync::atomic::{AtomicUsize, Ordering}, }; -use poule; - static BUFFER_COUNT: AtomicUsize = AtomicUsize::new(0); pub struct Pool { diff --git a/lib/src/protocol/kawa_h1/mod.rs b/lib/src/protocol/kawa_h1/mod.rs index 1d1b5a34f..d41ca6930 100644 --- a/lib/src/protocol/kawa_h1/mod.rs +++ b/lib/src/protocol/kawa_h1/mod.rs @@ -13,13 +13,13 @@ use mio::{net::TcpStream, Interest, Token}; use rusty_ulid::Ulid; use sozu_command::{ config::MAX_LOOP_ITERATIONS, + logging::EndpointRecord, proto::command::{Event, EventKind, ListenerType}, }; use time::{Duration, Instant}; use crate::{ backends::{Backend, BackendError}, - logs::{Endpoint, LogContext, RequestRecord}, pool::{Checkout, Pool}, protocol::{ http::{editor::HttpContext, parser::Method}, @@ -29,13 +29,15 @@ use crate::{ router::Route, server::{push_event, CONN_RETRIES}, socket::{stats::socket_rtt, SocketHandler, SocketResult, TransportProtocol}, - sozu_command::ready::Ready, + sozu_command::{logging::LogContext, ready::Ready}, timer::TimeoutContainer, AcceptError, BackendConnectAction, BackendConnectionError, BackendConnectionStatus, L7ListenerHandler, L7Proxy, ListenerHandler, Protocol, ProxySession, Readiness, RetrieveClusterError, SessionIsToBeClosed, SessionMetrics, SessionResult, StateResult, }; +use super::pipe::WebSocketContext; + /// Generic Http representation using the Kawa crate using the Checkout of Sozu as buffer type GenericHttpStream = kawa::Kawa; @@ -763,6 +765,20 @@ impl Http EndpointRecord { + let status = match self.status { + SessionStatus::Normal => self.context.status, + SessionStatus::DefaultAnswer(answers, ..) => Some(answers.into()), + }; + EndpointRecord::Http { + method: self.context.method.as_deref(), + authority: self.context.authority.as_deref(), + path: self.context.path.as_deref(), + reason: self.context.reason.as_deref(), + status, + } + } + pub fn get_session_address(&self) -> Option { self.context .session_address @@ -798,64 +814,57 @@ impl Http String { - format!( - "{}", - Endpoint::Http { - method: self.context.method.as_ref(), - authority: self.context.authority.as_deref(), - path: self.context.path.as_deref(), - status: self.context.status, - reason: self.context.reason.as_deref(), - } - ) + pub fn websocket_context(&self) -> WebSocketContext { + WebSocketContext::Http { + method: self.context.method.clone(), + authority: self.context.authority.clone(), + path: self.context.path.clone(), + reason: self.context.reason.clone(), + status: self.context.status, + } } - pub fn log_request(&mut self, metrics: &SessionMetrics, message: Option<&str>) { + pub fn log_request(&self, metrics: &SessionMetrics, error: bool, message: Option<&str>) { let listener = self.listener.borrow(); let tags = self.context.authority.as_ref().and_then(|host| { let hostname = match host.split_once(':') { None => host, Some((hostname, _)) => hostname, }; - listener.get_concatenated_tags(hostname) + listener.get_tags(hostname) }); - let status = match self.status { - SessionStatus::Normal => self.context.status, - SessionStatus::DefaultAnswer(answers, ..) => Some(answers.into()), - }; - let user_agent = self.context.user_agent.take(); - RequestRecord { - error: message, - context: self.log_context(), + let context = self.log_context(); + metrics.register_end_of_session(&context); + + log_access! 
{ + error, + message: message, + context, session_address: self.get_session_address(), backend_address: self.get_backend_address(), protocol: self.protocol_string(), - endpoint: Endpoint::Http { - method: self.context.method.as_ref(), - authority: self.context.authority.as_deref(), - path: self.context.path.as_deref(), - status, - reason: self.context.reason.as_deref(), - }, + endpoint: self.log_endpoint(), tags, client_rtt: socket_rtt(self.front_socket()), server_rtt: self.backend_socket.as_ref().and_then(socket_rtt), - metrics, - user_agent, - } - .log(); + service_time: metrics.service_time(), + response_time: metrics.response_time(), + bytes_in: metrics.bin, + bytes_out: metrics.bout, + user_agent: self.context.user_agent.as_deref(), + }; } pub fn log_request_success(&mut self, metrics: &SessionMetrics) { save_http_status_metric(self.context.status, self.log_context()); - self.log_request(metrics, None); + self.log_request(metrics, false, None); } - pub fn log_default_answer_success(&mut self, metrics: &SessionMetrics) { - self.log_request(metrics, None); + + pub fn log_default_answer_success(&self, metrics: &SessionMetrics) { + self.log_request(metrics, false, None); } - pub fn log_request_error(&mut self, metrics: &mut SessionMetrics, message: &str) { + pub fn log_request_error(&self, metrics: &mut SessionMetrics, message: &str) { incr!("http.errors"); error!( "{} Could not process request properly got: {}", @@ -863,7 +872,7 @@ impl Http>>) { @@ -1573,6 +1582,9 @@ impl Http fmt::Result { +impl AsRef for Method { + fn as_ref(&self) -> &str { match self { - Method::Get => write!(f, "GET"), - Method::Post => write!(f, "POST"), - Method::Head => write!(f, "HEAD"), - Method::Options => write!(f, "OPTIONS"), - Method::Put => write!(f, "PUT"), - Method::Delete => write!(f, "DELETE"), - Method::Trace => write!(f, "TRACE"), - Method::Connect => write!(f, "CONNECT"), - Method::Custom(s) => write!(f, "{s}"), + Self::Get => "GET", + Self::Post => "POST", + Self::Head => "HEAD", + Self::Options => "OPTIONS", + Self::Put => "PUT", + Self::Delete => "DELETE", + Self::Trace => "TRACE", + Self::Connect => "CONNECT", + Self::Custom(custom) => custom, } } } +impl Deref for Method { + type Target = str; + + fn deref(&self) -> &Self::Target { + self.as_ref() + } +} + +impl fmt::Display for Method { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.as_ref()) + } +} + #[cfg(feature = "tolerant-http1-parser")] fn is_hostname_char(i: u8) -> bool { is_alphanumeric(i) || diff --git a/lib/src/protocol/pipe.rs b/lib/src/protocol/pipe.rs index de60906e7..2b59ae8c7 100644 --- a/lib/src/protocol/pipe.rs +++ b/lib/src/protocol/pipe.rs @@ -2,13 +2,15 @@ use std::{cell::RefCell, net::SocketAddr, rc::Rc}; use mio::{net::TcpStream, Token}; use rusty_ulid::Ulid; -use sozu_command::config::MAX_LOOP_ITERATIONS; +use sozu_command::{ + config::MAX_LOOP_ITERATIONS, + logging::{EndpointRecord, LogContext}, +}; use crate::{ backends::Backend, - logs::{Endpoint, LogContext, RequestRecord}, pool::Checkout, - protocol::SessionState, + protocol::{http::parser::Method, SessionState}, socket::{stats::socket_rtt, SocketHandler, SocketResult, TransportProtocol}, sozu_command::ready::Ready, timer::TimeoutContainer, @@ -29,6 +31,18 @@ enum ConnectionStatus { Closed, } +/// matches sozu_command_lib::logging::access_logs::EndpointRecords +pub enum WebSocketContext { + Http { + method: Option, + authority: Option, + path: Option, + status: Option, + reason: Option, + }, + Tcp, +} + pub struct Pipe { 
backend_buffer: Checkout, backend_id: Option, @@ -49,7 +63,7 @@ pub struct Pipe { protocol: Protocol, request_id: Ulid, session_address: Option, - websocket_context: Option, + websocket_context: WebSocketContext, } impl Pipe { @@ -75,7 +89,7 @@ impl Pipe { protocol: Protocol, request_id: Ulid, session_address: Option, - websocket_context: Option, + websocket_context: WebSocketContext, ) -> Pipe { let frontend_status = ConnectionStatus::Normal; let backend_status = if backend_socket.is_none() { @@ -196,28 +210,32 @@ impl Pipe { } } - pub fn log_request(&self, metrics: &SessionMetrics, message: Option<&str>) { + pub fn log_request(&self, metrics: &SessionMetrics, error: bool, message: Option<&str>) { let listener = self.listener.borrow(); - RequestRecord { - error: message, - context: self.log_context(), + let context = self.log_context(); + let endpoint = self.log_endpoint(); + metrics.register_end_of_session(&context); + log_access!( + error, + message: message, + context, session_address: self.get_session_address(), backend_address: self.get_backend_address(), protocol: self.protocol_string(), - endpoint: Endpoint::Tcp { - context: self.websocket_context.as_deref(), - }, - tags: listener.get_concatenated_tags(&listener.get_addr().to_string()), + endpoint, + tags: listener.get_tags(&listener.get_addr().to_string()), client_rtt: socket_rtt(self.front_socket()), server_rtt: self.backend_socket.as_ref().and_then(socket_rtt), - metrics, - user_agent: None, - } - .log(); + service_time: metrics.service_time(), + response_time: metrics.response_time(), + bytes_in: metrics.bin, + bytes_out: metrics.bout, + user_agent: None + ); } pub fn log_request_success(&self, metrics: &SessionMetrics) { - self.log_request(metrics, None); + self.log_request(metrics, false, None); } pub fn log_request_error(&self, metrics: &SessionMetrics, message: &str) { @@ -228,7 +246,7 @@ impl Pipe { message ); self.print_state(self.protocol_string()); - self.log_request(metrics, Some(message)); + self.log_request(metrics, true, Some(message)); } pub fn check_connections(&self) -> bool { @@ -615,6 +633,25 @@ impl Pipe { backend_id: self.backend_id.as_deref(), } } + + fn log_endpoint(&self) -> EndpointRecord { + match &self.websocket_context { + WebSocketContext::Http { + method, + authority, + path, + status, + reason, + } => EndpointRecord::Http { + method: method.as_deref(), + authority: authority.as_deref(), + path: path.as_deref(), + status: status.to_owned(), + reason: reason.as_deref(), + }, + WebSocketContext::Tcp => EndpointRecord::Tcp, + } + } } impl SessionState for Pipe { diff --git a/lib/src/protocol/proxy_protocol/expect.rs b/lib/src/protocol/proxy_protocol/expect.rs index 80f9aef7a..1452dba09 100644 --- a/lib/src/protocol/proxy_protocol/expect.rs +++ b/lib/src/protocol/proxy_protocol/expect.rs @@ -3,12 +3,14 @@ use std::{cell::RefCell, rc::Rc}; use mio::{net::TcpStream, *}; use nom::{Err, HexDisplay}; use rusty_ulid::Ulid; -use sozu_command::config::MAX_LOOP_ITERATIONS; +use sozu_command::{config::MAX_LOOP_ITERATIONS, logging::LogContext}; use crate::{ - logs::LogContext, pool::Checkout, - protocol::{pipe::Pipe, SessionResult, SessionState}, + protocol::{ + pipe::{Pipe, WebSocketContext}, + SessionResult, SessionState, + }, socket::{SocketHandler, SocketResult}, sozu_command::ready::Ready, tcp::TcpListener, @@ -183,7 +185,7 @@ impl ExpectProxyProtocol { Protocol::TCP, self.request_id, addr, - None, + WebSocketContext::Tcp, ); pipe.frontend_readiness.event = self.frontend_readiness.event; diff --git 
a/lib/src/protocol/proxy_protocol/parser.rs b/lib/src/protocol/proxy_protocol/parser.rs index 8123763c7..9ddadab58 100644 --- a/lib/src/protocol/proxy_protocol/parser.rs +++ b/lib/src/protocol/proxy_protocol/parser.rs @@ -1,7 +1,4 @@ -use std::{ - convert::From, - net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}, -}; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}; use nom::{ bytes::streaming::{tag, take}, diff --git a/lib/src/protocol/proxy_protocol/relay.rs b/lib/src/protocol/proxy_protocol/relay.rs index ace4bf0d8..594ea3819 100644 --- a/lib/src/protocol/proxy_protocol/relay.rs +++ b/lib/src/protocol/proxy_protocol/relay.rs @@ -6,7 +6,10 @@ use rusty_ulid::Ulid; use crate::{ pool::Checkout, - protocol::{pipe::Pipe, proxy_protocol::parser::parse_v2_header}, + protocol::{ + pipe::{Pipe, WebSocketContext}, + proxy_protocol::parser::parse_v2_header, + }, socket::{SocketHandler, SocketResult}, sozu_command::ready::Ready, tcp::TcpListener, @@ -192,7 +195,7 @@ impl RelayProxyProtocol { Protocol::TCP, self.request_id, addr, - None, + WebSocketContext::Tcp, ); pipe.frontend_readiness.event = self.frontend_readiness.event; diff --git a/lib/src/protocol/proxy_protocol/send.rs b/lib/src/protocol/proxy_protocol/send.rs index a608c40fb..061982509 100644 --- a/lib/src/protocol/proxy_protocol/send.rs +++ b/lib/src/protocol/proxy_protocol/send.rs @@ -10,7 +10,7 @@ use rusty_ulid::Ulid; use crate::{ pool::Checkout, protocol::{ - pipe::Pipe, + pipe::{Pipe, WebSocketContext}, proxy_protocol::header::{Command, HeaderV2, ProxyProtocolHeader}, }, socket::SocketHandler, @@ -176,7 +176,7 @@ impl SendProxyProtocol { Protocol::TCP, self.request_id, addr, - None, + WebSocketContext::Tcp, ); pipe.frontend_readiness = self.frontend_readiness; diff --git a/lib/src/protocol/rustls.rs b/lib/src/protocol/rustls.rs index 5299cc5ce..561d0c646 100644 --- a/lib/src/protocol/rustls.rs +++ b/lib/src/protocol/rustls.rs @@ -3,11 +3,11 @@ use std::{cell::RefCell, io::ErrorKind, net::SocketAddr, rc::Rc}; use mio::{net::TcpStream, Token}; use rustls::ServerConnection; use rusty_ulid::Ulid; -use sozu_command::config::MAX_LOOP_ITERATIONS; +use sozu_command::{config::MAX_LOOP_ITERATIONS, logging::LogContext}; use crate::{ - logs::LogContext, protocol::SessionState, timer::TimeoutContainer, Readiness, Ready, - SessionMetrics, SessionResult, StateResult, + protocol::SessionState, timer::TimeoutContainer, Readiness, Ready, SessionMetrics, + SessionResult, StateResult, }; pub enum TlsState { diff --git a/lib/src/retry.rs b/lib/src/retry.rs index d69935624..e9277bf7c 100644 --- a/lib/src/retry.rs +++ b/lib/src/retry.rs @@ -1,6 +1,6 @@ use std::{cmp, fmt::Debug, time}; -use rand::{self, Rng}; +use rand::Rng; #[derive(Debug, PartialEq, Eq)] pub enum RetryAction { diff --git a/lib/src/server.rs b/lib/src/server.rs index 3e158e5ec..8e0c3e01c 100644 --- a/lib/src/server.rs +++ b/lib/src/server.rs @@ -2,7 +2,6 @@ use std::{ cell::RefCell, collections::{HashSet, VecDeque}, - convert::TryFrom, io::Error as IoError, os::unix::io::{AsRawFd, FromRawFd}, rc::Rc, @@ -17,6 +16,7 @@ use time::{Duration, Instant}; use sozu_command::{ channel::Channel, + logging, proto::command::{ request::RequestType, response_content::ContentType, ActivateListener, AddBackend, CertificatesWithFingerprints, Cluster, ClusterHashes, ClusterInformations, @@ -673,7 +673,7 @@ impl Server { Some(RequestType::ReturnListenSockets(_)) => { info!("received ReturnListenSockets order"); match self.return_listen_sockets() { - Ok(_) => 
push_queue(WorkerResponse::ok(request.id.clone())), + Ok(_) => push_queue(WorkerResponse::ok(request.id)), Err(error) => push_queue(worker_response_error( request.id, format!("Could not send listeners on scm socket: {error:?}"), @@ -871,18 +871,47 @@ impl Server { } fn notify(&mut self, message: WorkerRequest) { - if let Some(RequestType::ConfigureMetrics(configuration)) = &message.content.request_type { - if let Ok(metrics_config) = MetricsConfiguration::try_from(*configuration) { + match &message.content.request_type { + Some(RequestType::ConfigureMetrics(configuration)) => { + match MetricsConfiguration::try_from(*configuration) { + Ok(metrics_config) => { + METRICS.with(|metrics| { + (*metrics.borrow_mut()).configure(&metrics_config); + push_queue(WorkerResponse::ok(message.id)); + }); + } + Err(e) => { + error!("Error configuring metrics: {}", e); + push_queue(WorkerResponse::error(message.id, e)); + } + } + return; + } + Some(RequestType::QueryMetrics(query_metrics_options)) => { METRICS.with(|metrics| { - (*metrics.borrow_mut()).configure(&metrics_config); - - push_queue(WorkerResponse::ok(message.id.clone())); + match (*metrics.borrow_mut()).query(query_metrics_options) { + Ok(c) => push_queue(WorkerResponse::ok_with_content(message.id, c)), + Err(e) => { + error!("Error querying metrics: {}", e); + push_queue(WorkerResponse::error(message.id, e)) + } + } }); return; } - } - - match &message.content.request_type { + Some(RequestType::Logging(logging_filter)) => { + info!( + "{} changing logging filter to {}", + message.id, logging_filter + ); + // there should not be any errors as it was already parsed by the main process + let (directives, _errors) = logging::parse_logging_spec(logging_filter); + logging::LOGGER.with(|logger| { + logger.borrow_mut().set_directives(directives); + }); + push_queue(WorkerResponse::ok(message.id)); + return; + } Some(RequestType::QueryClustersHashes(_)) => { push_queue(WorkerResponse::ok_with_content( message.id.clone(), @@ -943,15 +972,6 @@ impl Server { // if all certificates are queried, or filtered by domain name, // the request will be handled by the https proxy } - Some(RequestType::QueryMetrics(query_metrics_options)) => { - METRICS.with(|metrics| { - match (*metrics.borrow_mut()).query(query_metrics_options) { - Ok(c) => push_queue(WorkerResponse::ok_with_content(message.id.clone(), c)), - Err(e) => error!("Error querying metrics: {}", e), - } - }); - return; - } _other_request => {} } self.notify_proxys(message); @@ -1003,10 +1023,10 @@ impl Server { match request.content.request_type { // special case for adding listeners, because we need to register a listener - Some(RequestType::AddHttpListener(ref listener)) => { + Some(RequestType::AddHttpListener(listener)) => { push_queue(self.notify_add_http_listener(&req_id, listener)); } - Some(RequestType::AddHttpsListener(ref listener)) => { + Some(RequestType::AddHttpsListener(listener)) => { push_queue(self.notify_add_https_listener(&req_id, listener)); } Some(RequestType::AddTcpListener(listener)) => { @@ -1016,10 +1036,10 @@ impl Server { debug!("{} remove {:?} listener {:?}", req_id, remove.proxy, remove); self.base_sessions_count -= 1; let response = match ListenerType::try_from(remove.proxy) { - Ok(ListenerType::Http) => self.http.borrow_mut().notify(request.clone()), - Ok(ListenerType::Https) => self.https.borrow_mut().notify(request.clone()), - Ok(ListenerType::Tcp) => self.tcp.borrow_mut().notify(request.clone()), - Err(_) => worker_response_error(req_id, "Wrong variant ListenerType"), + 
Ok(ListenerType::Http) => self.http.borrow_mut().notify(request), + Ok(ListenerType::Https) => self.https.borrow_mut().notify(request), + Ok(ListenerType::Tcp) => self.tcp.borrow_mut().notify(request), + Err(_) => WorkerResponse::error(req_id, "Wrong variant ListenerType"), }; push_queue(response); } @@ -1072,7 +1092,7 @@ impl Server { fn notify_add_http_listener( &mut self, req_id: &str, - listener: &HttpListenerConfig, + listener: HttpListenerConfig, ) -> WorkerResponse { debug!("{} add http listener {:?}", req_id, listener); @@ -1084,7 +1104,7 @@ impl Server { let entry = session_manager.slab.vacant_entry(); let token = Token(entry.key()); - match self.http.borrow_mut().add_listener(listener.clone(), token) { + match self.http.borrow_mut().add_listener(listener, token) { Ok(_token) => { entry.insert(Rc::new(RefCell::new(ListenSession { protocol: Protocol::HTTPListen, @@ -1099,7 +1119,7 @@ impl Server { fn notify_add_https_listener( &mut self, req_id: &str, - listener: &HttpsListenerConfig, + listener: HttpsListenerConfig, ) -> WorkerResponse { debug!("{} add https listener {:?}", req_id, listener); @@ -1293,12 +1313,14 @@ impl Server { let (token, mut listener) = match self.https.borrow_mut().give_back_listener(address) { Ok((token, listener)) => (token, listener), - Err(e) => return worker_response_error( - req_id, - format!( + Err(e) => { + return worker_response_error( + req_id, + format!( "Couldn't deactivate HTTPS listener at address {address:?}: {e}", ), - ), + ) + } }; if let Err(e) = self.poll.registry().deregister(&mut listener) { error!( diff --git a/lib/src/socket.rs b/lib/src/socket.rs index 25e278211..8a5a38614 100644 --- a/lib/src/socket.rs +++ b/lib/src/socket.rs @@ -633,6 +633,9 @@ pub mod stats { let info = socket_info(fd); assert!(info.is_some()); println!("{:#?}", info); - println!("rtt: {}", crate::logs::LogDuration(socket_rtt(&sock))); + println!( + "rtt: {}", + sozu_command::logging::LogDuration(socket_rtt(&sock)) + ); } } diff --git a/lib/src/tcp.rs b/lib/src/tcp.rs index 01bb33e1c..2967e798a 100644 --- a/lib/src/tcp.rs +++ b/lib/src/tcp.rs @@ -14,13 +14,18 @@ use mio::{ use rusty_ulid::Ulid; use time::{Duration, Instant}; -use sozu_command::{config::MAX_LOOP_ITERATIONS, proto::command::request::RequestType, ObjectKind}; +use sozu_command::{ + config::MAX_LOOP_ITERATIONS, + logging::{EndpointRecord, LogContext}, + proto::command::request::RequestType, + ObjectKind, +}; use crate::{ backends::{Backend, BackendMap}, - logs::{Endpoint, LogContext, RequestRecord}, pool::{Checkout, Pool}, protocol::{ + pipe::WebSocketContext, proxy_protocol::{ expect::ExpectProxyProtocol, relay::RelayProxyProtocol, send::SendProxyProtocol, }, @@ -30,7 +35,6 @@ use crate::{ server::{push_event, ListenToken, SessionManager, CONN_RETRIES, TIMER}, socket::{server_bind, stats::socket_rtt}, sozu_command::{ - logging, proto::command::{ Event, EventKind, ProxyProtocolConfig, RequestTcpFrontend, TcpListenerConfig, WorkerRequest, WorkerResponse, @@ -156,7 +160,7 @@ impl TcpSession { Protocol::TCP, request_id, frontend_address, - None, + WebSocketContext::Tcp, ); pipe.set_cluster_id(cluster_id.clone()); TcpStateMachine::Pipe(pipe) @@ -191,20 +195,24 @@ impl TcpSession { fn log_request(&self) { let listener = self.listener.borrow(); - RequestRecord { - error: None, - context: self.log_context(), + let context = self.log_context(); + self.metrics.register_end_of_session(&context); + info_access!( + message: None, + context, session_address: self.frontend_address, backend_address: None, protocol: 
"TCP", - endpoint: Endpoint::Tcp { context: None }, - tags: listener.get_concatenated_tags(&listener.get_addr().to_string()), + endpoint: EndpointRecord::Tcp, + tags: listener.get_tags(&listener.get_addr().to_string()), client_rtt: socket_rtt(self.state.front_socket()), server_rtt: None, - metrics: &self.metrics, user_agent: None, - } - .log(); + service_time: self.metrics.service_time(), + response_time: self.metrics.response_time(), + bytes_in: self.metrics.bin, + bytes_out: self.metrics.bout + ); } fn front_hup(&mut self) -> SessionResult { @@ -1025,10 +1033,7 @@ impl ListenerHandler for TcpListener { } impl TcpListener { - fn new( - config: TcpListenerConfig, - token: Token, - ) -> Result { + fn new(config: TcpListenerConfig, token: Token) -> Result { Ok(TcpListener { cluster_id: None, listener: None, @@ -1184,7 +1189,10 @@ impl TcpProxy { let mut owned = listener.borrow_mut(); - let taken_listener = owned.listener.take().ok_or(ProxyError::UnactivatedListener)?; + let taken_listener = owned + .listener + .take() + .ok_or(ProxyError::UnactivatedListener)?; Ok((owned.token, taken_listener)) } @@ -1273,17 +1281,6 @@ impl ProxyConfiguration for TcpProxy { info!("{} status", message.id); WorkerResponse::ok(message.id) } - RequestType::Logging(logging_filter) => { - info!( - "{} changing logging filter to {}", - message.id, logging_filter - ); - logging::LOGGER.with(|l| { - let directives = logging::parse_logging_spec(&logging_filter); - l.borrow_mut().set_directives(directives); - }); - WorkerResponse::ok(message.id) - } RequestType::AddCluster(cluster) => { let config = ClusterConfiguration { proxy_protocol: cluster diff --git a/lib/src/timer.rs b/lib/src/timer.rs index 4ad248404..fd852a182 100644 --- a/lib/src/timer.rs +++ b/lib/src/timer.rs @@ -12,7 +12,6 @@ use crate::server::TIMER; // Conversion utilities mod convert { - use std::convert::TryFrom; use time::Duration; /// Convert a `Duration` to milliseconds, rounding up and saturating at @@ -338,7 +337,7 @@ impl Timer { .map(|state| self.set_timeout(delay_from_now, state)) } - // TODO: return Result with context + // TODO: return Result with context /// Cancel a timeout. /// /// If the timeout has not yet occurred, the return value holds the diff --git a/lib/src/tls.rs b/lib/src/tls.rs index 53e994597..b44e1b34f 100644 --- a/lib/src/tls.rs +++ b/lib/src/tls.rs @@ -4,10 +4,8 @@ //! [`CertifiedKey` format](https://docs.rs/rustls/latest/rustls/sign/struct.CertifiedKey.html), //! exposes them to the HTTPS listener for TLS handshakes. 
use std::{ - borrow::ToOwned, collections::{HashMap, HashSet}, - convert::From, - fmt::Debug, + fmt, io::BufReader, str::FromStr, sync::{Arc, Mutex}, @@ -184,7 +182,7 @@ impl CertificateResolver { self.name_fingerprint_idx .entry(new_name.to_owned()) - .or_insert_with(HashSet::new) + .or_default() .insert(cert_to_add.fingerprint.to_owned()); } @@ -390,7 +388,7 @@ impl ResolvesServerCert for MutexCertificateResolver { } } -impl Debug for MutexCertificateResolver { +impl fmt::Debug for MutexCertificateResolver { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("MutexWrappedCertificateResolver") } diff --git a/os-build/config.toml b/os-build/config.toml index cb816b152..8267341cd 100644 --- a/os-build/config.toml +++ b/os-build/config.toml @@ -31,7 +31,7 @@ log_target = "stdout" # optional different target for access logs (IP addresses, domains, URI, HTTP status, etc) # It supports the same options as log_target -# log_access_target = "file:///var/log/sozu-access.log" +# access_logs_target = "file:///var/log/sozu-access.log" # path to the unix socket file used to send commands to sozu # default value points to "sozu.sock" file in the current directory
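
Illustration (not part of the patch): the parser change above swaps `Method`'s hand-written `Display` formatting for an `AsRef<str>` impl plus `Deref<Target = str>`, with `Display` reduced to a thin wrapper over `as_ref()`. The self-contained sketch below uses a deliberately reduced `Method`-like enum (names and `main` are hypothetical, for demonstration only) to show why this lets call sites such as `log_endpoint()` and `websocket_context()` turn an `Option<Method>` into an `Option<&str>` with `as_deref()`, without allocating a `String` per access log.

```rust
use std::{fmt, ops::Deref};

// Reduced stand-in for the parser's Method enum, illustration only.
enum Method {
    Get,
    Post,
    Custom(String),
}

impl AsRef<str> for Method {
    fn as_ref(&self) -> &str {
        match self {
            Self::Get => "GET",
            Self::Post => "POST",
            // &String coerces to &str here, so no allocation is needed.
            Self::Custom(custom) => custom,
        }
    }
}

impl Deref for Method {
    type Target = str;

    fn deref(&self) -> &Self::Target {
        self.as_ref()
    }
}

impl fmt::Display for Method {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.as_ref())
    }
}

fn main() {
    let method: Option<Method> = Some(Method::Custom("PURGE".to_string()));

    // Because Method: Deref<Target = str>, Option::as_deref() yields Option<&str>,
    // which is the shape the access-log endpoint records expect.
    let as_str: Option<&str> = method.as_deref();
    assert_eq!(as_str, Some("PURGE"));

    // Display still renders the same text as before the refactor.
    println!("{}", method.unwrap());
}
```

Keeping `Display` as a wrapper over `as_ref()` preserves the previous log output while avoiding a `format!` allocation on every request that only needed a borrowed `&str`.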