Reclaim TiKV's tikv_util crate #154

Merged (5 commits) on Sep 6, 2022
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions components/raftstore/Cargo.toml
@@ -90,6 +90,7 @@ tikv_alloc = { path = "../tikv_alloc" }
tikv_util = { path = "../tikv_util", default-features = false }
time = "0.1"
tokio = { version = "1.5", features = ["sync", "rt-multi-thread"] }
tokio-timer = { git = "https://github.com/tikv/tokio", branch = "tokio-timer-hotfix" }
txn_types = { path = "../txn_types", default-features = false }
uuid = { version = "0.8.1", features = ["serde", "v4"] }
yatp = { git = "https://github.com/tikv/yatp.git", branch = "master" }
1 change: 1 addition & 0 deletions components/raftstore/src/engine_store_ffi/mod.rs
@@ -24,6 +24,7 @@ use engine_traits::{
SstReader, CF_DEFAULT, CF_LOCK, CF_WRITE,
};
use kvproto::{kvrpcpb, metapb, raft_cmdpb};
use lazy_static::lazy_static;
use protobuf::Message;
pub use read_index_helper::ReadIndexClient;

@@ -153,9 +153,7 @@ impl<ER: RaftEngine, EK: KvEngine> ReadIndex for ReadIndexClient<ER, EK> {

futures::pin_mut!(read_index_fut);
let deadline = Instant::now() + timeout;
let delay = tikv_util::timer::PROXY_TIMER_HANDLE
.delay(deadline)
.compat();
let delay = super::utils::PROXY_TIMER_HANDLE.delay(deadline).compat();
let ret = futures::future::select(read_index_fut, delay);
match block_on(ret) {
futures::future::Either::Left(_) => true,
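The hunk above only changes where the timer handle comes from; the timeout technique stays the same: pin the read-index future and race it against a delay with `futures::future::select`. Below is a minimal, self-contained sketch of that pattern, using `tokio::time::sleep` in place of the proxy's `PROXY_TIMER_HANDLE.delay(deadline).compat()`; the `tokio` crate (with the `rt`, `macros`, and `time` features) and the `futures` crate are assumed, and `read_index` is a made-up stand-in for the real request.

```rust
use std::time::Duration;

use futures::future::{select, Either};
use futures::pin_mut;

// Hypothetical stand-in for the real read-index request.
async fn read_index() -> u64 {
    tokio::time::sleep(Duration::from_millis(10)).await;
    42
}

#[tokio::main]
async fn main() {
    let timeout = Duration::from_millis(100);

    let read_index_fut = read_index();
    pin_mut!(read_index_fut);

    // Race the request against a delay, as the hunk does with the proxy timer.
    let delay = tokio::time::sleep(timeout);
    pin_mut!(delay);

    let ok = match select(read_index_fut, delay).await {
        // The request finished before the deadline: use its result.
        Either::Left((idx, _unfired_delay)) => {
            println!("read index = {}", idx);
            true
        }
        // The delay fired first: treat it as a timeout.
        Either::Right((_, _abandoned_request)) => false,
    };
    assert!(ok);
}
```

`Either::Left` corresponds to the `futures::future::Either::Left(_) => true` arm in the hunk: the read index resolved before the deadline.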
13 changes: 9 additions & 4 deletions components/raftstore/src/engine_store_ffi/utils.rs
@@ -3,6 +3,14 @@
use std::time;

use futures_util::{compat::Future01CompatExt, future::BoxFuture, FutureExt};
use tikv_util::timer::start_global_timer;
use tokio_timer::timer::Handle;

use crate::engine_store_ffi::lazy_static;

lazy_static! {
pub static ref PROXY_TIMER_HANDLE: Handle = start_global_timer("proxy-timer");
}

pub type ArcNotifyWaker = std::sync::Arc<NotifyWaker>;

@@ -22,10 +30,7 @@ pub struct TimerTask {

pub fn make_timer_task(millis: u64) -> TimerTask {
let deadline = time::Instant::now() + time::Duration::from_millis(millis);
let delay = tikv_util::timer::PROXY_TIMER_HANDLE
.delay(deadline)
.compat()
.map(|_| {});
let delay = PROXY_TIMER_HANDLE.delay(deadline).compat().map(|_| {});
TimerTask {
future: Box::pin(delay),
}
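With this change the proxy owns its timer: `PROXY_TIMER_HANDLE` becomes a `lazy_static` global that calls `tikv_util::timer::start_global_timer("proxy-timer")` on first use, instead of reusing a handle exported by `tikv_util`. The sketch below illustrates the underlying pattern only, a process-wide handle whose worker thread is spawned lazily on first access; the `BackgroundClock` type, its channel plumbing, and the sleeping loop are simplifications invented for illustration (the real handle is a `tokio_timer::timer::Handle`), and the `crossbeam` and `lazy_static` crates are assumed.

```rust
use std::thread;
use std::time::{Duration, Instant};

use crossbeam::channel::{self, Receiver, Sender};
use lazy_static::lazy_static;

// Hypothetical stand-in for the timer handle: callers register a deadline and
// get back a channel that fires once it has passed.
pub struct BackgroundClock {
    requests: Sender<(Instant, Sender<()>)>,
}

impl BackgroundClock {
    fn start(name: &str) -> Self {
        let (tx, rx) = channel::unbounded::<(Instant, Sender<()>)>();
        thread::Builder::new()
            .name(name.to_owned())
            .spawn(move || {
                // The real start_global_timer drives a timer wheel; this sketch
                // just sleeps until each requested deadline and then notifies.
                for (deadline, done) in rx {
                    let now = Instant::now();
                    if deadline > now {
                        thread::sleep(deadline - now);
                    }
                    let _ = done.send(());
                }
            })
            .unwrap();
        BackgroundClock { requests: tx }
    }

    pub fn delay(&self, deadline: Instant) -> Receiver<()> {
        let (done_tx, done_rx) = channel::bounded(1);
        self.requests.send((deadline, done_tx)).unwrap();
        done_rx
    }
}

lazy_static! {
    // Built on first access and shared process-wide, like PROXY_TIMER_HANDLE above.
    pub static ref CLOCK: BackgroundClock = BackgroundClock::start("proxy-timer");
}

fn main() {
    let fired = CLOCK.delay(Instant::now() + Duration::from_millis(20));
    fired.recv().unwrap(); // resolves once the deadline has passed
}
```

Keeping the handle next to its only users in `engine_store_ffi` is presumably what "reclaiming" `tikv_util` means here: proxy-only additions move back into the proxy's own modules.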
2 changes: 1 addition & 1 deletion components/tikv_util/src/metrics/mod.rs
@@ -7,7 +7,7 @@ use prometheus_static_metric::*;
#[cfg(target_os = "linux")]
mod threads_linux;
#[cfg(target_os = "linux")]
pub use self::threads_linux::{cpu_total, get_thread_ids, monitor_threads, ThreadInfoStatistics};
pub use self::threads_linux::{monitor_threads, ThreadInfoStatistics};

#[cfg(target_os = "linux")]
mod process_linux;
4 changes: 0 additions & 4 deletions components/tikv_util/src/metrics/threads_dummy.rs
@@ -37,10 +37,6 @@ impl ThreadInfoStatistics {
}
}

pub fn dump_thread_stats() -> String {
"only support linux".into()
}

impl Default for ThreadInfoStatistics {
fn default() -> Self {
Self::new()
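`threads_dummy.rs` is the non-Linux counterpart of `threads_linux.rs`; the two are selected by the `#[cfg(target_os = "linux")]` attributes visible in the `metrics/mod.rs` hunk above. A small sketch of that gating pattern follows; the module and function names are made up for illustration.

```rust
// Platform-gated module selection, mirroring the threads_linux / threads_dummy
// split: the same public name resolves to a real implementation on Linux and
// to a stub everywhere else.

#[cfg(target_os = "linux")]
mod stats_linux {
    pub fn thread_count() -> usize {
        // On Linux, /proc/self/task has one entry per thread.
        std::fs::read_dir("/proc/self/task")
            .map(|dir| dir.count())
            .unwrap_or(0)
    }
}

#[cfg(not(target_os = "linux"))]
mod stats_dummy {
    pub fn thread_count() -> usize {
        // Stub: no portable equivalent here, so report zero.
        0
    }
}

#[cfg(target_os = "linux")]
pub use self::stats_linux::thread_count;
#[cfg(not(target_os = "linux"))]
pub use self::stats_dummy::thread_count;

fn main() {
    println!("threads: {}", thread_count());
}
```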
109 changes: 32 additions & 77 deletions components/tikv_util/src/metrics/threads_linux.rs
@@ -1,15 +1,12 @@
// Copyright 2017 TiKV Project Authors. Licensed under Apache-2.0.

use std::{
fs,
io::{Error, ErrorKind, Result},
sync::Mutex,
time::Duration,
};

use collections::HashMap;
use lazy_static::lazy_static;
use libc::{self, pid_t};
use procinfo::pid;
use prometheus::{
self,
@@ -24,7 +21,7 @@ use crate::{

/// Monitors threads of the current process.
pub fn monitor_threads<S: Into<String>>(namespace: S) -> Result<()> {
let pid = unsafe { libc::getpid() };
let pid = thread::process_id();
let tc = ThreadsCollector::new(pid, namespace);
prometheus::register(Box::new(tc)).map_err(|e| to_io_err(format!("{:?}", e)))
}
@@ -62,7 +59,7 @@ impl Metrics {
).namespace(ns.clone()),
&["name", "tid", "io"],
)
.unwrap();
.unwrap();
let voluntary_ctxt_switches = IntGaugeVec::new(
Opts::new(
"thread_voluntary_context_switches",
@@ -113,14 +110,14 @@ impl Metrics {
/// A collector to collect threads metrics, including CPU usage
/// and threads state.
struct ThreadsCollector {
pid: pid_t,
pid: Pid,
descs: Vec<Desc>,
metrics: Mutex<Metrics>,
tid_retriever: Mutex<TidRetriever>,
}

impl ThreadsCollector {
fn new<S: Into<String>>(pid: pid_t, namespace: S) -> ThreadsCollector {
fn new<S: Into<String>>(pid: Pid, namespace: S) -> ThreadsCollector {
let metrics = Metrics::new(namespace);
ThreadsCollector {
pid,
@@ -149,9 +146,9 @@ impl Collector for ThreadsCollector {
}
for tid in tids {
let tid = *tid;
if let Ok(stat) = pid::stat_task(self.pid, tid) {
if let Ok(stat) = thread::full_thread_stat(self.pid, tid) {
// Threads CPU time.
let total = cpu_total(&stat);
let total = thread::linux::cpu_total(&stat);
// sanitize thread name before push metrics.
let name = sanitize_thread_name(tid, &stat.command);
let cpu_total = metrics
@@ -212,38 +209,6 @@ impl Collector for ThreadsCollector {
}
}

/// Gets thread ids of the given process id.
/// WARN: Don't call this function frequently. Otherwise there will be a lot of memory fragments.
pub fn get_thread_ids(pid: pid_t) -> Result<Vec<pid_t>> {
let mut tids: Vec<i32> = fs::read_dir(format!("/proc/{}/task", pid))?
.filter_map(|task| {
let file_name = match task {
Ok(t) => t.file_name(),
Err(e) => {
error!("read task failed"; "pid" => pid, "err" => ?e);
return None;
}
};

match file_name.to_str() {
Some(tid) => match tid.parse() {
Ok(tid) => Some(tid),
Err(e) => {
error!("read task failed"; "pid" => pid, "err" => ?e);
None
}
},
None => {
error!("read task failed"; "pid" => pid);
None
}
}
})
.collect();
tids.sort_unstable();
Ok(tids)
}

/// Sanitizes the thread name. Keeps `a-zA-Z0-9_:`, replaces `-` and ` ` with `_`, and drops the others.
///
/// Examples:
@@ -256,7 +221,7 @@ pub fn get_thread_ids(pid: pid_t) -> Result<Vec<pid_t>> {
/// assert_eq!(sanitize_thread_name(1, "@123"), "123");
/// assert_eq!(sanitize_thread_name(1, "@@@@"), "1");
/// ```
fn sanitize_thread_name(tid: pid_t, raw: &str) -> String {
fn sanitize_thread_name(tid: Pid, raw: &str) -> String {
let mut name = String::with_capacity(raw.len());
// sanitize thread name.
for c in raw.chars() {
Expand Down Expand Up @@ -293,23 +258,10 @@ fn state_to_str(state: &pid::State) -> &str {
}
}

pub fn cpu_total(state: &pid::Stat) -> f64 {
(state.utime + state.stime) as f64 / *CLK_TCK
}

fn to_io_err(s: String) -> Error {
Error::new(ErrorKind::Other, s)
}

lazy_static! {
// getconf CLK_TCK
static ref CLK_TCK: f64 = {
unsafe {
libc::sysconf(libc::_SC_CLK_TCK) as f64
}
};
}

#[inline]
fn get_name(command: &str) -> String {
if !command.is_empty() {
@@ -367,7 +319,7 @@ impl ThreadMetrics {

/// Use to collect cpu usages and disk I/O rates
pub struct ThreadInfoStatistics {
pid: pid_t,
pid: Pid,
last_instant: Instant,
tid_names: HashMap<i32, String>,
tid_retriever: TidRetriever,
@@ -377,7 +329,7 @@ pub struct ThreadInfoStatistics {

impl ThreadInfoStatistics {
pub fn new() -> Self {
let pid = unsafe { libc::getpid() };
let pid = thread::process_id();

let mut thread_stats = Self {
pid,
@@ -406,13 +358,13 @@ impl ThreadInfoStatistics {
for tid in tids {
let tid = *tid;

if let Ok(stat) = pid::stat_task(self.pid, tid) {
if let Ok(stat) = thread::full_thread_stat(self.pid, tid) {
let name = get_name(&stat.command);
self.tid_names.entry(tid).or_insert(name);

// To get a percentage result,
// we pre-multiply `cpu_time` by 100 here rather than inside the `update_metric`.
let cpu_time = cpu_total(&stat) * 100.0;
let cpu_time = thread::linux::cpu_total(&stat) * 100.0;
update_metric(
&mut self.metrics_total.cpu_times,
&mut self.metrics_rate.cpu_times,
@@ -472,28 +424,31 @@ const TID_MAX_UPDATE_INTERVAL: Duration = Duration::from_secs(10 * 60);

/// A helper that buffers the thread id list internally.
struct TidRetriever {
pid: pid_t,
pid: Pid,
tid_buffer: Vec<i32>,
tid_buffer_last_update: Instant,
tid_buffer_update_interval: Duration,
}

impl TidRetriever {
pub fn new(pid: pid_t) -> Self {
pub fn new(pid: Pid) -> Self {
let mut tid_buffer: Vec<_> = thread::thread_ids(pid).unwrap();
tid_buffer.sort_unstable();
Self {
pid,
tid_buffer: get_thread_ids(pid).unwrap(),
tid_buffer,
tid_buffer_last_update: Instant::now(),
tid_buffer_update_interval: TID_MIN_UPDATE_INTERVAL,
}
}

pub fn get_tids(&mut self) -> (&[pid_t], bool) {
pub fn get_tids(&mut self) -> (&[Pid], bool) {
// Update the tid list according to tid_buffer_update_interval.
// If tid is not changed, update the tid list less frequently.
let mut updated = false;
if self.tid_buffer_last_update.saturating_elapsed() >= self.tid_buffer_update_interval {
let new_tid_buffer = get_thread_ids(self.pid).unwrap();
let mut new_tid_buffer: Vec<_> = thread::thread_ids(self.pid).unwrap();
new_tid_buffer.sort_unstable();
if new_tid_buffer == self.tid_buffer {
self.tid_buffer_update_interval *= 2;
if self.tid_buffer_update_interval > TID_MAX_UPDATE_INTERVAL {
@@ -522,7 +477,7 @@ mod tests {
let name = "testthreadio";
let (tx, rx) = sync::mpsc::channel();
let (tx1, rx1) = sync::mpsc::channel();
let h = thread::Builder::new()
let h = std::thread::Builder::new()
.name(name.to_owned())
.spawn(move || {
// Make `io::write_bytes` > 0
@@ -541,13 +496,13 @@
rx1.recv().unwrap();

let page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as usize };
let pid = unsafe { libc::getpid() };
let tids = get_thread_ids(pid).unwrap();
let pid = thread::process_id();
let tids: Vec<_> = thread::thread_ids(pid).unwrap();
assert!(tids.len() >= 2);

tids.iter()
.find(|t| {
pid::stat_task(pid, **t)
thread::full_thread_stat(pid, **t)
.map(|stat| stat.command == name)
.unwrap_or(false)
})
@@ -571,7 +526,7 @@
) -> (sync::mpsc::Sender<()>, sync::mpsc::Receiver<()>) {
let (tx, rx) = sync::mpsc::channel();
let (tx1, rx1) = sync::mpsc::channel();
thread::Builder::new()
std::thread::Builder::new()
.name(str1.to_owned())
.spawn(move || {
tx1.send(()).unwrap();
@@ -613,10 +568,10 @@ mod tests {
let mut thread_info = ThreadInfoStatistics::new();

let page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as u64 };
let pid = unsafe { libc::getpid() };
let tids = get_thread_ids(pid).unwrap();
let pid = thread::process_id();
let tids: Vec<_> = thread::thread_ids(pid).unwrap();
for tid in tids {
if let Ok(stat) = pid::stat_task(pid, tid) {
if let Ok(stat) = thread::full_thread_stat(pid, tid) {
if stat.command.starts_with(s1) {
rx1.recv().unwrap();
thread_info.record();
@@ -657,7 +612,7 @@ mod tests {
) -> (sync::mpsc::Sender<()>, sync::mpsc::Receiver<()>) {
let (tx, rx) = sync::mpsc::channel();
let (tx1, rx1) = sync::mpsc::channel();
thread::Builder::new()
std::thread::Builder::new()
.name(name)
.spawn(move || {
tx1.send(()).unwrap();
@@ -687,10 +642,10 @@

let mut thread_info = ThreadInfoStatistics::new();

let pid = unsafe { libc::getpid() };
let tids = get_thread_ids(pid).unwrap();
let pid = thread::process_id();
let tids: Vec<_> = thread::thread_ids(pid).unwrap();
for tid in tids {
if let Ok(stat) = pid::stat_task(pid, tid) {
if let Ok(stat) = thread::full_thread_stat(pid, tid) {
if stat.command.starts_with(tn) {
rx.recv().unwrap();
thread_info.record();
@@ -748,7 +703,7 @@

#[test]
fn test_smoke() {
let pid = unsafe { libc::getpid() };
let pid = thread::process_id();
let tc = ThreadsCollector::new(pid, "smoke");
tc.collect();
tc.desc();
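`TidRetriever`, now built on `tikv_util`'s `thread::thread_ids` instead of the deleted local `get_thread_ids`, refreshes its cached thread-id list on an adaptive schedule: if a refresh returns the same set of tids, the interval doubles up to the 10-minute `TID_MAX_UPDATE_INTERVAL`. Below is a condensed sketch of that backoff with a generic `refresh` closure standing in for `thread::thread_ids(pid)`; the minimum interval value and the reset-to-minimum on change are not visible in the hunk above and are assumed here.

```rust
use std::time::{Duration, Instant};

// Only the 10-minute cap comes from the diff; the minimum is made up.
const MIN_UPDATE_INTERVAL: Duration = Duration::from_secs(15);
const MAX_UPDATE_INTERVAL: Duration = Duration::from_secs(10 * 60);

/// Caches an expensive-to-compute snapshot (the thread-id list in the diff)
/// and stretches the refresh interval while the snapshot stays unchanged.
struct AdaptiveCache {
    buffer: Vec<i32>,
    last_update: Instant,
    update_interval: Duration,
}

impl AdaptiveCache {
    fn new(initial: Vec<i32>) -> Self {
        AdaptiveCache {
            buffer: initial,
            last_update: Instant::now(),
            update_interval: MIN_UPDATE_INTERVAL,
        }
    }

    /// `refresh` stands in for thread::thread_ids(pid) in the real code.
    fn get(&mut self, refresh: impl Fn() -> Vec<i32>) -> (&[i32], bool) {
        let mut updated = false;
        if self.last_update.elapsed() >= self.update_interval {
            let mut latest = refresh();
            latest.sort_unstable();
            if latest == self.buffer {
                // Nothing changed: poll half as often next time, up to the cap.
                self.update_interval =
                    (self.update_interval * 2).min(MAX_UPDATE_INTERVAL);
            } else {
                // The set changed: go back to polling frequently.
                self.update_interval = MIN_UPDATE_INTERVAL;
                self.buffer = latest;
                updated = true;
            }
            self.last_update = Instant::now();
        }
        (&self.buffer, updated)
    }
}

fn main() {
    let mut cache = AdaptiveCache::new(vec![1, 2, 3]);
    let (tids, changed) = cache.get(|| vec![3, 2, 1]);
    println!("{:?} changed={}", tids, changed);
}
```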
7 changes: 1 addition & 6 deletions components/tikv_util/src/mpsc/mod.rs
@@ -21,7 +21,6 @@ use std::{
use crossbeam::channel::{
self, RecvError, RecvTimeoutError, SendError, TryRecvError, TrySendError,
};
use fail::fail_point;

struct State {
sender_cnt: AtomicIsize,
@@ -240,11 +239,7 @@ impl<T> LooseBoundedSender<T> {
#[inline]
pub fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
let cnt = self.tried_cnt.get();
let check_interval = || {
fail_point!("loose_bounded_sender_check_interval", |_| 0);
CHECK_INTERVAL
};
if cnt < check_interval() {
if cnt < CHECK_INTERVAL {
self.tried_cnt.set(cnt + 1);
} else if self.len() < self.limit {
self.tried_cnt.set(1);
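The mpsc hunk removes a fail-point from `LooseBoundedSender::try_send` but keeps its core trick: the underlying channel is unbounded, and the length limit is only consulted once every `CHECK_INTERVAL` sends, so most sends skip the length query. Below is a simplified sketch of that idea on top of a plain crossbeam channel; `tikv_util` wraps its own channel type and sender bookkeeping, which is omitted here, and the `CHECK_INTERVAL` value is made up.

```rust
use std::cell::Cell;

use crossbeam::channel::{self, Receiver, Sender, TrySendError};

/// How many sends may go through before the queue length is re-checked.
const CHECK_INTERVAL: usize = 8;

/// Sketch of a "loosely bounded" sender: unbounded channel underneath, with
/// the limit enforced only every CHECK_INTERVAL sends.
struct LooseBoundedSender<T> {
    sender: Sender<T>,
    tried_cnt: Cell<usize>,
    limit: usize,
}

impl<T> LooseBoundedSender<T> {
    fn try_send(&self, t: T) -> Result<(), TrySendError<T>> {
        let cnt = self.tried_cnt.get();
        if cnt < CHECK_INTERVAL {
            // Fast path: no length check, just count the send.
            self.tried_cnt.set(cnt + 1);
        } else if self.sender.len() < self.limit {
            // Periodic check passed: reset the counter and keep sending.
            self.tried_cnt.set(1);
        } else {
            return Err(TrySendError::Full(t));
        }
        self.sender
            .send(t)
            .map_err(|e| TrySendError::Disconnected(e.0))
    }
}

fn loose_bounded<T>(limit: usize) -> (LooseBoundedSender<T>, Receiver<T>) {
    let (sender, receiver) = channel::unbounded();
    (
        LooseBoundedSender {
            sender,
            tried_cnt: Cell::new(0),
            limit,
        },
        receiver,
    )
}

fn main() {
    let (tx, rx) = loose_bounded::<u64>(4);
    for i in 0..16 {
        // Sends may briefly overshoot the limit between length checks.
        let _ = tx.try_send(i);
    }
    println!("queued: {}", rx.len());
}
```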