268 lines
7.7 KiB
Rust
268 lines
7.7 KiB
Rust
use chrono::{DateTime, Utc};
|
|
/**
|
|
This facilitates writing logs to a logtail server. This is a port of
|
|
[github.com/tailscale/tailscale/logtail](https://github.com/tailscale/tailscale/blob/main/logtail/logtail.go)'s
|
|
`logtail.go`.
|
|
*/
|
|
use reqwest::Client;
|
|
|
|
mod config;
|
|
pub use self::config::*;
|
|
|
|
/// DefaultHost is the default URL to upload logs to when Builder.base_url isn't provided.
// `'static` is implied on const/static references (clippy: redundant_static_lifetimes).
pub const DEFAULT_HOST: &str = "https://log.tailscale.io";
|
|
|
|
/**
Builds a send/recv pair for the logtail service. Create a new Builder with the [Builder::default]
method. The only mandatory field is the `collection`.
*/
#[derive(Default)]
pub struct Builder {
    /// Log collection to register to; mandatory ([Builder::build] fails without it).
    collection: Option<String>,
    /// Identity key; auto-generated (but not persisted) at build time when absent.
    private_id: Option<logtail::PrivateID>,
    /// Optional User-Agent for the default HTTP client; unused if `client` is set.
    user_agent: Option<String>,
    /// Log catcher base URL; defaults to [DEFAULT_HOST].
    base_url: Option<String>,
    /// Custom HTTP client; when set, `user_agent` has no effect.
    client: Option<reqwest::Client>,
    /// In-memory message buffer capacity; 0 (the Default) means "use 256".
    buffer_size: usize,
}
|
|
|
|
impl Builder {
|
|
/// The logtail collection to register logs to. This **MUST** be a hostname, even though
|
|
/// it is not used as a hostname. This is used to disambiguate multiple different programs
|
|
/// from eachother.
|
|
pub fn collection(mut self, collection: String) -> Self {
|
|
self.collection = Some(collection);
|
|
self
|
|
}
|
|
|
|
/// The private ID for this logtail identity. If one is not set then one will be auto-generated
|
|
/// but not saved. Users should store their local [logtail::PrivateID] on disk for later use.
|
|
pub fn private_id(mut self, id: logtail::PrivateID) -> Self {
|
|
self.private_id = Some(id);
|
|
self
|
|
}
|
|
|
|
/// The user agent to attribute logs to. If not set, one will not be sent.
|
|
pub fn user_agent(mut self, ua: String) -> Self {
|
|
self.user_agent = Some(ua);
|
|
self
|
|
}
|
|
|
|
/// The base logcatcher URL. If not set, this will default to [DEFAULT_HOST].
|
|
pub fn base_url(mut self, base_url: String) -> Self {
|
|
self.base_url = Some(base_url);
|
|
self
|
|
}
|
|
|
|
/// A custom [reqwest::Client] to use for all interactions. If set this makes
|
|
/// [Builder::user_agent] calls ineffectual.
|
|
pub fn client(mut self, client: Client) -> Self {
|
|
self.client = Some(client);
|
|
self
|
|
}
|
|
|
|
/// The number of log messages to buffer in memory. By default this is set to
|
|
/// 256 messages buffered until new ones are dropped.
|
|
pub fn buffer_size(mut self, buffer_size: usize) -> Self {
|
|
self.buffer_size = buffer_size;
|
|
self
|
|
}
|
|
|
|
/// A "low-memory" friendly value for the buffer size. This will only queue up to
|
|
/// 64 messages until new ones are dropped.
|
|
pub fn low_mem(self) -> Self {
|
|
self.buffer_size(64)
|
|
}
|
|
|
|
/// Trades the Builder in for an Ingress/Egress pair. Ingress will be safe to `clone`
|
|
/// as many times as you need to. Egress must have [Egress::post] called periodically
|
|
/// in order for log messages to get sent to the server.
|
|
pub fn build(self) -> Result<(Ingress, Egress), Error> {
|
|
let buf_size: usize = if self.buffer_size != 0 {
|
|
self.buffer_size
|
|
} else {
|
|
256
|
|
};
|
|
|
|
if let None = self.collection {
|
|
return Err(Error::NoCollection);
|
|
}
|
|
|
|
let (tx, rx) = crossbeam::channel::bounded(buf_size);
|
|
let private_id = self.private_id.unwrap_or(logtail::PrivateID::new());
|
|
let base_url = self.base_url.unwrap_or(DEFAULT_HOST.to_string());
|
|
let mut u = url::Url::parse(&base_url)?;
|
|
u.path_segments_mut()
|
|
.unwrap()
|
|
.push("c")
|
|
.push(&self.collection.unwrap())
|
|
.push(&private_id.as_hex());
|
|
|
|
let ing = Ingress { tx };
|
|
let eg = Egress {
|
|
url: u.as_str().to_string(),
|
|
client: self.client.unwrap_or({
|
|
let mut builder = Client::builder();
|
|
|
|
if let Some(ua) = self.user_agent {
|
|
builder = builder.user_agent(ua);
|
|
}
|
|
|
|
builder.build().unwrap()
|
|
}),
|
|
rx,
|
|
};
|
|
|
|
Ok((ing, eg))
|
|
}
|
|
}
|
|
|
|
/// Every failure mode of building or operating the logtail send/recv pair.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// [Builder::build] was called without a collection being set.
    #[error("no collection defined")]
    NoCollection,

    /// Sending into the in-memory channel failed; carries the stringified send error.
    #[error("can't put to in-memory buffer: {0}")]
    TXFail(String),

    /// Receiving from the in-memory channel failed.
    #[error("can't get from in-memory buffer: {0}")]
    RXFail(#[from] crossbeam::channel::TryRecvError),

    /// The base URL given to [Builder::base_url] could not be parsed.
    #[error("can't parse a URL: {0}")]
    URLParseError(#[from] url::ParseError),

    /// The HTTP upload of a log batch failed.
    #[error("can't post logs: {0}")]
    ReqwestError(#[from] reqwest::Error),

    /// JSON (de)serialization failed.
    #[error("can't encode to json: {0}")]
    JsonError(#[from] serde_json::Error),

    /// zstd compression of a log batch failed.
    #[error("can't compress")]
    ZstdError,

    /// [Ingress::send] was given a JSON value that is not an object.
    #[error("must be json object")]
    MustBeJsonObject,

    /// The spawned blocking compression task failed to complete.
    #[error("can't do compression task: {0}")]
    JoinError(#[from] tokio::task::JoinError),
}
|
|
|
|
/// The sink you dump log messages to. You can clone this as many times as you
/// need to.
#[derive(Clone)]
pub struct Ingress {
    /// Producer side of the bounded channel drained by the matching [Egress].
    tx: crossbeam::channel::Sender<serde_json::Value>,
}
|
|
|
|
impl Ingress {
|
|
/// Sends a JSON object to the log server. This MUST be a JSON object.
|
|
pub fn send(&self, val: serde_json::Value) -> Result<(), Error> {
|
|
if !val.is_object() {
|
|
return Err(Error::MustBeJsonObject);
|
|
}
|
|
|
|
let mut val = val.clone();
|
|
|
|
let header = LogtailHeader {
|
|
client_time: Utc::now(),
|
|
};
|
|
let obj = val.as_object_mut().unwrap();
|
|
obj.insert("logtail".to_string(), serde_json::to_value(header)?);
|
|
|
|
match self.tx.send(val) {
|
|
Ok(_) => Ok(()),
|
|
Err(why) => Err(Error::TXFail(format!("{}", why))),
|
|
}
|
|
}
|
|
}
|
|
|
|
/// The `logtail` metadata object injected into every message by [Ingress::send].
#[derive(Clone, serde::Serialize)]
struct LogtailHeader {
    /// Client wall-clock time at which the message was accepted.
    pub client_time: DateTime<Utc>,
}
|
|
|
|
/// The egressor of log messages buffered by its matching [Ingress].
pub struct Egress {
    /// Fully-resolved upload URL (base + collection + private-ID segments).
    url: String,
    /// HTTP client used for all uploads.
    client: reqwest::Client,
    /// Consumer side of the bounded channel fed by [Ingress].
    rx: crossbeam::channel::Receiver<serde_json::Value>,
}
|
|
|
|
impl Egress {
|
|
fn pull(&self) -> Vec<serde_json::Value> {
|
|
let mut values: Vec<serde_json::Value> = vec![];
|
|
|
|
loop {
|
|
match self.rx.try_recv() {
|
|
Ok(val) => values.push(val),
|
|
Err(_) => {
|
|
break;
|
|
}
|
|
};
|
|
}
|
|
|
|
values
|
|
}
|
|
|
|
/// Pushes log messages to logtail. This will push everything buffered into the
|
|
/// log server. This should be called periodically.
|
|
pub async fn post(&self) -> Result<(), Error> {
|
|
let values = self.pull();
|
|
self.push(values).await?;
|
|
Ok(())
|
|
}
|
|
|
|
async fn push(&self, values: Vec<serde_json::Value>) -> Result<(), Error> {
|
|
let bytes = serde_json::to_vec(&values)?;
|
|
let orig_len = bytes.len();
|
|
let compressed = tokio::task::spawn_blocking(move || {
|
|
zstd::block::compress(&bytes, 5).map_err(|_| Error::ZstdError)
|
|
})
|
|
.await??;
|
|
|
|
let resp = self
|
|
.client
|
|
.post(&self.url)
|
|
.header("Content-Encoding", "zstd")
|
|
.header("Orig-Content-Length", orig_len)
|
|
.body(compressed)
|
|
.timeout(std::time::Duration::from_secs(1))
|
|
.send()
|
|
.await?;
|
|
|
|
resp.error_for_status()?;
|
|
|
|
Ok(())
|
|
}
|
|
}
|
|
|
|
#[cfg(test)]
mod tests {
    use super::Builder;

    #[derive(Clone, serde::Serialize)]
    struct Data {
        pub foo: String,
    }

    /// End-to-end smoke test. NOTE(review): requires a log catcher listening
    /// on 127.0.0.1:3848 — this is an integration test, not a unit test.
    #[tokio::test]
    async fn end_to_end() {
        // `post` takes `&self`, so `eg` needs no `mut` binding
        // (the original produced an unused-mut warning).
        let (ing, eg) = Builder::default()
            .collection("rebterlai.logtail-poster.test".to_string())
            .user_agent("rebterlai/test".to_string())
            .base_url("http://127.0.0.1:3848".to_string())
            .build()
            .unwrap();

        ing.send(
            serde_json::to_value(Data {
                foo: "bar".to_string(),
            })
            .unwrap(),
        )
        .unwrap();

        eg.post().await.unwrap();
    }
}
|