From 0623dd9d7bf913d2c3695ce2b24dc3b68d49eeb3 Mon Sep 17 00:00:00 2001
From: Robert Detjens
Date: Thu, 26 Dec 2024 15:00:28 -0800
Subject: [PATCH] WIP build artifacts copy, getting empty dl tar

Signed-off-by: Robert Detjens

---
 Cargo.lock                    |   2 +-
 Cargo.toml                    |   2 +-
 src/access_handlers/docker.rs |   2 +-
 src/builder/artifacts.rs      |  78 ++++++++++++++++
 src/builder/docker.rs         | 118 ++++++++++++++++++++---
 src/builder/mod.rs            | 171 ++++++++++++++++++++++++++--------
 tests/repo/rcds.yaml          |   4 +-
 7 files changed, 319 insertions(+), 58 deletions(-)
 create mode 100644 src/builder/artifacts.rs

diff --git a/Cargo.lock b/Cargo.lock
index 0f8ddca..843537c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -264,7 +264,7 @@ dependencies = [
  "clap-verbosity-flag",
  "figment",
  "fully_pub",
- "futures-util",
+ "futures",
  "glob",
  "itertools",
  "k8s-openapi",
diff --git a/Cargo.toml b/Cargo.toml
index d4e9e58..7e551d5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -16,7 +16,7 @@ tera = "1.19.1"
 simplelog = { version = "0.12.2", features = ["paris"] }
 fully_pub = "0.1.4"
 void = "1"
-futures-util = "0.3.30"
+futures = "0.3.30"
 figment = { version = "0.10.19", features = ["env", "yaml", "test"] }
 
 # kubernetes:
diff --git a/src/access_handlers/docker.rs b/src/access_handlers/docker.rs
index 94b9489..04645a3 100644
--- a/src/access_handlers/docker.rs
+++ b/src/access_handlers/docker.rs
@@ -4,7 +4,7 @@ use bollard::{
     image::{CreateImageOptions, PushImageOptions, TagImageOptions},
     Docker,
 };
-use futures_util::{StreamExt, TryStreamExt};
+use futures::{StreamExt, TryStreamExt};
 use itertools::Itertools;
 use simplelog::*;
 use tokio;
diff --git a/src/builder/artifacts.rs b/src/builder/artifacts.rs
new file mode 100644
index 0000000..4c6cd8b
--- /dev/null
+++ b/src/builder/artifacts.rs
@@ -0,0 +1,78 @@
+use anyhow::{anyhow, Context, Error, Result};
+use futures::future::try_join_all;
+use itertools::Itertools;
+use simplelog::{debug, trace};
+use std::path::PathBuf;
+
+use crate::builder::docker::{client, copy_file, create_container};
+use crate::configparser::challenge::ProvideConfig;
+
+use super::docker;
+
+/// Extract assets from the given container per the provide config into the
+/// challenge directory, returning the extracted file path(s)
+#[tokio::main(flavor = "current_thread")] // make this a sync function
+pub async fn extract_asset(provide: &ProvideConfig, container: &str) -> Result<Vec<String>> {
+    debug!("extracting assets from container {}", container);
+    // This needs to handle three cases:
+    //   - single or multiple files without renaming (no as: field)
+    //   - single file with rename (one item with as:)
+    //   - multiple files as archive (multiple items with as:)
+
+    // TODO: since this puts artifacts in the repo source folder, this should
+    // try to not overwrite any existing files.
+
+    match &provide.as_file {
+        // no renaming, copy out all as-is
+        None => extract_files(container, &provide.include).await,
+        // (as is keyword, so add underscore)
+        Some(as_) => {
+            if provide.include.len() == 1 {
+                // single file, rename
+                extract_rename(container, &provide.include[0], as_).await
+            } else {
+                // multiple files, zip as archive
+                extract_archive(container, &provide.include, as_).await
+            }
+        }
+    }
+}
+
+/// Extract multiple files from container
+async fn extract_files(container: &str, files: &Vec<String>) -> Result<Vec<String>> {
+    trace!(
+        "extracting {} files without renaming: {:?}",
+        files.len(),
+        files
+    );
+
+    try_join_all(
+        files
+            .iter()
+            .enumerate() // need index to avoid copy collisions
+            .map(|(i, f)| docker::copy_file(container, f, None)),
+    )
+    .await
+
+    // files
+    //     .iter()
+    //     .map(|f| docker::copy_file(container, f, None))
+    //     .collect::<Result<Vec<_>>>()
+}
+
+/// Extract one file from container and rename
+async fn extract_rename(container: &str, file: &str, new_name: &str) -> Result<Vec<String>> {
+    trace!("extracting file and renaming it");
+
+    Ok(vec!["todo rename".to_string()])
+}
+
+/// Extract one or more files from container as archive
+async fn extract_archive(
+    container: &str,
+    files: &Vec<String>,
+    archive_name: &str,
+) -> Result<Vec<String>> {
+    trace!("extracting multiple files into archive");
+
+    Ok(vec!["todo archive".to_string()])
+}
diff --git a/src/builder/docker.rs b/src/builder/docker.rs
index b8a9015..ade2381 100644
--- a/src/builder/docker.rs
+++ b/src/builder/docker.rs
@@ -1,14 +1,21 @@
 use anyhow::{anyhow, bail, Context, Error, Result};
 use bollard::auth::DockerCredentials;
+use bollard::container::{
+    Config, CreateContainerOptions, DownloadFromContainerOptions, RemoveContainerOptions,
+};
 use bollard::errors::Error as DockerError;
 use bollard::image::{BuildImageOptions, PushImageOptions};
 use bollard::Docker;
 use core::fmt;
-use futures_util::{StreamExt, TryStreamExt};
+use futures::{StreamExt, TryStreamExt};
 use simplelog::*;
+use std::fs::File;
+use std::io::{Seek, Write};
+use std::sync::LazyLock;
+use std::{fs, io};
 use std::{io::Read, path::Path};
 use tar;
-use tempfile::tempfile;
+use tempfile::{tempdir_in, tempfile};
 use tokio;
 
 use crate::configparser::challenge::BuildObject;
@@ -17,10 +24,7 @@ use crate::configparser::UserPass;
 #[tokio::main(flavor = "current_thread")] // make this a sync function
 pub async fn build_image(context: &Path, options: &BuildObject, tag: &str) -> Result<String> {
     trace!("building image in directory {context:?} to tag {tag:?}");
-    let client = client()
-        .await
-        // truncate error chain with new error (returned error is way too verbose)
-        .map_err(|_| anyhow!("could not talk to Docker daemon (is DOCKER_HOST correct?)"))?;
+    let client = client().await?;
 
     let build_opts = BuildImageOptions {
         dockerfile: options.dockerfile.clone(),
@@ -76,10 +80,7 @@ pub async fn build_image(context: &Path, options: &BuildObject, tag: &str) -> Re
 #[tokio::main(flavor = "current_thread")] // make this a sync function
 pub async fn push_image(image_tag: &str, creds: &UserPass) -> Result<String> {
     info!("pushing image {image_tag:?} to registry");
-    let client = client()
-        .await
-        // truncate error chain with new error (returned error is way too verbose)
-        .map_err(|_| anyhow!("could not talk to Docker daemon (is DOCKER_HOST correct?)"))?;
+    let client = client().await?;
 
     let (image, tag) = image_tag
         .rsplit_once(":")
@@ -114,15 +115,104 @@ pub async fn push_image(image_tag: &str, creds: &UserPass) -> Result<String> {
     Ok(tag.to_string())
 }
 
+#[tokio::main(flavor = "current_thread")] // make this a sync function
+pub async fn create_container(image_tag: &str, name: &str) -> Result<String> {
+    debug!("creating container {name:?} from image {image_tag:?}");
+    let client = client().await?;
+
+    let opts = CreateContainerOptions {
+        name: name.to_string(),
+        ..Default::default()
+    };
+    let config = Config {
+        image: Some(image_tag),
+        ..Default::default()
+    };
+
+    let container = client.create_container(Some(opts), config).await?;
+    Ok(container.id)
+}
+
+#[tokio::main(flavor = "current_thread")] // make this a sync function
+pub async fn remove_container(name: &str) -> Result<()> {
+    debug!("removing container {name:?}");
+    let client = client().await?;
+
+    let opts = RemoveContainerOptions {
+        force: true,
+        ..Default::default()
+    };
+    client.remove_container(name, Some(opts)).await?;
+
+    Ok(())
+}
+
+pub async fn copy_file(
+    container_id: &str,
+    from_path: &str,
+    rename_to: Option<&str>,
+) -> Result<String> {
+    let client = client().await?;
+
+    // if no rename is given, use basename of `from` as target path
+    let target_path = match rename_to {
+        Some(to) => to,
+        None => Path::new(from_path).file_name().unwrap().to_str().unwrap(),
+    };
+
+    info!("copying {container_id}:{from_path} to {target_path}");
+
+    // Download single file from container in an archive
+    let opts = DownloadFromContainerOptions { path: from_path };
+    let mut dl_stream = client.download_from_container(container_id, Some(opts));
+
+    // scratch dir in chal repo (two vars for scoping reasons)
+    // let mut tempdir_full = tempdir_in(".")?;
+    // let tempdir = tempdir_full.path();
+
+    fs::create_dir_all("./.tempdir")?;
+    let tempdir = Path::new("./.tempdir");
+
+    // collect byte stream chunks into full file
+    let mut tarfile = File::create(tempdir.join(format!("download_{target_path}.tar")))?;
+    while let Some(chunk) = dl_stream.next().await {
+        tarfile.write_all(&chunk?)?;
+    }
+    tarfile.rewind()?;
+
+    // unpack file retrieved to temp dir
+    trace!("extracting download tar to {:?}", tempdir);
+    let mut tar = tar::Archive::new(tarfile);
+
+    // extract single file from archive to disk
+    // we only copied out one file, so this tar should only have one file
+    if let Some(Ok(mut entry)) = tar.entries()?.next() {
+        let mut target = File::create_new(target_path)?;
+        io::copy(&mut entry, &mut target)?;
+    } else {
+        bail!("downloaded archive for {container_id}:{from_path} has no files in it!");
+    }
+
+    Ok(target_path.to_string())
+}
+
 //
 // helper functions
 //
+
+// connect to Docker/Podman daemon once and share client
+static CLIENT: LazyLock<Result<Docker, DockerError>> =
+    LazyLock::new(|| {
+        debug!("connecting to docker...");
+        Docker::connect_with_defaults()
+    });
 pub async fn client() -> Result<Docker> {
-    debug!("connecting to docker...");
-    let client = Docker::connect_with_defaults()?;
-    client.ping().await?;
+    let c = CLIENT
+        .as_ref()
+        .map_err(|_| anyhow!("could not talk to Docker daemon (is DOCKER_HOST correct?)"))?;
+    c.ping().await?;
 
-    Ok(client)
+    Ok(c.clone())
 }
 
 #[derive(Debug)]
diff --git a/src/builder/mod.rs b/src/builder/mod.rs
index c47701a..0643487 100644
--- a/src/builder/mod.rs
+++ b/src/builder/mod.rs
@@ -3,7 +3,7 @@
 
 use anyhow::{anyhow, Context, Error, Result};
 use bollard::image::BuildImageOptions;
-use futures_util::stream::Iter;
+use futures::stream::Iter;
 use itertools::Itertools;
 use simplelog::*;
 use std::default;
@@ -11,50 +11,54 @@ use std::fmt::Pointer;
 use std::iter::zip;
 use std::path::Path;
 
-use crate::configparser::challenge::{BuildObject, ChallengeConfig, ImageSource::*};
-use crate::configparser::{get_challenges, get_config, get_profile_config, get_profile_deploy};
+use crate::configparser::challenge::{
+    BuildObject, ChallengeConfig, ImageSource::*, Pod, ProvideConfig,
+};
+use crate::configparser::{enabled_challenges, get_config};
 
 pub mod docker;
-use docker::{build_image, push_image};
+
+pub mod artifacts;
+use artifacts::extract_asset;
+
+// define tag format as reusable macro
+macro_rules! image_tag_str {
+    () => {
+        "{registry}/{challenge}-{container}:{profile}"
+    };
+}
 
 /// Build all enabled challenges for the given profile. Returns tags built
-pub fn build_challenges(profile_name: &str) -> Result<Vec<String>> {
+pub fn build_challenges(
+    profile_name: &str,
+    push: bool,
+    extract_artifacts: bool,
+) -> Result<Vec<String>> {
     enabled_challenges(profile_name)?
         .iter()
-        .map(|chal| build_challenge_images(profile_name, chal))
+        .map(|chal| build_challenge(profile_name, chal, push, extract_artifacts))
        .flatten_ok()
        .collect::<Result<Vec<_>>>()
 }
 
-/// Get all enabled challenges for profile
-pub fn enabled_challenges(profile_name: &str) -> Result<Vec<&ChallengeConfig>> {
-    let config = get_config()?;
-    let challenges = get_challenges().unwrap();
-    let deploy = &get_profile_deploy(profile_name)?.challenges;
-
-    let enabled = deploy
-        .iter()
-        .filter_map(|(chal, enabled)| match enabled {
-            true => challenges.iter().find(|c| c.directory == Path::new(chal)),
-            false => None,
-        })
-        .collect();
-
-    Ok(enabled)
-}
-
-/// Build all images for challenge under given path, return image tag
-fn build_challenge_images(profile_name: &str, chal: &ChallengeConfig) -> Result<Vec<String>> {
+/// Build all images from given challenge, optionally pushing image or extracting artifacts
+fn build_challenge(
+    profile_name: &str,
+    chal: &ChallengeConfig,
+    push: bool,
+    extract_artifacts: bool,
+) -> Result<Vec<String>> {
     debug!("building images for chal {:?}", chal.directory);
     let config = get_config()?;
 
-    chal.pods
+    let built_tags: Vec<_> = chal
+        .pods
         .iter()
         .filter_map(|p| match &p.image_source {
             Image(_) => None,
             Build(b) => {
                 let tag = format!(
-                    "{registry}/{challenge}-{container}:{profile}",
+                    image_tag_str!(),
                     registry = config.registry.domain,
                     challenge = chal.name,
                     container = p.name,
@@ -71,20 +75,109 @@ fn build_challenge_images(profile_name: &str, chal: &ChallengeConfig) -> Result<
                 )
             }
         })
-        .collect::<Result<Vec<_>>>()
-}
+        .collect::<Result<Vec<_>>>()?;
 
-/// Push passed tags to registry
-pub fn push_tags(tags: Vec<String>) -> Result<Vec<String>> {
-    let config = get_config()?;
+    if push {
+        debug!(
+            "pushing {} tags for chal {:?}",
+            built_tags.len(),
+            chal.directory
+        );
 
-    let built_tags = tags
-        .iter()
-        .map(|tag| {
-            push_image(tag, &config.registry.build)
-                .with_context(|| format!("error pushing image {tag}"))
-        })
-        .collect::<Result<Vec<_>>>()?;
+        built_tags
+            .iter()
+            .map(|tag| {
+                docker::push_image(tag, &config.registry.build)
+                    .with_context(|| format!("error pushing image {tag}"))
+            })
+            .collect::<Result<Vec<_>>>()?;
+    }
+
+    if extract_artifacts {
+        // find the matching tag for Provide entries that have a `from:` source
+        let image_assoc = chal
+            .provide
+            .iter()
+            .filter_map(|p| {
+                p.from.as_ref().map(|f| {
+                    (
+                        p,
+                        format!(
+                            image_tag_str!(),
+                            registry = config.registry.domain,
+                            challenge = chal.name,
+                            container = f,
+                            profile = profile_name
+                        ),
+                    )
+                })
+            })
+            .collect_vec();
+
+        debug!(
+            "extracting {} build artifacts for chal {:?}",
+            image_assoc.len(),
+            chal.directory
+        );
+
+        let assets = image_assoc
+            .into_iter()
+            .map(|(p, tag)| {
+                let name = format!(
+                    "asset-container-{}-{}",
+                    chal.directory.to_string_lossy().replace("/", "-"),
+                    p.from.clone().unwrap()
+                );
+                let container = docker::create_container(&tag, &name)?;
+
+                let asset_result = extract_asset(p, &container);
+
+                // clean up container even if it failed
+                docker::remove_container(&name)?;
+                asset_result
+            })
+            .flatten_ok()
+            .collect::<Result<Vec<_>>>()?;
+
+        debug!("Extracted assets: {:?}", assets);
+    }
 
     Ok(built_tags)
 }
+
+// /// Push passed tags to registry
+// pub fn push_tags(tags: Vec<String>) -> Result<Vec<String>> {
+//     let config = get_config()?;
+
+//     let built_tags = tags
+//         .iter()
+//         .map(|tag| {
+//             push_image(tag, &config.registry.build)
+//                 .with_context(|| format!("error pushing image {tag}"))
+//         })
+//         .collect::<Result<Vec<_>>>()?;
+
+//     Ok(built_tags)
+// }
+
+// /// Extract any assets needed from given challenges
+// pub fn extract_assets(
+//     profile_name: &str,
+//     built_chals: Vec<&ChallengeConfig>,
+// ) -> Result<Vec<String>> {
+//     built_chals.iter().map(|chal| {
+//         chal.provide.iter().filter(|p| p.from.is_some()).map(|p| {
+//             assets::extract_asset(p, container)
+//         })
+
+//         // let tag = format!(
+//         //     image_tag!(),
+//         //     registry = config.registry.domain,
+//         //     challenge = chal.name,
+//         //     container = p.name,
+//         //     profile = profile_name
+//         // );
+//     });
+
+//     Ok(vec![])
+// }
diff --git a/tests/repo/rcds.yaml b/tests/repo/rcds.yaml
index a229fd7..001ec9c 100644
--- a/tests/repo/rcds.yaml
+++ b/tests/repo/rcds.yaml
@@ -22,9 +22,9 @@ points:
 deploy:
   # control challenge deployment status explicitly per environment/profile
   testing:
-    misc/garf: true
+    # misc/garf: true
     pwn/notsh: true
-    web/bar: true
+    # web/bar: true
 
 profiles:
   # configure per-environment credentials etc
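
A note on the "getting empty dl tar" symptom from the subject line: in `copy_file`, the download stream is written into a handle from `File::create`, which opens the file write-only, so the later reads through `tar::Archive` fail and the `if let Some(Ok(entry))` check falls through to the "has no files in it" branch even when the download itself worked. Below is a minimal sketch of that step with a read+write handle, assuming the same bollard stream API and `tempfile` import already used in `docker.rs`; `download_to_tar` is a hypothetical helper name, not part of this patch.

use std::io::{Seek, Write};

use anyhow::Result;
use bollard::container::DownloadFromContainerOptions;
use bollard::Docker;
use futures::StreamExt;

// Sketch only: collect the container download into a file that can be read back.
async fn download_to_tar(
    client: &Docker,
    container_id: &str,
    from_path: &str,
) -> Result<tar::Archive<std::fs::File>> {
    let opts = DownloadFromContainerOptions { path: from_path };
    let mut dl_stream = client.download_from_container(container_id, Some(opts));

    // tempfile() yields a file opened for reading and writing, unlike File::create
    let mut tarfile = tempfile::tempfile()?;
    while let Some(chunk) = dl_stream.next().await {
        tarfile.write_all(&chunk?)?;
    }
    // seek back to the start before handing the file to the tar reader
    tarfile.rewind()?;

    Ok(tar::Archive::new(tarfile))
}

If keeping the on-disk copy under `./.tempdir` around for debugging is still wanted, opening it with `OpenOptions::new().read(true).write(true).create(true)` should behave the same way.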
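Separately, the `extract_rename` stub in `artifacts.rs` could probably be a thin wrapper over `copy_file`, which already accepts an optional rename target. A sketch under that assumption, relying on the imports the new module already pulls in:

/// Extract one file from container and rename it on the way out (sketch only)
async fn extract_rename(container: &str, file: &str, new_name: &str) -> Result<Vec<String>> {
    trace!("extracting file {file} and renaming to {new_name}");

    // copy_file handles the rename when a target name is passed
    let copied = docker::copy_file(container, file, Some(new_name)).await?;
    Ok(vec![copied])
}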