add compose down and structure project better

lukas-heiligenbrunner 2024-08-27 00:18:33 +02:00
parent eb145cdf31
commit d22037031a
18 changed files with 521 additions and 330 deletions
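For orientation, here is a minimal, hypothetical sketch of how the restructured bollard_compose library crate might be consumed after this commit. The path dependency, crate setup, and compose file name are illustrative assumptions; only the up/down signatures come from lib/src/lib.rs in this diff.

// Hypothetical consumer of the new bollard_compose library crate
// (assumes bollard_compose = { path = "lib" }, tokio, and anyhow as dependencies;
// the compose file path is illustrative).
use bollard_compose::{down, up};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Start the services in detached mode, like `docker compose up -d`.
    up(Some("docker-compose.yaml".to_string()), true).await?;
    // Stop and remove them again, exercising the new `down` support.
    down(Some("docker-compose.yaml".to_string())).await?;
    Ok(())
}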

47
Cargo.lock generated

@@ -147,19 +147,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "bollard-compose"
version = "0.1.0"
dependencies = [
"anyhow",
"bollard",
"clap",
"futures-util",
"serde",
"serde_yaml",
"tokio",
]
[[package]]
name = "bollard-stubs"
version = "1.45.0-rc.26.0.1"
@@ -171,6 +158,30 @@ dependencies = [
"serde_with",
]
[[package]]
name = "bollard_compose"
version = "0.1.0"
dependencies = [
"anyhow",
"bollard",
"futures-util",
"log",
"serde",
"serde_yaml",
"tokio",
]
[[package]]
name = "bollard_compose_cli"
version = "0.1.0"
dependencies = [
"anyhow",
"bollard_compose",
"clap",
"log",
"tokio",
]
[[package]]
name = "bumpalo"
version = "3.16.0"
@@ -808,6 +819,15 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "signal-hook-registry"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
dependencies = [
"libc",
]
[[package]]
name = "slab"
version = "0.4.9"
@@ -927,6 +947,7 @@ dependencies = [
"libc",
"mio",
"pin-project-lite",
"signal-hook-registry",
"socket2",
"tokio-macros",
"windows-sys",

View File

@@ -1,13 +1,8 @@
[package]
name = "bollard-compose"
version = "0.1.0"
edition = "2021"
[workspace]
members = ["cli", "lib"]
resolver = "2"
[dependencies]
bollard = "*"
serde = "1.0"
serde_yaml = "0.9"
clap = { version = "4.5.16", features = ["derive"] }
[workspace.dependencies]
log = "0.4.22"
tokio = { version = "*", features = ["rt", "rt-multi-thread", "macros"] }
anyhow = "*"
futures-util = "0.3.30"

16
cli/Cargo.toml Normal file

@@ -0,0 +1,16 @@
[package]
name = "bollard_compose_cli"
version = "0.1.0"
edition = "2021"
[dependencies]
bollard_compose = { path = "../lib" }
clap = { version = "4.5.16", features = ["derive"] }
log.workspace = true
tokio.workspace = true
anyhow.workspace = true
[[bin]]
name = "bollard_compose_cli"
path = "src/main.rs"

39
cli/src/main.rs Normal file

@@ -0,0 +1,39 @@
use bollard_compose::{down, ps, up};
use clap::{arg, Command};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let matches = Command::new("bollard_compose_cli")
.version(env!("CARGO_PKG_VERSION"))
.author("Your Name <your.email@example.com>")
.about("A CLI for managing Docker Compose in Rust")
.subcommand_required(true)
.arg_required_else_help(true)
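// The --file flag is defined on the top-level command, so it is shared by all subcommands.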
.arg(arg!(-f --file <FILE> "Sets a custom Docker Compose file"))
.subcommand(
Command::new("up")
.about("Start the services defined in the Docker Compose file")
.arg(arg!(-d --detach "Run in the background")),
)
.subcommand(Command::new("down").about("Stop and remove the services"))
.subcommand(Command::new("ps").about("List containers"))
.get_matches();
match matches.subcommand() {
Some(("up", sub_matches)) => {
let detach = sub_matches.get_one::<bool>("detach").unwrap_or(&false);
let file = matches.get_one::<String>("file").cloned();
up(file, *detach).await?;
}
Some(("down", _)) => {
let file = matches.get_one::<String>("file").cloned();
down(file).await?;
}
Some(("ps", _)) => {
ps().await?;
}
_ => unreachable!(),
}
Ok(())
}

View File

@@ -1,7 +1,7 @@
services:
memos:
image: ghcr.io/usememos/memos:0.22.4@sha256:b17a43b084327a8e37121fc3cce67a0a43b8a3ad75f9e9fa51c3f5b5ace290b4
#container_name: memos
container_name: memos
restart: unless-stopped
auto_remove: false
ports:

15
lib/Cargo.toml Normal file

@@ -0,0 +1,15 @@
[package]
name = "bollard_compose"
version = "0.1.0"
edition = "2021"
[dependencies]
bollard = "*"
serde = "1.0"
serde_yaml = "0.9"
tokio = {workspace = true, features = ["signal"]}
anyhow.workspace = true
log.workspace = true
futures-util = "0.3.30"

View File

@@ -0,0 +1,2 @@
pub mod compose_types;
pub mod parser;

View File

@@ -0,0 +1,12 @@
use crate::configuration::compose_types::DockerCompose;
use std::fs::File;
use std::io::Read;
pub fn parse_compose_config(file: Option<String>) -> anyhow::Result<DockerCompose> {
let file_path = file.unwrap_or("docker-compose.yaml".to_string());
let mut file = File::open(file_path)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let compose: DockerCompose = serde_yaml::from_str(&contents)?;
Ok(compose)
}

215
lib/src/container.rs Normal file

@@ -0,0 +1,215 @@
use crate::configuration::compose_types::DockerCompose;
use crate::helpers::dir::parent_dir_name;
use anyhow::anyhow;
use bollard::container::{Config, CreateContainerOptions, RemoveContainerOptions};
use bollard::models::{HostConfig, PortBinding, PortMap, RestartPolicy, RestartPolicyNameEnum};
use bollard::Docker;
use std::collections::HashMap;
pub(crate) async fn create_containers(
compose: &DockerCompose,
docker: &Docker,
detach: bool,
) -> anyhow::Result<Vec<String>> {
let mut container_ids = Vec::new();
let parent_dir = parent_dir_name()?;
for (name, service) in &compose.services {
let env = create_env_vec(&service.environment);
let conf: Config<String> = Config {
image: service.image.clone(),
attach_stdout: Some(!detach),
attach_stderr: Some(!detach),
open_stdin: Some(false),
env,
host_config: Some(HostConfig {
auto_remove: service.auto_remove,
restart_policy: parse_restart_policy(&service.restart),
port_bindings: create_port_map(&service.ports),
binds: parse_volume_mounts(compose, &service.volumes)?,
..Default::default()
}),
..Default::default()
};
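// Fall back to the docker-compose v1 style name "<project>_<service>_1" when container_name is not set.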
let container_name = service
.container_name
.clone()
.unwrap_or_else(|| format!("{}_{}_1", parent_dir, name));
let create_info = docker
.create_container::<String, String>(
Some(CreateContainerOptions {
name: container_name,
platform: None,
}),
conf,
)
.await?;
container_ids.push(create_info.id);
}
Ok(container_ids)
}
pub(crate) async fn start_containers(docker: &Docker, ids: Vec<String>) -> anyhow::Result<()> {
for id in ids {
docker.start_container::<String>(&id, None).await?;
}
Ok(())
}
pub async fn stop_containers(compose: &DockerCompose, docker: &Docker) -> anyhow::Result<()> {
let parent_dir = parent_dir_name()?;
for (name, service) in &compose.services {
let container_name = service
.container_name
.clone()
.unwrap_or_else(|| format!("{}_{}_1", parent_dir, name));
docker.stop_container(&container_name, None).await?;
}
Ok(())
}
pub async fn remove_containers(compose: &DockerCompose, docker: &Docker) -> anyhow::Result<()> {
let parent_dir = parent_dir_name()?;
for (name, service) in &compose.services {
let container_name = service
.container_name
.clone()
.unwrap_or_else(|| format!("{}_{}_1", parent_dir, name));
docker
.remove_container(
&container_name,
Some(RemoveContainerOptions {
force: true,
..Default::default()
}),
)
.await?;
}
Ok(())
}
pub(crate) async fn stop_containers_by_ids(
docker: &Docker,
ids: Vec<String>,
) -> anyhow::Result<()> {
for id in ids {
docker.stop_container(&id, None).await?;
}
Ok(())
}
fn create_env_vec(env: &Option<HashMap<String, String>>) -> Option<Vec<String>> {
match env {
Some(env) => {
let list = env
.iter()
.map(|(key, value)| format!("{}={}", key, value))
.collect();
Some(list)
}
None => None,
}
}
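// Converts docker-compose "HOST:CONTAINER" port strings into a bollard PortMap
// (container port as the key, host port as the binding).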
fn create_port_map(ports: &Option<Vec<String>>) -> Option<PortMap> {
ports.as_ref().map(|ports| {
ports
.iter()
.map(|port| {
let parts: Vec<&str> = port.split(':').collect();
(
parts[1].to_string(),
Some(vec![PortBinding {
host_ip: None,
host_port: Some(parts[0].to_string()),
}]),
)
})
.collect()
})
}
fn parse_restart_policy(restart: &Option<String>) -> Option<RestartPolicy> {
match restart {
None => None,
Some(restart) => match restart.as_str() {
"no" => Some(RestartPolicy {
name: Some(RestartPolicyNameEnum::NO),
..Default::default()
}),
"always" => Some(RestartPolicy {
name: Some(RestartPolicyNameEnum::ALWAYS),
..Default::default()
}),
"unless-stopped" => Some(RestartPolicy {
name: Some(RestartPolicyNameEnum::UNLESS_STOPPED),
..Default::default()
}),
"on-failure" => Some(RestartPolicy {
name: Some(RestartPolicyNameEnum::ON_FAILURE),
..Default::default()
}),
v => {
// handle special case when retry count is specified
if v.starts_with("on-failure:") {
let parts: Vec<&str> = v.split(':').collect();
Some(RestartPolicy {
name: Some(RestartPolicyNameEnum::ON_FAILURE),
maximum_retry_count: Some(parts[1].parse().unwrap()),
})
} else {
None
}
}
},
}
}
fn parse_volume_mount(compose: &DockerCompose, volume: String) -> anyhow::Result<String> {
let parts: Vec<&str> = volume.split(':').collect();
if parts.len() != 2 {
return Err(anyhow!("Invalid volume mount: {}", volume));
}
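// Absolute host paths are bind mounts and are passed through unchanged.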
if parts[0].starts_with('/') {
return Ok(volume);
}
let parent_dir = parent_dir_name()?;
let field_missing = format!("volume field for {} missing", volume);
// volumes map has to contain the volume defined here
if compose
.volumes
.as_ref()
.ok_or(anyhow!(field_missing.clone()))?
.contains_key(parts[0])
{
Ok(format!("{}_{}:{}", parent_dir, parts[0], parts[1]))
} else {
Err(anyhow!(field_missing))
}
}
fn parse_volume_mounts(
compose: &DockerCompose,
volumes: &Option<Vec<String>>,
) -> anyhow::Result<Option<Vec<String>>> {
match volumes {
None => Ok(None),
Some(volumes) => {
let mounts: Vec<anyhow::Result<String>> = volumes
.iter()
.map(|volume| parse_volume_mount(compose, volume.clone()))
.collect();
let mounts = mounts
.into_iter()
.collect::<anyhow::Result<Vec<String>>>()?;
if mounts.is_empty() {
Ok(None)
} else {
Ok(Some(mounts))
}
}
}
}

13
lib/src/helpers/dir.rs Normal file

@@ -0,0 +1,13 @@
use anyhow::anyhow;
use std::env;
pub fn parent_dir_name() -> anyhow::Result<String> {
let current_dir = env::current_dir()?;
// Despite the function name, this takes the current directory's own (leaf) name, which doubles as the compose project name
let parent_dir = current_dir
.file_name()
.ok_or(anyhow!("current directory has no final path component"))?;
let parent_dir_name = parent_dir.to_string_lossy().to_string();
Ok(parent_dir_name)
}

12
lib/src/helpers/docker.rs Normal file

@@ -0,0 +1,12 @@
use anyhow::anyhow;
use bollard::Docker;
pub async fn init_docker() -> anyhow::Result<Docker> {
let docker = Docker::connect_with_local_defaults()?;
docker
.ping()
.await
.map_err(|e| anyhow!("Connection to Docker Socket failed: {}", e))?;
Ok(docker)
}

2
lib/src/helpers/mod.rs Normal file

@@ -0,0 +1,2 @@
pub mod dir;
pub mod docker;

32
lib/src/images.rs Normal file

@@ -0,0 +1,32 @@
use crate::configuration::compose_types::DockerCompose;
use bollard::image::CreateImageOptions;
use bollard::models::CreateImageInfo;
use bollard::Docker;
use futures_util::StreamExt;
pub(crate) async fn pull_images(compose: &DockerCompose, docker: &Docker) {
for service in compose.services.values() {
println!("Trying to pull {}", service.image.clone().unwrap());
let mut stream = docker.create_image(
Some(CreateImageOptions {
from_image: service.image.clone().unwrap(),
..Default::default()
}),
None,
None,
);
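// create_image yields a stream of pull progress events; log each status update as it arrives.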
while let Some(pull_result) = stream.next().await {
match pull_result {
Err(e) => println!("{:?}", e),
Ok(CreateImageInfo {
status: Some(status),
..
}) => {
println!("{}", status)
}
_ => {}
}
}
}
}

42
lib/src/lib.rs Normal file

@@ -0,0 +1,42 @@
use crate::configuration::parser::parse_compose_config;
use crate::container::{create_containers, remove_containers, start_containers, stop_containers};
use crate::helpers::docker::init_docker;
use crate::images::pull_images;
use crate::monitor::monitor_build_outputs;
use crate::volume::create_volumes;
mod configuration;
mod container;
mod helpers;
mod images;
mod monitor;
mod volume;
pub async fn down(compose_file: Option<String>) -> anyhow::Result<()> {
let compose = parse_compose_config(compose_file)?;
let docker = init_docker().await?;
stop_containers(&compose, &docker).await?;
remove_containers(&compose, &docker).await?;
Ok(())
}
pub async fn ps() -> anyhow::Result<()> {
// Listing containers is not implemented yet.
Ok(())
}
pub async fn up(compose_file: Option<String>, detach: bool) -> anyhow::Result<()> {
let compose = parse_compose_config(compose_file)?;
let docker = init_docker().await?;
pull_images(&compose, &docker).await;
create_volumes(&compose, &docker).await?;
let ids = create_containers(&compose, &docker, detach).await?;
start_containers(&docker, ids.clone()).await?;
if !detach {
monitor_build_outputs(&docker, ids).await?;
}
Ok(())
}

59
lib/src/monitor.rs Normal file

@@ -0,0 +1,59 @@
use crate::container::stop_containers_by_ids;
use bollard::container::{AttachContainerOptions, LogOutput};
use bollard::Docker;
use futures_util::future::join_all;
use futures_util::StreamExt;
pub(crate) async fn monitor_build_outputs(docker: &Docker, ids: Vec<String>) -> anyhow::Result<()> {
let mut handles = vec![];
for id in ids.clone() {
let docker = docker.clone();
handles.push(tokio::spawn(async move {
let r = monitor_logs(&docker, id.clone()).await;
println!("Container {} exited with {:?}", id, r)
}));
}
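// Wait for either Ctrl-C (then stop the containers) or for all log-monitoring tasks to finish.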
tokio::select! {
_ = tokio::signal::ctrl_c() => {
println!("Ctrl-c received, stopping containers");
stop_containers_by_ids(docker, ids.clone()).await?;
}
_ = join_all(handles) => {
println!("All containers exited");
}
}
Ok(())
}
async fn monitor_logs(docker: &Docker, id: String) -> anyhow::Result<()> {
let mut attach_results = docker
.attach_container(
&id,
Some(AttachContainerOptions::<String> {
stdout: Some(true),
stderr: Some(true),
stdin: Some(false),
stream: Some(true),
..Default::default()
}),
)
.await?;
while let Some(log_result) = attach_results.output.next().await {
match log_result {
Ok(chunk) => match chunk {
LogOutput::StdIn { .. } => unreachable!(),
LogOutput::Console { .. } => unreachable!(),
LogOutput::StdOut { message } => {
println!("{}", String::from_utf8_lossy(&message));
}
LogOutput::StdErr { message } => {
println!("{}", String::from_utf8_lossy(&message));
}
},
Err(e) => println!("{}", e),
}
}
Ok(())
}

21
lib/src/volume.rs Normal file

@@ -0,0 +1,21 @@
use crate::configuration::compose_types::DockerCompose;
use crate::helpers::dir::parent_dir_name;
use bollard::volume::CreateVolumeOptions;
use bollard::Docker;
pub(crate) async fn create_volumes(compose: &DockerCompose, docker: &Docker) -> anyhow::Result<()> {
if let Some(volumes) = &compose.volumes {
let parent_dir = parent_dir_name()?;
for (name, volume) in volumes {
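// Prefix named volumes with the project (directory) name, mirroring docker-compose's "<project>_<volume>" convention.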
docker
.create_volume::<String>(CreateVolumeOptions {
name: format!("{}_{}", parent_dir, name),
driver: volume.driver.clone().unwrap_or_default(),
driver_opts: volume.driver_opts.clone().unwrap_or_default(),
labels: volume.labels.clone().unwrap_or_default(),
})
.await?;
}
}
Ok(())
}

View File

@@ -1,305 +0,0 @@
mod compose_types;
use crate::compose_types::DockerCompose;
use anyhow::anyhow;
use bollard::container::{Config, CreateContainerOptions};
use bollard::errors::Error;
use bollard::image::CreateImageOptions;
use bollard::models::{CreateImageInfo, HostConfig, PortBinding, PortMap, RestartPolicy, RestartPolicyNameEnum};
use bollard::Docker;
use clap::{Arg, Command};
use futures_util::StreamExt;
use std::collections::HashMap;
use std::env;
use std::fs::File;
use std::io::Read;
use bollard::volume::CreateVolumeOptions;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let matches = Command::new("docker-compose-rust")
.version("1.0")
.author("Your Name <your.email@example.com>")
.about("A CLI for managing Docker Compose in Rust")
.subcommand(
Command::new("up")
.about("Start the services defined in the Docker Compose file")
.arg(
Arg::new("file")
.short('f')
.long("file")
.value_name("FILE")
.help("Sets a custom Docker Compose file"),
),
)
.subcommand(Command::new("down").about("Stop and remove the services"))
.subcommand(Command::new("ps").about("List containers"))
.get_matches();
let docker = Docker::connect_with_local_defaults()?;
docker
.ping()
.await
.map_err(|e| anyhow!("Connection to Docker Socket failed: {}", e))?;
// Handle the "up" subcommand
if let Some(matches) = matches.subcommand_matches("up") {
//let file = matches.try_get_one("file")??;
let compose = read_docker_compose("docker-compose.yaml")?;
up(compose, &docker).await?;
// Here you would implement starting the services.
}
// Handle the "down" subcommand
if matches.subcommand_matches("down").is_some() {
println!("Stopping services...");
// Here you would implement stopping the services.
}
// Handle the "ps" subcommand
if matches.subcommand_matches("ps").is_some() {
println!("Listing containers...");
// Here you would implement listing running containers.
}
Ok(())
}
fn parent_dir_name() -> anyhow::Result<String> {
let current_dir = env::current_dir().expect("Failed to get current directory");
// Get the parent directory of the current directory
let parent_dir = current_dir.file_name().ok_or(anyhow!("Current directory has no parent"))?;;
let parent_dir_name = parent_dir.to_string_lossy().to_string();
Ok(parent_dir_name)
}
fn read_docker_compose(file_path: &str) -> anyhow::Result<DockerCompose> {
let mut file = File::open(file_path)?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
let compose: DockerCompose = serde_yaml::from_str(&contents)?;
Ok(compose)
}
async fn up(compose: DockerCompose, docker: &Docker) -> anyhow::Result<()> {
pull_images(&compose, docker).await;
create_volumes(&compose, docker).await?;
let ids = create_containers(&compose, docker).await?;
start_containers(&compose, docker, ids).await?;
Ok(())
}
fn create_env_vec(env: &Option<HashMap<String, String>>) -> Option<Vec<String>> {
match env {
Some(env) => {
let list = env
.into_iter()
.map(|(key, value)| format!("{}={}", key, value))
.collect();
Some(list)
}
None => None,
}
}
fn create_port_map(ports: &Option<Vec<String>>) -> Option<PortMap> {
match ports {
None => None,
Some(ports) => Some(
ports
.iter()
.map(|port| {
let parts: Vec<&str> = port.split(':').collect();
(
parts[1].to_string(),
Some(vec![PortBinding {
host_ip: None,
host_port: Some(parts[0].to_string()),
}]),
)
})
.collect(),
),
}
}
fn parse_restart_policy(restart: &Option<String>) -> Option<RestartPolicy> {
match restart {
None => None,
Some(restart) => match restart.as_str() {
"no" => Some(RestartPolicy {
name: Some(RestartPolicyNameEnum::NO),
..Default::default()
}),
"always" => Some(RestartPolicy {
name: Some(RestartPolicyNameEnum::ALWAYS),
..Default::default()
}),
"unless-stopped" => Some(RestartPolicy {
name: Some(RestartPolicyNameEnum::UNLESS_STOPPED),
..Default::default()
}),
"on-failure" => Some(RestartPolicy {
name: Some(RestartPolicyNameEnum::NO),
..Default::default()
}),
v => {
// handle special case when retry count is specified
if v.starts_with("on-failure:") {
let parts: Vec<&str> = v.split(':').collect();
Some(RestartPolicy {
name: Some(RestartPolicyNameEnum::ON_FAILURE),
maximum_retry_count: Some(parts[1].parse().unwrap()),
})
} else {
None
}
},
},
}
}
fn parse_volume_mount(compose: &DockerCompose, volume: String) -> anyhow::Result<String> {
let parts: Vec<&str> = volume.split(':').collect();
if parts.len() != 2 {
return Err(anyhow!("Invalid volume mount: {}", volume));
}
if(parts[0].starts_with('/')) {
return Ok(volume);
}
let parent_dir = parent_dir_name()?;
let field_missing = format!("volume field for {} missing", volume);
// volumes map has to contain the volume defined here
if compose.volumes.as_ref().ok_or(anyhow!(field_missing.clone()))?.contains_key(parts[0]) {
Ok(format!("{}_{}:{}", parent_dir, parts[0], parts[1]))
} else {
Err(anyhow!(field_missing))
}
}
fn parse_volume_mounts(compose: &DockerCompose, volumes: &Option<Vec<String>>) -> anyhow::Result<Option<Vec<String>>> {
match volumes {
None => Ok(None),
Some(volumes) => {
let mounts: Vec<anyhow::Result<String>> = volumes
.iter()
.map(|volume| parse_volume_mount(compose, volume.clone()))
.collect();
let mounts = mounts.into_iter().collect::<anyhow::Result<Vec<String>>>()?;
if mounts.is_empty() {
Ok(None)
} else {
Ok(Some(mounts))
}
},
}
}
async fn create_volumes(
compose: &DockerCompose,
docker: &Docker,
) -> anyhow::Result<()> {
if let Some(volumes) = &compose.volumes {
let parent_dir = parent_dir_name()?;
for (name, volume) in volumes {
let create_info = docker
.create_volume::<String>(
CreateVolumeOptions {
name: format!("{}_{}", parent_dir, name),
driver: volume.driver.clone().unwrap_or_else(Default::default),
driver_opts: volume.driver_opts.clone().unwrap_or_else(Default::default),
labels: volume.labels.clone().unwrap_or_else(Default::default),
},
)
.await?;
}
}
Ok(())
}
async fn create_containers(
compose: &DockerCompose,
docker: &Docker,
) -> anyhow::Result<Vec<String>> {
let mut container_ids = Vec::new();
let parent_dir = parent_dir_name()?;
for (name, service) in &compose.services {
let env = create_env_vec(&service.environment);
let conf: Config<String> = Config {
image: service.image.clone(),
attach_stdout: Some(true),
attach_stderr: Some(true),
open_stdin: Some(false),
env,
host_config: Some(HostConfig {
auto_remove: service.auto_remove,
restart_policy: parse_restart_policy(&service.restart),
port_bindings: create_port_map(&service.ports),
binds: parse_volume_mounts(&compose, &service.volumes)?,
..Default::default()
}),
..Default::default()
};
let container_name = service.container_name.clone().unwrap_or_else(|| format!("{}_{}_1", parent_dir, name));
let create_info = docker
.create_container::<String, String>(
Some(CreateContainerOptions {
name: container_name,
platform: None,
}),
conf,
)
.await?;
container_ids.push(create_info.id);
}
Ok(container_ids)
}
async fn start_containers(
compose: &DockerCompose,
docker: &Docker,
ids: Vec<String>,
) -> anyhow::Result<()> {
for id in ids {
docker.start_container::<String>(&id, None).await?;
}
Ok(())
}
async fn pull_images(compose: &DockerCompose, docker: &Docker) {
for (name, service) in &compose.services {
println!("Trying to pull {}", service.image.clone().unwrap());
let mut stream = docker.create_image(
Some(CreateImageOptions {
from_image: service.image.clone().unwrap(),
..Default::default()
}),
None,
None,
);
while let Some(pull_result) = stream.next().await {
match pull_result {
Err(e) => println!("{:?}", e),
Ok(CreateImageInfo {
status: Some(status),
..
}) => {
println!("{}", status)
}
_ => {}
}
}
}
}
fn down(compose: &DockerCompose) {
// Here you would implement starting the services.
}