diff --git a/Justfile b/Justfile
index cfd7891..12b3d68 100644
--- a/Justfile
+++ b/Justfile
@@ -13,7 +13,7 @@ run:
'RUST_BACKTRACE=1 '$CARGO_PATH' run --bin vmm -- cli --memory 512 --cpus 1 \
--kernel tools/kernel/linux-cloud-hypervisor/arch/x86/boot/compressed/vmlinux.bin \
--iface-host-addr 172.29.0.1 --netmask 255.255.0.0 --iface-guest-addr 172.29.0.2 \
- --initramfs=tools/rootfs/initramfs.img'
+ --initramfs=../virt-do/initramfs.img'
build-kernel:
#!/bin/bash
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..c32f809
--- /dev/null
+++ b/README.md
@@ -0,0 +1,141 @@
+
+
Cloudlet
+
The almost fast FaaS
+
+
+
+## Table of Contents
+
+- [Table of Contents](#table-of-contents)
+- [Prerequisites](#prerequisites)
+- [Run Locally](#run-locally)
+ - [Clone the project](#clone-the-project)
+ - [Setup](#setup)
+ - [Start the VMM](#start-the-vmm)
+ - [Run the API](#run-the-api)
+ - [Send the request using the CLI](#send-the-request-using-the-cli)
+- [Architecture](#architecture)
+- [Config file](#config-file)
+
+## Prerequisites
+
+Install the dependencies. On Debian/Ubuntu:
+
+```bash
+apt install build-essential cmake pkg-config libssl-dev flex bison libelf-dev
+```
+
+Then, configure the Rust toolchain and install [Just](https://github.com/casey/just):
+
+```bash
+rustup target add x86_64-unknown-linux-musl
+cargo install just
+```
+
+Finally, install [the protobuf compiler](https://github.com/protocolbuffers/protobuf?tab=readme-ov-file#protobuf-compiler-installation).
+
+## Run Locally
+
+### Clone the project
+
+```bash
+git clone https://github.com/virt-do/cloudlet
+```
+
+### Setup
+
+Go to the project directory:
+
+```bash
+cd cloudlet
+```
+
+Create a TOML config file or update the [existing one](./src/cli/examples/config.toml):
+
+```bash
+cat << EOF > src/cli/examples/config.toml
+workload-name = "fibonacci"
+language = "rust"
+action = "prepare-and-run"
+
+[server]
+address = "localhost"
+port = 50051
+
+[build]
+source-code-path = "$(readlink -f ./src/cli/examples/main.rs)"
+release = true
+EOF
+```
+
+Make sure to update the `source-code-path` to the path of the source code you want to run.
+Use an absolute path.
+
+[Here](#config-file) is more information about each field.
+
+### Start the VMM
+
+> [!WARNING]
+> Make sure to set the `CARGO_PATH` environment variable to the path of your cargo binary
+>
+> ```bash
+> export CARGO_PATH=$(which cargo)
+> ```
+
+```bash
+sudo -E capsh --keep=1 --user=$USER --inh=cap_net_admin --addamb=cap_net_admin -- -c 'RUST_BACKTRACE=1 '$CARGO_PATH' run --bin vmm -- grpc'
+```
+
+### Run the API
+
+```bash
+cargo run --bin api
+```
+
+### Send the request using the CLI
+
+```bash
+cargo run --bin cli -- run --config-path src/cli/examples/config.toml
+```
+
+> [!NOTE]
+> If it's your first time running the request, `cloudlet` will have to compile a kernel and an initramfs image.
+> This will take a while, so make sure you do something else while you wait...
+
+## Architecture
+
+Here is a simple sequence diagram of Cloudlet:
+
+```mermaid
+sequenceDiagram
+ participant CLI
+ participant API
+ participant VMM
+ participant Agent
+
+ CLI->>API: HTTP Request /run
+ API->>VMM: gRPC Request to create VM
+ VMM->>Agent: Creation of the VM
+ VMM->>Agent: gRPC Request to the agent
+ Agent->>Agent: Build and run code
+ Agent-->>VMM: Stream Response
+ VMM-->>API: Stream Response
+ API-->>CLI: HTTP Response
+```
+
+1. The CLI sends an HTTP request to the API which in turn sends a gRPC request to the VMM
+2. The VMM then creates a VM
+3. When a VM starts it boots on the agent which holds another gRPC server to handle requests
+4. The agent then builds and runs the code
+5. The response is streamed back to the VMM and then to the API and finally to the CLI.
+
+## Config file
+| Field | Description | Type |
+| --- | --- | --- |
+| workload-name | Name of the workload you want to run | String |
+| language | Language of the source code | String enum: rust, python, node |
+| action | Action to perform | String enum: prepare-and-run |
+| server.address | Address of the server (currently not used) | String |
+| server.port | Port of the server (currently not used) | Integer |
+| build.source-code-path | Path to the source code on your local machine | String |
+| build.release | Build the source code in release mode | Boolean |
\ No newline at end of file
diff --git a/assets/demo.gif b/assets/demo.gif
new file mode 100644
index 0000000..bb2e98e
Binary files /dev/null and b/assets/demo.gif differ
diff --git a/src/agent/Cargo.toml b/src/agent/Cargo.toml
index daa40d6..3f531e1 100644
--- a/src/agent/Cargo.toml
+++ b/src/agent/Cargo.toml
@@ -8,7 +8,10 @@ name = "agent"
path = "src/lib.rs"
[dependencies]
+async-trait = "0.1.80"
clap = { version = "4.5.4", features = ["derive", "env"] }
+nix = { version = "0.28.0", features = ["signal"] }
+once_cell = "1.19.0"
prost = "0.12.4"
rand = "0.8.5"
serde = { version = "1.0.197", features = ["derive"] }
diff --git a/src/agent/src/agents/debug.rs b/src/agent/src/agents/debug.rs
index ceae270..dbbf241 100644
--- a/src/agent/src/agents/debug.rs
+++ b/src/agent/src/agents/debug.rs
@@ -1,8 +1,12 @@
use super::AgentOutput;
use crate::agents::Agent;
use crate::{workload, AgentResult};
+use async_trait::async_trait;
+use std::collections::HashSet;
use std::fs::create_dir_all;
+use std::sync::Arc;
use std::time::SystemTime;
+use tokio::sync::Mutex;
pub struct DebugAgent {
workload_config: workload::config::Config,
@@ -14,8 +18,9 @@ impl From for DebugAgent {
}
}
+#[async_trait]
impl Agent for DebugAgent {
- fn prepare(&self) -> AgentResult {
+ async fn prepare(&self, _: Arc>>) -> AgentResult {
let dir = format!("/tmp/{}", self.workload_config.workload_name);
println!("Function directory: {}", dir);
@@ -39,7 +44,7 @@ impl Agent for DebugAgent {
})
}
- fn run(&self) -> AgentResult {
+ async fn run(&self, _: Arc>>) -> AgentResult {
let dir = format!("/tmp/{}", self.workload_config.workload_name);
let content = std::fs::read_to_string(format!("{}/debug.txt", &dir))
diff --git a/src/agent/src/agents/mod.rs b/src/agent/src/agents/mod.rs
index f70d43b..9d4032c 100644
--- a/src/agent/src/agents/mod.rs
+++ b/src/agent/src/agents/mod.rs
@@ -1,5 +1,9 @@
use crate::{AgentError, AgentResult};
+use async_trait::async_trait;
use serde::Deserialize;
+use std::collections::HashSet;
+use std::sync::Arc;
+use tokio::sync::Mutex;
#[cfg(feature = "debug-agent")]
pub mod debug;
@@ -12,9 +16,10 @@ pub struct AgentOutput {
pub stderr: String,
}
+#[async_trait]
pub trait Agent {
- fn prepare(&self) -> AgentResult;
- fn run(&self) -> AgentResult;
+ async fn prepare(&self, child_processes: Arc>>) -> AgentResult;
+ async fn run(&self, child_processes: Arc>>) -> AgentResult;
}
#[derive(Debug, Clone, Deserialize)]
diff --git a/src/agent/src/agents/rust.rs b/src/agent/src/agents/rust.rs
index c34f3b4..207276e 100644
--- a/src/agent/src/agents/rust.rs
+++ b/src/agent/src/agents/rust.rs
@@ -1,8 +1,13 @@
use super::{Agent, AgentOutput};
use crate::{workload, AgentError, AgentResult};
+use async_trait::async_trait;
use rand::distributions::{Alphanumeric, DistString};
use serde::Deserialize;
-use std::{fs::create_dir_all, process::Command};
+use std::collections::HashSet;
+use std::fs::create_dir_all;
+use std::sync::Arc;
+use tokio::process::Command;
+use tokio::sync::Mutex;
#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
@@ -21,27 +26,45 @@ pub struct RustAgent {
}
impl RustAgent {
- fn build(&self, function_dir: &String) -> AgentResult {
+ async fn build(
+ &self,
+ function_dir: &str,
+ child_processes: Arc>>,
+ ) -> AgentResult {
if self.rust_config.build.release {
- let output = Command::new("cargo")
+ let child = Command::new("cargo")
.arg("build")
.arg("--release")
.current_dir(function_dir)
- .output()
+ .spawn()
.expect("Failed to build function");
+ {
+ child_processes.lock().await.insert(child.id().unwrap());
+ }
+
+ let output = child
+ .wait_with_output()
+ .await
+ .expect("Failed to wait on child");
+
Ok(AgentOutput {
exit_code: output.status.code().unwrap(),
stdout: std::str::from_utf8(&output.stdout).unwrap().to_string(),
stderr: std::str::from_utf8(&output.stderr).unwrap().to_string(),
})
} else {
- let output = Command::new("cargo")
+ let child = Command::new("cargo")
.arg("build")
.current_dir(function_dir)
- .output()
+ .spawn()
.expect("Failed to build function");
+ let output = child
+ .wait_with_output()
+ .await
+ .expect("Failed to wait on child");
+
Ok(AgentOutput {
exit_code: output.status.code().unwrap(),
stdout: std::str::from_utf8(&output.stdout).unwrap().to_string(),
@@ -63,8 +86,9 @@ impl From for RustAgent {
}
}
+#[async_trait]
impl Agent for RustAgent {
- fn prepare(&self) -> AgentResult {
+ async fn prepare(&self, child_processes: Arc>>) -> AgentResult {
let function_dir = format!(
"/tmp/{}",
Alphanumeric.sample_string(&mut rand::thread_rng(), 16)
@@ -85,7 +109,7 @@ impl Agent for RustAgent {
[package]
name = "{}"
version = "0.1.0"
- edition = "2018"
+ edition = "2021"
"#,
self.workload_config.workload_name
);
@@ -93,7 +117,7 @@ impl Agent for RustAgent {
std::fs::write(format!("{}/Cargo.toml", &function_dir), cargo_toml)
.expect("Unable to write Cargo.toml file");
- let result = self.build(&function_dir)?;
+ let result = self.build(&function_dir, child_processes).await?;
if result.exit_code != 0 {
println!("Build failed: {:?}", result);
@@ -131,11 +155,20 @@ impl Agent for RustAgent {
})
}
- fn run(&self) -> AgentResult {
- let output = Command::new(format!("/tmp/{}", self.workload_config.workload_name))
- .output()
+ async fn run(&self, child_processes: Arc>>) -> AgentResult {
+ let child = Command::new(format!("/tmp/{}", self.workload_config.workload_name))
+ .spawn()
.expect("Failed to run function");
+ {
+ child_processes.lock().await.insert(child.id().unwrap());
+ }
+
+ let output = child
+ .wait_with_output()
+ .await
+ .expect("Failed to wait on child");
+
let agent_output = AgentOutput {
exit_code: output.status.code().unwrap(),
stdout: std::str::from_utf8(&output.stdout).unwrap().to_string(),
diff --git a/src/agent/src/workload/runner.rs b/src/agent/src/workload/runner.rs
index ae4e175..1cbeb61 100644
--- a/src/agent/src/workload/runner.rs
+++ b/src/agent/src/workload/runner.rs
@@ -1,46 +1,63 @@
+use super::config::Config;
use crate::{
agent::ExecuteRequest,
agents::{rust, Agent, AgentOutput, Language},
workload::config::Action,
AgentError, AgentResult,
};
+use std::collections::HashSet;
+use std::sync::Arc;
+use tokio::sync::Mutex;
#[cfg(feature = "debug-agent")]
use crate::agents::debug;
-use super::config::Config;
-
/// Runner for a workload.
/// Will execute the workload based on the inner agent (language).
pub struct Runner {
config: Config,
agent: Box,
+ child_processes: Arc>>,
}
impl Runner {
- pub fn new(config: Config) -> Self {
+ pub fn new(config: Config, child_processes: Arc>>) -> Self {
let agent: Box = match config.language {
Language::Rust => Box::new(rust::RustAgent::from(config.clone())),
#[cfg(feature = "debug-agent")]
Language::Debug => Box::new(debug::DebugAgent::from(config.clone())),
};
- Runner { config, agent }
+ Runner {
+ config,
+ agent,
+ child_processes,
+ }
}
- pub fn new_from_execute_request(execute_request: ExecuteRequest) -> Result {
+ pub fn new_from_execute_request(
+ execute_request: ExecuteRequest,
+ child_processes: Arc>>,
+ ) -> Result {
let config = Config::new_from_execute_request(execute_request)?;
- Ok(Self::new(config))
+ Ok(Self::new(config, child_processes))
}
- pub fn run(&self) -> AgentResult {
+ pub async fn run(&self) -> AgentResult {
let result = match self.config.action {
- Action::Prepare => self.agent.prepare()?,
- Action::Run => self.agent.run()?,
+ Action::Prepare => {
+ self.agent
+ .prepare(Arc::clone(&self.child_processes))
+ .await?
+ }
+ Action::Run => self.agent.run(Arc::clone(&self.child_processes)).await?,
Action::PrepareAndRun => {
- let res = self.agent.prepare()?;
+ let res = self
+ .agent
+ .prepare(Arc::clone(&self.child_processes))
+ .await?;
println!("Prepare result {:?}", res);
- self.agent.run()?
+ self.agent.run(Arc::clone(&self.child_processes)).await?
}
};
diff --git a/src/agent/src/workload/service.rs b/src/agent/src/workload/service.rs
index 3b05386..996d256 100644
--- a/src/agent/src/workload/service.rs
+++ b/src/agent/src/workload/service.rs
@@ -1,12 +1,18 @@
use super::runner::Runner;
use crate::agent::{self, execute_response::Stage, ExecuteRequest, ExecuteResponse, SignalRequest};
use agent::workload_runner_server::WorkloadRunner;
-use std::process;
+use once_cell::sync::Lazy;
+use std::collections::HashSet;
+use std::{process, sync::Arc};
+use tokio::sync::Mutex;
use tokio_stream::wrappers::ReceiverStream;
use tonic::{Request, Response};
type Result = std::result::Result, tonic::Status>;
+static CHILD_PROCESSES: Lazy>>> =
+ Lazy::new(|| Arc::new(Mutex::new(HashSet::new())));
+
pub struct WorkloadRunnerService;
#[tonic::async_trait]
@@ -18,11 +24,12 @@ impl WorkloadRunner for WorkloadRunnerService {
let execute_request = req.into_inner();
- let runner = Runner::new_from_execute_request(execute_request)
+ let runner = Runner::new_from_execute_request(execute_request, CHILD_PROCESSES.clone())
.map_err(|e| tonic::Status::internal(e.to_string()))?;
let res = runner
.run()
+ .await
.map_err(|e| tonic::Status::internal(e.to_string()))?;
let _ = tx
@@ -42,6 +49,21 @@ impl WorkloadRunner for WorkloadRunnerService {
}
async fn signal(&self, _: Request) -> Result<()> {
+ let child_processes = CHILD_PROCESSES.lock().await;
+
+ for &child_id in child_processes.iter() {
+ match nix::sys::signal::kill(
+ nix::unistd::Pid::from_raw(child_id as i32),
+ nix::sys::signal::Signal::SIGTERM,
+ ) {
+ Ok(_) => println!("Sent SIGTERM to child process {}", child_id),
+ Err(e) => println!(
+ "Failed to send SIGTERM to child process {}: {}",
+ child_id, e
+ ),
+ }
+ }
+
process::exit(0);
}
}
diff --git a/src/agent/examples/config.toml b/src/cli/examples/config.toml
similarity index 71%
rename from src/agent/examples/config.toml
rename to src/cli/examples/config.toml
index 013193c..f77b218 100644
--- a/src/agent/examples/config.toml
+++ b/src/cli/examples/config.toml
@@ -7,5 +7,5 @@ address = "localhost"
port = 50051
[build]
-source-code-path = "CHANGEME/src/agent/examples/main.rs"
+source-code-path = "CHANGEME/src/cli/examples/main.rs"
release = true
diff --git a/src/agent/examples/main.rs b/src/cli/examples/main.rs
similarity index 100%
rename from src/agent/examples/main.rs
rename to src/cli/examples/main.rs
diff --git a/src/fs-gen/Cargo.toml b/src/fs-gen/Cargo.toml
index eb0b4eb..792e83f 100644
--- a/src/fs-gen/Cargo.toml
+++ b/src/fs-gen/Cargo.toml
@@ -22,3 +22,4 @@ anyhow = "1.0.82"
tracing = "0.1.40"
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
thiserror = "1.0.59"
+clap-stdin = "0.4.0"
diff --git a/src/fs-gen/resources/initfile b/src/fs-gen/resources/initfile
index 4d72bc6..14945cd 100644
--- a/src/fs-gen/resources/initfile
+++ b/src/fs-gen/resources/initfile
@@ -14,6 +14,8 @@ export RUST_VERSION='1.77.2'
export PATH=$CARGO_HOME/bin:$PATH
+ln -s /proc/net/pnp /etc/resolv.conf
+
/agent
reboot
\ No newline at end of file
diff --git a/src/fs-gen/src/cli_args.rs b/src/fs-gen/src/cli_args.rs
index a736113..6ae15ec 100644
--- a/src/fs-gen/src/cli_args.rs
+++ b/src/fs-gen/src/cli_args.rs
@@ -1,6 +1,7 @@
use std::{env, path::PathBuf};
use clap::{command, error::ErrorKind, ArgAction, CommandFactory, Parser};
+use clap_stdin::MaybeStdin;
use regex::Regex;
use once_cell::sync::Lazy;
@@ -14,7 +15,7 @@ static RE_IMAGE_NAME: Lazy = Lazy::new(|| {
#[derive(Parser, Debug, Clone)]
#[command(version, about, long_about = None)]
pub struct CliArgs {
- /// The name of the image to download
+ /// The name of the image to download, can include repository and tag: [REPOSITORY/NAME:TAG]
pub image_name: String,
/// The host path to the guest agent binary
@@ -36,6 +37,18 @@ pub struct CliArgs {
#[arg(short='d', long="debug", action=ArgAction::SetTrue)]
pub debug: bool,
+
+ /// Username to pull image from a private repository
+ #[arg(short='u', long="username", default_value=None)]
+ pub username: Option,
+
+    /// Password can also be passed via STDIN: [echo $PASSWORD | fs-gen ... -p -]
+ #[arg(short='p', long="password", default_value=None)]
+ pub password: Option>,
+
+ /// Allow invalid TLS certificates
+ #[arg(long="insecure", action=ArgAction::SetTrue)]
+ pub insecure: bool,
}
impl CliArgs {
@@ -45,6 +58,7 @@ impl CliArgs {
args.validate_image();
args.validate_host_path();
+ args.validate_auth();
args
}
@@ -73,6 +87,26 @@ impl CliArgs {
.exit();
}
}
+
+ fn validate_auth(&self) {
+ let mut cmd = CliArgs::command();
+ let instruction =
+ "Define both username and password to connect to a private image repository.";
+ if self.username.is_none() && self.password.is_some() {
+ cmd.error(
+ ErrorKind::InvalidValue,
+ format!("Username not provided. {}", instruction),
+ )
+ .exit();
+ }
+ if self.username.is_some() && self.password.is_none() {
+ cmd.error(
+ ErrorKind::InvalidValue,
+ format!("Password not provided. {}", instruction),
+ )
+ .exit();
+ }
+ }
}
/// Get the default temporary directory for the current execution.
diff --git a/src/fs-gen/src/image_builder.rs b/src/fs-gen/src/image_builder.rs
index fa8803a..464d300 100644
--- a/src/fs-gen/src/image_builder.rs
+++ b/src/fs-gen/src/image_builder.rs
@@ -115,8 +115,10 @@ pub fn merge_layer(blob_paths: &[PathBuf], output_folder: &Path, tmp_folder: &Pa
.with_context(|| "Failed to create a new channel".to_string())?,
};
- let handle = thread::spawn(move || {
- let _ = server.svc_loop();
+ let handle = thread::spawn(move || -> Result<()> {
+ server
+ .svc_loop()
+ .with_context(|| "Failed to execute the fuse server loop")
});
debug!("Starting copy...");
@@ -135,7 +137,10 @@ pub fn merge_layer(blob_paths: &[PathBuf], output_folder: &Path, tmp_folder: &Pa
se.umount()
.with_context(|| "Failed to unmount the fuse session".to_string())?;
- let _ = handle.join();
+ let server_result = handle.join();
+ if server_result.is_err() || server_result.is_ok_and(|val| val.is_err()) {
+ return Err(anyhow!("Failed to execute the fuse server"));
+ }
info!("Finished merging layers!");
Ok(())
diff --git a/src/fs-gen/src/initramfs_generator.rs b/src/fs-gen/src/initramfs_generator.rs
index c216fa0..8462161 100644
--- a/src/fs-gen/src/initramfs_generator.rs
+++ b/src/fs-gen/src/initramfs_generator.rs
@@ -61,7 +61,7 @@ pub fn generate_initramfs(root_directory: &Path, output: &Path) -> Result<()> {
.current_dir(root_directory)
.stdout(Stdio::from(file))
.arg("-c")
- .arg("find . -print0 | cpio -0 --create --owner=root:root --format=newc | xz -9 --format=lzma")
+ .arg("find . -print0 | cpio -0 --create --owner=root:root --format=newc | xz -9 -T0 --format=lzma")
.spawn()
.with_context(|| "Failed to package initramfs into bundle".to_string())?;
diff --git a/src/fs-gen/src/loader/download.rs b/src/fs-gen/src/loader/download.rs
index 1cc8c65..0943865 100644
--- a/src/fs-gen/src/loader/download.rs
+++ b/src/fs-gen/src/loader/download.rs
@@ -1,69 +1,73 @@
use crate::loader::errors::ImageLoaderError;
+use crate::loader::structs::{Layer, ManifestV2};
use crate::loader::utils::{get_docker_download_token, unpack_tarball};
use anyhow::{Context, Result};
+use clap_stdin::MaybeStdin;
use reqwest::blocking::Client;
-use serde_json::Value;
use std::fs::create_dir_all;
use std::path::{Path, PathBuf};
use tracing::{debug, info, warn};
+use super::structs::Image;
+
pub(crate) fn download_image_fs(
image_name: &str,
architecture: &str,
output_file: PathBuf,
+ username: Option,
+ password: Option>,
+ insecure: bool,
) -> Result, ImageLoaderError> {
info!("Downloading image...");
-
- // Get image's name and tag
- let image_and_tag: Vec<&str> = image_name.split(':').collect();
- let image_name = image_and_tag[0];
- let tag = if image_and_tag.len() < 2 {
- "latest"
- } else {
- image_and_tag[1]
- };
+ let image = Image::from_str(image_name);
+ debug!(
+ registry = image.registry,
+ repository = image.repository,
+ name = image.name,
+ tag = image.tag,
+ "image:",
+ );
// Get download token and download manifest
- let client = Client::new();
- let token = &get_docker_download_token(&client, image_name)?;
- let manifest = download_manifest(&client, token, image_name, tag)
+ let client = Client::builder()
+ .danger_accept_invalid_certs(insecure)
+ .build()
+ .map_err(|e| ImageLoaderError::Error { source: e.into() })?;
+
+ let token = &get_docker_download_token(&client, &image, username, password)?;
+ let manifest = download_manifest(&client, token, &image, &image.tag)
.map_err(|e| ImageLoaderError::Error { source: e })?;
- if let Some(layers) = manifest["layers"].as_array() {
- // We have layers already, no need to look into sub-manifests.
+ if let ManifestV2::ImageManifest(m) = manifest {
+ // We directly get the image manifest rather than a list of manifests (fat manifest)
info!("Found layers in manifest");
warn!(
- architecture,
- "Manifest did not specify architecture, the initramfs may not work for the requested architecture"
+ "{}:{} is not a multi-platform image, the initramfs is not guaranteed to work correctly on the architecture {}",
+ image.name, image.tag, architecture
);
create_dir_all(&output_file)
.with_context(|| "Could not create output directory for image downloading")?;
- return download_layers(layers, &client, token, image_name, &output_file)
+ return download_layers(&m.layers, &client, token, &image, &output_file)
.map_err(|e| ImageLoaderError::Error { source: e });
}
- // Below, we assume there are no layers found.
+ // Below, we assume that the image is multi-platform and we received a list of manifests (fat manifest).
// We dig into sub-manifests to try and find a suitable one to download, with hopefully layers inside.
- let manifest_list = match manifest["manifests"].as_array() {
- // No sub-manifests found, we throw an error.
- None => Err(ImageLoaderError::ManifestNotFound(
- image_name.to_string(),
- tag.to_string(),
- ))?,
- Some(m) => m,
+ let manifest_list = match manifest {
+ // The manifest structure doesn't correspond to a fat manifest, we throw an error.
+ ManifestV2::ManifestList(m) => m,
+ _ => Err(ImageLoaderError::ManifestNotFound(image.clone()))?,
};
info!(
architecture,
"Manifest list found. Looking for an architecture-specific manifest..."
);
- let arch_specific_manifest = manifest_list.iter().find(|manifest| {
- manifest["platform"].as_object().unwrap()["architecture"]
- .as_str()
- .unwrap()
- == architecture
- });
+ let arch_specific_manifest = manifest_list
+ .manifests
+ .iter()
+ .find(|manifest| manifest.platform.architecture == architecture);
let submanifest = match arch_specific_manifest {
None => Err(ImageLoaderError::UnsupportedArchitecture(
@@ -72,35 +76,36 @@ pub(crate) fn download_image_fs(
Some(m) => {
debug!("Downloading architecture-specific manifest");
- download_manifest(&client, token, image_name, m["digest"].as_str().unwrap())
+ download_manifest(&client, token, &image, &m.digest)
.map_err(|e| ImageLoaderError::Error { source: e })?
}
};
- match submanifest["layers"].as_array() {
- None => Err(ImageLoaderError::LayersNotFound)?,
- Some(layers) => {
+ match submanifest {
+ // The submanifest structure doesn't correspond to an image manifest, we throw an error.
+ ManifestV2::ImageManifest(m) => {
create_dir_all(&output_file)
.with_context(|| "Could not create output directory for image downloading")?;
- download_layers(layers, &client, token, image_name, &output_file)
+ download_layers(&m.layers, &client, token, &image, &output_file)
.map_err(|e| ImageLoaderError::Error { source: e })
}
+ _ => Err(ImageLoaderError::ImageManifestNotFound(image.clone()))?,
}
}
fn download_manifest(
client: &Client,
token: &str,
- image_name: &str,
+ image: &Image,
digest: &str,
-) -> Result {
+) -> Result {
// Query Docker Hub API to get the image manifest
let manifest_url = format!(
- "https://registry-1.docker.io/v2/library/{}/manifests/{}",
- image_name, digest
+ "{}/v2/{}/{}/manifests/{}",
+ image.registry, image.repository, image.name, digest
);
- let manifest: Value = client
+ let manifest: ManifestV2 = client
.get(manifest_url)
.header(
"Accept",
@@ -111,6 +116,7 @@ fn download_manifest(
"application/vnd.docker.distribution.manifest.list.v2+json",
)
.header("Accept", "application/vnd.oci.image.manifest.v1+json")
+ .header("Accept", "application/vnd.oci.image.index.v1+json")
.bearer_auth(token)
.send()
.with_context(|| "Could not send request to get manifest data".to_string())?
@@ -126,10 +132,10 @@ fn download_manifest(
}
fn download_layers(
- layers: &Vec,
+ layers: &Vec,
client: &Client,
token: &str,
- image_name: &str,
+ image: &Image,
output_dir: &Path,
) -> Result> {
info!("Downloading and unpacking layers...");
@@ -138,12 +144,10 @@ fn download_layers(
// Download and unpack each layer
for layer in layers {
- let digest = layer["digest"]
- .as_str()
- .with_context(|| "Failed to get digest for layer".to_string())?;
+ let digest = &layer.digest;
let layer_url = format!(
- "https://registry-1.docker.io/v2/library/{}/blobs/{}",
- image_name, digest
+ "{}/v2/{}/{}/blobs/{}",
+ image.registry, image.repository, image.name, digest
);
let response = client
diff --git a/src/fs-gen/src/loader/errors.rs b/src/fs-gen/src/loader/errors.rs
index d262c04..7d38ac5 100644
--- a/src/fs-gen/src/loader/errors.rs
+++ b/src/fs-gen/src/loader/errors.rs
@@ -1,19 +1,24 @@
+use crate::loader::structs::Image;
use anyhow::Error;
use thiserror::Error;
#[derive(Debug, Error)]
pub(crate) enum ImageLoaderError {
/// There is no existing manifest for the given image.
- #[error("Could not find Docker v2 or OCI manifest for `{0}:{1}`")]
- ManifestNotFound(String, String),
+ #[error("Could not find Docker v2 or OCI manifest for `{0}`")]
+ ManifestNotFound(Image),
/// Image doesn't support the requested architecture.
#[error("This image doesn't support {0} architecture")]
UnsupportedArchitecture(String),
- /// The manifest doesn't contain any layers to unpack.
- #[error("Could not find image layers in the manifest")]
- LayersNotFound,
+ /// The image manifest doesn't match the expected structure (no "layers" property).
+ #[error("Could not find Docker v2 or OCI image manifest for `{0}`")]
+ ImageManifestNotFound(Image),
+
+ /// The requested container registry didn't provide the auth link and service in www-authenticate header
+ #[error("Could not get the auth link and service for the `{0}` registry.")]
+ RegistryAuthDataNotFound(String),
/// Encountered an error during the flow.
#[error("Image loading error: {}", .source)]
diff --git a/src/fs-gen/src/loader/mod.rs b/src/fs-gen/src/loader/mod.rs
index f79903f..417fcef 100644
--- a/src/fs-gen/src/loader/mod.rs
+++ b/src/fs-gen/src/loader/mod.rs
@@ -1,3 +1,4 @@
pub(crate) mod download;
pub(crate) mod errors;
+mod structs;
mod utils;
diff --git a/src/fs-gen/src/loader/structs.rs b/src/fs-gen/src/loader/structs.rs
new file mode 100644
index 0000000..3b554ea
--- /dev/null
+++ b/src/fs-gen/src/loader/structs.rs
@@ -0,0 +1,120 @@
+use serde::Deserialize;
+use serde_json::Value;
+use std::fmt;
+
+// Any json returned by the request: image manifest, fat manifest, error...
+#[derive(Debug, Deserialize)]
+#[serde(untagged)]
+pub enum ManifestV2 {
+ ImageManifest(ImageManifest),
+ ManifestList(ManifestList),
+ Other(Value),
+}
+
+// Docker v2 or OCI image manifest containing image layers
+#[derive(Debug, Deserialize)]
+pub struct ImageManifest {
+ pub layers: Vec,
+}
+
+// Image layer
+#[derive(Debug, Deserialize)]
+pub struct Layer {
+ pub digest: String,
+}
+
+// Docker v2 manifest list or OCI image index containing image manifests
+#[derive(Debug, Deserialize)]
+pub struct ManifestList {
+ pub manifests: Vec,
+}
+
+// SubManifest for a specific platform
+#[derive(Debug, Deserialize)]
+pub struct SubManifest {
+ pub digest: String,
+ pub platform: Platform,
+}
+
+// Supported image platform: architecture and OS
+#[derive(Debug, Deserialize)]
+pub struct Platform {
+ pub architecture: String,
+ pub os: String,
+}
+
+// Container image definition consisting of repository, name and tag
+#[derive(Debug, Clone)]
+pub struct Image {
+ pub registry: String,
+ pub repository: String,
+ pub name: String,
+ pub tag: String,
+}
+
+impl Image {
+ // Get image's repository, name and tag
+ pub fn from_str(image_name: &str) -> Image {
+ const DEFAULT_REGISTRY: &str = "https://registry-1.docker.io";
+ const DEFAULT_REPOSITORY: &str = "library";
+ const DEFAULT_TAG: &str = "latest";
+
+ let protocol = if image_name.starts_with("http://") {
+ "http://"
+ } else {
+ "https://"
+ };
+
+ let mut image_data: Vec<&str> = image_name
+ .trim_start_matches("http://")
+ .trim_start_matches("https://")
+ .trim_start_matches("docker.io/")
+ .splitn(3, '/')
+ .collect();
+
+ // Get registry link (part of name before the first '/' with dots) or use the default registry
+ let registry = if image_data[0].contains('.') {
+ format!("{}{}", protocol, image_data.remove(0))
+ } else {
+ DEFAULT_REGISTRY.to_string()
+ };
+
+ // Set image repository and name
+ let (repository, name) = match image_data.len() {
+ 1 => (DEFAULT_REPOSITORY.to_string(), image_data[0].to_string()),
+ 2 => (image_data[0].to_string(), image_data[1].to_string()),
+ _ => (
+ image_data[0].to_string(),
+ image_data[1].to_string() + "/" + image_data[2],
+ ),
+ };
+
+ // Set image tag, default: 'latest'
+ let image_and_tag: Vec<&str> = name.split(':').collect();
+ let (name, tag) = if image_and_tag.len() < 2 {
+ (image_and_tag[0].to_string(), DEFAULT_TAG.to_string())
+ } else {
+ (image_and_tag[0].to_string(), image_and_tag[1].to_string())
+ };
+
+ Image {
+ registry,
+ repository,
+ name,
+ tag,
+ }
+ }
+}
+
+impl fmt::Display for Image {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}/{}:{}", self.repository, self.name, self.tag)
+ }
+}
+
+#[derive(Debug, Deserialize, Clone)]
+pub struct Registry {
+ pub name: String,
+ pub auth_link: String,
+ pub auth_service: String,
+}
diff --git a/src/fs-gen/src/loader/utils.rs b/src/fs-gen/src/loader/utils.rs
index d65d247..6f23cbc 100644
--- a/src/fs-gen/src/loader/utils.rs
+++ b/src/fs-gen/src/loader/utils.rs
@@ -1,4 +1,7 @@
+use crate::loader::errors::ImageLoaderError;
+use crate::loader::structs::{Image, Registry};
use anyhow::{Context, Result};
+use clap_stdin::MaybeStdin;
use flate2::read::GzDecoder;
use reqwest::blocking::{Client, Response};
use std::path::Path;
@@ -13,16 +16,77 @@ pub(super) fn unpack_tarball(response: Response, output_dir: &Path) -> Result<()
}
-/// Get a token for anonymous authentication to Docker Hub.
+/// Discover the registry's token auth endpoint and service from its `www-authenticate` response header.
-pub(super) fn get_docker_download_token(client: &Client, image_name: &str) -> Result {
- let token_json: serde_json::Value = client
- .get(format!("https://auth.docker.io/token?service=registry.docker.io&scope=repository:library/{image_name}:pull"))
- .send().with_context(|| "Could not send request for anonymous authentication".to_string())?
- .json().with_context(|| "Failed to parse JSON response for anonymous authentication".to_string())?;
-
- match token_json["token"]
- .as_str()
- .with_context(|| "Failed to get token from anon auth response".to_string())
- {
+pub(super) fn get_registry_auth_data(
+ client: &Client,
+ image: &Image,
+) -> Result {
+ let manifest_url = format!(
+ "{}/v2/{}/{}/manifests/{}",
+ image.registry, image.repository, image.name, image.tag
+ );
+
+ let unauth_response = client
+ .get(manifest_url)
+ .send()
+ .with_context(|| format!("Could not send request to {}", image.registry))?;
+
+ let auth_header: &str = unauth_response.headers()["www-authenticate"]
+ .to_str()
+ .map_err(|e| ImageLoaderError::Error { source: e.into() })?;
+ let auth_data: Vec<&str> = auth_header.split('"').collect();
+ if auth_data.len() != 7 {
+ Err(ImageLoaderError::RegistryAuthDataNotFound(
+ image.registry.clone(),
+ ))?
+ }
+ Ok(Registry {
+ name: image.registry.clone(),
+ auth_link: auth_data[1].to_string(),
+ auth_service: auth_data[3].to_string(),
+ })
+}
+
+/// Get a download token from the registry, using basic auth when credentials are provided and anonymous auth otherwise.
+pub(super) fn get_docker_download_token(
+ client: &Client,
+ image: &Image,
+ username: Option,
+ password: Option>,
+) -> Result {
+ let registry = get_registry_auth_data(client, image)?;
+
+ let mut request = client.get(format!(
+ "{}?service={}&scope=repository:{}/{}:pull",
+ registry.auth_link, registry.auth_service, image.repository, image.name
+ ));
+ let mut auth_type = "";
+
+ match username {
+ Some(u) => {
+ request = request.basic_auth(u, password);
+ }
+ None => {
+ auth_type = "anonymous ";
+ }
+ };
+
+ let token_json: serde_json::Value = request
+ .send()
+ .with_context(|| format!("Could not send request for {}authentication", auth_type))?
+ .json()
+ .with_context(|| {
+ format!(
+ "Failed to parse JSON response for {}authentication",
+ auth_type
+ )
+ })?;
+
+ match token_json["token"].as_str().with_context(|| {
+ format!(
+ "Failed to get token from the {}authentication response",
+ auth_type
+ )
+ }) {
Ok(t) => Ok(t.to_owned()),
Err(e) => Err(e),
}
diff --git a/src/fs-gen/src/main.rs b/src/fs-gen/src/main.rs
index 045e1c0..b874510 100644
--- a/src/fs-gen/src/main.rs
+++ b/src/fs-gen/src/main.rs
@@ -21,8 +21,14 @@ fn run(args: CliArgs) -> Result<()> {
let output_subdir = _binding.as_path();
// image downloading and unpacking
- let layers_paths = match download_image_fs(&args.image_name, &args.architecture, layers_subdir)
- {
+ let layers_paths = match download_image_fs(
+ &args.image_name,
+ &args.architecture,
+ layers_subdir,
+ args.username,
+ args.password,
+ args.insecure,
+ ) {
Err(e) => bail!(e),
Ok(e) => e,
};
@@ -62,10 +68,6 @@ fn main() -> Result<()> {
)
.init();
- // tracing_subscriber::fmt()
- // .with_max_level(if args.debug { Level::DEBUG } else { Level::INFO })
- // .init();
-
info!(
"Cloudlet initramfs generator: '{}' v{}",
env!("CARGO_PKG_NAME"),
diff --git a/src/vmm/Cargo.toml b/src/vmm/Cargo.toml
index 6f5aa30..c57316c 100644
--- a/src/vmm/Cargo.toml
+++ b/src/vmm/Cargo.toml
@@ -14,6 +14,8 @@ clap = { version = "4.5.1", features = ["derive", "env"] }
clap-verbosity-flag = "2.2.0"
epoll = "4.3.3"
event-manager = { version = "0.4.0", features = ["remote_endpoint"] }
+futures = "0.3.30"
+iptables = "0.5.1"
kvm-bindings = { version = "0.7.0", features = ["fam-wrappers"] }
kvm-ioctls = "0.16.0"
libc = "0.2.153"
@@ -22,7 +24,9 @@ log = "0.4.20"
nix = { version = "0.28.0", features = ["term"] }
openpty = "0.2.0"
prost = "0.11"
+rtnetlink = "0.14.1"
tokio = { version = "1.37.0", features = ["full"] }
+tokio-stream = "0.1.15"
tonic = "0.9"
tracing = "0.1.40"
tracing-subscriber = "0.3.18"
@@ -34,7 +38,6 @@ vm-device = "0.1.0"
vm-memory = { version = "0.14.1", features = ["backend-mmap"] }
vm-superio = "0.7.0"
vmm-sys-util = "0.12.1"
-tokio-stream = "0.1.15"
[build-dependencies]
tonic-build = "0.9"
diff --git a/src/vmm/src/core/devices/virtio/net/bridge.rs b/src/vmm/src/core/devices/virtio/net/bridge.rs
new file mode 100644
index 0000000..a0f1500
--- /dev/null
+++ b/src/vmm/src/core/devices/virtio/net/bridge.rs
@@ -0,0 +1,137 @@
+use std::net::{IpAddr, Ipv4Addr};
+
+use futures::stream::TryStreamExt;
+use rtnetlink::{new_connection, Error, Handle};
+
+use super::xx_netmask_width;
+
+#[derive(Clone)]
+pub struct Bridge {
+ name: String,
+ handle: Handle,
+}
+
+impl Bridge {
+ pub fn new(name: String) -> Self {
+ let (connection, handle, _) = new_connection().unwrap();
+ tokio::spawn(connection);
+
+ let br = Self { name, handle };
+ br.create_bridge_if_not_exist();
+
+ br
+ }
+
+ fn create_bridge_if_not_exist(&self) {
+ futures::executor::block_on(async {
+ let mut bridge_names = self
+ .handle
+ .link()
+ .get()
+ .match_name(self.name.clone())
+ .execute();
+
+ let _ = match bridge_names.try_next().await {
+ Ok(_) => Ok(()),
+ Err(_) => self
+ .handle
+ .link()
+ .add()
+ .bridge(self.name.clone())
+ .execute()
+ .await
+ .map_err(|_| Error::RequestFailed),
+ };
+ });
+ }
+
+ pub fn set_addr(&self, addr: Ipv4Addr, netmask: Ipv4Addr) {
+ futures::executor::block_on(async {
+ let mut bridge_names = self
+ .handle
+ .link()
+ .get()
+ .match_name(self.name.clone())
+ .execute();
+
+ let bridge_index = match bridge_names.try_next().await {
+ Ok(Some(link)) => link.header.index,
+ Ok(None) => panic!(),
+ Err(_) => panic!(),
+ };
+
+ let prefix_len = xx_netmask_width(netmask.octets());
+
+ let _ = self
+ .handle
+ .address()
+ .add(bridge_index, IpAddr::V4(addr), prefix_len)
+ .execute()
+ .await
+ .map_err(|_| Error::RequestFailed);
+ });
+ }
+
+ pub fn set_up(&self) {
+ futures::executor::block_on(async {
+ let mut bridge_names = self
+ .handle
+ .link()
+ .get()
+ .match_name(self.name.clone())
+ .execute();
+
+ let bridge_index = match bridge_names.try_next().await {
+ Ok(Some(link)) => link.header.index,
+ Ok(None) => panic!(),
+ Err(_) => panic!(),
+ };
+
+ let _ = self
+ .handle
+ .link()
+ .set(bridge_index)
+ .up()
+ .execute()
+ .await
+ .map_err(|_| Error::RequestFailed);
+ });
+ }
+
+ pub fn attach_link(&self, link_name: String) {
+ futures::executor::block_on(async {
+ let mut link_names = self
+ .handle
+ .link()
+ .get()
+ .match_name(link_name.clone())
+ .execute();
+ let mut master_names = self
+ .handle
+ .link()
+ .get()
+ .match_name(self.name.clone())
+ .execute();
+
+ let link_index = match link_names.try_next().await {
+ Ok(Some(link)) => link.header.index,
+ Ok(None) => panic!(),
+ Err(_) => panic!(),
+ };
+ let master_index = match master_names.try_next().await {
+ Ok(Some(link)) => link.header.index,
+ Ok(None) => panic!(),
+ Err(_) => panic!(),
+ };
+
+ let _ = self
+ .handle
+ .link()
+ .set(link_index)
+ .controller(master_index)
+ .execute()
+ .await
+ .map_err(|_| Error::RequestFailed);
+ });
+ }
+}
diff --git a/src/vmm/src/core/devices/virtio/net/device.rs b/src/vmm/src/core/devices/virtio/net/device.rs
index bf8acf8..5432cce 100644
--- a/src/vmm/src/core/devices/virtio/net/device.rs
+++ b/src/vmm/src/core/devices/virtio/net/device.rs
@@ -1,3 +1,5 @@
+use super::bridge::Bridge;
+use super::iptables::iptables_ip_masq;
use super::queue_handler::QueueHandler;
use super::{
simple_handler::SimpleHandler, tuntap::tap::Tap, Error, Result, NET_DEVICE_ID,
@@ -34,6 +36,7 @@ pub struct Net {
mem: Arc,
pub config: Config,
tap: Arc>,
+ _bridge: Bridge,
}
impl Net {
@@ -42,7 +45,7 @@ impl Net {
mem: Arc,
device_mgr: Arc>,
mmio_cfg: MmioConfig,
- tap_addr: Ipv4Addr,
+ iface_host_addr: Ipv4Addr,
netmask: Ipv4Addr,
iface_guest_addr: Ipv4Addr,
irq: u32,
@@ -74,25 +77,34 @@ impl Net {
// Set offload flags to match the relevant virtio features of the device (for now,
// statically set in the constructor.
- let tap = open_tap(None, Some(tap_addr), Some(netmask), &mut None, None, None)
- .map_err(Error::TunTap)?;
+ let tap = open_tap(None, None, None, &mut None, None, None).map_err(Error::TunTap)?;
// The layout of the header is specified in the standard and is 12 bytes in size. We
// should define this somewhere.
tap.set_vnet_hdr_size(VIRTIO_NET_HDR_SIZE as i32)
.map_err(Error::Tap)?;
+ let bridge_name = "br0".to_string();
+ let bridge = Bridge::new(bridge_name.clone());
+ bridge.set_addr(iface_host_addr, netmask);
+ bridge.attach_link(tap.get_name().map_err(Error::Tap)?);
+ bridge.set_up();
+
+ // Give the guest outbound internet access via an IP masquerade (NAT) rule on the bridge
+ iptables_ip_masq(iface_host_addr & netmask, netmask, bridge_name);
+
let net = Arc::new(Mutex::new(Net {
mem,
config: cfg,
- tap: Arc::new(Mutex::new(tap)),
+ tap: Arc::new(Mutex::new(tap.clone())),
+ _bridge: bridge,
}));
let vmmio_param = register_mmio_device(mmio_cfg, device_mgr, irq, None, net.clone())
.map_err(Error::Virtio)?;
let ip_pnp_param: String = format!(
- "ip={}::{}:{}::eth0:off",
- iface_guest_addr, tap_addr, netmask
+ "ip={}::{}:{}::eth0:off:1.1.1.1",
+ iface_guest_addr, iface_host_addr, netmask
);
cmdline_extra_parameters.push(vmmio_param);
diff --git a/src/vmm/src/core/devices/virtio/net/iptables.rs b/src/vmm/src/core/devices/virtio/net/iptables.rs
new file mode 100644
index 0000000..2c61579
--- /dev/null
+++ b/src/vmm/src/core/devices/virtio/net/iptables.rs
@@ -0,0 +1,16 @@
+use std::net::Ipv4Addr;
+
+use super::xx_netmask_width;
+
+pub fn iptables_ip_masq(network: Ipv4Addr, netmask: Ipv4Addr, link_name: String) {
+ let prefix_len = xx_netmask_width(netmask.octets());
+ let source = format!("{}/{}", network, prefix_len);
+
+ let ipt = iptables::new(false).unwrap();
+ let rule = format!("-s {} ! -o {} -j MASQUERADE", source, link_name);
+
+ let exists = ipt.exists("nat", "POSTROUTING", rule.as_str()).unwrap();
+ if !exists {
+ let _ = ipt.insert_unique("nat", "POSTROUTING", rule.as_str(), 1);
+ }
+}
diff --git a/src/vmm/src/core/devices/virtio/net/mod.rs b/src/vmm/src/core/devices/virtio/net/mod.rs
index 765986e..1277932 100644
--- a/src/vmm/src/core/devices/virtio/net/mod.rs
+++ b/src/vmm/src/core/devices/virtio/net/mod.rs
@@ -1,4 +1,6 @@
+mod bridge;
pub mod device;
+pub mod iptables;
mod queue_handler;
mod simple_handler;
pub mod tuntap;
@@ -20,3 +22,7 @@ pub enum Error {
}
pub type Result = std::result::Result;
+
+pub fn xx_netmask_width(netmask: [u8; SZ]) -> u8 {
+ netmask.iter().map(|x| x.count_ones() as u8).sum()
+}
diff --git a/src/vmm/src/core/devices/virtio/net/tuntap/tap.rs b/src/vmm/src/core/devices/virtio/net/tuntap/tap.rs
index 0eb1ec3..1dbabb9 100644
--- a/src/vmm/src/core/devices/virtio/net/tuntap/tap.rs
+++ b/src/vmm/src/core/devices/virtio/net/tuntap/tap.rs
@@ -200,6 +200,12 @@ impl Tap {
unsafe { Self::ioctl_with_ref(&sock, net_gen::sockios::SIOCSIFHWADDR as c_ulong, &ifreq) }
}
+ /// Get the interface name of this tap device.
+ pub fn get_name(&self) -> Result {
+ let name = String::from_utf8(self.if_name.clone()).map_err(|_| Error::InvalidIfname)?;
+ Ok(name)
+ }
+
/// Get mac addr for tap interface.
pub fn get_mac_addr(&self) -> Result {
let sock = create_unix_socket().map_err(Error::NetUtil)?;
diff --git a/src/vmm/src/core/vmm.rs b/src/vmm/src/core/vmm.rs
index c111a4c..d3e6ca5 100644
--- a/src/vmm/src/core/vmm.rs
+++ b/src/vmm/src/core/vmm.rs
@@ -13,7 +13,7 @@ use std::io::{self, stdout, Stdout};
use std::net::Ipv4Addr;
use std::os::unix::io::AsRawFd;
use std::os::unix::prelude::RawFd;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use std::thread;
use tracing::info;
@@ -58,7 +58,7 @@ pub struct VMM {
event_mgr: EventMgr,
vcpus: Vec,
- tap_addr: Ipv4Addr,
+ iface_host_addr: Ipv4Addr,
netmask: Ipv4Addr,
iface_guest_addr: Ipv4Addr,
net_devices: Vec>>,
@@ -69,7 +69,11 @@ pub struct VMM {
impl VMM {
/// Create a new VMM.
- pub fn new(tap_addr: Ipv4Addr, netmask: Ipv4Addr, iface_guest_addr: Ipv4Addr) -> Result {
+ pub fn new(
+ iface_host_addr: Ipv4Addr,
+ netmask: Ipv4Addr,
+ iface_guest_addr: Ipv4Addr,
+ ) -> Result {
// Open /dev/kvm and get a file descriptor to it.
let kvm = Kvm::new().map_err(Error::KvmIoctl)?;
@@ -111,7 +115,7 @@ impl VMM {
)),
slip_pty: Arc::new(Mutex::new(slip_pty)),
epoll,
- tap_addr,
+ iface_host_addr,
netmask,
iface_guest_addr,
net_devices: Vec::new(),
@@ -335,7 +339,7 @@ impl VMM {
&mut self,
num_vcpus: u8,
mem_size_mb: u32,
- kernel_path: &Path,
+ kernel_path: PathBuf,
initramfs_path: &Option,
) -> Result<()> {
let cmdline_extra_parameters = &mut Vec::new();
@@ -346,7 +350,7 @@ impl VMM {
let kernel_load = kernel::kernel_setup(
&self.guest_memory,
- kernel_path.to_path_buf(),
+ kernel_path,
initramfs_path.clone(),
cmdline_extra_parameters,
)?;
@@ -382,7 +386,7 @@ impl VMM {
mem,
self.device_mgr.clone(),
mmio_cfg,
- self.tap_addr,
+ self.iface_host_addr,
self.netmask,
self.iface_guest_addr,
irq,
diff --git a/src/vmm/src/grpc/server.rs b/src/vmm/src/grpc/server.rs
index e5c6919..bba8cae 100644
--- a/src/vmm/src/grpc/server.rs
+++ b/src/vmm/src/grpc/server.rs
@@ -4,6 +4,7 @@ use self::vmmorchestrator::{
use crate::grpc::client::agent::ExecuteRequest;
use crate::VmmErrors;
use crate::{core::vmm::VMM, grpc::client::WorkloadClient};
+use std::ffi::OsStr;
use std::time::Duration;
use std::{
convert::From,
@@ -34,6 +35,9 @@ impl From for Status {
VmmErrors::VmmNew(_) => Status::internal("Error creating VMM"),
VmmErrors::VmmConfigure(_) => Status::internal("Error configuring VMM"),
VmmErrors::VmmRun(_) => Status::internal("Error running VMM"),
+ VmmErrors::VmmBuildEnvironment(_) => {
+ Status::internal("Error while compiling the necessary files for the VMM")
+ }
}
}
}
@@ -41,6 +45,116 @@ impl From for Status {
#[derive(Default)]
pub struct VmmService;
+impl VmmService {
+ pub fn get_initramfs(
+ &self,
+ language: &str,
+ curr_dir: &OsStr,
+ ) -> std::result::Result {
+ // define initramfs file placement
+ let mut initramfs_entire_file_path = curr_dir.to_os_string();
+ initramfs_entire_file_path.push(&format!("/tools/rootfs/{language}.img"));
+ // set image name
+ let image = format!("{language}:alpine");
+
+ // check if an initramfs already exists
+ let rootfs_exists = Path::new(&initramfs_entire_file_path)
+ .try_exists()
+ .map_err(VmmErrors::VmmBuildEnvironment)?;
+ if !rootfs_exists {
+ // build the agent
+ let agent_file_name = self.get_path(
+ curr_dir,
+ "/target/x86_64-unknown-linux-musl/release/agent",
+ "cargo",
+ vec![
+ "build",
+ "--release",
+ "--bin",
+ "agent",
+ "--target=x86_64-unknown-linux-musl",
+ ],
+ )?;
+ // build initramfs
+ info!("Building initramfs");
+ let _ = self
+ .run_command(
+ "sh",
+ vec![
+ "./tools/rootfs/mkrootfs.sh",
+ &image,
+ &agent_file_name.to_str().unwrap(),
+ &initramfs_entire_file_path.to_str().unwrap(),
+ ],
+ )
+ .map_err(VmmErrors::VmmBuildEnvironment);
+ }
+ Ok(PathBuf::from(&initramfs_entire_file_path))
+ }
+
+ pub fn run_command(
+ &self,
+ command_type: &str,
+ args: Vec<&str>,
+ ) -> std::result::Result<(), std::io::Error> {
+ // Run the given command, inheriting stdout/stderr so its output streams directly to the console
+ Command::new(command_type)
+ .args(args)
+ .stdout(Stdio::inherit())
+ .stderr(Stdio::inherit())
+ .output()
+ .expect("Failed to execute the script");
+ Ok(())
+ }
+
+ pub fn get_path(
+ &self,
+ curr_dir: &OsStr,
+ end_path: &str,
+ command_type: &str,
+ args: Vec<&str>,
+ ) -> std::result::Result {
+ // define file path
+ let mut entire_path = curr_dir.to_os_string();
+ entire_path.push(&end_path);
+
+ // Check if the file is on the system, else build it
+ let exists = Path::new(&entire_path)
+ .try_exists()
+ .map_err(VmmErrors::VmmBuildEnvironment)?;
+
+ if !exists {
+ info!(
+ "File {:?} not found, building it",
+ &entire_path
+ );
+ let _ = self
+ .run_command(command_type, args)
+ .map_err(VmmErrors::VmmBuildEnvironment);
+ info!(
+ "File {:?} successfully build",
+ &entire_path
+ );
+ };
+ Ok(PathBuf::from(&entire_path))
+ }
+
+ pub fn get_agent_request(
+ &self,
+ vmm_request: RunVmmRequest,
+ language: String,
+ ) -> ExecuteRequest {
+ // Send the grpc request to start the agent
+ ExecuteRequest {
+ workload_name: vmm_request.workload_name,
+ language,
+ action: 2, // Prepare and run
+ code: vmm_request.code,
+ config_str: "[build]\nrelease = true".to_string(),
+ }
+ }
+}
+
#[tonic::async_trait]
impl VmmServiceTrait for VmmService {
type RunStream =
@@ -54,108 +168,26 @@ impl VmmServiceTrait for VmmService {
const GUEST_IP: Ipv4Addr = Ipv4Addr::new(172, 29, 0, 2);
// get current directory
- let mut curr_dir =
- current_dir().expect("Need to be able to access current directory path.");
-
- // define kernel path
- let mut kernel_entire_path = curr_dir.as_os_str().to_owned();
- kernel_entire_path
- .push("/tools/kernel/linux-cloud-hypervisor/arch/x86/boot/compressed/vmlinux.bin");
-
- // Check if the kernel is on the system, else build it
- let kernel_exists = Path::new(&kernel_entire_path)
- .try_exists()
- .unwrap_or_else(|_| panic!("Could not access folder {:?}", &kernel_entire_path));
-
- if !kernel_exists {
- info!("Kernel not found, building kernel");
- // Execute the script using sh and capture output and error streams
- let output = Command::new("sh")
- .arg("./tools/kernel/mkkernel.sh")
- .stdout(Stdio::piped())
- .stderr(Stdio::piped())
- .output()
- .expect("Failed to execute the kernel build script");
-
- // Print output and error streams
- info!("Script output: {}", String::from_utf8_lossy(&output.stdout));
- error!("Script errors: {}", String::from_utf8_lossy(&output.stderr));
- };
- let kernel_path = Path::new(&kernel_entire_path);
-
- // define initramfs file placement
- let mut initramfs_entire_file_path = curr_dir.as_os_str().to_owned();
- initramfs_entire_file_path.push("/tools/rootfs/");
+ let curr_dir = current_dir()
+ .map_err(VmmErrors::VmmBuildEnvironment)?
+ .into_os_string();
+
+ // build kernel if necessary
+ let kernel_path: PathBuf = self.get_path(
+ &curr_dir,
+ "/tools/kernel/linux-cloud-hypervisor/arch/x86/boot/compressed/vmlinux.bin",
+ "sh",
+ vec!["./tools/kernel/mkkernel.sh"],
+ )?;
// get request with the language
let vmm_request = request.into_inner();
- let language: Language =
- Language::from_i32(vmm_request.language).expect("Unknown language");
-
- let image = match language {
- Language::Rust => {
- initramfs_entire_file_path.push("rust.img");
- "rust:alpine"
- }
- Language::Python => {
- initramfs_entire_file_path.push("python.img");
- "python:alpine"
- }
- Language::Node => {
- initramfs_entire_file_path.push("node.img");
- "node:alpine"
- }
- };
-
- let rootfs_exists = Path::new(&initramfs_entire_file_path)
- .try_exists()
- .unwrap_or_else(|_| {
- panic!("Could not access folder {:?}", &initramfs_entire_file_path)
- });
- if !rootfs_exists {
- // check if agent binary exists
- let agent_file_name = curr_dir.as_mut_os_string();
- agent_file_name.push("/target/x86_64-unknown-linux-musl/release/agent");
-
- // if agent hasn't been build, build it
- let agent_exists = Path::new(&agent_file_name)
- .try_exists()
- .unwrap_or_else(|_| panic!("Could not access folder {:?}", &agent_file_name));
- if !agent_exists {
- //build agent
- info!("Building agent binary");
- // Execute the script using sh and capture output and error streams
- let output = Command::new("just")
- .arg("build-musl-agent")
- .stdout(Stdio::piped())
- .stderr(Stdio::piped())
- .output()
- .expect("Failed to execute the just build script for the agent");
-
- // Print output and error streams
- info!("Script output: {}", String::from_utf8_lossy(&output.stdout));
- error!("Script errors: {}", String::from_utf8_lossy(&output.stderr));
- info!("Agent binary successfully built.")
- }
+ let language: String = Language::from_i32(vmm_request.language)
+ .expect("Unknown language")
+ .as_str_name()
+ .to_lowercase();
- info!("Building initramfs");
- // Execute the script using sh and capture output and error streams
- let output = Command::new("sh")
- .arg("./tools/rootfs/mkrootfs.sh")
- .arg(image)
- .arg(&agent_file_name)
- .arg(&initramfs_entire_file_path)
- .stdout(Stdio::piped())
- .stderr(Stdio::piped())
- .output()
- .expect("Failed to execute the initramfs build script");
-
- // Print output and error streams
- info!("Script output: {}", String::from_utf8_lossy(&output.stdout));
- error!("Script errors: {}", String::from_utf8_lossy(&output.stderr));
- info!("Initramfs successfully built.")
- }
- let initramfs_path = PathBuf::from(&initramfs_entire_file_path);
+ let initramfs_path = self.get_initramfs(&language, curr_dir.as_os_str())?;
let mut vmm = VMM::new(HOST_IP, HOST_NETMASK, GUEST_IP).map_err(VmmErrors::VmmNew)?;
@@ -171,29 +203,18 @@ impl VmmServiceTrait for VmmService {
}
});
+ // run the grpc client
let grpc_client = tokio::spawn(async move {
// Wait 2 seconds
tokio::time::sleep(Duration::from_secs(2)).await;
- println!("Connecting to Agent service");
+ info!("Connecting to Agent service");
WorkloadClient::new(GUEST_IP, 50051).await
})
.await
.unwrap();
- // Send the grpc request to start the agent
- let agent_request = ExecuteRequest {
- workload_name: vmm_request.workload_name,
- language: match vmm_request.language {
- 0 => "rust".to_string(),
- 1 => "python".to_string(),
- 2 => "node".to_string(),
- _ => unreachable!("Invalid language"),
- },
- action: 2, // Prepare and run
- code: vmm_request.code,
- config_str: "[build]\nrelease = true".to_string(),
- };
+ let agent_request = self.get_agent_request(vmm_request, language);
match grpc_client {
Ok(mut client) => {
diff --git a/src/vmm/src/lib.rs b/src/vmm/src/lib.rs
index c4b00ad..ba51a25 100644
--- a/src/vmm/src/lib.rs
+++ b/src/vmm/src/lib.rs
@@ -9,4 +9,5 @@ pub enum VmmErrors {
VmmNew(core::Error),
VmmConfigure(core::Error),
VmmRun(core::Error),
+ VmmBuildEnvironment(std::io::Error),
}
diff --git a/src/vmm/src/main.rs b/src/vmm/src/main.rs
index 7f6a748..36f8b9f 100644
--- a/src/vmm/src/main.rs
+++ b/src/vmm/src/main.rs
@@ -51,7 +51,7 @@ async fn main() -> Result<(), Box> {
vmm.configure(
cli_args.cpus,
cli_args.memory,
- &cli_args.kernel,
+ cli_args.kernel,
&cli_args.initramfs,
)
.map_err(VmmErrors::VmmConfigure)
diff --git a/tools/rootfs/mkrootfs.sh b/tools/rootfs/mkrootfs.sh
index f26a70a..42eb82e 100755
--- a/tools/rootfs/mkrootfs.sh
+++ b/tools/rootfs/mkrootfs.sh
@@ -7,7 +7,7 @@ then
fi
# augment the open file limit
-ulimit -Sn 8192
+ulimit -Sn $(ulimit -Hn)
if [ -d fs-gen ]
then