diff --git a/Cargo.lock b/Cargo.lock index 73fb8ea..1da1393 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,6 +35,12 @@ version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9d4ee0d472d1cd2e28c97dfa124b3d8d992e10eb0a035f33f5d12e3a177ba3b" +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -243,6 +249,20 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "wasm-bindgen", + "windows-targets 0.52.3", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -1224,6 +1244,15 @@ dependencies = [ "tempfile", ] +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + [[package]] name = "num_cpus" version = "1.16.0" @@ -1552,6 +1581,16 @@ dependencies = [ "spin-sdk", ] +[[package]] +name = "rust-outbound-pg-v3" +version = "0.1.0" +dependencies = [ + "anyhow", + "chrono", + "http 1.0.0", + "spin-sdk", +] + [[package]] name = "rust-outbound-redis" version = "0.1.0" @@ -1855,6 +1894,7 @@ dependencies = [ "anyhow", "async-trait", "bytes", + "chrono", "form_urlencoded", "futures", "http 1.0.0", diff --git a/Cargo.toml b/Cargo.toml index d84aa71..1e39450 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,7 @@ name = "spin_sdk" [dependencies] anyhow = "1" async-trait = "0.1.74" +chrono = "0.4.38" form_urlencoded = "1.0" spin-executor = { version = "3.0.1", path = "crates/executor" } spin-macro = { version = "3.0.1", path = "crates/macro" } @@ -52,6 +53,7 @@ members = [ "examples/key-value", "examples/mysql", "examples/postgres", + "examples/postgres-v3", "examples/redis-outbound", "examples/mqtt-outbound", "examples/variables", diff --git a/examples/postgres-v3/.cargo/config.toml b/examples/postgres-v3/.cargo/config.toml new file mode 100644 index 0000000..6b77899 --- /dev/null +++ b/examples/postgres-v3/.cargo/config.toml @@ -0,0 +1,2 @@ +[build] +target = "wasm32-wasi" diff --git a/examples/postgres-v3/Cargo.toml b/examples/postgres-v3/Cargo.toml new file mode 100644 index 0000000..03f3145 --- /dev/null +++ b/examples/postgres-v3/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "rust-outbound-pg-v3" +version = "0.1.0" +edition = "2021" + +[lib] +crate-type = ["cdylib"] + +[dependencies] +# Useful crate to handle errors. +anyhow = "1" +# General-purpose crate with common HTTP types. +http = "1.0.0" +# The Spin SDK. +spin-sdk = { path = "../.." } +# For handling date/time types +chrono = "0.4.38" diff --git a/examples/postgres-v3/README.md b/examples/postgres-v3/README.md new file mode 100644 index 0000000..ac96316 --- /dev/null +++ b/examples/postgres-v3/README.md @@ -0,0 +1,69 @@ +# Spin Outbound PostgreSQL example + +This example shows how to access a PostgreSQL database from Spin component. 
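+
+The component talks to the database through the Spin SDK's new `pg3` module. A minimal sketch of
+the pattern that `src/lib.rs` uses (the helper function name and the `WHERE` clause here are
+illustrative, not part of the example):
+
+```rust
+use spin_sdk::pg3::{self, Decode};
+
+fn titles_since(address: &str, since: chrono::NaiveDate) -> anyhow::Result<Vec<String>> {
+    // Open a connection and run a parameterized query against the example table.
+    let conn = pg3::Connection::open(address)?;
+    let rowset = conn.query(
+        "SELECT title FROM articletest WHERE publisheddate >= $1",
+        &[since.into()],
+    )?;
+    // Decode each WIT `db-value` back into a Rust type.
+    rowset
+        .rows
+        .iter()
+        .map(|row| Ok(String::decode(&row[0])?))
+        .collect()
+}
+```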
+
+## Spin up
+
+From the example root:
+
+```
+createdb spin_dev
+psql -d spin_dev -f db/testdata.sql
+RUST_LOG=spin=trace spin build --up
+```
+
+Curl the read route:
+
+```
+$ curl -i localhost:3000/read
+HTTP/1.1 200 OK
+transfer-encoding: chunked
+date: Wed, 06 Nov 2024 20:17:03 GMT
+
+Found 2 article(s) as follows:
+article: Article {
+    id: 1,
+    title: "My Life as a Goat",
+    content: "I went to Nepal to live as a goat, and it was much better than being a butler.",
+    authorname: "E. Blackadder",
+    published: Date(
+        2024-11-05,
+    ),
+    coauthor: None,
+}
+article: Article {
+    id: 2,
+    title: "Magnificent Octopus",
+    content: "Once upon a time there was a lovely little sausage.",
+    authorname: "S. Baldrick",
+    published: Date(
+        2024-11-06,
+    ),
+    coauthor: None,
+}
+
+(Column info: id:DbDataType::Int32, title:DbDataType::Str, content:DbDataType::Str, authorname:DbDataType::Str, published:DbDataType::Date, coauthor:DbDataType::Str)
+```
+
+Curl the write route:
+
+```
+$ curl -i localhost:3000/write
+HTTP/1.1 200 OK
+content-length: 9
+date: Sun, 25 Sep 2022 15:46:22 GMT
+
+Count: 3
+```
+
+Curl the write_datetime_info route to experiment with date/time types:
+
+```
+$ curl -i localhost:3000/write_datetime_info
+HTTP/1.1 200 OK
+content-length: 9
+date: Sun, 25 Sep 2022 15:46:22 GMT
+
+Count: 4
+```
+
+The read endpoint should now also show a row with publisheddate, publishedtime, publisheddatetime and readtime values.
diff --git a/examples/postgres-v3/db/testdata.sql b/examples/postgres-v3/db/testdata.sql
new file mode 100644
index 0000000..d9d30ee
--- /dev/null
+++ b/examples/postgres-v3/db/testdata.sql
@@ -0,0 +1,25 @@
+CREATE TABLE articletest (
+    id integer GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY,
+    title varchar(40) NOT NULL,
+    content text NOT NULL,
+    authorname varchar(40) NOT NULL,
+    publisheddate date NOT NULL,
+    publishedtime time,
+    publisheddatetime timestamp,
+    readtime bigint,
+    coauthor text
+);
+
+INSERT INTO articletest (title, content, authorname, publisheddate) VALUES
+(
+    'My Life as a Goat',
+    'I went to Nepal to live as a goat, and it was much better than being a butler.',
+    'E. Blackadder',
+    '2024-11-05'
+),
+(
+    'Magnificent Octopus',
+    'Once upon a time there was a lovely little sausage.',
+    'S. Baldrick',
+    '2024-11-06'
+);
diff --git a/examples/postgres-v3/spin.toml b/examples/postgres-v3/spin.toml
new file mode 100644
index 0000000..04ca806
--- /dev/null
+++ b/examples/postgres-v3/spin.toml
@@ -0,0 +1,17 @@
+spin_manifest_version = 2
+
+[application]
+authors = ["Fermyon Engineering <engineering@fermyon.com>"]
+name = "rust-outbound-pg-v3-example"
+version = "0.1.0"
+
+[[trigger.http]]
+route = "/..."
+component = "outbound-pg"
+
+[component.outbound-pg]
+environment = { DB_URL = "host=localhost user=postgres dbname=spin_dev" }
+source = "../../target/wasm32-wasi/release/rust_outbound_pg_v3.wasm"
+allowed_outbound_hosts = ["postgres://localhost"]
+[component.outbound-pg.build]
+command = "cargo build --target wasm32-wasi --release"
diff --git a/examples/postgres-v3/src/lib.rs b/examples/postgres-v3/src/lib.rs
new file mode 100644
index 0000000..ae8c6f4
--- /dev/null
+++ b/examples/postgres-v3/src/lib.rs
@@ -0,0 +1,164 @@
+#![allow(dead_code)]
+use anyhow::Result;
+use http::{Request, Response};
+use spin_sdk::{http_component, pg3, pg3::Decode};
+
+// The environment variable set in `spin.toml` that points to the
+// address of the Pg server that the component will write to
+const DB_URL_ENV: &str = "DB_URL";
+
+#[derive(Debug, Clone)]
+struct Article {
+    id: i32,
+    title: String,
+    content: String,
+    authorname: String,
+    published_date: chrono::NaiveDate,
+    published_time: Option<chrono::NaiveTime>,
+    published_datetime: Option<chrono::NaiveDateTime>,
+    read_time: Option<i64>,
+    coauthor: Option<String>,
+}
+
+impl TryFrom<&pg3::Row> for Article {
+    type Error = anyhow::Error;
+
+    fn try_from(row: &pg3::Row) -> Result<Self> {
+        let id = i32::decode(&row[0])?;
+        let title = String::decode(&row[1])?;
+        let content = String::decode(&row[2])?;
+        let authorname = String::decode(&row[3])?;
+        let published_date = chrono::NaiveDate::decode(&row[4])?;
+        let published_time = Option::<chrono::NaiveTime>::decode(&row[5])?;
+        let published_datetime = Option::<chrono::NaiveDateTime>::decode(&row[6])?;
+        let read_time = Option::<i64>::decode(&row[7])?;
+        let coauthor = Option::<String>::decode(&row[8])?;
+
+        Ok(Self {
+            id,
+            title,
+            content,
+            authorname,
+            published_date,
+            published_time,
+            published_datetime,
+            read_time,
+            coauthor,
+        })
+    }
+}
+
+#[http_component]
+fn process(req: Request<()>) -> Result<Response<String>> {
+    match req.uri().path() {
+        "/read" => read(req),
+        "/write" => write(req),
+        "/write_datetime_info" => write_datetime_info(req),
+        "/pg_backend_pid" => pg_backend_pid(req),
+        _ => Ok(http::Response::builder()
+            .status(404)
+            .body("Not found".into())?),
+    }
+}
+
+fn read(_req: Request<()>) -> Result<Response<String>> {
+    let address = std::env::var(DB_URL_ENV)?;
+    let conn = pg3::Connection::open(&address)?;
+
+    let sql = "SELECT id, title, content, authorname, publisheddate, publishedtime, publisheddatetime, readtime, coauthor FROM articletest";
+    let rowset = conn.query(sql, &[])?;
+
+    let column_summary = rowset
+        .columns
+        .iter()
+        .map(format_col)
+        .collect::<Vec<_>>()
+        .join(", ");
+
+    let mut response_lines = vec![];
+
+    for row in rowset.rows {
+        let article = Article::try_from(&row)?;
+
+        println!("article: {:#?}", article);
+        response_lines.push(format!("article: {:#?}", article));
+    }
+
+    // use it in business logic
+
+    let response = format!(
+        "Found {} article(s) as follows:\n{}\n\n(Column info: {})\n",
+        response_lines.len(),
+        response_lines.join("\n"),
+        column_summary,
+    );
+
+    Ok(http::Response::builder().status(200).body(response)?)
+}
+
+fn write_datetime_info(_req: Request<()>) -> Result<Response<String>> {
+    let address = std::env::var(DB_URL_ENV)?;
+    let conn = pg3::Connection::open(&address)?;
+
+    let date: chrono::NaiveDate = chrono::NaiveDate::from_ymd_opt(2024, 1, 1).unwrap();
+    let time: chrono::NaiveTime = chrono::NaiveTime::from_hms_nano_opt(12, 34, 56, 1).unwrap();
+    let datetime: chrono::NaiveDateTime = chrono::NaiveDateTime::new(date, time);
+    let readtime = 123i64;
+
+    let nrow_executed = conn.execute(
+        "INSERT INTO articletest(title, content, authorname, publisheddate, publishedtime, publisheddatetime, readtime) VALUES ($1, $2, $3, $4, $5, $6, $7)",
+        &[ "aaa".to_string().into(), "bbb".to_string().into(), "ccc".to_string().into(), date.into(), time.into(), datetime.into(), readtime.into() ],
+    );
+
+    println!("nrow_executed: {:?}", nrow_executed);
+
+    let sql = "SELECT COUNT(id) FROM articletest";
+    let rowset = conn.query(sql, &[])?;
+    let row = &rowset.rows[0];
+    let count = i64::decode(&row[0])?;
+    let response = format!("Count: {}\n", count);
+
+    Ok(http::Response::builder().status(200).body(response)?)
+}
+
+fn write(_req: Request<()>) -> Result<Response<String>> {
+    let address = std::env::var(DB_URL_ENV)?;
+    let conn = pg3::Connection::open(&address)?;
+
+    let sql =
+        "INSERT INTO articletest (title, content, authorname, publisheddate) VALUES ('aaa', 'bbb', 'ccc', '2024-01-01')";
+    let nrow_executed = conn.execute(sql, &[])?;
+
+    println!("nrow_executed: {}", nrow_executed);
+
+    let sql = "SELECT COUNT(id) FROM articletest";
+    let rowset = conn.query(sql, &[])?;
+    let row = &rowset.rows[0];
+    let count = i64::decode(&row[0])?;
+    let response = format!("Count: {}\n", count);
+
+    Ok(http::Response::builder().status(200).body(response)?)
+}
+
+fn pg_backend_pid(_req: Request<()>) -> Result<Response<String>> {
+    let address = std::env::var(DB_URL_ENV)?;
+    let conn = pg3::Connection::open(&address)?;
+    let sql = "SELECT pg_backend_pid()";
+
+    let get_pid = || {
+        let rowset = conn.query(sql, &[])?;
+        let row = &rowset.rows[0];
+
+        i32::decode(&row[0])
+    };
+
+    assert_eq!(get_pid()?, get_pid()?);
+
+    let response = format!("pg_backend_pid: {}\n", get_pid()?);
+
+    Ok(http::Response::builder().status(200).body(response)?)
+}
+
+fn format_col(column: &pg3::Column) -> String {
+    format!("{}:{:?}", column.name, column.data_type)
+}
diff --git a/src/lib.rs b/src/lib.rs
index 99bd617..53f6b6f 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -34,6 +34,7 @@ pub mod wit {
         }
     });
     pub use fermyon::spin2_0_0 as v2;
+    pub use spin::postgres::postgres as pg3;
 }
 
 /// Needed by the export macro
@@ -101,6 +102,9 @@ pub mod redis {
 /// Implementation of the spin postgres db interface.
 pub mod pg;
 
+/// Implementation of the spin postgres v3 db interface.
+pub mod pg3;
+
 /// Implementation of the Spin MySQL database interface.
 pub mod mysql;
 
diff --git a/src/pg3.rs b/src/pg3.rs
new file mode 100644
index 0000000..3787a08
--- /dev/null
+++ b/src/pg3.rs
@@ -0,0 +1,419 @@
+//! Conversions between Rust, WIT and **Postgres** types.
+//!
+//! # Types
+//!
+//! | Rust type               | WIT (db-value)      | Postgres type(s)            |
+//! |-------------------------|---------------------|-----------------------------|
+//! | `bool`                  | boolean(bool)       | BOOL                        |
+//! | `i16`                   | int16(s16)          | SMALLINT, SMALLSERIAL, INT2 |
+//! | `i32`                   | int32(s32)          | INT, SERIAL, INT4           |
+//! | `i64`                   | int64(s64)          | BIGINT, BIGSERIAL, INT8     |
+//! | `f32`                   | floating32(float32) | REAL, FLOAT4                |
+//! | `f64`                   | floating64(float64) | DOUBLE PRECISION, FLOAT8    |
+//! | `String`                | str(string)         | VARCHAR, CHAR(N), TEXT      |
+//! | `Vec<u8>`               | binary(list\<u8\>)  | BYTEA                       |
+//! | `chrono::NaiveDate`     | date(tuple)         | DATE                        |
+//! | `chrono::NaiveTime`     | time(tuple)         | TIME                        |
+//! | `chrono::NaiveDateTime` | datetime(tuple)     | TIMESTAMP                   |
+//! | `chrono::Duration`      | timestamp(s64)      | BIGINT                      |
+
+#[doc(inline)]
+pub use super::wit::pg3::{Error as PgError, *};
+
+use chrono::{Datelike, Timelike};
+
+/// A pg error
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+    /// Failed to deserialize [`DbValue`]
+    #[error("error value decoding: {0}")]
+    Decode(String),
+    /// Pg query failed with an error
+    #[error(transparent)]
+    PgError(#[from] PgError),
+}
+
+/// A type that can be decoded from the database.
+pub trait Decode: Sized {
+    /// Decode a new value of this type using a [`DbValue`].
+    fn decode(value: &DbValue) -> Result<Self, Error>;
+}
+
+impl<T> Decode for Option<T>
+where
+    T: Decode,
+{
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::DbNull => Ok(None),
+            v => Ok(Some(T::decode(v)?)),
+        }
+    }
+}
+
+impl Decode for bool {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Boolean(boolean) => Ok(*boolean),
+            _ => Err(Error::Decode(format_decode_err("BOOL", value))),
+        }
+    }
+}
+
+impl Decode for i16 {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Int16(n) => Ok(*n),
+            _ => Err(Error::Decode(format_decode_err("SMALLINT", value))),
+        }
+    }
+}
+
+impl Decode for i32 {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Int32(n) => Ok(*n),
+            _ => Err(Error::Decode(format_decode_err("INT", value))),
+        }
+    }
+}
+
+impl Decode for i64 {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Int64(n) => Ok(*n),
+            _ => Err(Error::Decode(format_decode_err("BIGINT", value))),
+        }
+    }
+}
+
+impl Decode for f32 {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Floating32(n) => Ok(*n),
+            _ => Err(Error::Decode(format_decode_err("REAL", value))),
+        }
+    }
+}
+
+impl Decode for f64 {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Floating64(n) => Ok(*n),
+            _ => Err(Error::Decode(format_decode_err("DOUBLE PRECISION", value))),
+        }
+    }
+}
+
+impl Decode for Vec<u8> {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Binary(n) => Ok(n.to_owned()),
+            _ => Err(Error::Decode(format_decode_err("BYTEA", value))),
+        }
+    }
+}
+
+impl Decode for String {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Str(s) => Ok(s.to_owned()),
+            _ => Err(Error::Decode(format_decode_err(
+                "CHAR, VARCHAR, TEXT",
+                value,
+            ))),
+        }
+    }
+}
+
+impl Decode for chrono::NaiveDate {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Date((year, month, day)) => {
+                let naive_date =
+                    chrono::NaiveDate::from_ymd_opt(*year, (*month).into(), (*day).into())
+                        .ok_or_else(|| {
+                            Error::Decode(format!(
+                                "invalid date y={}, m={}, d={}",
+                                year, month, day
+                            ))
+                        })?;
+                Ok(naive_date)
+            }
+            _ => Err(Error::Decode(format_decode_err("DATE", value))),
+        }
+    }
+}
+
+impl Decode for chrono::NaiveTime {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Time((hour, minute, second, nanosecond)) => {
+                let naive_time = chrono::NaiveTime::from_hms_nano_opt(
+                    (*hour).into(),
+                    (*minute).into(),
+                    (*second).into(),
+                    *nanosecond,
+                )
+                .ok_or_else(|| {
+                    Error::Decode(format!(
+                        "invalid time {}:{}:{}:{}",
+                        hour, minute, second, nanosecond
+                    ))
+                })?;
+                Ok(naive_time)
+            }
+            _ => Err(Error::Decode(format_decode_err("TIME", value))),
+        }
+    }
+}
+
+impl Decode for chrono::NaiveDateTime {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Datetime((year, month, day, hour, minute, second, nanosecond)) => {
+                let naive_date =
+                    chrono::NaiveDate::from_ymd_opt(*year, (*month).into(), (*day).into())
+                        .ok_or_else(|| {
+                            Error::Decode(format!(
+                                "invalid date y={}, m={}, d={}",
+                                year, month, day
+                            ))
+                        })?;
+                let naive_time = chrono::NaiveTime::from_hms_nano_opt(
+                    (*hour).into(),
+                    (*minute).into(),
+                    (*second).into(),
+                    *nanosecond,
+                )
+                .ok_or_else(|| {
+                    Error::Decode(format!(
+                        "invalid time {}:{}:{}:{}",
+                        hour, minute, second, nanosecond
+                    ))
+                })?;
+                let dt = chrono::NaiveDateTime::new(naive_date, naive_time);
+                Ok(dt)
+            }
+            _ => Err(Error::Decode(format_decode_err("DATETIME", value))),
+        }
+    }
+}
+
+impl Decode for chrono::Duration {
+    fn decode(value: &DbValue) -> Result<Self, Error> {
+        match value {
+            DbValue::Timestamp(n) => Ok(chrono::Duration::seconds(*n)),
+            _ => Err(Error::Decode(format_decode_err("BIGINT", value))),
+        }
+    }
+}
+
+macro_rules! impl_parameter_value_conversions {
+    ($($ty:ty => $id:ident),*) => {
+        $(
+            impl From<$ty> for ParameterValue {
+                fn from(v: $ty) -> ParameterValue {
+                    ParameterValue::$id(v)
+                }
+            }
+        )*
+    };
+}
+
+impl_parameter_value_conversions! {
+    i8 => Int8,
+    i16 => Int16,
+    i32 => Int32,
+    i64 => Int64,
+    f32 => Floating32,
+    f64 => Floating64,
+    bool => Boolean,
+    String => Str,
+    Vec<u8> => Binary
+}
+
+impl From<chrono::NaiveDateTime> for ParameterValue {
+    fn from(v: chrono::NaiveDateTime) -> ParameterValue {
+        ParameterValue::Datetime((
+            v.year(),
+            v.month() as u8,
+            v.day() as u8,
+            v.hour() as u8,
+            v.minute() as u8,
+            v.second() as u8,
+            v.nanosecond(),
+        ))
+    }
+}
+
+impl From<chrono::NaiveTime> for ParameterValue {
+    fn from(v: chrono::NaiveTime) -> ParameterValue {
+        ParameterValue::Time((
+            v.hour() as u8,
+            v.minute() as u8,
+            v.second() as u8,
+            v.nanosecond(),
+        ))
+    }
+}
+
+impl From<chrono::NaiveDate> for ParameterValue {
+    fn from(v: chrono::NaiveDate) -> ParameterValue {
+        ParameterValue::Date((v.year(), v.month() as u8, v.day() as u8))
+    }
+}
+
+impl From<chrono::TimeDelta> for ParameterValue {
+    fn from(v: chrono::TimeDelta) -> ParameterValue {
+        ParameterValue::Timestamp(v.num_seconds())
+    }
+}
+
+impl<T: Into<ParameterValue>> From<Option<T>> for ParameterValue {
+    fn from(o: Option<T>) -> ParameterValue {
+        match o {
+            Some(v) => v.into(),
+            None => ParameterValue::DbNull,
+        }
+    }
+}
+
+fn format_decode_err(types: &str, value: &DbValue) -> String {
+    format!("Expected {} from the DB but got {:?}", types, value)
+}
+
+#[cfg(test)]
+mod tests {
+    use chrono::NaiveDateTime;
+
+    use super::*;
+
+    #[test]
+    fn boolean() {
+        assert!(bool::decode(&DbValue::Boolean(true)).unwrap());
+        assert!(bool::decode(&DbValue::Int32(0)).is_err());
+        assert!(Option::<bool>::decode(&DbValue::DbNull).unwrap().is_none());
+    }
+
+    #[test]
+    fn int16() {
+        assert_eq!(i16::decode(&DbValue::Int16(0)).unwrap(), 0);
+        assert!(i16::decode(&DbValue::Int32(0)).is_err());
+        assert!(Option::<i16>::decode(&DbValue::DbNull).unwrap().is_none());
+    }
+
+    #[test]
+    fn int32() {
+        assert_eq!(i32::decode(&DbValue::Int32(0)).unwrap(), 0);
+        assert!(i32::decode(&DbValue::Boolean(false)).is_err());
+        assert!(Option::<i32>::decode(&DbValue::DbNull).unwrap().is_none());
+    }
+
+    #[test]
+    fn int64() {
+        assert_eq!(i64::decode(&DbValue::Int64(0)).unwrap(), 0);
+        assert!(i64::decode(&DbValue::Boolean(false)).is_err());
+        assert!(Option::<i64>::decode(&DbValue::DbNull).unwrap().is_none());
+    }
+
+    #[test]
+    fn floating32() {
+        assert!(f32::decode(&DbValue::Floating32(0.0)).is_ok());
+        assert!(f32::decode(&DbValue::Boolean(false)).is_err());
+        assert!(Option::<f32>::decode(&DbValue::DbNull).unwrap().is_none());
+    }
+
+    #[test]
+    fn floating64() {
+        assert!(f64::decode(&DbValue::Floating64(0.0)).is_ok());
+        assert!(f64::decode(&DbValue::Boolean(false)).is_err());
+        assert!(Option::<f64>::decode(&DbValue::DbNull).unwrap().is_none());
+    }
+
+    #[test]
+    fn str() {
+        assert_eq!(
+            String::decode(&DbValue::Str(String::from("foo"))).unwrap(),
+            String::from("foo")
+        );
+
+        assert!(String::decode(&DbValue::Int32(0)).is_err());
+        assert!(Option::<String>::decode(&DbValue::DbNull)
+            .unwrap()
+            .is_none());
+    }
+
+    #[test]
+    fn binary() {
+        assert!(Vec::<u8>::decode(&DbValue::Binary(vec![0, 0])).is_ok());
+        assert!(Vec::<u8>::decode(&DbValue::Boolean(false)).is_err());
+        assert!(Option::<Vec<u8>>::decode(&DbValue::DbNull)
+            .unwrap()
+            .is_none());
+    }
+
+    #[test]
+    fn date() {
+        assert_eq!(
+            chrono::NaiveDate::decode(&DbValue::Date((1, 2, 4))).unwrap(),
+            chrono::NaiveDate::from_ymd_opt(1, 2, 4).unwrap()
+        );
+        assert_ne!(
+            chrono::NaiveDate::decode(&DbValue::Date((1, 2, 4))).unwrap(),
+            chrono::NaiveDate::from_ymd_opt(1, 2, 5).unwrap()
+        );
+        assert!(Option::<chrono::NaiveDate>::decode(&DbValue::DbNull)
+            .unwrap()
+            .is_none());
+    }
+
+    #[test]
+    fn time() {
+        assert_eq!(
+            chrono::NaiveTime::decode(&DbValue::Time((1, 2, 3, 4))).unwrap(),
+            chrono::NaiveTime::from_hms_nano_opt(1, 2, 3, 4).unwrap()
+        );
+        assert_ne!(
+            chrono::NaiveTime::decode(&DbValue::Time((1, 2, 3, 4))).unwrap(),
+            chrono::NaiveTime::from_hms_nano_opt(1, 2, 4, 5).unwrap()
+        );
+        assert!(Option::<chrono::NaiveTime>::decode(&DbValue::DbNull)
+            .unwrap()
+            .is_none());
+    }
+
+    #[test]
+    fn datetime() {
+        let date = chrono::NaiveDate::from_ymd_opt(1, 2, 3).unwrap();
+        let mut time = chrono::NaiveTime::from_hms_nano_opt(4, 5, 6, 7).unwrap();
+        assert_eq!(
+            chrono::NaiveDateTime::decode(&DbValue::Datetime((1, 2, 3, 4, 5, 6, 7))).unwrap(),
+            chrono::NaiveDateTime::new(date, time)
+        );
+
+        time = chrono::NaiveTime::from_hms_nano_opt(4, 5, 6, 8).unwrap();
+        assert_ne!(
+            NaiveDateTime::decode(&DbValue::Datetime((1, 2, 3, 4, 5, 6, 7))).unwrap(),
+            chrono::NaiveDateTime::new(date, time)
+        );
+        assert!(Option::<chrono::NaiveDateTime>::decode(&DbValue::DbNull)
+            .unwrap()
+            .is_none());
+    }
+
+    #[test]
+    fn timestamp() {
+        assert_eq!(
+            chrono::Duration::decode(&DbValue::Timestamp(1)).unwrap(),
+            chrono::Duration::seconds(1),
+        );
+        assert_ne!(
+            chrono::Duration::decode(&DbValue::Timestamp(2)).unwrap(),
+            chrono::Duration::seconds(1)
+        );
+        assert!(Option::<chrono::Duration>::decode(&DbValue::DbNull)
+            .unwrap()
+            .is_none());
+    }
+}
diff --git a/wit/deps/keyvalue-2024-10-17/atomic.wit b/wit/deps/keyvalue-2024-10-17/atomic.wit
new file mode 100644
index 0000000..2c3e0d0
--- /dev/null
+++ b/wit/deps/keyvalue-2024-10-17/atomic.wit
@@ -0,0 +1,46 @@
+/// A keyvalue interface that provides atomic operations.
+///
+/// Atomic operations are single, indivisible operations. When a fault causes an atomic operation to
+/// fail, it will appear to the invoker of the atomic operation that the action either completed
+/// successfully or did nothing at all.
+///
+/// Please note that this interface is bare functions that take a reference to a bucket. This is to
+/// get around the current lack of a way to "extend" a resource with additional methods inside of
+/// wit. Future versions of the interface will instead extend these methods on the base `bucket`
+/// resource.
+interface atomics {
+  use store.{bucket, error};
+
+  /// The error returned by a CAS operation
+  variant cas-error {
+    /// A store error occurred when performing the operation
+    store-error(error),
+    /// The CAS operation failed because the value was too old. This returns a new CAS handle
+    /// for easy retries. Implementors MUST return a CAS handle that has been updated to the
+    /// latest version or transaction.
+    cas-failed(cas),
+  }
+
+  /// A handle to a CAS (compare-and-swap) operation.
+  resource cas {
+    /// Construct a new CAS operation. Implementors can map the underlying functionality
+    /// (transactions, versions, etc) as desired.
+    new: static func(bucket: borrow<bucket>, key: string) -> result<cas, error>;
+    /// Get the current value of the key (if it exists). This allows for avoiding reads if all
+    /// that is needed to ensure the atomicity of the operation
+    current: func() -> result<option<list<u8>>, error>;
+  }
+
+  /// Atomically increment the value associated with the key in the store by the given delta. It
+  /// returns the new value.
+  ///
+  /// If the key does not exist in the store, it creates a new key-value pair with the value set
+  /// to the given delta.
+  ///
+  /// If any other error occurs, it returns an `Err(error)`.
+  increment: func(bucket: borrow<bucket>, key: string, delta: s64) -> result<s64, error>;
+
+  /// Perform the swap on a CAS operation. This consumes the CAS handle and returns an error if
+  /// the CAS operation failed.
+  swap: func(cas: cas, value: list<u8>) -> result<_, cas-error>;
+}
diff --git a/wit/deps/keyvalue-2024-10-17/batch.wit b/wit/deps/keyvalue-2024-10-17/batch.wit
new file mode 100644
index 0000000..6d6e873
--- /dev/null
+++ b/wit/deps/keyvalue-2024-10-17/batch.wit
@@ -0,0 +1,63 @@
+/// A keyvalue interface that provides batch operations.
+///
+/// A batch operation is an operation that operates on multiple keys at once.
+///
+/// Batch operations are useful for reducing network round-trip time. For example, if you want to
+/// get the values associated with 100 keys, you can either do 100 get operations or you can do 1
+/// batch get operation. The batch operation is faster because it only needs to make 1 network call
+/// instead of 100.
+///
+/// A batch operation does not guarantee atomicity, meaning that if the batch operation fails, some
+/// of the keys may have been modified and some may not.
+///
+/// This interface has the same consistency guarantees as the `store` interface, meaning that
+/// you should be able to "read your writes."
+///
+/// Please note that this interface is bare functions that take a reference to a bucket. This is to
+/// get around the current lack of a way to "extend" a resource with additional methods inside of
+/// wit. Future versions of the interface will instead extend these methods on the base `bucket`
+/// resource.
+interface batch {
+  use store.{bucket, error};
+
+  /// Get the key-value pairs associated with the keys in the store. It returns a list of
+  /// key-value pairs.
+  ///
+  /// If any of the keys do not exist in the store, it returns a `none` value for that pair in the
+  /// list.
+  ///
+  /// MAY show an out-of-date value if there are concurrent writes to the store.
+  ///
+  /// If any other error occurs, it returns an `Err(error)`.
+  get-many: func(bucket: borrow<bucket>, keys: list<string>) -> result<list<option<tuple<string, list<u8>>>>, error>;
+
+  /// Set the values associated with the keys in the store. If the key already exists in the
+  /// store, it overwrites the value.
+  ///
+  /// Note that the key-value pairs are not guaranteed to be set in the order they are provided.
+  ///
+  /// If any of the keys do not exist in the store, it creates a new key-value pair.
+  ///
+  /// If any other error occurs, it returns an `Err(error)`. When an error occurs, it does not
+  /// rollback the key-value pairs that were already set. Thus, this batch operation does not
+  /// guarantee atomicity, implying that some key-value pairs could be set while others might
+  /// fail.
+  ///
+  /// Other concurrent operations may also be able to see the partial results.
+  set-many: func(bucket: borrow<bucket>, key-values: list<tuple<string, list<u8>>>) -> result<_, error>;
+
+  /// Delete the key-value pairs associated with the keys in the store.
+  ///
+  /// Note that the key-value pairs are not guaranteed to be deleted in the order they are
+  /// provided.
+  ///
+  /// If any of the keys do not exist in the store, it skips the key.
+  ///
+  /// If any other error occurs, it returns an `Err(error)`. When an error occurs, it does not
+  /// rollback the key-value pairs that were already deleted. Thus, this batch operation does not
+  /// guarantee atomicity, implying that some key-value pairs could be deleted while others might
+  /// fail.
+  ///
+  /// Other concurrent operations may also be able to see the partial results.
+  delete-many: func(bucket: borrow<bucket>, keys: list<string>) -> result<_, error>;
+}
diff --git a/wit/deps/keyvalue-2024-10-17/store.wit b/wit/deps/keyvalue-2024-10-17/store.wit
new file mode 100644
index 0000000..c7fef41
--- /dev/null
+++ b/wit/deps/keyvalue-2024-10-17/store.wit
@@ -0,0 +1,122 @@
+/// A keyvalue interface that provides eventually consistent key-value operations.
+///
+/// Each of these operations acts on a single key-value pair.
+///
+/// The value in the key-value pair is defined as a `u8` byte array and the intention is that it is
+/// the common denominator for all data types defined by different key-value stores to handle data,
+/// ensuring compatibility between different key-value stores. Note: the clients will be expecting
+/// serialization/deserialization overhead to be handled by the key-value store. The value could be
+/// a serialized object from JSON, HTML or vendor-specific data types like AWS S3 objects.
+///
+/// Data consistency in a key value store refers to the guarantee that once a write operation
+/// completes, all subsequent read operations will return the value that was written.
+///
+/// Any implementation of this interface must have enough consistency to guarantee "reading your
+/// writes." In particular, this means that the client should never get a value that is older than
+/// the one it wrote, but it MAY get a newer value if one was written around the same time. These
+/// guarantees only apply to the same client (which will likely be provided by the host or an
+/// external capability of some kind). In this context a "client" is referring to the caller or
+/// guest that is consuming this interface. Once a write request is committed by a specific client,
+/// all subsequent read requests by the same client will reflect that write or any subsequent
+/// writes. Another client running in a different context may or may not immediately see the result
+/// due to the replication lag. As an example of all of this, if a value at a given key is A, and
+/// the client writes B, then immediately reads, it should get B. If something else writes C in
+/// quick succession, then the client may get C.
+/// However, a client running in a separate context may still see A or B.
+interface store {
+  /// The set of errors which may be raised by functions in this package
+  variant error {
+    /// The host does not recognize the store identifier requested.
+    no-such-store,
+
+    /// The requesting component does not have access to the specified store
+    /// (which may or may not exist).
+    access-denied,
+
+    /// Some implementation-specific error has occurred (e.g. I/O)
+    other(string)
+  }
+
+  /// A response to a `list-keys` operation.
+  record key-response {
+    /// The list of keys returned by the query.
+    keys: list<string>,
+    /// The continuation token to use to fetch the next page of keys. If this is `null`, then
+    /// there are no more keys to fetch.
+    cursor: option<string>
+  }
+
+  /// Get the bucket with the specified identifier.
+  ///
+  /// `identifier` must refer to a bucket provided by the host.
+  ///
+  /// `error::no-such-store` will be raised if the `identifier` is not recognized.
+  open: func(identifier: string) -> result<bucket, error>;
+
+  /// A bucket is a collection of key-value pairs. Each key-value pair is stored as an entry in the
+  /// bucket, and the bucket itself acts as a collection of all these entries.
+  ///
+  /// It is worth noting that the exact terminology for bucket in key-value stores can vary
+  /// depending on the specific implementation. For example:
+  ///
+  /// 1. Amazon DynamoDB calls a collection of key-value pairs a table
+  /// 2. Redis has hashes, sets, and sorted sets as different types of collections
+  /// 3. Cassandra calls a collection of key-value pairs a column family
+  /// 4. MongoDB calls a collection of key-value pairs a collection
+  /// 5. Riak calls a collection of key-value pairs a bucket
+  /// 6. Memcached calls a collection of key-value pairs a slab
+  /// 7. Azure Cosmos DB calls a collection of key-value pairs a container
+  ///
+  /// In this interface, we use the term `bucket` to refer to a collection of key-value pairs
+  resource bucket {
+    /// Get the value associated with the specified `key`
+    ///
+    /// The value is returned as an option. If the key-value pair exists in the
+    /// store, it returns `Ok(value)`. If the key does not exist in the
+    /// store, it returns `Ok(none)`.
+    ///
+    /// If any other error occurs, it returns an `Err(error)`.
+    get: func(key: string) -> result<option<list<u8>>, error>;
+
+    /// Set the value associated with the key in the store. If the key already
+    /// exists in the store, it overwrites the value.
+    ///
+    /// If the key does not exist in the store, it creates a new key-value pair.
+    ///
+    /// If any other error occurs, it returns an `Err(error)`.
+    set: func(key: string, value: list<u8>) -> result<_, error>;
+
+    /// Delete the key-value pair associated with the key in the store.
+    ///
+    /// If the key does not exist in the store, it does nothing.
+    ///
+    /// If any other error occurs, it returns an `Err(error)`.
+    delete: func(key: string) -> result<_, error>;
+
+    /// Check if the key exists in the store.
+    ///
+    /// If the key exists in the store, it returns `Ok(true)`. If the key does
+    /// not exist in the store, it returns `Ok(false)`.
+    ///
+    /// If any other error occurs, it returns an `Err(error)`.
+    exists: func(key: string) -> result<bool, error>;
+
+    /// Get all the keys in the store with an optional cursor (for use in pagination). It
+    /// returns a list of keys. Please note that for most KeyValue implementations, this
+    /// can be a very expensive operation and so it should be used judiciously.
+    /// Implementations can return any number of keys in a single response, but they should never
+    /// attempt to send more data than is reasonable (i.e. on a small edge device, this may only
+    /// be a few KB, while on a large machine this could be several MB). Any response should also
+    /// return a cursor that can be used to fetch the next page of keys. See the `key-response`
+    /// record for more information.
+    ///
+    /// Note that the keys are not guaranteed to be returned in any particular order.
+    ///
+    /// If the store is empty, it returns an empty list.
+    ///
+    /// MAY show an out-of-date list of keys if there are concurrent writes to the store.
+    ///
+    /// If any error occurs, it returns an `Err(error)`.
+    list-keys: func(cursor: option<string>) -> result<key-response, error>;
+  }
+}
diff --git a/wit/deps/keyvalue-2024-10-17/watch.wit b/wit/deps/keyvalue-2024-10-17/watch.wit
new file mode 100644
index 0000000..9299119
--- /dev/null
+++ b/wit/deps/keyvalue-2024-10-17/watch.wit
@@ -0,0 +1,16 @@
+/// A keyvalue interface that provides watch operations.
+///
+/// This interface is used to provide event-driven mechanisms to handle
+/// keyvalue changes.
+interface watcher {
+  /// A keyvalue interface that provides handle-watch operations.
+  use store.{bucket};
+
+  /// Handle the `set` event for the given bucket and key. It includes a reference to the `bucket`
+  /// that can be used to interact with the store.
+  on-set: func(bucket: bucket, key: string, value: list<u8>);
+
+  /// Handle the `delete` event for the given bucket and key. It includes a reference to the
+  /// `bucket` that can be used to interact with the store.
+  on-delete: func(bucket: bucket, key: string);
+}
diff --git a/wit/deps/keyvalue-2024-10-17/world.wit b/wit/deps/keyvalue-2024-10-17/world.wit
new file mode 100644
index 0000000..64eb4e1
--- /dev/null
+++ b/wit/deps/keyvalue-2024-10-17/world.wit
@@ -0,0 +1,26 @@
+package wasi:keyvalue@0.2.0-draft2;
+
+/// The `wasi:keyvalue/imports` world provides common APIs for interacting with key-value stores.
+/// Components targeting this world will be able to do:
+///
+/// 1. CRUD (create, read, update, delete) operations on key-value stores.
+/// 2. Atomic `increment` and CAS (compare-and-swap) operations.
+/// 3. Batch operations that can reduce the number of round trips to the network.
+world imports {
+  /// The `store` capability allows the component to perform eventually consistent operations on
+  /// the key-value store.
+  import store;
+
+  /// The `atomic` capability allows the component to perform atomic `increment` and CAS
+  /// (compare-and-swap) operations.
+  import atomics;
+
+  /// The `batch` capability allows the component to perform eventually consistent batch
+  /// operations that can reduce the number of round trips to the network.
+  import batch;
+}
+
+world watch-service {
+  include imports;
+  export watcher;
+}
diff --git a/wit/deps/spin-postgres@3.0.0/postgres.wit b/wit/deps/spin-postgres@3.0.0/postgres.wit
new file mode 100644
index 0000000..6e94a40
--- /dev/null
+++ b/wit/deps/spin-postgres@3.0.0/postgres.wit
@@ -0,0 +1,100 @@
+package spin:postgres@3.0.0;
+
+interface postgres {
+  /// Errors related to interacting with a database.
+  variant error {
+    connection-failed(string),
+    bad-parameter(string),
+    query-failed(string),
+    value-conversion-failed(string),
+    other(string)
+  }
+
+  /// Data types for a database column
+  enum db-data-type {
+    boolean,
+    int8,
+    int16,
+    int32,
+    int64,
+    floating32,
+    floating64,
+    str,
+    binary,
+    date,
+    time,
+    datetime,
+    timestamp,
+    other,
+  }
+
+  /// Database values
+  variant db-value {
+    boolean(bool),
+    int8(s8),
+    int16(s16),
+    int32(s32),
+    int64(s64),
+    floating32(float32),
+    floating64(float64),
+    str(string),
+    binary(list<u8>),
+    date(tuple<s32, u8, u8>),     // (year, month, day)
+    time(tuple<u8, u8, u8, u32>), // (hour, minute, second, nanosecond)
+    /// Date-time types are always treated as UTC (without timezone info).
+    /// The instant is represented as a (year, month, day, hour, minute, second, nanosecond) tuple.
+    datetime(tuple<s32, u8, u8, u8, u8, u8, u32>),
+    /// Unix timestamp (seconds since epoch)
+    timestamp(s64),
+    db-null,
+    unsupported,
+  }
+
+  /// Values used in parameterized queries
+  variant parameter-value {
+    boolean(bool),
+    int8(s8),
+    int16(s16),
+    int32(s32),
+    int64(s64),
+    floating32(float32),
+    floating64(float64),
+    str(string),
+    binary(list<u8>),
+    date(tuple<s32, u8, u8>),     // (year, month, day)
+    time(tuple<u8, u8, u8, u32>), // (hour, minute, second, nanosecond)
+    /// Date-time types are always treated as UTC (without timezone info).
+    /// The instant is represented as a (year, month, day, hour, minute, second, nanosecond) tuple.
+    datetime(tuple<s32, u8, u8, u8, u8, u8, u32>),
+    /// Unix timestamp (seconds since epoch)
+    timestamp(s64),
+    db-null,
+  }
+
+  /// A database column
+  record column {
+    name: string,
+    data-type: db-data-type,
+  }
+
+  /// A database row
+  type row = list<db-value>;
+
+  /// A set of database rows
+  record row-set {
+    columns: list<column>,
+    rows: list<row>,
+  }
+
+  /// A connection to a postgres database.
+  resource connection {
+    /// Open a connection to the Postgres instance at `address`.
+    open: static func(address: string) -> result<connection, error>;
+
+    /// Query the database.
+    query: func(statement: string, params: list<parameter-value>) -> result<row-set, error>;
+
+    /// Execute command to the database.
+    execute: func(statement: string, params: list<parameter-value>) -> result<u64, error>;
+  }
+}
diff --git a/wit/world.wit b/wit/world.wit
index b38f1f4..07da9cd 100644
--- a/wit/world.wit
+++ b/wit/world.wit
@@ -9,5 +9,7 @@ world http-trigger {
 /// The imports needed for a guest to run on a Spin host
 world platform {
   include fermyon:spin/platform@2.0.0;
+  include wasi:keyvalue/imports@0.2.0-draft2;
+  import spin:postgres/postgres@3.0.0;
   import wasi:config/store@0.2.0-draft-2024-09-27;
 }
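
The date/time cases of `parameter-value` above correspond to the `From<chrono::...>` conversions
added in `src/pg3.rs`. A minimal usage sketch (the table and columns come from the example's
`db/testdata.sql`; the function name is illustrative):

```rust
use spin_sdk::pg3;

fn insert_with_times(conn: &pg3::Connection) -> anyhow::Result<u64> {
    let date = chrono::NaiveDate::from_ymd_opt(2024, 11, 6).unwrap();
    let time = chrono::NaiveTime::from_hms_nano_opt(12, 0, 0, 0).unwrap();
    // Interpreted as UTC; the WIT datetime carries no timezone information.
    let datetime = chrono::NaiveDateTime::new(date, time);

    let affected = conn.execute(
        "INSERT INTO articletest (title, content, authorname, publisheddate, publishedtime, publisheddatetime, readtime) \
         VALUES ($1, $2, $3, $4, $5, $6, $7)",
        &[
            "title".to_string().into(),   // str(string)
            "content".to_string().into(), // str(string)
            "author".to_string().into(),  // str(string)
            date.into(),                  // date(tuple<s32, u8, u8>)
            time.into(),                  // time(tuple<u8, u8, u8, u32>)
            datetime.into(),              // datetime(tuple<s32, u8, u8, u8, u8, u8, u32>)
            123i64.into(),                // int64(s64) for the BIGINT readtime column
        ],
    )?;
    Ok(affected)
}
```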