diff --git a/consts/envs.ts b/consts/envs.ts index c339066..f3c9313 100644 --- a/consts/envs.ts +++ b/consts/envs.ts @@ -14,6 +14,10 @@ export const LLM_MODELS_MAPPING = "LLM_MODELS_MAPPING"; export const LLM_HAS_MULTIMODAL_MODELS = "LLM_HAS_MULTIMODAL_MODELS"; export const MODEL_KNOWLEDGE_CUTOFF = "MODEL_KNOWLEDGE_CUTOFF"; +// this env names for embedding +export const EMBEDDING_POVIDER = "EMBEDDING_POVIDER"; +export const EMBEDDING_MODELS = "EMBEDDING_MODELS"; + // the env var names of anthropic export const ANTHROPIC_API_URL = "ANTHROPIC_API_URL"; export const ANTHROPIC_API_KEY = "ANTHROPIC_API_KEY"; @@ -29,3 +33,6 @@ export const RETRIEVAL_PROVIDER = "RETRIEVAL_PROVIDER"; // open retrieval export const OPEN_RETRIEVAL_API_URL = "OPEN_RETRIEVAL_API_URL"; export const OPEN_RETRIEVAL_API_KEY = "OPEN_RETRIEVAL_API_KEY"; + +// open tokeniser +export const TOKENISER_API_URL = "TOKENISER_API_URL"; diff --git a/consts/providers.ts b/consts/providers.ts new file mode 100644 index 0000000..045a4b2 --- /dev/null +++ b/consts/providers.ts @@ -0,0 +1,4 @@ +export const ANTHROPIC = "anthropic"; +export const OLLAMA = "ollama"; +export const OPENAI = "openai"; +export const GOOGLE = "google"; diff --git a/deno.json b/deno.json index 893dd01..a93c228 100644 --- a/deno.json +++ b/deno.json @@ -16,7 +16,8 @@ "imports": { "$/": "./", "$fresh/": "https://deno.land/x/fresh@1.6.8/", - "$open-schemas/": "https://deno.land/x/open_schemas@2.2.0/", + "$open-schemas/": "https://deno.land/x/open_schemas@2.2.1/", + "$postgres/": "https://deno.land/x/postgres@v0.19.3/", "$std/": "https://deno.land/std@0.219.0/", "@open-schemas/zod": "jsr:@open-schemas/zod@^0.10.2", "@std/assert": "jsr:@std/assert@^0.221.0", diff --git a/deno.lock b/deno.lock index 4779503..3108d24 100644 --- a/deno.lock +++ b/deno.lock @@ -180,6 +180,106 @@ "https://deno.land/std@0.208.0/path/windows/separator.ts": "ae21f27015f10510ed1ac4a0ba9c4c9c967cbdd9d9e776a3e4967553c397bd5d", 
"https://deno.land/std@0.208.0/path/windows/to_file_url.ts": "8e9ea9e1ff364aa06fa72999204229952d0a279dbb876b7b838b2b2fea55cce3", "https://deno.land/std@0.208.0/path/windows/to_namespaced_path.ts": "e0f4d4a5e77f28a5708c1a33ff24360f35637ba6d8f103d19661255ef7bfd50d", + "https://deno.land/std@0.214.0/assert/assert.ts": "bec068b2fccdd434c138a555b19a2c2393b71dfaada02b7d568a01541e67cdc5", + "https://deno.land/std@0.214.0/assert/assertion_error.ts": "9f689a101ee586c4ce92f52fa7ddd362e86434ffdf1f848e45987dc7689976b8", + "https://deno.land/std@0.214.0/async/delay.ts": "8e1d18fe8b28ff95885e2bc54eccec1713f57f756053576d8228e6ca110793ad", + "https://deno.land/std@0.214.0/bytes/copy.ts": "f29c03168853720dfe82eaa57793d0b9e3543ebfe5306684182f0f1e3bfd422a", + "https://deno.land/std@0.214.0/crypto/_fnv/fnv32.ts": "ba2c5ef976b9f047d7ce2d33dfe18671afc75154bcf20ef89d932b2fe8820535", + "https://deno.land/std@0.214.0/crypto/_fnv/fnv64.ts": "580cadfe2ff333fe253d15df450f927c8ac7e408b704547be26aab41b5772558", + "https://deno.land/std@0.214.0/crypto/_fnv/mod.ts": "8dbb60f062a6e77b82f7a62ac11fabfba52c3cd408c21916b130d8f57a880f96", + "https://deno.land/std@0.214.0/crypto/_fnv/util.ts": "27b36ce3440d0a180af6bf1cfc2c326f68823288540a354dc1d636b781b9b75f", + "https://deno.land/std@0.214.0/crypto/_wasm/lib/deno_std_wasm_crypto.generated.mjs": "76c727912539737def4549bb62a96897f37eb334b979f49c57b8af7a1617635e", + "https://deno.land/std@0.214.0/crypto/_wasm/mod.ts": "c55f91473846827f077dfd7e5fc6e2726dee5003b6a5747610707cdc638a22ba", + "https://deno.land/std@0.214.0/crypto/crypto.ts": "4448f8461c797adba8d70a2c60f7795a546d7a0926e96366391bffdd06491c16", + "https://deno.land/std@0.214.0/datetime/_common.ts": "a62214c1924766e008e27d3d843ceba4b545dc2aa9880de0ecdef9966d5736b6", + "https://deno.land/std@0.214.0/datetime/parse.ts": "bb248bbcb3cd54bcaf504a1ee670fc4695e429d9019c06af954bbe2bcb8f1d02", + "https://deno.land/std@0.214.0/encoding/_util.ts": 
"beacef316c1255da9bc8e95afb1fa56ed69baef919c88dc06ae6cb7a6103d376", + "https://deno.land/std@0.214.0/encoding/base64.ts": "96e61a556d933201266fea84ae500453293f2aff130057b579baafda096a96bc", + "https://deno.land/std@0.214.0/encoding/hex.ts": "4d47d3b25103cf81a2ed38f54b394d39a77b63338e1eaa04b70c614cb45ec2e6", + "https://deno.land/std@0.214.0/fmt/colors.ts": "aeaee795471b56fc62a3cb2e174ed33e91551b535f44677f6320336aabb54fbb", + "https://deno.land/std@0.214.0/io/buf_reader.ts": "c73aad99491ee6db3d6b001fa4a780e9245c67b9296f5bad9c0fa7384e35d47a", + "https://deno.land/std@0.214.0/io/buf_writer.ts": "f82f640c8b3a820f600a8da429ad0537037c7d6a78426bbca2396fb1f75d3ef4", + "https://deno.land/std@0.214.0/io/types.ts": "748bbb3ac96abda03594ef5a0db15ce5450dcc6c0d841c8906f8b10ac8d32c96", + "https://deno.land/std@0.214.0/path/_common/assert_path.ts": "2ca275f36ac1788b2acb60fb2b79cb06027198bc2ba6fb7e163efaedde98c297", + "https://deno.land/std@0.214.0/path/_common/basename.ts": "569744855bc8445f3a56087fd2aed56bdad39da971a8d92b138c9913aecc5fa2", + "https://deno.land/std@0.214.0/path/_common/common.ts": "6157c7ec1f4db2b4a9a187efd6ce76dcaf1e61cfd49f87e40d4ea102818df031", + "https://deno.land/std@0.214.0/path/_common/constants.ts": "dc5f8057159f4b48cd304eb3027e42f1148cf4df1fb4240774d3492b5d12ac0c", + "https://deno.land/std@0.214.0/path/_common/dirname.ts": "684df4aa71a04bbcc346c692c8485594fc8a90b9408dfbc26ff32cf3e0c98cc8", + "https://deno.land/std@0.214.0/path/_common/format.ts": "92500e91ea5de21c97f5fe91e178bae62af524b72d5fcd246d6d60ae4bcada8b", + "https://deno.land/std@0.214.0/path/_common/from_file_url.ts": "d672bdeebc11bf80e99bf266f886c70963107bdd31134c4e249eef51133ceccf", + "https://deno.land/std@0.214.0/path/_common/glob_to_reg_exp.ts": "2007aa87bed6eb2c8ae8381adcc3125027543d9ec347713c1ad2c68427330770", + "https://deno.land/std@0.214.0/path/_common/normalize.ts": "684df4aa71a04bbcc346c692c8485594fc8a90b9408dfbc26ff32cf3e0c98cc8", + 
"https://deno.land/std@0.214.0/path/_common/normalize_string.ts": "dfdf657a1b1a7db7999f7c575ee7e6b0551d9c20f19486c6c3f5ff428384c965", + "https://deno.land/std@0.214.0/path/_common/relative.ts": "faa2753d9b32320ed4ada0733261e3357c186e5705678d9dd08b97527deae607", + "https://deno.land/std@0.214.0/path/_common/strip_trailing_separators.ts": "7024a93447efcdcfeaa9339a98fa63ef9d53de363f1fbe9858970f1bba02655a", + "https://deno.land/std@0.214.0/path/_common/to_file_url.ts": "7f76adbc83ece1bba173e6e98a27c647712cab773d3f8cbe0398b74afc817883", + "https://deno.land/std@0.214.0/path/_interface.ts": "a1419fcf45c0ceb8acdccc94394e3e94f99e18cfd32d509aab514c8841799600", + "https://deno.land/std@0.214.0/path/_os.ts": "8fb9b90fb6b753bd8c77cfd8a33c2ff6c5f5bc185f50de8ca4ac6a05710b2c15", + "https://deno.land/std@0.214.0/path/basename.ts": "5d341aadb7ada266e2280561692c165771d071c98746fcb66da928870cd47668", + "https://deno.land/std@0.214.0/path/common.ts": "03e52e22882402c986fe97ca3b5bb4263c2aa811c515ce84584b23bac4cc2643", + "https://deno.land/std@0.214.0/path/constants.ts": "0c206169ca104938ede9da48ac952de288f23343304a1c3cb6ec7625e7325f36", + "https://deno.land/std@0.214.0/path/dirname.ts": "85bd955bf31d62c9aafdd7ff561c4b5fb587d11a9a5a45e2b01aedffa4238a7c", + "https://deno.land/std@0.214.0/path/extname.ts": "593303db8ae8c865cbd9ceec6e55d4b9ac5410c1e276bfd3131916591b954441", + "https://deno.land/std@0.214.0/path/format.ts": "98fad25f1af7b96a48efb5b67378fcc8ed77be895df8b9c733b86411632162af", + "https://deno.land/std@0.214.0/path/from_file_url.ts": "911833ae4fd10a1c84f6271f36151ab785955849117dc48c6e43b929504ee069", + "https://deno.land/std@0.214.0/path/glob_to_regexp.ts": "83c5fd36a8c86f5e72df9d0f45317f9546afa2ce39acaafe079d43a865aced08", + "https://deno.land/std@0.214.0/path/is_absolute.ts": "4791afc8bfd0c87f0526eaa616b0d16e7b3ab6a65b62942e50eac68de4ef67d7", + "https://deno.land/std@0.214.0/path/is_glob.ts": "a65f6195d3058c3050ab905705891b412ff942a292bcbaa1a807a74439a14141", + 
"https://deno.land/std@0.214.0/path/join.ts": "ae2ec5ca44c7e84a235fd532e4a0116bfb1f2368b394db1c4fb75e3c0f26a33a", + "https://deno.land/std@0.214.0/path/join_globs.ts": "e9589869a33dc3982101898ee50903db918ca00ad2614dbe3934d597d7b1fbea", + "https://deno.land/std@0.214.0/path/mod.ts": "ffeaccb713dbe6c72e015b7c767f753f8ec5fbc3b621ff5eeee486ffc2c0ddda", + "https://deno.land/std@0.214.0/path/normalize.ts": "4155743ccceeed319b350c1e62e931600272fad8ad00c417b91df093867a8352", + "https://deno.land/std@0.214.0/path/normalize_glob.ts": "98ee8268fad271193603271c203ae973280b5abfbdd2cbca1053fd2af71869ca", + "https://deno.land/std@0.214.0/path/parse.ts": "65e8e285f1a63b714e19ef24b68f56e76934c3df0b6e65fd440d3991f4f8aefb", + "https://deno.land/std@0.214.0/path/posix/_util.ts": "1e3937da30f080bfc99fe45d7ed23c47dd8585c5e473b2d771380d3a6937cf9d", + "https://deno.land/std@0.214.0/path/posix/basename.ts": "39ee27a29f1f35935d3603ccf01d53f3d6e0c5d4d0f84421e65bd1afeff42843", + "https://deno.land/std@0.214.0/path/posix/common.ts": "26f60ccc8b2cac3e1613000c23ac5a7d392715d479e5be413473a37903a2b5d4", + "https://deno.land/std@0.214.0/path/posix/constants.ts": "93481efb98cdffa4c719c22a0182b994e5a6aed3047e1962f6c2c75b7592bef1", + "https://deno.land/std@0.214.0/path/posix/dirname.ts": "6535d2bdd566118963537b9dda8867ba9e2a361015540dc91f5afbb65c0cce8b", + "https://deno.land/std@0.214.0/path/posix/extname.ts": "8d36ae0082063c5e1191639699e6f77d3acf501600a3d87b74943f0ae5327427", + "https://deno.land/std@0.214.0/path/posix/format.ts": "185e9ee2091a42dd39e2a3b8e4925370ee8407572cee1ae52838aed96310c5c1", + "https://deno.land/std@0.214.0/path/posix/from_file_url.ts": "951aee3a2c46fd0ed488899d024c6352b59154c70552e90885ed0c2ab699bc40", + "https://deno.land/std@0.214.0/path/posix/glob_to_regexp.ts": "54d3ff40f309e3732ab6e5b19d7111d2d415248bcd35b67a99defcbc1972e697", + "https://deno.land/std@0.214.0/path/posix/is_absolute.ts": "cebe561ad0ae294f0ce0365a1879dcfca8abd872821519b4fcc8d8967f888ede", + 
"https://deno.land/std@0.214.0/path/posix/is_glob.ts": "8a8b08c08bf731acf2c1232218f1f45a11131bc01de81e5f803450a5914434b9", + "https://deno.land/std@0.214.0/path/posix/join.ts": "aef88d5fa3650f7516730865dbb951594d1a955b785e2450dbee93b8e32694f3", + "https://deno.land/std@0.214.0/path/posix/join_globs.ts": "ee2f4676c5b8a0dfa519da58b8ade4d1c4aa8dd3fe35619edec883ae9df1f8c9", + "https://deno.land/std@0.214.0/path/posix/mod.ts": "563a18c2b3ddc62f3e4a324ff0f583e819b8602a72ad880cb98c9e2e34f8db5b", + "https://deno.land/std@0.214.0/path/posix/normalize.ts": "baeb49816a8299f90a0237d214cef46f00ba3e95c0d2ceb74205a6a584b58a91", + "https://deno.land/std@0.214.0/path/posix/normalize_glob.ts": "65f0138fa518ef9ece354f32889783fc38cdf985fb02dcf1c3b14fa47d665640", + "https://deno.land/std@0.214.0/path/posix/parse.ts": "d5bac4eb21262ab168eead7e2196cb862940c84cee572eafedd12a0d34adc8fb", + "https://deno.land/std@0.214.0/path/posix/relative.ts": "3907d6eda41f0ff723d336125a1ad4349112cd4d48f693859980314d5b9da31c", + "https://deno.land/std@0.214.0/path/posix/resolve.ts": "bac20d9921beebbbb2b73706683b518b1d0c1b1da514140cee409e90d6b2913a", + "https://deno.land/std@0.214.0/path/posix/separator.ts": "c9ecae5c843170118156ac5d12dc53e9caf6a1a4c96fc8b1a0ab02dff5c847b0", + "https://deno.land/std@0.214.0/path/posix/to_file_url.ts": "7aa752ba66a35049e0e4a4be5a0a31ac6b645257d2e031142abb1854de250aaf", + "https://deno.land/std@0.214.0/path/posix/to_namespaced_path.ts": "28b216b3c76f892a4dca9734ff1cc0045d135532bfd9c435ae4858bfa5a2ebf0", + "https://deno.land/std@0.214.0/path/relative.ts": "ab739d727180ed8727e34ed71d976912461d98e2b76de3d3de834c1066667add", + "https://deno.land/std@0.214.0/path/resolve.ts": "a6f977bdb4272e79d8d0ed4333e3d71367cc3926acf15ac271f1d059c8494d8d", + "https://deno.land/std@0.214.0/path/separator.ts": "c6c890507f944a1f5cb7d53b8d638d6ce3cf0f34609c8d84a10c1eaa400b77a9", + "https://deno.land/std@0.214.0/path/to_file_url.ts": 
"88f049b769bce411e2d2db5bd9e6fd9a185a5fbd6b9f5ad8f52bef517c4ece1b", + "https://deno.land/std@0.214.0/path/to_namespaced_path.ts": "b706a4103b104cfadc09600a5f838c2ba94dbcdb642344557122dda444526e40", + "https://deno.land/std@0.214.0/path/windows/_util.ts": "d5f47363e5293fced22c984550d5e70e98e266cc3f31769e1710511803d04808", + "https://deno.land/std@0.214.0/path/windows/basename.ts": "e2dbf31d1d6385bfab1ce38c333aa290b6d7ae9e0ecb8234a654e583cf22f8fe", + "https://deno.land/std@0.214.0/path/windows/common.ts": "26f60ccc8b2cac3e1613000c23ac5a7d392715d479e5be413473a37903a2b5d4", + "https://deno.land/std@0.214.0/path/windows/constants.ts": "5afaac0a1f67b68b0a380a4ef391bf59feb55856aa8c60dfc01bd3b6abb813f5", + "https://deno.land/std@0.214.0/path/windows/dirname.ts": "33e421be5a5558a1346a48e74c330b8e560be7424ed7684ea03c12c21b627bc9", + "https://deno.land/std@0.214.0/path/windows/extname.ts": "165a61b00d781257fda1e9606a48c78b06815385e7d703232548dbfc95346bef", + "https://deno.land/std@0.214.0/path/windows/format.ts": "bbb5ecf379305b472b1082cd2fdc010e44a0020030414974d6029be9ad52aeb6", + "https://deno.land/std@0.214.0/path/windows/from_file_url.ts": "ced2d587b6dff18f963f269d745c4a599cf82b0c4007356bd957cb4cb52efc01", + "https://deno.land/std@0.214.0/path/windows/glob_to_regexp.ts": "6dcd1242bd8907aa9660cbdd7c93446e6927b201112b0cba37ca5d80f81be51b", + "https://deno.land/std@0.214.0/path/windows/is_absolute.ts": "4a8f6853f8598cf91a835f41abed42112cebab09478b072e4beb00ec81f8ca8a", + "https://deno.land/std@0.214.0/path/windows/is_glob.ts": "8a8b08c08bf731acf2c1232218f1f45a11131bc01de81e5f803450a5914434b9", + "https://deno.land/std@0.214.0/path/windows/join.ts": "e0b3356615c1a75c56ebb6a7311157911659e11fd533d80d724800126b761ac3", + "https://deno.land/std@0.214.0/path/windows/join_globs.ts": "ee2f4676c5b8a0dfa519da58b8ade4d1c4aa8dd3fe35619edec883ae9df1f8c9", + "https://deno.land/std@0.214.0/path/windows/mod.ts": "7d6062927bda47c47847ffb55d8f1a37b0383840aee5c7dfc93984005819689c", + 
"https://deno.land/std@0.214.0/path/windows/normalize.ts": "78126170ab917f0ca355a9af9e65ad6bfa5be14d574c5fb09bb1920f52577780", + "https://deno.land/std@0.214.0/path/windows/normalize_glob.ts": "179c86ba89f4d3fe283d2addbe0607341f79ee9b1ae663abcfb3439db2e97810", + "https://deno.land/std@0.214.0/path/windows/parse.ts": "b9239edd892a06a06625c1b58425e199f018ce5649ace024d144495c984da734", + "https://deno.land/std@0.214.0/path/windows/relative.ts": "3e1abc7977ee6cc0db2730d1f9cb38be87b0ce4806759d271a70e4997fc638d7", + "https://deno.land/std@0.214.0/path/windows/resolve.ts": "75b2e3e1238d840782cee3d8864d82bfaa593c7af8b22f19c6422cf82f330ab3", + "https://deno.land/std@0.214.0/path/windows/separator.ts": "e51c5522140eff4f8402617c5c68a201fdfa3a1a8b28dc23587cff931b665e43", + "https://deno.land/std@0.214.0/path/windows/to_file_url.ts": "1cd63fd35ec8d1370feaa4752eccc4cc05ea5362a878be8dc7db733650995484", + "https://deno.land/std@0.214.0/path/windows/to_namespaced_path.ts": "4ffa4fb6fae321448d5fe810b3ca741d84df4d7897e61ee29be961a6aac89a4c", "https://deno.land/std@0.216.0/assert/_constants.ts": "a271e8ef5a573f1df8e822a6eb9d09df064ad66a4390f21b3e31f820a38e0975", "https://deno.land/std@0.216.0/assert/_diff.ts": "dcc63d94ca289aec80644030cf88ccbf7acaa6fbd7b0f22add93616b36593840", "https://deno.land/std@0.216.0/assert/_format.ts": "0ba808961bf678437fb486b56405b6fefad2cf87b5809667c781ddee8c32aff4", @@ -632,6 +732,40 @@ "https://deno.land/x/open_schemas@2.2.0/zod/openai/files.ts": "8d04da718feb619ac9a354dc712eeb5e2d6d47f002f758da00102445f4ce6ede", "https://deno.land/x/open_schemas@2.2.0/zod/openai/mod.ts": "51c5ce3419d0cf96e63006dc1370f86b0f5bbc39d36c819cdb9d917a92cc689d", "https://deno.land/x/open_schemas@2.2.0/zod/openai/models.ts": "ce0e620058cc98a09d42366f27cf68c9f4fc54ae03274b44605aa35eadf242a7", + "https://deno.land/x/open_schemas@2.2.1/types/openai/assistants.ts": "8fc2205010cf8be37fb3375d45cab3f4120810c964a7c3404f5e088e5e9f814a", + 
"https://deno.land/x/open_schemas@2.2.1/types/openai/chat.ts": "7dc1f14ed094a1fc64da9f477d80e0af757d209e9b78490f5131236dc7294d1b", + "https://deno.land/x/open_schemas@2.2.1/types/openai/files.ts": "3d8c116bce2010e7658c863f0359f3dd38a9830996a5220e0e2ab98e314ff69f", + "https://deno.land/x/open_schemas@2.2.1/types/openai/mod.ts": "9728c28bc6fd7a26d5b15a5c607b7aa9ee2a860f612678f604e0b625762029db", + "https://deno.land/x/open_schemas@2.2.1/types/openai/models.ts": "982f7666cfeda2c7c41e0195d698903bfb02bdc13bf50b35e79a9d8bdbed7ef5", + "https://deno.land/x/open_schemas@2.2.1/zod/openai/assistants.ts": "6151c70daec94db7421204c421fe96876ea8fb2459f161843133d7fc2614746c", + "https://deno.land/x/open_schemas@2.2.1/zod/openai/chat.ts": "5e8ec72d81613b4538dd3bfcca66d8f53f73c63fee2beb238f9863450bd2347b", + "https://deno.land/x/open_schemas@2.2.1/zod/openai/extra.ts": "6c72b5ebc05a5b123145ecc4d2a6d5d27faf12800381f7735242bc73396bd26e", + "https://deno.land/x/open_schemas@2.2.1/zod/openai/files.ts": "8d04da718feb619ac9a354dc712eeb5e2d6d47f002f758da00102445f4ce6ede", + "https://deno.land/x/open_schemas@2.2.1/zod/openai/mod.ts": "51c5ce3419d0cf96e63006dc1370f86b0f5bbc39d36c819cdb9d917a92cc689d", + "https://deno.land/x/open_schemas@2.2.1/zod/openai/models.ts": "ce0e620058cc98a09d42366f27cf68c9f4fc54ae03274b44605aa35eadf242a7", + "https://deno.land/x/postgres@v0.19.3/client.ts": "d141c65c20484c545a1119c9af7a52dcc24f75c1a5633de2b9617b0f4b2ed5c1", + "https://deno.land/x/postgres@v0.19.3/client/error.ts": "05b0e35d65caf0ba21f7f6fab28c0811da83cd8b4897995a2f411c2c83391036", + "https://deno.land/x/postgres@v0.19.3/connection/auth.ts": "db15c1659742ef4d2791b32834950278dc7a40cb931f8e434e6569298e58df51", + "https://deno.land/x/postgres@v0.19.3/connection/connection.ts": "198a0ecf92a0d2aa72db3bb88b8f412d3b1f6b87d464d5f7bff9aa3b6aff8370", + "https://deno.land/x/postgres@v0.19.3/connection/connection_params.ts": "463d7a9ed559f537a55d6928cab62e1c31b808d08cd0411b6ae461d0c0183c93", + 
"https://deno.land/x/postgres@v0.19.3/connection/message.ts": "20da5d80fc4d7ddb7b850083e0b3fa8734eb26642221dad89c62e27d78e57a4d", + "https://deno.land/x/postgres@v0.19.3/connection/message_code.ts": "12bcb110df6945152f9f6c63128786558d7ad1e61006920daaa16ef85b3bab7d", + "https://deno.land/x/postgres@v0.19.3/connection/packet.ts": "050aeff1fc13c9349e89451a155ffcd0b1343dc313a51f84439e3e45f64b56c8", + "https://deno.land/x/postgres@v0.19.3/connection/scram.ts": "532d4d58b565a2ab48fb5e1e14dc9bfb3bb283d535011e371e698eb4a89dd994", + "https://deno.land/x/postgres@v0.19.3/debug.ts": "8add17699191f11e6830b8c95d9de25857d221bb2cf6c4ae22254d395895c1f9", + "https://deno.land/x/postgres@v0.19.3/deps.ts": "c312038fe64b8368f8a294119f11d8f235fe67de84d7c3b0ef67b3a56628171a", + "https://deno.land/x/postgres@v0.19.3/mod.ts": "4930c7b44f8d16ea71026f7e3ef22a2322d84655edceacd55f7461a9218d8560", + "https://deno.land/x/postgres@v0.19.3/pool.ts": "2289f029e7a3bd3d460d4faa71399a920b7406c92a97c0715d6e31dbf1380ec3", + "https://deno.land/x/postgres@v0.19.3/query/array_parser.ts": "ff72d3e026e3022a1a223a6530be5663f8ebbd911ed978291314e7fe6c2f2464", + "https://deno.land/x/postgres@v0.19.3/query/decode.ts": "3e89ad2a662eab66a4f4e195ff0924d71d199af3c2f5637d1ae650301a03fa9b", + "https://deno.land/x/postgres@v0.19.3/query/decoders.ts": "6a73da1024086ab91e233648c850dccbde59248b90d87054bbbd7f0bf4a50681", + "https://deno.land/x/postgres@v0.19.3/query/encode.ts": "5b1c305bc7352a6f9fe37f235dddfc23e26419c77a133b4eaea42cf136481aa6", + "https://deno.land/x/postgres@v0.19.3/query/oid.ts": "21fc714ac212350ba7df496f88ea9e01a4ee0458911d0f2b6a81498e12e7af4c", + "https://deno.land/x/postgres@v0.19.3/query/query.ts": "510f9a27da87ed7b31b5cbcd14bf3028b441ac2ddc368483679d0b86a9d9f213", + "https://deno.land/x/postgres@v0.19.3/query/transaction.ts": "8f4eef68f8e9b4be216199404315e6e08fe1fe98afb2e640bffd077662f79678", + "https://deno.land/x/postgres@v0.19.3/query/types.ts": 
"540f6f973d493d63f2c0059a09f3368071f57931bba68bea408a635a3e0565d6", + "https://deno.land/x/postgres@v0.19.3/utils/deferred.ts": "5420531adb6c3ea29ca8aac57b9b59bd3e4b9a938a4996bbd0947a858f611080", + "https://deno.land/x/postgres@v0.19.3/utils/utils.ts": "ca47193ea03ff5b585e487a06f106d367e509263a960b787197ce0c03113a738", "https://deno.land/x/ts_morph@21.0.1/common/DenoRuntime.ts": "a505f1feae9a77c8f6ab1c18c55d694719e96573f68e9c36463b243e1bef4c3e", "https://deno.land/x/ts_morph@21.0.1/common/mod.ts": "01985d2ee7da8d1caee318a9d07664774fbee4e31602bc2bb6bb62c3489555ed", "https://deno.land/x/ts_morph@21.0.1/common/ts_morph_common.js": "236475fb18476307e07b3a97dc92fe8fb69e4a9df4ca59aa098dd6430bae7237", diff --git a/fresh.gen.ts b/fresh.gen.ts index 893722e..54bd5fa 100644 --- a/fresh.gen.ts +++ b/fresh.gen.ts @@ -28,6 +28,8 @@ import * as $v1_threads_thread_id_runs_run_id_submit_tool_outputs from "./routes import * as $v1_threads_thread_id_runs_index from "./routes/v1/threads/[thread_id]/runs/index.ts"; import * as $v1_threads_index from "./routes/v1/threads/index.ts"; import * as $v1_vector_stores_vector_store_id_ from "./routes/v1/vector_stores/[vector_store_id].ts"; +import * as $v1_vector_stores_vector_store_id_files_file_id_ from "./routes/v1/vector_stores/[vector_store_id]/files/[file_id].ts"; +import * as $v1_vector_stores_vector_store_id_files_index from "./routes/v1/vector_stores/[vector_store_id]/files/index.ts"; import * as $v1_vector_stores_index from "./routes/v1/vector_stores/index.ts"; import { type Manifest } from "$fresh/server.ts"; @@ -69,6 +71,10 @@ const manifest = { "./routes/v1/threads/index.ts": $v1_threads_index, "./routes/v1/vector_stores/[vector_store_id].ts": $v1_vector_stores_vector_store_id_, + "./routes/v1/vector_stores/[vector_store_id]/files/[file_id].ts": + $v1_vector_stores_vector_store_id_files_file_id_, + "./routes/v1/vector_stores/[vector_store_id]/files/index.ts": + $v1_vector_stores_vector_store_id_files_index, 
"./routes/v1/vector_stores/index.ts": $v1_vector_stores_index, }, islands: {}, diff --git a/jobs/job.ts b/jobs/job.ts index 5348fe8..056c0d7 100644 --- a/jobs/job.ts +++ b/jobs/job.ts @@ -5,6 +5,7 @@ import { RetrievalJob } from "$/jobs/retrieval.ts"; import { CodeInterpreterJob } from "$/jobs/code_interpreter.ts"; import { VectorStoreJob } from "$/jobs/vector_store.ts"; import { FileJob } from "$/jobs/file.ts"; +import { VectorStoreFileJob } from "$/jobs/vector_store_file.ts"; /** * Represents a job message containing information about a job to be executed. @@ -23,7 +24,8 @@ export interface JobMessage { | "retrieval" | "code_interpreter" | "file" - | "vector_store"; + | "vector_store" + | "vector_store_file"; } /** @@ -59,6 +61,9 @@ export class Job { case "vector_store": await VectorStoreJob.execute(params); break; + case "vector_store_file": + await VectorStoreFileJob.execute(params); + break; default: error(`Unknown the type(${type}) of job message.`); } diff --git a/jobs/vector_store.ts b/jobs/vector_store.ts index 40abde0..c5a13aa 100644 --- a/jobs/vector_store.ts +++ b/jobs/vector_store.ts @@ -2,17 +2,67 @@ import * as log from "$std/log/mod.ts"; import { VectorStoreRepository } from "$/repositories/vector_store.ts"; import { now } from "$/utils/date.ts"; import { kv } from "$/repositories/_repository.ts"; +import Client from "$/providers/vector_db/pgvector.ts"; +import { VectorStoreFileRepository } from "$/repositories/vector_store_file.ts"; + +const LOG_TAG = "[VectorStoreJob]"; export class VectorStoreJob { - // private static index( - // organization: string, - // vectorStoreId: string, - // fileId?: string, - // ) {} + private static async create(organization: string, vectorStoreId: string) { + const logName = `vector store(${vectorStoreId})`; + log.info(`${LOG_TAG} start {create} action for ${logName}`); + + const vsRepo = VectorStoreRepository.getInstance(); + const vsfRepo = VectorStoreFileRepository.getInstance(); + + const vs = await 
vsRepo.findById(vectorStoreId, organization); + const files = await vsfRepo.findAll( + vectorStoreId, + ); + if (!vs || !files) { + log.warn(`${LOG_TAG} can not find ${logName} or files.`); + return; + } + + await Client.create(vectorStoreId); + + const operation = kv.atomic(); + files.forEach((f) => { + operation.enqueue({ + type: "vector_store_file", + args: JSON.stringify({ + action: "index", + organization, + vectorStoreId, + fileId: f.id, + }), + }); + }); + vsRepo.update( + vs, + { + file_counts: { + ...vs.file_counts, + in_progress: vs.file_counts.in_progress + files.length, + }, + }, + organization, + operation, + ); + await operation.commit(); + log.info(`${LOG_TAG} completed {create} action for ${logName}`); + } + + private static async delete(vectorStoreId: string) { + const logName = `vector store(${vectorStoreId})`; + log.info(`${LOG_TAG} start {delete} action for ${logName}`); + await Client.drop(vectorStoreId); + log.info(`${LOG_TAG} completed {delete} action for ${logName}`); + } private static async expire(organization: string, vectorStoreId: string) { const logName = `vector store(${organization}/${vectorStoreId})`; - log.info(`[VectorStoreJob] start expiring ${logName}`); + log.info(`${LOG_TAG} start expiring ${logName}`); const repository = VectorStoreRepository.getInstance(); const vectorStore = await repository.findById(vectorStoreId, organization); if (vectorStore.status === "expired") { @@ -46,10 +96,10 @@ export class VectorStoreJob { .commit(); if (ok) { - log.info(`[VectorStoreJob] enqueue next expiring job for ${logName}`); + log.info(`${LOG_TAG} enqueue next expiring job for ${logName}`); } else { log.error( - `[VectorStoreJob] can't enqueue next expiring job for ${logName}`, + `${LOG_TAG} can't enqueue next expiring job for ${logName}`, ); } } @@ -58,10 +108,16 @@ export class VectorStoreJob { public static async execute(args: { organization: string; vectorStoreId: string; - action: "expire"; + action: "create" | "delete" | "expire"; 
}) { const { action, vectorStoreId, organization } = args; switch (action) { + case "create": + await this.create(organization, vectorStoreId); + break; + case "delete": + await this.delete(vectorStoreId); + break; case "expire": await this.expire(organization, vectorStoreId); break; diff --git a/jobs/vector_store_file.ts b/jobs/vector_store_file.ts new file mode 100644 index 0000000..94e820c --- /dev/null +++ b/jobs/vector_store_file.ts @@ -0,0 +1,186 @@ +import { kv } from "$/repositories/base.ts"; +import { VectorStoreRepository } from "$/repositories/vector_store.ts"; +import { VectorStoreFileRepository } from "$/repositories/vector_store_file.ts"; +import { NotFound } from "$/utils/errors.ts"; +import * as log from "$std/log/mod.ts"; +import { Jina } from "$/providers/reader/jina.ts"; +import { getClient } from "$/providers/embedding/client.ts"; +import { + StaticChunkingStrategy, + VectorStoreFileObject, +} from "$/schemas/openai/mod.ts"; +import Tokeniser from "$/providers/tokeniser/open_tokeniser.ts"; +import { VectorStoreRecord } from "$/types/open_assistant/mod.ts"; +import VectorDb from "$/providers/vector_db/pgvector.ts"; + +const LOG_TAG = "[VectorStoreFileJob]"; + +export class VectorStoreFileJob { + private static async getFileContent( + vsf: VectorStoreFileObject, + ): Promise<{ fileName: string; content: string }> { + if (vsf.type === "url") { + const { title, content } = await Jina.read(vsf.url as string); + return { + fileName: title, + content, + }; + } + return { + fileName: "", + content: "", + }; + } + + private static async chunkFileContent( + content: string, + strategy: StaticChunkingStrategy, + ) { + const { static: staticStrategy } = strategy; + return await Tokeniser.createChunks( + content, + staticStrategy.max_chunk_size_tokens, + staticStrategy.chunk_overlap_tokens, + ); + } + + private static async embedContent( + chunks: string[], + fileId: string, + fileName: string, + ) { + const embeddingClient = await getClient(); + const 
embeddingsData = chunks.map(async (content: string) => { + const { data: [embedding] } = await embeddingClient.createEmbedding({ + input: content, + model: "models/text-embedding-004", + }); + + return { + file_id: fileId, + file_name: fileName, + content, + embedding: embedding.embedding, + } as VectorStoreRecord; + }); + return await Promise.all(embeddingsData); + } + + private static async index( + organization: string, + vectorStoreId: string, + fileId: string, + ) { + const logName = `vector store(${vectorStoreId}) and file(${fileId})`; + log.info(`${LOG_TAG} start {index} action for ${logName}`); + + const vsfRepo = VectorStoreFileRepository.getInstance(); + const vsRepo = VectorStoreRepository.getInstance(); + let vsf, vs; + try { + vsf = await vsfRepo.findById(fileId, vectorStoreId); + vs = await vsRepo.findById(vectorStoreId, organization); + } catch (e) { + log.error(`${LOG_TAG} find ${logName} with ${e}`); + } + if (!vsf || !vs) return; + + const { fileName, content } = await this.getFileContent(vsf); + const chunks = await this.chunkFileContent( + content, + vsf.chunking_strategy as StaticChunkingStrategy, + ); + const records = await this.embedContent( + chunks.map((c) => c.content), + fileId, + fileName, + ); + await VectorDb.insert(vectorStoreId, records); + + try { + const operation = kv.atomic(); + vsfRepo.update( + vsf, + { + status: "completed", + }, + vectorStoreId, + operation, + ); + vsRepo.update( + vs, + { + file_counts: { + ...vs.file_counts, + in_progress: vs.file_counts.in_progress - 1, + completed: vs.file_counts.completed + 1, + }, + usage_bytes: await VectorDb.size(vectorStoreId), + }, + organization, + operation, + ); + await operation.commit(); + log.info(`${LOG_TAG} completed {index} action for ${logName}`); + } catch (e) { + switch (e.constructor) { + case NotFound: + log.error(`${LOG_TAG} ${logName} were not found`); + return; + } + } + } + + private static async delete( + organization: string, + vectorStoreId: string, + fileId: 
string, +  ) { +    const logName = `vector store(${vectorStoreId}) and file(${fileId})`; +    log.info(`${LOG_TAG} start {delete} action for ${logName}`); +    const vsfRepo = VectorStoreFileRepository.getInstance(); +    const vsRepo = VectorStoreRepository.getInstance(); +    let vsf, vs; +    try { +      vsf = await vsfRepo.findById(fileId, vectorStoreId); +      vs = await vsRepo.findById(vectorStoreId, organization); +    } catch (e) { +      log.error(`${LOG_TAG} find ${logName} with ${e}`); +    } +    if (!vsf || !vs) return; + +    await VectorDb.delete(vectorStoreId, fileId); + +    vsRepo.update( +      vs, +      { +        usage_bytes: await VectorDb.size(vectorStoreId), +        file_counts: { +          ...vs.file_counts, +          [vsf.status]: vs.file_counts[vsf.status] - 1, +          total: vs.file_counts.total - 1, +        }, +      }, +      organization, +    ); + +    log.info(`${LOG_TAG} completed {delete} action for ${logName}`); +  } + +  public static async execute(args: { +    organization: string; +    vectorStoreId: string; +    fileId: string; +    action: "index" | "delete"; +  }) { +    const { action, vectorStoreId, organization, fileId } = args; +    switch (action) { +      case "index": +        await this.index(organization, vectorStoreId, fileId); +        break; +      case "delete": +        await this.delete(organization, vectorStoreId, fileId); +        break; +    } +  } +} diff --git a/providers/client.ts b/providers/client.ts index f9aaa44..08f2014 100644 --- a/providers/client.ts +++ b/providers/client.ts @@ -1,7 +1,4 @@ -import { -  CreateChatCompletionRequest, -  CreateChatCompletionResponse, -} from "@open-schemas/zod"; +import { CreateChatCompletionRequest } from "$open-schemas/types/openai/mod.ts"; import google from "$/providers/google/client.ts"; import ollama from "$/providers/ollama/client.ts"; import { getProvider } from "$/utils/llm.ts"; @@ -40,7 +37,7 @@ export default class Client { public static createChatCompletion( request: CreateChatCompletionRequest, mappedModel?: string, -  ): Promise { +  ) { const client = this.getProviderClient(); return client.createChatCompletion(request,
mappedModel); } diff --git a/providers/client/google.ts b/providers/client/google.ts new file mode 100644 index 0000000..e66d0ca --- /dev/null +++ b/providers/client/google.ts @@ -0,0 +1,19 @@ +import { GOOGLE_API_KEY, GOOGLE_API_URL } from "$/utils/constants.ts"; + +export default class Client { + static apiVersion = "v1"; + static baseURL = Deno.env.get(GOOGLE_API_URL) ?? + "https://generativelanguage.googleapis.com"; + + static fetch(input: string, init?: RequestInit) { + return fetch(`${this.baseURL}/${this.apiVersion}${input}`, { + ...init, + headers: { + ...init?.headers, + "X-Goog-Api-Key": Deno.env.get(GOOGLE_API_KEY) as string, + }, + }).then((r) => { + return r.json(); + }); + } +} diff --git a/providers/embedding/base.ts b/providers/embedding/base.ts new file mode 100644 index 0000000..7e88e99 --- /dev/null +++ b/providers/embedding/base.ts @@ -0,0 +1,52 @@ +import { NotImplemented } from "$/utils/errors.ts"; +import { + CreateEmbeddingRequest, + CreateEmbeddingResponse, +} from "$/types/openai/embedding.ts"; + +/** + * Base class for embedding model providers. + * Provides a common interface for creating chat completions. + * + * @example + * ```ts + * import { Base } from "$/providers/embedding/base.ts"; + * + * class Client extends Base { + * static createEmbedding() { + * ... + * } + * } + * ``` + */ +export class Base { + protected static _fetch( + _input: string, + _init?: RequestInit, + ): Promise { + throw new NotImplemented("Base._fetch"); + } + + /** + * Creates an embedding vector representing the input text. + * + * @param _request - The embedding request. + * @param _mappedModel - Optional mapped model to use for the embedding. + * + * @returns A promise that resolves to the create embedding response. + * @throws {NotImplemented} If the method is not implemented. 
+ * + * @example + * ```ts + * import { Base } from "$/providers/embedding/base.ts"; + * + * Base.createEmbedding(...); + * ``` + */ + static createEmbedding( + _request: CreateEmbeddingRequest, + _mappedModel?: string, + ): Promise { + throw new NotImplemented("Base.createEmbedding"); + } +} diff --git a/providers/embedding/client.ts b/providers/embedding/client.ts new file mode 100644 index 0000000..094eb1b --- /dev/null +++ b/providers/embedding/client.ts @@ -0,0 +1,34 @@ +import { EMBEDDING_POVIDER } from "$/consts/envs.ts"; +import { GOOGLE } from "$/consts/providers.ts"; +import { getEnv } from "$/utils/env.ts"; + +function getModule(provider?: string) { + switch (provider) { + case GOOGLE: + return import("$/providers/embedding/google.ts"); + default: + throw new Error(`Try one of the following: ${GOOGLE}.`, { + cause: `Unsupported LLM provider: ${provider}.`, + }); + } +} + +/** + * Returns the embedding client based on the environment variable EMBEDDING_POVIDER. + * + * @returns The embedding client. + * @throws {EnvNotSet} If the EMBEDDING_POVIDER environment variable is not set. + * @throws {Error} If an unsupported embedding provider is specified. 
+ * + * @example + * ```typescript + * import { getClient } from "$/providers/embedding/client.ts"; + * + * const client = await getClient(); + * const response = await client.createEmbedding({}); + * console.log(response); + * ``` + */ +export async function getClient() { + return (await getModule(getEnv(EMBEDDING_POVIDER))).default; +} diff --git a/providers/embedding/google.ts b/providers/embedding/google.ts new file mode 100644 index 0000000..c4d6fd9 --- /dev/null +++ b/providers/embedding/google.ts @@ -0,0 +1,61 @@ +import Base from "$/providers/client/google.ts"; +import { + CreateEmbeddingRequest, + CreateEmbeddingResponse, +} from "$/schemas/openai/embedding.ts"; +import { + EmbedContentRequest, + EmbedContentResponse, +} from "$/schemas/google/mod.ts"; + +export default class Google extends Base { + static async createEmbedding( + request: CreateEmbeddingRequest, + mappedModel?: string, + ): Promise { + const ecq = CreateEmbeddingRequest.transform( + ({ input, dimensions }): EmbedContentRequest => { + let parts: EmbedContentRequest["content"]["parts"] = []; + if (typeof input === "string") { + parts = [{ + text: input, + }]; + } else { + if (input.every((item) => typeof item === "string")) { + parts = input.map((i) => ({ text: i as string })); + } + } + return { + content: { parts }, + outputDimensionality: dimensions, + }; + }, + ) + .parse(request); + + const ecp = await this.fetch(`/${request.model}:embedContent`, { + method: "POST", + body: JSON.stringify(ecq), + }); + + return EmbedContentResponse.transform( + ({ embedding }): CreateEmbeddingResponse => { + return { + object: "list", + data: [ + { + object: "embedding", + embedding: embedding.values, + index: 0, + }, + ], + model: mappedModel ?? 
request.model, + usage: { + prompt_tokens: 0, + total_tokens: 0, + }, + }; + }, + ).parse(ecp); + } +} diff --git a/providers/google/client.ts b/providers/google/client.ts index 0a6fcc5..75de24b 100644 --- a/providers/google/client.ts +++ b/providers/google/client.ts @@ -1,5 +1,5 @@ -import { type CreateChatCompletionRequestType } from "openai_schemas"; -import { GOOGLE_API_URL, GOOGLE_API_KEY } from "$/utils/constants.ts"; +import { type CreateChatCompletionRequest } from "$open-schemas/types/openai/mod.ts"; +import { GOOGLE_API_KEY, GOOGLE_API_URL } from "$/utils/constants.ts"; import { CreateChatCompletionRequestToGenerateContentRequest, GenerateContentResponseToCreateChatCompletionResponse, @@ -11,7 +11,7 @@ export default class Client { static apiVersion = "v1beta"; public static async createChatCompletion( - request: CreateChatCompletionRequestType, + request: CreateChatCompletionRequest, mappedModel?: string, ) { const modelMethod = request.stream @@ -35,8 +35,8 @@ export default class Client { return readable; } - const completion = - GenerateContentResponseToCreateChatCompletionResponse.parse( + const completion = GenerateContentResponseToCreateChatCompletionResponse + .parse( await response.json(), ); completion.model = mappedModel ?? 
request.model; diff --git a/providers/ollama/client.ts b/providers/ollama/client.ts index ef64973..886668c 100644 --- a/providers/ollama/client.ts +++ b/providers/ollama/client.ts @@ -1,7 +1,7 @@ -import { CreateChatCompletionRequestType } from "openai_schemas"; +import { CreateChatCompletionRequest } from "$open-schemas/types/openai/mod.ts"; import { - CreateChatCompletionRequestToChatRequest, ChatResponseToCreateChatCompletionResponse, + CreateChatCompletionRequestToChatRequest, } from "$/providers/ollama/transforms.ts"; import { ChatTransformStream } from "$/providers/ollama/streams.ts"; import { InternalServerError } from "$/utils/errors.ts"; @@ -15,14 +15,14 @@ export default class Client { /** * Map openai's `/chat/completions` api to `/api/chat` api of ollama. * - * @param {CreateChatCompletionRequestType} request in openai format + * @param {CreateChatCompletionRequest} request in openai format * @param {string} [mappedModel] - Optional model override for the anthropic response. * If provided, the model property will be set to this value. * * @returns the chat completion object or the readable stream of chat completion chunk. 
*/ public static async createChatCompletion( - request: CreateChatCompletionRequestType, + request: CreateChatCompletionRequest, mappedModel?: string, ) { const response = await this.fetch("/chat", { @@ -53,7 +53,9 @@ export default class Client { if (response.status >= 400) { response.json().then((body) => { log.error( - `[${LOG_TAG}] client fetch with response status: ${response.status}, body: ${JSON.stringify(body)}`, + `[${LOG_TAG}] client fetch with response status: ${response.status}, body: ${ + JSON.stringify(body) + }`, ); }); throw new InternalServerError(); diff --git a/providers/reader/jina.ts b/providers/reader/jina.ts new file mode 100644 index 0000000..8aa5b3f --- /dev/null +++ b/providers/reader/jina.ts @@ -0,0 +1,20 @@ +export class Jina { + static MATCHER = /http[s]?\:\/\//; + + static async read(url: string): Promise<{ title: string; content: string }> { + return await fetch(`https://r.jina.ai/${url}`, { + method: "GET", + headers: { + "Accept": "application/json", + "X-With-Links-Summary": "true", + "X-With-Images-Summary": "true", + "X-With-Generated-Alt": "true", + }, + }).then((r) => r.json()) + .then((body) => { + if (body["code"] === 200) { + return body["data"]; + } + }); + } +} diff --git a/providers/tokeniser/open_tokeniser.ts b/providers/tokeniser/open_tokeniser.ts new file mode 100644 index 0000000..e73307f --- /dev/null +++ b/providers/tokeniser/open_tokeniser.ts @@ -0,0 +1,36 @@ +import { TOKENISER_API_URL } from "$/consts/envs.ts"; + +/** + * The client for OpenTokeniser service. + */ +export default class Client { + static baseURL = Deno.env.get(TOKENISER_API_URL) ?? 
+ "http://localhost:8001"; + + static fetch(input: string, init?: RequestInit) { + return fetch(`${this.baseURL}${input}`, { + ...init, + headers: { + ...init?.headers, + "Content-Type": "application/json", + }, + }); + } + + static async createChunks( + content: string, + maxChunkSize?: number, + chunkOverlap?: number, + ): Promise<{ content: string; tokens: number }[]> { + return await this.fetch("/chunks", { + method: "POST", + body: JSON.stringify({ + content, + max_chunk_size_tokens: maxChunkSize, + chunk_overlap_tokens: chunkOverlap, + }), + }).then((r) => { + return r.json(); + }); + } +} diff --git a/providers/vector_db/pgvector.ts b/providers/vector_db/pgvector.ts new file mode 100644 index 0000000..6d592f6 --- /dev/null +++ b/providers/vector_db/pgvector.ts @@ -0,0 +1,77 @@ +import { Pool, QueryArguments } from "$postgres/mod.ts"; +import * as log from "$std/log/mod.ts"; + +const databaseUrl = Deno.env.get("PGVECTOR_URL")!; +const embeddingDimension = Deno.env.get("EMBEDDING_DIMENSION")!; + +export default class Client { + static pool = new Pool(databaseUrl, 3, true); + + public static async create(vectorStoreId: string) { + const sql = ` + CREATE TABLE IF NOT EXISTS ${vectorStoreId} ( + id BIGSERIAL PRIMARY KEY, + file_id TEXT NOT NULL, + file_name TEXT NOT NULL, + content TEXT NOT NULL, + embedding VECTOR(${embeddingDimension}) + ) + `; + await this.query(sql); + } + + public static async drop(vectorStoreId: string) { + await this.query(`DROP TABLE IF EXISTS ${vectorStoreId}`); + } + + public static async delete(vectorStoreId: string, fileId: string) { + await this.query(`DELETE FROM ${vectorStoreId} WHERE file_id = $1`, [ + fileId, + ]); + } + + public static async insert( + vectorStoreId: string, + data: { + file_id: string; + file_name: string; + content: string; + embedding: number[]; + }[], + ) { + const sql = + `INSERT INTO ${vectorStoreId}(file_id, file_name, content, embedding) VALUES ${ + data.map((d) => + 
`('${d.file_id}','${this.escapeSql(d.file_name)}','${ + this.escapeSql(d.content) + }','[${d.embedding}]')` + ).join(", ") + }`; + + await this.query(sql); + } + + public static async size(vectorStoreId: string): Promise { + const sql = `SELECT pg_table_size('${vectorStoreId}') as size`; + + const { rows: [{ size }] } = await this.query(sql); + return Number(size); + } + + private static escapeSql(sql: string) { + return sql.replace(/(['\\])/g, "$1$1"); + } + + private static async query(query: string, args?: QueryArguments) { + const connection = await this.pool.connect(); + let result = { rows: [] }; + try { + result = await connection.queryObject(query, args); + } catch (e) { + log.error(`[pgvector] execute query(${query}) with ${e}`); + } finally { + connection.release(); + } + return result; + } +} diff --git a/repositories/base.ts b/repositories/base.ts index 35c6e88..a58877c 100644 --- a/repositories/base.ts +++ b/repositories/base.ts @@ -1,4 +1,4 @@ -import { ObjectMeta, Pagination, Ordering } from "@open-schemas/zod/openai"; +import { ObjectMeta, Ordering, Pagination } from "@open-schemas/zod/openai"; import { z } from "zod"; import { ulid } from "$std/ulid/mod.ts"; import { Conflict, NotFound } from "$/utils/errors.ts"; @@ -6,7 +6,10 @@ import { now } from "$/utils/date.ts"; import { DENO_KV_PATH } from "$/consts/envs.ts"; let path = undefined; -if ((await Deno.permissions.query({ name: "env", variable: DENO_KV_PATH })).state === "granted") { +if ( + (await Deno.permissions.query({ name: "env", variable: DENO_KV_PATH })) + .state === "granted" +) { path = Deno.env.get(DENO_KV_PATH); } export const kv = await Deno.openKv(path); @@ -75,7 +78,10 @@ export class Repository { const options = { reverse: ordering?.order === "desc", }; - return await Array.fromAsync(kv.list(selector, options), ({ value }) => value); + return await Array.fromAsync( + kv.list(selector, options), + ({ value }) => value, + ); } async findAllByPage( @@ -124,7 +130,10 @@ export class 
Repository { reverse: true, } as Deno.KvListOptions; - const entrys = await Array.fromAsync(kv.list(selector, options), ({ value }) => value); + const entrys = await Array.fromAsync( + kv.list(selector, options), + ({ value }) => value, + ); return entrys.at(0); } @@ -156,7 +165,7 @@ export class Repository { operation = kv.atomic(); } - const id = `${this.idPrefix}-${ulid()}`; + const id = `${this.idPrefix}_${ulid()}`; const value = { id, object: this.object, @@ -169,7 +178,10 @@ export class Repository { if (this.hasSecondaryKey) { const secondaryKey = this.genKvKey(undefined, id); - operation.check({ key: secondaryKey, versionstamp: null }).set(secondaryKey, key); + operation.check({ key: secondaryKey, versionstamp: null }).set( + secondaryKey, + key, + ); } if (commit) { @@ -206,7 +218,11 @@ export class Repository { return value; } - async destory(id: string, parentId?: string, operation?: Deno.AtomicOperation) { + async destory( + id: string, + parentId?: string, + operation?: Deno.AtomicOperation, + ) { let commit = true; if (operation) { commit = false; diff --git a/repositories/vector_store.ts b/repositories/vector_store.ts index f23f0d3..d92e5a8 100644 --- a/repositories/vector_store.ts +++ b/repositories/vector_store.ts @@ -2,7 +2,7 @@ import { kv, Repository } from "$/repositories/base.ts"; import { CreateVectorStoreRequest, VectorStoreObject, -} from "$open-schemas/types/openai/mod.ts"; +} from "$/schemas/openai/assistant.ts"; import { ORGANIZATION, VECTOR_STORE_KEY, @@ -51,7 +51,7 @@ export class VectorStoreRepository extends Repository { totalFilesCount = fields.file_ids.length; } - const id = `${this.idPrefix}-${ulid()}`; + const id = `${this.idPrefix}_${ulid()}`; const key = this.genKvKey(org, id); const currentTime = now(); const value = { @@ -80,11 +80,21 @@ export class VectorStoreRepository extends Repository { await VectorStoreFileRepository.getInstance().createByFileId( fileId, id, + fields.chunking_strategy, operation, ); }); } + 
operation.enqueue({ + type: "vector_store", + args: JSON.stringify({ + action: "create", + organization: org, + vectorStoreId: id, + }), + }); + if (fields.expires_after) { operation.enqueue({ type: "vector_store", @@ -104,4 +114,38 @@ export class VectorStoreRepository extends Repository { } return value; } + + async destroyWithFiles( + vectorStoreId: string, + organization: string, + operation?: Deno.AtomicOperation, + ) { + let commit = true; + if (operation) { + commit = false; + } else { + operation = kv.atomic(); + } + + const vsfRepo = VectorStoreFileRepository.getInstance(); + const files = await vsfRepo.findAll(vectorStoreId); + files.forEach((f) => { + operation.delete(vsfRepo.genKvKey(vectorStoreId, f.id)); + }); + + this.destory(vectorStoreId, organization, operation); + + operation.enqueue({ + type: "vector_store", + args: JSON.stringify({ + action: "delete", + organization, + vectorStoreId, + }), + }); + + if (commit) { + await operation.commit(); + } + } } diff --git a/repositories/vector_store_file.ts b/repositories/vector_store_file.ts index 72d1530..543bb3f 100644 --- a/repositories/vector_store_file.ts +++ b/repositories/vector_store_file.ts @@ -1,6 +1,6 @@ import { kv, Repository } from "$/repositories/base.ts"; -import { VectorStoreFile } from "$open-schemas/types/openai/assistants.ts"; import { + FILE_PREFIX, VECTOR_STORE_FILE_KEY, VECTOR_STORE_FILE_OBJECT, VECTOR_STORE_FILE_PREFIX, @@ -8,8 +8,25 @@ import { } from "$/consts/api.ts"; import { now } from "$/utils/date.ts"; import { Conflict } from "$/utils/errors.ts"; +import { + AutoChunkingStrategy, + StaticChunkingStrategy, + VectorStoreFileObject, + VectorStoreObject, +} from "$/schemas/openai/mod.ts"; +import { Jina } from "$/providers/reader/jina.ts"; +import { ulid } from "$std/ulid/mod.ts"; + +const DEFAULT_CHUNKING_STRATEGY = { + type: "static", + static: { + max_chunk_size_tokens: 800, + chunk_overlap_tokens: 400, + }, +}; -export class VectorStoreFileRepository extends Repository { 
+export class VectorStoreFileRepository + extends Repository { private static instance: VectorStoreFileRepository; private constructor() { super( @@ -30,6 +47,7 @@ export class VectorStoreFileRepository extends Repository { async createByFileId( fileId: string, vectorStoreId: string, + chunkingStrategy?: AutoChunkingStrategy | StaticChunkingStrategy | null, operation?: Deno.AtomicOperation, ) { let commit = true; @@ -39,29 +57,84 @@ export class VectorStoreFileRepository extends Repository { operation = kv.atomic(); } + let type, url, realFileId = fileId; + if (Jina.MATCHER.test(fileId)) { + realFileId = `${FILE_PREFIX}_${ulid()}`; + type = "url"; + url = fileId; + } + const value = { - id: fileId, + id: realFileId, object: this.object, created_at: now(), vector_store_id: vectorStoreId, usage_bytes: 0, status: "in_progress", - } as VectorStoreFile; + chunking_strategy: (!chunkingStrategy || chunkingStrategy.type === "auto") + ? DEFAULT_CHUNKING_STRATEGY + : chunkingStrategy, + type, + url, + } as VectorStoreFileObject; - const key = this.genKvKey(vectorStoreId, fileId); - operation.check({ key, versionstamp: null }).set(key, value).enqueue({ + const key = this.genKvKey(vectorStoreId, realFileId); + operation.check({ key, versionstamp: null }).set(key, value); + + if (commit) { + const { ok } = await operation.commit(); + if (!ok) throw new Conflict(); + } + return value; + } + + async createByFileIdWithJob( + fileId: string, + vectorStoreId: string, + organization: string, + chunkingStrategy?: AutoChunkingStrategy | StaticChunkingStrategy | null, + ) { + const operation = kv.atomic(); + + const value = await this.createByFileId( + fileId, + vectorStoreId, + chunkingStrategy, + operation, + ); + operation.enqueue({ type: "vector_store_file", args: JSON.stringify({ action: "index", vectorStoreId, fileId, + organization, }), }); - if (commit) { - const { ok } = await operation.commit(); - if (!ok) throw new Conflict(); - } + const { ok } = await operation.commit(); + 
if (!ok) throw new Conflict(); return value; } + + async destoryByObject( + vs: VectorStoreObject, + vsf: VectorStoreFileObject, + organization: string, + ) { + const operation = kv.atomic(); + const key = this.genKvKey(vs.id, vsf.id); + operation.delete(key) + .enqueue({ + type: "vector_store_file", + args: JSON.stringify({ + action: "delete", + vectorStoreId: vs.id, + fileId: vsf.id, + organization, + }), + }); + + await operation.commit(); + } } diff --git a/routes/v1/vector_stores/[vector_store_id].ts b/routes/v1/vector_stores/[vector_store_id].ts index a77f5b3..3f8941f 100644 --- a/routes/v1/vector_stores/[vector_store_id].ts +++ b/routes/v1/vector_stores/[vector_store_id].ts @@ -41,7 +41,7 @@ export const handler: Handlers = { await getVectorStore(ctx); const { id, parentId } = getIDs(ctx); - await VectorStoreRepository.getInstance().destory(id, parentId); + await VectorStoreRepository.getInstance().destroyWithFiles(id, parentId); return Response.json(DeleteVectorStoreResponse.parse({ id })); }, diff --git a/routes/v1/vector_stores/[vector_store_id]/files/[file_id].ts b/routes/v1/vector_stores/[vector_store_id]/files/[file_id].ts new file mode 100644 index 0000000..4057e08 --- /dev/null +++ b/routes/v1/vector_stores/[vector_store_id]/files/[file_id].ts @@ -0,0 +1,36 @@ +import { FreshContext, Handlers } from "$fresh/server.ts"; +import type { VectorStoreFileObject } from "$open-schemas/types/openai/mod.ts"; +import { DeleteVectorStoreFileResponse } from "$open-schemas/zod/openai/mod.ts"; +import { VectorStoreFileRepository } from "$/repositories/vector_store_file.ts"; +import { getVectorStore } from "$/routes/v1/vector_stores/[vector_store_id].ts"; + +const getIDs = (ctx: FreshContext) => ({ + id: ctx.params.file_id as string, + parentId: ctx.params.vector_store_id as string, +}); + +export async function getVectorStoreFile(ctx: FreshContext) { + const { id, parentId } = getIDs(ctx); + + return await VectorStoreFileRepository.getInstance().findById(id, 
parentId); +} + +export const handler: Handlers = { + async GET(_req, ctx: FreshContext) { + return Response.json(await getVectorStoreFile(ctx)); + }, + + async DELETE(_req: Request, ctx: FreshContext) { + const vs = await getVectorStore(ctx); + const vsf = await getVectorStoreFile(ctx); + const organization = ctx.state.organization as string; + + await VectorStoreFileRepository.getInstance().destoryByObject( + vs, + vsf, + organization, + ); + + return Response.json(DeleteVectorStoreFileResponse.parse({ id: vsf.id })); + }, +}; diff --git a/routes/v1/vector_stores/[vector_store_id]/files/index.ts b/routes/v1/vector_stores/[vector_store_id]/files/index.ts new file mode 100644 index 0000000..2303534 --- /dev/null +++ b/routes/v1/vector_stores/[vector_store_id]/files/index.ts @@ -0,0 +1,39 @@ +import { FreshContext, Handlers } from "$fresh/server.ts"; +import type { VectorStoreFileObject } from "$open-schemas/types/openai/mod.ts"; +import { + CreateVectorStoreFileRequest, + Ordering, + Pagination, +} from "$/schemas/openai/mod.ts"; +import { VectorStoreFileRepository } from "$/repositories/vector_store_file.ts"; + +export const handler: Handlers = { + async GET(_req: Request, ctx: FreshContext) { + const params = Object.fromEntries(ctx.url.searchParams); + const organization = ctx.state.organization as string; + + const page = await VectorStoreFileRepository.getInstance().findAllByPage( + organization, + Pagination.parse(params), + Ordering.parse(params), + ); + + return Response.json(page); + }, + + async POST(req: Request, ctx: FreshContext) { + const fields = CreateVectorStoreFileRequest.parse(await req.json()); + const vectorStoreId = ctx.params.vector_store_id as string; + const organization = ctx.state.organization as string; + + const vectorStoreFile = await VectorStoreFileRepository.getInstance() + .createByFileIdWithJob( + fields.file_id, + vectorStoreId, + organization, + fields.chunking_strategy, + ); + + return Response.json(vectorStoreFile, { status: 201 
}); + }, +}; diff --git a/routes/v1/vector_stores/index.ts b/routes/v1/vector_stores/index.ts index 1bc13d0..429cca9 100644 --- a/routes/v1/vector_stores/index.ts +++ b/routes/v1/vector_stores/index.ts @@ -4,7 +4,7 @@ import { Ordering, Pagination, type VectorStoreObject, -} from "$open-schemas/zod/openai/mod.ts"; +} from "$/schemas/openai/mod.ts"; import { VectorStoreRepository } from "$/repositories/vector_store.ts"; export const handler: Handlers = { diff --git a/schemas/google/mod.ts b/schemas/google/mod.ts new file mode 100644 index 0000000..b780346 --- /dev/null +++ b/schemas/google/mod.ts @@ -0,0 +1,50 @@ +import z from "zod"; + +export type Part = z.infer; +export const Part = z.union([ + z.object({ + text: z.string(), + }), + z.object({ + inlineData: z.object({ + mimeType: z.string(), + data: z.string(), + }), + }), +]); + +export type Content = z.infer; +export const Content = z.object({ + parts: z.array(Part), + role: z.union([z.literal("user"), z.literal("model")]).optional(), +}); + +export type TaskType = z.infer; +export const TaskType = z.union([ + z.literal("TASK_TYPE_UNSPECIFIED"), + z.literal("RETRIEVAL_QUERY"), + z.literal("RETRIEVAL_DOCUMENT"), + z.literal("SEMANTIC_SIMILARITY"), + z.literal("CLASSIFICATION"), + z.literal("CLUSTERING"), + z.literal("QUESTION_ANSWERING"), + z.literal("FACT_VERIFICATION"), +]); + +export type EmbedContentRequest = z.infer; +export const EmbedContentRequest = z.object({ + content: Content, + taskType: TaskType.optional(), + title: z.string().optional(), + outputDimensionality: z.union([z.number(), z.null()]).optional(), +}); + +export type ContentEmbedding = z.infer; +export const ContentEmbedding = z.object({ + values: z.array(z.number()), +}); + +export type EmbedContentResponse = z.infer; +export const EmbedContentResponse = z.object({ + embedding: ContentEmbedding, +}); diff --git a/schemas/openai/assistant.ts b/schemas/openai/assistant.ts new file mode 100644 index 0000000..c381759 --- /dev/null +++ 
b/schemas/openai/assistant.ts @@ -0,0 +1,604 @@ +import z from "zod"; + +export type Metadata = z.infer; +export const Metadata = z.record(z.string()); + +export type ObjectMeta = z.infer; +export const ObjectMeta = z.object({ + id: z.string(), + created_at: z.number(), +}); + +export type CodeInterpreterTool = z.infer; +export const CodeInterpreterTool = z.object({ + type: z.literal("code_interpreter").default("code_interpreter"), +}); + +export type RetrievalTool = z.infer; +export const RetrievalTool = z.object({ + type: z.literal("retrieval").default("retrieval"), +}); + +export type FunctionTool = z.infer; +export const FunctionTool = z.object({ + type: z.literal("function").default("function"), + function: z.object({ + description: z.string(), + name: z.string().max(64), + parameters: z.record(z.unknown()), + }), +}); + +export type Tool = z.infer; +export const Tool = z.union([CodeInterpreterTool, RetrievalTool, FunctionTool]); + +export type AssistantObject = z.infer; +export const AssistantObject = z.intersection( + z.object({ + object: z.literal("assistant").default("assistant"), + name: z.string().max(256).nullish(), + description: z.string().max(512).nullish(), + model: z.string(), + instructions: z.string().nullish(), + tools: z.array(Tool).nullish(), + file_ids: z.array(z.string()).max(20).nullish(), + metadata: Metadata.nullish(), + }), + ObjectMeta, +); + +export type AssistantFileObject = z.infer; +export const AssistantFileObject = z.object({ + object: z.literal("assistant.file").default("assistant.file"), + assistant_id: z.string(), +}); + +export type ThreadObject = z.infer; +export const ThreadObject = z.intersection( + z.object({ + object: z.literal("thread").default("thread"), + metadata: Metadata.nullish(), + }), + ObjectMeta, +); + +export type MessageImageFileContent = z.infer; +export const MessageImageFileContent = z.object({ + type: z.literal("image_file"), + image_file: z.object({ + file_id: z.string(), + }), +}); + +export type 
FileCitationAnnotation = z.infer; +export const FileCitationAnnotation = z.object({ + type: z.literal("file_citation").default("file_citation"), + text: z.string(), + file_citation: z.object({ + file_id: z.string(), + quote: z.string(), + }), + start_index: z.number(), + end_index: z.number(), +}); + +export type FilePathAnnotation = z.infer; +export const FilePathAnnotation = z.object({ + type: z.literal("file_path").default("file_path"), + text: z.string(), + file_path: z.object({ + file_id: z.string(), + }), + start_index: z.number(), + end_index: z.number(), +}); + +export type MessageTextContent = z.infer; +export const MessageTextContent = z.object({ + type: z.literal("text").default("text"), + text: z.object({ + value: z.string(), + annotations: z.array(z.union([FileCitationAnnotation, FilePathAnnotation])) + .nullish(), + }), +}); + +export type MessageObject = z.infer; +export const MessageObject = z.intersection( + z.object({ + object: z.literal("thread.message").default("thread.message"), + thread_id: z.string(), + status: z + .union([ + z.literal("in_progress"), + z.literal("incomplete"), + z.literal("completed"), + ]) + .nullish(), + incomplete_details: z + .object({ + reason: z.string(), + }) + .nullish(), + completed_at: z.number().nullish(), + incomplete_at: z.number().nullish(), + role: z.union([z.literal("user"), z.literal("assistant")]), + content: z.array(z.union([MessageImageFileContent, MessageTextContent])), + assistant_id: z.string().nullish(), + run_id: z.string().nullish(), + file_ids: z.array(z.string()).max(10).nullish(), + metadata: Metadata.nullish(), + }), + ObjectMeta, +); + +export type MessageFileObject = z.infer; +export const MessageFileObject = z.intersection( + z.object({ + object: z.literal("thread.message.file").default("thread.message.file"), + message_id: z.string(), + }), + ObjectMeta, +); + +export type CodeInterpreterLogOutput = z.infer; +export const CodeInterpreterLogOutput = z.object({ + type: 
z.literal("logs").default("logs"), + logs: z.string(), +}); + +export type CodeInterpreterImageOutput = z.infer< + typeof CodeInterpreterImageOutput +>; +export const CodeInterpreterImageOutput = z.object({ + type: z.literal("image").default("image"), + image: z.object({ + file_id: z.string(), + }), +}); + +export type CodeInterpreterOutput = z.infer; +export const CodeInterpreterOutput = z.union([ + CodeInterpreterImageOutput, + CodeInterpreterLogOutput, +]); + +export type CodeInterpreterToolCall = z.infer; +export const CodeInterpreterToolCall = z.object({ + id: z.string(), + type: z.literal("code_interpreter"), + code_interpreter: z.object({ + input: z.string(), + outputs: z.array(CodeInterpreterOutput), + }), +}); + +export type RetrievalToolCall = z.infer; +export const RetrievalToolCall = z.object({ + id: z.string(), + type: z.literal("retrieval").default("retrieval"), + retrieval: z.object({ + name: z.string(), + input: z.string(), + output: z.string().optional(), + }), +}); + +export type FunctionToolCall = z.infer; +export const FunctionToolCall = z.object({ + id: z.string(), + type: z.literal("function"), + function: z.object({ + name: z.string(), + arguments: z.string(), + output: z.string().nullish(), + }), +}); + +export type ToolCall = z.infer; +export const ToolCall = z.union([ + CodeInterpreterToolCall, + RetrievalToolCall, + FunctionToolCall, +]); + +export type SubmitToolOutputsAction = z.infer; +export const SubmitToolOutputsAction = z.object({ + type: z.literal("submit_tool_outputs"), + submit_tool_outputs: z.object({ + tool_calls: z.array(FunctionToolCall), + }), +}); + +export type Usage = z.infer; +export const Usage = z.object({ + completion_tokens: z.number(), + prompt_tokens: z.number(), + total_tokens: z.number(), +}); + +export type RunObject = z.infer; +export const RunObject = z.intersection( + z.object({ + object: z.literal("thread.run").default("thread.run"), + thread_id: z.string(), + assistant_id: z.string(), + status: z.union([ + 
z.literal("queued"), + z.literal("in_progress"), + z.literal("requires_action"), + z.literal("cancelling"), + z.literal("cancelled"), + z.literal("failed"), + z.literal("completed"), + z.literal("expired"), + ]), + required_action: SubmitToolOutputsAction.nullish(), + last_error: z + .object({ + code: z.union([ + z.literal("server_error"), + z.literal("rate_limit_exceeded"), + z.literal("invalid_prompt"), + ]), + message: z.string(), + }) + .nullish(), + expires_at: z.number().nullish(), + started_at: z.number().nullish(), + cancelled_at: z.number().nullish(), + failed_at: z.number().nullish(), + completed_at: z.number().nullish(), + model: z.string().nullish(), + instructions: z.string().nullish(), + tools: z.array(Tool).nullish(), + file_ids: z.array(z.string()).nullish(), + metadata: Metadata.nullish(), + usage: Usage.nullish(), + temperature: z.number().min(0).max(1).default(1).nullish(), + }), + ObjectMeta, +); + +export type MessageCreationDetail = z.infer; +export const MessageCreationDetail = z.object({ + type: z.literal("message_creation"), + message_creation: z.object({ + message_id: z.string(), + }), +}); + +export type ToolCallsDetail = z.infer; +export const ToolCallsDetail = z.object({ + type: z.literal("tool_calls"), + tool_calls: z.array(ToolCall), +}); + +export type StepObject = z.infer; +export const StepObject = z.intersection( + z.object({ + object: z.literal("thread.run.step").default("thread.run.step"), + assistant_id: z.string(), + thread_id: z.string(), + run_id: z.string(), + type: z.union([z.literal("message_creation"), z.literal("tool_calls")]), + status: z.union([ + z.literal("in_progress"), + z.literal("cancelled"), + z.literal("failed"), + z.literal("completed"), + z.literal("expired"), + ]), + step_details: z.union([MessageCreationDetail, ToolCallsDetail]), + last_error: z + .object({ + code: z.union([ + z.literal("server_error"), + z.literal("rate_limit_exceeded"), + ]), + message: z.string(), + }) + .nullish(), + expired_at: 
z.number().nullish(), + cancelled_at: z.number().nullish(), + failed_at: z.number().nullish(), + completed_at: z.number().nullish(), + metadata: Metadata.nullish(), + usage: Usage.nullish(), + }), + ObjectMeta, +); + +export type VectorStoreObject = z.infer; +export const VectorStoreObject = z.intersection( + z.object({ + object: z.literal("vector_store").default("vector_store"), + name: z.string().nullish(), + usage_bytes: z.number(), + file_counts: z.object({ + in_progress: z.number(), + completed: z.number(), + failed: z.number(), + cancelled: z.number(), + total: z.number(), + }), + status: z.union([ + z.literal("expired"), + z.literal("in_progress"), + z.literal("completed"), + ]), + expires_after: z + .object({ + anchor: z.literal("last_active_at"), + days: z.number(), + }) + .nullish(), + expires_at: z.number().nullish(), + last_active_at: z.number(), + metadata: Metadata.nullish(), + }), + ObjectMeta, +); + +export type AutoChunkingStrategy = z.infer; +export const AutoChunkingStrategy = z.object({ + type: z.literal("auto"), +}); + +export type OtherChunkingStrategy = z.infer; +export const OtherChunkingStrategy = z.object({ + type: z.literal("other"), +}); + +export type StaticChunkingStrategy = z.infer; +export const StaticChunkingStrategy = z.object({ + type: z.literal("static").default("static"), + static: z.object({ + max_chunk_size_tokens: z.number().min(100).max(4096).default(800), + chunk_overlap_tokens: z.number().default(400), + }), +}); + +export type VectorStoreFileObject = z.infer; +export const VectorStoreFileObject = z.intersection( + z.object({ + object: z.literal("vector_store.file").default("vector_store.file"), + usage_bytes: z.number().default(0), + vector_store_id: z.string(), + status: z.union([ + z.literal("in_progress"), + z.literal("completed"), + z.literal("cancelled"), + z.literal("failed"), + ]), + last_error: z.union([ + z.object({ + code: z.union([ + z.literal("server_error"), + z.literal("rate_limit_exceeded"), + ]), + 
message: z.string(), + }), + z.null(), + ]), + chunking_strategy: z.union([OtherChunkingStrategy, StaticChunkingStrategy]), + type: z + .union([z.literal("file"), z.literal("url")]) + .default("file") + .optional(), + url: z.string().optional(), + }), + ObjectMeta, +); + +export type CreateAssistantRequest = z.infer; +export const CreateAssistantRequest = z.intersection( + z.object({ + name: z.string().max(256).nullish(), + description: z.string().max(512).nullish(), + model: z.string(), + instructions: z.string().nullish(), + tools: z.array(Tool).nullish(), + file_ids: z.array(z.string()).max(20).nullish(), + metadata: Metadata.nullish(), + }), + z.object({}), +); + +export type CreateAssistantFileRequest = z.infer< + typeof CreateAssistantFileRequest +>; +export const CreateAssistantFileRequest = z.object({ + file_id: z.string(), +}); + +export type ModifyAssistantRequest = z.infer; +export const ModifyAssistantRequest = z.intersection( + z.object({ + name: z.string().max(256).nullish(), + description: z.string().max(512).nullish(), + model: z.string().optional(), + instructions: z.string().nullish(), + tools: z.array(Tool).nullish(), + file_ids: z.array(z.string()).max(20).nullish(), + metadata: Metadata.nullish(), + }), + z.object({}), +); + +export type CreateMessageRequest = z.infer; +export const CreateMessageRequest = z.object({ + role: z.union([z.literal("user"), z.literal("assistant")]), + content: z.string(), + file_ids: z.array(z.string()).nullish(), +}); + +export type CreateThreadRequest = z.infer; +export const CreateThreadRequest = z.object({ + messages: z.array(CreateMessageRequest).nullish(), + metadata: Metadata.nullish(), +}); + +export type ModifyThreadRequest = z.infer; +export const ModifyThreadRequest = z.object({ + metadata: Metadata.nullish(), +}); + +export type ModifyMessageRequest = z.infer; +export const ModifyMessageRequest = z.object({ + metadata: Metadata.nullish(), +}); + +export type CreateRunRequest = z.infer; +export const 
CreateRunRequest = z.intersection( + z.object({ + additional_instructions: z.string().nullish(), + stream: z.boolean().default(false).nullish(), + }), + z.intersection( + z.object({ + assistant_id: z.string(), + model: z.string().nullish(), + instructions: z.string().nullish(), + tools: z.array(Tool).nullish(), + metadata: Metadata.nullish(), + temperature: z.number().min(0).max(1).default(1).nullish(), + }), + z.object({}), + ), +); + +export type CreateThreadAndRunRequest = z.infer< + typeof CreateThreadAndRunRequest +>; +export const CreateThreadAndRunRequest = z.intersection( + z.object({ + thread: CreateThreadRequest.nullish(), + }), + CreateRunRequest, +); + +export type ModifyRunRequest = z.infer; +export const ModifyRunRequest = z.object({ + metadata: Metadata.nullish(), +}); + +export type ToolOutput = z.infer; +export const ToolOutput = z.object({ + tool_call_id: z.string(), + output: z.string().nullish(), +}); + +export type SubmitToolOutputsToRunRequest = z.infer< + typeof SubmitToolOutputsToRunRequest +>; +export const SubmitToolOutputsToRunRequest = z.object({ + tool_outputs: z.array(ToolOutput), + stream: z.boolean().default(false).nullish(), +}); + +export type CreateVectorStoreRequest = z.infer; +export const CreateVectorStoreRequest = z.object({ + file_ids: z.array(z.string()).max(10000).nullish(), + name: z.string().nullish(), + expires_after: z + .object({ + anchor: z.literal("last_active_at").default("last_active_at"), + days: z.number(), + }) + .nullish(), + chunking_strategy: z + .union([AutoChunkingStrategy, StaticChunkingStrategy, z.null()]) + .optional(), + metadata: Metadata.nullish(), +}); + +export type ModifyVectorStoreRequest = z.infer; +export const ModifyVectorStoreRequest = z.object({ + name: z.union([z.string(), z.null()]).nullish(), + expires_after: z + .object({ + anchor: z.literal("last_active_at").default("last_active_at"), + days: z.number(), + }) + .nullish(), + metadata: Metadata.nullish(), +}); + +export type 
CreateVectorStoreFileRequest = z.infer< + typeof CreateVectorStoreFileRequest +>; +export const CreateVectorStoreFileRequest = z.object({ + file_id: z.string(), + chunking_strategy: z + .union([AutoChunkingStrategy, StaticChunkingStrategy, z.null()]) + .optional(), +}); + +export type DeleteResponse = z.infer; +export const DeleteResponse = z.object({ + id: z.string(), + deleted: z.literal(true).default(true), +}); + +export type DeleteAssistantResponse = z.infer; +export const DeleteAssistantResponse = z.intersection( + z.object({ + object: z.literal("assistant.deleted").default("assistant.deleted"), + }), + DeleteResponse, +); + +export type DeleteAssistantFileResponse = z.infer< + typeof DeleteAssistantFileResponse +>; +export const DeleteAssistantFileResponse = z.intersection( + z.object({ + object: z.literal("assistant.file.deleted").default( + "assistant.file.deleted", + ), + }), + DeleteResponse, +); + +export type DeleteThreadResponse = z.infer; +export const DeleteThreadResponse = z.intersection( + z.object({ + object: z.literal("thread.deleted").default("thread.deleted"), + }), + DeleteResponse, +); + +export type DeleteVectorStoreResponse = z.infer< + typeof DeleteVectorStoreResponse +>; +export const DeleteVectorStoreResponse = z.intersection( + z.object({ + object: z.literal("vector_store.deleted").default("vector_store.deleted"), + }), + DeleteResponse, +); + +export type DeleteVectorStoreFileResponse = z.infer< + typeof DeleteVectorStoreFileResponse +>; +export const DeleteVectorStoreFileResponse = z.intersection( + z.object({ + object: z + .literal("vector_store.file.deleted") + .default("vector_store.file.deleted"), + }), + DeleteResponse, +); + +export type Pagination = z.infer; +export const Pagination = z.object({ + limit: z.coerce.number().min(1).max(100).default(20), + after: z.string().optional(), + before: z.string().optional(), +}); + +export type Ordering = z.infer; +export const Ordering = z.object({ + order: z.union([z.literal("asc"), 
z.literal("desc")]).default("desc"), +}); diff --git a/schemas/openai/embedding.ts b/schemas/openai/embedding.ts new file mode 100644 index 0000000..37d5ece --- /dev/null +++ b/schemas/openai/embedding.ts @@ -0,0 +1,36 @@ +import z from "zod"; + +export type EmbeddingObject = z.infer; +export const EmbeddingObject = z.object({ + obejct: z.literal("embedding").default("embedding"), + embedding: z.array(z.number()), + index: z.number(), +}); + +export type CreateEmbeddingRequest = z.infer; +export const CreateEmbeddingRequest = z.object({ + input: z.union([ + z.string(), + z.array(z.string()), + z.array(z.number()), + z.array(z.array(z.number())), + ]), + model: z.string(), + encoding_format: z + .union([z.literal("float"), z.literal("base64"), z.null()]) + .default("float") + .optional(), + dimensions: z.union([z.number(), z.null()]).optional(), + user: z.union([z.string(), z.null()]).optional(), +}); + +export type CreateEmbeddingResponse = z.infer; +export const CreateEmbeddingResponse = z.object({ + object: z.literal("list").default("list"), + data: z.array(EmbeddingObject), + model: z.string(), + usage: z.object({ + prompt_tokens: z.number(), + total_tokens: z.number(), + }), +}); diff --git a/schemas/openai/mod.ts b/schemas/openai/mod.ts new file mode 100644 index 0000000..4074d2e --- /dev/null +++ b/schemas/openai/mod.ts @@ -0,0 +1,2 @@ +export * from "./assistant.ts"; +export * from "./embedding.ts"; diff --git a/types/google/mod.ts b/types/google/mod.ts new file mode 100644 index 0000000..3ba7b69 --- /dev/null +++ b/types/google/mod.ts @@ -0,0 +1,85 @@ +export type Part = { + /** + * Inline text. + */ + text: string; +} | { + inlineData: { + /** + * The IANA standard MIME type of the source data. Examples: - image/png - image/jpeg If an unsupported MIME type is provided, an error will be returned. + */ + mimeType: string; + /** + * Raw bytes for media formats. A base64-encoded string. 
+ */ + data: string; + }; +}; + +/** + * The base structured datatype containing multi-part content of a message. + * A Content includes a role field designating the producer of the Content and a parts field containing multi-part data that contains the content of the message turn. + */ +export type Content = { + /** + * Ordered Parts that constitute a single message. Parts may have different MIME types. + */ + parts: Part[]; + /** + * The producer of the content. Must be either 'user' or 'model'. + * Useful to set for multi-turn conversations, otherwise can be left blank or unset. + */ + role?: "use" | "model"; +}; + +/** + * Type of task for which the embedding will be used. + */ +export type TaskType = + | "TASK_TYPE_UNSPECIFIED" // Unset value, which will default to one of the other enum values. + | "RETRIEVAL_QUERY" // Specifies the given text is a query in a search/retrieval setting. + | "RETRIEVAL_DOCUMENT" // Specifies the given text is a document from the corpus being searched. + | "SEMANTIC_SIMILARITY" // Specifies the given text will be used for STS. + | "CLASSIFICATION" // Specifies that the given text will be classified. + | "CLUSTERING" // Specifies that the embeddings will be used for clustering. + | "QUESTION_ANSWERING" // Specifies that the given text will be used for question answering. + | "FACT_VERIFICATION"; // Specifies that the given text will be used for fact verification. + +export type EmbedContentRequest = { + /** + * The content to embed. Only the parts.text fields will be counted. + */ + content: Content; + /** + * Optional task type for which the embeddings will be used. Can only be set for models/embedding-001. + */ + taskType?: TaskType; + /** + * An optional title for the text. Only applicable when TaskType is RETRIEVAL_DOCUMENT. + * Note: Specifying a title for RETRIEVAL_DOCUMENT provides better quality embeddings for retrieval. + */ + title?: string; + /** + * Optional reduced dimension for the output embedding. 
If set, excessive values in the output + * embedding are truncated from the end. Supported by newer models since 2024, and the earlier + * model (models/embedding-001) cannot specify this value. + */ + outputDimensionality?: number | null; +}; + +/** + * A list of floats representing an embedding. + */ +export type ContentEmbedding = { + /** + * The embedding values. + */ + values: number[]; +}; + +export type EmbedContentResponse = { + /** + * The embedding generated from the input content. + */ + embedding: ContentEmbedding; +}; diff --git a/types/open_assistant/mod.ts b/types/open_assistant/mod.ts new file mode 100644 index 0000000..71a493a --- /dev/null +++ b/types/open_assistant/mod.ts @@ -0,0 +1,6 @@ +export type VectorStoreRecord = { + file_id: string; + file_name: string; + content: string; + embedding: number[]; +}; diff --git a/types/openai/assistant.ts b/types/openai/assistant.ts new file mode 100644 index 0000000..9542d7d --- /dev/null +++ b/types/openai/assistant.ts @@ -0,0 +1,1282 @@ +/* ---------------------------------------------------------------------------- */ +/* --------------------------------- Objects ---------------------------------- */ +/* ---------------------------------------------------------------------------- */ + +/** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for storing + * additional information about the object in a structured format. Keys can be a maximum of 64 + * characters long and values can be a maxium of 512 characters long. + */ +export type Metadata = Record; + +export type ObjectMeta = { + /** + * The identifier, which can be referenced in API endpoints. + */ + id: string; + /** + * The Unix timestamp (in seconds) for when the object was created. + */ + created_at: number; +}; + +/** + * Code interpreter tool + */ +export type CodeInterpreterTool = { + /** + * The type of tool being defined: code_interpreter. 
+ * + * @default code_interpreter + */ + type: "code_interpreter"; +}; +/** + * Retrieval tool + */ +export type RetrievalTool = { + /** + * The type of tool being defined: retrieval. + * + * @default retrieval + */ + type: "retrieval"; +}; + +/** + * Function tool + */ +export type FunctionTool = { + /** + * The type of tool being defined: function + * + * @default function + */ + type: "function"; + + function: { + /** + * A description of what the function does, used by the model to choose when and how to call + * the function. + */ + description: string; + /** + * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and + * dashes, with a maximum length of 64. + * + * @maxLength 64 + */ + name: string; + + /** + * The parameters the functions accepts, described as a JSON Schema object. See the guide for + * examples, and the JSON Schema reference for documentation about the format. + */ + parameters: Record; + }; +}; + +export type Tool = CodeInterpreterTool | RetrievalTool | FunctionTool; + +/** + * Represents an assistant that can call the model and use tools. + */ +export type AssistantObject = { + /** + * The object type, which is always assistant. + * + * @default assistant + */ + object: "assistant"; + + /** + * The name of the assistant. The maximum length is 256 characters. + * + * @maxLength 256 + */ + name?: string; + + /** + * The description of the assistant. The maximum length is 512 characters. + * + * @maxLength 512 + */ + description?: string; + + /** + * ID of the model to use. You can use the List models API to see all of your available models, + * or see our Model overview for descriptions of them. + */ + model: string; + + /** + * The system instructions that the assistant uses. The maximum length is 32768 characters. + */ + instructions?: string; + + /** + * A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. 
+ * Tools can be of types code_interpreter, retrieval, or function. + */ + tools?: Tool[]; + + /** + * A list of file IDs attached to this assistant. There can be a maximum of 20 files attached to + * the assistant. Files are ordered by their creation date in ascending order. + * + * @maxItems 20 + */ + file_ids?: string[]; + + metadata?: Metadata; +} & ObjectMeta; + +/** + * A list of Files attached to an assistant. + */ +export type AssistantFileObject = { + /** + * The object type, which is always assistant.file. + * + * @default assistant.file + */ + object: "assistant.file"; + + /** + * The assistant ID that the file is attached to. + */ + assistant_id: string; +}; + +/** + * Represents a thread that contains messages. + */ +export type ThreadObject = { + /** + * The object type, which is always thread. + * + * @default thread + */ + object: "thread"; + + metadata?: Metadata; +} & ObjectMeta; + +/** + * References an image File in the content of a message. + */ +export type MessageImageFileContent = { + type: "image_file"; + image_file: { + /** + * The File ID of the image in the message content. + */ + file_id: string; + }; +}; + +/** + * A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. + * Generated when the assistant uses the retrieval tool to search files. + */ +export type FileCitationAnnotation = { + /** + * @default file_citation + */ + type: "file_citation"; + /** + * The text in the message content that needs to be replaced. + */ + text: string; + file_citation: { + /** + * The ID of the specific File the citation is from. + */ + file_id: string; + /** + * The specific quote in the file. + */ + quote: string; + }; + start_index: number; + end_index: number; +}; + +/** + * A URL for the file that's generated when the assistant used the code_interpreter tool to generate a file. 
+ */ +export type FilePathAnnotation = { + /** + * @default file_path + */ + type: "file_path"; + /** + * The text in the message content that needs to be replaced. + */ + text: string; + file_path: { + file_id: string; + }; + start_index: number; + end_index: number; +}; + +/** + * The text content that is part of a message. + */ +export type MessageTextContent = { + /** + * Always text. + * + * @default text + */ + type: "text"; + /** + * Detail of text. + */ + text: { + /** + * The data that makes up the text. + */ + value: string; + annotations?: (FileCitationAnnotation | FilePathAnnotation)[]; + }; +}; + +/** + * Represents a message within a thread. + */ +export type MessageObject = { + /** + * The object type, which is always thread.message. + * + * @default thread.message + */ + object: "thread.message"; + /** + * The thread ID that this message belongs to. + */ + thread_id: string; + /** + * The status of the message, which can be either in_progress, incomplete, or completed. + */ + status?: "in_progress" | "incomplete" | "completed"; + /** + * On an incomplete message, details about why the message is incomplete. + */ + incomplete_details?: { + /** + * The reason the message is incomplete. + */ + reason: string; + }; + /** + * The Unix timestamp (in seconds) for when the message was completed. + */ + completed_at?: number; + /** + * The Unix timestamp (in seconds) for when the message was marked as incomplete. + */ + incomplete_at?: number; + /** + * The entity that produced the message. One of user or assistant. + */ + role: "user" | "assistant"; + /** + * The content of the message in array of text and/or images. + */ + content: (MessageImageFileContent | MessageTextContent)[]; + /** + * If applicable, the ID of the assistant that authored this message. + */ + assistant_id?: string; + /** + * The ID of the run associated with the creation of this message. 
Value is null when messages + * are created manually using the create message or create thread endpoints. + */ + run_id?: string; + /** + * A list of file IDs that the assistant should use. Useful for tools like retrieval and + * code_interpreter that can access files. + * + * @maxItems 10 + */ + file_ids?: string[]; + metadata?: Metadata; +} & ObjectMeta; + +/** + * A list of files attached to a message. + */ +export type MessageFileObject = { + /** + * The object type, which is always thread.message.file. + * + * @default thread.message.file + */ + object: "thread.message.file"; + /** + * The ID of the message that the File is attached to. + */ + message_id: string; +} & ObjectMeta; + +/** + * Text output from the Code Interpreter tool call as part of a run step. + */ +export type CodeInterpreterLogOutput = { + /** + * Always logs. + * + * @default logs + */ + type: "logs"; + + /** + * The text output from the Code Interpreter tool call. + */ + logs: string; +}; + +/** + * Image output from the Code Interpreter tool call as part of a run step. + */ +export type CodeInterpreterImageOutput = { + /** + * Always image. + * + * @default image + */ + type: "image"; + + /** + * The text output from the Code Interpreter tool call. + */ + image: { + /** + * The file ID of the image. + */ + file_id: string; + }; +}; + +/** + * The output of code interpreter tool + */ +export type CodeInterpreterOutput = + | CodeInterpreterImageOutput + | CodeInterpreterLogOutput; + +/** + * Details of the Code Interpreter tool call the run step was involved in. + */ +export type CodeInterpreterToolCall = { + /** + * The ID of the tool call. + */ + id: string; + + /** + * The type of tool call. This is always going to be code_interpreter for this type of tool call. + */ + type: "code_interpreter"; + + /** + * The Code Interpreter tool call definition. + */ + code_interpreter: { + /** + * The input to the Code Interpreter tool call. 
+ */ + input: string; + + /** + * The outputs from the Code Interpreter tool call. Code Interpreter can output one or more + * items, including text (logs) or images (image). Each of these are represented by a different + * object type. + */ + outputs: CodeInterpreterOutput[]; + }; +}; + +/** + * Retrieval tool call + */ +export type RetrievalToolCall = { + /** + * The ID of the tool call object. + */ + id: string; + + /** + * The type of tool call. This is always going to be retrieval for this type of tool call. + * + * @default retrieval + */ + type: "retrieval"; + + /** + * Detail of retrieval. + */ + retrieval: { + /** + * The name of retrieval tool. + */ + name: string; + /** + * The input to retrieval tool. + */ + input: string; + /** + * The output from retrieval tool. + */ + output?: string; + }; +}; + +/** + * Function tool call + */ +export type FunctionToolCall = { + /** + * The ID of the tool call. This ID must be referenced when you submit the tool outputs in using + * the Submit tool outputs to run endpoint. + */ + id: string; + + /** + * The type of tool call the output is required for. For now, this is always function. + */ + type: "function"; + + /** + * The function definition. + */ + function: { + /** + * The name of the function. + */ + name: string; + + /** + * The arguments that the model expects you to pass to the function. + */ + arguments: string; + /** + * The output of the function. This will be null if the outputs have not been submitted yet. + */ + output?: string; + }; +}; + +export type ToolCall = + | CodeInterpreterToolCall + | RetrievalToolCall + | FunctionToolCall; + +/** + * SubmitToolOutputs action. + */ +export type SubmitToolOutputsAction = { + /** + * For now, this is always submit_tool_outputs. + */ + type: "submit_tool_outputs"; + /** + * Details on the tool outputs needed for this run to continue. + */ + submit_tool_outputs: { + /** + * A list of the relevant tool calls. 
+ */ + tool_calls: FunctionToolCall[]; + }; +}; + +/** + * Usage statistics related to the run or step. + */ +export type Usage = { + /** + * Number of completion tokens used over the course of the run or step. + */ + completion_tokens: number; + + /** + * Number of prompt tokens used over the course of the run or step. + */ + prompt_tokens: number; + + /** + * Total number of tokens used (prompt + completion). + */ + total_tokens: number; +}; + +/** + * Represents an execution run on a thread. + */ +export type RunObject = { + /** + * The object type, which is always thread.run. + * + * @default thread.run + */ + object: "thread.run"; + /** + * The ID of the thread that was executed on as a part of this run. + */ + thread_id: string; + + /** + * The ID of the assistant used for execution of this run. + */ + assistant_id: string; + + /** + * The status of the run, which can be either . + */ + status: + | "queued" + | "in_progress" + | "requires_action" + | "cancelling" + | "cancelled" + | "failed" + | "completed" + | "expired"; + + /** + * Details on the action required to continue the run. Will be null if no action is required. + */ + required_action?: SubmitToolOutputsAction; + + /** + * The last error associated with this run. Will be null if there are no errors. + */ + last_error?: { + /** + * One of server_error, rate_limit_exceeded, or invalid_prompt. + */ + code: "server_error" | "rate_limit_exceeded" | "invalid_prompt"; + + /** + * A human-readable description of the error. + */ + message: string; + }; + + /** + * The Unix timestamp (in seconds) for when the run will expire. + */ + expires_at?: number; + + /** + * The Unix timestamp (in seconds) for when the run was started. + */ + started_at?: number; + + /** + * The Unix timestamp (in seconds) for when the run was cancelled. + */ + cancelled_at?: number; + + /** + * The Unix timestamp (in seconds) for when the run failed. 
+ */ + failed_at?: number; + + /** + * The Unix timestamp (in seconds) for when the run was completed. + */ + completed_at?: number; + + /** + * The model that the assistant used for this run. + */ + model?: string; + + /** + * The instructions that the assistant used for this run. + */ + instructions?: string; + + /** + * The list of tools that the assistant used for this run. + */ + tools?: Tool[]; + + /** + * The list of File IDs the assistant used for this run. + */ + file_ids?: string[]; + + metadata?: Metadata; + + /** + * Usage statistics related to the run. This value will be null if the run is not in a terminal + * state (i.e. in_progress, queued, etc.). + */ + usage?: Usage; + + /** + * The sampling temperature used for this run. + * + * @default 1 + * @maximum 1 + * @minimum 0 + */ + temperature?: number; +} & ObjectMeta; + +/** + * Message creation detail + */ +export type MessageCreationDetail = { + /** + * Always message_creation. + */ + type: "message_creation"; + /** + * Details of the message creation by the run step. + */ + message_creation: { + /** + * The ID of the message that was created by this run step. + */ + message_id: string; + }; +}; + +/** + * Tool calls detail + */ +export type ToolCallsDetail = { + /** + * Always tool_calls. + */ + type: "tool_calls"; + + /** + * An array of tool calls the run step was involved in. These can be associated with one of three + * types of tools: code_interpreter, retrieval, or function. + */ + tool_calls: ToolCall[]; +}; + +export type StepObject = { + /** + * The object type, which is always thread.run.step. + * + * @default thread.run.step + */ + object: "thread.run.step"; + /** + * The ID of the assistant associated with the run step. + */ + assistant_id: string; + + /** + * The ID of the thread that was run. + */ + thread_id: string; + + /** + * The ID of the run that this run step is a part of. 
+ */ + run_id: string; + + /** + * The type of run step, which can be either message_creation or tool_calls. + */ + type: "message_creation" | "tool_calls"; + + /** + * The status of the run step, which can be either in_progress, cancelled, failed, completed, or + * expired. + */ + status: "in_progress" | "cancelled" | "failed" | "completed" | "expired"; + + /** + * The details of the run step. + */ + step_details: MessageCreationDetail | ToolCallsDetail; + + /** + * The last error associated with this run step. Will be null if there are no errors. + */ + last_error?: { + /** + * One of server_error or rate_limit_exceeded. + */ + code: "server_error" | "rate_limit_exceeded"; + + /** + * A human-readable description of the error. + */ + message: string; + }; + /** + * The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if + * the parent run is expired. + */ + expired_at?: number; + + /** + * The Unix timestamp (in seconds) for when the run step was cancelled. + */ + cancelled_at?: number; + + /** + * The Unix timestamp (in seconds) for when the run step failed. + */ + failed_at?: number; + + /** + * The Unix timestamp (in seconds) for when the run step completed. + */ + completed_at?: number; + + metadata?: Metadata; + + /** + * Usage statistics related to the run step. This value will be null while the run step's status + * is in_progress. + */ + usage?: Usage; +} & ObjectMeta; + +/** + * A vector store is a collection of processed files can be used by the file_search tool. + */ +export type VectorStoreObject = { + /** + * The object type, which is always vector_store. + * + * @default vector_store + */ + object: "vector_store"; + + /** + * The name of the vector store. + */ + name?: string | null; + + /** + * The total number of bytes used by the files in the vector store. + */ + usage_bytes: number; + + file_counts: { + /** + * The number of files that are currently being processed. 
+ */ + in_progress: number; + + /** + * The number of files that have been successfully processed. + */ + completed: number; + + /** + * The number of files that have failed to process. + */ + failed: number; + + /** + * The number of files that were cancelled. + */ + cancelled: number; + + /** + * The total number of files. + */ + total: number; + }; + /** + * The status of the vector store. + * Can be either expired, in_progress, or completed. + * A status of completed indicates that the vector store is ready for use. + */ + status: "expired" | "in_progress" | "completed"; + + /** + * The expiration policy for a vector store. + */ + expires_after?: { + /** + * Anchor timestamp after which the expiration policy applies. + * Supported anchors: last_active_at. + */ + anchor: "last_active_at"; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; + } | null; + + /** + * The Unix timestamp (in seconds) for when the vector store will expire. + */ + expires_at?: number | null; + + /** + * The Unix timestamp (in seconds) for when the vector store was last active. + */ + last_active_at: number; + + metadata?: Metadata | null; +} & ObjectMeta; + +export type AutoChunkingStrategy = { + type: "auto"; +}; + +export type OtherChunkingStrategy = { + type: "other"; +}; + +export type StaticChunkingStrategy = { + /** + * Always static. + * + * @default 'static' + */ + type: "static"; + /** + * Static Chunking Strategy + */ + static: { + /** + * The maximum number of tokens in each chunk. + * + * @default 800 + * @maximum 4096 + * @minimum 100 + */ + max_chunk_size_tokens: number; + /** + * The number of tokens that overlap between chunks. + * Note that the overlap must not exceed half of max_chunk_size_tokens. + * + * @default 400 + */ + chunk_overlap_tokens: number; + }; +}; + +/** + * Represents a file attached to a vector store. 
+ */ +export type VectorStoreFileObject = { + /** + * The object type, which is always vector_store.file. + * + * @default vector_store.file + */ + object: "vector_store.file"; + + /** + * The total vector store usage in bytes. + * Note that this may be different from the original file size. + * + * @default 0 + */ + usage_bytes: number; + + /** + * The ID of the vector store that the File is attached to. + */ + vector_store_id: string; + + /** + * The status of the vector store file. + * Can be either in_progress, completed, cancelled, or failed. + * The status completed indicates that the vector store file is ready for use. + */ + status: "in_progress" | "completed" | "cancelled" | "failed"; + + /** + * The last error associated with this vector store file. + * Will be null if there are no errors. + */ + last_error: { + /** + * One of server_error or rate_limit_exceeded. + */ + code: "server_error" | "rate_limit_exceeded"; + + /** + * A human-readable description of the error. + */ + message: string; + } | null; + /** + * The strategy used to chunk the file. + */ + chunking_strategy: OtherChunkingStrategy | StaticChunkingStrategy; + /** + * FileId in request is url, when type is url. The default is file. + * + * @default 'file' + */ + type?: "file" | "url"; + /** + * Read content from the url, when type is url. + */ + url?: string; +} & ObjectMeta; + +/* ----------------------------------------------------------------------------- */ +/* -------------------------------- Reuqests ----------------------------------- */ +/* ----------------------------------------------------------------------------- */ + +/** + * Create an assistant with a model and instructions. + */ +export type CreateAssistantRequest = Omit< + AssistantObject, + "id" | "object" | "created_at" +>; + +/** + * Create an assistant file by attaching a File to an assistant. + */ +export type CreateAssistantFileRequest = { + /** + * A File ID (with purpose="assistants") that the assistant should use. 
Useful for tools like + * retrieval and code_interpreter that can access files. + */ + file_id: string; +}; + +/** + * Modifies an assistant. + */ +export type ModifyAssistantRequest = Partial; + +export type CreateMessageRequest = { + /** + * The role of the entity that is creating the message. + */ + role: "user" | "assistant"; + /** + * The content of the message. + */ + content: string; + /** + * A list of File IDs that the message should use. There can be a maximum of 10 files attached to + * a message. Useful for tools like retrieval and code_interpreter that can access and use files. + */ + file_ids?: string[]; +}; + +/** + * Create a thread. + */ +export type CreateThreadRequest = { + /** + * A list of messages to start the thread with. + */ + messages?: CreateMessageRequest[]; + metadata?: Metadata; +}; + +/** + * Modifies a thread. + */ +export type ModifyThreadRequest = { + metadata?: Metadata; +}; + +/** + * Modifies a message. + */ +export type ModifyMessageRequest = { + metadata?: Metadata; +}; + +/** + * Create a run + */ +export type CreateRunRequest = + & { + /** + * Appends additional instructions at the end of the instructions for the run. This is useful + * for modifying the behavior on a per-run basis without overriding other instructions. + */ + additional_instructions?: string; + /** + * If true, returns a stream of events that happen during the Run as server-sent events, + * terminating when the Run enters a terminal state with a data: [DONE] message. + * + * @default false + */ + stream?: boolean; + } + & Pick< + RunObject, + | "assistant_id" + | "model" + | "instructions" + | "tools" + | "metadata" + | "temperature" + >; + +/** + * Create a thread and run it in one request. + */ +export type CreateThreadAndRunRequest = { + thread?: CreateThreadRequest; +} & CreateRunRequest; + +/** + * Modifies a run. + */ +export type ModifyRunRequest = { + metadata?: Metadata; +}; + +/** + * Detial of tool output. 
+ */ +export type ToolOutput = { + /** + * The ID of the tool call in the required_action object within the run object the output is + * being submitted for. + */ + tool_call_id: string; + + /** + * The output of the tool call to be submitted to continue the run. + */ + output?: string; +}; + +/** + * Submit tool outputs to run. + */ +export type SubmitToolOutputsToRunRequest = { + /** + * A list of tools for which the outputs are being submitted. + */ + tool_outputs: ToolOutput[]; + /** + * If true, returns a stream of events that happen during the Run as server-sent events, + * terminating when the Run enters a terminal state with a data: [DONE] message. + * + * @default false + */ + stream?: boolean; +}; + +/** + * Represents the request body for creating a vector store. + */ +export type CreateVectorStoreRequest = { + /** + * A list of File IDs that the vector store should use. + * Useful for tools like file_search that can access files. + * + * @maxItems 10000 + */ + file_ids?: string[] | null; + + /** + * The name of the vector store. + */ + name?: string | null; + + /** + * The expiration policy for a vector store. + */ + expires_after?: { + /** + * Anchor timestamp after which the expiration policy applies. + * Supported anchors: last_active_at. + * + * @default last_active_at + */ + anchor: "last_active_at"; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; + } | null; + + /** + * The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Only + * applicable if file_ids is non-empty. + */ + chunking_strategy?: AutoChunkingStrategy | StaticChunkingStrategy | null; + + metadata?: Metadata | null; +}; + +/** + * Represents the request body for creating a vector store file. + */ +export type CreateVectorStoreFileRequest = { + /** + * A File ID that the vector store should use. + * Useful for tools like file_search that can access files. 
 + */ + file_id: string; + /** + * The chunking strategy used to chunk the file(s). If not set, will use the auto strategy. Only + * applicable if file_ids is non-empty. + */ + chunking_strategy?: AutoChunkingStrategy | StaticChunkingStrategy | null; +}; + +/** + * Represents the request body for modifying a vector store. + */ +export type ModifyVectorStoreRequest = { + /** + * The name of the vector store. + * Can be set to null to remove the name. + */ + name?: string | null; + + /** + * The expiration policy for a vector store. + */ + expires_after?: { + /** + * Anchor timestamp after which the expiration policy applies. + * Supported anchors: last_active_at. + * + * @default last_active_at + */ + anchor: "last_active_at"; + + /** + * The number of days after the anchor time that the vector store will expire. + */ + days: number; + } | null; + + metadata?: Metadata | null; +}; + +/* ------------------------------------------------------------------------------ */ +/* -------------------------------- Responses ----------------------------------- */ +/* ------------------------------------------------------------------------------ */ + +/** + * Delete response + */ +export type DeleteResponse = { + /** + * The id of the deleted object. + */ + id: string; + /** + * This will always be true. + * + * @default true + */ + deleted: true; +}; + +/** + * Delete an assistant. + */ +export type DeleteAssistantResponse = { + /** + * The object type, which is always assistant.deleted. + * + * @default assistant.deleted + */ + object: "assistant.deleted"; +} & DeleteResponse; + +/** + * Delete an assistant file. + */ +export type DeleteAssistantFileResponse = { + /** + * The object type, which is always assistant.file.deleted. + * + * @default assistant.file.deleted + */ + object: "assistant.file.deleted"; +} & DeleteResponse; + +/** + * Delete a thread. + */ +export type DeleteThreadResponse = { + /** + * The object type, which is always thread.deleted. 
 + * + * @default thread.deleted + */ + object: "thread.deleted"; +} & DeleteResponse; + +/** + * Delete a vector store. + */ +export type DeleteVectorStoreResponse = { + /** + * The object type, which is always vector_store.deleted. + * + * @default vector_store.deleted + */ + object: "vector_store.deleted"; +} & DeleteResponse; + +/** + * Delete a vector store file. + */ +export type DeleteVectorStoreFileResponse = { + /** + * The object type, which is always vector_store.file.deleted. + * + * @default vector_store.file.deleted + */ + object: "vector_store.file.deleted"; +} & DeleteResponse; + +/* ------------------------------------------------------------------------------- */ +/* -------------------------------- Parameters ----------------------------------- */ +/* ------------------------------------------------------------------------------- */ + +/** + * Pagination parameters + */ +export type Pagination = { + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + * + * @default 20 + * @maximum 100 + * @minimum 1 + */ + limit: number; + /** + * A cursor for use in pagination. after is an object ID that defines your place in the list. + */ + after?: string; + /** + * A cursor for use in pagination. before is an object ID that defines your place in the list. + */ + before?: string; +}; + +/** + * Ordering parameters + */ +export type Ordering = { + /** + * Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. + * + * @default desc + */ + order?: "asc" | "desc"; +}; diff --git a/types/openai/embedding.ts b/types/openai/embedding.ts new file mode 100644 index 0000000..c289c8a --- /dev/null +++ b/types/openai/embedding.ts @@ -0,0 +1,74 @@ +export type EmbeddingObject = { + /** + * The object type, which is always "embedding". + * + * @default embedding + */ + object: "embedding"; + /** + * The embedding vector, which is a list of floats. 
The length of vector depends on the model as listed in the embedding guide. + */ + embedding: number[]; + /** + * The index of the embedding in the list of embeddings. + */ + index: number; +}; + +/** + * Creates an embedding vector representing the input text. + */ +export type CreateEmbeddingRequest = { + /** + * Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model, cannot be an empty string, and any array must be 2048 dimensions or less. + * + * string - The string that will be turned into an embedding. + * string[] - The array of strings that will be turned into an embedding. + * number[] - The array of integers that will be turned into an embedding. + * number[][] - The array of arrays containing integers that will be turned into an embedding. + */ + input: string | string[] | number[] | number[][]; + + /** + * ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them. + */ + model: string; + + /** + * The format to return the embeddings in. Can be either float or base64. + * + * @default float + */ + encoding_format?: "float" | "base64" | null; + + /** + * The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. + */ + dimensions?: number | null; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + */ + user?: string | null; +}; + +/** + * A list of embedding objects. + */ +export type CreateEmbeddingResponse = { + /** + * The object type, which is always "list". + * + * @default list + */ + object: "list"; + data: EmbeddingObject[]; + /** + * ID of the model to use. + */ + model: string; + usage: { + prompt_tokens: number; + total_tokens: number; + }; +};