From 087c992bcd1ec67ee3523ae2afd6fc3f41e2142f Mon Sep 17 00:00:00 2001 From: kodster28 Date: Tue, 20 Aug 2024 14:14:09 -0500 Subject: [PATCH] Final block of code blocks --- ...-information-for-troubleshooting-sites.mdx | 124 +- .../restoring-original-visitor-ips.mdx | 127 +- .../import-cloudflare-resources.mdx | 52 +- .../provider-customization.mdx | 9 +- src/content/docs/terraform/installing.mdx | 25 +- .../terraform/tutorial/add-page-rules.mdx | 61 +- .../tutorial/configure-https-settings.mdx | 64 +- .../tutorial/initialize-terraform.mdx | 35 +- .../tutorial/revert-configuration.mdx | 37 +- .../docs/terraform/tutorial/track-history.mdx | 57 +- .../terraform/tutorial/use-load-balancing.mdx | 112 +- .../docs/time-services/roughtime/usage.mdx | 7 +- .../get-started/server-side-validation.mdx | 83 +- .../docs/turnstile/get-started/terraform.mdx | 47 +- .../best-practices/create-indexes.mdx | 19 +- .../best-practices/insert-vectors.mdx | 85 +- .../docs/vectorize/get-started/embeddings.mdx | 107 +- .../docs/vectorize/get-started/intro.mdx | 200 +- .../docs/vectorize/reference/client-api.mdx | 75 +- .../reference/metadata-filtering.mdx | 182 +- .../command-line/decrypt-payload.mdx | 24 +- .../command-line/generate-key-pair.mdx | 12 +- .../firewall-rules-to-custom-rules.mdx | 136 +- .../waf-managed-rules-migration.mdx | 234 +- .../docs/warp-client/get-started/linux.mdx | 23 +- .../docs/warp-client/get-started/macOS.mdx | 17 +- .../reference/supported-api-methods.mdx | 177 +- .../docs/web3/how-to/use-ethereum-gateway.mdx | 122 +- .../function-calling/embedded/examples/kv.mdx | 66 +- .../function-calling/embedded/get-started.mdx | 27 +- .../workers-ai/function-calling/index.mdx | 249 +- .../docs/workers-ai/get-started/rest-api.mdx | 19 +- .../get-started/workers-wrangler.mdx | 59 +- ...ld-a-retrieval-augmented-generation-ai.mdx | 257 +- .../configuration/cron-triggers/index.mdx | 57 +- .../configuration/integrations/apis.mdx | 35 +- .../integrations/external-services.mdx | 3 +- .../docs/workers/configuration/secrets.mdx | 13 +- .../sites/start-from-existing.mdx | 105 +- .../sites/start-from-scratch.mdx | 25 +- .../configuration/sites/start-from-worker.mdx | 75 +- .../workers/configuration/smart-placement.mdx | 24 +- .../gradual-deployments.mdx | 51 +- .../databases/connecting-to-databases.mdx | 21 +- .../databases/native-integrations/neon.mdx | 5 +- .../native-integrations/planetscale.mdx | 49 +- .../native-integrations/supabase.mdx | 11 +- .../databases/native-integrations/turso.mdx | 108 +- .../databases/native-integrations/upstash.mdx | 64 +- .../databases/native-integrations/xata.mdx | 43 +- .../docs/workers/examples/cron-trigger.mdx | 4 +- .../examples/multiple-cron-triggers.mdx | 4 +- .../docs/workers/get-started/guide.mdx | 101 +- .../docs/workers/get-started/quickstarts.mdx | 88 +- .../docs/workers/languages/python/index.mdx | 28 +- .../docs/workers/languages/rust/index.mdx | 60 +- .../observability/logging/real-time-logs.mdx | 88 +- src/content/docs/workers/platform/limits.mdx | 108 +- .../workers/runtime-apis/bindings/mTLS.mdx | 22 +- .../runtime-apis/handlers/scheduled.mdx | 46 +- .../workers/runtime-apis/nodejs/index.mdx | 11 +- .../workers/runtime-apis/rpc/lifecycle.mdx | 61 +- .../runtime-apis/webassembly/javascript.mdx | 31 +- .../workers/testing/local-development.mdx | 47 +- .../get-started/migrate-from-miniflare-2.mdx | 27 +- .../get-started/write-your-first-test.mdx | 167 +- .../tutorials/build-a-jamstack-app/index.mdx | 331 +-- .../build-a-qr-code-generator/index.mdx | 134 +- 
.../tutorials/build-a-slackbot/index.mdx | 354 +-- .../connect-to-turso-using-workers/index.mdx | 238 +- .../index.mdx | 91 +- .../deploy-a-realtime-chat-app/index.mdx | 19 +- .../index.mdx | 259 +- .../index.mdx | 147 +- .../index.mdx | 158 +- .../openai-function-calls-workers/index.mdx | 224 +- .../docs/workers/tutorials/postgres/index.mdx | 119 +- .../send-emails-with-postmark/index.mdx | 70 +- .../send-emails-with-resend/index.mdx | 62 +- .../tutorials/store-data-with-fauna/index.mdx | 224 +- .../tutorials/upload-assets-with-r2/index.mdx | 115 +- .../tutorials/workers-kv-from-rust/index.mdx | 29 +- .../docs/workers/wrangler/commands.mdx | 2216 +++++++---------- .../docs/workers/wrangler/environments.mdx | 34 +- .../workers/wrangler/install-and-update.mdx | 17 +- .../migration/v1-to-v2/update-v1-to-v2.mdx | 26 +- .../wrangler-legacy/authentication.mdx | 31 +- .../v1-to-v2/wrangler-legacy/commands.mdx | 676 +++-- .../wrangler-legacy/install-update.mdx | 21 +- .../cloudflare-one/ssh/modify-sshd.mdx | 3 +- .../cloudflare-one/ssh/public-key.mdx | 5 +- .../cloudflare-one/ssh/restart-server.mdx | 9 +- .../partials/cloudflare-one/ssh/usernames.mdx | 13 +- .../terraform/config-directory.mdx | 5 +- .../terraform/deploy-terraform.mdx | 7 +- .../terraform/input-variables.mdx | 3 +- .../terraform/variable-values.mdx | 5 +- .../tunnel/cloudflared-debian-install.mdx | 9 +- .../tunnel/enable-gateway-proxy.mdx | 16 +- .../tunnel/troubleshoot-private-networks.mdx | 29 +- .../mx-deployment-prerequisites.mdx | 14 +- .../block-cloudflare-ips-tactical.mdx | 5 +- .../hyperdrive/create-hyperdrive-config.mdx | 33 +- .../create-github-repository.mdx | 20 +- .../create-github-repository_no_init.mdx | 14 +- .../partials/ssl/custom-cert-file-example.mdx | 12 +- .../partials/ssl/keyless-key-server-setup.mdx | 36 +- .../workers/c3-run-command-no-directory.mdx | 7 +- .../workers/dash-creation-next-steps.mdx | 13 +- .../partials/workers/install_wrangler.mdx | 7 +- 110 files changed, 5159 insertions(+), 5213 deletions(-) diff --git a/src/content/docs/support/troubleshooting/general-troubleshooting/gathering-information-for-troubleshooting-sites.mdx b/src/content/docs/support/troubleshooting/general-troubleshooting/gathering-information-for-troubleshooting-sites.mdx index 49528399c6becf..baa099c352fe87 100644 --- a/src/content/docs/support/troubleshooting/general-troubleshooting/gathering-information-for-troubleshooting-sites.mdx +++ b/src/content/docs/support/troubleshooting/general-troubleshooting/gathering-information-for-troubleshooting-sites.mdx @@ -2,7 +2,6 @@ pcx_content_type: troubleshooting source: https://support.cloudflare.com/hc/en-us/articles/203118044-Gathering-information-for-troubleshooting-sites title: Gathering information for troubleshooting sites - --- ## Overview @@ -12,10 +11,10 @@ It is important to capture as much information as possible to diagnose an issue :::note Cloudflare support cannot make configuration changes on behalf of -customers due to security and liability concerns. +customers due to security and liability concerns. ::: -*** +--- ## Generate a HAR file @@ -28,7 +27,6 @@ information, and private keys. Remove sensitive information using a [HAR Sanitizer](https://har-sanitizer.pages.dev/). - ::: Some browsers either require a browser extension or cannot generate a HAR. When installing a browser extension, follow the instructions from the extension provider. @@ -37,7 +35,7 @@ Some browsers either require a browser extension or cannot generate a HAR. When 1. 
In a browser page, right-click anywhere and select **Inspect Element**. -2. The developer tools either appear at the bottom or left side of the browser. Click the **Network** tab.  +2. The developer tools either appear at the bottom or left side of the browser. Click the **Network** tab. ![HAR network tab screenshot from Chrome developer tools](~/assets/images/support/gathering_har_file_network.png) @@ -51,7 +49,7 @@ Some browsers either require a browser extension or cannot generate a HAR. When ![HAR save menu in Chrome developer tools.](~/assets/images/support/gathering_har_file_save.png) - 6. Attach the HAR file to your support ticket. +6. Attach the HAR file to your support ticket. ### In Firefox @@ -81,7 +79,7 @@ Some browsers either require a browser extension or cannot generate a HAR. When ### In Mobile -**For Android:**  +**For Android:** 1. Enable USB Debugging mode on your mobile device. @@ -107,13 +105,13 @@ Some browsers either require a browser extension or cannot generate a HAR. When 9. Attach the HAR file to your support ticket alongside a screen recording from the affected Samsung device. Instructions on how to do this from Samsung devices can be found in [Samsung's documentation here](https://www.samsung.com/au/support/mobile-devices/screen-recorder/). -*** +--- **For iPhone:** Refer to [Okta](https://support.okta.com/help/s/article/How-to-generate-a-HAR-capture-on-an-iOS-device?language=en_US) or [Apple's](https://developer.apple.com/library/archive/documentation/AppleApplications/Conceptual/Safari_Developer_Guide/GettingStarted/GettingStarted.html#//apple_ref/doc/uid/TP40007874-CH2-SW1) support article on how to generate a HAR file from an iOS device. Attach the HAR file to your support ticket alongside a screen recording from the affected iOS device. Apple devices now have [built-in screen recording functionality](https://support.apple.com/en-us/HT207935). -*** +--- ## Export Console Log @@ -153,7 +151,7 @@ In certain situations when request is not issued or cancelled by the browser (fo 4. Select all the messages, right click and **Save Selected** to a log file. 5. Attach the log file to your support ticket. -*** +--- ## Capture a NetLog dump @@ -161,14 +159,14 @@ In some cases, in order to further troubleshoot issues related to protocols (err :::caution -You can only generate a NetLog dump on the Google Chrome, Opera or Microsoft Edge browsers. +You can only generate a NetLog dump on the Google Chrome, Opera or Microsoft Edge browsers. ::: 1. Open a new tab and enter the following depending on the browser you're using: -* `chrome://net-export` -* `edge://net-export` -* `opera://net-export` +- `chrome://net-export` +- `edge://net-export` +- `opera://net-export` 2. Click the **Start Logging To Disk** button. 3. Reproduce the network problem in a different tab. @@ -176,7 +174,7 @@ You can only generate a NetLog dump on the Google Chrome, Opera or Microsoft Edg 4. Click **Stop Logging** button. 5. Attach the log file to your support ticket. -*** +--- ## Identify the Cloudflare data center serving your request @@ -187,36 +185,36 @@ Determine the Cloudflare data center serving requests for your browser by visiti Replace `www.example.com` with your domain and hostname.  Note the `colo` field from the output. 
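For reference, the trace endpoint returns a short plain-text report. The values below are illustrative — your `ip`, `ts`, and `colo` fields will differ:

```sh
curl https://www.example.com/cdn-cgi/trace
```

```sh output
h=www.example.com
ip=203.0.113.1
ts=1724166849.123
visit_scheme=https
colo=DFW
http=http/2
tls=TLSv1.3
```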
-*** +--- ## Troubleshoot requests with curl [curl](https://curl.se/) is a command line tool for sending HTTP/HTTPS requests and is useful for troubleshooting: -* HTTP/HTTPS Performance -* HTTP Error Responses -* HTTP Headers -* APIs -* Comparing Server/Proxy Responses -* SSL Certificates +- HTTP/HTTPS Performance +- HTTP Error Responses +- HTTP Headers +- APIs +- Comparing Server/Proxy Responses +- SSL Certificates :::note If you are using Windows, you can find more details on how to use curl on Windows in our [Making API calls on Windows -](/fundamentals/api/how-to/make-api-calls/#making-api-calls-on-windows) article. +](/fundamentals/api/how-to/make-api-calls/#making-api-calls-on-windows) article. ::: Run the following command to send a standard HTTP GET request to your website (replace `www.example.com` with your hostname): ``` -$ curl -svo /dev/null http://www.example.com/ +curl -svo /dev/null http://www.example.com/ ``` This example curl command returns output detailing the HTTP response and request headers but discards the page body output. curl output confirms the HTTP response and whether Cloudflare is currently proxying traffic for the site. :::note -Review the [curl command options](https://curl.se/docs/manpage.html) for additional functionality. +Review the [curl command options](https://curl.se/docs/manpage.html) for additional functionality. ::: View the sections below for tips on troubleshooting HTTP errors, performance, caching, and SSL/TLS certificates: @@ -226,7 +224,7 @@ View the sections below for tips on troubleshooting HTTP errors, performance, ca When troubleshooting HTTP errors in responses from Cloudflare, test whether your origin caused the errors by sending requests directly to your origin web server. To troubleshoot HTTP errors, run a curl directly to your origin web server IP address (bypassing Cloudflare’s proxy): ``` -$ curl -svo /dev/null http://example.com --connect-to ::203.0.113.34 +curl -svo /dev/null http://example.com --connect-to ::203.0.113.34 ``` :::note @@ -234,7 +232,7 @@ $ curl -svo /dev/null http://example.com --connect-to ::203.0.113.34 If you have multiple origin web servers, test each one to ensure there are no response differences. If you observe the issue when connecting directly to your origin web server, contact your hosting provider for -assistance. +assistance. ::: ### Performance @@ -267,22 +265,22 @@ curl -svo /dev/null https://example.com/ -w "\nContent Type: %{content_type} \ As demonstrated in the preceding example, cleaner results are achieved by denoting a new line with `\n` before each variable. Otherwise, all -metrics are displayed together on a single line. +metrics are displayed together on a single line. ::: ### Caching cURL helps review the HTTP response headers that influence caching. In particular, review several HTTP headers when troubleshooting Cloudflare caching: -* CF-Cache-Status -* Cache-Control/Pragma -* Expires -* Last-Modified -* s-maxage +- CF-Cache-Status +- Cache-Control/Pragma +- Expires +- Last-Modified +- s-maxage :::note -You can refer to the [Cloudflare Cache documentation](/cache/get-started/) for more details. +You can refer to the [Cloudflare Cache documentation](/cache/get-started/) for more details. 
::: ### SSL/TLS certificates @@ -292,37 +290,37 @@ You can refer to the [Cloudflare Cache documentation](/cache/get-started/) for m The following curl command shows the SSL certificate served by Cloudflare during an HTTPS request (replace `www.example.com` with your hostname): ```sh -$ curl -svo /dev/null https://www.example.com/ 2>&1 | egrep -v "^{.*$|^}.*$|^* http.*$" +curl -svo /dev/null https://www.example.com/ 2>&1 | egrep -v "^{.*$|^}.*$|^* http.*$" ``` :::note `2\*>&1 | egrep -v "^{.*$|^}.*$|^\* http.\*$" \*` cleans and -parses the TLS handshake and certificate information. +parses the TLS handshake and certificate information. ::: To display the origin certificate (assuming one is installed), replace `203.0.113.34` below with the actual IP address of your origin web server and replace `www.example.com` with your domain and hostname: ```sh -$ curl -svo /dev/null https://www.example.com --connect-to ::203.0.113.34 2>&1 | egrep -v "^{.*$|^}.*$|^* http.*$" +curl -svo /dev/null https://www.example.com --connect-to ::203.0.113.34 2>&1 | egrep -v "^{.*$|^}.*$|^* http.*$" ``` #### Testing TLS Versions If troubleshooting browser support or confirming what TLS versions are supported, curl allows you to test a specific TLS version by adding the [--tlsv1.X](https://curl.se/docs/manpage.html#--tlsv10) and [--tls-max](https://curl.se/docs/manpage.html#--tls-max) options to your curl: -* `--tlsv1.0 --tls-max 1.0` -* `--tlsv1.1 --tls-max 1.1` -* `--tlsv1.2 --tls-max 1.2` -* `--tlsv1.3 --tls-max 1.3` +- `--tlsv1.0 --tls-max 1.0` +- `--tlsv1.1 --tls-max 1.1` +- `--tlsv1.2 --tls-max 1.2` +- `--tlsv1.3 --tls-max 1.3` -*** +--- ## Temporarily pause Cloudflare For more details, refer to [Pause Cloudflare](/fundamentals/setup/manage-domains/pause-cloudflare/). -*** +--- ## Perform a traceroute @@ -331,7 +329,7 @@ Traceroute is a network diagnostic tool that measures the route latency of packe :::note Timeouts are possible for ping results because Cloudflare limits ping -requests. +requests. ::: Review the instructions below for running traceroute on different operating systems. Replace `www.example.com` with your domain and hostname in the examples below: @@ -347,13 +345,13 @@ Review the instructions below for running traceroute on different operating sys 4. At the command line prompt, type: For IPv4 - ```sh -$ tracert www.example.com +tracert www.example.com ``` For IPv6 - ```sh -$ tracert -6 www.example.com +tracert -6 www.example.com ``` 5. Press **Enter**. @@ -369,13 +367,13 @@ $ tracert -6 www.example.com For IPv4 - ```sh -$ traceroute www.example.com +traceroute www.example.com ``` For IPv6 - ```sh -$ traceroute -6 www.example.com +traceroute -6 www.example.com ``` 3. You can copy the results to save in a file or paste in another program. @@ -389,7 +387,7 @@ $ traceroute -6 www.example.com Alternatively, follow the same Linux traceroute instructions above when using the Mac OS terminal program. -*** +--- ## Add the CF-RAY header to your logs @@ -410,7 +408,7 @@ log_format cf_custom '$remote_addr - $remote_user [$time_local] ' '$http_cf_ray'; ``` -*** +--- ## Perform a MTR @@ -423,7 +421,7 @@ Like traceroute, MTR can use ICMP or UDP for outgoing packets but relies on ICMP :::note For MacOS users, MTR can be installed through [homebrew](https://formulae.brew.sh/formula/mtr). -For Windows users, see [WinMTR](https://github.com/White-Tiger/WinMTR/releases). +For Windows users, see [WinMTR](https://github.com/White-Tiger/WinMTR/releases). ::: ### How do I use MTR to generate network path report? 
@@ -433,18 +431,18 @@ For Windows users, see [WinMTR](https://github.com/White-Tiger/WinMTR/releases). Generally, we'd use MTR as the following: ```sh -$ mtr -rw e.g.: mtr -rw one.one.one.one +mtr -rw e.g.: mtr -rw one.one.one.one ``` or with destination IP: ```sh -$ mtr -rw e.g.: mtr -rw 1.1.1.1 +mtr -rw e.g.: mtr -rw 1.1.1.1 ``` Please refer to this documentation, which explains more about analysing MTR: [How to read MTR](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtr/).[](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtr/) -*** +--- ## Run Packet Captures @@ -459,24 +457,24 @@ Some HTTP errors generated by Cloudflare, such as [520s](https://support.cloudfl :::caution Please be aware, if you transmit any sensitive information while a -packet capture is running, it will be recorded. +packet capture is running, it will be recorded. ::: -Cloudflare suggests [Wireshark](https://www.wireshark.org/download.html) for running packet captures. For instructions on how to use the *tcpdump* command line, refer to [this](https://www.wireshark.org/docs/wsug_html_chunked/AppToolstcpdump.html) article. +Cloudflare suggests [Wireshark](https://www.wireshark.org/download.html) for running packet captures. For instructions on how to use the _tcpdump_ command line, refer to [this](https://www.wireshark.org/docs/wsug_html_chunked/AppToolstcpdump.html) article. 1. Close all programs/browser tabs that could be sending data in the background to avoid having to use a lot of display filters later. 2. Create your Wireshark capture filter (refer to [this](https://wiki.wireshark.org/CaptureFilters) article for more information). 3. Select the appropriate interface (e.g. Wi-Fi: en0). If you're not sure which interface to use, Wireshark provides an I/O graph of each interface to give you a hint. -4. Click the blue shark fin icon in the top left-hand corner to start your packet capture.  +4. Click the blue shark fin icon in the top left-hand corner to start your packet capture. 5. Reproduce the issue while running capture. -6. Click the red square icon in the top left-hand corner to stop your packet capture.  +6. Click the red square icon in the top left-hand corner to stop your packet capture. 7. Save as a `.pcap` file and attach it to your support ticket. 
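If you prefer the command line to Wireshark, a minimal `tcpdump` capture might look like the following sketch (it assumes `en0` is your active interface — adjust the interface and hostname for your environment):

```sh
# Capture traffic to and from your site and write it to a .pcap file.
sudo tcpdump -i en0 -w capture.pcap host www.example.com
```

Stop the capture with `Ctrl+C` after reproducing the issue, then attach the resulting `capture.pcap` file to your support ticket.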
-*** +--- ## Related resources -* [Contacting Cloudflare Support](/support/contacting-cloudflare-support/) -* [Troubleshooting Cloudflare HTTP 5XX errors](https://support.cloudflare.com/hc/articles/115003011431) -* [Diagnosing network issues with MTR and traceroute](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtr/) -* [cURL command line tool](https://curl.haxx.se/) +- [Contacting Cloudflare Support](/support/contacting-cloudflare-support/) +- [Troubleshooting Cloudflare HTTP 5XX errors](https://support.cloudflare.com/hc/articles/115003011431) +- [Diagnosing network issues with MTR and traceroute](https://www.cloudflare.com/en-gb/learning/network-layer/what-is-mtr/) +- [cURL command line tool](https://curl.haxx.se/) diff --git a/src/content/docs/support/troubleshooting/restoring-visitor-ips/restoring-original-visitor-ips.mdx b/src/content/docs/support/troubleshooting/restoring-visitor-ips/restoring-original-visitor-ips.mdx index 33d490a9785bfe..7a9c891dc76917 100644 --- a/src/content/docs/support/troubleshooting/restoring-visitor-ips/restoring-original-visitor-ips.mdx +++ b/src/content/docs/support/troubleshooting/restoring-visitor-ips/restoring-original-visitor-ips.mdx @@ -2,21 +2,18 @@ pcx_content_type: troubleshooting source: https://support.cloudflare.com/hc/en-us/articles/200170786-Restoring-original-visitor-IPs title: Restoring original visitor IPs - --- -import { Render } from "~/components" +import { Render } from "~/components"; When your [website traffic is routed through the Cloudflare network](https://support.cloudflare.com/hc/articles/205177068), we act as a reverse proxy. This allows Cloudflare to speed up page load time by routing packets more efficiently and caching static resources (images, JavaScript, CSS, etc.). As a result, when responding to requests and logging them, your origin server returns a [Cloudflare IP address](https://www.cloudflare.com/ips/). -For example, if you install applications that depend on the incoming IP address of the original visitor, a Cloudflare IP address is logged by default. The original visitor IP address appears in an appended HTTP header called [*CF-Connecting-IP*](/fundamentals/reference/http-request-headers/). By following our [web server instructions](#web-server-instructions), you can log the original visitor IP address at your origin server. If this HTTP header is not available when requests reach your origin server, check your [Transform Rules](/rules/transform/) and [Managed Transforms](/rules/transform/managed-transforms/) configuration. +For example, if you install applications that depend on the incoming IP address of the original visitor, a Cloudflare IP address is logged by default. The original visitor IP address appears in an appended HTTP header called [_CF-Connecting-IP_](/fundamentals/reference/http-request-headers/). By following our [web server instructions](#web-server-instructions), you can log the original visitor IP address at your origin server. If this HTTP header is not available when requests reach your origin server, check your [Transform Rules](/rules/transform/) and [Managed Transforms](/rules/transform/managed-transforms/) configuration. :::note - - ::: The diagram below illustrates the different ways that IP addresses are handled with and without Cloudflare. 
@@ -25,29 +22,29 @@ The diagram below illustrates the different ways that IP addresses are handled w :::caution -Cloudflare no longer updates and supports *mod\_cloudflare*, starting +Cloudflare no longer updates and supports _mod_cloudflare_, starting with versions **Debian 9** and **Ubuntu 18.04 LTS** of the Linux operating system. We now recommend -[*mod\_remoteip*](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) +[_mod_remoteip_](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) for customers using Apache web servers. Customers who are interested in -building the *mod\_cloudflare* package can [download the -codebase](https://github.com/cloudflare/mod_cloudflare) from GitHub. +building the _mod_cloudflare_ package can [download the +codebase](https://github.com/cloudflare/mod_cloudflare) from GitHub. ::: -*** +--- -## mod\_remoteip +## mod_remoteip -Cloudflare no longer updates and supports *mod\_cloudflare.* However, if you are using an Apache web server with an operating system such as **Ubuntu Server 18.04** and **Debian 9 Stretch**, you can use *mod\_remoteip* to log your visitor’s original IP address. +Cloudflare no longer updates and supports *mod_cloudflare.* However, if you are using an Apache web server with an operating system such as **Ubuntu Server 18.04** and **Debian 9 Stretch**, you can use *mod_remoteip* to log your visitor’s original IP address. **As this module was created by an outside party, we can't provide technical support for issues related to the plugin.** -To install *mod\_remoteip* on your Apache web server: +To install *mod_remoteip* on your Apache web server: -1. Enable *mod\_remoteip* by issuing the following command: +1. Enable *mod_remoteip* by issuing the following command: ```sh -$ sudo a2enmod remoteip +sudo a2enmod remoteip ``` 2. Update the site configuration to include *RemoteIPHeader CF-Connecting-IP*, e.g. `/etc/apache2/sites-available/000-default.conf` @@ -85,7 +82,10 @@ RemoteIPTrustedProxy 192.0.2.2 (example IP address) 5. Enable Apache configuration: ```sh -$ sudo a2enconf remoteip +sudo a2enconf remoteip +``` + +```sh output Enabling conf remoteip. To activate the new configuration, you need to run: service apache2 reload @@ -94,48 +94,51 @@ service apache2 reload 6. Test Apache configuration: ```sh -$ sudo apache2ctl configtest +sudo apache2ctl configtest +``` + +```sh output Syntax OK ``` 7. Restart Apache: ```sh -$ sudo systemctl restart apache2 +sudo systemctl restart apache2 ``` :::note -For more information on *mod\_remoteip*, refer to the [Apache -documentation](https://httpd.apache.org/docs/2.4/mod/mod_remoteip.html "Apache Module mod_remoteip"). +For more information on _mod_remoteip_, refer to the [Apache +documentation](https://httpd.apache.org/docs/2.4/mod/mod_remoteip.html "Apache Module mod_remoteip"). ::: -*** +--- -## mod\_cloudflare +## mod_cloudflare :::caution -Cloudflare no longer updates and supports *mod\_cloudflare*, starting +Cloudflare no longer updates and supports _mod_cloudflare_, starting with versions **Debian 9** and **Ubuntu 18.04 LTS** of the Linux operating system. We now recommend -[*mod\_remoteip*](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) +[_mod_remoteip_](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) for customers using Apache web servers. Customers who are interested in -building the *mod\_cloudflare* package can [download the -codebase](https://github.com/cloudflare/mod_cloudflare) from GitHub. 
+building the _mod_cloudflare_ package can [download the +codebase](https://github.com/cloudflare/mod_cloudflare) from GitHub. ::: ### Installing -There are two methods for installing mod\_cloudflare: by downloading the Apache extension from GitHub or by adding code to your origin web server. +There are two methods for installing mod_cloudflare: by downloading the Apache extension from GitHub or by adding code to your origin web server. #### Downloading packets or scripts from GitHub -If you are using an Apache web server, you can download mod\_cloudflare from [GitHub](https://github.com/cloudflare/mod_cloudflare). +If you are using an Apache web server, you can download mod_cloudflare from [GitHub](https://github.com/cloudflare/mod_cloudflare). #### Adding code to your origin web server -If you can't install mod\_cloudflare, or if there is no Cloudflare plugin available for your content management system platform to restore original visitor IP, add this code to your origin web server in or before the `` tag on any page that needs the original visitor IPs: +If you can't install mod_cloudflare, or if there is no Cloudflare plugin available for your content management system platform to restore original visitor IP, add this code to your origin web server in or before the `` tag on any page that needs the original visitor IPs: ```php @@ -147,25 +150,25 @@ This command will only make the IP address available to scripts that need it. It #### Apache -To remove *mod\_cloudflare*, you should comment out the Apache config line that loads *mod\_cloudflare*. +To remove *mod_cloudflare*, you should comment out the Apache config line that loads *mod_cloudflare*. This varies based on your Linux distribution, but for most people, if you look `in /etc/apache2`, you should be able to search to find the line: `LoadModule cloudflare_module` -Comment or remove this line, then restart apache, and *mod\_cloudflare* should be gone. +Comment or remove this line, then restart apache, and *mod_cloudflare* should be gone. If you are running Ubuntu or Debian, you should see. `file/etc/apache2/mods-enabled/cloudflare.load` -delete this file to remove *mod\_cloudflare*, then restart Apache. +delete this file to remove *mod_cloudflare*, then restart Apache. #### Nginx -*mod\_cloudflare* is not needed for Nginx. Use the [`ngx_http_realip_module` NGINX module](http://nginx.org/en/docs/http/ngx_http_realip_module.html) and the configuration parameters described in the [Web server instructions](https://developers.cloudflare.com/support/troubleshooting/restoring-visitor-ips/restoring-original-visitor-ips/#web-server-instructions) instead. +_mod_cloudflare_ is not needed for Nginx. Use the [`ngx_http_realip_module` NGINX module](http://nginx.org/en/docs/http/ngx_http_realip_module.html) and the configuration parameters described in the [Web server instructions](https://developers.cloudflare.com/support/troubleshooting/restoring-visitor-ips/restoring-original-visitor-ips/#web-server-instructions) instead. -*** +--- ## Web server instructions @@ -175,25 +178,25 @@ Refer below for instructions on how to configure your web server to log origina :::caution -Cloudflare no longer updates and supports *mod\_cloudflare*, starting +Cloudflare no longer updates and supports _mod_cloudflare_, starting with versions **Debian 9** and **Ubuntu 18.04 LTS** of the Linux operating system. 
We now recommend -[*mod\_remoteip*](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) +[_mod_remoteip_](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) for customers using Apache web servers. Customers who are interested in -building the *mod\_cloudflare* package can [download the -codebase](https://github.com/cloudflare/mod_cloudflare) from GitHub. +building the _mod_cloudflare_ package can [download the +codebase](https://github.com/cloudflare/mod_cloudflare) from GitHub. ::: 1. Make sure the following is installed: - * Red Hat/Fedora`sudo yum install httpd-devel libtool git` - * Debian/Ubuntu`sudo apt-get install apache2-dev libtool git` -2. Clone the following for the most recent build of *mod\_cloudflare*: - * Red Hat/Fedora/Debian/Ubuntu:`git clone https://github.com/cloudflare/mod_cloudflare.git; cd mod_cloudflare` + - Red Hat/Fedora`sudo yum install httpd-devel libtool git` + - Debian/Ubuntu`sudo apt-get install apache2-dev libtool git` +2. Clone the following for the most recent build of *mod_cloudflare*: + - Red Hat/Fedora/Debian/Ubuntu:`git clone https://github.com/cloudflare/mod_cloudflare.git; cd mod_cloudflare` 3. Use the Apache extension tool to convert the .c file into a module: - * Red Hat/Fedora/Debian/Ubuntu:`apxs -a -i -c mod_cloudflare.c` + - Red Hat/Fedora/Debian/Ubuntu:`apxs -a -i -c mod_cloudflare.c` 4. Restart and verify the module is active: - * Red Hat/Fedora`service httpd restart; httpd -M|grep cloudflare` - * Debian/Ubuntu:`sudo apachectl restart; apache2ctl -M|grep cloudflare` + - Red Hat/Fedora`service httpd restart; httpd -M|grep cloudflare` + - Debian/Ubuntu:`sudo apachectl restart; apache2ctl -M|grep cloudflare` 5. If your web server is behind a load balancer, add the following line to your Apache configuration (httpd.conf usually) and replace 123.123.123.123 with your load balancer's IP address: ``` @@ -222,8 +225,8 @@ That list of prefixes needs to be updated regularly, and we publish the full lis :::note To Include the original visitor IP in your logs, add the variables -$http\_cf\_connecting\_ip and $http\_x\_forwarded\_for in the -log\_format directive. +$http_cf_connecting_ip and $http_x_forwarded_for in the +log_format directive. ::: Also refer to: [Cloudflare and NGINX](https://danielmiessler.com/blog/getting-real-ip-addresses-using-cloudflare-nginx-and-varnish/). @@ -232,17 +235,17 @@ Also refer to: [Cloudflare and NGINX](https://danielmiessler.com/blog/getting-r :::caution -Cloudflare no longer updates and supports *mod\_cloudflare*, starting +Cloudflare no longer updates and supports _mod_cloudflare_, starting with versions **Debian 9** and **Ubuntu 18.04 LTS** of the Linux operating system. We now recommend -[*mod\_remoteip*](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) +[_mod_remoteip_](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) for customers using Apache web servers. Customers who are interested in -building the *mod\_cloudflare* package can [download the -codebase](https://github.com/cloudflare/mod_cloudflare) from GitHub. +building the _mod_cloudflare_ package can [download the +codebase](https://github.com/cloudflare/mod_cloudflare) from GitHub. ::: -1. Run the following script to install mod\_cloudflare as part of EasyApache: `bash <(curl -s https://raw.githubusercontent.com/cloudflare/mod_cloudflare/master/EasyApache/installer.sh)` -2. Upon installing, you will need to recompile your Apache with the new mod\_cloudflare plugin. +1. 
Run the following script to install mod_cloudflare as part of EasyApache: `bash <(curl -s https://raw.githubusercontent.com/cloudflare/mod_cloudflare/master/EasyApache/installer.sh)` +2. Upon installing, you will need to recompile your Apache with the new mod_cloudflare plugin. 3. To fix this, open up your Apache configuration. This can typically be found in `/etc/apache2/apache2.conf`, `/etc/httpd/httpd.conf`, `/usr/local/apache/conf/httpd.conf` or another location depending on configuration. If you're unsure, ask your hosting provider. 4. At the very end add:`CloudflareRemoteIPTrustedProxy {LOOPBACK_ADDRESS}` So, if your server is located at 127.0.0.1, it will look like:`CloudflareRemoteIPTrustedProxy 127.0.0.1` 5. If you have more than one server to add to the trusted proxy list, you can add them at the end: CloudflareRemoteIPTrustedProxy 127.0.0.1 127.0.0.2 @@ -251,7 +254,7 @@ codebase](https://github.com/cloudflare/mod_cloudflare) from GitHub. To have Lighttpd automatically rewrite the server IP for the access logs and for your application, you can follow one of the two solutions below. -1. Open your **lighttpd.conf** file and add *mod\_extforward* to the *server.modules* list. It must come **after** *mod\_accesslog* to show the real IP in the access logs +1. Open your **lighttpd.conf** file and add *mod_extforward* to the *server.modules* list. It must come **after** *mod_accesslog* to show the real IP in the access logs 2. Add the following code block anywhere in the **lighttpd.conf** file after the server modules list and then restart Lighttpd ``` @@ -269,7 +272,7 @@ If your origin connects to the Internet with IPv6, ranges does not work when IPv6 is enabled. Using the above method will not work when trying to forward IP ranges. Add the following lines to lighttpd.conf as an alternative solution: -`extforward.forwarder = ( "all" => "trust" ) extforward.headers = ("CF-Connecting-IP")` +`extforward.forwarder = ( "all" => "trust" ) extforward.headers = ("CF-Connecting-IP")` ::: ### LiteSpeed server @@ -338,7 +341,7 @@ If your network environment means requests are handled through a proxy (such as ### PHPBB -If you are using an Apache server, then we would recommend installing [mod\_remoteip](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) to restore the visitor IP back to your logs. +If you are using an Apache server, then we would recommend installing [mod_remoteip](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) to restore the visitor IP back to your logs. If you do not have access to your server to install a mod, then you may be able to [modify the core](https://www.phpbb.com/community/viewtopic.php?p=13936406#p13936406). @@ -417,15 +420,15 @@ As this plugin was created by an outside party, we can't provide technical suppo If you use the hosting control panel VestaCP, you have both Nginx and Apache running on your server. Requests are proxied through Nginx before going to Apache. -Because of this Nginx proxy, you actually need to the instructions to configure Nginx to return the real visitor IP address. [Mod\_remoteip](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) for Apache is not needed unless you disable the Nginx server for some requests. Adding [mod\_remoteip](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) to Apache will not conflict with the Nginx server configuration. 
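Because the published Cloudflare IP ranges change over time, it can help to generate the Nginx trusted-proxy configuration with a script instead of maintaining it by hand. The sketch below assumes the plain-text list endpoints and the `ngx_http_realip_module` directives described in the Nginx section above:

```sh
# Emit one set_real_ip_from directive per published Cloudflare prefix,
# followed by the header that carries the original visitor IP.
for prefix in $(curl -s https://www.cloudflare.com/ips-v4 https://www.cloudflare.com/ips-v6); do
  echo "set_real_ip_from $prefix;"
done
echo "real_ip_header CF-Connecting-IP;"
```

Redirect the output to an include file and reload Nginx to pick up the updated list.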
+Because of this Nginx proxy, you actually need to the instructions to configure Nginx to return the real visitor IP address. [Mod_remoteip](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) for Apache is not needed unless you disable the Nginx server for some requests. Adding [mod_remoteip](https://support.cloudflare.com/hc/articles/200170786#C5XWe97z77b3XZV) to Apache will not conflict with the Nginx server configuration. ### node.js -An outside developer has created a module to restore visitor IP called [node\_cloudflare.](https://github.com/keverw/node_CloudFlare) +An outside developer has created a module to restore visitor IP called [node_cloudflare.](https://github.com/keverw/node_CloudFlare) ### HAProxy -In order to extract the original client IP in the X\_FORWARDED\_FOR header, you need to use the following configuration in HAProxy: +In order to extract the original client IP in the X_FORWARDED_FOR header, you need to use the following configuration in HAProxy: 1. Create a text file `CF_ips.lst` containing all IP ranges from [https://www.cloudflare.com/en-gb/ips/](https://www.cloudflare.com/en-gb/ips/) 2. Ensure to disable `option forwardfor` in HAProxy @@ -451,9 +454,9 @@ clientIPDetection: For more details, refer to [Custom header original IP detection extension](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/http/original_ip_detection/custom_header/v3/custom_header.proto). -*** +--- ## Related Resources -* [HTTP request headers](/fundamentals/reference/http-request-headers/) -* [Transform Rules](/rules/transform/) +- [HTTP request headers](/fundamentals/reference/http-request-headers/) +- [Transform Rules](/rules/transform/) diff --git a/src/content/docs/terraform/advanced-topics/import-cloudflare-resources.mdx b/src/content/docs/terraform/advanced-topics/import-cloudflare-resources.mdx index 668d6aeba11ab9..c6dd4a234daf49 100644 --- a/src/content/docs/terraform/advanced-topics/import-cloudflare-resources.mdx +++ b/src/content/docs/terraform/advanced-topics/import-cloudflare-resources.mdx @@ -1,13 +1,12 @@ --- pcx_content_type: tutorial title: Import Cloudflare resources - --- An important point to understand about Terraform is that it can only manage configuration it created or was explicitly told about after the fact. The reason for this limitation is that Terraform expects to be authoritative for the resources it manages. It relies on two types of files to understand what resources it controls and what state they are in. Terraform determines when and how to make changes from the following: -* A [configuration file](https://developer.hashicorp.com/terraform/language) (ending in `.tf`) that defines the configuration of resources for Terraform to manage. This is what you worked with in the tutorial steps. -* A local [state file](https://developer.hashicorp.com/terraform/language/state) that maps the resource names defined in your configuration file — for example, `cloudflare_load_balancer.www-lb` — to the resources that exist in Cloudflare. +- A [configuration file](https://developer.hashicorp.com/terraform/language) (ending in `.tf`) that defines the configuration of resources for Terraform to manage. This is what you worked with in the tutorial steps. +- A local [state file](https://developer.hashicorp.com/terraform/language/state) that maps the resource names defined in your configuration file — for example, `cloudflare_load_balancer.www-lb` — to the resources that exist in Cloudflare. 
When Terraform makes calls to Cloudflare's API to create new resources as explained in the [tutorial](/terraform/tutorial/), it persists those IDs to a state file. By default, Terraform uses the `terraform.tfstate` file in your directory, but this can also be a [remote location](https://developer.hashicorp.com/terraform/language/state/remote). These IDs are later looked up and refreshed when you call `terraform plan` and `terraform apply`. @@ -24,8 +23,8 @@ Before you start, you must install `cf-terraforming`. If you use Homebrew on macOS, open a terminal and run the following commands: ```sh -$ brew tap cloudflare/cloudflare -$ brew install cloudflare/cloudflare/cf-terraforming +brew tap cloudflare/cloudflare +brew install cloudflare/cloudflare/cf-terraforming ``` If you are using a different OS, [download the latest release](https://github.com/cloudflare/cf-terraforming/releases) from the `cf-terraforming` GitHub repository. @@ -48,8 +47,8 @@ The list of supported resources is available in the [Terraform README](https://g To start managing existing Cloudflare resources in Terraform, for example, DNS records, you need: -* The Terraform configuration of that resource (defined in a `.tf` file) -* An accompanying Terraform state file of that resources state (defined in a `.tfstate` file) +- The Terraform configuration of that resource (defined in a `.tf` file) +- An accompanying Terraform state file of that resources state (defined in a `.tfstate` file) ### Generate Terraform configuration with `cf-terraforming` @@ -67,7 +66,7 @@ Remember to keep your credentials saved in environment variables or terraform au Start by making a call to `cf-terraforming generate` to generate the Terraform configuration for the DNS records in the zone you want to manage with Terraform. ```sh -$ cf-terraforming generate --email $CLOUDFLARE_EMAIL --token $CLOUDFLARE_API_TOKEN -z 1109d899a5ff5fd74bc01e581693685b --resource-type cloudflare_record > importing-example.tf +cf-terraforming generate --email $CLOUDFLARE_EMAIL --token $CLOUDFLARE_API_TOKEN -z 1109d899a5ff5fd74bc01e581693685b --resource-type cloudflare_record > importing-example.tf ``` If you had not redirected the output to the `importing-example.tf` file, the result displayed in the standard output (your terminal window) would look like the following: @@ -113,8 +112,10 @@ resource "cloudflare_record" "terraform_managed_resource_5799bb01054843eea726758 Calling `terraform plan` at this point will try to create these resources as if they did not exist, since they are not present in the local state file: ```sh -$ terraform plan +terraform plan +``` +```sh output Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + create @@ -208,7 +209,7 @@ When you run `cf-terraforming import ...`, you will obtain a list of `terraform 1. Run the following command: ```sh - $ cf-terraforming import --resource-type "cloudflare_record" --email $CLOUDFLARE_EMAIL --key $CLOUDFLARE_API_KEY --zone $CLOUDFLARE_ZONE_ID + cf-terraforming import --resource-type "cloudflare_record" --email $CLOUDFLARE_EMAIL --key $CLOUDFLARE_API_KEY --zone $CLOUDFLARE_ZONE_ID ``` 2. Copy each `terraform import ...` command included in the output and run it. Terraform will import each resource individually into Terraform state. 
@@ -225,7 +226,10 @@ terraform import cloudflare_record.terraform_managed_resource_5799bb01054843eea7 You would run each command individually in the terminal: ```sh -$ terraform import cloudflare_record.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31 1109d899a5ff5fd74bc01e581693685b/3c0b456bc2aa443089c5f40f45f51b31 +terraform import cloudflare_record.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31 1109d899a5ff5fd74bc01e581693685b/3c0b456bc2aa443089c5f40f45f51b31 +``` + +```sh output cloudflare_record.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Importing from ID "1109d899a5ff5fd74bc01e581693685b/3c0b456bc2aa443089c5f40f45f51b31"... cloudflare_record.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Import complete! Imported cloudflare_record [id=3c0b456bc2aa443089c5f40f45f51b31] @@ -235,8 +239,13 @@ Import successful! The resources that were imported are shown above. These resources are now in your Terraform state and will henceforth be managed by Terraform. +``` -$ terraform import cloudflare_record.terraform_managed_resource_5e10399a590a45279f09aa8fb1163354 1109d899a5ff5fd74bc01e581693685b/d09d916d059aa9fc8cb54bdd49deea5f +```sh +terraform import cloudflare_record.terraform_managed_resource_5e10399a590a45279f09aa8fb1163354 1109d899a5ff5fd74bc01e581693685b/d09d916d059aa9fc8cb54bdd49deea5f +``` + +```sh output cloudflare_record.terraform_managed_resource_5e10399a590a45279f09aa8fb1163354: Importing from ID "1109d899a5ff5fd74bc01e581693685b/d09d916d059aa9fc8cb54bdd49deea5f"... cloudflare_record.terraform_managed_resource_5e10399a590a45279f09aa8fb1163354: Import complete! Imported cloudflare_record [id=d09d916d059aa9fc8cb54bdd49deea5f] @@ -246,8 +255,13 @@ Import successful! The resources that were imported are shown above. These resources are now in your Terraform state and will henceforth be managed by Terraform. +``` -$ terraform import cloudflare_record.terraform_managed_resource_de1cb74bae184b569bb7f83fefe72248 1109d899a5ff5fd74bc01e581693685b/8d6ec0d02c5b22212ff673782c816ef8 +```sh +terraform import cloudflare_record.terraform_managed_resource_de1cb74bae184b569bb7f83fefe72248 1109d899a5ff5fd74bc01e581693685b/8d6ec0d02c5b22212ff673782c816ef8 +``` + +```sh output cloudflare_record.terraform_managed_resource_de1cb74bae184b569bb7f83fefe72248: Importing from ID "1109d899a5ff5fd74bc01e581693685b/8d6ec0d02c5b22212ff673782c816ef8"... cloudflare_record.terraform_managed_resource_de1cb74bae184b569bb7f83fefe72248: Import complete! Imported cloudflare_record [id=8d6ec0d02c5b22212ff673782c816ef8] @@ -257,8 +271,13 @@ Import successful! The resources that were imported are shown above. These resources are now in your Terraform state and will henceforth be managed by Terraform. +``` -$ terraform import cloudflare_record.terraform_managed_resource_5799bb01054843eea726758f935d2aa2 1109d899a5ff5fd74bc01e581693685b/3766b952a2dda4c47e71952aeef33c77 +```sh +terraform import cloudflare_record.terraform_managed_resource_5799bb01054843eea726758f935d2aa2 1109d899a5ff5fd74bc01e581693685b/3766b952a2dda4c47e71952aeef33c77 +``` + +```sh output cloudflare_record.terraform_managed_resource_5799bb01054843eea726758f935d2aa2: Importing from ID "1109d899a5ff5fd74bc01e581693685b/3766b952a2dda4c47e71952aeef33c77"... cloudflare_record.terraform_managed_resource_5799bb01054843eea726758f935d2aa2: Import complete! Imported cloudflare_record [id=3766b952a2dda4c47e71952aeef33c77] @@ -273,6 +292,9 @@ your Terraform state and will henceforth be managed by Terraform. 
If you now run `terraform plan`, you will notice that Terraform will no longer try to re-create the `cloudflare_record` resources: ```sh -$ terraform plan | grep changes +terraform plan | grep changes +``` + +```sh output No changes. Infrastructure is up-to-date. ``` diff --git a/src/content/docs/terraform/advanced-topics/provider-customization.mdx b/src/content/docs/terraform/advanced-topics/provider-customization.mdx index e3b1614b1386f7..421c41fb4003e5 100644 --- a/src/content/docs/terraform/advanced-topics/provider-customization.mdx +++ b/src/content/docs/terraform/advanced-topics/provider-customization.mdx @@ -1,7 +1,6 @@ --- pcx_content_type: reference title: Provider customization - --- Terraform communicates with cloud and global network provider APIs such as Cloudflare through modules known as providers. These providers are [installed automatically](/terraform/tutorial/initialize-terraform/#2-initialize-terraform-and-the-cloudflare-provider) when you run `terraform init` in a directory that has a `.tf` file containing a provider. @@ -12,7 +11,7 @@ Typically, the only required parameters to the provider are those required to au :::note -The examples below build on the [Cloudflare Terraform tutorial](/terraform/tutorial/). +The examples below build on the [Cloudflare Terraform tutorial](/terraform/tutorial/). ::: You can customize the Cloudflare Terraform provider using configuration parameters, specified either in your `.tf` configuration files or via environment variables, such as `$CLOUDFLARE_RPS`. Using environment variables may make sense when running Terraform from a CI/CD system or when the change is temporary and does not need to be persisted in your configuration history. @@ -25,9 +24,9 @@ Enterprise customers may request a limit increase by contacting their account te ```sh # Remove requests-per-second (RPS) limit for API calls performed by Terraform (default: 4). -$ export CLOUDFLARE_RPS= +export CLOUDFLARE_RPS= # Print logs from the API client using the default log library logger (default: false). -$ export CLOUDFLARE_API_CLIENT_LOGGING=true +export CLOUDFLARE_API_CLIENT_LOGGING=true # Maximum backoff period in seconds after failed API calls (default: 30). -$ export CLOUDFLARE_MAX_BACKOFF=20 +export CLOUDFLARE_MAX_BACKOFF=20 ``` diff --git a/src/content/docs/terraform/installing.mdx b/src/content/docs/terraform/installing.mdx index 44cfbe7fc64d82..1c640c5a0b3dc1 100644 --- a/src/content/docs/terraform/installing.mdx +++ b/src/content/docs/terraform/installing.mdx @@ -6,7 +6,6 @@ sidebar: head: - tag: title content: Install Terraform - --- Terraform ships as a single binary file. The examples below include installation information for popular operating systems. @@ -15,12 +14,10 @@ For official instructions on installing Terraform, refer to [Install Terraform]( :::caution - Terraform maintains your configuration state, which can be broken when you make configuration changes through both Terraform and either the Cloudflare Dashboard or API. To avoid this state, make sure you manage Terraform resources only in Terraform. For more details, refer to our [best practices](/terraform/advanced-topics/best-practices/). - ::: ## Mac @@ -28,8 +25,8 @@ To avoid this state, make sure you manage Terraform resources only in Terraform. The easiest way to install Terraform on macOS is with Homebrew. 
```sh -$ brew tap hashicorp/tap -$ brew install hashicorp/tap/terraform +brew tap hashicorp/tap +brew install hashicorp/tap/terraform ``` ## Linux @@ -37,21 +34,29 @@ $ brew install hashicorp/tap/terraform You can install the `terraform` binary via your distribution's package manager. For example: ```sh -$ sudo apt install terraform +sudo apt install terraform ``` Alternatively, you can fetch a specific version directly and place the binary in your `PATH`: ```sh -$ wget -q https://releases.hashicorp.com/terraform/1.4.5/terraform_1.4.5_linux_amd64.zip +wget -q https://releases.hashicorp.com/terraform/1.4.5/terraform_1.4.5_linux_amd64.zip + +unzip terraform_1.4.5_linux_amd64.zip +``` -$ unzip terraform_1.4.5_linux_amd64.zip +```sh output Archive: terraform_1.4.5_linux_amd64.zip inflating: terraform +``` + +```sh +sudo mv terraform /usr/local/bin/terraform -$ sudo mv terraform /usr/local/bin/terraform +terraform version +``` -$ terraform version +```sh output Terraform v1.4.5 ``` diff --git a/src/content/docs/terraform/tutorial/add-page-rules.mdx b/src/content/docs/terraform/tutorial/add-page-rules.mdx index bec0b874ff9a5d..a992f51965573f 100644 --- a/src/content/docs/terraform/tutorial/add-page-rules.mdx +++ b/src/content/docs/terraform/tutorial/add-page-rules.mdx @@ -6,7 +6,6 @@ sidebar: head: - tag: title content: Add exceptions with Page Rules - --- In the [Configure HTTPS settings](/terraform/tutorial/configure-https-settings/) tutorial, you configured zone settings that apply to all incoming requests for `example.com`. In this tutorial, you will add an exception to these settings using [Page Rules](/rules/page-rules/). @@ -18,10 +17,16 @@ Specifically, you will increase the security level for a URL known to be expensi Create a new branch and append the configuration. ```bash -$ git checkout -b step5-pagerule +git checkout -b step5-pagerule +``` + +```bash output Switched to a new branch 'step5-pagerule' +``` -$ cat >> cloudflare.tf <<'EOF' +```sh + +cat >> cloudflare.tf <<'EOF' resource "cloudflare_page_rule" "increase-security-on-expensive-page" { zone_id = var.zone_id target = "www.${var.domain}/expensive-db-call" @@ -52,7 +57,10 @@ EOF Preview the changes Terraform will make and then merge them into the `master` branch. ```sh -$ terraform plan +terraform plan +``` + +```sh output cloudflare_record.www-asia: Refreshing state... [id=fda39d8c9bf909132e82a36bab992864] cloudflare_record.www: Refreshing state... [id=c38d3103767284e7cd14d5dad3ab8669] cloudflare_zone_settings_override.example-com-settings: Refreshing state... [id=e2e6491340be87a3726f91fc4148b126] @@ -112,17 +120,32 @@ Plan: 2 to add, 0 to change, 0 to destroy. Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. +``` + +```sh -$ git add cloudflare.tf +git add cloudflare.tf +git commit -m "Step 5 - Add two Page Rules." +``` -$ git commit -m "Step 5 - Add two Page Rules." +```sh output [step5-pagerule d4fec16] Step 5 - Add two Page Rules. 1 file changed, 23 insertions(+) +``` + +```sh +git checkout master +``` -$ git checkout master +```sh output Switched to branch 'master' +``` + +```sh +git merge step5-pagerule +``` -$ git merge step5-pagerule +```sh output Updating 7a2ac34..d4fec16 Fast-forward cloudflare.tf | 23 +++++++++++++++++++++++ @@ -134,14 +157,20 @@ Fast-forward First, test request the (now missing) old location of the expensive-to-render page. 
```sh -$ curl -vso /dev/null https://www.example.com/old-location.php 2>&1 | grep "< HTTP\|Location" +curl -vso /dev/null https://www.example.com/old-location.php 2>&1 | grep "< HTTP\|Location" +``` + +```sh output < HTTP/1.1 404 Not Found ``` As expected, the location cannot be found. Apply the Page Rules, including the redirect that should fix this error. ```sh -$ terraform apply --auto-approve +terraform apply --auto-approve +``` + +```sh output cloudflare_record.www-asia: Refreshing state... [id=fda39d8c9bf909132e82a36bab992864] cloudflare_load_balancer_monitor.get-root-https: Refreshing state... [id=4238142473fcd48e89ef1964be72e3e0] cloudflare_zone_settings_override.example-com-settings: Refreshing state... [id=e2e6491340be87a3726f91fc4148b126] @@ -206,11 +235,19 @@ Apply complete! Resources: 2 added, 0 changed, 0 destroyed. With the Page Rules in place, try that call again, along with a test for the I'm Under Attack mode: ```sh -$ curl -vso /dev/null https://www.example.com/old-location.php 2>&1 | grep "< HTTP\|Location" +curl -vso /dev/null https://www.example.com/old-location.php 2>&1 | grep "< HTTP\|Location" +``` + +```sh output < HTTP/1.1 301 Moved Permanently < Location: https://www.example.com/expensive-db-call +``` + +```sh +curl -vso /dev/null https://www.example.com/expensive-db-call 2>&1 | grep "< HTTP" +``` -$ curl -vso /dev/null https://www.example.com/expensive-db-call 2>&1 | grep "< HTTP" +```sh output < HTTP/1.1 503 Service Temporarily Unavailable ``` diff --git a/src/content/docs/terraform/tutorial/configure-https-settings.mdx b/src/content/docs/terraform/tutorial/configure-https-settings.mdx index 0fb191fd4f9380..d141ad3ff90cd1 100644 --- a/src/content/docs/terraform/tutorial/configure-https-settings.mdx +++ b/src/content/docs/terraform/tutorial/configure-https-settings.mdx @@ -6,7 +6,6 @@ sidebar: head: - tag: title content: Configure HTTPS settings - --- After proxying a basic website through Cloudflare, you can use Terraform to adjust zone settings. In this tutorial, you will configure some optional HTTPS settings and then push the updated configuration to GitHub for posterity. @@ -17,17 +16,22 @@ You will use a new Git branch for the changes and then merge it into the `master In this step, modify the Terraform configuration to enable the following settings: -* [TLS 1.3](/ssl/edge-certificates/additional-options/tls-13/) -* [Automatic HTTPS Rewrites](/ssl/edge-certificates/additional-options/automatic-https-rewrites/) -* [Strict SSL mode](/ssl/origin-configuration/ssl-modes/full-strict/) +- [TLS 1.3](/ssl/edge-certificates/additional-options/tls-13/) +- [Automatic HTTPS Rewrites](/ssl/edge-certificates/additional-options/automatic-https-rewrites/) +- [Strict SSL mode](/ssl/origin-configuration/ssl-modes/full-strict/) Strict mode requires a valid SSL certificate on your origin — use the [Cloudflare Origin CA](/ssl/origin-configuration/origin-ca/) to generate one. ```bash -$ git checkout -b step3-https +git checkout -b step3-https +``` + +```sh output Switched to a new branch 'step3-https' +``` -$ cat >> cloudflare.tf <<'EOF' +```sh +cat >> cloudflare.tf <<'EOF' resource "cloudflare_zone_settings_override" "example-com-settings" { zone_id = var.zone_id @@ -46,7 +50,10 @@ EOF Review what Terraform is proposing before applying changes. The example output below is being filtered to ignore computed values — in this case, settings that will keep their default values. 
```sh -$ terraform plan | grep -v "(known after apply)" +terraform plan | grep -v "(known after apply)" +``` + +```sh output Refreshing Terraform state in-memory prior to plan... The refreshed state will be used to calculate this plan, but will not be persisted to local or remote state storage. @@ -85,21 +92,39 @@ guarantee to take exactly these actions if you run "terraform apply" now. The proposed changes look good, so you can merge them into the `master` branch and then apply them with `terraform apply`. When working on a team, you may want to require pull requests and use this opportunity to peer review any proposed configuration changes. ```sh -$ git add cloudflare.tf -$ git commit -m "Step 3 - Enable TLS 1.3, Always Use HTTPS, and SSL Strict mode." +git add cloudflare.tf +git commit -m "Step 3 - Enable TLS 1.3, Always Use HTTPS, and SSL Strict mode." +``` + +```sh output [step3-https d540600] Step 3 - Enable TLS 1.3, Always Use HTTPS, and SSL Strict mode. 1 file changed, 11 insertions(+) +``` -$ git checkout master +```sh +git checkout master +``` + +```sh output Switched to branch 'master' +``` -$ git merge step3-https +```sh +git merge step3-https +``` + +```sh output Updating d26f40b..d540600 Fast-forward cloudflare.tf | 11 +++++++++++ 1 file changed, 11 insertions(+) +``` + +```sh +git push +``` -$ git push +```sh output Counting objects: 3, done. Delta compression using up to 8 threads. Compressing objects: 100% (3/3), done. @@ -115,7 +140,10 @@ To git@github.com:$GITHUB_USER/cf-config.git Before applying the changes, try to connect with TLS 1.3. Technically, you should not be able to with default settings. To follow along with this test, you will need to [compile `curl` against BoringSSL](https://everything.curl.dev/source/build/tls/boringssl#build-boringssl). ```sh -$ curl -v --tlsv1.3 https://www.example.com 2>&1 | grep "SSL connection\|error" +curl -v --tlsv1.3 https://www.example.com 2>&1 | grep "SSL connection\|error" +``` + +```sh output * error:1000042e:SSL routines:OPENSSL_internal:TLSV1_ALERT_PROTOCOL_VERSION curl: (35) error:1000042e:SSL routines:OPENSSL_internal:TLSV1_ALERT_PROTOCOL_VERSION ``` @@ -123,7 +151,10 @@ curl: (35) error:1000042e:SSL routines:OPENSSL_internal:TLSV1_ALERT_PROTOCOL_VER As shown above, you should receive an error because TLS 1.3 is not yet enabled on your zone. Enable it by running `terraform apply` and try again. ```sh -$ terraform apply --auto-approve +terraform apply --auto-approve +``` + +```sh output Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: @@ -222,6 +253,9 @@ Apply complete! Resources: 1 added, 0 changed, 0 destroyed. Try the same command as before. The command will now succeed. ```sh -$ curl -v --tlsv1.3 https://www.example.com 2>&1 | grep "SSL connection\|error" +curl -v --tlsv1.3 https://www.example.com 2>&1 | grep "SSL connection\|error" +``` + +```sh output * SSL connection using TLSv1.3 / AEAD-AES128-GCM-SHA256 ``` diff --git a/src/content/docs/terraform/tutorial/initialize-terraform.mdx b/src/content/docs/terraform/tutorial/initialize-terraform.mdx index e0d12408a21da0..ed9ddcfa7ee781 100644 --- a/src/content/docs/terraform/tutorial/initialize-terraform.mdx +++ b/src/content/docs/terraform/tutorial/initialize-terraform.mdx @@ -6,7 +6,6 @@ sidebar: head: - tag: title content: Introduction to Terraform init - --- This tutorial shows you how to get started with Terraform. 
The tutorial uses an example scenario where you have a web server for your domain, accessible on `203.0.113.10`, and you just signed up your domain (`example.com`) on Cloudflare to manage everything in Terraform. @@ -21,11 +20,11 @@ Terraform will process any files with a `.tf` extension. As the configuration be :::caution -To prevent accidentally exposing your Cloudflare credentials, do not save this file in your version control system. The [next tutorial](/terraform/tutorial/track-history/) will cover best practices for passing in your API token. +To prevent accidentally exposing your Cloudflare credentials, do not save this file in your version control system. The [next tutorial](/terraform/tutorial/track-history/) will cover best practices for passing in your API token. ::: ```bash -$ cat > cloudflare.tf <<'EOF' +cat > cloudflare.tf <<'EOF' terraform { required_providers { cloudflare = { @@ -66,7 +65,10 @@ EOF After creating your basic configuration in HCL, initialize Terraform and ask it to apply the configuration to Cloudflare. ```sh -$ terraform init +terraform init +``` + +```sh output Initializing provider plugins... - Checking for available provider plugins on https://releases.hashicorp.com... @@ -96,7 +98,10 @@ commands will detect it and remind you to do so if necessary. When you run `terraform init`, any plugins required, such as the Cloudflare Terraform provider, are automatically downloaded and saved locally to a `.terraform` directory. ```sh -$ find .terraform/ +find .terraform/ +``` + +```sh output .terraform/ .terraform/plugins .terraform/plugins/darwin_amd64 @@ -109,7 +114,10 @@ $ find .terraform/ After installing the Cloudflare provider, review the proposed changes to your Cloudflare account so they match the configuration you previously defined. ```sh -$ terraform plan +terraform plan +``` + +```sh output Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: @@ -151,7 +159,10 @@ The `plan` command is important because it allows you to preview the changes for You can use `--auto-approve` on the command line for a briefer output. Without this flag, Terraform will display the output of the Terraform plan and then ask for confirmation before applying it. ```sh -$ terraform apply --auto-approve +terraform apply --auto-approve +``` + +```sh output Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: @@ -189,7 +200,10 @@ Log in to the [Cloudflare dashboard](https://dash.cloudflare.com) and go to **DN To see the full results returned from the API call, including the default values that you did not specify but let Terraform compute, run `terraform show`. ```sh -$ terraform show +terraform show +``` + +```sh output # cloudflare_record.www: resource "cloudflare_record" "www" { id = "c38d3103767284e7cd14d5dad3ab8668" @@ -216,6 +230,9 @@ resource "cloudflare_record" "www" { ``` ```sh -$ curl https://www.example.com +curl https://www.example.com +``` + +```sh output Hello, this is 203.0.113.10! 
``` diff --git a/src/content/docs/terraform/tutorial/revert-configuration.mdx b/src/content/docs/terraform/tutorial/revert-configuration.mdx index 5c2db9e4076356..51dbc05d0b8257 100644 --- a/src/content/docs/terraform/tutorial/revert-configuration.mdx +++ b/src/content/docs/terraform/tutorial/revert-configuration.mdx @@ -6,7 +6,6 @@ sidebar: head: - tag: title content: Revert configuration - --- Sometimes, you may have to roll back configuration changes. For example, you might want to run performance tests on a new configuration or maybe you mistyped an IP address and brought your entire site down. @@ -18,7 +17,10 @@ To revert your configuration, check out the desired branch and ask Terraform to Before determining how far back to revert, review the versioned history: ```sh -$ git log +git log +``` + +```sh output commit d4fec164581bec44684a4d59bb80aec1f1da5a6e Author: Me Date: Wed Apr 18 22:04:52 2018 -0700 @@ -63,7 +65,10 @@ Another benefit of storing your Cloudflare configuration in Git is that you can Check when the last change was made: ```sh -$ git show +git show +``` + +```sh output commit d4fec164581bec44684a4d59bb80aec1f1da5a6e Author: Me Date: Wed Apr 18 22:04:52 2018 -0700 @@ -106,8 +111,10 @@ index 0b39450..ef11d8a 100644 Review the past few changes: ```sh -$ git log -p -3 +git log -p -3 +``` +```sh output ... // page rule config from above ... @@ -201,11 +208,19 @@ While you can always edit the config file directly and delete those entries, you Run the following Git command to revert the last commit without rewriting history: ```sh -$ git revert HEAD~1..HEAD +git revert HEAD~1..HEAD +``` + +```sh output [master f9a6f7d] Revert "Step 6 - Bug fix." 1 file changed, 1 insertion(+), 1 deletion(-) +``` + +```sh +git log -2 +``` -$ git log -2 +```sh output commit f9a6f7db72ea1437e146050a5e7556052ecc9a1a Author: Me Date: Wed Apr 18 23:28:09 2018 -0700 @@ -226,7 +241,10 @@ Date: Wed Apr 18 22:04:52 2018 -0700 Run `terraform plan` and check the execution plan: ```sh -$ terraform plan +terraform plan +``` + +```sh output Refreshing Terraform state in-memory prior to plan... The refreshed state will be used to calculate this plan, but will not be persisted to local or remote state storage. @@ -269,7 +287,10 @@ As expected, Terraform is indicating it will remove the two Page Rules created i The changes look good. Terraform reverts the Cloudflare configuration when you apply the changes: ```sh -$ terraform apply --auto-approve +terraform apply --auto-approve +``` + +```sh output cloudflare_page_rule.redirect-to-new-db-page: Refreshing state... [id=c5c40ff2dc12416b5fe4d0541980c591] cloudflare_page_rule.increase-security-on-expensive-page: Refreshing state... [id=1c13fdb84710c4cc8b11daf7ffcca449] cloudflare_zone_settings_override.example-com-settings: Refreshing state... [id=e2e6491340be87a3726f91fc4148b126] diff --git a/src/content/docs/terraform/tutorial/track-history.mdx b/src/content/docs/terraform/tutorial/track-history.mdx index 5b832f4173431f..ef257be9f01225 100644 --- a/src/content/docs/terraform/tutorial/track-history.mdx +++ b/src/content/docs/terraform/tutorial/track-history.mdx @@ -6,13 +6,12 @@ sidebar: head: - tag: title content: Track your history - --- In the [Initialize Terraform](/terraform/tutorial/initialize-terraform/) tutorial, you created and applied some basic Cloudflare configuration. Terraform applied this configuration to your zone because you provided your API token at the top of the `cloudflare.tf` file that has access to this zone. 
```sh -$ head -n13 cloudflare.tf | tail -n3 +head -n13 cloudflare.tf | tail -n3 provider "cloudflare" { api_token = "your-api-token" } @@ -25,14 +24,14 @@ In this tutorial, you will store your configuration in GitHub where it can be tr As a good security practice, remove your Cloudflare credentials from anything that will be committed to a repository. The Cloudflare Terraform provider supports reading the credentials (and other configuration) [from environment variables](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs#schema), as in the following example: ```bash -$ sed -ie 's/^.*api_token =.*$/ # token pulled from $CLOUDFLARE_API_TOKEN/' cloudflare.tf +sed -ie 's/^.*api_token =.*$/ # token pulled from $CLOUDFLARE_API_TOKEN/' cloudflare.tf -$ head -n13 cloudflare.tf | tail -n3 +head -n13 cloudflare.tf | tail -n3 provider "cloudflare" { # token pulled from $CLOUDFLARE_API_TOKEN } -$ export CLOUDFLARE_API_TOKEN=your-api-token +export CLOUDFLARE_API_TOKEN=your-api-token ``` You must still include the empty provider definition in the file, so that Terraform knows to install the Cloudflare plugin. For more information about advanced options you can use to customize the Cloudflare provider, refer to [Provider customization](/terraform/advanced-topics/provider-customization/). @@ -40,7 +39,10 @@ You must still include the empty provider definition in the file, so that Terraf After running the commands above, ensure that you can still authenticate to Cloudflare by running `terraform plan`. Terraform will pull the current state which requires a valid email and API token. ```sh -$ terraform plan +terraform plan +``` + +```sh output cloudflare_record.www: Refreshing state... [id=c38d3102767284e7ca14d5dad3ab8b69] ------------------------------------------------------------------------ @@ -59,12 +61,13 @@ After removing the credentials, initialize a Git repository with your Cloudflare First, create the GitHub repository to store the configuration. You can do this via the GitHub user interface or with an API call. ```sh -$ export GITHUB_USER=your-github-user -$ export GITHUB_TOKEN=your-github-token +export GITHUB_USER=your-github-user +export GITHUB_TOKEN=your-github-token -$ export GITHUB_URL=$(curl -H "Authorization: token $GITHUB_TOKEN" -d '{"name": "cf-config", "private": true}' "https://api.github.com/user/repos" 2> /dev/null | jq -r .ssh_url) +export GITHUB_URL=$(curl -H "Authorization: token $GITHUB_TOKEN" -d '{"name": "cf-config", "private": true}' "https://api.github.com/user/repos" 2> /dev/null | jq -r .ssh_url) + +echo $GITHUB_URL -$ echo $GITHUB_URL git@github.com:$GITHUB_USER/cf-config.git ``` @@ -72,20 +75,26 @@ Next, initialize a Git repository and make the first commit. :::note[Note] - You might need to [add your SSH key to your GitHub account](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account). - ::: ```sh -$ git init +git init +``` + +```sh output Initialized empty Git repository in /Users/username/cf-config/.git/ +``` -$ git remote add origin $GITHUB_URL -$ git add cloudflare.tf +```sh +git remote add origin $GITHUB_URL +git add cloudflare.tf + +git commit -m "Step 2 - Initial commit with webserver definition." +``` -$ git commit -m "Step 2 - Initial commit with webserver definition." +```sh output [master (root-commit) 5acea17] Step 2 - Initial commit with webserver definition. 
1 file changed, 16 insertions(+) create mode 100644 cloudflare.tf @@ -96,19 +105,27 @@ Notice that the `.terraform` directory and `terraform.tfstate` file were not com To prevent Git from notifying you about the two files, add them to a new `.gitignore` file, commit it, and push everything to GitHub. ```bash -$ cat > .gitignore <<'EOF' +cat > .gitignore <<'EOF' .terraform/ terraform.tfstate* EOF -$ git add .gitignore +git add .gitignore + +git commit -m "Step 2 - Ignore terraform plugin directory and state file." +``` -$ git commit -m "Step 2 - Ignore terraform plugin directory and state file." +```sh output [master 494c6d6] Step 2 - Ignore terraform plugin directory and state file. 1 file changed, 2 insertions(+) create mode 100644 .gitignore +``` + +```sh +git push +``` -$ git push +```sh output Counting objects: 6, done. Delta compression using up to 8 threads. Compressing objects: 100% (4/4), done. diff --git a/src/content/docs/terraform/tutorial/use-load-balancing.mdx b/src/content/docs/terraform/tutorial/use-load-balancing.mdx index ae8baa116da916..112531a8d823a2 100644 --- a/src/content/docs/terraform/tutorial/use-load-balancing.mdx +++ b/src/content/docs/terraform/tutorial/use-load-balancing.mdx @@ -6,7 +6,6 @@ sidebar: head: - tag: title content: Improve performance and reliability - --- In this tutorial, you will add a second origin for some basic round robining, and then use the [Cloudflare Load Balancing](/load-balancing/) product to fail traffic over as needed. You will also enhance your load balancing configuration through the use of "geo steering" to serve results from an origin server that is geographically closest to your end users. @@ -16,10 +15,15 @@ In this tutorial, you will add a second origin for some basic round robining, an To get started, add a DNS record for a second web server, located in Asia. The IP address for this server is `198.51.100.15`. ```bash -$ git checkout -b step4-loadbalance +git checkout -b step4-loadbalance +``` + +```bash output Switched to a new branch 'step4-loadbalance' +``` -$ cat >> cloudflare.tf <<'EOF' +```bash +cat >> cloudflare.tf <<'EOF' resource "cloudflare_record" "www-asia" { zone_id = var.zone_id name = "www" @@ -37,7 +41,10 @@ Note that while the name of the `resource` is different because Terraform resour Check the `terraform plan` and then merge and apply the changes. ```sh -$ terraform plan | grep -v "(known after apply)" +terraform plan | grep -v "(known after apply)" +``` + +```sh output Refreshing Terraform state in-memory prior to plan... The refreshed state will be used to calculate this plan, but will not be persisted to local or remote state storage. @@ -69,16 +76,31 @@ Plan: 1 to add, 0 to change, 0 to destroy. Note: You didn't use the -out option to save this plan, so Terraform can't guarantee to take exactly these actions if you run "terraform apply" now. +``` + +```sh +git add cloudflare.tf +git commit -m "Step 4 - Add additional 'www' DNS record for Asia data center." +``` -$ git add cloudflare.tf -$ git commit -m "Step 4 - Add additional 'www' DNS record for Asia data center." +```sh output [step4-loadbalance 6761a4f] Step 4 - Add additional 'www' DNS record for Asia data center. 
 1 file changed, 7 insertions(+)
+```
 
-$ git checkout master
+```sh
+git checkout master
+```
+
+```sh output
 Switched to branch 'master'
+```
+
+```sh
+git merge step4-loadbalance
+```
 
-$ git merge step4-loadbalance
+```sh output
 Updating e1c38cf..6761a4f
 Fast-forward
  cloudflare.tf | 7 +++++++
@@ -90,7 +112,10 @@ Fast-forward
 
 Add the second DNS record for [www.example.com](http://www.example.com).
 
 ```sh
-$ terraform apply --auto-approve
+terraform apply --auto-approve
+```
+
+```sh output
 cloudflare_record.www: Refreshing state... [id=c38d3103767284e7cd14d5dad3ab8668]
 cloudflare_zone_settings_override.example-com-settings: Refreshing state... [id=e2e6491340be87a3726f91fc4148b126]
 
@@ -127,16 +152,34 @@ Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
 
 With the second DNS record in place, make some requests to see where the traffic is served from.
 
 ```sh
-$ curl https://www.example.com
+curl https://www.example.com
+```
+
+```sh output
 Hello, this is 203.0.113.10!
+```
+
+```sh
+curl https://www.example.com
+```
 
-$ curl https://www.example.com
+```sh output
 Hello, this is 203.0.113.10!
+```
+
+```sh
+curl https://www.example.com
+```
 
-$ curl https://www.example.com
+```sh output
 Hello, this is 198.51.100.15!
+```
+
+```sh
+curl https://www.example.com
+```
 
-$ curl https://www.example.com
+```sh output
 Hello, this is 203.0.113.10!
 ```
 
@@ -146,7 +189,7 @@ As noted above, there is no discernible pattern for which origin receives the re
 
 :::note
 
-Before proceeding, ensure [Load Balancing is enabled for your account](/load-balancing/get-started/enable-load-balancing/).
+Before proceeding, ensure [Load Balancing is enabled for your account](/load-balancing/get-started/enable-load-balancing/). 
 :::
 
 As described in the [Load Balancing tutorial](/learning-paths/load-balancing/), you will need to complete three tasks:
@@ -160,10 +203,15 @@ As described in the [Load Balancing tutorial](/learning-paths/load-balancing/),
 
 To monitor the origins, create a basic health check that makes a `GET` request to each origin on the URL `https://www.example.com`. If the origin returns the `200` status code (`OK`) within five seconds, it is considered healthy. If it fails to do so three times in a row, it is considered unhealthy. This health check will be run once per minute from several regions and send an email notification to your email address (configured as ``) if any failures are detected.
 
 ```bash
-$ git checkout step4-loadbalance
+git checkout step4-loadbalance
+```
+
+```bash output
 Switched to branch 'step4-loadbalance'
+```
 
-$ cat >> cloudflare.tf <<'EOF'
+```bash
+cat >> cloudflare.tf <<'EOF'
 resource "cloudflare_load_balancer_monitor" "get-root-https" {
   account_id = var.account_id
 
@@ -183,15 +231,15 @@ EOF
 
 In this example, the pool will be called `www-servers` with two origins added to it:
 
-* `www-us` (`203.0.113.10`)
-* `www-asia` (`198.51.100.15`)
+- `www-us` (`203.0.113.10`)
+- `www-asia` (`198.51.100.15`)
 
 For now, skip any sort of [geo routing](/load-balancing/understand-basics/traffic-steering/steering-policies/geo-steering/).
 
 Note the reference to the monitor that you added in the last step. When applying this configuration, Terraform will determine that it first needs to create the monitor before looking up the ID and providing it to the pool you wish to create.
```bash -$ cat >> cloudflare.tf <<'EOF' +cat >> cloudflare.tf <<'EOF' resource "cloudflare_load_balancer_pool" "www-servers" { account_id = var.account_id @@ -219,7 +267,7 @@ EOF When you create a load balancer (LB), it will [replace any existing DNS records with the same name](/load-balancing/load-balancers/dns-records/). For example, if you create the `www.example.com` load balancer below, it will supersede the two `www` DNS records that you previously defined. One benefit of leaving the DNS records in place is that if you temporarily disable load balancing, connections to this hostname are still possible as shown in [step 2](#2-preview-and-merge-the-changes) above. ```bash -$ cat >> cloudflare.tf <<'EOF' +cat >> cloudflare.tf <<'EOF' resource "cloudflare_load_balancer" "www-lb" { zone_id = var.zone_id @@ -237,7 +285,10 @@ EOF As usual, review the proposed plan before applying any changes. ```sh -$ terraform plan +terraform plan +``` + +```sh output cloudflare_record.www: Refreshing state... [id=c38d3103767284e7cd14d5dad3ab8669] cloudflare_record.www-asia: Refreshing state... [id=fda39d8c9bf909132e82a36bab992864] cloudflare_zone_settings_override.example-com-settings: Refreshing state... [id=e2e6491340be87a3726f91fc4148b126] @@ -343,12 +394,20 @@ guarantee to take exactly these actions if you run "terraform apply" now. The plan looks good. Merge the plan and apply it. ```sh -$ git add cloudflare.tf -$ git commit -m "Step 4 - Create load balancer (LB) monitor, LB pool, and LB." +git add cloudflare.tf +git commit -m "Step 4 - Create load balancer (LB) monitor, LB pool, and LB." +``` + +```sh output [step4-loadbalance bc9aa9a] Step 4 - Create load balancer (LB) monitor, LB pool, and LB. 1 file changed, 35 insertions(+) +``` + +```sh +terraform apply --auto-approve +``` -$ terraform apply --auto-approve +```sh output cloudflare_zone_settings_override.example-com-settings: Refreshing state... [id=e2e6491340be87a3726f91fc4148b126] cloudflare_record.www: Refreshing state... [id=c38d3103767284e7cd14d5dad3ab8669] cloudflare_record.www-asia: Refreshing state... [id=fda39d8c9bf909132e82a36bab992864] @@ -458,7 +517,10 @@ Apply complete! Resources: 3 added, 0 changed, 0 destroyed. With load balancing in place, run four `curl` requests again to see where the traffic is served from. ```sh -$ for i in {1..4}; do curl https://www.example.com && sleep 5; done +for i in {1..4}; do curl https://www.example.com && sleep 5; done +``` + +```sh output Hello, this is 198.51.100.15! Hello, this is 203.0.113.10! diff --git a/src/content/docs/time-services/roughtime/usage.mdx b/src/content/docs/time-services/roughtime/usage.mdx index 16f7929ecdd28e..bc7ec1eec8cedf 100644 --- a/src/content/docs/time-services/roughtime/usage.mdx +++ b/src/content/docs/time-services/roughtime/usage.mdx @@ -6,15 +6,14 @@ sidebar: head: - tag: title content: Get the Roughtime from Cloudflare - --- The "Hello, world!" of Roughtime is very simple: the client sends a request over UDP to the server and the server responds with a signed timestamp. You just need the server's address and public key to run the protocol: -* **Server address**: `roughtime.cloudflare.com:2003` (resolves to an IP address in our [anycast IP range](https://www.cloudflare.com/learning/cdn/glossary/anycast-network/)). You can use either IPv4 or IPv6. 
-* **Public key**: `0GD7c3yP8xEc4Zl2zeuN2SlLvDVVocjsPSL8/Rl/7zg=` +- **Server address**: `roughtime.cloudflare.com:2003` (resolves to an IP address in our [anycast IP range](https://www.cloudflare.com/learning/cdn/glossary/anycast-network/)). You can use either IPv4 or IPv6. +- **Public key**: `0GD7c3yP8xEc4Zl2zeuN2SlLvDVVocjsPSL8/Rl/7zg=` To get started, download and run Cloudflare's [Go client](https://github.com/cloudflare/roughtime): @@ -31,7 +30,7 @@ change in the future. We will keep this page up-to-date with the most current pu You can also obtain it programmatically using DNS. For example: ```sh -$ dig TXT roughtime.cloudflare.com | grep -oP 'TXT\s"\K.*?(?=")' +dig TXT roughtime.cloudflare.com | grep -oP 'TXT\s"\K.*?(?=")' ``` ## Next steps diff --git a/src/content/docs/turnstile/get-started/server-side-validation.mdx b/src/content/docs/turnstile/get-started/server-side-validation.mdx index af4c7e4633b793..3289f87c2f5feb 100644 --- a/src/content/docs/turnstile/get-started/server-side-validation.mdx +++ b/src/content/docs/turnstile/get-started/server-side-validation.mdx @@ -3,10 +3,9 @@ title: Server-side validation pcx_content_type: get-started sidebar: order: 2 - --- -import { GlossaryTooltip, Render, TabItem, Tabs } from "~/components" +import { GlossaryTooltip, Render, TabItem, Tabs } from "~/components"; @@ -20,7 +19,7 @@ Tokens issued to Turnstile using the success callbacks, via explicit or implicit A Turnstile token can have up to 2048 characters. -It is also valid for 300 seconds before it is rejected by siteverify. +It is also valid for 300 seconds before it is rejected by siteverify. ::: The siteverify endpoint needs to be passed a secret key that is associated with the sitekey. The secret key will be provisioned alongside the sitekey when you create a widget. Furthermore, the response needs to be passed to the siteverify endpoint. @@ -29,13 +28,16 @@ A response may only be validated once. If the same response is presented twice, :::note -Refer to the [full demo on GitHub](https://github.com/cloudflare/turnstile-demo-workers/blob/main/src/index.mjs). +Refer to the [full demo on GitHub](https://github.com/cloudflare/turnstile-demo-workers/blob/main/src/index.mjs). :::
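Because a response may only be validated once, replaying the same token against siteverify fails validation. As a rough sketch of that failure case (placeholder values throughout; `timeout-or-duplicate` is the error code siteverify uses for expired or already-redeemed tokens):

```sh
# Second call with a token that has already been consumed (illustrative)
curl 'https://challenges.cloudflare.com/turnstile/v0/siteverify' --data 'secret=verysecret&response=<already-used-token>'
```

```sh output
{
  "success": false,
  "error-codes": ["timeout-or-duplicate"]
}
```

The basic successful flow is shown in the example below.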
```sh title="Example using cURL" -$ curl 'https://challenges.cloudflare.com/turnstile/v0/siteverify' --data 'secret=verysecret&response=' +curl 'https://challenges.cloudflare.com/turnstile/v0/siteverify' --data 'secret=verysecret&response=' +``` + +```sh output { "success": true, "error-codes": [], @@ -51,25 +53,25 @@ $ curl 'https://challenges.cloudflare.com/turnstile/v0/siteverify' --data 'secre ```js title="Example using fetch from Cloudflare Workers" // This is the demo secret key. In production, we recommend // you store your secret key(s) safely. -const SECRET_KEY = '1x0000000000000000000000000000000AA'; +const SECRET_KEY = "1x0000000000000000000000000000000AA"; async function handlePost(request) { const body = await request.formData(); // Turnstile injects a token in "cf-turnstile-response". - const token = body.get('cf-turnstile-response'); - const ip = request.headers.get('CF-Connecting-IP'); + const token = body.get("cf-turnstile-response"); + const ip = request.headers.get("CF-Connecting-IP"); // Validate the token by calling the // "/siteverify" API endpoint. let formData = new FormData(); - formData.append('secret', SECRET_KEY); - formData.append('response', token); - formData.append('remoteip', ip); + formData.append("secret", SECRET_KEY); + formData.append("response", token); + formData.append("remoteip", ip); - const url = 'https://challenges.cloudflare.com/turnstile/v0/siteverify'; + const url = "https://challenges.cloudflare.com/turnstile/v0/siteverify"; const result = await fetch(url, { body: formData, - method: 'POST', + method: "POST", }); const outcome = await result.json(); @@ -123,27 +125,27 @@ async function handlePost(request) { ```js title="Example using idempotency functionality" // This is the demo secret key. In production, we recommend // you store your secret key(s) safely. -const SECRET_KEY = '1x0000000000000000000000000000000AA'; +const SECRET_KEY = "1x0000000000000000000000000000000AA"; async function handlePost(request) { const body = await request.formData(); // Turnstile injects a token in "cf-turnstile-response". - const token = body.get('cf-turnstile-response'); - const ip = request.headers.get('CF-Connecting-IP'); + const token = body.get("cf-turnstile-response"); + const ip = request.headers.get("CF-Connecting-IP"); // Validate the token by calling the // "/siteverify" API endpoint. let formData = new FormData(); - formData.append('secret', SECRET_KEY); - formData.append('response', token); - formData.append('remoteip', ip); + formData.append("secret", SECRET_KEY); + formData.append("response", token); + formData.append("remoteip", ip); const idempotencyKey = crypto.randomUUID(); - formData.append('idempotency_key', idempotencyKey); + formData.append("idempotency_key", idempotencyKey); - const url = 'https://challenges.cloudflare.com/turnstile/v0/siteverify'; + const url = "https://challenges.cloudflare.com/turnstile/v0/siteverify"; const firstResult = await fetch(url, { body: formData, - method: 'POST', + method: "POST", }); const firstOutcome = await firstResult.json(); if (firstOutcome.success) { @@ -155,14 +157,13 @@ async function handlePost(request) { // the associated idempotency key as well. const subsequentResult = await fetch(url, { body: formData, - method: 'POST', + method: "POST", }); const subsequentOutcome = await subsequentResult.json(); if (subsequentOutcome.success) { // ... } - } ``` @@ -203,8 +204,8 @@ async function handlePost(request) { // ... 
} ​ - // A subsequent validation request to the "/siteverify" - // API endpoint for the same token as before, providing + // A subsequent validation request to the "/siteverify" + // API endpoint for the same token as before, providing // the associated idempotency key as well. const subsequentResult = await fetch(url, { body: JSON.stringify({ @@ -240,10 +241,8 @@ async function handlePost(request) { :::note - The `remoteip` parameter helps to prevent abuse by ensuring the current visitor is the one who received the token. This is currently not strictly validated. - ::: The siteverify endpoint behaves similar to reCAPTCHA’s or hCaptcha's siteverify endpoint. @@ -255,22 +254,22 @@ It always contains a `success` property, either true or false, indicating whethe ```json title="Successful validation response" {2} { - "success": true, - "challenge_ts": "2022-02-28T15:14:30.096Z", - "hostname": "example.com", - "error-codes": [], - "action": "login", - "cdata": "sessionid-123456789" + "success": true, + "challenge_ts": "2022-02-28T15:14:30.096Z", + "hostname": "example.com", + "error-codes": [], + "action": "login", + "cdata": "sessionid-123456789" } ```
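Each field in this response is described in the list below. As a minimal sketch of checking more than `success` on the server (using `jq` for brevity; the `login` action and `example.com` hostname are the placeholder values from the sample above):

```sh
# Exit non-zero unless the token is valid AND was issued for the expected
# widget action and hostname (values are illustrative).
curl -s 'https://challenges.cloudflare.com/turnstile/v0/siteverify' \
  --data 'secret=verysecret&response=<response>' \
  | jq -e '.success and .action == "login" and .hostname == "example.com"'
```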
-* `challenge_ts` is the ISO timestamp for the time the challenge was solved.
-* `hostname` is the hostname for which the challenge was served.
-* `action` is the customer widget identifier passed to the widget on the client side. This is used to differentiate widgets using the same sitekey in analytics. Its integrity is protected by modifications from an attacker. It is recommended to validate that the action matches an expected value.
-* `cdata` is the customer data passed to the widget on the client side. This can be used by the customer to convey state. It is integrity protected by modifications from an attacker.
-* `error-codes` is a list of errors that occurred.
+- `challenge_ts` is the ISO timestamp for the time the challenge was solved.
+- `hostname` is the hostname for which the challenge was served.
+- `action` is the customer widget identifier passed to the widget on the client side. This is used to differentiate widgets using the same sitekey in analytics. Its integrity is protected against modification by an attacker. It is recommended to validate that the action matches an expected value.
+- `cdata` is the customer data passed to the widget on the client side. This can be used by the customer to convey state. Its integrity is protected against modification by an attacker.
+- `error-codes` is a list of errors that occurred.
 
 In case of a validation failure, the response should be similar to the following:
 
@@ -278,10 +277,8 @@ In case of a validation failure, the response should be similar to the following
 
 ```json title="Failed validation response" {2}
 {
-  "success": false,
-  "error-codes": [
-    "invalid-input-response"
-  ]
+  "success": false,
+  "error-codes": ["invalid-input-response"]
 }
 ```
diff --git a/src/content/docs/turnstile/get-started/terraform.mdx b/src/content/docs/turnstile/get-started/terraform.mdx
index 519dcc8c9367a9..7a118e02c3a8da 100644
--- a/src/content/docs/turnstile/get-started/terraform.mdx
+++ b/src/content/docs/turnstile/get-started/terraform.mdx
@@ -6,7 +6,7 @@ sidebar:
 ---
 
 :::note[Requirements]
-This guide assumes that you have the [Terraform](https://developer.hashicorp.com/terraform/tutorials/certification-associate-tutorials/install-cli) command installed on your machine.
+This guide assumes that you have the [Terraform](https://developer.hashicorp.com/terraform/tutorials/certification-associate-tutorials/install-cli) command installed on your machine. 
 :::
 
 [Terraform](https://developer.hashicorp.com/terraform/tutorials/certification-associate-tutorials/install-cli) is a tool for building, changing, and versioning infrastructure, and provides components and documentation for building [Cloudflare resources](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs). Listed below are examples to help you get started with Turnstile using Terraform. For a more generalized guide on configuring Cloudflare and Terraform, visit our [Getting Started with Terraform and Cloudflare](https://blog.cloudflare.com/getting-started-with-terraform-and-cloudflare-part-1/) blog post.
 
@@ -18,7 +18,7 @@ This guide assumes that you have the [Terraform](https://developer.hashicorp.com
 
 Create an [API Token](/fundamentals/api/get-started/create-token/) with the **Account > Turnstile > Edit** permission.
Next, you need to export this secret in our environment variables: ```sh title="Export your token" -$ export CLOUDFLARE_API_TOKEN= +export CLOUDFLARE_API_TOKEN= ``` ### Create a Turnstile widget @@ -63,7 +63,7 @@ output "turnstile_example_secretkey" { :::note -The `id` field in the `cloudflare_turnstile_widget.example` resource is your Turnstile widget's sitekey. +The `id` field in the `cloudflare_turnstile_widget.example` resource is your Turnstile widget's sitekey. ::: ### Initialize Terraform and the Cloudflare provider @@ -71,8 +71,10 @@ The `id` field in the `cloudflare_turnstile_widget.example` resource is your Tur Run the command `terraform init` to set up your Terraform working directory, enabling it to interact with Cloudflare services. This process involves downloading the required provider plugins, establishing backend storage for your state files, and creating a local `.terraform` directory to store configuration data. ```sh title="Initialize command" -$ terraform init +terraform init +``` +```sh output Initializing the backend... Initializing provider plugins... @@ -100,7 +102,10 @@ commands will detect it and remind you to do so if necessary. You can run `terraform plan`, which will output any proposed changes. This will prompt you for your Cloudflare Account ID. Make sure to review the plan carefully. ```sh title="Review command" -$ terraform plan +terraform plan +``` + +```sh output var.account_id Your Cloudflare Account ID. @@ -140,7 +145,10 @@ Note: You didn't use the -out option to save this plan, so Terraform can't guara Once the changes look accurate and you are comfortable moving forward, apply them using the `terraform apply` command. ```sh title="Apply command" -$ terraform apply --auto-approve +terraform apply --auto-approve +``` + +```sh output var.account_id Your Cloudflare Account ID. @@ -187,13 +195,16 @@ You have successfully created a Turnstile widget. Go to the [Cloudflare dashboar Use `terraform output` to get your secret key. ```sh title="Secret key" -$ terraform output turnstile_example_secretkey +terraform output turnstile_example_secretkey +``` + +```sh output "0x4AAAAAAAEe4xWueFq9yX8ypjlimbk1Db4" ``` :::note -For advanced usage, refer to our [Terraform resource documentation](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs/resources/turnstile_widget). +For advanced usage, refer to our [Terraform resource documentation](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs/resources/turnstile_widget). ::: ## Import a Turnstile widget @@ -202,19 +213,19 @@ For advanced usage, refer to our [Terraform resource documentation](https://regi Before you can import the Turnstile widget in Terraform, you must: -* [Install the `cf-terraforming` tool](/terraform/advanced-topics/import-cloudflare-resources/#cf-terraforming). -* [Create a Cloudflare API token](/fundamentals/api/get-started/create-token/). -* [Initialize Terraform and the Cloudflare provider](#initialize-terraform-and-the-cloudflare-provider). +- [Install the `cf-terraforming` tool](/terraform/advanced-topics/import-cloudflare-resources/#cf-terraforming). +- [Create a Cloudflare API token](/fundamentals/api/get-started/create-token/). +- [Initialize Terraform and the Cloudflare provider](#initialize-terraform-and-the-cloudflare-provider). ### Import Turnstile widgets in Terraform Run the `cf-terraforming generate` command and add the output below to your `.tf` file. 
```sh title="Generate command" -$ cf-terraforming generate --resource-type cloudflare_turnstile_widget --account 6be2041a37d48aaaa9c686434f1709f0 - -output: +cf-terraforming generate --resource-type cloudflare_turnstile_widget --account 6be2041a37d48aaaa9c686434f1709f0 +``` +```sh output resource "cloudflare_turnstile_widget" "terraform_managed_resource_0x4AAAAAAAEk5sP3rwf91fe8" { account_id = "6be2041a37d48aaaa9c686434f1709f0" domains = ["example.net"] @@ -241,11 +252,11 @@ resource "cloudflare_turnstile_widget" "terraform_managed_resource_0x4AAAAAAAF1z Run the `cf-terraforming import` command and the resulting commands below. ```sh title="Import command" -$ cf-terraforming import --resource-type cloudflare_turnstile_widget --account 6be2041a37d48aaaa9c686434f1709f0 - -output: +cf-terraforming import --resource-type cloudflare_turnstile_widget --account 6be2041a37d48aaaa9c686434f1709f0 +``` +```sh output terraform import cloudflare_turnstile_widget.terraform_managed_resource_0x4AAAAAAAEg5sP3rwf91fe8 6be2041a37d48aaaa9c686434f1709f0/0x4AAAAAAAEk5sP3rwf91fe8 terraform import cloudflare_turnstile_widget.terraform_managed_resource_0x4AAAAAAAE0gwg0H1StXlOx 6be2041a37d48aaaa9c686434f1709f0/0x4AAAAAAAE0wwg0H1StXlOx -terraform import cloudflare_turnstile_widget.terraform_managed_resource_0x4AAAAAAAE2z4LbxEka5UBh 6be2041a37d48aaaa9c686434f1709f0/0x4AAAAAAAF1z4LbxEka5UBh +terraform import cloudflare_turnstile_widget.terraform_managed_resource_0x4AAAAAAAE2z4LbxEka5UBh 6be2041a37d48aaaa9c686434f1709f0/0x4AAAAAAAF1z4LbxEka5UBh ``` diff --git a/src/content/docs/vectorize/best-practices/create-indexes.mdx b/src/content/docs/vectorize/best-practices/create-indexes.mdx index df9897cda25683..6d6ec81827d5fd 100644 --- a/src/content/docs/vectorize/best-practices/create-indexes.mdx +++ b/src/content/docs/vectorize/best-practices/create-indexes.mdx @@ -3,18 +3,17 @@ title: Create indexes pcx_content_type: concept sidebar: order: 3 - --- -import { Render } from "~/components" +import { Render } from "~/components"; Indexes are the "atom" of Vectorize. Vectors are inserted into an index and enable you to query the index for similar vectors for a given input vector. Creating an index requires three inputs: -* A name, for example `prod-search-index` or `recommendations-idx-dev`. -* The (fixed) [dimension size](#dimensions) of each vector, for example 384 or 1536. -* The (fixed) [distance metric](#distance-metrics) to use for calculating vector similarity. +- A name, for example `prod-search-index` or `recommendations-idx-dev`. +- The (fixed) [dimension size](#dimensions) of each vector, for example 384 or 1536. +- The (fixed) [distance metric](#distance-metrics) to use for calculating vector similarity. The configuration of an index cannot be changed after creation. 
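Because the dimension size and metric are fixed, it can help to confirm how an existing index was configured before writing to it. A quick sketch using Wrangler (`prod-search-index` is the example name from above; `wrangler vectorize get` prints an index's configuration):

```sh
# List your indexes, then inspect one in detail
npx wrangler vectorize list
npx wrangler vectorize get prod-search-index
```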

@@ -33,13 +32,13 @@ Please use the 'wrangler vectorize --deprecated-v1' flag to create, get, list, d
 To create an index with `wrangler`:
 
 ```sh
-$ npx wrangler vectorize create your-index-name --dimensions=NUM_DIMENSIONS --metric=SELECTED_METRIC
+npx wrangler vectorize create your-index-name --dimensions=NUM_DIMENSIONS --metric=SELECTED_METRIC
 ```
 
 To create an index that can accept vector embeddings from Workers AI's [`@cf/baai/bge-base-en-v1.5`](/workers-ai/models/#text-embeddings) embedding model, which outputs vectors with 768 dimensions, use the following command:
 
 ```sh
-$ npx wrangler vectorize create your-index-name --dimensions=768 --metric=cosine
+npx wrangler vectorize create your-index-name --dimensions=768 --metric=cosine
 ```
 
 ## Dimensions
 
@@ -61,7 +60,7 @@ The following table highlights some example embeddings models and their output d
 
 :::note[Learn more about Workers AI]
 
-Refer to the [Workers AI documentation](/workers-ai/models/#text-embeddings) to learn about its built-in embedding models.
+Refer to the [Workers AI documentation](/workers-ai/models/#text-embeddings) to learn about its built-in embedding models. 
 :::
 
 ## Distance metrics
 
@@ -72,8 +71,8 @@ Distance metrics are functions that determine how close vectors are from each ot
 | ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
 | `cosine`      | Distance is measured from `-1` (most dissimilar) to `1` (identical). `0` denotes an orthogonal vector.                                                                                              |
 | `euclidean`   | Euclidean (L2) distance. `0` denotes identical vectors. The larger the positive number, the further the vectors are apart.                                                                          |
-| `dot-product` | Negative dot product. Larger negative values *or* smaller positive values denote more similar vectors. A score of `-1000` is more similar than `-500`, and a score of `15` more similar than `50`.  |
+| `dot-product` | Negative dot product. Larger negative values _or_ smaller positive values denote more similar vectors. A score of `-1000` is more similar than `-500`, and a score of `15` more similar than `50`.  |
 
-Determining the similarity between vectors can be subjective based on how the machine-learning model that represents features in the resulting vector embeddings. For example, a score of `0.8511` when using a `cosine` metric means that two vectors are close in distance, but whether data they represent is *similar* is a function of how well the model is able to represent the original content.
+Determining the similarity between vectors can be subjective, depending on how well the machine-learning model represents features in the resulting vector embeddings. For example, a score of `0.8511` when using a `cosine` metric means that two vectors are close in distance, but whether data they represent is _similar_ is a function of how well the model is able to represent the original content.
 
 Distance metrics cannot be changed after index creation, and each metric has a different scoring function.
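To make the scoring concrete, here is a small worked example of the `cosine` metric on two illustrative two-dimensional vectors:

```latex
% cosine similarity of a = (1, 0) and b = (0.6, 0.8), both of unit length
\cos(\theta) = \frac{a \cdot b}{\lVert a \rVert \, \lVert b \rVert}
             = \frac{(1)(0.6) + (0)(0.8)}{1 \times 1}
             = 0.6
```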
diff --git a/src/content/docs/vectorize/best-practices/insert-vectors.mdx b/src/content/docs/vectorize/best-practices/insert-vectors.mdx index 6c6a0d361aea80..9fb3b43e2a0202 100644 --- a/src/content/docs/vectorize/best-practices/insert-vectors.mdx +++ b/src/content/docs/vectorize/best-practices/insert-vectors.mdx @@ -3,10 +3,9 @@ title: Insert vectors pcx_content_type: concept sidebar: order: 4 - --- -import { Render } from "~/components" +import { Render } from "~/components"; Vectorize indexes allow you to insert vectors at any point: Vectorize will optimize the index behind the scenes to ensure that vector search remains efficient, even as new vectors are added or existing vectors updated. @@ -14,9 +13,9 @@ Vectorize indexes allow you to insert vectors at any point: Vectorize will optim Vectorize supports vectors in three formats: -* An array of floating point numbers (converted into a JavaScript `number[]` array). -* A [Float32Array](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Float32Array) -* A [Float64Array](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Float64Array) +- An array of floating point numbers (converted into a JavaScript `number[]` array). +- A [Float32Array](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Float32Array) +- A [Float64Array](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Float64Array) In most cases, a `number[]` array is the easiest when dealing with other APIs, and is the return type of most machine-learning APIs. @@ -28,9 +27,9 @@ Metadata keys cannot be empty, contain the dot character (`.`), contain the doub Metadata can be used to: -* Include the object storage key, database UUID or other identifier to look up the content the vector embedding represents. -* The raw content (up to the [metadata limits](/vectorize/platform/limits/)), which can allow you to skip additional lookups for smaller content. -* Dates, timestamps, or other metadata that describes when the vector embedding was generated or how it was generated. +- Include the object storage key, database UUID or other identifier to look up the content the vector embedding represents. +- The raw content (up to the [metadata limits](/vectorize/platform/limits/)), which can allow you to skip additional lookups for smaller content. +- Dates, timestamps, or other metadata that describes when the vector embedding was generated or how it was generated. For example, a vector embedding representing an image could include the path to the [R2 object](/r2/) it was generated from, the format, and a category lookup: @@ -55,21 +54,21 @@ To insert vectors with a namespace: // Vectors from a machine-learning model are typically ~100 to 1536 dimensions // wide (or wider still). const sampleVectors: Array = [ - { - id: "1", - values: [32.4, 74.1, 3.2], - namespace: "text", - }, - { - id: "2", - values: [15.1, 19.2, 15.8], - namespace: "images", - }, - { - id: "3", - values: [0.16, 1.2, 3.8], - namespace: "pdfs", - }, + { + id: "1", + values: [32.4, 74.1, 3.2], + namespace: "text", + }, + { + id: "2", + values: [15.1, 19.2, 15.8], + namespace: "images", + }, + { + id: "3", + values: [0.16, 1.2, 3.8], + namespace: "pdfs", + }, ]; // Insert your vectors, returning a count of the vectors inserted and their vector IDs. 
@@ -80,7 +79,9 @@ To query vectors within a namespace: ```ts // Your queryVector will be searched against vectors within the namespace (only) -let matches = await env.TUTORIAL_INDEX.query(queryVector, { namespace: "images" }) +let matches = await env.TUTORIAL_INDEX.query(queryVector, { + namespace: "images", +}); ``` ## Examples @@ -94,21 +95,21 @@ Use the `insert()` and `upsert()` methods available on an index from within a Cl // Vectors from a machine-learning model are typically ~100 to 1536 dimensions // wide (or wider still). const sampleVectors: Array = [ - { - id: "1", - values: [32.4, 74.1, 3.2], - metadata: { url: "/products/sku/13913913" }, - }, - { - id: "2", - values: [15.1, 19.2, 15.8], - metadata: { url: "/products/sku/10148191" }, - }, - { - id: "3", - values: [0.16, 1.2, 3.8], - metadata: { url: "/products/sku/97913813" }, - }, + { + id: "1", + values: [32.4, 74.1, 3.2], + metadata: { url: "/products/sku/13913913" }, + }, + { + id: "2", + values: [15.1, 19.2, 15.8], + metadata: { url: "/products/sku/10148191" }, + }, + { + id: "3", + values: [0.16, 1.2, 3.8], + metadata: { url: "/products/sku/97913813" }, + }, ]; // Insert your vectors, returning a count of the vectors inserted and their vector IDs. @@ -127,8 +128,8 @@ Please use a maximum of 5000 vectors per embeddings.ndjson file to prevent the g You can bulk upload vector embeddings directly: -* The file must be in newline-delimited JSON (NDJSON format): each complete vector must be newline separated, and not within an array or object. -* Vectors must be complete and include a unique string `id` per vector. +- The file must be in newline-delimited JSON (NDJSON format): each complete vector must be newline separated, and not within an array or object. +- Vectors must be complete and include a unique string `id` per vector. An example NDJSON formatted file: @@ -141,7 +142,7 @@ An example NDJSON formatted file: ```sh -$ wrangler vectorize insert --file=embeddings.ndjson +wrangler vectorize insert --file=embeddings.ndjson ``` ### HTTP API diff --git a/src/content/docs/vectorize/get-started/embeddings.mdx b/src/content/docs/vectorize/get-started/embeddings.mdx index 740baac76064ff..94f5ea673111a7 100644 --- a/src/content/docs/vectorize/get-started/embeddings.mdx +++ b/src/content/docs/vectorize/get-started/embeddings.mdx @@ -3,10 +3,9 @@ title: Vectorize and Workers AI pcx_content_type: get-started sidebar: order: 3 - --- -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; @@ -14,18 +13,16 @@ Vectorize allows you to generate [vector embeddings](/vectorize/reference/what-i :::note[New to Vectorize?] - If this is your first time using Vectorize or a vector database, start with the [Vectorize Get started guide](/vectorize/get-started/intro/). - ::: This guide will instruct you through: -* Creating a Vectorize index. -* Connecting a [Cloudflare Worker](/workers/) to your index. -* Using [Workers AI](/workers-ai/) to generate vector embeddings. -* Using Vectorize to query those vector embeddings. +- Creating a Vectorize index. +- Connecting a [Cloudflare Worker](/workers/) to your index. +- Using [Workers AI](/workers-ai/) to generate vector embeddings. +- Using Vectorize to query those vector embeddings. 
## Prerequisites @@ -41,23 +38,33 @@ You will create a new project that will contain a Worker script, which will act Open your terminal and create a new project named `embeddings-tutorial` by running the following command: - - - + + + This will create a new `embeddings-tutorial` directory. Your new `embeddings-tutorial` directory will include: -* A `"Hello World"` [Worker](/workers/get-started/guide/#3-write-code) at `src/index.ts`. -* A [`wrangler.toml`](/workers/wrangler/configuration/) configuration file. `wrangler.toml` is how your `embeddings-tutorial` Worker will access your index. +- A `"Hello World"` [Worker](/workers/get-started/guide/#3-write-code) at `src/index.ts`. +- A [`wrangler.toml`](/workers/wrangler/configuration/) configuration file. `wrangler.toml` is how your `embeddings-tutorial` Worker will access your index. :::note - If you are familiar with Cloudflare Workers, or initializing projects in a Continuous Integration (CI) environment, initialize a new project non-interactively by setting `CI=true` as an environmental variable when running `create cloudflare@latest`. For example: `CI=true npm create cloudflare@latest embeddings-tutorial --type=simple --git --ts --deploy=false` will create a basic "Hello World" project ready to build on. - ::: ## 2. Create an index @@ -67,7 +74,7 @@ A vector database is distinct from a traditional SQL or NoSQL database. A vector To create your first Vectorize index, change into the directory you just created for your Workers project: ```sh -$ cd embeddings-tutorial +cd embeddings-tutorial ``` :::note[Using Vectorize v1?] @@ -78,9 +85,9 @@ Please use the 'wrangler vectorize --deprecated-v1' flag to create, get, list, d To create an index, use the `wrangler vectorize create` command and provide a name for the index. A good index name is: -* A combination of lowercase and/or numeric ASCII characters, shorter than 32 characters, starts with a letter, and uses dashes (-) instead of spaces. -* Descriptive of the use-case and environment. For example, "production-doc-search" or "dev-recommendation-engine". -* Only used for describing the index, and is not directly referenced in code. +- A combination of lowercase and/or numeric ASCII characters, shorter than 32 characters, starts with a letter, and uses dashes (-) instead of spaces. +- Descriptive of the use-case and environment. For example, "production-doc-search" or "dev-recommendation-engine". +- Only used for describing the index, and is not directly referenced in code. In addition, define both the `dimensions` of the vectors you will store in the index, as well as the distance `metric` used to determine similar vectors when creating the index. **This configuration cannot be changed later**, as a vector database is configured for a fixed vector configuration. @@ -89,8 +96,10 @@ In addition, define both the `dimensions` of the vectors you will store in the i Run the following `wrangler vectorize` command, ensuring that the `dimensions` are set to `768`: this is important, as the Workers AI model used in this tutorial outputs vectors with 768 dimensions. ```sh -$ npx wrangler vectorize create embeddings-index --dimensions=768 --metric=cosine +npx wrangler vectorize create embeddings-index --dimensions=768 --metric=cosine +``` +```sh output ✅ Successfully created index 'embeddings-index' [[vectorize]] @@ -114,9 +123,9 @@ index_name = "embeddings-index" Specifically: -* The value (string) you set for `` will be used to reference this database in your Worker. 
In this tutorial, name your binding `VECTORIZE_INDEX`. -* The binding must be [a valid JavaScript variable name](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Grammar_and_types#variables). For example, `binding = "MY_INDEX"` or `binding = "PROD_SEARCH_INDEX"` would both be valid names for the binding. -* Your binding is available in your Worker at `env.` and the Vectorize [client API](/vectorize/reference/client-api/) is exposed on this binding for use within your Workers application. +- The value (string) you set for `` will be used to reference this database in your Worker. In this tutorial, name your binding `VECTORIZE_INDEX`. +- The binding must be [a valid JavaScript variable name](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Grammar_and_types#variables). For example, `binding = "MY_INDEX"` or `binding = "PROD_SEARCH_INDEX"` would both be valid names for the binding. +- Your binding is available in your Worker at `env.` and the Vectorize [client API](/vectorize/reference/client-api/) is exposed on this binding for use within your Workers application. ## 4. Set up Workers AI @@ -154,19 +163,26 @@ interface EmbeddingResponse { export default { async fetch(request, env, ctx): Promise { let path = new URL(request.url).pathname; - if (path.startsWith('/favicon')) { - return new Response('', { status: 404 }); + if (path.startsWith("/favicon")) { + return new Response("", { status: 404 }); } // You only need to generate vector embeddings once (or as // data changes), not on every request - if (path === '/insert') { + if (path === "/insert") { // In a real-world application, you could read content from R2 or // a SQL database (like D1) and pass it to Workers AI - const stories = ['This is a story about an orange cloud', 'This is a story about a llama', 'This is a story about a hugging emoji']; - const modelResp: EmbeddingResponse = await env.AI.run('@cf/baai/bge-base-en-v1.5', { - text: stories, - }); + const stories = [ + "This is a story about an orange cloud", + "This is a story about a llama", + "This is a story about a hugging emoji", + ]; + const modelResp: EmbeddingResponse = await env.AI.run( + "@cf/baai/bge-base-en-v1.5", + { + text: stories, + }, + ); // Convert the vector embeddings into a format Vectorize can accept. // Each vector needs an ID, a value (the vector) and optional metadata. @@ -184,12 +200,17 @@ export default { } // Your query: expect this to match vector ID. 1 in this example - let userQuery = 'orange cloud'; - const queryVector: EmbeddingResponse = await env.AI.run('@cf/baai/bge-base-en-v1.5', { - text: [userQuery], + let userQuery = "orange cloud"; + const queryVector: EmbeddingResponse = await env.AI.run( + "@cf/baai/bge-base-en-v1.5", + { + text: [userQuery], + }, + ); + + let matches = await env.VECTORIZE_INDEX.query(queryVector.data[0], { + topK: 1, }); - - let matches = await env.VECTORIZE_INDEX.query(queryVector.data[0], { topK: 1 }); return Response.json({ // Expect a vector ID. 1 to be your top match with a score of // ~0.896888444 @@ -206,7 +227,7 @@ export default { Before deploying your Worker globally, log in with your Cloudflare account by running: ```sh -$ npx wrangler login +npx wrangler login ``` You will be directed to a web page asking you to log in to the Cloudflare dashboard. After you have logged in, you will be asked if Wrangler can make changes to your Cloudflare account. Scroll down and select **Allow** to continue. 
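If you want to exercise the Worker before deploying it, `wrangler dev` can serve it for testing. A sketch (depending on your Wrangler version, Vectorize and Workers AI bindings may need to run against real resources, which is why the `--remote` flag is shown; check your version's behavior):

```sh
npx wrangler dev --remote
```

Then, in a second terminal (`8787` is Wrangler's default local port, and `/insert` is the route defined in the code above):

```sh
# Generate and insert the embeddings once, then run the query path
curl http://localhost:8787/insert
curl http://localhost:8787/
```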
@@ -214,7 +235,7 @@ You will be directed to a web page asking you to log in to the Cloudflare dashbo From here, deploy your Worker to make your project accessible on the Internet. To deploy your Worker, run: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` Preview your Worker at `https://embeddings-tutorial..workers.dev`. @@ -236,14 +257,14 @@ This should return the following JSON: Extend this example by: -* Adding more inputs and generating a larger set of vectors. -* Accepting a custom query parameter passed in the URL, for example via `URL.searchParams`. -* Creating a new index with a different [distance metric](/vectorize/best-practices/create-indexes/#distance-metrics) and observing how your scores change in response to your inputs. +- Adding more inputs and generating a larger set of vectors. +- Accepting a custom query parameter passed in the URL, for example via `URL.searchParams`. +- Creating a new index with a different [distance metric](/vectorize/best-practices/create-indexes/#distance-metrics) and observing how your scores change in response to your inputs. By finishing this tutorial, you have successfully created a Vectorize index, used Workers AI to generate vector embeddings, and deployed your project globally. ## Next steps -* Build a [generative AI chatbot](/workers-ai/tutorials/build-a-retrieval-augmented-generation-ai/) using Workers AI and Vectorize. -* Learn more about [how vector databases work](/vectorize/reference/what-is-a-vector-database/). -* Read [examples](/vectorize/reference/client-api/) on how to use the Vectorize API from Cloudflare Workers. +- Build a [generative AI chatbot](/workers-ai/tutorials/build-a-retrieval-augmented-generation-ai/) using Workers AI and Vectorize. +- Learn more about [how vector databases work](/vectorize/reference/what-is-a-vector-database/). +- Read [examples](/vectorize/reference/client-api/) on how to use the Vectorize API from Cloudflare Workers. diff --git a/src/content/docs/vectorize/get-started/intro.mdx b/src/content/docs/vectorize/get-started/intro.mdx index 4cf3627cdf8584..47699fd167a43c 100644 --- a/src/content/docs/vectorize/get-started/intro.mdx +++ b/src/content/docs/vectorize/get-started/intro.mdx @@ -3,10 +3,9 @@ title: Introduction to Vectorize pcx_content_type: get-started sidebar: order: 2 - --- -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; @@ -14,9 +13,9 @@ Vectorize is Cloudflare's vector database. Vector databases allow you to use mac This guide will instruct you through: -* Creating your first Vectorize index. -* Connecting a [Cloudflare Worker](/workers/) to your index. -* Inserting and performing a similarity search by querying your index. +- Creating your first Vectorize index. +- Connecting a [Cloudflare Worker](/workers/) to your index. +- Inserting and performing a similarity search by querying your index. ## Prerequisites @@ -36,33 +35,41 @@ To continue, you will need: :::note[New to Workers?] - Refer to [How Workers works](/workers/reference/how-workers-works/) to learn about the Workers serverless execution model works. Go to the [Workers Get started guide](/workers/get-started/guide/) to set up your first Worker. - ::: You will create a new project that will contain a Worker, which will act as the client application for your Vectorize index. Create a new project named `vectorize-tutorial` by running: - - - + + + This will create a new `vectorize-tutorial` directory. 
Your new `vectorize-tutorial` directory will include: -* A `"Hello World"` [Worker](/workers/get-started/guide/#3-write-code) at `src/index.ts`. -* A [`wrangler.toml`](/workers/wrangler/configuration/) configuration file. `wrangler.toml` is how your `vectorize-tutorial` Worker will access your index. +- A `"Hello World"` [Worker](/workers/get-started/guide/#3-write-code) at `src/index.ts`. +- A [`wrangler.toml`](/workers/wrangler/configuration/) configuration file. `wrangler.toml` is how your `vectorize-tutorial` Worker will access your index. :::note - If you are familiar with Cloudflare Workers, or initializing projects in a Continuous Integration (CI) environment, initialize a new project non-interactively by setting `CI=true` as an environmental variable when running `create cloudflare@latest`. For example: `CI=true npm create cloudflare@latest vectorize-tutorial --type=simple --git --ts --deploy=false` will create a basic "Hello World" project ready to build on. - ::: ## 2. Create an index @@ -72,7 +79,7 @@ A vector database is distinct from a traditional SQL or NoSQL database. A vector To create your first Vectorize index, change into the directory you just created for your Workers project: ```sh -$ cd vectorize-tutorial +cd vectorize-tutorial ``` :::note[Using Vectorize v1?] @@ -83,9 +90,9 @@ Please use the 'wrangler vectorize --deprecated-v1' flag to create, get, list, d To create an index, you will need to use the `wrangler vectorize create` command and provide a name for the index. A good index name is: -* A combination of lowercase and/or numeric ASCII characters, shorter than 32 characters, starts with a letter, and uses dashes (-) instead of spaces. -* Descriptive of the use-case and environment. For example, "production-doc-search" or "dev-recommendation-engine". -* Only used for describing the index, and is not directly referenced in code. +- A combination of lowercase and/or numeric ASCII characters, shorter than 32 characters, starts with a letter, and uses dashes (-) instead of spaces. +- Descriptive of the use-case and environment. For example, "production-doc-search" or "dev-recommendation-engine". +- Only used for describing the index, and is not directly referenced in code. In addition, you will need to define both the `dimensions` of the vectors you will store in the index, as well as the distance `metric` used to determine similar vectors when creating the index. A `metric` can be Euclidean, cosine, or dot product. **This configuration cannot be changed later**, as a vector database is configured for a fixed vector configuration. @@ -94,8 +101,10 @@ In addition, you will need to define both the `dimensions` of the vectors you wi Run the following `wrangler vectorize` command: ```sh -$ npx wrangler vectorize create tutorial-index --dimensions=3 --metric=cosine +npx wrangler vectorize create tutorial-index --dimensions=3 --metric=cosine +``` +```sh output ✅ Successfully created index 'tutorial-index' [[vectorize]] @@ -119,9 +128,9 @@ index_name = "tutorial-index" Specifically: -* The value (string) you set for `` will be used to reference this database in your Worker. In this tutorial, name your binding `VECTORIZE_INDEX`. -* The binding must be [a valid JavaScript variable name](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Grammar_and_types#variables). For example, `binding = "MY_INDEX"` or `binding = "PROD_SEARCH_INDEX"` would both be valid names for the binding. 
-* Your binding is available in your Worker at `env.` and the Vectorize [client API](/vectorize/reference/client-api/) is exposed on this binding for use within your Workers application. +- The value (string) you set for `` will be used to reference this database in your Worker. In this tutorial, name your binding `VECTORIZE_INDEX`. +- The binding must be [a valid JavaScript variable name](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Grammar_and_types#variables). For example, `binding = "MY_INDEX"` or `binding = "PROD_SEARCH_INDEX"` would both be valid names for the binding. +- Your binding is available in your Worker at `env.` and the Vectorize [client API](/vectorize/reference/client-api/) is exposed on this binding for use within your Workers application. ## 4. Insert vectors @@ -143,18 +152,38 @@ export interface Env { // Vectors from a machine-learning model are typically ~100 to 1536 dimensions // wide (or wider still). const sampleVectors: Array = [ - { id: '1', values: [32.4, 74.1, 3.2], metadata: { url: '/products/sku/13913913' } }, - { id: '2', values: [15.1, 19.2, 15.8], metadata: { url: '/products/sku/10148191' } }, - { id: '3', values: [0.16, 1.2, 3.8], metadata: { url: '/products/sku/97913813' } }, - { id: '4', values: [75.1, 67.1, 29.9], metadata: { url: '/products/sku/418313' } }, - { id: '5', values: [58.8, 6.7, 3.4], metadata: { url: '/products/sku/55519183' } }, + { + id: "1", + values: [32.4, 74.1, 3.2], + metadata: { url: "/products/sku/13913913" }, + }, + { + id: "2", + values: [15.1, 19.2, 15.8], + metadata: { url: "/products/sku/10148191" }, + }, + { + id: "3", + values: [0.16, 1.2, 3.8], + metadata: { url: "/products/sku/97913813" }, + }, + { + id: "4", + values: [75.1, 67.1, 29.9], + metadata: { url: "/products/sku/418313" }, + }, + { + id: "5", + values: [58.8, 6.7, 3.4], + metadata: { url: "/products/sku/55519183" }, + }, ]; export default { async fetch(request, env, ctx): Promise { let path = new URL(request.url).pathname; if (path.startsWith("/favicon")) { - return new Response('', { status: 404 }); + return new Response("", { status: 404 }); } // You only need to insert vectors into your index once @@ -168,8 +197,8 @@ export default { return Response.json(inserted); } - return Response.json({text: "nothing to do... yet"}, { status: 404 }) - } + return Response.json({ text: "nothing to do... yet" }, { status: 404 }); + }, } satisfies ExportedHandler; ``` @@ -201,18 +230,38 @@ export interface Env { // Vectors from a machine-learning model are typically ~100 to 1536 dimensions // wide (or wider still). 
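// Note: the sample vectors below are only three dimensions wide so that
// they match the tutorial index created with `--dimensions=3`. Vectors
// from a real embedding model must match the index's configured width.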
const sampleVectors: Array = [ - { id: '1', values: [32.4, 74.1, 3.2], metadata: { url: '/products/sku/13913913' } }, - { id: '2', values: [15.1, 19.2, 15.8], metadata: { url: '/products/sku/10148191' } }, - { id: '3', values: [0.16, 1.2, 3.8], metadata: { url: '/products/sku/97913813' } }, - { id: '4', values: [75.1, 67.1, 29.9], metadata: { url: '/products/sku/418313' } }, - { id: '5', values: [58.8, 6.7, 3.4], metadata: { url: '/products/sku/55519183' } }, + { + id: "1", + values: [32.4, 74.1, 3.2], + metadata: { url: "/products/sku/13913913" }, + }, + { + id: "2", + values: [15.1, 19.2, 15.8], + metadata: { url: "/products/sku/10148191" }, + }, + { + id: "3", + values: [0.16, 1.2, 3.8], + metadata: { url: "/products/sku/97913813" }, + }, + { + id: "4", + values: [75.1, 67.1, 29.9], + metadata: { url: "/products/sku/418313" }, + }, + { + id: "5", + values: [58.8, 6.7, 3.4], + metadata: { url: "/products/sku/55519183" }, + }, ]; export default { async fetch(request, env, ctx): Promise { let path = new URL(request.url).pathname; if (path.startsWith("/favicon")) { - return new Response('', { status: 404 }); + return new Response("", { status: 404 }); } // You only need to insert vectors into your index once @@ -241,7 +290,11 @@ export default { // By default, vector values are not returned, as in many cases the // vector id and scores are sufficient to map the vector back to the // original content it represents. - const matches = await env.VECTORIZE_INDEX.query(queryVector, { topK: 3, returnValues: true, returnMetadata: true }); + const matches = await env.VECTORIZE_INDEX.query(queryVector, { + topK: 3, + returnValues: true, + returnMetadata: true, + }); return Response.json({ // This will return the closest vectors: you will notice that the vector @@ -259,7 +312,7 @@ export default { Before deploying your Worker globally, log in with your Cloudflare account by running: ```sh -$ npx wrangler login +npx wrangler login ``` You will be directed to a web page asking you to log in to the Cloudflare dashboard. After you have logged in, you will be asked if Wrangler can make changes to your Cloudflare account. Scroll down and select **Allow** to continue. @@ -267,7 +320,7 @@ You will be directed to a web page asking you to log in to the Cloudflare dashbo From here, you can deploy your Worker to make your project accessible on the Internet. To deploy your Worker, run: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` Once deployed, preview your Worker at `https://vectorize-tutorial..workers.dev`. @@ -292,37 +345,36 @@ You will notice that `id: 5` has a `score` of `0.999909486`. 
Because you are usi ```json // https://vectorize-tutorial..workers.dev/ { - "matches": { - "count": 3, - "matches": [ - { - "id": "5", - "score": 0.999909486, - "values": [58.79999923706055, 6.699999809265137, 3.4000000953674316], - "metadata": { - "url": "/products/sku/55519183" - } - }, - { - "id": "4", - "score": 0.789848214, - "values": [75.0999984741211, 67.0999984741211, 29.899999618530273], - "metadata": { - "url": "/products/sku/418313" - } - }, - { - "id": "2", - "score": 0.611976262, - "values": [15.100000381469727, 19.200000762939453, 15.800000190734863], - "metadata": { - "url": "/products/sku/10148191" - } - } - ] - } + "matches": { + "count": 3, + "matches": [ + { + "id": "5", + "score": 0.999909486, + "values": [58.79999923706055, 6.699999809265137, 3.4000000953674316], + "metadata": { + "url": "/products/sku/55519183" + } + }, + { + "id": "4", + "score": 0.789848214, + "values": [75.0999984741211, 67.0999984741211, 29.899999618530273], + "metadata": { + "url": "/products/sku/418313" + } + }, + { + "id": "2", + "score": 0.611976262, + "values": [15.100000381469727, 19.200000762939453, 15.800000190734863], + "metadata": { + "url": "/products/sku/10148191" + } + } + ] + } } - ``` From here, experiment by passing a different `queryVector` and observe the results: the matches and the `score` should change based on the change in distance between the query vector and the vectors in our index. @@ -333,8 +385,8 @@ By finishing this tutorial, you have successfully created and queried your first ## Related resources -* [Build an end-to-end vector search application](/vectorize/get-started/embeddings/) using Workers AI and Vectorize. -* Learn more about [how vector databases work](/vectorize/reference/what-is-a-vector-database/). -* Read [examples](/vectorize/reference/client-api/) on how to use the Vectorize API from Cloudflare Workers. -* [Euclidean Distance vs Cosine Similarity](https://www.baeldung.com/cs/euclidean-distance-vs-cosine-similarity). -* [Dot product](https://en.wikipedia.org/wiki/Dot_product). +- [Build an end-to-end vector search application](/vectorize/get-started/embeddings/) using Workers AI and Vectorize. +- Learn more about [how vector databases work](/vectorize/reference/what-is-a-vector-database/). +- Read [examples](/vectorize/reference/client-api/) on how to use the Vectorize API from Cloudflare Workers. +- [Euclidean Distance vs Cosine Similarity](https://www.baeldung.com/cs/euclidean-distance-vs-cosine-similarity). +- [Dot product](https://en.wikipedia.org/wiki/Dot_product). diff --git a/src/content/docs/vectorize/reference/client-api.mdx b/src/content/docs/vectorize/reference/client-api.mdx index 5ce3dc04739ebf..03d664813cd450 100644 --- a/src/content/docs/vectorize/reference/client-api.mdx +++ b/src/content/docs/vectorize/reference/client-api.mdx @@ -3,10 +3,9 @@ title: Vectorize API pcx_content_type: concept sidebar: order: 2 - --- -import { Render } from "~/components" +import { Render } from "~/components"; This page covers the Vectorize API available within [Cloudflare Workers](/workers/), including usage examples. 
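The examples below reference the index binding as `env.YOUR_INDEX`. As a minimal sketch, the Worker environment type behind these examples would look something like the following (the binding name is a placeholder for whatever `binding` value you configured in `wrangler.toml`):

```ts
export interface Env {
  // Must match the `binding` value in the [[vectorize]] block of wrangler.toml
  YOUR_INDEX: VectorizeIndex;
}
```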
@@ -16,10 +15,10 @@ This page covers the Vectorize API available within [Cloudflare Workers](/worker ```ts let vectorsToInsert = [ - {id: "123", values: [32.4, 6.5, 11.2, 10.3, 87.9]}, - {id: "456", values: [2.5, 7.8, 9.1, 76.9, 8.5]}, -] -let inserted = await env.YOUR_INDEX.insert(vectorsToInsert) + { id: "123", values: [32.4, 6.5, 11.2, 10.3, 87.9] }, + { id: "456", values: [2.5, 7.8, 9.1, 76.9, 8.5] }, +]; +let inserted = await env.YOUR_INDEX.insert(vectorsToInsert); ``` Inserts vectors into the index. Returns the count of vectors inserted and their IDs. @@ -32,11 +31,11 @@ If you need to update existing vectors, use the [upsert](#upsert-vectors) operat ```ts let vectorsToUpsert = [ - {id: "123", values: [32.4, 6.5, 11.2, 10.3, 87.9]}, - {id: "456", values: [2.5, 7.8, 9.1, 76.9, 8.5]}, - {id: "768", values: [29.1, 5.7, 12.9, 15.4, 1.1]} -] -let upserted = await env.YOUR_INDEX.upsert(vectorsToUpsert) + { id: "123", values: [32.4, 6.5, 11.2, 10.3, 87.9] }, + { id: "456", values: [2.5, 7.8, 9.1, 76.9, 8.5] }, + { id: "768", values: [29.1, 5.7, 12.9, 15.4, 1.1] }, +]; +let upserted = await env.YOUR_INDEX.upsert(vectorsToUpsert); ``` Upserts vectors into an index. Returns the count of vectors upserted and their IDs. @@ -48,18 +47,22 @@ Upserting does not merge or combine the values or metadata of an existing vector ### Query vectors ```ts -let queryVector = [32.4, 6.55, 11.2, 10.3, 87.9] -let matches = await env.YOUR_INDEX.query(queryVector) +let queryVector = [32.4, 6.55, 11.2, 10.3, 87.9]; +let matches = await env.YOUR_INDEX.query(queryVector); ``` Query an index with the provided vector, returning the score(s) of the closest vectors based on the configured distance metric. -* Configure the number of returned matches by setting `topK` (default: 3) -* Return vector values by setting `returnValues: true` (default: false) -* Return vector metadata by setting `returnMetadata: true` (default: false) +- Configure the number of returned matches by setting `topK` (default: 3) +- Return vector values by setting `returnValues: true` (default: false) +- Return vector metadata by setting `returnMetadata: true` (default: false) ```ts -let matches = await env.YOUR_INDEX.query(queryVector, { topK: 5, returnValues: true, returnMetadata: true }) +let matches = await env.YOUR_INDEX.query(queryVector, { + topK: 5, + returnValues: true, + returnMetadata: true, +}); ``` ### Get vectors by ID @@ -97,9 +100,7 @@ Enable metadata filtering on the specified property. Limited to 10 properties. Run the following `wrangler vectorize` command: ```sh -$ wrangler vectorize create-metadata-index --property-name='some-prop' --type='string' - - +wrangler vectorize create-metadata-index --property-name='some-prop' --type='string' ``` ### Delete Metadata Index @@ -111,8 +112,7 @@ Allow Vectorize to delete the specified metadata index. Run the following `wrangler vectorize` command: ```sh -$ wrangler vectorize delete-metadata-index --property-name='some-prop' - +wrangler vectorize delete-metadata-index --property-name='some-prop' ``` ### List Metadata Indexes @@ -124,8 +124,7 @@ List metadata properties on which metadata filtering is enabled. Run the following `wrangler vectorize` command: ```sh -$ wrangler vectorize list-metadata-index - +wrangler vectorize list-metadata-index ``` ### Get Index Info @@ -137,30 +136,28 @@ Get additional details about the index. 
Run the following `wrangler vectorize` command:

```sh
-$ wrangler vectorize info
-
+wrangler vectorize info
```

-
## Vectors

A vector represents the vector embedding output from a machine learning model.

-* `id` - a unique `string` identifying the vector in the index. This should map back to the ID of the document, object or database identifier that the vector values were generated from.
-* `namespace` - an optional partition key within a index. Operations are performed per-namespace, so this can be used to create isolated segments within a larger index.
-* `values` - an array of `number`, `Float32Array`, or `Float64Array` as the vector embedding itself. This must be a dense array, and the length of this array must match the `dimensions` configured on the index.
-* `metadata` - an optional set of key-value pairs that can be used to store additional metadata alongside a vector.
+- `id` - a unique `string` identifying the vector in the index. This should map back to the ID of the document, object or database identifier that the vector values were generated from.
+- `namespace` - an optional partition key within an index. Operations are performed per-namespace, so this can be used to create isolated segments within a larger index.
+- `values` - an array of `number`, `Float32Array`, or `Float64Array` as the vector embedding itself. This must be a dense array, and the length of this array must match the `dimensions` configured on the index.
+- `metadata` - an optional set of key-value pairs that can be used to store additional metadata alongside a vector.

```ts
let vectorExample = {
-  id: "12345",
-  values: [32.4, 6.55, 11.2, 10.3, 87.9],
-  metadata: {
-    "key": "value",
-    "hello": "world",
-    "url": "r2://bucket/some/object.json"
-  }
-}
+  id: "12345",
+  values: [32.4, 6.55, 11.2, 10.3, 87.9],
+  metadata: {
+    key: "value",
+    hello: "world",
+    url: "r2://bucket/some/object.json",
+  },
+};
```

## Binding to a Worker

diff --git a/src/content/docs/vectorize/reference/metadata-filtering.mdx b/src/content/docs/vectorize/reference/metadata-filtering.mdx
index a16c9c657293f0..4639192e30e17f 100644
--- a/src/content/docs/vectorize/reference/metadata-filtering.mdx
+++ b/src/content/docs/vectorize/reference/metadata-filtering.mdx
@@ -3,7 +3,6 @@ title: Metadata filtering
 pcx_content_type: concept
 sidebar:
   order: 6
-
---

# Metadata Filtering

Vectorize v2 requires you to define the metadata properties that will support filtering.

:::

-In addition to providing an input vector to your query, you can also filter by [vector metadata](/vectorize/best-practices/insert-vectors/#metadata) associated with every vector. Query results only include vectors that match `filter` criteria, meaning that `filter` is applied first, and `topK` results are taken from the filtered set.
+In addition to providing an input vector to your query, you can also filter by [vector metadata](/vectorize/best-practices/insert-vectors/#metadata) associated with every vector. Query results only include vectors that match `filter` criteria, meaning that `filter` is applied first, and `topK` results are taken from the filtered set.

By using metadata filtering to limit the scope of a query, you can filter by specific customer IDs, tenant, product category or any other metadata you associate with your vectors.
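For example, a multi-tenant application could scope every query to the calling customer. A minimal sketch, assuming vectors were inserted with a `customer_id` metadata property (the property name and value here are illustrative):

```ts
// Only vectors whose metadata matches the filter are considered;
// the topK results are taken from that filtered set.
const matches = await env.YOUR_INDEX.query(queryVector, {
  topK: 5,
  filter: { customer_id: "customer-123" },
});
```

On Vectorize v2, this assumes a metadata index was first created on `customer_id` (for example, with `wrangler vectorize create-metadata-index`), since only indexed properties support filtering.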
@@ -28,16 +27,16 @@ Optional `filter` property on `query()` method specifies metadata filter: | `$eq` | Equals | | `$ne` | Not equals | -* `filter` must be non-empty object whose compact JSON representation must be less than 2048 bytes. -* `filter` object keys cannot be empty, contain `" | .` (dot is reserved for nesting), start with `$`, or be longer than 512 characters. -* `filter` object non-nested values can be `string`, `number`, `boolean`, or `null` values. +- `filter` must be non-empty object whose compact JSON representation must be less than 2048 bytes. +- `filter` object keys cannot be empty, contain `" | .` (dot is reserved for nesting), start with `$`, or be longer than 512 characters. +- `filter` object non-nested values can be `string`, `number`, `boolean`, or `null` values. ### Namespace versus metadata filtering -Both [namespaces](/vectorize/best-practices/insert-vectors/#namespaces) and metadata filtering narrow the vector search space for a query. Consider the following when evaluating both filter types: +Both [namespaces](/vectorize/best-practices/insert-vectors/#namespaces) and metadata filtering narrow the vector search space for a query. Consider the following when evaluating both filter types: -* A namespace filter is applied before metadata filter(s). -* A vector can only be part of a single namespace with the documented [limits](/vectorize/platform/limits/). Vector metadata can contain multiple key-value pairs up to [metadata per vector limits](/vectorize/platform/limits/). Metadata values support different types (`string`, `boolean`, and others), therefore offering more flexibility. +- A namespace filter is applied before metadata filter(s). +- A vector can only be part of a single namespace with the documented [limits](/vectorize/platform/limits/). Vector metadata can contain multiple key-value pairs up to [metadata per vector limits](/vectorize/platform/limits/). Metadata values support different types (`string`, `boolean`, and others), therefore offering more flexibility. ### Valid `filter` examples @@ -62,7 +61,8 @@ Both [namespaces](/vectorize/best-practices/insert-vectors/#namespaces) and meta #### Keys define nesting with `.` (dot) ```json -{ "pandas.nice": 42 } // looks for { "pandas": { "nice": 42 } } +{ "pandas.nice": 42 } + // looks for { "pandas": { "nice": 42 } } ``` ## Examples @@ -78,37 +78,38 @@ Please use the 'wrangler vectorize --deprecated-v1' flag to create, get, list, d With the following index definition: ```sh -$ npx wrangler vectorize create tutorial-index --dimensions=3 --metric=cosine +npx wrangler vectorize create tutorial-index --dimensions=3 --metric=cosine ``` Metadata can be added when [inserting or upserting vectors](/vectorize/best-practices/insert-vectors/#examples). 
```ts const newMetadataVectors: Array = [ - { - id: "1", - values: [32.4, 74.1, 3.2], - metadata: { url: "/products/sku/13913913", streaming_platform: "netflix" } - }, - { - id: "2", - values: [15.1, 19.2, 15.8], - metadata: { url: "/products/sku/10148191", streaming_platform: "hbo" } - }, - { - id: "3", - values: [0.16, 1.2, 3.8], - metadata: { url: "/products/sku/97913813", streaming_platform: "amazon" } - }, - { id: "4", - values: [75.1, 67.1, 29.9], - metadata: { url: "/products/sku/418313", streaming_platform: "netflix" } - }, - { - id: "5", - values: [58.8, 6.7, 3.4], - metadata: { url: "/products/sku/55519183", streaming_platform: "hbo" } - }, + { + id: "1", + values: [32.4, 74.1, 3.2], + metadata: { url: "/products/sku/13913913", streaming_platform: "netflix" }, + }, + { + id: "2", + values: [15.1, 19.2, 15.8], + metadata: { url: "/products/sku/10148191", streaming_platform: "hbo" }, + }, + { + id: "3", + values: [0.16, 1.2, 3.8], + metadata: { url: "/products/sku/97913813", streaming_platform: "amazon" }, + }, + { + id: "4", + values: [75.1, 67.1, 29.9], + metadata: { url: "/products/sku/418313", streaming_platform: "netflix" }, + }, + { + id: "5", + values: [58.8, 6.7, 3.4], + metadata: { url: "/products/sku/55519183", streaming_platform: "hbo" }, + }, ]; // Upsert vectors with added metadata, returning a count of the vectors upserted and their vector IDs @@ -122,42 +123,46 @@ Use the `query()` method: ```ts let queryVector: Array = [54.8, 5.5, 3.1]; // Best match is vector id = 5 (score closet to 1) -let originalMatches = await env.YOUR_INDEX.query(queryVector, { topK: 3, returnValues: true, returnMetadata: true }); +let originalMatches = await env.YOUR_INDEX.query(queryVector, { + topK: 3, + returnValues: true, + returnMetadata: true, +}); ``` Results without metadata filtering: ```json { - "matches": [ - { - "id": "5", - "score": 0.999909486, - "values": [58.79999923706055, 6.699999809265137, 3.4000000953674316], - "metadata": { - "url": "/products/sku/55519183", - "streaming_platform": "hbo" - } - }, - { - "id": "4", - "score": 0.789848214, - "values": [75.0999984741211, 67.0999984741211, 29.899999618530273], - "metadata": { - "url": "/products/sku/418313", - "streaming_platform": "netflix" - } - }, - { - "id": "2", - "score": 0.611976262, - "values": [15.100000381469727, 19.200000762939453, 15.800000190734863], - "metadata": { - "url": "/products/sku/10148191", - "streaming_platform": "hbo" - } - } - ] + "matches": [ + { + "id": "5", + "score": 0.999909486, + "values": [58.79999923706055, 6.699999809265137, 3.4000000953674316], + "metadata": { + "url": "/products/sku/55519183", + "streaming_platform": "hbo" + } + }, + { + "id": "4", + "score": 0.789848214, + "values": [75.0999984741211, 67.0999984741211, 29.899999618530273], + "metadata": { + "url": "/products/sku/418313", + "streaming_platform": "netflix" + } + }, + { + "id": "2", + "score": 0.611976262, + "values": [15.100000381469727, 19.200000762939453, 15.800000190734863], + "metadata": { + "url": "/products/sku/10148191", + "streaming_platform": "hbo" + } + } + ] } ``` @@ -166,36 +171,41 @@ The same `query()` method with a `filter` property supports metadata filtering. 
```ts
let queryVector: Array = [54.8, 5.5, 3.1]; // Best match is vector id = 4 with metadata filter
-let metadataMatches = await env.YOUR_INDEX.query(queryVector, { topK: 3, filter: { streaming_platform: "netflix" }, returnValues: true, returnMetadata: true } )
+let metadataMatches = await env.YOUR_INDEX.query(queryVector, {
+  topK: 3,
+  filter: { streaming_platform: "netflix" },
+  returnValues: true,
+  returnMetadata: true,
+});
```

Results with metadata filtering:

```json
{
-  "matches": [
-    {
-      "id": "4",
-      "score": 0.789848214,
-      "values": [ 75.0999984741211, 67.0999984741211, 29.899999618530273],
-      "metadata": {
-        "url": "/products/sku/418313",
-        "streaming_platform": "netflix"
-      }
-    },
-    {
-      "id": "1",
-      "score": 0.491185264,
-      "values": [32.400001525878906, 74.0999984741211, 3.200000047683716],
-      "metadata": {
-        "url": "/products/sku/13913913",
-        "streaming_platform": "netflix"
-      }
-    }
-  ]
+  "matches": [
+    {
+      "id": "4",
+      "score": 0.789848214,
+      "values": [75.0999984741211, 67.0999984741211, 29.899999618530273],
+      "metadata": {
+        "url": "/products/sku/418313",
+        "streaming_platform": "netflix"
+      }
+    },
+    {
+      "id": "1",
+      "score": 0.491185264,
+      "values": [32.400001525878906, 74.0999984741211, 3.200000047683716],
+      "metadata": {
+        "url": "/products/sku/13913913",
+        "streaming_platform": "netflix"
+      }
+    }
+  ]
}
```

## Limitations

-* Only newly created indexes on or after 2023-12-06 support metadata filtering. Previously created indexes cannot be migrated to support metadata filtering.
+- Only newly created indexes on or after 2023-12-06 support metadata filtering. Previously created indexes cannot be migrated to support metadata filtering.
diff --git a/src/content/docs/waf/managed-rules/payload-logging/command-line/decrypt-payload.mdx b/src/content/docs/waf/managed-rules/payload-logging/command-line/decrypt-payload.mdx
index e82a47255d66cc..c6bb3b6e2a91cc 100644
--- a/src/content/docs/waf/managed-rules/payload-logging/command-line/decrypt-payload.mdx
+++ b/src/content/docs/waf/managed-rules/payload-logging/command-line/decrypt-payload.mdx
@@ -7,7 +7,6 @@ sidebar:
 head:
   - tag: title
     content: Decrypt the payload content in the command line
-
---

Use the `matched-data-cli` tool to decrypt a payload in the command line.

@@ -19,15 +18,15 @@ Use the `matched-data-cli` tool to decrypt a payload in the command line.
3. Open a command line window and change to the local folder containing the `matched-data-cli` binary.

   ```sh
-   ~ $ cd matched-data-cli
+   cd matched-data-cli
   ```

4. Create two files: one with your private key and another one with the encrypted payload:

   ```sh
-   ~/matched-data-cli $ printf "" > private_key.txt && chmod 400 private_key.txt
+   printf "" > private_key.txt && chmod 400 private_key.txt

-   ~/matched-data-cli $ printf "" > encrypted_payload.txt
+   printf "" > encrypted_payload.txt
   ```

   Replace `` with your private key and `` with the encrypted payload.

5. Run the following command to decrypt the payload:

   ```sh
-   ~/matched-data-cli $ ./matched-data-cli decrypt -k private_key.txt encrypted_payload.txt
+   ./matched-data-cli decrypt -k private_key.txt encrypted_payload.txt
   ```

:::note

-If you are using macOS and you get an error when running the `matched-data-cli` tool, refer to [Troubleshooting macOS errors](/waf/managed-rules/payload-logging/command-line/generate-key-pair/#troubleshooting-macos-errors). 
+If you are using macOS and you get an error when running the `matched-data-cli` tool, refer to [Troubleshooting macOS errors](/waf/managed-rules/payload-logging/command-line/generate-key-pair/#troubleshooting-macos-errors).
:::

## Example

The following example creates two files — one with the private key and another one with the encrypted payload — and runs the `matched-data-cli` tool to decrypt the payload in the `encrypted_payload.txt` file:

```sh
-~ $ cd matched-data-cli
+cd matched-data-cli
+
+printf "uBS5eBttHrqkdY41kbZPdvYnNz8Vj0TvKIUpjB1y/GA=" > private_key.txt && chmod 400 private_key.txt

-~/matched-data-cli $ printf "uBS5eBttHrqkdY41kbZPdvYnNz8Vj0TvKIUpjB1y/GA=" > private_key.txt && chmod 400 private_key.txt
+printf "AzTY6FHajXYXuDMUte82wrd+1n5CEHPoydYiyd3FMg5IEQAAAAAAAAA0lOhGXBclw8pWU5jbbYuepSIJN5JohTtZekLliJBlVWk=" > encrypted_payload.txt

-~/matched-data-cli $ printf "AzTY6FHajXYXuDMUte82wrd+1n5CEHPoydYiyd3FMg5IEQAAAAAAAAA0lOhGXBclw8pWU5jbbYuepSIJN5JohTtZekLliJBlVWk=" > encrypted_payload.txt
+./matched-data-cli decrypt -k private_key.txt encrypted_payload.txt
+```

-~/matched-data-cli $ ./matched-data-cli decrypt -k private_key.txt encrypted_payload.txt
+```sh output
 test matched data
```

:::note[Encryption formats]

- The format of the encrypted payload can change over time. The `matched-data-cli` tool returns an error if it cannot decrypt a new encryption format. To fix this error, [download](https://github.com/cloudflare/matched-data-cli/releases) a newer version of the tool from GitHub and try again.

- :::
diff --git a/src/content/docs/waf/managed-rules/payload-logging/command-line/generate-key-pair.mdx b/src/content/docs/waf/managed-rules/payload-logging/command-line/generate-key-pair.mdx
index 0cfca90f01d52c..df3220171a1a4f 100644
--- a/src/content/docs/waf/managed-rules/payload-logging/command-line/generate-key-pair.mdx
+++ b/src/content/docs/waf/managed-rules/payload-logging/command-line/generate-key-pair.mdx
@@ -7,7 +7,6 @@ sidebar:
 head:
   - tag: title
     content: Generate a key pair in the command line
-
---

Generate a public/private key pair using the Cloudflare [`matched-data-cli`](https://github.com/cloudflare/matched-data-cli) command-line tool. After generating a key pair, enter the generated public key in the payload logging configuration.

@@ -21,16 +20,19 @@ Do the following:
3. Open a terminal and go to the local folder containing the `matched-data-cli` tool.

   ```sh
-   ~ $ cd matched-data-cli
+   cd matched-data-cli
   ```

4. 
Run the following command: ```sh - ~/matched-data-cli $ ./matched-data-cli generate-key-pair + ./matched-data-cli generate-key-pair + ``` + + ```sh output { - "private_key": "uBS5eBttHrqkdY41kbZPdvYnNz8Vj0TvKIUpjB1y/GA=", - "public_key": "Ycig/Zr/pZmklmFUN99nr+taURlYItL91g+NcHGYpB8=" + "private_key": "uBS5eBttHrqkdY41kbZPdvYnNz8Vj0TvKIUpjB1y/GA=", + "public_key": "Ycig/Zr/pZmklmFUN99nr+taURlYItL91g+NcHGYpB8=" } ``` diff --git a/src/content/docs/waf/reference/migration-guides/firewall-rules-to-custom-rules.mdx b/src/content/docs/waf/reference/migration-guides/firewall-rules-to-custom-rules.mdx index fa860fd95b6282..f9a56922efc93b 100644 --- a/src/content/docs/waf/reference/migration-guides/firewall-rules-to-custom-rules.mdx +++ b/src/content/docs/waf/reference/migration-guides/firewall-rules-to-custom-rules.mdx @@ -3,55 +3,52 @@ title: Firewall Rules to WAF custom rules migration pcx_content_type: reference sidebar: order: 2 - --- Cloudflare converted existing [firewall rules](/firewall/) into [WAF custom rules](/waf/custom-rules/). With custom rules, you get the same level of protection and a few additional features. Custom rules are available in the Cloudflare dashboard at **Security** > **WAF** > **Custom rules**. :::caution[Deprecation notice] - **Cloudflare Firewall Rules is now deprecated.** The Firewall Rules API and Filters API, as well as the `cloudflare_firewall_rule` and `cloudflare_filter` Terraform resources, will only be available until 2025-01-15. If you have any automation based on these APIs and resources, you must migrate to the new APIs and resources before 2025-01-15 to avoid any issues. On 2025-01-15, the APIs and resources mentioned above will stop working. Any remaining active firewall rules will be disabled, and the **Firewall rules** tab in the dashboard will be removed. If you have not migrated to WAF custom rules yet, you may have some invalid configuration that prevents the migration from happening. In this case, contact your account team to get help with the migration to WAF custom rules. - ::: ## Main differences The main differences between firewall rules and WAF custom rules are the following: -* [Improved response for Block action](#improved-response-for-block-action) -* [Different error page for blocked requests](#different-error-page-for-blocked-requests) -* [New Skip action replacing both Allow and Bypass actions](#new-skip-action-replacing-both-allow-and-bypass-actions) -* [Custom rules are evaluated in order](#custom-rules-are-evaluated-in-order) -* [Logs and events](#logs-and-events) -* [New API and Terraform resources](#new-api-and-terraform-resources) +- [Improved response for Block action](#improved-response-for-block-action) +- [Different error page for blocked requests](#different-error-page-for-blocked-requests) +- [New Skip action replacing both Allow and Bypass actions](#new-skip-action-replacing-both-allow-and-bypass-actions) +- [Custom rules are evaluated in order](#custom-rules-are-evaluated-in-order) +- [Logs and events](#logs-and-events) +- [New API and Terraform resources](#new-api-and-terraform-resources) ### Improved response for Block action -In WAF custom rules you can [customize the response of the *Block* action](/waf/custom-rules/create-dashboard/#configure-a-custom-response-for-blocked-requests). +In WAF custom rules you can [customize the response of the _Block_ action](/waf/custom-rules/create-dashboard/#configure-a-custom-response-for-blocked-requests). -The default block response is a Cloudflare standard HTML page. 
If you need to send a custom response for *Block* actions, configure the custom rule to return a fixed response with a custom response code (403, by default) and a custom body (HTML, JSON, XML, or plain text). +The default block response is a Cloudflare standard HTML page. If you need to send a custom response for _Block_ actions, configure the custom rule to return a fixed response with a custom response code (403, by default) and a custom body (HTML, JSON, XML, or plain text). To define a custom response for a single rule, go to **Security** > **WAF** > [**Custom rules**](https://dash.cloudflare.com/?to=/:account/:zone/security/waf/custom-rules), edit the custom rule, and fill in the block-related options. :::note -Custom block response configurations will not be returned by the Firewall Rules API. You must use the [Rulesets API](/waf/custom-rules/create-api/#example-b) to manage this new feature. +Custom block response configurations will not be returned by the Firewall Rules API. You must use the [Rulesets API](/waf/custom-rules/create-api/#example-b) to manage this new feature. ::: ### Different error page for blocked requests -Requests blocked by a firewall rule with a *Block* action would get a Cloudflare [1020 error code](/support/troubleshooting/cloudflare-errors/troubleshooting-cloudflare-1xxx-errors/#error-1020-access-denied) response. Cloudflare users could customize this error page in **Custom Pages** > **1000 Class Errors**. +Requests blocked by a firewall rule with a _Block_ action would get a Cloudflare [1020 error code](/support/troubleshooting/cloudflare-errors/troubleshooting-cloudflare-1xxx-errors/#error-1020-access-denied) response. Cloudflare users could customize this error page in **Custom Pages** > **1000 Class Errors**. Requests blocked by a WAF custom rule will get a different response: the WAF block response. To customize the default block response, you can either: -* Define a custom WAF block response for your entire zone in [**Custom Pages**](https://dash.cloudflare.com/?to=/:account/:zone/custom-pages) > **WAF Block**. This custom page will always have an HTML content type. -* [Define a custom response](/waf/custom-rules/create-dashboard/#configure-a-custom-response-for-blocked-requests) for requests blocked by a specific WAF custom rule. This custom response supports other content types besides HTML. +- Define a custom WAF block response for your entire zone in [**Custom Pages**](https://dash.cloudflare.com/?to=/:account/:zone/custom-pages) > **WAF Block**. This custom page will always have an HTML content type. +- [Define a custom response](/waf/custom-rules/create-dashboard/#configure-a-custom-response-for-blocked-requests) for requests blocked by a specific WAF custom rule. This custom response supports other content types besides HTML. If you have customized your 1xxx error page in Custom Pages for requests blocked by firewall rules, you will need to create a new response page for blocked requests using one of the above methods. @@ -59,58 +56,58 @@ For more information on Custom Pages, refer to [Configuring Custom Pages](/suppo ### New Skip action replacing both Allow and Bypass actions -Firewall Rules supported the *Allow* and *Bypass* actions, often used together. These actions were commonly used for handling known legitimate requests — for example, requests coming from trusted IP addresses. +Firewall Rules supported the _Allow_ and _Bypass_ actions, often used together. 
These actions were commonly used for handling known legitimate requests — for example, requests coming from trusted IP addresses. -When a request triggered *Allow*, all remaining firewall rules were not evaluated, effectively allowing the request to continue to the next security product. The *Bypass* action was designed to specify which security products (such as WAF managed rules, rate limiting rules, and User Agent Blocking) should not run on the request triggering the action. +When a request triggered _Allow_, all remaining firewall rules were not evaluated, effectively allowing the request to continue to the next security product. The _Bypass_ action was designed to specify which security products (such as WAF managed rules, rate limiting rules, and User Agent Blocking) should not run on the request triggering the action. With Firewall Rules, if you wanted to stop running all security products for a given request, you would create two rules: -* One rule with *Bypass* action (selecting all security products). -* One rule with *Allow* action (to stop executing other firewall rules). +- One rule with _Bypass_ action (selecting all security products). +- One rule with _Allow_ action (to stop executing other firewall rules). -The requirement of having two rules to address this common scenario no longer applies to WAF custom rules. You should now [use the *Skip* action](/waf/custom-rules/skip/), which combines the *Allow* and *Bypass* actions. The *Skip* action fully replaces the *Allow* and *Bypass* actions, which are not supported in WAF custom rules. +The requirement of having two rules to address this common scenario no longer applies to WAF custom rules. You should now [use the _Skip_ action](/waf/custom-rules/skip/), which combines the _Allow_ and _Bypass_ actions. The _Skip_ action fully replaces the _Allow_ and _Bypass_ actions, which are not supported in WAF custom rules. -With the *Skip* action you can do the following: +With the _Skip_ action you can do the following: -* Stop running all the remaining custom rules (equivalent to the *Allow* action) -* Avoid running other security products (equivalent to the *Bypass* action) -* A combination of the above. +- Stop running all the remaining custom rules (equivalent to the _Allow_ action) +- Avoid running other security products (equivalent to the _Bypass_ action) +- A combination of the above. -You can also select whether you want to log events matching the custom rule with the *Skip* action or not. This is especially useful when creating a positive security model to avoid logging large amounts of legitimate traffic. +You can also select whether you want to log events matching the custom rule with the _Skip_ action or not. This is especially useful when creating a positive security model to avoid logging large amounts of legitimate traffic. :::note -The Firewall Rules API does not support the *Skip* action. When you create a custom rule with *Skip* action, it is translated to *Allow* and *Bypass* in the Firewall Rules API. You must use the [Rulesets API](/waf/custom-rules/skip/api-examples/) to fully use the new *Skip* action functionality. +The Firewall Rules API does not support the _Skip_ action. When you create a custom rule with _Skip_ action, it is translated to _Allow_ and _Bypass_ in the Firewall Rules API. You must use the [Rulesets API](/waf/custom-rules/skip/api-examples/) to fully use the new _Skip_ action functionality. 
::: ### Custom rules are evaluated in order -Firewall rules actions had a specific [order of precedence](/firewall/cf-firewall-rules/actions/) when using [priority ordering](/firewall/cf-firewall-rules/order-priority/#managing-rule-evaluation-by-priority-order). In contrast, custom rules actions do not have such an order. Custom rules are always evaluated in order, and some actions like *Block* will stop the evaluation of other rules. +Firewall rules actions had a specific [order of precedence](/firewall/cf-firewall-rules/actions/) when using [priority ordering](/firewall/cf-firewall-rules/order-priority/#managing-rule-evaluation-by-priority-order). In contrast, custom rules actions do not have such an order. Custom rules are always evaluated in order, and some actions like _Block_ will stop the evaluation of other rules. For example, if you were using priority ordering and had the following firewall rules with the same priority both matching an incoming request: -* Firewall rule #1 — Priority: 2 / Action: *Block* -* Firewall rule #2 — Priority: 2 / Action: *Allow* +- Firewall rule #1 — Priority: 2 / Action: _Block_ +- Firewall rule #2 — Priority: 2 / Action: _Allow_ -The request would be allowed, since the *Allow* action in Firewall Rules would have precedence over the *Block* action. +The request would be allowed, since the _Allow_ action in Firewall Rules would have precedence over the _Block_ action. In contrast, if you create two custom rules where both rules match an incoming request: -* Custom rule #1 — Action: *Block* -* Custom rule #2 — Action: *Skip* (configured to skip all remaining custom rules) +- Custom rule #1 — Action: _Block_ +- Custom rule #2 — Action: _Skip_ (configured to skip all remaining custom rules) -The request would be blocked, since custom rules are evaluated in order and the *Block* action will stop the evaluation of other rules. +The request would be blocked, since custom rules are evaluated in order and the _Block_ action will stop the evaluation of other rules. :::note -For the custom rules converted from your existing firewall rules, Cloudflare will preserve your current order of execution. +For the custom rules converted from your existing firewall rules, Cloudflare will preserve your current order of execution. ::: ### Logs and events Events logged by custom rules are shown in [Security Events](/waf/analytics/security-events/), available at **Security** > **Events**, with `Custom Rules` as their source. -You may still find events generated by Firewall Rules in the Security Events page when you select a time frame including the days when the transition to custom rules occurred. Similarly, you may still find events with both *Skip* and *Allow* actions in the same view during the transition period. +You may still find events generated by Firewall Rules in the Security Events page when you select a time frame including the days when the transition to custom rules occurred. Similarly, you may still find events with both _Skip_ and _Allow_ actions in the same view during the transition period. ### New API and Terraform resources @@ -134,7 +131,7 @@ For users that still have access to both products, the **Firewall rules** tab wi For the time being, all three APIs will be available (Firewall Rules API, Filters API, and Rulesets API). Cloudflare will internally convert your [Firewall Rules API](/firewall/api/cf-firewall-rules/) and [Filters API](/firewall/api/cf-filters/) calls into the corresponding [Rulesets API](/waf/custom-rules/create-api/) calls. 
The converted API calls between the Firewall Rules API/Filters API and the Rulesets API appear in audit logs as generated by Cloudflare and not by the actual user making the requests. There will be a single list of rules for both firewall rules and WAF custom rules. -Some new features of WAF custom rules, like custom responses for blocked requests and the *Skip* action, are not supported in the Firewall Rules API. To take advantage of these features, Cloudflare recommends that you use the custom rules page in the Cloudflare dashboard or the Rulesets API. +Some new features of WAF custom rules, like custom responses for blocked requests and the _Skip_ action, are not supported in the Firewall Rules API. To take advantage of these features, Cloudflare recommends that you use the custom rules page in the Cloudflare dashboard or the Rulesets API. Refer to the WAF documentation for [examples of managing WAF custom rules using the Rulesets API](/waf/custom-rules/create-api/). @@ -142,8 +139,8 @@ Refer to the WAF documentation for [examples of managing WAF custom rules using **The following Terraform resources from the Cloudflare provider are now deprecated:** -* [`cloudflare_firewall_rule`](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs/resources/firewall_rule) -* [`cloudflare_filter`](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs/resources/filter) +- [`cloudflare_firewall_rule`](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs/resources/firewall_rule) +- [`cloudflare_filter`](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs/resources/filter) These resources will stop working on 2025-01-15. If you are currently using these resources to manage your Firewall Rules configuration, you must manually migrate any Terraform configuration to [`cloudflare_ruleset`](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs/resources/ruleset) resources before this date to prevent any issues. @@ -162,7 +159,7 @@ The recommended steps for replacing your firewall rules (and filters) configurat 1. Run the following command to generate all ruleset configurations for a zone: ```sh null {3,6} - $ cf-terraforming generate --zone --resource-type "cloudflare_ruleset" + cf-terraforming generate --zone --resource-type "cloudflare_ruleset" resource "cloudflare_ruleset" "terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31" { kind = "zone" @@ -181,24 +178,29 @@ The recommended steps for replacing your firewall rules (and filters) configurat 3. Import the `cloudflare_ruleset` resource you previously identified into Terraform state using the `terraform import` command. For example: - ```sh - $ terraform import cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31 zone//3c0b456bc2aa443089c5f40f45f51b31 +```sh +terraform import cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31 zone//3c0b456bc2aa443089c5f40f45f51b31 +``` - cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Importing from ID "zone//3c0b456bc2aa443089c5f40f45f51b31"... - cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Import prepared! - Prepared cloudflare_ruleset for import - cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Refreshing state... 
[id=3c0b456bc2aa443089c5f40f45f51b31] +```sh output +cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Importing from ID "zone//3c0b456bc2aa443089c5f40f45f51b31"... +cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Import prepared! + Prepared cloudflare_ruleset for import +cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Refreshing state... [id=3c0b456bc2aa443089c5f40f45f51b31] - Import successful! +Import successful! - The resources that were imported are shown above. These resources are now in - your Terraform state and will henceforth be managed by Terraform. - ``` +The resources that were imported are shown above. These resources are now in +your Terraform state and will henceforth be managed by Terraform. +``` 4. Run `terraform plan` to validate that Terraform now checks the state of the new `cloudflare_ruleset` resource, in addition to other existing resources already managed by Terraform. For example: ```sh - $ terraform plan + terraform plan + ``` + + ```sh output cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Refreshing state... [id=3c0b456bc2aa443089c5f40f45f51b31] [...] @@ -211,14 +213,16 @@ The recommended steps for replacing your firewall rules (and filters) configurat :::caution[Important] - You must remove firewall rules and filters from Terraform state before deleting their configuration from `.tf` configuration files to prevent issues. + You must remove firewall rules and filters from Terraform state before deleting their configuration from `.tf` configuration files to prevent issues. ::: 1. Run the following command to find all resources related to firewall rules and filters: ```sh - $ terraform state list | grep -E '^cloudflare_(filter|firewall_rule)\.' + terraform state list | grep -E '^cloudflare_(filter|firewall_rule)\.' + ``` + ```sh output cloudflare_filter.my_filter cloudflare_firewall_rule.my_firewall_rule ``` @@ -226,8 +230,10 @@ The recommended steps for replacing your firewall rules (and filters) configurat 2. Run the `terraform state rm ...` command in dry-run mode to understand the impact of removing those resources without performing any changes: ```sh - $ terraform state rm -dry-run cloudflare_filter.my_filter cloudflare_firewall_rule.my_firewall_rule + terraform state rm -dry-run cloudflare_filter.my_filter cloudflare_firewall_rule.my_firewall_rule + ``` + ```sh output Would remove cloudflare_filter.my_filter Would remove cloudflare_firewall_rule.my_firewall_rule ``` @@ -235,8 +241,10 @@ The recommended steps for replacing your firewall rules (and filters) configurat 3. If the impact looks correct, run the same command without the `-dry-run` parameter to actually remove the resources from Terraform state: ```sh - $ terraform state rm cloudflare_filter.my_filter cloudflare_firewall_rule.my_firewall_rule + terraform state rm cloudflare_filter.my_filter cloudflare_firewall_rule.my_firewall_rule + ``` + ```sh output Removed cloudflare_filter.my_filter Removed cloudflare_firewall_rule.my_firewall_rule Successfully removed 2 resource instance(s). @@ -246,18 +254,20 @@ The recommended steps for replacing your firewall rules (and filters) configurat 7. Run `terraform plan` to verify that the resources you deleted from configuration files no longer appear. You should not have any pending changes. 
- ```sh - $ terraform plan +```sh +terraform plan +``` - cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Refreshing state... [id=3c0b456bc2aa443089c5f40f45f51b31] - [...] +```sh output +cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Refreshing state... [id=3c0b456bc2aa443089c5f40f45f51b31] +[...] - No changes. Your infrastructure matches the configuration. +No changes. Your infrastructure matches the configuration. - Terraform has compared your real infrastructure against your configuration and found no differences, so no changes are needed. - ``` +Terraform has compared your real infrastructure against your configuration and found no differences, so no changes are needed. +``` For details on importing Cloudflare resources to Terraform and using the `cf-terraforming` tool, refer to the following resources: -* [Import Cloudflare resources](/terraform/advanced-topics/import-cloudflare-resources/) -* [`cf-terraforming` GitHub repository](https://github.com/cloudflare/cf-terraforming) +- [Import Cloudflare resources](/terraform/advanced-topics/import-cloudflare-resources/) +- [`cf-terraforming` GitHub repository](https://github.com/cloudflare/cf-terraforming) diff --git a/src/content/docs/waf/reference/migration-guides/waf-managed-rules-migration.mdx b/src/content/docs/waf/reference/migration-guides/waf-managed-rules-migration.mdx index 4045dabe040d8f..92788cc047f6a4 100644 --- a/src/content/docs/waf/reference/migration-guides/waf-managed-rules-migration.mdx +++ b/src/content/docs/waf/reference/migration-guides/waf-managed-rules-migration.mdx @@ -3,10 +3,9 @@ pcx_content_type: reference title: WAF Managed Rules migration sidebar: order: 1 - --- -import { GlossaryTooltip } from "~/components" +import { GlossaryTooltip } from "~/components"; On 2022-05-04, Cloudflare started the WAF migration from the [previous version of WAF managed rules](/waf/reference/legacy/old-waf-managed-rules/) to the new [WAF Managed Rules](/waf/managed-rules/), allowing a first set of eligible zones to migrate. Currently, all zones can migrate to WAF Managed Rules, including partner accounts. @@ -18,26 +17,26 @@ Once the migration finishes, the **Managed rules** tab in the Cloudflare dashboa **The previous version of WAF managed rules is now deprecated.** The [APIs for managing the previous version of WAF managed rules](#api-changes) will stop working on 2025-01-15. The same applies to [Terraform resources](#terraform-changes) related to the previous version of WAF managed rules. You must migrate before this date to avoid any issues. -Refer to [Possible migration errors](#possible-migration-errors) if you are having issues migrating. +Refer to [Possible migration errors](#possible-migration-errors) if you are having issues migrating. ::: ## Main benefits The new version of WAF Managed Rules provides the following benefits over the previous version: -* **New matching engine** – WAF Managed Rules are powered by the Ruleset Engine, which allows faster managed rule deployments and the ability to check even more traffic without scaling issues. The rules follow the same syntax used in other Cloudflare security products like WAF custom rules. +- **New matching engine** – WAF Managed Rules are powered by the Ruleset Engine, which allows faster managed rule deployments and the ability to check even more traffic without scaling issues. The rules follow the same syntax used in other Cloudflare security products like WAF custom rules. 
-* **Updated Managed Rulesets** – The Cloudflare OWASP Core Ruleset, one of WAF's Managed Rulesets, is based on the latest version of the OWASP Core Ruleset (v3.x), which adds paranoia levels and improves false positives rates compared to the version used in WAF managed rules (2.x). You also have more control over the sensitivity score, with a clear indication of how much each rule contributes to the score and what was the total score of a triggered request. +- **Updated Managed Rulesets** – The Cloudflare OWASP Core Ruleset, one of WAF's Managed Rulesets, is based on the latest version of the OWASP Core Ruleset (v3.x), which adds paranoia levels and improves false positives rates compared to the version used in WAF managed rules (2.x). You also have more control over the sensitivity score, with a clear indication of how much each rule contributes to the score and what was the total score of a triggered request. -* **Better rule browsing and configuration** – Deploy Managed Rulesets with a single click to get immediate protection. Override the behavior of entire rulesets, or customize a single rule. Apply overrides to all rules with a specific tag to adjust rules applicable to a given software or attack vector. You can deploy configurations like the following: +- **Better rule browsing and configuration** – Deploy Managed Rulesets with a single click to get immediate protection. Override the behavior of entire rulesets, or customize a single rule. Apply overrides to all rules with a specific tag to adjust rules applicable to a given software or attack vector. You can deploy configurations like the following: - * Deploy the Cloudflare Managed Ruleset across all my zones. - * Deploy the Cloudflare OWASP Core Ruleset on all traffic that does not contain `/api/*` in the path. - * Disable Managed Rulesets across my account for traffic coming from my IP. + - Deploy the Cloudflare Managed Ruleset across all my zones. + - Deploy the Cloudflare OWASP Core Ruleset on all traffic that does not contain `/api/*` in the path. + - Disable Managed Rulesets across my account for traffic coming from my IP. For more information on the benefits of WAF Managed Rules, refer to our [blog post](https://blog.cloudflare.com/new-cloudflare-waf/). -*** +--- ## Migration impact @@ -51,9 +50,9 @@ For API users, the APIs for managing the previous version of WAF managed rules w The update process will create an equivalent configuration for the following settings of WAF managed rules: -* Firewall rules configured with *Bypass* > *WAF Managed Rules*. -* Page Rules configured with *Disable Security*. -* Page Rules configured with *Web Application Firewall: Off* or *Web Application Firewall: On*. +- Firewall rules configured with _Bypass_ > _WAF Managed Rules_. +- Page Rules configured with _Disable Security_. +- Page Rules configured with _Web Application Firewall: Off_ or _Web Application Firewall: On_. The OWASP ruleset configuration will be partially migrated. Refer to the next section for details. 
@@ -63,7 +62,7 @@ The update process will partially migrate the settings of the OWASP ModSecurity The following OWASP settings will be migrated: -* **Sensitivity**: The [old sensitivity values](/waf/reference/legacy/old-waf-managed-rules/#owasp-modsecurity-core-rule-set) will be migrated to the following [paranoia level](/waf/managed-rules/reference/owasp-core-ruleset/concepts/#paranoia-level) (PL) and [score threshold](/waf/managed-rules/reference/owasp-core-ruleset/concepts/#score-threshold) combinations in the new OWASP ruleset: +- **Sensitivity**: The [old sensitivity values](/waf/reference/legacy/old-waf-managed-rules/#owasp-modsecurity-core-rule-set) will be migrated to the following [paranoia level](/waf/managed-rules/reference/owasp-core-ruleset/concepts/#paranoia-level) (PL) and [score threshold](/waf/managed-rules/reference/owasp-core-ruleset/concepts/#score-threshold) combinations in the new OWASP ruleset: | Old sensitivity | PL in new OWASP | Score threshold in new OWASP | | --------------- | --------------- | ---------------------------- | @@ -72,12 +71,12 @@ The following OWASP settings will be migrated: | Low | PL1 | Medium – 40 or higher | | Default | PL2 | Medium – 40 or higher | -* **Action**: The action in the previous OWASP ruleset has an almost direct mapping in the new OWASP managed ruleset, except for the *Simulate* action which will be migrated to *Log*. +- **Action**: The action in the previous OWASP ruleset has an almost direct mapping in the new OWASP managed ruleset, except for the _Simulate_ action which will be migrated to _Log_. The following OWASP settings will **not** be migrated, since there is no direct equivalence between rules in the two versions: -* OWASP group overrides -* OWASP rule overrides +- OWASP group overrides +- OWASP rule overrides To replace these settings you will need to configure the Cloudflare OWASP Core Ruleset in WAF Managed Rules again according to your needs, namely any tag/rule overrides. For more information on configuring the new OWASP Core Ruleset, refer to [Cloudflare OWASP Core Ruleset](/waf/managed-rules/reference/owasp-core-ruleset/). @@ -102,13 +101,13 @@ For more information about configuring WAF Managed Rules in the dashboard, refer Once the migration is complete, the APIs for interacting with WAF managed rules **will stop working**. These APIs are the following: -* [WAF packages](/api/operations/waf-packages-list-waf-packages) -* [WAF rule groups](/api/operations/waf-rule-groups-list-waf-rule-groups) -* [WAF rules](/api/operations/waf-rules-list-waf-rules) +- [WAF packages](/api/operations/waf-packages-list-waf-packages) +- [WAF rule groups](/api/operations/waf-rule-groups-list-waf-rule-groups) +- [WAF rules](/api/operations/waf-rules-list-waf-rules) :::caution -If you have any integrations using the WAF managed rules APIs stated above, you must update them before migrating to the new WAF Managed Rules. +If you have any integrations using the WAF managed rules APIs stated above, you must update them before migrating to the new WAF Managed Rules. ::: To work with WAF Managed Rules you must use the [Rulesets API](/ruleset-engine/managed-rulesets/). For more information on deploying WAF Managed Rules via API, refer to [Deploy managed rulesets via API](/waf/managed-rules/deploy-api/). 
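+
+For example, a minimal sketch of deploying the Cloudflare Managed Ruleset through the Rulesets API could look like the following, reusing the `execute` action and ruleset ID shown later in this guide (`$ZONE_ID` and `$API_TOKEN` are placeholders you must supply):
+
+```sh
+# Deploy the Cloudflare Managed Ruleset (efb7b8c949ac4650a09736fc376e9aee)
+# at the zone entry point of the http_request_firewall_managed phase.
+curl --request PUT \
+"https://api.cloudflare.com/client/v4/zones/$ZONE_ID/rulesets/phases/http_request_firewall_managed/entrypoint" \
+--header "Authorization: Bearer $API_TOKEN" \
+--header "Content-Type: application/json" \
+--data '{
+  "rules": [
+    {
+      "action": "execute",
+      "expression": "true",
+      "action_parameters": { "id": "efb7b8c949ac4650a09736fc376e9aee" }
+    }
+  ]
+}'
+```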
@@ -117,15 +116,15 @@ To work with WAF Managed Rules you must use the [Rulesets API](/ruleset-engine/m Once the migration is complete, the following Terraform resources for configuring WAF managed rules **will stop working**: -* [`cloudflare_waf_package`](https://registry.terraform.io/providers/cloudflare/cloudflare/3.35.0/docs/resources/waf_package) -* [`cloudflare_waf_group`](https://registry.terraform.io/providers/cloudflare/cloudflare/3.35.0/docs/resources/waf_group) -* [`cloudflare_waf_rule`](https://registry.terraform.io/providers/cloudflare/cloudflare/3.35.0/docs/resources/waf_rule) +- [`cloudflare_waf_package`](https://registry.terraform.io/providers/cloudflare/cloudflare/3.35.0/docs/resources/waf_package) +- [`cloudflare_waf_group`](https://registry.terraform.io/providers/cloudflare/cloudflare/3.35.0/docs/resources/waf_group) +- [`cloudflare_waf_rule`](https://registry.terraform.io/providers/cloudflare/cloudflare/3.35.0/docs/resources/waf_rule) These resources were only supported in the Terraform Cloudflare provider up to version 3.35. Version 4.x [no longer supports these resources](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs/guides/version-4-upgrade#resources-1). To manage the configuration of the new WAF Managed Rules using Terraform, you must use [`cloudflare_ruleset`](https://registry.terraform.io/providers/cloudflare/cloudflare/latest/docs/resources/ruleset) resources. -*** +--- ## Eligible zones @@ -133,14 +132,14 @@ To manage the configuration of the new WAF Managed Rules using Terraform, you mu :::note[Update notice] -On 2023-08-18, Cloudflare added support for migrating partner accounts to the new WAF Managed Rules. +On 2023-08-18, Cloudflare added support for migrating partner accounts to the new WAF Managed Rules. ::: In phase 2 all zones are eligible for migration. The exact migration procedure varies according to your Cloudflare plan. -* **Pro** and **Business** customers can update to the new WAF Managed Rules in the Cloudflare dashboard or via API. Once the new version is enabled, the previous version of WAF managed rules will be automatically disabled. +- **Pro** and **Business** customers can update to the new WAF Managed Rules in the Cloudflare dashboard or via API. Once the new version is enabled, the previous version of WAF managed rules will be automatically disabled. -* **Enterprise** customers can enable the new WAF Managed Rules configuration while keeping the previous version of WAF managed rules enabled, allowing them to check the impact of the new WAF configuration. After reviewing the behavior of the new configuration and making any required adjustments to specific managed rules, Enterprise users can then finish the migration, which will disable the previous version of WAF managed rules. +- **Enterprise** customers can enable the new WAF Managed Rules configuration while keeping the previous version of WAF managed rules enabled, allowing them to check the impact of the new WAF configuration. After reviewing the behavior of the new configuration and making any required adjustments to specific managed rules, Enterprise users can then finish the migration, which will disable the previous version of WAF managed rules. **Note:** Zones that have [URI-based WAF overrides](/api/operations/waf-overrides-list-waf-overrides), which you could only manage via API, will not be able to migrate immediately to the new WAF Managed Rules. You must delete these overrides before migrating. 
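+
+As a sketch, you can list and then delete these overrides with the legacy API before migrating (`$ZONE_ID`, `$API_TOKEN`, and `$OVERRIDE_ID` are placeholders; refer to the linked API operation for the exact request and response format):
+
+```sh
+# List the URI-based WAF overrides configured on the zone
+curl "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/firewall/waf/overrides" \
+--header "Authorization: Bearer $API_TOKEN"
+
+# Delete one override by its ID before starting the migration
+curl --request DELETE \
+"https://api.cloudflare.com/client/v4/zones/$ZONE_ID/firewall/waf/overrides/$OVERRIDE_ID" \
+--header "Authorization: Bearer $API_TOKEN"
+```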
@@ -148,20 +147,20 @@ In phase 2 all zones are eligible for migration. The exact migration procedure v In phase 1 the migration became available to a subset of eligible zones, which had to meet the following requirements: -* The zone has: +- The zone has: - * WAF disabled, or - * WAF enabled and only the Cloudflare Managed Ruleset is enabled (the OWASP ModSecurity Core Rule Set must be disabled). + - WAF disabled, or + - WAF enabled and only the Cloudflare Managed Ruleset is enabled (the OWASP ModSecurity Core Rule Set must be disabled). -* The zone has no [firewall rules](/firewall/cf-dashboard/) or [Page Rules](/rules/page-rules/) bypassing, enabling, or disabling WAF managed rules: +- The zone has no [firewall rules](/firewall/cf-dashboard/) or [Page Rules](/rules/page-rules/) bypassing, enabling, or disabling WAF managed rules: - * Firewall rules configured with *Bypass* > *WAF Managed Rules*. - * Page Rules configured with *Disable Security*. - * Page Rules configured with *Web Application Firewall: Off* or *Web Application Firewall: On.* + - Firewall rules configured with _Bypass_ > _WAF Managed Rules_. + - Page Rules configured with _Disable Security_. + - Page Rules configured with _Web Application Firewall: Off_ or _Web Application Firewall: On._ -* The zone has no [URI-based WAF overrides](/api/operations/waf-overrides-list-waf-overrides) (only available via API). +- The zone has no [URI-based WAF overrides](/api/operations/waf-overrides-list-waf-overrides) (only available via API). -*** +--- ## Starting the migration @@ -203,7 +202,7 @@ When the migration finishes, the dashboard will only display the new WAF Managed :::note -The update process can take up to an hour. During this period you may observe security events from both versions of WAF managed rules. +The update process can take up to an hour. During this period you may observe security events from both versions of WAF managed rules. ::: ### Using the API @@ -219,13 +218,13 @@ The update process can take up to an hour. During this period you may observe se ```json null {3} { - "result": { - "compatible": true, - "migration_state": "start" - }, - "success": true, - "errors": [], - "messages": [] + "result": { + "compatible": true, + "migration_state": "start" + }, + "success": true, + "errors": [], + "messages": [] } ``` @@ -242,42 +241,42 @@ The update process can take up to an hour. 
During this period you may observe se ```json { - "result": { - "name": "default", - "rules": [ - { - "id": "", - "version": "", - "action": "execute", - "expression": "true", - "description": "", - "ref": "", - "enabled": true, - "action_parameters": { - "id": "efb7b8c949ac4650a09736fc376e9aee", - "overrides": { - "rules": [ - { - "id": "23ee7cebe6e8443e99ecf932ab579455", - "action": "log", - "enabled": false - } - ] - } - } - } - ] - }, - "success": true, - "errors": [], - "messages": [] + "result": { + "name": "default", + "rules": [ + { + "id": "", + "version": "", + "action": "execute", + "expression": "true", + "description": "", + "ref": "", + "enabled": true, + "action_parameters": { + "id": "efb7b8c949ac4650a09736fc376e9aee", + "overrides": { + "rules": [ + { + "id": "23ee7cebe6e8443e99ecf932ab579455", + "action": "log", + "enabled": false + } + ] + } + } + } + ] + }, + "success": true, + "errors": [], + "messages": [] } ``` The returned configuration in the example above, which would match the existing configuration for the previous WAF version, contains: -* A rule that executes the Cloudflare Managed Ruleset (ruleset ID efb7b8c949ac4650a09736fc376e9aee). -* A single override for the rule `Apache Struts - Open Redirect - CVE:CVE-2013-2248` (rule ID 23ee7cebe6e8443e99ecf932ab579455) in the same ruleset, setting the action to `log` and disabling the rule. +- A rule that executes the Cloudflare Managed Ruleset (ruleset ID efb7b8c949ac4650a09736fc376e9aee). +- A single override for the rule `Apache Struts - Open Redirect - CVE:CVE-2013-2248` (rule ID 23ee7cebe6e8443e99ecf932ab579455) in the same ruleset, setting the action to `log` and disabling the rule. 3. (Optional, for Enterprise customers only) If you are migrating an Enterprise zone to WAF Managed Rules, you can enter validation mode before finishing the migration. In this mode, both WAF implementations will be enabled. Use the [Update a zone entry point ruleset](/api/operations/updateZoneEntrypointRuleset) operation, making sure you include the `waf_migration=validation&phase_two=1` query string parameters: @@ -352,10 +351,10 @@ Once the provided configuration is saved and the new WAF Managed Rules are enabl :::note -Pro and Business customers, which do not have access to the validation mode described in step 3, can update the rules (and overrides) in their zone entry point ruleset without triggering the migration by omitting the `waf_migration=pending&phase_two=1` parameters. However, all the rules in their configuration must be disabled (`"enabled": false`). Only Enterprise customers can configure (enabled) rules deploying Managed Rulesets without triggering the migration. +Pro and Business customers, which do not have access to the validation mode described in step 3, can update the rules (and overrides) in their zone entry point ruleset without triggering the migration by omitting the `waf_migration=pending&phase_two=1` parameters. However, all the rules in their configuration must be disabled (`"enabled": false`). Only Enterprise customers can configure (enabled) rules deploying Managed Rulesets without triggering the migration. 
::: -*** +--- ## Analyzing the new WAF behavior in Security Events @@ -365,9 +364,9 @@ If you are an Enterprise customer, use the **validation mode** of the WAF migrat Go to the [Activity log](/waf/analytics/security-events/paid-plans/#activity-log) in Security Events during validation mode and check the following: -* Look for any requests allowed by the new WAF that are being handled by the previous WAF version (for example, by a challenge or block action). If this happens, consider writing a [firewall rule](/firewall/cf-dashboard/create-edit-delete-rules/#create-a-firewall-rule) or a [WAF custom rule](/waf/custom-rules/create-dashboard/) to handle the requests you previously identified. +- Look for any requests allowed by the new WAF that are being handled by the previous WAF version (for example, by a challenge or block action). If this happens, consider writing a [firewall rule](/firewall/cf-dashboard/create-edit-delete-rules/#create-a-firewall-rule) or a [WAF custom rule](/waf/custom-rules/create-dashboard/) to handle the requests you previously identified. -* Look for legitimate requests being blocked by the new WAF. In this situation, edit the WAF managed rule that is blocking these requests, changing the performed action or disabling the rule. For more information, refer to [Configure a managed ruleset](/waf/managed-rules/deploy-zone-dashboard/#configure-a-managed-ruleset). +- Look for legitimate requests being blocked by the new WAF. In this situation, edit the WAF managed rule that is blocking these requests, changing the performed action or disabling the rule. For more information, refer to [Configure a managed ruleset](/waf/managed-rules/deploy-zone-dashboard/#configure-a-managed-ruleset). ### For Business/Professional customers @@ -377,14 +376,12 @@ In the days following the migration, check the [Activity log](/waf/analytics/sec Additionally, check for requests that should have been blocked. In this situation, consider creating a [firewall rule](/firewall/cf-dashboard/create-edit-delete-rules/#create-a-firewall-rule) or a [WAF custom rule](/waf/custom-rules/create-dashboard/) to block these requests. -*** +--- ## API operations Updating to the new WAF Managed Rules via API requires invoking the following API operations: - - | Name | Method + Endpoint | Description | | --------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Check WAF
update compatibility | `GET` `/zones//waf_migration/check?phase_two=1` | Checks if the current zone can be updated to the new WAF, given its current configuration. | @@ -392,22 +389,20 @@ Updating to the new WAF Managed Rules via API requires invoking the following AP | [Update zone
entry point ruleset](/ruleset-engine/rulesets-api/update/) | `PUT` `/zones//rulesets/` `phases/http_request_firewall_managed/entrypoint?waf_migration=&phase_two=1` | Updates the configuration of the zone entry point ruleset for the `http_request_firewall_managed` phase.
Available values for the `waf_migration` query string parameter:
– `pending` / `1`: Defines the new WAF Managed Rules configuration and disables the previous version of WAF managed rules as soon as the provided configuration is saved and the new WAF is enabled.
– `validation` / `2`: (Enterprise zones only) Defines the new WAF Managed Rules configuration and enables the new WAF Managed Rules side by side with the previous version, entering validation mode. To exit validation mode and finish the migration, invoke the same API endpoint with `waf_migration=pending`. | | Get WAF status | `GET` `/zones//waf_migration/status` | Obtains the status of old and new WAF managed rules for a zone (enabled/disabled). The response also includes the current migration state (or mode). | - - You must prepend the Cloudflare API base URL to the endpoints listed above to obtain the full endpoint: `https://api.cloudflare.com/client/v4` -*** +--- ## Possible migration errors Contact Cloudflare Support to get help with the following errors: -* The number of firewall rules to migrate exceeds 200. -* The length of a firewall rule expression is longer than 4 KB. +- The number of firewall rules to migrate exceeds 200. +- The length of a firewall rule expression is longer than 4 KB. -*** +--- ## Additional resources @@ -419,8 +414,8 @@ You can also create [overrides](/ruleset-engine/managed-rulesets/override-manage For more information, refer to the following resources: -* [Deploy a managed ruleset to a phase at the zone level](/ruleset-engine/managed-rulesets/deploy-managed-ruleset/#deploy-a-managed-ruleset-to-a-phase-at-the-zone-level) -* [Override a managed ruleset](/ruleset-engine/managed-rulesets/override-managed-ruleset/) +- [Deploy a managed ruleset to a phase at the zone level](/ruleset-engine/managed-rulesets/deploy-managed-ruleset/#deploy-a-managed-ruleset-to-a-phase-at-the-zone-level) +- [Override a managed ruleset](/ruleset-engine/managed-rulesets/override-managed-ruleset/) ### Configuring the new WAF Managed Rules using Terraform @@ -435,7 +430,7 @@ The recommended steps for replacing your old WAF managed rules configuration in 1. Run the following command to generate all ruleset configurations for a zone: ```sh null {3,6} - $ cf-terraforming generate --zone --resource-type "cloudflare_ruleset" + cf-terraforming generate --zone --resource-type "cloudflare_ruleset" resource "cloudflare_ruleset" "terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31" { kind = "zone" @@ -454,24 +449,29 @@ The recommended steps for replacing your old WAF managed rules configuration in 3. Import the `cloudflare_ruleset` resource you previously identified into Terraform state using the `terraform import` command. For example: - ```sh - $ terraform import cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31 zone//3c0b456bc2aa443089c5f40f45f51b31 +```sh +terraform import cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31 zone//3c0b456bc2aa443089c5f40f45f51b31 +``` - cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Importing from ID "zone//3c0b456bc2aa443089c5f40f45f51b31"... - cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Import prepared! - Prepared cloudflare_ruleset for import - cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Refreshing state... [id=3c0b456bc2aa443089c5f40f45f51b31] +```sh output + cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Importing from ID "zone//3c0b456bc2aa443089c5f40f45f51b31"... + cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Import prepared! 
+ Prepared cloudflare_ruleset for import + cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Refreshing state... [id=3c0b456bc2aa443089c5f40f45f51b31] - Import successful! + Import successful! - The resources that were imported are shown above. These resources are now in - your Terraform state and will henceforth be managed by Terraform. - ``` + The resources that were imported are shown above. These resources are now in + your Terraform state and will henceforth be managed by Terraform. +``` 4. Run `terraform plan` to validate that Terraform now checks the state of the new `cloudflare_ruleset` resource, in addition to other existing resources already managed by Terraform. For example: ```sh - $ terraform plan + terraform plan + ``` + + ```sh output cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Refreshing state... [id=3c0b456bc2aa443089c5f40f45f51b31] [...] @@ -484,14 +484,16 @@ The recommended steps for replacing your old WAF managed rules configuration in :::caution[Important] - You must remove WAF packages, groups, and rules from Terraform state before deleting their configuration from `.tf` configuration files to prevent issues. + You must remove WAF packages, groups, and rules from Terraform state before deleting their configuration from `.tf` configuration files to prevent issues. ::: 1. Run the following command to find all resources related to the previous version of WAF managed rules: ```sh - $ terraform state list | grep -E '^cloudflare_waf_(package|group|rule)\.' + terraform state list | grep -E '^cloudflare_waf_(package|group|rule)\.' + ``` + ```sh output cloudflare_waf_package.my_package cloudflare_waf_group.my_group ``` @@ -499,8 +501,10 @@ The recommended steps for replacing your old WAF managed rules configuration in 2. Run the `terraform state rm ...` command in dry-run mode to understand the impact of removing those resources without performing any changes: ```sh - $ terraform state rm -dry-run cloudflare_waf_package.my_package cloudflare_waf_group.my_group + terraform state rm -dry-run cloudflare_waf_package.my_package cloudflare_waf_group.my_group + ``` + ```sh output Would remove cloudflare_waf_package.my_package Would remove cloudflare_waf_group.my_group ``` @@ -508,8 +512,10 @@ The recommended steps for replacing your old WAF managed rules configuration in 3. If the impact looks correct, run the same command without the `-dry-run` parameter to actually remove the resources from Terraform state: ```sh - $ terraform state rm cloudflare_waf_package.my_package cloudflare_waf_group.my_group + terraform state rm cloudflare_waf_package.my_package cloudflare_waf_group.my_group + ``` + ```sh output Removed cloudflare_waf_package.my_package Removed cloudflare_waf_group.my_group Successfully removed 2 resource instance(s). @@ -520,8 +526,10 @@ The recommended steps for replacing your old WAF managed rules configuration in 7. Run `terraform plan` to verify that the resources you deleted from configuration files no longer appear. You should not have any pending changes. ```sh - $ terraform plan + terraform plan + ``` + ```sh output cloudflare_ruleset.terraform_managed_resource_3c0b456bc2aa443089c5f40f45f51b31: Refreshing state... [id=3c0b456bc2aa443089c5f40f45f51b31] [...] 
@@ -532,13 +540,13 @@ The recommended steps for replacing your old WAF managed rules configuration in For details on importing Cloudflare resources to Terraform and using the `cf-terraforming` tool, refer to the following resources: -* [Import Cloudflare resources](/terraform/advanced-topics/import-cloudflare-resources/) -* [`cf-terraforming` GitHub repository](https://github.com/cloudflare/cf-terraforming) +- [Import Cloudflare resources](/terraform/advanced-topics/import-cloudflare-resources/) +- [`cf-terraforming` GitHub repository](https://github.com/cloudflare/cf-terraforming) -*** +--- ## Final remarks -The concept of paranoia level did not exist in the OWASP version (2.x) used in WAF managed rules. Based on the OWASP guide recommendations, the WAF migration process will set the paranoia level of the Cloudflare OWASP Core Ruleset to *PL2*. +The concept of paranoia level did not exist in the OWASP version (2.x) used in WAF managed rules. Based on the OWASP guide recommendations, the WAF migration process will set the paranoia level of the Cloudflare OWASP Core Ruleset to _PL2_. -You cannot disable the new version of WAF Managed Rules using [Page Rules](/rules/page-rules/), since the *Web Application Firewall: Off* setting in Page Rules only applies to the previous version of WAF managed rules. To disable the new WAF Managed Rules you must configure [exceptions](/waf/managed-rules/waf-exceptions/) (also known as skip rules). +You cannot disable the new version of WAF Managed Rules using [Page Rules](/rules/page-rules/), since the _Web Application Firewall: Off_ setting in Page Rules only applies to the previous version of WAF managed rules. To disable the new WAF Managed Rules you must configure [exceptions](/waf/managed-rules/waf-exceptions/) (also known as skip rules). diff --git a/src/content/docs/warp-client/get-started/linux.mdx b/src/content/docs/warp-client/get-started/linux.mdx index 0cd505e287eebe..d7d297c88a57f3 100644 --- a/src/content/docs/warp-client/get-started/linux.mdx +++ b/src/content/docs/warp-client/get-started/linux.mdx @@ -5,22 +5,19 @@ weight: 0 head: - tag: title content: Linux desktop client - --- You have two ways of installing WARP on Linux, depending on the distro you are using: -* Find the latest WARP client in the [package repository](https://pkg.cloudflareclient.com/). -* Install the `cloudflare-warp` package that suits your distro: - * **apt-based OS** (like Ubuntu): `sudo apt install cloudflare-warp`. - * **yum-based OS** (like CentOS or RHEL): `sudo yum install cloudflare-warp`. +- Find the latest WARP client in the [package repository](https://pkg.cloudflareclient.com/). +- Install the `cloudflare-warp` package that suits your distro: + - **apt-based OS** (like Ubuntu): `sudo apt install cloudflare-warp`. + - **yum-based OS** (like CentOS or RHEL): `sudo yum install cloudflare-warp`. :::note - If you get an error message when trying to install via the terminal, download the package that suits your distro from the [package repository](https://pkg.cloudflareclient.com/). - ::: ## Using WARP @@ -39,23 +36,23 @@ To connect for the very first time: You can use `warp-cli mode --help` to get a list of the modes to switch between. For example: -* **DNS only mode via DoH:** `warp-cli mode doh`. -* **WARP with DoH:** `warp-cli mode warp+doh`. +- **DNS only mode via DoH:** `warp-cli mode doh`. +- **WARP with DoH:** `warp-cli mode warp+doh`. 
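+For example, a short session that switches the mode and confirms the result might look like the following (a sketch only; exact output and available subcommands can vary between `warp-cli` releases):
+
+```sh
+# Switch to WARP with DoH, then check the client state
+warp-cli mode warp+doh
+warp-cli status
+```
+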
### Using 1.1.1.1 for Families

The Linux client supports all 1.1.1.1 for Families modes, in either WARP or DNS-only mode:

-* **Families mode off:** `warp-cli dns families off`
-* **Malware protection:** `warp-cli dns families malware`
-* **Malware and adult content:** `warp-cli dns families full`
+- **Families mode off:** `warp-cli dns families off`
+- **Malware protection:** `warp-cli dns families malware`
+- **Malware and adult content:** `warp-cli dns families full`

### Additional commands

A complete list of all supported commands can be found by running:

```sh
-$ warp-cli --help
+warp-cli --help
```

## Feedback
diff --git a/src/content/docs/warp-client/get-started/macOS.mdx b/src/content/docs/warp-client/get-started/macOS.mdx
index 12942f96f6cea5..5020346ae99ed3 100644
--- a/src/content/docs/warp-client/get-started/macOS.mdx
+++ b/src/content/docs/warp-client/get-started/macOS.mdx
@@ -5,10 +5,9 @@ weight: 0
 head:
   - tag: title
     content: macOS desktop client
-
---

-import { Render } from "~/components"
+import { Render } from "~/components";

1. Download Cloudflare WARP for macOS from [Microsoft App Center](https://install.appcenter.ms/orgs/cloudflare/apps/1.1.1.1-macos-1/distribution_groups/release) or [1.1.1.1](https://1.1.1.1/).
2. Go to your predefined download folder and open the `.pkg` file.
@@ -30,29 +29,27 @@ This is the main GUI application that you interact with. You can find it in`/App

This is the daemon service responsible for establishing the WireGuard tunnel and all interaction between our service endpoint and the Cloudflare WARP application. Here is where you can find:

-* **Service**: `/Applications/Cloudflare WARP.app/Contents/Resources/CloudflareWARP`
-* **Definition**: `/Library/LaunchDaemons/com.cloudflare.1dot1dot1dot1.macos.warp.daemon.plist`
+- **Service**: `/Applications/Cloudflare WARP.app/Contents/Resources/CloudflareWARP`
+- **Definition**: `/Library/LaunchDaemons/com.cloudflare.1dot1dot1dot1.macos.warp.daemon.plist`

### Log files

The macOS application places log files in two locations based on what part of the app is logging information. These logs are included with a feedback submission, when you select the checkbox in **Feedback** > **Share debug information**.

-* **Daemon and install logs**: `/Library/Application Support/Cloudflare`.
-* **Application GUI logs**: `/Users//Library/Logs/Cloudflare`.
+- **Daemon and install logs**: `/Library/Application Support/Cloudflare`.
+- **Application GUI logs**: `/Users//Library/Logs/Cloudflare`.

## How to remove the application

We include an uninstall script as part of the macOS package you install. Type the following in a terminal window to uninstall WARP:

```sh
-$ cd /Applications/Cloudflare\ WARP.app/Contents/Resources
-$ ./uninstall.sh
+cd /Applications/Cloudflare\ WARP.app/Contents/Resources
+./uninstall.sh
```

:::note[Note]

-
You may be prompted to provide your credentials while removing the application.

-
:::
diff --git a/src/content/docs/web3/ethereum-gateway/reference/supported-api-methods.mdx b/src/content/docs/web3/ethereum-gateway/reference/supported-api-methods.mdx
index acb163f02527c0..8b3e9e9c6bf670 100644
--- a/src/content/docs/web3/ethereum-gateway/reference/supported-api-methods.mdx
+++ b/src/content/docs/web3/ethereum-gateway/reference/supported-api-methods.mdx
@@ -6,7 +6,6 @@ sidebar:
 head:
   - tag: title
     content: Supported API methods - Ethereum Gateway
-
---

The full list of API methods that are supported by an Ethereum Gateway
supported. 
For a full list of JSON-RPC API methods, refer to the [JSON-RPC specification](https://github.com/ethereum/execution-apis). -| JSON-RPC method | Cloudflare Ethereum Gateway support | -| ------------------------------------------------------------------------------------------------------------------------------------------ | :---------------------------------: | -| [web3\_clientVersion](https://ethereum.org/en/developers/docs/apis/json-rpc/#web3_clientversion) | ✅ | -| [web3\_sha3](https://ethereum.org/en/developers/docs/apis/json-rpc/#web3_sha3) | ✅ | -| [net\_version](https://ethereum.org/en/developers/docs/apis/json-rpc/#net_version) | ✅ | -| [net\_listening](https://ethereum.org/en/developers/docs/apis/json-rpc/#net_listening) | ✅ | -| [eth\_syncing](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_syncing) | ✅ | -| [eth\_mining](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_mining) | ✅ | -| [eth\_gasPrice](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gasprice) | ✅ | -| [eth\_feeHistory](https://github.com/ethereum/execution-apis)[^2] | ✅ | -| [eth\_blockNumber](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_blocknumber) | ✅ | -| [eth\_chainId](https://github.com/ethereum/execution-apis) | ✅ | -| [eth\_getBalance](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getbalance) | ✅ | -| [eth\_getStorageAt](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getstorageat) | ✅ | -| [eth\_getTransactionCount](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactioncount) | ✅ | -| [eth\_getBlockTransactionCountByHash](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblocktransactioncountbyhash) | ✅ | -| [eth\_getBlockTransactionCountByNumber](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblocktransactioncountbynumber) | ✅ | -| [eth\_getUncleCountByBlockHash](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getunclecountbyblockhash) | ✅ | -| [eth\_getUncleCountByBlockNumber](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getunclecountbyblocknumber) | ✅ | -| [eth\_getCode](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getcode) | ✅ | -| [eth\_sendRawTransaction](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_sendrawtransaction) | ✅ | -| [eth\_call](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_call) | ✅ | -| [eth\_estimateGas](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_estimategas) | ✅ | -| [eth\_getBlockByHash](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblockbyhash) | ✅ | -| [eth\_getBlockByNumber](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblockbynumber) | ✅ | -| [eth\_getTransactionByHash](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionbyhash) | ✅ | -| [eth\_getTransactionByBlockHashAndIndex](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionbyblockhashandindex) | ✅ | -| [eth\_getTransactionByBlockNumberAndIndex](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionbyblocknumberandindex) | ✅ | -| [eth\_getTransactionReceipt](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionreceipt) | ✅ | -| [eth\_getUncleByBlockHashAndIndex](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getunclebyblockhashandindex) | ✅ | -| [eth\_getUncleByBlockNumberAndIndex](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getunclebyblocknumberandindex) | ✅ | -| 
[eth\_getLogs](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getlogs)[^1] | ✅ | -| [eth\_getWork](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getwork) | ✅ | -| [eth\_getProof](https://ethereum.github.io/execution-apis/api-documentation/) | ✅ | -| [net\_peerCount](https://ethereum.org/en/developers/docs/apis/json-rpc/#net_peercount) | ❌ | -| [eth\_protocolVersion](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_protocolversion) | ❌ | -| [eth\_coinbase](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_coinbase) | ❌ | -| [eth\_hashrate](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_hashrate) | ❌ | -| [eth\_accounts](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_accounts) | ❌ | -| [eth\_sign](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_sign) | ❌ | -| [eth\_sendTransaction](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_sendtransaction) | ❌ | -| [eth\_getCompilers](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getcompilers) | ❌ | -| [eth\_compileLLL](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_compilelll) | ❌ | -| [eth\_compileSolidity](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_compile_solidity) | ❌ | -| [eth\_compileSerpent](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_compileserpent) | ❌ | -| [eth\_newFilter](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_newfilter) | ❌ | -| [eth\_newBlockFilter](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_newblockfilter) | ❌ | -| [eth\_newPendingTransactionFilter](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_newpendingtransactionfilter) | ❌ | -| [eth\_uninstallFilter](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_uninstallfilter) | ❌ | -| [eth\_getFilterChanges](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getfilterchanges) | ❌ | -| [eth\_getFilterLogs](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getfilterlogs) | ❌ | -| [eth\_submitWork](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_submitwork) | ❌ | -| [eth\_submitHashrate](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_submithashrate) | ❌ | +| JSON-RPC method | Cloudflare Ethereum Gateway support | +| ----------------------------------------------------------------------------------------------------------------------------------------- | :---------------------------------: | +| [web3_clientVersion](https://ethereum.org/en/developers/docs/apis/json-rpc/#web3_clientversion) | ✅ | +| [web3_sha3](https://ethereum.org/en/developers/docs/apis/json-rpc/#web3_sha3) | ✅ | +| [net_version](https://ethereum.org/en/developers/docs/apis/json-rpc/#net_version) | ✅ | +| [net_listening](https://ethereum.org/en/developers/docs/apis/json-rpc/#net_listening) | ✅ | +| [eth_syncing](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_syncing) | ✅ | +| [eth_mining](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_mining) | ✅ | +| [eth_gasPrice](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gasprice) | ✅ | +| [eth_feeHistory](https://github.com/ethereum/execution-apis)[^2] | ✅ | +| [eth_blockNumber](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_blocknumber) | ✅ | +| [eth_chainId](https://github.com/ethereum/execution-apis) | ✅ | +| [eth_getBalance](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getbalance) | ✅ | +| [eth_getStorageAt](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getstorageat) | ✅ 
| +| [eth_getTransactionCount](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactioncount) | ✅ | +| [eth_getBlockTransactionCountByHash](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblocktransactioncountbyhash) | ✅ | +| [eth_getBlockTransactionCountByNumber](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblocktransactioncountbynumber) | ✅ | +| [eth_getUncleCountByBlockHash](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getunclecountbyblockhash) | ✅ | +| [eth_getUncleCountByBlockNumber](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getunclecountbyblocknumber) | ✅ | +| [eth_getCode](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getcode) | ✅ | +| [eth_sendRawTransaction](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_sendrawtransaction) | ✅ | +| [eth_call](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_call) | ✅ | +| [eth_estimateGas](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_estimategas) | ✅ | +| [eth_getBlockByHash](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblockbyhash) | ✅ | +| [eth_getBlockByNumber](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblockbynumber) | ✅ | +| [eth_getTransactionByHash](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionbyhash) | ✅ | +| [eth_getTransactionByBlockHashAndIndex](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionbyblockhashandindex) | ✅ | +| [eth_getTransactionByBlockNumberAndIndex](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionbyblocknumberandindex) | ✅ | +| [eth_getTransactionReceipt](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionreceipt) | ✅ | +| [eth_getUncleByBlockHashAndIndex](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getunclebyblockhashandindex) | ✅ | +| [eth_getUncleByBlockNumberAndIndex](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getunclebyblocknumberandindex) | ✅ | +| [eth_getLogs](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getlogs)[^1] | ✅ | +| [eth_getWork](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getwork) | ✅ | +| [eth_getProof](https://ethereum.github.io/execution-apis/api-documentation/) | ✅ | +| [net_peerCount](https://ethereum.org/en/developers/docs/apis/json-rpc/#net_peercount) | ❌ | +| [eth_protocolVersion](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_protocolversion) | ❌ | +| [eth_coinbase](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_coinbase) | ❌ | +| [eth_hashrate](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_hashrate) | ❌ | +| [eth_accounts](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_accounts) | ❌ | +| [eth_sign](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_sign) | ❌ | +| [eth_sendTransaction](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_sendtransaction) | ❌ | +| [eth_getCompilers](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getcompilers) | ❌ | +| [eth_compileLLL](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_compilelll) | ❌ | +| [eth_compileSolidity](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_compile_solidity) | ❌ | +| [eth_compileSerpent](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_compileserpent) | ❌ | +| [eth_newFilter](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_newfilter) | ❌ | +| 
[eth_newBlockFilter](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_newblockfilter) | ❌ | +| [eth_newPendingTransactionFilter](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_newpendingtransactionfilter) | ❌ | +| [eth_uninstallFilter](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_uninstallfilter) | ❌ | +| [eth_getFilterChanges](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getfilterchanges) | ❌ | +| [eth_getFilterLogs](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getfilterlogs) | ❌ | +| [eth_submitWork](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_submitwork) | ❌ | +| [eth_submitHashrate](https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_submithashrate) | ❌ | [^1]: **Limitations**: Max block range of 800 blocks. @@ -77,18 +76,18 @@ For a full list of JSON-RPC API methods, refer to the [JSON-RPC specification](h EVM traces are a way to track the execution of smart contracts on the Ethereum blockchain. It records all the steps taken by the Ethereum Virtual Machine (EVM) as it runs the smart contract. This includes information like the specific operation that was executed, how much gas it cost, and any changes made to the blockchain as a result. The trace module is a tool that allows developers to access and analyze these traces, which can be useful for debugging, testing, and monitoring smart contracts. It can be used to identify and fix errors, optimize performance, and gain insight into how the smart contract is interacting with the blockchain. -### trace\_filter +### trace_filter The `trace_filter` method retrieves the traces of multiple transactions in a single request. This method is particularly useful for debugging and monitoring specific addresses on the Ethereum blockchain. #### Request Parameters -* `fromBlock`: `Quantity` or `Tag` - (optional) The block number to start receiving traces from. -* `toBlock`: `Quantity` or `Tag` - (optional) The block number to stop receiving traces at. -* `fromAddress`: `Array` - (optional) An array of addresses to start receiving traces from. -* `toAddress`: `Address` - (optional) An array of addresses to stop retrieving traces at. -* `after`: `Quantity` - (optional) The offset trace number -* `count`: `Quantity` - (optional) The amount of traces to return. +- `fromBlock`: `Quantity` or `Tag` - (optional) The block number to start receiving traces from. +- `toBlock`: `Quantity` or `Tag` - (optional) The block number to stop receiving traces at. +- `fromAddress`: `Array` - (optional) An array of addresses to start receiving traces from. +- `toAddress`: `Address` - (optional) An array of addresses to stop retrieving traces at. +- `after`: `Quantity` - (optional) The offset trace number +- `count`: `Quantity` - (optional) The amount of traces to return. #### Returns @@ -97,7 +96,7 @@ This method returns an `Array` of traces matching the given filter. 
#### Example ```sh title="trace_filter Request" {1} -$ curl https://web3-trial.cloudflare-eth.com/v1/mainnet \ +curl https://web3-trial.cloudflare-eth.com/v1/mainnet \ -X POST \ -H 'Content-Type: application/json' \ --data '{ @@ -121,31 +120,31 @@ $ curl https://web3-trial.cloudflare-eth.com/v1/mainnet \ ```json { - "jsonrpc": "2.0", - "result": [ - { - "action": { - "from": "0xedc763b3e418cd14767b3be02b667619a6374076", - "callType": "call", - "gas": "0x8462", - "input": "0x095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - "to": "0x7ff4169a6b5122b664c51c95727d87750ec07c84", - "value": "0x0" - }, - "blockHash": "0x351e7c06ec010c8f7e7358eb580238dd23e1e129be96822aa93ebb6da08558e6", - "blockNumber": 13416771, - "result": { - "gasUsed": "0x6009", - "output": "0x0000000000000000000000000000000000000000000000000000000000000001" - }, - "subtraces": 0, - "traceAddress": [], - "transactionHash": "0x054bbb9fbb855bf23f755e548c7409f45fc5eff8a824b2ad06380bc038d7b049", - "transactionPosition": 54, - "type": "call" - } - ], - "id": 1 + "jsonrpc": "2.0", + "result": [ + { + "action": { + "from": "0xedc763b3e418cd14767b3be02b667619a6374076", + "callType": "call", + "gas": "0x8462", + "input": "0x095ea7b30000000000000000000000007a250d5630b4cf539739df2c5dacb4c659f2488dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "to": "0x7ff4169a6b5122b664c51c95727d87750ec07c84", + "value": "0x0" + }, + "blockHash": "0x351e7c06ec010c8f7e7358eb580238dd23e1e129be96822aa93ebb6da08558e6", + "blockNumber": 13416771, + "result": { + "gasUsed": "0x6009", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + "subtraces": 0, + "traceAddress": [], + "transactionHash": "0x054bbb9fbb855bf23f755e548c7409f45fc5eff8a824b2ad06380bc038d7b049", + "transactionPosition": 54, + "type": "call" + } + ], + "id": 1 } ``` @@ -153,5 +152,5 @@ $ curl https://web3-trial.cloudflare-eth.com/v1/mainnet \ The `trace_filter` method has some limitations to ensure that our nodes are not overloaded. -* The block range for the `trace_filter` method is limited to 800 blocks. -* The trace `count` is limited to 200 +- The block range for the `trace_filter` method is limited to 800 blocks. +- The trace `count` is limited to 200 diff --git a/src/content/docs/web3/how-to/use-ethereum-gateway.mdx b/src/content/docs/web3/how-to/use-ethereum-gateway.mdx index d6f30002cc6174..c0f5b7e9aadef6 100644 --- a/src/content/docs/web3/how-to/use-ethereum-gateway.mdx +++ b/src/content/docs/web3/how-to/use-ethereum-gateway.mdx @@ -3,7 +3,6 @@ pcx_content_type: how-to title: Use Ethereum gateway sidebar: order: 2 - --- Once you have an Ethereum gateway — meaning that you [create a new gateway](/web3/how-to/manage-gateways/#create-a-gateway) with a `target` of **Ethereum** — you can interact with [different Ethereum networks](/web3/ethereum-gateway/reference/supported-networks/) by specifying the correct JSON blob for your query. @@ -13,7 +12,12 @@ Once you have an Ethereum gateway — meaning that you [create a new gateway](/w The Cloudflare Ethereum Gateway allows HTTP requests where the body of the request is set to be the JSON body of the request you would like to make. 
For example, if you would like to read the block that is at number `0x2244`, then your JSON blob takes the form:

```json
-{ "jsonrpc": "2.0", "method": "eth_getBlockByNumber", "params": ["0x2244", true], "id": 1 }
+{
+	"jsonrpc": "2.0",
+	"method": "eth_getBlockByNumber",
+	"params": ["0x2244", true],
+	"id": 1
+}
```

Each blob uses a valid [`method` parameter](/web3/ethereum-gateway/reference/supported-api-methods/). The `params` array here contains the block number that we would like to locate and a boolean expressing whether each individual transaction in the block should be shown in its entirety (`true`) or as stubs (`false`).

To send this query to your [custom Ethereum Gateway](/web3/how-to/manage-gateways/), you could use a cURL command:

```sh
-$ curl https://web3-trial.cloudflare-eth.com/v1/mainnet -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x2244", true],"id":1}'
+curl https://web3-trial.cloudflare-eth.com/v1/mainnet -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x2244", true],"id":1}'
```

You can also write the same query using the JS Fetch API:

```js
await fetch(
-  new Request('https://web3-trial.cloudflare-eth.com/v1/mainnet', {
-    method: 'POST',
-    body: JSON.stringify({
-      jsonrpc: '2.0',
-      method: 'eth_getBlockByNumber',
-      params: ['0x2244', true],
-      id: 1,
-    }),
-    headers: {
-      'Content-Type': 'application/json',
-    },
-  })
-).then(resp => {
-  return resp.json();
+	new Request("https://web3-trial.cloudflare-eth.com/v1/mainnet", {
+		method: "POST",
+		body: JSON.stringify({
+			jsonrpc: "2.0",
+			method: "eth_getBlockByNumber",
+			params: ["0x2244", true],
+			id: 1,
+		}),
+		headers: {
+			"Content-Type": "application/json",
+		},
+	}),
+).then((resp) => {
+	return resp.json();
});
```

The response in both cases will be a JSON blob of the form:

```json
{
	"jsonrpc": "2.0",
	"id": 1,
	"result": {
		"difficulty": "0x746ef15b66",
		"extraData": "0x476574682f76312e302e302f6c696e75782f676f312e342e32",
		"gasLimit": "0x1388",
		"gasUsed": "0x0",
		"hash": "0xd6bb42034740c5d728e774e43a01f26222e0fcc279c504ca5963dc34fe70f392",
		"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
		"miner": "0xf927a40c8b7f6e07c5af7fa2155b4864a4112b13",
		"mixHash": "0x975da446e302e6da6cedb3fbaa763c3c203ae88d6fab4924e2a3d34a568c4361",
		"nonce": "0x88a7f12f49151c83",
		"number": "0x2244",
		"parentHash": "0x067fd84ecdbc7491bf5ec7d5d4ead361b1f590eec74797a7f90b4a7d7004a48d",
		"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
		"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
		"size": "0x21b",
		"stateRoot": "0x828dade2067283e370993ec6a1bda0e65c1310e404a6d5bbb030b596eb80017c",
		"timestamp": "0x55bb040f",
		"totalDifficulty": "0x5c328da43525d",
		"transactions": [],
		"transactionsRoot": 
"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "uncles": [] - } + "jsonrpc": "2.0", + "id": 1, + "result": { + "difficulty": "0x746ef15b66", + "extraData": "0x476574682f76312e302e302f6c696e75782f676f312e342e32", + "gasLimit": "0x1388", + "gasUsed": "0x0", + "hash": "0xd6bb42034740c5d728e774e43a01f26222e0fcc279c504ca5963dc34fe70f392", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0xf927a40c8b7f6e07c5af7fa2155b4864a4112b13", + "mixHash": "0x975da446e302e6da6cedb3fbaa763c3c203ae88d6fab4924e2a3d34a568c4361", + "nonce": "0x88a7f12f49151c83", + "number": "0x2244", + "parentHash": "0x067fd84ecdbc7491bf5ec7d5d4ead361b1f590eec74797a7f90b4a7d7004a48d", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x21b", + "stateRoot": "0x828dade2067283e370993ec6a1bda0e65c1310e404a6d5bbb030b596eb80017c", + "timestamp": "0x55bb040f", + "totalDifficulty": "0x5c328da43525d", + "transactions": [], + "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "uncles": [] + } } ``` @@ -89,30 +93,30 @@ Once you have a wallet set up and a method of signing your own transactions, you Then you can use your [custom Gateway](/web3/how-to/manage-gateways/) to send the transaction to the network with a cURL command: ```sh -$ curl https://web3-trial.cloudflare-eth.com/v1/mainnet -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675"],"id":1}' +curl https://web3-trial.cloudflare-eth.com/v1/mainnet -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","method":"eth_sendRawTransaction","params":["0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675"],"id":1}' ``` You could also use a JS Fetch API request: ```js await fetch( - new Request('https://web3-trial.cloudflare-eth.com/v1/mainnet', { - method: 'POST', - body: JSON.stringify({ - jsonrpc: '2.0', - method: 'eth_sendRawTransaction', - params: [ - '0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675' - ], - id: 1, - }), - headers: { - 'Content-Type': 'application/json', - }, - }) -).then(resp => { - return resp.json(); + new Request("https://web3-trial.cloudflare-eth.com/v1/mainnet", { + method: "POST", + body: JSON.stringify({ + jsonrpc: "2.0", + method: "eth_sendRawTransaction", + params: [ + "0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675", + ], + id: 1, + }), + headers: { + "Content-Type": "application/json", + }, + }), +).then((resp) => { + return resp.json(); }); ``` -*(The actual command above will not work — you need to provide your own signed transaction.)* +_(The actual command above will not work — you need to provide your own signed transaction.)_ diff --git a/src/content/docs/workers-ai/function-calling/embedded/examples/kv.mdx 
b/src/content/docs/workers-ai/function-calling/embedded/examples/kv.mdx
index f45481e581243e..894102287d8fbe 100644
--- a/src/content/docs/workers-ai/function-calling/embedded/examples/kv.mdx
+++ b/src/content/docs/workers-ai/function-calling/embedded/examples/kv.mdx
@@ -7,7 +7,6 @@ tags:
   - AI
 sidebar:
   order: 6
-
---

Interacting with persistent storage to retrieve or store information enables powerful use cases.
@@ -23,7 +22,7 @@ Importantly, your `wrangler.toml` file must be updated to include the `KV` bindi

## Worker code

```ts title="Embedded function calling example with KV API"
-import { runWithTools } from '@cloudflare/ai-utils';
+import { runWithTools } from "@cloudflare/ai-utils";

type Env = {
	AI: Ai;
@@ -33,43 +32,52 @@ type Env = {
export default {
	async fetch(request, env, ctx) {
		// Define function
-		const updateKvValue = async ({ key, value }: { key: string; value: string }) => {
+		const updateKvValue = async ({
+			key,
+			value,
+		}: {
+			key: string;
+			value: string;
+		}) => {
			const response = await env.KV.put(key, value);
			return `Successfully updated key-value pair in database: ${response}`;
		};

		// Run AI inference with function calling
-		const response = await runWithTools(env.AI, '@hf/nousresearch/hermes-2-pro-mistral-7b', {
-			messages: [
-				{ role: 'system', content: 'Put user given values in KV' },
-				{ role: 'user', content: 'Set the value of banana to yellow.' },
-			],
-			tools: [
-				{
-					name: 'KV update',
-					description: 'Update a key-value pair in the database',
-					parameters: {
-						type: 'object',
-						properties: {
-							key: {
-								type: 'string',
-								description: 'The key to update',
-							},
-							value: {
-								type: 'string',
-								description: 'The value to update',
+		const response = await runWithTools(
+			env.AI,
+			"@hf/nousresearch/hermes-2-pro-mistral-7b",
+			{
+				messages: [
+					{ role: "system", content: "Put user given values in KV" },
+					{ role: "user", content: "Set the value of banana to yellow." },
+				],
+				tools: [
+					{
+						name: "KV update",
+						description: "Update a key-value pair in the database",
+						parameters: {
+							type: "object",
+							properties: {
+								key: {
+									type: "string",
+									description: "The key to update",
+								},
+								value: {
+									type: "string",
+									description: "The value to update",
+								},
							},
+							required: ["key", "value"],
						},
-						required: ['key', 'value'],
+						function: updateKvValue,
					},
-					function: updateKvValue,
-				},
-			],
-		});
+				],
+			},
+		);

		return new Response(JSON.stringify(response));
	},
} satisfies ExportedHandler;
-
```

## Verify results

To verify the results, run the following command:

```sh
-$ npx wrangler kv:key get banana --binding KV --local
+npx wrangler kv:key get banana --binding KV --local
```
diff --git a/src/content/docs/workers-ai/function-calling/embedded/get-started.mdx b/src/content/docs/workers-ai/function-calling/embedded/get-started.mdx
index 35ab5e6faa6bd2..02f1d651c9daf2 100644
--- a/src/content/docs/workers-ai/function-calling/embedded/get-started.mdx
+++ b/src/content/docs/workers-ai/function-calling/embedded/get-started.mdx
@@ -6,7 +6,6 @@ sidebar:
 head:
   - tag: title
     content: Get Started
-
---

This guide will walk you through setting up and deploying your first Workers AI project with embedded function calling. You will use Workers, a Workers AI binding, the [`ai-utils package`](https://github.com/cloudflare/ai-utils), and a large language model (LLM) to deploy your first AI-powered application on the Cloudflare global network with embedded function calling. 
@@ -20,7 +19,7 @@ Follow the [Workers AI Get Started Guide](/workers-ai/get-started/workers-wrangl Next, run the following command in your project repository to install the Worker AI utilities package. ```sh -$ npm install @cloudflare/ai-utils --save +npm install @cloudflare/ai-utils --save ``` ## 3. Add Workers AI Embedded function calling @@ -28,7 +27,7 @@ $ npm install @cloudflare/ai-utils --save Update the `index.ts` file in your application directory with the following code: ```ts title="Embedded function calling example" -import { runWithTools } from '@cloudflare/ai-utils'; +import { runWithTools } from "@cloudflare/ai-utils"; type Env = { AI: Ai; @@ -45,33 +44,33 @@ export default { const response = await runWithTools( env.AI, // Model with function calling support - '@hf/nousresearch/hermes-2-pro-mistral-7b', + "@hf/nousresearch/hermes-2-pro-mistral-7b", { // Messages messages: [ { - role: 'user', - content: 'What the result of 123123123 + 10343030?', + role: "user", + content: "What the result of 123123123 + 10343030?", }, ], // Definition of available tools the AI model can leverage tools: [ { - name: 'sum', - description: 'Sum up two numbers and returns the result', + name: "sum", + description: "Sum up two numbers and returns the result", parameters: { - type: 'object', + type: "object", properties: { - a: { type: 'number', description: 'the first number' }, - b: { type: 'number', description: 'the second number' }, + a: { type: "number", description: "the first number" }, + b: { type: "number", description: "the second number" }, }, - required: ['a', 'b'], + required: ["a", "b"], }, // reference to previously defined function function: sum, }, ], - } + }, ); return new Response(JSON.stringify(response)); }, @@ -104,11 +103,9 @@ Follow steps 4 and 5 of the [Workers AI Get Started Guide](/workers-ai/get-start :::note[Workers AI Embedded Function Calling charges] - Embedded function calling runs Workers AI inference requests. Standard charges for inference (e.g. tokens) usage will be charged. Resources consumed (e.g. CPU time) during embedded functions' code execution will be charged just as any other Worker's code execution. - ::: ## API reference diff --git a/src/content/docs/workers-ai/function-calling/index.mdx b/src/content/docs/workers-ai/function-calling/index.mdx index bc0053a1734cac..0be6af95ba1cf3 100644 --- a/src/content/docs/workers-ai/function-calling/index.mdx +++ b/src/content/docs/workers-ai/function-calling/index.mdx @@ -5,10 +5,9 @@ sidebar: order: 5 badge: text: Beta - --- -import { Stream, TabItem, Tabs } from "~/components" +import { Stream, TabItem, Tabs } from "~/components"; Function calling enables people to take Large Language Models (LLMs) and use the model response to execute functions or interact with external APIs. The developer usually defines a set of functions and the required input schema for each function, which we call `tools`. The model then intelligently understands when it needs to do a tool call, and it returns a JSON output which the user needs to feed to another function or API. 
@@ -28,52 +27,52 @@ To show you the value of embedded function calling, take a look at the example b ```sh # The ai-utils package enables embedded function calling -$ npm i @cloudflare/ai-utils +npm i @cloudflare/ai-utils ``` ```js title="Embedded function calling example" import { - createToolsFromOpenAPISpec, - runWithTools, - autoTrimTools, + createToolsFromOpenAPISpec, + runWithTools, + autoTrimTools, } from "@cloudflare/ai-utils"; export default { - async fetch(request, env, ctx) { - const response = await runWithTools( - env.AI, - "@hf/nousresearch/hermes-2-pro-mistral-7b", - { - messages: [{ role: "user", content: "Who is Cloudflare on github?" }], - tools: [ - // You can pass the OpenAPI spec link or contents directly - ...(await createToolsFromOpenAPISpec( - "https://gist.githubusercontent.com/mchenco/fd8f20c8f06d50af40b94b0671273dc1/raw/f9d4b5cd5944cc32d6b34cad0406d96fd3acaca6/partial_api.github.com.json", - { - overrides: [ - { - // for all requests on *.github.com, we'll need to add a User-Agent. - matcher: ({ url, method }) => { - return url.hostname === "api.github.com"; - }, - values: { - headers: { - "User-Agent": - "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36", - }, - }, - }, - ], - }, - )), - ], - }, - ).then((response) => { - return response; - }); - - return new Response(JSON.stringify(response)); - }, + async fetch(request, env, ctx) { + const response = await runWithTools( + env.AI, + "@hf/nousresearch/hermes-2-pro-mistral-7b", + { + messages: [{ role: "user", content: "Who is Cloudflare on github?" }], + tools: [ + // You can pass the OpenAPI spec link or contents directly + ...(await createToolsFromOpenAPISpec( + "https://gist.githubusercontent.com/mchenco/fd8f20c8f06d50af40b94b0671273dc1/raw/f9d4b5cd5944cc32d6b34cad0406d96fd3acaca6/partial_api.github.com.json", + { + overrides: [ + { + // for all requests on *.github.com, we'll need to add a User-Agent. + matcher: ({ url, method }) => { + return url.hostname === "api.github.com"; + }, + values: { + headers: { + "User-Agent": + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36", + }, + }, + }, + ], + }, + )), + ], + }, + ).then((response) => { + return response; + }); + + return new Response(JSON.stringify(response)); + }, }; ``` @@ -81,90 +80,90 @@ export default { ```js title="Traditional function calling example" export default { - async fetch(request, env, ctx) { - const response = await env.AI.run( - "@hf/nousresearch/hermes-2-pro-mistral-7b", - { - messages: [{ role: "user", content: "Who is Cloudflare on github?" 
}], - tools: [ - { - name: "getGithubUser", - description: - "Provides publicly available information about someone with a GitHub account.", - parameters: { - type: "object", - properties: { - username: { - type: "string", - description: "The handle for the GitHub user account.", - }, - }, - required: ["username"], - }, - }, - ], - }, - ); - - const selected_tool = response.tool_calls[0]; - let res; - - if (selected_tool.name == "getGithubUser") { - try { - const username = selected_tool.arguments.username; - const url = `https://api.github.com/users/${username}`; - res = await fetch(url, { - headers: { - // Github API requires a User-Agent header - "User-Agent": - "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36", - }, - }).then((res) => res.json()); - } catch (error) { - return error; - } - } - - const finalResponse = await env.AI.run( - "@hf/nousresearch/hermes-2-pro-mistral-7b", - { - messages: [ - { - role: "user", - content: "Who is Cloudflare on github?", - }, - { - role: "assistant", - content: "", - tool_call: selected_tool.name, - }, - { - role: "tool", - name: selected_tool.name, - content: JSON.stringify(res), - }, - ], - tools: [ - { - name: "getGithubUser", - description: - "Provides publicly available information about someone with a GitHub account.", - parameters: { - type: "object", - properties: { - username: { - type: "string", - description: "The handle for the GitHub user account.", - }, - }, - required: ["username"], - }, - }, - ], - }, - ); - return new Response(JSON.stringify(finalResponse)); - }, + async fetch(request, env, ctx) { + const response = await env.AI.run( + "@hf/nousresearch/hermes-2-pro-mistral-7b", + { + messages: [{ role: "user", content: "Who is Cloudflare on github?" 
}], + tools: [ + { + name: "getGithubUser", + description: + "Provides publicly available information about someone with a GitHub account.", + parameters: { + type: "object", + properties: { + username: { + type: "string", + description: "The handle for the GitHub user account.", + }, + }, + required: ["username"], + }, + }, + ], + }, + ); + + const selected_tool = response.tool_calls[0]; + let res; + + if (selected_tool.name == "getGithubUser") { + try { + const username = selected_tool.arguments.username; + const url = `https://api.github.com/users/${username}`; + res = await fetch(url, { + headers: { + // Github API requires a User-Agent header + "User-Agent": + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36", + }, + }).then((res) => res.json()); + } catch (error) { + return error; + } + } + + const finalResponse = await env.AI.run( + "@hf/nousresearch/hermes-2-pro-mistral-7b", + { + messages: [ + { + role: "user", + content: "Who is Cloudflare on github?", + }, + { + role: "assistant", + content: "", + tool_call: selected_tool.name, + }, + { + role: "tool", + name: selected_tool.name, + content: JSON.stringify(res), + }, + ], + tools: [ + { + name: "getGithubUser", + description: + "Provides publicly available information about someone with a GitHub account.", + parameters: { + type: "object", + properties: { + username: { + type: "string", + description: "The handle for the GitHub user account.", + }, + }, + required: ["username"], + }, + }, + ], + }, + ); + return new Response(JSON.stringify(finalResponse)); + }, }; ``` diff --git a/src/content/docs/workers-ai/get-started/rest-api.mdx b/src/content/docs/workers-ai/get-started/rest-api.mdx index adb3a25b9cae39..2268fd11d306c0 100644 --- a/src/content/docs/workers-ai/get-started/rest-api.mdx +++ b/src/content/docs/workers-ai/get-started/rest-api.mdx @@ -7,7 +7,6 @@ head: - tag: title content: Get started - REST API description: Use the Cloudflare Workers AI REST API to deploy a large language model (LLM). - --- This guide will instruct you through setting up and deploying your first Workers AI project. You will use the Workers AI REST API to experiment with a large language model (LLM). @@ -35,10 +34,8 @@ To get these values: :::note - If you choose to [create an API token](/fundamentals/api/get-started/create-token/) instead of using the template, that token will need permissions for both `Workers AI - Read` and `Workers AI - Edit`. - ::: ## 2. Run a model via API @@ -48,7 +45,7 @@ After creating your API token, authenticate and make requests to the API using y You will use the [Execute AI model](/api/operations/workers-ai-post-run-model) endpoint to run the [`@cf/meta/llama-3.1-8b-instruct`](/workers-ai/models/llama-3.1-8b-instruct/) model: ```bash -$ curl https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/run/@cf/meta/llama-3.1-8b-instruct \ +curl https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/run/@cf/meta/llama-3.1-8b-instruct \ -H 'Authorization: Bearer {API_TOKEN}' \ -d '{ "prompt": "Where did the phrase Hello World come from" }' ``` @@ -59,12 +56,12 @@ The API response will look like the following: ```json { - "result": { - "response": "Hello, World first appeared in 1974 at Bell Labs when Brian Kernighan included it in the C programming language example. It became widely used as a basic test program due to simplicity and clarity. It represents an inviting greeting from a program to the world." 
- }, - "success": true, - "errors": [], - "messages": [] + "result": { + "response": "Hello, World first appeared in 1974 at Bell Labs when Brian Kernighan included it in the C programming language example. It became widely used as a basic test program due to simplicity and clarity. It represents an inviting greeting from a program to the world." + }, + "success": true, + "errors": [], + "messages": [] } ``` @@ -74,4 +71,4 @@ By completing this guide, you have created a Cloudflare account (if you did not ## Related resources -* [Models](/workers-ai/models/) - Browse the Workers AI models catalog. +- [Models](/workers-ai/models/) - Browse the Workers AI models catalog. diff --git a/src/content/docs/workers-ai/get-started/workers-wrangler.mdx b/src/content/docs/workers-ai/get-started/workers-wrangler.mdx index 24ab153c202fd0..057f95eed14f9d 100644 --- a/src/content/docs/workers-ai/get-started/workers-wrangler.mdx +++ b/src/content/docs/workers-ai/get-started/workers-wrangler.mdx @@ -7,10 +7,9 @@ head: - tag: title content: Get started - CLI description: Deploy your first Cloudflare Workers AI project using the CLI. - --- -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; This guide will instruct you through setting up and deploying your first Workers AI project. You will use [Workers](/workers/), a Workers AI binding, and a large language model (LLM) to deploy your first AI-powered application on the Cloudflare global network. @@ -22,21 +21,29 @@ You will create a new Worker project using the `create-cloudflare` CLI (C3). [C3 Create a new project named `hello-ai` by running: - + Running `npm create cloudflare@latest` will prompt you to install the [`create-cloudflare` package](https://www.npmjs.com/package/create-cloudflare), and lead you through setup. C3 will also install [Wrangler](/workers/wrangler/), the Cloudflare Developer Platform CLI. - + This will create a new `hello-ai` directory. Your new `hello-ai` directory will include: -* A `"Hello World"` [Worker](/workers/get-started/guide/#3-write-code) at `src/index.ts`. -* A [`wrangler.toml`](/workers/wrangler/configuration/) configuration file. +- A `"Hello World"` [Worker](/workers/get-started/guide/#3-write-code) at `src/index.ts`. +- A [`wrangler.toml`](/workers/wrangler/configuration/) configuration file. Go to your application directory: ```sh -$ cd hello-ai +cd hello-ai ``` ## 2. Connect your Worker to Workers AI @@ -64,20 +71,19 @@ Update the `index.ts` file in your `hello-ai` application directory with the fol ```typescript title="src/index.ts" export interface Env { - // If you set another name in wrangler.toml as the value for 'binding', - // replace "AI" with the variable name you defined. - AI: Ai; + // If you set another name in wrangler.toml as the value for 'binding', + // replace "AI" with the variable name you defined. 
+ AI: Ai; } export default { - async fetch(request, env): Promise { - const response = await env.AI.run('@cf/meta/llama-3.1-8b-instruct', { - prompt: "What is the origin of the phrase Hello, World" - } - ); - - return new Response(JSON.stringify(response)); - }, + async fetch(request, env): Promise { + const response = await env.AI.run("@cf/meta/llama-3.1-8b-instruct", { + prompt: "What is the origin of the phrase Hello, World", + }); + + return new Response(JSON.stringify(response)); + }, } satisfies ExportedHandler; ``` @@ -88,7 +94,7 @@ Up to this point, you have created an AI binding for your Worker and configured While in your project directory, test Workers AI locally by running [`wrangler dev`](/workers/wrangler/commands/#dev): ```sh -$ npx wrangler dev +npx wrangler dev ``` @@ -97,7 +103,7 @@ You will be prompted to log in after you run the `wrangler dev`. When you run `n ```json { - "response":"Ah, a most excellent question, my dear human friend! *adjusts glasses*\n\nThe origin of the phrase \"Hello, World\" is a fascinating tale that spans several decades and multiple disciplines. It all began in the early days of computer programming, when a young man named Brian Kernighan was tasked with writing a simple program to demonstrate the basics of a new programming language called C.\nKernighan, a renowned computer scientist and author, was working at Bell Labs in the late 1970s when he created the program. He wanted to showcase the language's simplicity and versatility, so he wrote a basic \"Hello, World!\" program that printed the familiar greeting to the console.\nThe program was included in Kernighan and Ritchie's influential book \"The C Programming Language,\" published in 1978. The book became a standard reference for C programmers, and the \"Hello, World!\" program became a sort of \"Hello, World!\" for the programming community.\nOver time, the phrase \"Hello, World!\" became a shorthand for any simple program that demonstrated the basics" + "response": "Ah, a most excellent question, my dear human friend! *adjusts glasses*\n\nThe origin of the phrase \"Hello, World\" is a fascinating tale that spans several decades and multiple disciplines. It all began in the early days of computer programming, when a young man named Brian Kernighan was tasked with writing a simple program to demonstrate the basics of a new programming language called C.\nKernighan, a renowned computer scientist and author, was working at Bell Labs in the late 1970s when he created the program. He wanted to showcase the language's simplicity and versatility, so he wrote a basic \"Hello, World!\" program that printed the familiar greeting to the console.\nThe program was included in Kernighan and Ritchie's influential book \"The C Programming Language,\" published in 1978. The book became a standard reference for C programmers, and the \"Hello, World!\" program became a sort of \"Hello, World!\" for the programming community.\nOver time, the phrase \"Hello, World!\" became a shorthand for any simple program that demonstrated the basics" } ``` @@ -106,7 +112,7 @@ You will be prompted to log in after you run the `wrangler dev`. When you run `n Before deploying your AI Worker globally, log in with your Cloudflare account by running: ```sh -$ npx wrangler login +npx wrangler login ``` You will be directed to a web page asking you to log in to the Cloudflare dashboard. After you have logged in, you will be asked if Wrangler can make changes to your Cloudflare account. Scroll down and select **Allow** to continue. 
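As the sample output above shows, `env.AI.run()` resolves to an object whose `response` property carries the generated text. If you would rather serve plain text than the serialized JSON, a small variation on the Worker above (a sketch reusing the same `Env` interface and binding; only the return value changes):

```ts
export default {
	async fetch(request, env): Promise<Response> {
		const result = await env.AI.run("@cf/meta/llama-3.1-8b-instruct", {
			prompt: "What is the origin of the phrase Hello, World",
		});
		// `response` holds the generated text; return it directly instead of
		// serializing the whole result object.
		return new Response(result.response, {
			headers: { "Content-Type": "text/plain" },
		});
	},
} satisfies ExportedHandler<Env>;
```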
@@ -114,8 +120,11 @@ You will be directed to a web page asking you to log in to the Cloudflare dashbo Finally, deploy your Worker to make your project accessible on the Internet. To deploy your Worker, run: ```sh -$ npx wrangler deploy -# Outputs: https://hello-ai..workers.dev +npx wrangler deploy +``` + +```sh output +https://hello-ai..workers.dev ``` Your Worker will be deployed to your custom [`workers.dev`](/workers/configuration/routing/workers-dev/) subdomain. You can now visit the URL to run your AI Worker. @@ -124,5 +133,5 @@ By finishing this tutorial, you have created a Worker, connected it to Workers A ## Related resources -* [Cloudflare Developers community on Discord](https://discord.cloudflare.com) - Submit feature requests, report bugs, and share your feedback directly with the Cloudflare team by joining the Cloudflare Discord server. -* [Models](/workers-ai/models/) - Browse the Workers AI models catalog. +- [Cloudflare Developers community on Discord](https://discord.cloudflare.com) - Submit feature requests, report bugs, and share your feedback directly with the Cloudflare team by joining the Cloudflare Discord server. +- [Models](/workers-ai/models/) - Browse the Workers AI models catalog. diff --git a/src/content/docs/workers-ai/tutorials/build-a-retrieval-augmented-generation-ai.mdx b/src/content/docs/workers-ai/tutorials/build-a-retrieval-augmented-generation-ai.mdx index cf27455aed087d..f120c0582baca2 100644 --- a/src/content/docs/workers-ai/tutorials/build-a-retrieval-augmented-generation-ai.mdx +++ b/src/content/docs/workers-ai/tutorials/build-a-retrieval-augmented-generation-ai.mdx @@ -14,12 +14,9 @@ languages: - JavaScript sidebar: order: 2 - --- - - -import { Details, Render, PackageManagers } from "~/components" +import { Details, Render, PackageManagers } from "~/components"; This guide will instruct you through setting up and deploying your first application with Cloudflare AI. You will build a fully-featured AI-powered application, using tools like Workers AI, Vectorize, D1, and Cloudflare Workers. @@ -35,13 +32,24 @@ C3 (`create-cloudflare-cli`) is a command-line tool designed to help you setup a Open a terminal window and run C3 to create your Worker project: - - - + + + In your project directory, C3 has generated several files. -
1. `wrangler.toml`: Your [Wrangler](/workers/wrangler/configuration/#sample-wranglertoml-configuration) configuration file. @@ -50,13 +58,12 @@ In your project directory, C3 has generated several files. 4. `package-lock.json`: Refer to [`npm` documentation on `package-lock.json`](https://docs.npmjs.com/cli/v9/configuring-npm/package-lock-json). 5. `node_modules`: Refer to [`npm` documentation `node_modules`](https://docs.npmjs.com/cli/v7/configuring-npm/folders#node-modules). -
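As you work through this tutorial you will extend `wrangler.toml` several times — first with a Workers AI binding, later with Vectorize and D1 sections. A sketch of the AI portion for orientation (the binding name `AI` is what the tutorial's code expects; the other values are illustrative):

```toml
name = "rag-ai-tutorial"
main = "src/index.js"
compatibility_date = "2024-08-20"

# Workers AI binding, used in the code below as `env.AI`
[ai]
binding = "AI"
```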
Now, move into your newly created directory: ```sh -$ cd rag-ai-tutorial +cd rag-ai-tutorial ``` ## 2. Develop with Wrangler CLI @@ -66,17 +73,15 @@ The Workers command-line interface, [Wrangler](/workers/wrangler/install-and-upd After you have created your first Worker, run the [`wrangler dev`](/workers/wrangler/commands/#dev) command in the project directory to start a local server for developing your Worker. This will allow you to test your Worker locally during development. ```sh -$ npx wrangler dev --remote +npx wrangler dev --remote ``` :::note - If you have not used Wrangler before, it will try to open your web browser to login with your Cloudflare account. If you have issues with this step or you do not have access to a browser interface, refer to the [`wrangler login`](/workers/wrangler/commands/#login) documentation for more information. - ::: You will now be able to go to [http://localhost:8787](http://localhost:8787) to see your Worker running. Any changes you make to your code will trigger a rebuild, and reloading the page will show you the up-to-date output of your Worker. @@ -97,18 +102,13 @@ Now, find the `src/index.js` file. Inside the `fetch` handler, you can query the ```js export default { async fetch(request, env, ctx) { - const answer = await env.AI.run( - '@cf/meta/llama-3-8b-instruct', - { - messages: [ - { role: 'user', content: `What is the square root of 9?` } - ] - } - ) - - return new Response(JSON.stringify(answer)) - } -} + const answer = await env.AI.run("@cf/meta/llama-3-8b-instruct", { + messages: [{ role: "user", content: `What is the square root of 9?` }], + }); + + return new Response(JSON.stringify(answer)); + }, +}; ``` By querying the LLM binding, we can interact directly with the Cloudflare AI large language model directly in our code. @@ -116,13 +116,16 @@ By querying the LLM binding, we can interact directly with the Cloudflare AI lar You can deploy your Worker using `wrangler`: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` Making a request to your Worker will now return a response from the LLM binding. ```sh -$ curl https://example.username.workers.dev +curl https://example.username.workers.dev +``` + +```sh output {"response":"Answer: The square root of 9 is 3."} ``` @@ -133,7 +136,7 @@ Embeddings allow you to add additional capabilities to the language models you c To begin using Vectorize, create a new embeddings index using `wrangler`. This index will store vectors with 768 dimensions, and will use cosine similarity to determine which vectors are most similar to each other: ```sh -$ npx wrangler vectorize create vector-index --dimensions=768 --metric=cosine +npx wrangler vectorize create vector-index --dimensions=768 --metric=cosine ``` Then, add the configuration details for your new Vectorize index to `wrangler.toml`: @@ -153,7 +156,7 @@ To implement the searching feature, you must set up a D1 database from Cloudflar Create a new D1 database using `wrangler`: ```sh -$ npx wrangler d1 create database +npx wrangler d1 create database ``` Then, paste the configuration details output from the previous command into `wrangler.toml`: @@ -170,13 +173,13 @@ database_id = "abc-def-geh" # replace this with a real database_id (UUID) In this application, we'll create a `notes` table in D1, which will allow us to store notes and later retrieve them in Vectorize. 
To create this table, run a SQL command using `wrangler d1 execute`: ```sh -$ npx wrangler d1 execute database --remote --command "CREATE TABLE IF NOT EXISTS notes (id INTEGER PRIMARY KEY, text TEXT NOT NULL)" +npx wrangler d1 execute database --remote --command "CREATE TABLE IF NOT EXISTS notes (id INTEGER PRIMARY KEY, text TEXT NOT NULL)" ``` Now, we can add a new note to our database using `wrangler d1 execute`: ```sh -$ npx wrangler d1 execute database --remote --command "INSERT INTO notes (text) VALUES ('The best pizza topping is pepperoni')" +npx wrangler d1 execute database --remote --command "INSERT INTO notes (text) VALUES ('The best pizza topping is pepperoni')" ``` ## 5. Creating notes and adding them to Vectorize @@ -184,29 +187,24 @@ $ npx wrangler d1 execute database --remote --command "INSERT INTO notes (text) To expand on your Workers function in order to handle multiple routes, we will add `hono`, a routing library for Workers. This will allow us to create a new route for adding notes to our database. Install `hono` using `npm`: ```sh -$ npm install hono +npm install hono ``` Then, import `hono` into your `src/index.js` file. You should also update the `fetch` handler to use `hono`: ```js -import { Hono } from "hono" -const app = new Hono() - -app.get('/', async (c) => { - const answer = await c.env.AI.run( - '@cf/meta/llama-3-8b-instruct', - { - messages: [ - { role: 'user', content: `What is the square root of 9?` } - ] - } - ) - - return c.json(answer) -}) - -export default app +import { Hono } from "hono"; +const app = new Hono(); + +app.get("/", async (c) => { + const answer = await c.env.AI.run("@cf/meta/llama-3-8b-instruct", { + messages: [{ role: "user", content: `What is the square root of 9?` }], + }); + + return c.json(answer); +}); + +export default app; ``` This will establish a route at the root path `/` that is functionally equivalent to the previous version of your application. Now, we can add a new route for adding notes to our database. @@ -214,39 +212,43 @@ This will establish a route at the root path `/` that is functionally equivalent This example features the [`@cf/baai/bge-base-en-v1.5` model](/workers-ai/models/bge-base-en-v1.5/), which can be used to create an embedding. Embeddings are stored and retrieved from our vector database [Vectorize](/vectorize/). The user's query is also turned into an embedding so that it can be used for searching within Vectorize. ```js -app.post('/notes', async (c) => { - const { text } = await c.req.json() - if (!text) { - return c.text("Missing text", 400); - } +app.post("/notes", async (c) => { + const { text } = await c.req.json(); + if (!text) { + return c.text("Missing text", 400); + } - const { results } = await c.env.DB.prepare("INSERT INTO notes (text) VALUES (?) RETURNING *") - .bind(text) - .run() + const { results } = await c.env.DB.prepare( + "INSERT INTO notes (text) VALUES (?) RETURNING *", + ) + .bind(text) + .run(); - const record = results.length ? results[0] : null + const record = results.length ? 
results[0] : null; - if (!record) { - return c.text("Failed to create note", 500); + if (!record) { + return c.text("Failed to create note", 500); } - const { data } = await c.env.AI.run('@cf/baai/bge-base-en-v1.5', { text: [text] }) - const values = data[0] + const { data } = await c.env.AI.run("@cf/baai/bge-base-en-v1.5", { + text: [text], + }); + const values = data[0]; - if (!values) { - return c.text("Failed to generate vector embedding", 500); + if (!values) { + return c.text("Failed to generate vector embedding", 500); } - const { id } = record - const inserted = await c.env.VECTOR_INDEX.upsert([ - { - id: id.toString(), - values, - } - ]) + const { id } = record; + const inserted = await c.env.VECTOR_INDEX.upsert([ + { + id: id.toString(), + values, + }, + ]); - return c.json({ id, text, inserted }) -}) + return c.json({ id, text, inserted }); +}); ``` This function does the following things: @@ -265,64 +267,65 @@ To complete your code, you can update the root path (`/`) to query Vectorize. Yo Since we are using cosine similarity, the vectors with the highest cosine similarity will be the most similar to the query. We can introduce a `SIMILIARITY_CUTOFF` to only return vectors that are above a certain similarity threshold. In this case, we will use a cutoff of `0.75`, but you can adjust this value to suit your needs. -We will also specify the `topK` parameter as part of the optional parameters to the `query` function. The `topK` parameter limits the number of vectors returned by the function. For instance, providing a `topK` of 1 will only return the *most similar* vector based on the query. You may customize this for your own needs. +We will also specify the `topK` parameter as part of the optional parameters to the `query` function. The `topK` parameter limits the number of vectors returned by the function. For instance, providing a `topK` of 1 will only return the _most similar_ vector based on the query. You may customize this for your own needs. With the list of similar vectors, you can retrieve the notes that match the record IDs stored alongside those vectors. You can insert the text of those notes as context into the prompt for the LLM binding. We'll update the prompt to include the context, and to ask the LLM to use the context when responding. Finally, you can query the LLM binding to get a response. ```js -import { Hono } from 'hono' -const app = new Hono() +import { Hono } from "hono"; +const app = new Hono(); // Existing post route... // app.post('/notes', async (c) => { ... }) -app.get('/', async (c) => { - const question = c.req.query('text') || "What is the square root of 9?" - - const embeddings = await c.env.AI.run('@cf/baai/bge-base-en-v1.5', { text: question }) - const vectors = embeddings.data[0] - - const SIMILARITY_CUTOFF = 0.75 - const vectorQuery = await c.env.VECTOR_INDEX.query(vectors, { topK: 1 }); - const vecIds = vectorQuery.matches - .filter(vec => vec.score > SIMILARITY_CUTOFF) - .map(vec => vec.id) - - let notes = [] - if (vecIds.length) { - const query = `SELECT * FROM notes WHERE id IN (${vecIds.join(", ")})` - const { results } = await c.env.DB.prepare(query).bind().all() - if (results) notes = results.map(vec => vec.text) - } - - const contextMessage = notes.length - ? 
`Context:\n${notes.map(note => `- ${note}`).join("\n")}` - : "" - - const systemPrompt = `When answering the question or responding, use the context provided, if it is provided and relevant.` - - const { response: answer } = await c.env.AI.run( - '@cf/meta/llama-3-8b-instruct', - { - messages: [ - ...(notes.length ? [{ role: 'system', content: contextMessage }] : []), - { role: 'system', content: systemPrompt }, - { role: 'user', content: question } - ] - } - ) - - return c.text(answer); -}) +app.get("/", async (c) => { + const question = c.req.query("text") || "What is the square root of 9?"; + + const embeddings = await c.env.AI.run("@cf/baai/bge-base-en-v1.5", { + text: question, + }); + const vectors = embeddings.data[0]; + + const SIMILARITY_CUTOFF = 0.75; + const vectorQuery = await c.env.VECTOR_INDEX.query(vectors, { topK: 1 }); + const vecIds = vectorQuery.matches + .filter((vec) => vec.score > SIMILARITY_CUTOFF) + .map((vec) => vec.id); + + let notes = []; + if (vecIds.length) { + const query = `SELECT * FROM notes WHERE id IN (${vecIds.join(", ")})`; + const { results } = await c.env.DB.prepare(query).bind().all(); + if (results) notes = results.map((vec) => vec.text); + } -app.onError((err, c) => { - return c.text(err) -}) + const contextMessage = notes.length + ? `Context:\n${notes.map((note) => `- ${note}`).join("\n")}` + : ""; -export default app + const systemPrompt = `When answering the question or responding, use the context provided, if it is provided and relevant.`; + const { response: answer } = await c.env.AI.run( + "@cf/meta/llama-3-8b-instruct", + { + messages: [ + ...(notes.length ? [{ role: "system", content: contextMessage }] : []), + { role: "system", content: systemPrompt }, + { role: "user", content: question }, + ], + }, + ); + + return c.text(answer); +}); + +app.onError((err, c) => { + return c.text(err); +}); + +export default app; ``` ## 7. Deploy your project @@ -330,27 +333,25 @@ export default app If you did not deploy your Worker during [step 1](/workers/get-started/guide/#1-create-a-new-worker-project), deploy your Worker via Wrangler, to a `*.workers.dev` subdomain, or a [Custom Domain](/workers/configuration/routing/custom-domains/), if you have one configured. If you have not configured any subdomain or domain, Wrangler will prompt you during the publish process to set one up. ```sh -$ npx wrangler deploy +npx wrangler deploy ``` Preview your Worker at `..workers.dev`. :::note[Note] - When pushing to your `*.workers.dev` subdomain for the first time, you may see [`523` errors](/support/troubleshooting/cloudflare-errors/troubleshooting-cloudflare-5xx-errors/#error-523-origin-is-unreachable) while DNS is propagating. These errors should resolve themselves after a minute or so. - ::: ## Related resources To do more: -* Explore the reference diagram for a [Retrieval Augmented Generation (RAG) Architecture](/reference-architecture/diagrams/ai/ai-rag/). -* Review Cloudflare's [AI documentation](/workers-ai). -* Review [Tutorials](/workers/tutorials/) to build projects on Workers. -* Explore [Examples](/workers/examples/) to experiment with copy and paste Worker code. -* Understand how Workers works in [Reference](/workers/reference/). -* Learn about Workers features and functionality in [Platform](/workers/platform/). -* Set up [Wrangler](/workers/wrangler/install-and-update/) to programmatically create, test, and deploy your Worker projects. 
+- Explore the reference diagram for a [Retrieval Augmented Generation (RAG) Architecture](/reference-architecture/diagrams/ai/ai-rag/). +- Review Cloudflare's [AI documentation](/workers-ai). +- Review [Tutorials](/workers/tutorials/) to build projects on Workers. +- Explore [Examples](/workers/examples/) to experiment with copy and paste Worker code. +- Understand how Workers works in [Reference](/workers/reference/). +- Learn about Workers features and functionality in [Platform](/workers/platform/). +- Set up [Wrangler](/workers/wrangler/install-and-update/) to programmatically create, test, and deploy your Worker projects. diff --git a/src/content/docs/workers/configuration/cron-triggers/index.mdx b/src/content/docs/workers/configuration/cron-triggers/index.mdx index 3a8c53d906df05..445921c245826f 100644 --- a/src/content/docs/workers/configuration/cron-triggers/index.mdx +++ b/src/content/docs/workers/configuration/cron-triggers/index.mdx @@ -3,7 +3,6 @@ pcx_content_type: concept title: Cron Triggers head: [] description: Enable your Worker to be executed on a schedule. - --- ## Background @@ -22,8 +21,8 @@ To respond to a Cron Trigger, you must add a [`"scheduled"` handler](/workers/ru Refer to the following examples to write your code: -* [Setting Cron Triggers](/workers/examples/cron-trigger/) -* [Multiple Cron Triggers](/workers/examples/multiple-cron-triggers/) +- [Setting Cron Triggers](/workers/examples/cron-trigger/) +- [Multiple Cron Triggers](/workers/examples/multiple-cron-triggers/) ### 2. Update configuration @@ -75,40 +74,36 @@ Cloudflare supports cron expressions with five fields, along with most [Quartz s Some common time intervals that may be useful for setting up your Cron Trigger: +- `* * * * *` + - At every minute -* `* * * * *` - - * At every minute - -* `*/30 * * * *` +- `*/30 * * * *` - * At every 30th minute + - At every 30th minute -* `45 * * * *` +- `45 * * * *` - * On the 45th minute of every hour + - On the 45th minute of every hour -* `0 17 * * sun` or `0 17 * * 1` +- `0 17 * * sun` or `0 17 * * 1` - * 5PM on Sunday + - 5PM on Sunday -* `10 7 * * mon-fri` or `10 7 * * 2-6` +- `10 7 * * mon-fri` or `10 7 * * 2-6` - * 7:10AM on weekdays + - 7:10AM on weekdays -* `0 15 1 * *` +- `0 15 1 * *` - * 3PM on first day of the month + - 3PM on first day of the month -* `0 18 * * 6L` or `0 18 * * friL` - - * 6PM on the last Friday of the month - -* `59 23 LW * *` - * 11:59PM on the last weekday of the month +- `0 18 * * 6L` or `0 18 * * friL` + - 6PM on the last Friday of the month +- `59 23 LW * *` + - 11:59PM on the last weekday of the month ## Test Cron Triggers @@ -116,18 +111,16 @@ The recommended way of testing Cron Triggers is using Wrangler. :::note[Cron Trigger changes take time to propagate.] - Changes such as adding a new Cron Trigger, updating an old Cron Trigger, or deleting a Cron Trigger may take several minutes (up to 15 minutes) to propagate to the Cloudflare global network. - ::: -Test Cron Triggers using `Wrangler` by passing in the `--test-scheduled` flag to [`wrangler dev`](/workers/wrangler/commands/#dev). This will expose a `/__scheduled` route which can be used to test using a HTTP request. To simulate different cron patterns, a `cron` query parameter can be passed in. +Test Cron Triggers using `Wrangler` by passing in the `--test-scheduled` flag to [`wrangler dev`](/workers/wrangler/commands/#dev). This will expose a `/__scheduled` route which can be used to test using a HTTP request. 
To simulate different cron patterns, a `cron` query parameter can be passed in. ```sh -$ npx wrangler dev --test-scheduled +npx wrangler dev --test-scheduled -$ curl "http://localhost:8787/__scheduled?cron=*+*+*+*+*" +curl "http://localhost:8787/__scheduled?cron=*+*+*+*+*" ``` ## View past events @@ -136,10 +129,8 @@ Users can review the execution history of their Cron Triggers in **Past Cron Eve :::note - It can take up to 30 minutes before events are displayed in **Past Cron Events** when creating a new Worker or changing a Worker's name. - ::: Refer to [Metrics and Analytics](/workers/observability/metrics-and-analytics/) for more information. @@ -156,10 +147,8 @@ To delete a Cron Trigger on a deployed Worker via the dashboard: :::note - You can only delete Cron Triggers using the Cloudflare dashboard (and not through your `wrangler.toml` file). - ::: ## Limits @@ -183,5 +172,5 @@ Green Compute can be configured at the account level: ## Related resources -* [Triggers](/workers/wrangler/configuration/#triggers) - Review `wrangler.toml` syntax for Cron Triggers. -* Learn how to access Cron Triggers in [ES modules syntax](/workers/reference/migrate-to-module-workers/) for an optimized experience. +- [Triggers](/workers/wrangler/configuration/#triggers) - Review `wrangler.toml` syntax for Cron Triggers. +- Learn how to access Cron Triggers in [ES modules syntax](/workers/reference/migrate-to-module-workers/) for an optimized experience. diff --git a/src/content/docs/workers/configuration/integrations/apis.mdx b/src/content/docs/workers/configuration/integrations/apis.mdx index 03d15ad1fedea9..20e56e9529ddd8 100644 --- a/src/content/docs/workers/configuration/integrations/apis.mdx +++ b/src/content/docs/workers/configuration/integrations/apis.mdx @@ -1,30 +1,29 @@ --- pcx_content_type: concept title: APIs - --- To integrate with third party APIs from Cloudflare Workers, use the [fetch API](/workers/runtime-apis/fetch/) to make HTTP requests to the API endpoint. Then use the response data to modify or manipulate your content as needed. For example, if you want to integrate with a weather API, make a fetch request to the API endpoint and retrieve the current weather data. Then use this data to display the current weather conditions on your website. -To make the `fetch()` request, add the following code to your project's `src/index.js` file: +To make the `fetch()` request, add the following code to your project's `src/index.js` file: ```js async function handleRequest(request) { - // Make the fetch request to the third party API endpoint - const response = await fetch('https://weather-api.com/endpoint', { - method: 'GET', - headers: { - 'Content-Type': 'application/json' - } - }); - - // Retrieve the data from the response - const data = await response.json(); - - // Use the data to modify or manipulate your content as needed - return new Response(data); + // Make the fetch request to the third party API endpoint + const response = await fetch("https://weather-api.com/endpoint", { + method: "GET", + headers: { + "Content-Type": "application/json", + }, + }); + + // Retrieve the data from the response + const data = await response.json(); + + // Use the data to modify or manipulate your content as needed + return new Response(data); } ``` @@ -33,7 +32,7 @@ async function handleRequest(request) { If your API requires authentication, use Wrangler secrets to securely store your credentials. 
To do this, create a secret in your Cloudflare Workers project using the following [`wrangler secret`](/workers/wrangler/commands/#secret) command: ```sh -$ wrangler secret put SECRET_NAME +wrangler secret put SECRET_NAME ``` Then, retrieve the secret value in your code using the following code snippet: @@ -48,6 +47,6 @@ For services that require mTLS authentication, use [mTLS certificates](/workers/ ## Tips -* Use the Cache API to cache data from the third party API. This allows you to optimize cacheable requests made to the API. Integrating with third party APIs from Cloudflare Workers adds additional functionality and features to your application. +- Use the Cache API to cache data from the third party API. This allows you to optimize cacheable requests made to the API. Integrating with third party APIs from Cloudflare Workers adds additional functionality and features to your application. -* Use [Custom Domains](/workers/configuration/routing/custom-domains/) when communicating with external APIs, which treat your Worker as your core application. +- Use [Custom Domains](/workers/configuration/routing/custom-domains/) when communicating with external APIs, which treat your Worker as your core application. diff --git a/src/content/docs/workers/configuration/integrations/external-services.mdx b/src/content/docs/workers/configuration/integrations/external-services.mdx index 9d48cd86b9b066..5f4dc8d06d79b2 100644 --- a/src/content/docs/workers/configuration/integrations/external-services.mdx +++ b/src/content/docs/workers/configuration/integrations/external-services.mdx @@ -1,7 +1,6 @@ --- pcx_content_type: concept title: External Services - --- Many external services provide libraries and SDKs to interact with their APIs. While many Node-compatible libraries work on Workers right out of the box, some, which implement `fs`, `http/net`, or access the browser `window` do not directly translate to the Workers runtime, which is v8-based. @@ -11,7 +10,7 @@ Many external services provide libraries and SDKs to interact with their APIs. W If your service requires authentication, use Wrangler secrets to securely store your credentials. To do this, create a secret in your Cloudflare Workers project using the following [`wrangler secret`](/workers/wrangler/commands/#secret) command: ```sh -$ wrangler secret put SECRET_NAME +wrangler secret put SECRET_NAME ``` Then, retrieve the secret value in your code using the following code snippet: diff --git a/src/content/docs/workers/configuration/secrets.mdx b/src/content/docs/workers/configuration/secrets.mdx index dd247dd27c7154..a6007e2924c5d1 100644 --- a/src/content/docs/workers/configuration/secrets.mdx +++ b/src/content/docs/workers/configuration/secrets.mdx @@ -3,10 +3,9 @@ pcx_content_type: configuration title: Secrets head: [] description: Store sensitive information, like API keys and auth tokens, in your Worker. - --- -import { Render } from "~/components" +import { Render } from "~/components"; ## Background @@ -27,13 +26,13 @@ Secrets can be added through [`wrangler secret put`](/workers/wrangler/commands/ `wrangler secret put` creates a new version of the Worker and deploys it immediately. ```sh -$ npx wrangler secret put +npx wrangler secret put ``` If using [gradual deployments](/workers/configuration/versions-and-deployments/gradual-deployments/), instead use the `wrangler versions secret put` command. 
This will only create a new version of the Worker, which can then be deployed using [`wrangler versions deploy`](/workers/wrangler/commands/#deploy-2).

 ```sh
-$ npx wrangler versions secret put <KEY> --x-versions
+npx wrangler versions secret put <KEY> --x-versions
 ```

 #### Via the dashboard

@@ -58,13 +57,13 @@ Secrets can be deleted through [`wrangler secret delete`](/workers/wrangler/comm

 `wrangler secret delete` creates a new version of the Worker and deploys it immediately.

 ```sh
-$ npx wrangler secret delete <KEY>
+npx wrangler secret delete <KEY>
 ```

 If using [gradual deployments](/workers/configuration/versions-and-deployments/gradual-deployments/), instead use the `wrangler versions secret delete` command. This will only create a new version of the Worker, which can then be deployed using [`wrangler versions deploy`](/workers/wrangler/commands/#deploy-2).

 ```sh
-$ npx wrangler versions secret delete <KEY> --x-versions
+npx wrangler versions secret delete <KEY> --x-versions
 ```

 #### Via the dashboard

@@ -82,4 +81,4 @@ To delete a secret from your Worker project via the dashboard:

 ## Related resources

-* [Wrangler secret commands](/workers/wrangler/commands/#secret) - Review the Wrangler commands to create, delete and list secrets.
+- [Wrangler secret commands](/workers/wrangler/commands/#secret) - Review the Wrangler commands to create, delete and list secrets.
diff --git a/src/content/docs/workers/configuration/sites/start-from-existing.mdx b/src/content/docs/workers/configuration/sites/start-from-existing.mdx
index fc746470d697a4..358b21ee07f0f7 100644
--- a/src/content/docs/workers/configuration/sites/start-from-existing.mdx
+++ b/src/content/docs/workers/configuration/sites/start-from-existing.mdx
@@ -3,10 +3,9 @@ pcx_content_type: how-to
 title: Start from existing
 sidebar:
   order: 1
-
 ---

-import { Render, TabItem, Tabs } from "~/components"
+import { Render, TabItem, Tabs } from "~/components";

@@ -14,26 +13,26 @@ Workers Sites require [Wrangler](https://github.com/cloudflare/workers-sdk/tree/

 To deploy a pre-existing static site project, start with a pre-generated site. Workers Sites works with all static site generators, for example:

-* [Hugo](https://gohugo.io/getting-started/quick-start/)
-* [Gatsby](https://www.gatsbyjs.org/docs/quick-start/), requires Node
-* [Jekyll](https://jekyllrb.com/docs/), requires Ruby
-* [Eleventy](https://www.11ty.io/#quick-start), requires Node
-* [WordPress](https://wordpress.org) (refer to the tutorial on [deploying static WordPress sites with Pages](/pages/how-to/deploy-a-wordpress-site/))
+- [Hugo](https://gohugo.io/getting-started/quick-start/)
+- [Gatsby](https://www.gatsbyjs.org/docs/quick-start/), requires Node
+- [Jekyll](https://jekyllrb.com/docs/), requires Ruby
+- [Eleventy](https://www.11ty.io/#quick-start), requires Node
+- [WordPress](https://wordpress.org) (refer to the tutorial on [deploying static WordPress sites with Pages](/pages/how-to/deploy-a-wordpress-site/))

 ## Getting started

 1. Run the `wrangler init` command in the root of your project’s directory to generate a basic Worker:

   ```sh
-   $ wrangler init -y
+   wrangler init -y
   ```

   This command adds/updates the following files:

-   * `wrangler.toml`: The file containing project configuration.
+ - `package.json`: Wrangler `devDependencies` are added. + - `tsconfig.json`: Added if not already there to support writing the Worker in TypeScript. + - `src/index.ts`: A basic Cloudflare Worker, written in TypeScript. 2. Add your site’s build/output directory to the `wrangler.toml` file: @@ -44,15 +43,15 @@ To deploy a pre-existing static site project, start with a pre-generated site. W The default directories for the most popular static site generators are listed below: - * Hugo: `public` - * Gatsby: `public` - * Jekyll: `_site` - * Eleventy: `_site` + - Hugo: `public` + - Gatsby: `public` + - Jekyll: `_site` + - Eleventy: `_site` 3. Install the `@cloudflare/kv-asset-handler` package in your project: ```sh - $ npm i -D @cloudflare/kv-asset-handler + npm i -D @cloudflare/kv-asset-handler ``` 4. Replace the contents of `src/index.ts` with the following code snippet: @@ -60,32 +59,32 @@ To deploy a pre-existing static site project, start with a pre-generated site. W ```js -import { getAssetFromKV } from '@cloudflare/kv-asset-handler'; -import manifestJSON from '__STATIC_CONTENT_MANIFEST'; +import { getAssetFromKV } from "@cloudflare/kv-asset-handler"; +import manifestJSON from "__STATIC_CONTENT_MANIFEST"; const assetManifest = JSON.parse(manifestJSON); export default { - async fetch(request, env, ctx) { - try { - // Add logic to decide whether to serve an asset or run your original Worker code - return await getAssetFromKV( - { - request, - waitUntil: ctx.waitUntil.bind(ctx), - }, - { - ASSET_NAMESPACE: env.__STATIC_CONTENT, - ASSET_MANIFEST: assetManifest, - } - ); - } catch (e) { - let pathname = new URL(request.url).pathname; - return new Response(`"${pathname}" not found`, { - status: 404, - statusText: 'not found', - }); - } - }, + async fetch(request, env, ctx) { + try { + // Add logic to decide whether to serve an asset or run your original Worker code + return await getAssetFromKV( + { + request, + waitUntil: ctx.waitUntil.bind(ctx), + }, + { + ASSET_NAMESPACE: env.__STATIC_CONTENT, + ASSET_MANIFEST: assetManifest, + }, + ); + } catch (e) { + let pathname = new URL(request.url).pathname; + return new Response(`"${pathname}" not found`, { + status: 404, + statusText: "not found", + }); + } + }, }; ``` @@ -95,20 +94,20 @@ export default { import { getAssetFromKV } from "@cloudflare/kv-asset-handler"; addEventListener("fetch", (event) => { - event.respondWith(handleEvent(event)); + event.respondWith(handleEvent(event)); }); async function handleEvent(event) { - try { - // Add logic to decide whether to serve an asset or run your original Worker code - return await getAssetFromKV(event); - } catch (e) { - let pathname = new URL(event.request.url).pathname; - return new Response(`"${pathname}" not found`, { - status: 404, - statusText: "not found", - }); - } + try { + // Add logic to decide whether to serve an asset or run your original Worker code + return await getAssetFromKV(event); + } catch (e) { + let pathname = new URL(event.request.url).pathname; + return new Response(`"${pathname}" not found`, { + status: 404, + statusText: "not found", + }); + } } ``` @@ -118,7 +117,7 @@ async function handleEvent(event) { Wrangler will automatically upload the assets found in the configured directory. ```sh - $ npx wrangler deploy + npx wrangler deploy ``` 6. Deploy your site to a [custom domain](/workers/configuration/routing/custom-domains/) that you own and have already attached as a Cloudflare zone. Add a `route` property to the `wrangler.toml` file. 
@@ -129,7 +128,7 @@ async function handleEvent(event) { :::note - Refer to the documentation on [Routes](/workers/configuration/routing/routes/) to configure a `route` properly. + Refer to the documentation on [Routes](/workers/configuration/routing/routes/) to configure a `route` properly. ::: Learn more about [configuring your project](/workers/wrangler/configuration/). diff --git a/src/content/docs/workers/configuration/sites/start-from-scratch.mdx b/src/content/docs/workers/configuration/sites/start-from-scratch.mdx index 1ef3005920f51a..8dc8cbaada82b8 100644 --- a/src/content/docs/workers/configuration/sites/start-from-scratch.mdx +++ b/src/content/docs/workers/configuration/sites/start-from-scratch.mdx @@ -3,10 +3,9 @@ pcx_content_type: how-to title: Start from scratch sidebar: order: 2 - --- -import { Render } from "~/components" +import { Render } from "~/components"; @@ -20,7 +19,7 @@ This guide shows how to quickly start a new Workers Sites project from scratch. The following example creates a project called `my-site`: ```sh - $ git clone --depth=1 --branch=wrangler2 https://github.com/cloudflare/worker-sites-template my-site + git clone --depth=1 --branch=wrangler2 https://github.com/cloudflare/worker-sites-template my-site ``` 3. Run `npm install` to install all dependencies. @@ -28,40 +27,40 @@ This guide shows how to quickly start a new Workers Sites project from scratch. 4. You can preview your site by running the [`wrangler dev`](/workers/wrangler/commands/#dev) command: ```sh - $ wrangler dev + wrangler dev ``` 5. Deploy your site to Cloudflare: ```sh - $ npx wrangler deploy + npx wrangler deploy ``` ## Project layout The template project contains the following files and directories: -* `public`: The static assets for your project. By default it contains an `index.html` and a `favicon.ico`. -* `src`: The Worker configured for serving your assets. You do not need to edit this but if you want to see how it works or add more functionality to your Worker, you can edit `src/index.ts`. -* `wrangler.toml`: The file containing project configuration. +- `public`: The static assets for your project. By default it contains an `index.html` and a `favicon.ico`. +- `src`: The Worker configured for serving your assets. You do not need to edit this but if you want to see how it works or add more functionality to your Worker, you can edit `src/index.ts`. +- `wrangler.toml`: The file containing project configuration. The `bucket` property tells Wrangler where to find the static assets (e.g. `site = { bucket = "./public" }`). -* `package.json`/`package-lock.json`: define the required Node.js dependencies. +- `package.json`/`package-lock.json`: define the required Node.js dependencies. 
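Taken together, a minimal `wrangler.toml` for this template might look like the sketch below; the values shown are placeholders, and `main` assumes the template's default entry point. The next section walks through customizing them:

```toml
name = "my-site"
main = "src/index.ts"
compatibility_date = "2024-08-20"

# Where Workers Sites finds the static assets to upload
site = { bucket = "./public" }
```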
## Customize `wrangler.toml`:

-* Change the `name` property to the name of your project:
+- Change the `name` property to the name of your project:

   ```toml
   name = "my-site"
   ```

-* Consider updating`compatibility_date` to today's date to get access to the most recent Workers features:
+- Consider updating `compatibility_date` to today's date to get access to the most recent Workers features:

   ```toml
   compatibility_date = "yyyy-mm-dd"
   ```

-* Deploy your site to a [custom domain](/workers/configuration/routing/custom-domains/) that you own and have already attached as a Cloudflare zone:
+- Deploy your site to a [custom domain](/workers/configuration/routing/custom-domains/) that you own and have already attached as a Cloudflare zone:

   ```toml
   route = "https://example.com/*"
   ```

:::note

-Refer to the documentation on [Routes](/workers/configuration/routing/routes/) to configure a `route` properly.
+  Refer to the documentation on [Routes](/workers/configuration/routing/routes/) to configure a `route` properly.
:::

Learn more about [configuring your project](/workers/wrangler/configuration/).
diff --git a/src/content/docs/workers/configuration/sites/start-from-worker.mdx b/src/content/docs/workers/configuration/sites/start-from-worker.mdx
index 818eb038f81eec..cefc8aff93de62 100644
--- a/src/content/docs/workers/configuration/sites/start-from-worker.mdx
+++ b/src/content/docs/workers/configuration/sites/start-from-worker.mdx
@@ -3,10 +3,9 @@ pcx_content_type: how-to
 title: Start from Worker
 sidebar:
   order: 3
-
 ---

-import { Render, TabItem, Tabs } from "~/components"
+import { Render, TabItem, Tabs } from "~/components";

@@ -28,7 +27,7 @@ If you have a pre-existing Worker project, you can use Workers Sites to serve st

 3. Install the `@cloudflare/kv-asset-handler` package in your project:

   ```sh
-   $ npm i -D @cloudflare/kv-asset-handler
+   npm i -D @cloudflare/kv-asset-handler
   ```

 4. Import the `getAssetFromKV()` function into your Worker entry point and use it to respond with static assets.
@@ -36,32 +35,32 @@ If you have a pre-existing Worker project, you can use Workers Sites to serve st ```js -import { getAssetFromKV } from '@cloudflare/kv-asset-handler'; -import manifestJSON from '__STATIC_CONTENT_MANIFEST'; +import { getAssetFromKV } from "@cloudflare/kv-asset-handler"; +import manifestJSON from "__STATIC_CONTENT_MANIFEST"; const assetManifest = JSON.parse(manifestJSON); export default { - async fetch(request, env, ctx) { - try { - // Add logic to decide whether to serve an asset or run your original Worker code - return await getAssetFromKV( - { - request, - waitUntil: ctx.waitUntil.bind(ctx), - }, - { - ASSET_NAMESPACE: env.__STATIC_CONTENT, - ASSET_MANIFEST: assetManifest, - } - ); - } catch (e) { - let pathname = new URL(request.url).pathname; - return new Response(`"${pathname}" not found`, { - status: 404, - statusText: 'not found', - }); - } - }, + async fetch(request, env, ctx) { + try { + // Add logic to decide whether to serve an asset or run your original Worker code + return await getAssetFromKV( + { + request, + waitUntil: ctx.waitUntil.bind(ctx), + }, + { + ASSET_NAMESPACE: env.__STATIC_CONTENT, + ASSET_MANIFEST: assetManifest, + }, + ); + } catch (e) { + let pathname = new URL(request.url).pathname; + return new Response(`"${pathname}" not found`, { + status: 404, + statusText: "not found", + }); + } + }, }; ``` @@ -71,20 +70,20 @@ export default { import { getAssetFromKV } from "@cloudflare/kv-asset-handler"; addEventListener("fetch", (event) => { - event.respondWith(handleEvent(event)); + event.respondWith(handleEvent(event)); }); async function handleEvent(event) { - try { - // Add logic to decide whether to serve an asset or run your original Worker code - return await getAssetFromKV(event); - } catch (e) { - let pathname = new URL(event.request.url).pathname; - return new Response(`"${pathname}" not found`, { - status: 404, - statusText: "not found", - }); - } + try { + // Add logic to decide whether to serve an asset or run your original Worker code + return await getAssetFromKV(event); + } catch (e) { + let pathname = new URL(event.request.url).pathname; + return new Response(`"${pathname}" not found`, { + status: 404, + statusText: "not found", + }); + } } ``` @@ -96,5 +95,5 @@ For more information on the configurable options of `getAssetFromKV()` refer to Wrangler will automatically upload the assets found in the configured directory. ```sh - $ npx wrangler deploy + npx wrangler deploy ``` diff --git a/src/content/docs/workers/configuration/smart-placement.mdx b/src/content/docs/workers/configuration/smart-placement.mdx index f70a508f286c96..c3710079907d81 100644 --- a/src/content/docs/workers/configuration/smart-placement.mdx +++ b/src/content/docs/workers/configuration/smart-placement.mdx @@ -7,7 +7,6 @@ description: Speed up your Worker application by automatically placing your sidebar: badge: text: Beta - --- By default, [Workers](/workers/) and [Pages Functions](/pages/functions/) are invoked in a data center closest to where the request was received. If you are running back-end logic in a Worker, it may be more performant to run that Worker closer to your back-end infrastructure rather than the end user. Smart Placement automatically places your workloads in an optimal location that minimizes latency and speeds up your applications. 
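Turning Smart Placement on is a per-Worker setting. With Wrangler it is a single stanza in `wrangler.toml` — a minimal sketch of the same setting the **Enable Smart Placement** section below describes:

```toml
# Opt this Worker into Smart Placement (omit the stanza to keep default placement)
[placement]
mode = "smart"
```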
@@ -44,11 +43,12 @@ Workers with a [D1](/d1/) binding will always be placed in a data center near th

There are some back-end services that are not considered by the Smart Placement algorithm:

-* **Globally distributed services**: If the services that your Worker communicates with are geo-distributed in many regions (for example, CDNs, distributed databases, distributed APIs), Smart Placement is not a good fit. We automatically rule these out of the Smart Placement optimization.
-  * Examples: Google APIs, services using Fastly or Akamai's CDN.
+- **Globally distributed services**: If the services that your Worker communicates with are geo-distributed in many regions (for example, CDNs, distributed databases, distributed APIs), Smart Placement is not a good fit. We automatically rule these out of the Smart Placement optimization.
+
+  - Examples: Google APIs, services using Fastly or Akamai's CDN.

-* **Analytics or logging services**: Requests to analytics or logging services should not be in the critical path of your application. [`waitUntil()`](/workers/runtime-apis/context/#waituntil) should be used so that the response back to users is not blocked when instrumenting your code. Since `waitUntil()` does not impact the request duration from a user’s perspective, we automatically rule analytics and logging services out of the Smart Placement optimization.
-  * Examples: New Relic, Datadog, Tinybird, Grafana, Amplitude, Honeycomb.
+- **Analytics or logging services**: Requests to analytics or logging services should not be in the critical path of your application. [`waitUntil()`](/workers/runtime-apis/context/#waituntil) should be used so that the response back to users is not blocked when instrumenting your code. Since `waitUntil()` does not impact the request duration from a user’s perspective, we automatically rule analytics and logging services out of the Smart Placement optimization.
+  - Examples: New Relic, Datadog, Tinybird, Grafana, Amplitude, Honeycomb.

## Enable Smart Placement

@@ -90,18 +90,18 @@ To enable Smart Placement via the dashboard:

A Worker's metadata contains details about a Worker's placement status. Query your Worker's placement status through the following Workers API endpoint:

```bash
-$ curl -X GET https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/workers/services/{WORKER_NAME} \
+curl -X GET https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/workers/services/{WORKER_NAME} \
-H "Authorization: Bearer <API_TOKEN>" \
-H "Content-Type: application/json" | jq .
```

Possible placement states include:

-* *(not present)*: The Worker has not been analyzed for Smart Placement yet.
-* `INSUFFICIENT_INVOCATIONS`: Not enough requests for Smart Placement to make a placement decision.
-* `NO_VALID_HOSTS`: The Worker does not send subrequests to back-end services supported by Smart Placement.
-* `INSUFFICIENT_SUBREQUESTS`: The Worker does not send enough subrequests to valid back-end services.
-* `SUCCESS`: The Worker has been successfully analyzed and will be optimized by Smart Placement.
+- _(not present)_: The Worker has not been analyzed for Smart Placement yet.
+- `INSUFFICIENT_INVOCATIONS`: Not enough requests for Smart Placement to make a placement decision.
+- `NO_VALID_HOSTS`: The Worker does not send subrequests to back-end services supported by Smart Placement.
+- `INSUFFICIENT_SUBREQUESTS`: The Worker does not send enough subrequests to valid back-end services.
+- `SUCCESS`: The Worker has been successfully analyzed and will be optimized by Smart Placement.
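For scripted checks, the same lookup can be done from JavaScript. This is only a sketch: it assumes `ACCOUNT_ID`, `WORKER_NAME`, and `API_TOKEN` environment variables, Node 18+ for the built-in `fetch`, and that the placement state is surfaced on the returned script metadata under the field path shown below, which is an assumption to verify against the raw `jq` output above.

```js
// Minimal sketch: fetch a Worker's metadata and log its Smart Placement state.
// ACCOUNT_ID, WORKER_NAME, and API_TOKEN are read from the environment.
const { ACCOUNT_ID, WORKER_NAME, API_TOKEN } = process.env;

const res = await fetch(
	`https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/workers/services/${WORKER_NAME}`,
	{ headers: { Authorization: `Bearer ${API_TOKEN}` } },
);
const { result } = await res.json();

// A missing field maps to the "(not present)" state in the list above.
// The property path used here is assumed, not a documented contract.
const state = result?.default_environment?.script?.placement_status;
console.log(state ?? "(not present)");
```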
### Request Duration Analytics

@@ -117,10 +117,8 @@ For example, the `cf-placement: remote-LHR` header's `remote` value indicates th

:::caution[Beta use only]

-
We may remove the `cf-placement` header before Smart Placement enters general availability.

-
:::

## Best practices
diff --git a/src/content/docs/workers/configuration/versions-and-deployments/gradual-deployments.mdx b/src/content/docs/workers/configuration/versions-and-deployments/gradual-deployments.mdx
index 779934f4b766ca..7db6f455c6fa54 100644
--- a/src/content/docs/workers/configuration/versions-and-deployments/gradual-deployments.mdx
+++ b/src/content/docs/workers/configuration/versions-and-deployments/gradual-deployments.mdx
@@ -6,10 +6,9 @@ description: Incrementally deploy code changes to your Workers with gradual depl
 sidebar:
   badge:
     text: Beta
-
---

-import { Example } from "~/components"
+import { Example } from "~/components";

Gradual Deployments give you the ability to incrementally deploy new [versions](/workers/configuration/versions-and-deployments/#versions) of Workers by splitting traffic across versions.

@@ -17,37 +16,33 @@ Gradual Deployments give you the ability to incrementally deploy new [versions](

Using gradual deployments, you can:

-* Gradually shift traffic to a newer version of your Worker.
-* Monitor error rates and exceptions across versions using [analytics and logging](/workers/configuration/versions-and-deployments/gradual-deployments/#observability) tooling.
-* [Roll back](/workers/configuration/versions-and-deployments/rollbacks/) to a previously stable version if you notice issues when deploying a new version.
+- Gradually shift traffic to a newer version of your Worker.
+- Monitor error rates and exceptions across versions using [analytics and logging](/workers/configuration/versions-and-deployments/gradual-deployments/#observability) tooling.
+- [Roll back](/workers/configuration/versions-and-deployments/rollbacks/) to a previously stable version if you notice issues when deploying a new version.

:::caution

-
Gradual deployments are in **beta and under active development**. Review [Limits](/workers/configuration/versions-and-deployments/gradual-deployments/#limits) associated with rollbacks before using this feature. Provide your feedback on the rollbacks feature through the [feedback form](https://www.cloudflare.com/lp/developer-week-deployments).

-
:::

## Use gradual deployments

The following section guides you through an example usage of gradual deployments. You will choose to use either [Wrangler](/workers/configuration/versions-and-deployments/gradual-deployments/#via-wrangler) or the [Cloudflare dashboard](/workers/configuration/versions-and-deployments/gradual-deployments/#via-the-cloudflare-dashboard) to:

-* Create a new Worker.
-* Publish a new version of that Worker without deploying it.
-* Create a gradual deployment between the two versions.
-* Progress the deployment of the new version to 100% of traffic.
+- Create a new Worker.
+- Publish a new version of that Worker without deploying it.
+- Create a gradual deployment between the two versions.
+- Progress the deployment of the new version to 100% of traffic.

### Via Wrangler

:::note

-
Minimum required wrangler version: 3.40.0.

-
:::

#### 1. Create and deploy a new Worker

Create a new `"Hello World"` Worker using the [`create-cloudflare` CLI (C3)](/pages/get-started/c3/) and deploy it.
```sh -$ npm create cloudflare@latest -- --type=hello-world +npm create cloudflare@latest -- --type=hello-world ``` Answer `yes` or `no` to using TypeScript. Answer `yes` to deploying your application. This is the first version of your Worker. @@ -65,7 +60,7 @@ Answer `yes` or `no` to using TypeScript. Answer `yes` to deploying your applica To create a new version of the Worker, edit the Worker code by changing the `Response` content to your desired text and upload the Worker by using the [`wrangler versions upload`](/workers/wrangler/commands/#upload) command. ```sh -$ npx wrangler versions upload --experimental-versions +npx wrangler versions upload --experimental-versions ``` This will create a new version of the Worker that is not automatically deployed. @@ -76,7 +71,7 @@ Use the [`wrangler versions deploy`](/workers/wrangler/commands/#deploy-2) comma create a new deployment that splits traffic between two versions of the Worker. Follow the interactive prompts to create a deployment with the versions uploaded in [step #1](/workers/configuration/versions-and-deployments/gradual-deployments/#1-create-and-deploy-a-new-worker) and [step #2](/workers/configuration/versions-and-deployments/gradual-deployments/#2-create-a-new-version-of-the-worker). Select your desired percentages for each version. ```sh -$ npx wrangler versions deploy --experimental-versions +npx wrangler versions deploy --experimental-versions ``` #### 4. Test the split deployment @@ -99,7 +94,7 @@ You can test also target a specific version using [version overrides](#version-o Run `wrangler versions deploy` again and follow the interactive prompts. Select the version uploaded in [step 2](/workers/configuration/versions-and-deployments/gradual-deployments/#2-create-a-new-version-of-the-worker) and set it to 100% deployment. ```sh -$ npx wrangler versions deploy --experimental-versions +npx wrangler versions deploy --experimental-versions ``` ### Via the Cloudflare dashboard @@ -130,7 +125,7 @@ You may want requests associated with a particular identifier (such as user, ses You can do this by setting the `Cloudflare-Workers-Version-Key` header on the incoming request to your Worker. For example: ```sh -$ curl -s https://$SCRIPT_NAME.$SUBDOMAIN.workers.dev -H 'Cloudflare-Workers-Version-Key: foo' +curl -s https://$SCRIPT_NAME.$SUBDOMAIN.workers.dev -H 'Cloudflare-Workers-Version-Key: foo' ``` For a given [deployment](/workers/configuration/versions-and-deployments/#deployments), all requests with a version key set to `foo` will be handled by the same version of your Worker. The specific version of your Worker that the version key `foo` corresponds to is determined by the percentages you have configured for each Worker version in your deployment. @@ -151,7 +146,7 @@ Text in **Expression Editor**: starts_with(http.request.uri.path, "/asset/") ``` -Selected operation under **Modify request header**: *Set dynamic* +Selected operation under **Modify request header**: _Set dynamic_ **Header name**: `Cloudflare-Workers-Version-Key` @@ -166,7 +161,7 @@ You can use version overrides to send a request to a specific version of your Wo To specify a version override in your request, you can set the `Cloudflare-Workers-Version-Overrides` header on the request to your Worker. 
For example: ```sh -$ curl -s https://$SCRIPT_NAME.$SUBDOMAIN.workers.dev -H 'Cloudflare-Workers-Version-Overrides: my-worker-name="dc8dcd28-271b-4367-9840-6c244f84cb40"' +curl -s https://$SCRIPT_NAME.$SUBDOMAIN.workers.dev -H 'Cloudflare-Workers-Version-Overrides: my-worker-name="dc8dcd28-271b-4367-9840-6c244f84cb40"' ``` `Cloudflare-Workers-Version-Overrides` is a [Dictionary Structured Header](https://www.rfc-editor.org/rfc/rfc8941#name-dictionaries). @@ -177,17 +172,15 @@ A version override will only be applied if the specified version is in the curre :::note[Verifying that the version override was applied] - There are a number of reasons why a request's version override may not be applied. For example: -* The deployment containing the specified version may not have propagated yet. -* The header value may not be a valid [Dictionary](https://www.rfc-editor.org/rfc/rfc8941#name-dictionaries). +- The deployment containing the specified version may not have propagated yet. +- The header value may not be a valid [Dictionary](https://www.rfc-editor.org/rfc/rfc8941#name-dictionaries). In the case that a request's version override is not applied, the request will be routed according to the percentages set in the gradual deployment configuration. To make sure that the request's version override was applied correctly, you can [observe](#observability) the version of your Worker that was invoked. You could even automate this check by using the [runtime binding](#runtime-binding) to return the version in the Worker's response. - ::: ### Example @@ -210,7 +203,7 @@ Create a new deployment using [`wrangler versions deploy --experimental-versions Now test the new version with a version override before gradually progressing the new version to 100%: ```sh -$ curl -s https://$SCRIPT_NAME.$SUBDOMAIN.workers.dev -H 'Cloudflare-Workers-Version-Overrides: my-worker-name="dc8dcd28-271b-4367-9840-6c244f84cb40"' +curl -s https://$SCRIPT_NAME.$SUBDOMAIN.workers.dev -H 'Cloudflare-Workers-Version-Overrides: my-worker-name="dc8dcd28-271b-4367-9840-6c244f84cb40"' ``` ## Gradual deployments for Durable Objects @@ -238,18 +231,16 @@ Here is how the versions of your Durable Objects might change as you progress yo This is only an example, so the versions assigned to your Durable Objects may be different. However, the following is guaranteed: -* For a given deployment, requests to each Durable Object will always use the same Worker version. -* When you specify each version in the same order as the previous deployment and increase the percentage of a version, Durable Objects which were previously assigned that version will not be assigned a different version. In this example, Durable Object "foo" would never revert from version "B" to version "A". -* The Durable Object will only be [reset](/durable-objects/observability/troubleshooting/#durable-object-reset-because-its-code-was-updated) when it is assigned a different version, so each Durable Object will only be reset once in this example. +- For a given deployment, requests to each Durable Object will always use the same Worker version. +- When you specify each version in the same order as the previous deployment and increase the percentage of a version, Durable Objects which were previously assigned that version will not be assigned a different version. In this example, Durable Object "foo" would never revert from version "B" to version "A". 
+- The Durable Object will only be [reset](/durable-objects/observability/troubleshooting/#durable-object-reset-because-its-code-was-updated) when it is assigned a different version, so each Durable Object will only be reset once in this example. :::note - Typically, your Durable Object Worker will define both your Durable Object class and the Worker that interacts with it. In this case, you cannot deploy changes to your Durable Object and its Worker independently. You should ensure that API changes between your Durable Object and its Worker are [forwards and backwards compatible](/durable-objects/platform/known-issues/#code-updates) whether you are using gradual deployments or not. However, using gradual deployments will make it even more likely that different versions of your Durable Objects and its Worker will interact with each other. - ::: ## Observability diff --git a/src/content/docs/workers/databases/connecting-to-databases.mdx b/src/content/docs/workers/databases/connecting-to-databases.mdx index b39701051f8e11..474b41b8d23c3f 100644 --- a/src/content/docs/workers/databases/connecting-to-databases.mdx +++ b/src/content/docs/workers/databases/connecting-to-databases.mdx @@ -5,16 +5,13 @@ sidebar: order: 1 head: [] description: Learn about the different kinds of database integrations Cloudflare supports. - --- Cloudflare Workers can connect to and query your data in both SQL and NoSQL databases, including: -* Traditional hosted relational databases, including Postgres and MySQL. -* Serverless databases: Supabase, MongoDB Atlas, PlanetScale, FaunaDB, and Prisma. -* Cloudflare's own [D1](/d1/), a serverless SQL-based database. - - +- Traditional hosted relational databases, including Postgres and MySQL. +- Serverless databases: Supabase, MongoDB Atlas, PlanetScale, FaunaDB, and Prisma. +- Cloudflare's own [D1](/d1/), a serverless SQL-based database. | Database | Integration | Library or Driver | Connection Method | | --------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------------------------------- | ------------------------------------------------------------------ | @@ -31,11 +28,9 @@ Cloudflare Workers can connect to and query your data in both SQL and NoSQL data | [Upstash Redis](https://blog.cloudflare.com/cloudflare-workers-database-integration-with-upstash/) | [Yes](/workers/databases/native-integrations/upstash/) | [@upstash/redis](https://github.com/upstash/upstash-redis) | API via client library | | [TiDB Cloud](https://docs.pingcap.com/tidbcloud/integrate-tidbcloud-with-cloudflare) | No | [@tidbcloud/serverless](https://github.com/tidbcloud/serverless-js) | API via client library | - - :::note -If you do not see an integration listed or have an integration to add, complete and submit the [Cloudflare Developer Platform Integration form](https://forms.gle/iaUqLWE8aezSEhgd6). +If you do not see an integration listed or have an integration to add, complete and submit the [Cloudflare Developer Platform Integration form](https://forms.gle/iaUqLWE8aezSEhgd6). ::: Once you have installed the necessary packages, use the APIs provided by these packages to connect to your database and perform operations on it. Refer to detailed links for service-specific instructions. 
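As a concrete illustration of that pattern, here is a minimal sketch using the serverless Postgres driver from the table above. The `DATABASE_URL` secret name is an assumption; use whatever binding your own setup defines.

```js
import { neon } from "@neondatabase/serverless";

export default {
	async fetch(request, env) {
		// Build the client per request from a secret binding; most drivers in
		// the table above follow this same env-driven shape.
		const sql = neon(env.DATABASE_URL);
		const rows = await sql`SELECT now() AS current_time`;
		return Response.json(rows);
	},
};
```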
@@ -54,7 +49,7 @@ There are four ways to connect to a database from a Worker:

If your database requires authentication, use Wrangler secrets to securely store your credentials. To do this, create a secret in your Cloudflare Workers project using the following [`wrangler secret`](/workers/wrangler/commands/#secret) command:

```sh
-$ wrangler secret put <SECRET_NAME>
+wrangler secret put <SECRET_NAME>
```

Then, retrieve the secret value in your code using the following code snippet:

@@ -69,6 +64,6 @@ For services that require mTLS authentication, use [mTLS certificates](/workers/

## Next steps

-* Learn how to connect to [an existing PostgreSQL database](/hyperdrive/) with Hyperdrive.
-* Discover [other storage options available](/workers/platform/storage-options/) for use with Workers.
-* [Create your first database](/d1/get-started/) with Cloudflare D1.
+- Learn how to connect to [an existing PostgreSQL database](/hyperdrive/) with Hyperdrive.
+- Discover [other storage options available](/workers/platform/storage-options/) for use with Workers.
+- [Create your first database](/d1/get-started/) with Cloudflare D1.
diff --git a/src/content/docs/workers/databases/native-integrations/neon.mdx b/src/content/docs/workers/databases/native-integrations/neon.mdx
index dfd72178e1ecd5..70c00478e253ea 100644
--- a/src/content/docs/workers/databases/native-integrations/neon.mdx
+++ b/src/content/docs/workers/databases/native-integrations/neon.mdx
@@ -1,10 +1,9 @@
---
pcx_content_type: configuration
title: Neon
-
---

-import { Render } from "~/components"
+import { Render } from "~/components";

[Neon](https://neon.tech/) is a fully managed serverless PostgreSQL. It separates storage and compute to offer modern developer features, such as serverless, branching, and bottomless storage.

@@ -55,7 +54,7 @@ To set up an integration with Neon:

5. In your Worker, install the `@neondatabase/serverless` driver to connect to your database and start manipulating data:

   ```sh
-   $ npm install @neondatabase/serverless
+   npm install @neondatabase/serverless
   ```

6. The following example shows how to make a query to your Neon database in a Worker. The credentials needed to connect to Neon have been automatically added as secrets to your Worker through the integration.
diff --git a/src/content/docs/workers/databases/native-integrations/planetscale.mdx b/src/content/docs/workers/databases/native-integrations/planetscale.mdx
index 49b596dee378d0..fa406d18ce1870 100644
--- a/src/content/docs/workers/databases/native-integrations/planetscale.mdx
+++ b/src/content/docs/workers/databases/native-integrations/planetscale.mdx
@@ -1,10 +1,9 @@
---
pcx_content_type: configuration
title: PlanetScale
-
---

-import { Render } from "~/components"
+import { Render } from "~/components";

[PlanetScale](https://planetscale.com/) is a MySQL-compatible platform that makes databases infinitely scalable, easier and safer to manage.

@@ -46,36 +45,36 @@ To set up an integration with PlanetScale:

5. In your Worker, install the `@planetscale/database` driver to connect to your PlanetScale database and start manipulating data:

   ```sh
-   $ npm install @planetscale/database
+   npm install @planetscale/database
   ```

6. The following example shows how to make a query to your PlanetScale database in a Worker. The credentials needed to connect to PlanetScale have been automatically added as secrets to your Worker through the integration.
```js - import { connect } from '@planetscale/database'; + import { connect } from "@planetscale/database"; export default { - async fetch(request, env) { - const config = { - host: env.DATABASE_HOST, - username: env.DATABASE_USERNAME, - password: env.DATABASE_PASSWORD, - // see https://github.com/cloudflare/workerd/issues/698 - fetch: (url, init) => { - delete (init)["cache"]; - return fetch(url, init); - } - } - - const conn = connect(config) - const data = await conn.execute('SELECT * FROM products;') - return new Response(JSON.stringify(data.rows), { - status: 200, - headers: { - 'Content-Type': 'application/json' - } - }); - }, + async fetch(request, env) { + const config = { + host: env.DATABASE_HOST, + username: env.DATABASE_USERNAME, + password: env.DATABASE_PASSWORD, + // see https://github.com/cloudflare/workerd/issues/698 + fetch: (url, init) => { + delete init["cache"]; + return fetch(url, init); + }, + }; + + const conn = connect(config); + const data = await conn.execute("SELECT * FROM products;"); + return new Response(JSON.stringify(data.rows), { + status: 200, + headers: { + "Content-Type": "application/json", + }, + }); + }, }; ``` diff --git a/src/content/docs/workers/databases/native-integrations/supabase.mdx b/src/content/docs/workers/databases/native-integrations/supabase.mdx index 6c6e01d9109d8a..1bae560615c0c8 100644 --- a/src/content/docs/workers/databases/native-integrations/supabase.mdx +++ b/src/content/docs/workers/databases/native-integrations/supabase.mdx @@ -1,10 +1,9 @@ --- pcx_content_type: configuration title: Supabase - --- -import { Render } from "~/components" +import { Render } from "~/components"; [Supabase](https://supabase.com/) is an open source Firebase alternative and a PostgreSQL database service that offers real-time functionality, database backups, and extensions. With Supabase, developers can quickly set up a PostgreSQL database and build applications. @@ -18,8 +17,8 @@ To set up an integration with Supabase: 2. Create a `countries` table with the following query. You can create a table in your Supabase dashboard in two ways: - * Use the table editor, which allows you to set up Postgres similar to a spreadsheet. - * Alternatively, use the [SQL editor](https://supabase.com/docs/guides/database/overview#the-sql-editor): + - Use the table editor, which allows you to set up Postgres similar to a spreadsheet. + - Alternatively, use the [SQL editor](https://supabase.com/docs/guides/database/overview#the-sql-editor): ```sql CREATE TABLE countries ( @@ -44,10 +43,10 @@ To set up an integration with Supabase: 4. Select **Integrations** > **Supabase**. 5. Follow the setup flow, selecting the database created in step 1. -5. In your Worker, install the `@supabase/supabase-js` driver to connect to your database and start manipulating data: +5. In your Worker, install the `@supabase/supabase-js` driver to connect to your database and start manipulating data: ```sh - $ npm install @supabase/supabase-js + npm install @supabase/supabase-js ``` 6. The following example shows how to make a query to your Supabase database in a Worker. The credentials needed to connect to Supabase have been automatically added as secrets to your Worker through the integration. 
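The example itself is unchanged and therefore elided from this hunk. For reference, a query against the `countries` table created above looks roughly like the following sketch. The `SUPABASE_URL` and `SUPABASE_KEY` secret names are assumptions; check the names the integration actually created for your Worker.

```js
import { createClient } from "@supabase/supabase-js";

export default {
	async fetch(request, env) {
		// Secret names are assumptions; the integration adds them for you.
		const supabase = createClient(env.SUPABASE_URL, env.SUPABASE_KEY);
		const { data, error } = await supabase.from("countries").select("*");
		if (error) {
			return new Response(error.message, { status: 500 });
		}
		return Response.json(data);
	},
};
```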
diff --git a/src/content/docs/workers/databases/native-integrations/turso.mdx b/src/content/docs/workers/databases/native-integrations/turso.mdx
index 77a8194f4badd0..0bb84b8f9e7672 100644
--- a/src/content/docs/workers/databases/native-integrations/turso.mdx
+++ b/src/content/docs/workers/databases/native-integrations/turso.mdx
@@ -1,10 +1,9 @@
---
pcx_content_type: configuration
title: Turso
-
---

-import { Render } from "~/components"
+import { Render } from "~/components";

[Turso](https://turso.tech/) is an edge-hosted, distributed database based on [libSQL](https://libsql.org/), an open-source fork of SQLite. Turso was designed to minimize query latency for applications where queries come from anywhere in the world.

@@ -18,23 +17,25 @@ To set up an integration with Turso:

```sh
# On macOS and Linux with homebrew
-$ brew install tursodatabase/tap/turso
+brew install tursodatabase/tap/turso

# Manual scripted installation
-$ curl -sSfL https://get.tur.so/install.sh | bash
+curl -sSfL https://get.tur.so/install.sh | bash
```

Next, run the following command to make sure the Turso CLI is installed:

```sh
-$ turso --version
+turso --version
```

2. Before you create your first Turso database, you have to authenticate with your GitHub account by running:

```sh
-$ turso auth login
+turso auth login
+```

+```sh output
Waiting for authentication...
✔ Success! Logged in as 
```

After you have authenticated, you can create a database using the command `turso db create <DATABASE_NAME>`. Turso will create a database and automatically choose a location closest to you.

```sh
-$ turso db create my-db
+turso db create my-db
+```
+
+```sh output
# Example:

Creating database my-db in Amsterdam, Netherlands (ams)

Created database my-db in Amsterdam, Netherlands (ams) in 13 seconds.

@@ -54,7 +58,7 @@ With the first database created, you can now connect to it directly and execute SQL queries against it.

```sh
-$ turso db shell my-db
+turso db shell my-db
```

3. Copy the following SQL query into the shell you just opened:

@@ -91,57 +95,57 @@ $ turso db shell my-db

5. In your Worker, install the Turso client library:

```sh
-$ npm install @libsql/client
+npm install @libsql/client
```

6. The following example shows how to make a query to your Turso database in a Worker. The credentials needed to connect to Turso have been automatically added as [secrets](/workers/configuration/secrets/) to your Worker through the integration.
```ts
- import { Client as LibsqlClient, createClient } from "@libsql/client/web";
-
- export interface Env {
- TURSO_URL?: string;
- TURSO_AUTH_TOKEN?: string;
- }
-
- export default {
- async fetch(request, env, ctx): Promise<Response> {
- const client = buildLibsqlClient(env);
-
- try {
- const res = await client.execute('SELECT * FROM elements');
- return new Response(JSON.stringify(res), {
- status: 200,
- headers: { 'Content-Type': 'application/json' },
- });
- } catch (error) {
- console.error('Error executing SQL query:', error);
- return new Response(JSON.stringify({ error: 'Internal Server Error' }), {
- status: 500
- });
- }
- },
- } satisfies ExportedHandler<Env>;
-
- function buildLibsqlClient(env: Env): LibsqlClient {
- const url = env.TURSO_URL?.trim();
- if (url === undefined) {
- throw new Error("TURSO_URL env var is not defined");
- }
-
- const authToken = env.TURSO_AUTH_TOKEN?.trim();
- if (authToken == undefined) {
- throw new Error("TURSO_AUTH_TOKEN env var is not defined");
- }
-
- return createClient({ url, authToken })
- }
+import { Client as LibsqlClient, createClient } from "@libsql/client/web";
+
+export interface Env {
+ TURSO_URL?: string;
+ TURSO_AUTH_TOKEN?: string;
+}
+
+export default {
+ async fetch(request, env, ctx): Promise<Response> {
+ const client = buildLibsqlClient(env);
+
+ try {
+ const res = await client.execute("SELECT * FROM elements");
+ return new Response(JSON.stringify(res), {
+ status: 200,
+ headers: { "Content-Type": "application/json" },
+ });
+ } catch (error) {
+ console.error("Error executing SQL query:", error);
+ return new Response(JSON.stringify({ error: "Internal Server Error" }), {
+ status: 500,
+ });
+ }
+ },
+} satisfies ExportedHandler<Env>;
+
+function buildLibsqlClient(env: Env): LibsqlClient {
+ const url = env.TURSO_URL?.trim();
+ if (url === undefined) {
+ throw new Error("TURSO_URL env var is not defined");
+ }
+
+ const authToken = env.TURSO_AUTH_TOKEN?.trim();
+ if (authToken == undefined) {
+ throw new Error("TURSO_AUTH_TOKEN env var is not defined");
+ }
+
+ return createClient({ url, authToken });
+}
```

-* The libSQL client library import `@libsql/client/web` must be imported exactly as shown when working with Cloudflare Workers. The non-web import will not work in the Workers environment.
-* The `Env` interface contains the [environment variable](/workers/configuration/environment-variables/) and [secret](/workers/configuration/secrets/) defined when you added the Turso integration in step 4.
-* The `Env` interface also caches the libSQL client object and router, which was created on the first request to the Worker.
-* The Worker uses `buildLibsqlClient` to query the `elements` database and returns the response as a JSON object.
+- The libSQL client library must be imported from `@libsql/client/web` exactly as shown when working with Cloudflare Workers. The non-web import will not work in the Workers environment.
+- The `Env` interface contains the [environment variable](/workers/configuration/environment-variables/) and [secret](/workers/configuration/secrets/) defined when you added the Turso integration in step 4.
+- The `Env` interface also caches the libSQL client object and router, which was created on the first request to the Worker.
+- The Worker uses `buildLibsqlClient` to query the `elements` database and returns the response as a JSON object.

With your environment configured and your code ready, you can now test your Worker locally before you deploy.
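One note before moving on: writes follow the same shape as the read in the example above. A short sketch reusing the `client` returned by `buildLibsqlClient()`; the column names are illustrative only, since the real schema comes from the SQL you ran in step 3.

```js
// Parameterized write with the same client; the `args` values are bound by
// the libSQL client rather than interpolated into the SQL string.
const result = await client.execute({
	sql: "INSERT INTO elements (name, symbol) VALUES (?, ?)",
	args: ["Neon", "Ne"],
});
console.log(result.rowsAffected);
```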
diff --git a/src/content/docs/workers/databases/native-integrations/upstash.mdx b/src/content/docs/workers/databases/native-integrations/upstash.mdx index 8a50fc54de238d..1d6380a240719e 100644 --- a/src/content/docs/workers/databases/native-integrations/upstash.mdx +++ b/src/content/docs/workers/databases/native-integrations/upstash.mdx @@ -1,10 +1,9 @@ --- pcx_content_type: configuration title: Upstash - --- -import { Render } from "~/components" +import { Render } from "~/components"; [Upstash](https://upstash.com/) is a serverless database with Redis\* and Kafka API. Upstash also offers QStash, a task queue/scheduler designed for the serverless. @@ -17,14 +16,31 @@ To set up an integration with Upstash: 1. You need an existing Upstash database to connect to. [Create an Upstash database](https://docs.upstash.com/redis#create-a-database) or [load data from an existing database to Upstash](https://docs.upstash.com/redis/howto/connectclient). 2. Insert some data to your Upstash database. You can add data to your Upstash database in two ways: - * Use the CLI directly from your Upstash console. - * Alternatively, install [redis-cli](https://redis.io/docs/getting-started/installation/) locally and run the following commands. + + - Use the CLI directly from your Upstash console. + - Alternatively, install [redis-cli](https://redis.io/docs/getting-started/installation/) locally and run the following commands. + ```sh - $ set GB "Ey up?" + set GB "Ey up?" + ``` + + ```sh output OK - $ set US "Yo, what’s up?" + ``` + + ```sh + set US "Yo, what’s up?" + ``` + + ```sh output OK - $ set NL "Hoi, hoe gaat het?" + ``` + + ```sh + set NL "Hoi, hoe gaat het?" + ``` + + ```sh output OK ``` @@ -39,7 +55,7 @@ To set up an integration with Upstash: 4. In your Worker, install the `@upstash/redis`, a HTTP client to connect to your database and start manipulating data: ```sh - $ npm install @upstash/redis + npm install @upstash/redis ``` 5. The following example shows how to make a query to your Upstash database in a Worker. The credentials needed to connect to Upstash have been automatically added as secrets to your Worker through the integration. @@ -48,30 +64,28 @@ To set up an integration with Upstash: import { Redis } from "@upstash/redis/cloudflare"; export default { - async fetch(request, env) { - const redis = Redis.fromEnv(env); - - const country = request.headers.get("cf-ipcountry"); - if (country) { - const greeting = await redis.get(country); - if (greeting) { - return new Response(greeting); - } - } - - return new Response("Hello What's up!"); - }, + async fetch(request, env) { + const redis = Redis.fromEnv(env); + + const country = request.headers.get("cf-ipcountry"); + if (country) { + const greeting = await redis.get(country); + if (greeting) { + return new Response(greeting); + } + } + + return new Response("Hello What's up!"); + }, }; ``` :::note - `Redis.fromEnv(env)` automatically picks up the default `url` and `token` names created in the integration. If you have renamed the secrets, you must declare them explicitly like in the [Upstash basic example](https://docs.upstash.com/redis/sdks/redis-ts/getstarted#basic-usage). - ::: To learn more about Upstash, refer to the [Upstash documentation](https://docs.upstash.com/redis). @@ -93,7 +107,7 @@ To set up an integration with Upstash Kafka: 3. In your Worker, install `@upstash/kafka`, a HTTP/REST based Kafka client: ```sh - $ npm install @upstash/kafka + npm install @upstash/kafka ``` 4. 
Use the [upstash-kafka](https://github.com/upstash/upstash-kafka/blob/main/README.md) JavaScript SDK to send data to Kafka.

@@ -117,7 +131,7 @@ To set up an integration with Upstash QStash:

3. In your Worker, install `@upstash/qstash`, an HTTP client to connect to your QStash endpoint:

   ```sh
-   $ npm install @upstash/qstash
+   npm install @upstash/qstash
   ```

4. Refer to the [Upstash documentation on how to receive webhooks from QStash in your Cloudflare Worker](https://docs.upstash.com/qstash/quickstarts/cloudflare-workers#3-use-qstash-in-your-handler).
diff --git a/src/content/docs/workers/databases/native-integrations/xata.mdx b/src/content/docs/workers/databases/native-integrations/xata.mdx
index a4dfdc950091f0..fcfaa3cf179fbb 100644
--- a/src/content/docs/workers/databases/native-integrations/xata.mdx
+++ b/src/content/docs/workers/databases/native-integrations/xata.mdx
@@ -1,10 +1,9 @@
---
pcx_content_type: configuration
title: Xata
-
---

-import { Render } from "~/components"
+import { Render } from "~/components";

[Xata](https://xata.io) is a serverless data platform powered by PostgreSQL. Xata uniquely combines multiple types of stores (relational databases, search engines, analytics engines) into a single service, accessible through a consistent REST API.

@@ -29,15 +28,15 @@ To set up an integration with Xata:

4. Install the [Xata CLI](https://xata.io/docs/getting-started/installation) and authenticate the CLI by running the following commands:

   ```sh
-   $ npm install -g @xata.io/cli
+   npm install -g @xata.io/cli

-   $ xata auth login
+   xata auth login
   ```

5. Once you have the CLI set up, run the following command in the root directory of your project:

   ```sh
-   $ xata init
+   xata init
   ```

   Accept the default settings during the configuration process. After completion, a `.env` and `.xatarc` file will be generated in your project folder.

@@ -54,23 +53,23 @@ To set up an integration with Xata:

   ```ts
   export default {
-    async fetch(request, env, ctx): Promise<Response> {
-      const xata = new XataClient({
-        apiKey: env.XATA_API_KEY,
-        branch: env.XATA_BRANCH,
-        databaseURL: env.XATA_DATABASE_URL,
-      });
-
-      const records = await xata.db.Posts.select([
-        "id",
-        "title",
-        "author.name",
-        "author.email",
-        "author.bio",
-      ]).getAll();
-
-      return Response.json(records);
-    },
+    async fetch(request, env, ctx): Promise<Response> {
+      const xata = new XataClient({
+        apiKey: env.XATA_API_KEY,
+        branch: env.XATA_BRANCH,
+        databaseURL: env.XATA_DATABASE_URL,
+      });
+
+      const records = await xata.db.Posts.select([
+        "id",
+        "title",
+        "author.name",
+        "author.email",
+        "author.bio",
+      ]).getAll();
+
+      return Response.json(records);
+    },
   } satisfies ExportedHandler<Env>;
   ```
diff --git a/src/content/docs/workers/examples/cron-trigger.mdx b/src/content/docs/workers/examples/cron-trigger.mdx
index bdfffa15f516e9..e5101c96099ab4 100644
--- a/src/content/docs/workers/examples/cron-trigger.mdx
+++ b/src/content/docs/workers/examples/cron-trigger.mdx
@@ -71,7 +71,7 @@ The recommended way of testing Cron Triggers is using Wrangler.

Cron Triggers can be tested using Wrangler by passing in the `--test-scheduled` flag to [`wrangler dev`](/workers/wrangler/commands/#dev). This will expose a `/__scheduled` route which can be used to test using a HTTP request. To simulate different cron patterns, a `cron` query parameter can be passed in.
```sh -$ npx wrangler dev --test-scheduled +npx wrangler dev --test-scheduled -$ curl "http://localhost:8787/__scheduled?cron=0+*+*+*+*" +curl "http://localhost:8787/__scheduled?cron=0+*+*+*+*" ``` diff --git a/src/content/docs/workers/examples/multiple-cron-triggers.mdx b/src/content/docs/workers/examples/multiple-cron-triggers.mdx index bb6c67f162db5e..c32914f9d16ebc 100644 --- a/src/content/docs/workers/examples/multiple-cron-triggers.mdx +++ b/src/content/docs/workers/examples/multiple-cron-triggers.mdx @@ -79,7 +79,7 @@ The recommended way of testing Cron Triggers is using Wrangler. Cron Triggers can be tested using Wrangler by passing in the `--test-scheduled` flag to [`wrangler dev`](/workers/wrangler/commands/#dev). This will expose a `/__scheduled` route which can be used to test using a HTTP request. To simulate different cron patterns, a `cron` query parameter can be passed in. ```sh -$ npx wrangler dev --test-scheduled +npx wrangler dev --test-scheduled -$ curl "http://localhost:8787/__scheduled?cron=*%2F3+*+*+*+*" +curl "http://localhost:8787/__scheduled?cron=*%2F3+*+*+*+*" ``` diff --git a/src/content/docs/workers/get-started/guide.mdx b/src/content/docs/workers/get-started/guide.mdx index 30a778e641b899..75f3eec08392db 100644 --- a/src/content/docs/workers/get-started/guide.mdx +++ b/src/content/docs/workers/get-started/guide.mdx @@ -6,10 +6,9 @@ sidebar: head: - tag: title content: Get started - CLI - --- -import { Details, Render, PackageManagers } from "~/components" +import { Details, Render, PackageManagers } from "~/components"; Set up and deploy your first Worker with Wrangler, the Cloudflare Developer Platform CLI. @@ -25,17 +24,28 @@ This guide will instruct you through setting up and deploying your first Worker. Open a terminal window and run C3 to create your Worker project: - - - + + + Now, you have a new project set up. Move into that project folder. ```sh -$ cd my-first-worker +cd my-first-worker ``` -
In your project directory, C3 will have generated the following: @@ -46,35 +56,32 @@ In your project directory, C3 will have generated the following: 4. `package-lock.json`: Refer to [`npm` documentation on `package-lock.json`](https://docs.npmjs.com/cli/v9/configuring-npm/package-lock-json). 5. `node_modules`: Refer to [`npm` documentation `node_modules`](https://docs.npmjs.com/cli/v7/configuring-npm/folders#node-modules). -
-
In addition to creating new projects from C3 templates, C3 also supports creating new projects from Git repositories. To create a new project from a Git repository, open your terminal and run:

```sh
-$ npm create cloudflare@latest -- --template <source>
+npm create cloudflare@latest -- --template <source>
```

`<source>` may be any of the following:

- `user/repo` (GitHub)
- `git@github.com:user/repo`
- `https://github.com/user/repo`
- `user/repo/some-template` (subdirectories)
- `user/repo#canary` (branches)
- `user/repo#1234abcd` (commit hash)
- `bitbucket:user/repo` (Bitbucket)
- `gitlab:user/repo` (GitLab)

At a minimum, template folders must contain the following:

- `package.json`
- `wrangler.toml`
- `src/` containing a worker script referenced from `wrangler.toml`
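At its smallest, the `src/` entry point of such a template can be the familiar hello-world module, with a `package.json` and a `wrangler.toml` whose `main` points at it. A sketch, not a published template:

```js
// src/index.js — the worker script the template's wrangler.toml references,
// for example via `main = "src/index.js"`.
export default {
	async fetch(request) {
		return new Response("Hello from a template!");
	},
};
```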
@@ -85,19 +92,17 @@ The Workers command-line interface, [Wrangler](/workers/wrangler/install-and-upd After you have created your first Worker, run the [`wrangler dev`](/workers/wrangler/commands/#dev) command in the project directory to start a local server for developing your Worker. This will allow you to preview your Worker locally during development. ```sh -$ npx wrangler dev +npx wrangler dev ``` If you have not used Wrangler before, it will try to open your web browser to login with your Cloudflare account. Go to [http://localhost:8787](http://localhost:8787) to view your Worker. -
If you have issues with this step or you do not have access to a browser interface, refer to the [`wrangler login`](/workers/wrangler/commands/#login) documentation. -
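You can also exercise the local server from a script rather than the browser. A quick smoke test from Node 18+, where the port is `wrangler dev`'s default:

```js
// Hit the local dev server started by `wrangler dev` and print the reply.
const res = await fetch("http://localhost:8787");
console.log(res.status, await res.text()); // expect: 200 "Hello World!"
```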
## 3. Write code @@ -108,22 +113,21 @@ Find the `src/index.js` file. `index.js` will be populated with the code below: ```js title="Original index.js" export default { - async fetch(request, env, ctx) { - return new Response("Hello World!"); - }, + async fetch(request, env, ctx) { + return new Response("Hello World!"); + }, }; ``` -
This code block consists of a few different parts. ```js title="Updated index.js" {1} export default { - async fetch(request, env, ctx) { - return new Response("Hello World!"); - }, + async fetch(request, env, ctx) { + return new Response("Hello World!"); + }, }; ``` @@ -131,9 +135,9 @@ export default { ```js title="index.js" {2} export default { - async fetch(request, env, ctx) { - return new Response("Hello World!"); - }, + async fetch(request, env, ctx) { + return new Response("Hello World!"); + }, }; ``` @@ -143,30 +147,28 @@ Additionally, the `fetch` handler will always be passed three parameters: [`requ ```js title="index.js" {3} export default { - async fetch(request, env, ctx) { - return new Response("Hello World!"); - }, + async fetch(request, env, ctx) { + return new Response("Hello World!"); + }, }; ``` The Workers runtime expects `fetch` handlers to return a `Response` object or a Promise which resolves with a `Response` object. In this example, you will return a new `Response` with the string `"Hello World!"`. -
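The return value only has to satisfy that contract. For example, the same handler shape can return JSON instead of plain text, a small variation that is not part of the generated template:

```js
export default {
	async fetch(request, env, ctx) {
		// Response.json() serializes the body and sets the Content-Type header.
		return Response.json({ greeting: "Hello World!" });
	},
};
```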
Replace the content in your current `index.js` file with the content below, which changes the text output. ```js title="index.js" {3} export default { - async fetch(request, env, ctx) { - return new Response("Hello Worker!"); - }, + async fetch(request, env, ctx) { + return new Response("Hello Worker!"); + }, }; ``` Then, save the file and reload the page. Your Worker's output will have changed to the new text. -
If the output for your Worker does not change, make sure that: @@ -175,7 +177,6 @@ If the output for your Worker does not change, make sure that: 2. You have `wrangler dev` running. 3. You reloaded your browser. -
## 4. Deploy your project

@@ -183,26 +184,24 @@

Deploy your Worker via Wrangler to a `*.workers.dev` subdomain or a [Custom Domain](/workers/configuration/routing/custom-domains/).

```sh
-$ npx wrangler deploy
+npx wrangler deploy
```

If you have not configured any subdomain or domain, Wrangler will prompt you during the publish process to set one up. Preview your Worker at `<YOUR_WORKER>.<YOUR_SUBDOMAIN>.workers.dev`.

-
When pushing to your `*.workers.dev` subdomain for the first time, you may see [`523` errors](/support/troubleshooting/cloudflare-errors/troubleshooting-cloudflare-5xx-errors/#error-523-origin-is-unreachable) while DNS is propagating. These errors should resolve themselves after a minute or so. -
## Next steps To do more: -* Review our [Examples](/workers/examples/) and [Tutorials](/workers/tutorials/) for inspiration. -* Set up [bindings](/workers/runtime-apis/bindings/) to allow your Worker to interact with other resources and unlock new functionality. -* Learn how to [test and debug](/workers/testing/) your Workers. -* Read about [Workers limits and pricing](/workers/platform/). +- Review our [Examples](/workers/examples/) and [Tutorials](/workers/tutorials/) for inspiration. +- Set up [bindings](/workers/runtime-apis/bindings/) to allow your Worker to interact with other resources and unlock new functionality. +- Learn how to [test and debug](/workers/testing/) your Workers. +- Read about [Workers limits and pricing](/workers/platform/). diff --git a/src/content/docs/workers/get-started/quickstarts.mdx b/src/content/docs/workers/get-started/quickstarts.mdx index 2d33c1c6d27951..074bb4f69cd003 100644 --- a/src/content/docs/workers/get-started/quickstarts.mdx +++ b/src/content/docs/workers/get-started/quickstarts.mdx @@ -7,107 +7,101 @@ sidebar: head: [] description: GitHub repositories that are designed to be a starting point for building a new Cloudflare Workers project. - --- - - -import { LinkButton, WorkerStarter } from "~/components" +import { LinkButton, WorkerStarter } from "~/components"; Quickstarts are GitHub repositories that are designed to be a starting point for building a new Cloudflare Workers project. To start any of the projects below, run: ```sh -$ npm create cloudflare@latest -- --template +npm create cloudflare@latest -- --template ``` +- `new-project-name` + - A folder with this name will be created with your new project inside, pre-configured to [your Workers account](/workers/wrangler/configuration/). -* `new-project-name` - - * A folder with this name will be created with your new project inside, pre-configured to [your Workers account](/workers/wrangler/configuration/). - -* `template` - * This is the URL of the GitHub repo starter, as below. Refer to the [create-cloudflare documentation](/pages/get-started/c3/) for a full list of possible values. +- `template` + - This is the URL of the GitHub repo starter, as below. Refer to the [create-cloudflare documentation](/pages/get-started/c3/) for a full list of possible values. :::note[Cloudflare templates repository] - To access a full list of available Cloudflare templates, refer to the [Cloudflare templates repository](https://github.com/cloudflare/workers-sdk/tree/main/templates). - ::: ## Example Projects -*** +--- ## Frameworks - -*** +--- ## Built with Workers Get inspiration from other sites and projects out there that were built with Cloudflare Workers. 
-Built with Workers + + Built with Workers + diff --git a/src/content/docs/workers/languages/python/index.mdx b/src/content/docs/workers/languages/python/index.mdx index e2a1a88e98132c..0a428ebeb13f7f 100644 --- a/src/content/docs/workers/languages/python/index.mdx +++ b/src/content/docs/workers/languages/python/index.mdx @@ -9,16 +9,15 @@ head: - tag: title content: Write Cloudflare Workers in Python description: Write Workers in 100% Python - --- Cloudflare Workers provides first-class support for Python, including support for: -* The majority of Python's [Standard library](/workers/languages/python/stdlib/) -* All [bindings](/workers/runtime-apis/bindings/), including [Workers AI](/workers-ai/), [Vectorize](/vectorize), [R2](/r2), [KV](/kv), [D1](/d1), [Queues](/queues/), [Durable Objects](/durable-objects/), [Service Bindings](/workers/runtime-apis/bindings/service-bindings/) and more. -* [Environment Variables](/workers/configuration/environment-variables/), and [Secrets](/workers/configuration/secrets/) -* A robust [foreign function interface (FFI)](/workers/languages/python/ffi) that lets you use JavaScript objects and functions directly from Python — including all [Runtime APIs](/workers/runtime-apis/) -* [Built-in packages](/workers/languages/python/packages), including [FastAPI](https://fastapi.tiangolo.com/), [Langchain](https://pypi.org/project/langchain/), [httpx](https://www.python-httpx.org/) and more. +- The majority of Python's [Standard library](/workers/languages/python/stdlib/) +- All [bindings](/workers/runtime-apis/bindings/), including [Workers AI](/workers-ai/), [Vectorize](/vectorize), [R2](/r2), [KV](/kv), [D1](/d1), [Queues](/queues/), [Durable Objects](/durable-objects/), [Service Bindings](/workers/runtime-apis/bindings/service-bindings/) and more. +- [Environment Variables](/workers/configuration/environment-variables/), and [Secrets](/workers/configuration/secrets/) +- A robust [foreign function interface (FFI)](/workers/languages/python/ffi) that lets you use JavaScript objects and functions directly from Python — including all [Runtime APIs](/workers/runtime-apis/) +- [Built-in packages](/workers/languages/python/packages), including [FastAPI](https://fastapi.tiangolo.com/), [Langchain](https://pypi.org/project/langchain/), [httpx](https://www.python-httpx.org/) and more. :::caution[Python Workers are in beta. Packages do not run in production.] @@ -105,10 +104,13 @@ development server. Now, if you send a POST request with the appropriate body, your Worker should respond with a personalized message. ```bash -$ curl --header "Content-Type: application/json" \ - --request POST \ - --data '{"name": "Python"}' http://localhost:8787 -# Hello, Python! +curl --header "Content-Type: application/json" \ + --request POST \ + --data '{"name": "Python"}' http://localhost:8787 +``` + +```bash output +Hello, Python! ``` ## The `env` Parameter @@ -143,6 +145,6 @@ async def on_fetch(request, env): ## Further Reading -* Understand which parts of the [Python Standard Library](/workers/languages/python/stdlib) are supported in Python Workers. -* Learn about Python Workers' [foreign function interface (FFI)](/workers/languages/python/ffi), and how to use it to work with [bindings](/workers/runtime-apis/bindings) and [Runtime APIs](/workers/runtime-apis/). -* Explore the [Built-in Python packages](/workers/languages/python/packages) that the Workers runtime provides. 
+- Understand which parts of the [Python Standard Library](/workers/languages/python/stdlib) are supported in Python Workers. +- Learn about Python Workers' [foreign function interface (FFI)](/workers/languages/python/ffi), and how to use it to work with [bindings](/workers/runtime-apis/bindings) and [Runtime APIs](/workers/runtime-apis/). +- Explore the [Built-in Python packages](/workers/languages/python/packages) that the Workers runtime provides. diff --git a/src/content/docs/workers/languages/rust/index.mdx b/src/content/docs/workers/languages/rust/index.mdx index 98d2c41db3a9e6..0d6160ac3ab0a6 100644 --- a/src/content/docs/workers/languages/rust/index.mdx +++ b/src/content/docs/workers/languages/rust/index.mdx @@ -19,18 +19,18 @@ By following this guide, you will learn how to build a Worker entirely in the Ru Before starting this guide, make sure you have: -* A recent version of [`Rust`](https://rustup.rs/) -* [`npm`](https://docs.npmjs.com/getting-started) -* The Rust `wasm32-unknown-unknown` toolchain: +- A recent version of [`Rust`](https://rustup.rs/) +- [`npm`](https://docs.npmjs.com/getting-started) +- The Rust `wasm32-unknown-unknown` toolchain: ```sh -$ rustup target add wasm32-unknown-unknown +rustup target add wasm32-unknown-unknown ``` -* And `cargo-generate` sub-command by running: +- And `cargo-generate` sub-command by running: ```sh -$ cargo install cargo-generate +cargo install cargo-generate ``` ## 1. Create a new project with Wrangler @@ -38,21 +38,21 @@ $ cargo install cargo-generate Open a terminal window, and run the following command to generate a Worker project template in Rust: ```sh -$ cargo generate cloudflare/workers-rs +cargo generate cloudflare/workers-rs ``` Your project will be created in a new directory that you named, in which you will find the following files and folders: -* `Cargo.toml` - The standard project configuration file for Rust's [`Cargo`](https://doc.rust-lang.org/cargo/) package manager. The template pre-populates some best-practice settings for building for Wasm on Workers. -* `wrangler.toml` - Wrangler configuration, pre-populated with a custom build command to invoke `worker-build` (Refer to [Wrangler Bundling](/workers/languages/rust/#bundling-worker-build)). -* `src` - Rust source directory, pre-populated with Hello World Worker. +- `Cargo.toml` - The standard project configuration file for Rust's [`Cargo`](https://doc.rust-lang.org/cargo/) package manager. The template pre-populates some best-practice settings for building for Wasm on Workers. +- `wrangler.toml` - Wrangler configuration, pre-populated with a custom build command to invoke `worker-build` (Refer to [Wrangler Bundling](/workers/languages/rust/#bundling-worker-build)). +- `src` - Rust source directory, pre-populated with Hello World Worker. ## 2. Develop locally After you have created your first Worker, run the [`wrangler dev`](/workers/wrangler/commands/#dev) command to start a local server for developing your Worker. This will allow you to test your Worker in development. ```sh -$ npx wrangler dev +npx wrangler dev ``` If you have not used Wrangler before, it will try to open your web browser to login with your Cloudflare account. @@ -78,7 +78,6 @@ async fn main(req: Request, env: Env, ctx: Context) -> Result { :::note - There is some counterintuitive behavior going on here: 1. `workers-rs` provides an `event` macro which expects a handler function signature identical to those seen in JavaScript Workers. 
@@ -93,10 +92,10 @@ There is some counterintuitive behavior going on here: This macro allows you to define entrypoints to your Worker. The `event` macro supports the following events: -* `fetch` - Invoked by an incoming HTTP request. -* `scheduled` - Invoked by [`Cron Triggers`](/workers/configuration/cron-triggers/). -* `queue` - Invoked by incoming message batches from [Queues](/queues/) (Requires `queue` feature in `Cargo.toml`, refer to the [`workers-rs` GitHub repository and `queues` feature flag](https://github.com/cloudflare/workers-rs#queues)). -* `start` - Invoked when the Worker is first launched (such as, to install panic hooks). +- `fetch` - Invoked by an incoming HTTP request. +- `scheduled` - Invoked by [`Cron Triggers`](/workers/configuration/cron-triggers/). +- `queue` - Invoked by incoming message batches from [Queues](/queues/) (Requires `queue` feature in `Cargo.toml`, refer to the [`workers-rs` GitHub repository and `queues` feature flag](https://github.com/cloudflare/workers-rs#queues)). +- `start` - Invoked when the Worker is first launched (such as, to install panic hooks). #### `fetch` parameters @@ -110,12 +109,12 @@ An object representing the incoming request. This includes methods for accessing Provides access to Worker [bindings](/workers/runtime-apis/bindings/). -* [`Secret`](https://github.com/cloudflare/workers-rs/blob/e15f88110d814c2d7759b2368df688433f807694/worker/src/env.rs#L92) - Secret value configured in Cloudflare dashboard or using `wrangler secret put`. -* [`Var`](https://github.com/cloudflare/workers-rs/blob/e15f88110d814c2d7759b2368df688433f807694/worker/src/env.rs#L92) - Environment variable defined in `wrangler.toml`. -* [`KvStore`](https://docs.rs/worker-kv/latest/worker_kv/struct.KvStore.html) - Workers [KV](/kv/api/) namespace binding. -* [`ObjectNamespace`](https://docs.rs/worker/latest/worker/durable/struct.ObjectNamespace.html) - [Durable Object](/durable-objects/) binding. -* [`Fetcher`](https://docs.rs/worker/latest/worker/struct.Fetcher.html) - [Service binding](/workers/runtime-apis/bindings/service-bindings/) to another Worker. -* [`Bucket`](https://docs.rs/worker/latest/worker/struct.Bucket.html) - [R2](/r2/) Bucket binding. +- [`Secret`](https://github.com/cloudflare/workers-rs/blob/e15f88110d814c2d7759b2368df688433f807694/worker/src/env.rs#L92) - Secret value configured in Cloudflare dashboard or using `wrangler secret put`. +- [`Var`](https://github.com/cloudflare/workers-rs/blob/e15f88110d814c2d7759b2368df688433f807694/worker/src/env.rs#L92) - Environment variable defined in `wrangler.toml`. +- [`KvStore`](https://docs.rs/worker-kv/latest/worker_kv/struct.KvStore.html) - Workers [KV](/kv/api/) namespace binding. +- [`ObjectNamespace`](https://docs.rs/worker/latest/worker/durable/struct.ObjectNamespace.html) - [Durable Object](/durable-objects/) binding. +- [`Fetcher`](https://docs.rs/worker/latest/worker/struct.Fetcher.html) - [Service binding](/workers/runtime-apis/bindings/service-bindings/) to another Worker. +- [`Bucket`](https://docs.rs/worker/latest/worker/struct.Bucket.html) - [R2](/r2/) Bucket binding. 3. **[`Context`](https://docs.rs/worker/latest/worker/struct.Context.html)** @@ -134,17 +133,15 @@ Implements convenient [routing API](https://docs.rs/worker/latest/worker/struct. With your project configured, you can now deploy your Worker, to a `*.workers.dev` subdomain, or a [Custom Domain](/workers/configuration/routing/custom-domains/), if you have one configured. 
If you have not configured any subdomain or domain, Wrangler will prompt you during the deployment process to set one up. ```sh -$ npx wrangler deploy +npx wrangler deploy ``` Preview your Worker at `..workers.dev`. :::note - When pushing to your `*.workers.dev` subdomain for the first time, you may see [`523` errors](/support/troubleshooting/cloudflare-errors/troubleshooting-cloudflare-5xx-errors/#error-523-origin-is-unreachable) while DNS is propagating. These errors should resolve themselves after a minute or so. - ::: After completing these steps, you will have a basic Rust-based Worker deployed. From here, you can add crate @@ -175,11 +172,13 @@ import * as imports from "./mywasmlib_bg.js"; // switch between both syntax for node and for workerd import wkmod from "./mywasmlib_bg.wasm"; import * as nodemod from "./mywasmlib_bg.wasm"; -if ((typeof process !== 'undefined') && (process.release.name === 'node')) { - imports.__wbg_set_wasm(nodemod); +if (typeof process !== "undefined" && process.release.name === "node") { + imports.__wbg_set_wasm(nodemod); } else { - const instance = new WebAssembly.Instance(wkmod, { "./mywasmlib_bg.js": imports }); - imports.__wbg_set_wasm(instance.exports); + const instance = new WebAssembly.Instance(wkmod, { + "./mywasmlib_bg.js": imports, + }); + imports.__wbg_set_wasm(instance.exports); } export * from "./mywasmlib_bg.js"; @@ -191,7 +190,6 @@ export * from "./mywasmlib_bg.js"; import { myFunction } from "path/to/mylib.js"; ``` - ::: ### Async (`wasm-bindgen-futures`) @@ -225,4 +223,4 @@ Finally, `worker-bundle` automatically invokes [`wasm-opt`](https://github.com/b ## Related resources -* [Rust Wasm Book](https://rustwasm.github.io/book/introduction.html) +- [Rust Wasm Book](https://rustwasm.github.io/book/introduction.html) diff --git a/src/content/docs/workers/observability/logging/real-time-logs.mdx b/src/content/docs/workers/observability/logging/real-time-logs.mdx index c4258af64b2bf6..6e130d56ea7733 100644 --- a/src/content/docs/workers/observability/logging/real-time-logs.mdx +++ b/src/content/docs/workers/observability/logging/real-time-logs.mdx @@ -4,10 +4,9 @@ title: Real-time logs head: [] description: Debug your Worker application by accessing logs and exceptions through the Cloudflare dashboard or `wrangler tail`. - --- -import { TabItem, Tabs } from "~/components" +import { TabItem, Tabs } from "~/components"; Logging is a fundamental building block supporting application development — it can provide insights during the initial stages of development and is often times crucial to understanding an issue occurring in production. @@ -15,10 +14,8 @@ The Workers platform captures all `console.log`'s and uncaught exceptions, in ad :::caution - Real-time logs are not available for zones on the [Cloudflare China Network](/china-network/). - ::: ## Add custom logs @@ -31,24 +28,24 @@ In addition, you can add custom logs throughout your code. Any `console.log` sta ```js export default { - async fetch(request) { - const { cf } = request; - const { city, country } = cf; + async fetch(request) { + const { cf } = request; + const { city, country } = cf; - console.log(`Request came from city: ${city} in country: ${country}`); + console.log(`Request came from city: ${city} in country: ${country}`); - return new Response("Hello worker!", { - headers: { "content-type": "text/plain" }, - }); - } -} + return new Response("Hello worker!", { + headers: { "content-type": "text/plain" }, + }); + }, +}; ```
```js addEventListener("fetch", (event) => { - event.respondWith(handleRequest(event.request)); + event.respondWith(handleRequest(event.request)); }); /** @@ -56,14 +53,14 @@ addEventListener("fetch", (event) => { * @param {Request} request */ async function handleRequest(request) { - const { cf } = request; - const { city, country } = cf; + const { cf } = request; + const { city, country } = cf; - console.log(`Request came from city: ${city} in country: ${country}`); + console.log(`Request came from city: ${city} in country: ${country}`); - return new Response("Hello worker!", { - headers: { "content-type": "text/plain" }, - }); + return new Response("Hello worker!", { + headers: { "content-type": "text/plain" }, + }); } ``` @@ -92,26 +89,29 @@ The output of each `wrangler tail` log is a structured JSON object: ```json { - "outcome": "ok", - "scriptName": null, - "exceptions": [], - "logs": [], - "eventTimestamp": 1590680082349, - "event": { - "request": { - "url": "https://www.bytesized.xyz/", - "method": "GET", - "headers": {}, - "cf": {} - } - } + "outcome": "ok", + "scriptName": null, + "exceptions": [], + "logs": [], + "eventTimestamp": 1590680082349, + "event": { + "request": { + "url": "https://www.bytesized.xyz/", + "method": "GET", + "headers": {}, + "cf": {} + } + } } ``` By piping the output to tools like [`jq`](https://stedolan.github.io/jq/), you can query and manipulate the requests to look for specific information: ```sh -$ npx wrangler tail | jq .event.request.url +npx wrangler tail | jq .event.request.url +``` + +```sh output "https://www.bytesized.xyz/" "https://www.bytesized.xyz/component---src-pages-index-js-a77e385e3bde5b78dbf6.js" "https://www.bytesized.xyz/page-data/app-data.json" @@ -123,17 +123,15 @@ You can customize how `wrangler tail` works to fit your needs. Refer to [the `wr Note that: -* Workers logs are not stored. You can start and stop the stream at any time to view them, but they do not persist. -* If your Worker has a high volume of traffic, the real-time logs might enter sampling mode. This will cause some of your messages to be dropped and a warning to appear in your logs. -* Logs from any [Durable Objects](/durable-objects/) your Worker is using will show up in the dashboard. -* A maximum of 10 clients can view a Worker's logs at one time. This can be a combination of either dashboard sessions or `wrangler tail` calls. +- Workers logs are not stored. You can start and stop the stream at any time to view them, but they do not persist. +- If your Worker has a high volume of traffic, the real-time logs might enter sampling mode. This will cause some of your messages to be dropped and a warning to appear in your logs. +- Logs from any [Durable Objects](/durable-objects/) your Worker is using will show up in the dashboard. +- A maximum of 10 clients can view a Worker's logs at one time. This can be a combination of either dashboard sessions or `wrangler tail` calls. :::note - You can filter real-time logs in the dashboard or using [`wrangler tail`](/workers/wrangler/commands/#tail). If your Worker has a high volume of messages, filtering real-time logs can help mitgate messages from being dropped. - ::: ## Persist logs @@ -153,8 +151,8 @@ Refer to the [Tail Workers documentation](/workers/observability/logging/tail-wo ## Related resources -* [Errors and exceptions](/workers/observability/errors/) - Review common Workers errors. -* [Local development and testing](/workers/testing/local-development/) - Develop and test you Workers locally. 
-* [Logpush](/workers/observability/logging/logpush/) - Learn how to push Workers Trace Event Logs to supported destinations. -* [Tail Workers](/workers/observability/logging/logpush/) - Learn how to attach Tail Workers to transform your logs and send them to HTTP endpoints. -* [Source maps and stack traces](/workers/observability/source-maps) - Learn how to enable source maps and generate stack traces for Workers. +- [Errors and exceptions](/workers/observability/errors/) - Review common Workers errors. +- [Local development and testing](/workers/testing/local-development/) - Develop and test you Workers locally. +- [Logpush](/workers/observability/logging/logpush/) - Learn how to push Workers Trace Event Logs to supported destinations. +- [Tail Workers](/workers/observability/logging/logpush/) - Learn how to attach Tail Workers to transform your logs and send them to HTTP endpoints. +- [Source maps and stack traces](/workers/observability/source-maps) - Learn how to enable source maps and generate stack traces for Workers. diff --git a/src/content/docs/workers/platform/limits.mdx b/src/content/docs/workers/platform/limits.mdx index e4bd46f8205118..45db87d14da67a 100644 --- a/src/content/docs/workers/platform/limits.mdx +++ b/src/content/docs/workers/platform/limits.mdx @@ -5,14 +5,13 @@ sidebar: order: 2 head: [] description: Cloudflare Workers plan and platform limits. - --- -import { Render } from "~/components" +import { Render } from "~/components"; ## Account plan limits -| Feature | Workers Free | Workers Paid ([Bundled](/workers/platform/pricing/#example-pricing-bundled-usage-model), [Unbound](/workers/platform/pricing/#example-pricing-unbound-usage-model)) and [Standard](/workers/platform/pricing/#example-pricing-standard-usage-model) | +| Feature | Workers Free | Workers Paid ([Bundled](/workers/platform/pricing/#example-pricing-bundled-usage-model), [Unbound](/workers/platform/pricing/#example-pricing-unbound-usage-model)) and [Standard](/workers/platform/pricing/#example-pricing-standard-usage-model) | | -------------------------------------------------------------------------------- | ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | [Subrequests](#subrequests) | 50/request | 50/request ([Bundled](/workers/platform/pricing/#example-pricing-bundled-usage-model)),
1000/request ([Unbound](/workers/platform/pricing/#example-pricing-unbound-usage-model), [Standard](/workers/platform/pricing/#example-pricing-standard-usage-model)) | | [Simultaneous outgoing
connections/request](#simultaneous-open-connections) | 6 | 6 | @@ -23,11 +22,12 @@ import { Render } from "~/components" | [Number of Workers](#number-of-workers)1 | 100 | 500 | | Number of [Cron Triggers](/workers/configuration/cron-triggers/)
per account | 5 | 250 | -1 If you are running into limits, your project may be a good fit for [Workers for Platforms](/cloudflare-for-platforms/workers-for-platforms/). +1 If you are running into limits, your project may be a good fit for +[Workers for Platforms](/cloudflare-for-platforms/workers-for-platforms/). -*** +--- ## Request limits @@ -46,26 +46,22 @@ Cloudflare Enterprise customers may contact their account team or [Cloudflare Su | Business | 200 MB | | Enterprise | 500 MB (by default) | -*** +--- ## Response limits Cloudflare does not enforce response limits, but cache limits for [Cloudflare's CDN are observed](/cache/concepts/default-cache-behavior/). Maximum file size is 512 MB for Free, Pro, and Business customers and 5 GB for Enterprise customers. -*** +--- ## Worker limits - - -| Feature | Free | [Bundled usage model](/workers/platform/pricing/#example-pricing-bundled-usage-model) | [Unbound](/workers/platform/pricing/#example-pricing-unbound-usage-model) and [Standard](/workers/platform/pricing/#example-pricing-standard-usage-model) usage model | -| --------------------------- | ------------------------------------------ | ------------------------------------------- | ------------------------------------------- | -| [Request](#request) | 100,000 requests/day
1000 requests/min | none | none | -| [Worker memory](#memory) | 128 MB | 128 MB | 128 MB | -| [CPU time](#cpu-time) | 10 ms | 50 ms HTTP request
50 ms [Cron Trigger](/workers/configuration/cron-triggers/) | 30 s HTTP request
15 min [Cron Trigger](/workers/configuration/cron-triggers/)
15 min [Queue Consumer](/queues/configuration/javascript-apis/#consumer) | | -| [Duration](#duration) | None | none | 15 min [Cron Trigger](/workers/configuration/cron-triggers/)
15 min [Durable Object Alarm](/durable-objects/api/alarms/)
15 min [Queue Consumer](/queues/configuration/javascript-apis/#consumer) |
-
-
+| Feature | Free | [Bundled usage model](/workers/platform/pricing/#example-pricing-bundled-usage-model) | [Unbound](/workers/platform/pricing/#example-pricing-unbound-usage-model) and [Standard](/workers/platform/pricing/#example-pricing-standard-usage-model) usage model |
+| ------------------------ | ------------------------------------------ | ------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [Request](#request) | 100,000 requests/day
1000 requests/min | none | none | +| [Worker memory](#memory) | 128 MB | 128 MB | 128 MB | +| [CPU time](#cpu-time) | 10 ms | 50 ms HTTP request
50 ms [Cron Trigger](/workers/configuration/cron-triggers/) | 30 s HTTP request
15 min [Cron Trigger](/workers/configuration/cron-triggers/)
15 min [Queue Consumer](/queues/configuration/javascript-apis/#consumer) |
+| [Duration](#duration) | None | none | 15 min [Cron Trigger](/workers/configuration/cron-triggers/)
15 min [Durable Object Alarm](/durable-objects/api/alarms/)
15 min [Queue Consumer](/queues/configuration/javascript-apis/#consumer) | ### Duration @@ -73,44 +69,38 @@ Duration is a measurement of wall-clock time — the total amount of time from :::note -Cloudflare updates the Workers runtime a few times per week. When this happens, any in-flight requests are given a grace period of 30 seconds to finish. If a request does not finish within this time, it is terminated. While your application should follow the best practice of handling disconnects by retrying requests, this scenario is extremely improbable. To encounter it, you would need to have a request that takes longer than 30 seconds that also happens to intersect with the exact time an update to the runtime is happening. +Cloudflare updates the Workers runtime a few times per week. When this happens, any in-flight requests are given a grace period of 30 seconds to finish. If a request does not finish within this time, it is terminated. While your application should follow the best practice of handling disconnects by retrying requests, this scenario is extremely improbable. To encounter it, you would need to have a request that takes longer than 30 seconds that also happens to intersect with the exact time an update to the runtime is happening. ::: ### CPU time CPU time is the amount of time the CPU actually spends doing work, during a given request. Most Workers requests consume less than a millisecond of CPU time. It is rare to find normally operating Workers that exceed the CPU time limit. -
+
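If you want a hard ceiling on CPU usage, Wrangler's configuration supports a `[limits]` block. A minimal `wrangler.toml` sketch — this assumes a Worker on the Standard usage model, and the 100 ms cap is illustrative rather than a recommended value:

```toml
# Cap CPU time per invocation; invocations that exceed the cap are terminated.
[limits]
cpu_ms = 100
```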
:::note -On the Unbound billing model, scheduled Workers ([Cron Triggers](/workers/configuration/cron-triggers/)) have different limits on CPU time based on the schedule interval. When the schedule interval is less than 1 hour, a Scheduled Worker may run for up to 30 seconds. When the schedule interval is more than 1 hour, a scheduled Worker may run for up to 15 minutes. +On the Unbound billing model, scheduled Workers ([Cron Triggers](/workers/configuration/cron-triggers/)) have different limits on CPU time based on the schedule interval. When the schedule interval is less than 1 hour, a Scheduled Worker may run for up to 30 seconds. When the schedule interval is more than 1 hour, a scheduled Worker may run for up to 15 minutes. ::: -*** +--- ## Cache API limits - - | Feature | Workers Free | [Bundled](/workers/platform/pricing/#example-pricing-bundled-usage-model) | [Unbound](/workers/platform/pricing/#example-pricing-unbound-usage-model) and [Standard](/workers/platform/pricing/#example-pricing-standard-usage-model) | | ---------------------------------------- | ------------ | ------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | | [Maximum object size](#cache-api-limits) | 512 MB | 512 MB | 512 MB | | [Calls/request](#cache-api-limits) | 50 | 50 | 1,000 | - - -* 50 total `put()`, `match()`, or `delete()` calls per-request, using the same quota as `fetch()`. +- 50 total `put()`, `match()`, or `delete()` calls per-request, using the same quota as `fetch()`. :::note - The size of chunked response bodies (`Transfer-Encoding: chunked`) is not known in advance. Then, `.put()`ing such responses will block subsequent `.put()`s from starting until the current `.put()` completes. - ::: -*** +--- ## Request @@ -124,10 +114,8 @@ The burst rate and daily request limits apply at the account level, meaning that :::caution - If you are currently being rate limited, upgrade to a [Workers Paid plan](https://dash.cloudflare.com/?account=workers/plans) to lift burst rate and daily request limits. - ::: ### Burst rate @@ -152,7 +140,7 @@ Routes in fail open mode will bypass the failing Worker and prevent it from oper Routes in fail closed mode will display a Cloudflare `1027` error page to visitors, signifying the Worker has been temporarily disabled. Cloudflare recommends this option if your Worker is performing security related tasks. -*** +--- ## Memory @@ -166,7 +154,7 @@ If a Worker processes a request that pushes the Worker over the 128 MB limit, th Use the [TransformStream API](/workers/runtime-apis/streams/transformstream/) to stream responses if you are concerned about memory usage. This avoids loading an entire response into memory. -*** +--- ## Subrequests @@ -192,37 +180,37 @@ There is no set limit on the amount of real time a Worker may use. As long as th When the client disconnects, all tasks associated with that client’s request are proactively canceled. If the Worker passed a promise to [`event.waitUntil()`](/workers/runtime-apis/handlers/fetch/), cancellation will be delayed until the promise has completed or until an additional 30 seconds have elapsed, whichever happens first. -*** +--- ## Simultaneous open connections You can open up to six connections simultaneously, for each invocation of your Worker. 
The connections opened by the following API calls all count toward this limit: -* the `fetch()` method of the [Fetch API](/workers/runtime-apis/fetch/). -* `get()`, `put()`, `list()`, and `delete()` methods of [Workers KV namespace objects](/kv/api/). -* `put()`, `match()`, and `delete()` methods of [Cache objects](/workers/runtime-apis/cache/). -* `list()`, `get()`, `put()`, `delete()`, and `head()` methods of [R2](/r2/). -* `send()` and `sendBatch()`, methods of [Queues](/queues/). -* Opening a TCP socket using the [`connect()`](/workers/runtime-apis/tcp-sockets/) API. +- the `fetch()` method of the [Fetch API](/workers/runtime-apis/fetch/). +- `get()`, `put()`, `list()`, and `delete()` methods of [Workers KV namespace objects](/kv/api/). +- `put()`, `match()`, and `delete()` methods of [Cache objects](/workers/runtime-apis/cache/). +- `list()`, `get()`, `put()`, `delete()`, and `head()` methods of [R2](/r2/). +- `send()` and `sendBatch()`, methods of [Queues](/queues/). +- Opening a TCP socket using the [`connect()`](/workers/runtime-apis/tcp-sockets/) API. Once an invocation has six connections open, it can still attempt to open additional connections. -* These attempts are put in a pending queue — the connections will not be initiated until one of the currently open connections has closed. -* Earlier connections can delay later ones, if a Worker tries to make many simultaneous subrequests, its later subrequests may appear to take longer to start. +- These attempts are put in a pending queue — the connections will not be initiated until one of the currently open connections has closed. +- Earlier connections can delay later ones, if a Worker tries to make many simultaneous subrequests, its later subrequests may appear to take longer to start. If you have cases in your application that use `fetch()` but that do not require consuming the response body, you can avoid the unread response body from consuming a concurrent connection by using `response.body.cancel()`. For example, if you want to check whether the HTTP response code is successful (2xx) before consuming the body, you should explicitly cancel the pending response body: ```ts -let resp = await fetch(url) +let resp = await fetch(url); // Only read the response body for successful responses if (resp.statusCode <= 299) { - // Call resp.json(), resp.text() or otherwise process the body + // Call resp.json(), resp.text() or otherwise process the body } else { - // Explicitly cancel it - resp.body.cancel() + // Explicitly cancel it + resp.body.cancel(); } ``` @@ -234,13 +222,11 @@ If the Worker later attempts to use a canceled connection, an exception will be :::note - Simultaneous Open Connections are measured from the top-level request, meaning any connections open from Workers sharing resources (for example, Workers triggered via [Service bindings](/workers/runtime-apis/bindings/service-bindings/)) will share the simultaneous open connection limit. - ::: -*** +--- ## Environment variables @@ -249,30 +235,32 @@ There is no limit to the number of environment variables per account. Each environment variable has a size limitation of 5 KB. -*** +--- ## Worker size -A Worker can be up to 10 MB in size *after compression* on the Workers Paid plan, and up to 1 MB on the Workers Free plan. +A Worker can be up to 10 MB in size _after compression_ on the Workers Paid plan, and up to 1 MB on the Workers Free plan. 
You can assess the size of your Worker bundle after compression by performing a dry-run with `wrangler` and reviewing the final compressed (`gzip`) size output by `wrangler`: ```sh -$ wrangler deploy --outdir bundled/ --dry-run +wrangler deploy --outdir bundled/ --dry-run +``` +```sh output # Output will resemble the below: Total Upload: 259.61 KiB / gzip: 47.23 KiB ``` Note that larger Worker bundles can impact the start-up time of the Worker, as the Worker needs to be loaded into memory. You should consider removing unnecessary dependencies and/or using [Workers KV](/kv/), a [D1 database](/d1/) or [R2](/r2/) to store configuration files, static assets and binary data instead of attempting to bundle them within your Worker code. -*** +--- ## Worker startup time A Worker must be able to be parsed and execute its global scope (top-level code outside of any handlers) within 400 ms. Worker size can impact startup because there is more code to parse and evaluate. Avoiding expensive code in the global scope can keep startup efficient as well. -*** +--- ## Number of Workers @@ -280,7 +268,7 @@ You can have up to 500 Workers on your account on the Workers Paid plan, and up If you need more than 500 Workers, consider using [Workers for Platforms](/cloudflare-for-platforms/workers-for-platforms/). -*** +--- ## Number of routes per zone @@ -290,13 +278,13 @@ Each zone has a limit of 1,000 [routes](/workers/configuration/routing/routes/). When configuring [routing](/workers/configuration/routing/), the maximum number of zones that can be referenced by a Worker is 1,000. If you require more than 1,000 zones on your Worker, consider using [Workers for Platforms](/cloudflare-for-platforms/workers-for-platforms/) or request an increase to this limit. -*** +--- ## Image Resizing with Workers When using Image Resizing with Workers, refer to [Image Resizing documentation](/images/transform-images/) for more information on the applied limits. -*** +--- ## Log size @@ -308,6 +296,6 @@ Refer to the [Workers Trace Event Logpush documentation](/workers/observability/ Review other developer platform resource limits. -* [KV limits](/kv/platform/limits/) -* [Durable Object limits](/durable-objects/platform/limits/) -* [Queues limits](/queues/platform/limits/) +- [KV limits](/kv/platform/limits/) +- [Durable Object limits](/durable-objects/platform/limits/) +- [Queues limits](/queues/platform/limits/) diff --git a/src/content/docs/workers/runtime-apis/bindings/mTLS.mdx b/src/content/docs/workers/runtime-apis/bindings/mTLS.mdx index 99347ff87033e1..f846a0b2a4b166 100644 --- a/src/content/docs/workers/runtime-apis/bindings/mTLS.mdx +++ b/src/content/docs/workers/runtime-apis/bindings/mTLS.mdx @@ -4,10 +4,9 @@ title: mTLS head: [] description: Configure your Worker to present a client certificate to services that enforce an mTLS connection. - --- -import { TabItem, Tabs } from "~/components" +import { TabItem, Tabs } from "~/components"; When using [HTTPS](https://www.cloudflare.com/learning/ssl/what-is-https/), a server presents a certificate for the client to authenticate in order to prove their identity. For even tighter security, some services require that the client also present a certificate. @@ -17,22 +16,19 @@ To present a client certificate when communicating with a service, create a mTLS :::caution - Currently, mTLS for Workers cannot be used for requests made to a service that is a [proxied zone](/dns/manage-dns-records/reference/proxied-dns-records/) on Cloudflare. 
If your Worker presents a client certificate to a service proxied by Cloudflare, Cloudflare will return a `520` error. - ::: First, upload a certificate and its private key to your account using the [`wrangler mtls-certificate`](/workers/wrangler/commands/#mtls-certificate) command: :::caution - -The `wrangler mtls-certificate upload` command requires the [SSL and Certificates Edit API token scope](/fundamentals/api/reference/permissions/). If you are using the OAuth flow triggered by `wrangler login`, the correct scope is set automatically. If you are using API tokens, refer to [Create an API token](https://developers.cloudflare.com/fundamentals/api/get-started/create-token/) to set the right scope for your API token. +The `wrangler mtls-certificate upload` command requires the [SSL and Certificates Edit API token scope](/fundamentals/api/reference/permissions/). If you are using the OAuth flow triggered by `wrangler login`, the correct scope is set automatically. If you are using API tokens, refer to [Create an API token](https://developers.cloudflare.com/fundamentals/api/get-started/create-token/) to set the right scope for your API token. ::: ```sh -$ npx wrangler mtls-certificate upload --cert cert.pem --key key.pem --name my-client-cert +npx wrangler mtls-certificate upload --cert cert.pem --key key.pem --name my-client-cert ``` Then, update your Worker project's `wrangler.toml` file to create an mTLS certificate binding: @@ -45,14 +41,14 @@ mtls_certificates = [ :::note -Certificate IDs are displayed after uploading, and can also be viewed with the command `wrangler mtls-certificate list`. +Certificate IDs are displayed after uploading, and can also be viewed with the command `wrangler mtls-certificate list`. ::: Adding an mTLS certificate binding includes a variable in the Worker's environment on which the `fetch()` method is available. This `fetch()` method uses the standard [Fetch](/workers/runtime-apis/fetch/) API and has the exact same signature as the global `fetch`, but always presents the client certificate when establishing the TLS connection. :::note -mTLS certificate bindings present an API similar to [service bindings](/workers/runtime-apis/bindings/service-bindings). +mTLS certificate bindings present an API similar to [service bindings](/workers/runtime-apis/bindings/service-bindings). ::: ### Interface @@ -61,10 +57,10 @@ mTLS certificate bindings present an API similar to [service bindings](/workers/ ```js export default { - async fetch(request, environment) { - return await environment.MY_CERT.fetch("https://a-secured-origin.com") - } -} + async fetch(request, environment) { + return await environment.MY_CERT.fetch("https://a-secured-origin.com"); + }, +}; ```
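Because the binding's `fetch()` shares the global `fetch()` signature, standard request options pass straight through it. A short sketch, assuming the same `MY_CERT` binding — the endpoint path and JSON payload here are illustrative:

```js
export default {
  async fetch(request, environment) {
    // The client certificate is presented automatically during the TLS
    // handshake; only the request itself needs to be described here.
    const response = await environment.MY_CERT.fetch(
      "https://a-secured-origin.com/api/report",
      {
        method: "POST",
        headers: { "content-type": "application/json" },
        body: JSON.stringify({ status: "ok" }),
      },
    );
    if (!response.ok) {
      return new Response(`Upstream returned ${response.status}`, {
        status: 502,
      });
    }
    return response;
  },
};
```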
diff --git a/src/content/docs/workers/runtime-apis/handlers/scheduled.mdx b/src/content/docs/workers/runtime-apis/handlers/scheduled.mdx index cd065b776878e9..8ad8e0bfb6ec83 100644 --- a/src/content/docs/workers/runtime-apis/handlers/scheduled.mdx +++ b/src/content/docs/workers/runtime-apis/handlers/scheduled.mdx @@ -1,7 +1,6 @@ --- pcx_content_type: configuration title: Scheduled Handler - --- ## Background @@ -10,64 +9,55 @@ When a Worker is invoked via a [Cron Trigger](/workers/configuration/cron-trigge :::note[Testing scheduled() handlers in local development] - You can test the behavior of your `scheduled()` handler in local development using Wrangler. Cron Triggers can be tested using `Wrangler` by passing in the `--test-scheduled` flag to [`wrangler dev`](/workers/wrangler/commands/#dev). This will expose a `/__scheduled` route which can be used to test using a http request. To simulate different cron patterns, a `cron` query parameter can be passed in. ```sh -$ npx wrangler dev --test-scheduled +npx wrangler dev --test-scheduled -$ curl "http://localhost:8787/__scheduled?cron=*+*+*+*+*" +curl "http://localhost:8787/__scheduled?cron=*+*+*+*+*" ``` - ::: -*** +--- ## Syntax ```js export default { - async scheduled(event, env, ctx) { - ctx.waitUntil(doSomeTaskOnASchedule()); - }, + async scheduled(event, env, ctx) { + ctx.waitUntil(doSomeTaskOnASchedule()); + }, }; ``` ### Properties +- `event.cron` string + - The value of the [Cron Trigger](/workers/configuration/cron-triggers/) that started the `ScheduledEvent`. -* `event.cron` string - - * The value of the [Cron Trigger](/workers/configuration/cron-triggers/) that started the `ScheduledEvent`. - -* `event.type` string +- `event.type` string - * The type of event. This will always return `"scheduled"`. + - The type of event. This will always return `"scheduled"`. -* `event.scheduledTime` number +- `event.scheduledTime` number - * The time the `ScheduledEvent` was scheduled to be executed in milliseconds since January 1, 1970, UTC. It can be parsed as new Date(event.scheduledTime). + - The time the `ScheduledEvent` was scheduled to be executed in milliseconds since January 1, 1970, UTC. It can be parsed as new Date(event.scheduledTime). -* `env` object - - * An object containing the bindings associated with your Worker using ES modules format, such as KV namespaces and Durable Objects. - -* `ctx` object - * An object containing the context associated with your Worker using ES modules format. Currently, this object just contains the `waitUntil` function. +- `env` object + - An object containing the bindings associated with your Worker using ES modules format, such as KV namespaces and Durable Objects. +- `ctx` object + - An object containing the context associated with your Worker using ES modules format. Currently, this object just contains the `waitUntil` function. ### Methods When a Workers script is invoked by a [Cron Trigger](/workers/configuration/cron-triggers/), the Workers runtime starts a `ScheduledEvent` which will be handled by the `scheduled` function in your Workers Module class. The `ctx` argument represents the context your function runs in, and contains the following methods to control what happens next: +- ctx.waitUntil(promisePromise) : void - -* ctx.waitUntil(promisePromise) : void - - * Use this method to notify the runtime to wait for asynchronous tasks (for example, logging, analytics to third-party services, streaming and caching). 
The first `ctx.waitUntil` to fail will be observed and recorded as the status in the [Cron Trigger](/workers/configuration/cron-triggers/) Past Events table. Otherwise, it will be reported as a success. - + - Use this method to notify the runtime to wait for asynchronous tasks (for example, logging, analytics to third-party services, streaming and caching). The first `ctx.waitUntil` to fail will be observed and recorded as the status in the [Cron Trigger](/workers/configuration/cron-triggers/) Past Events table. Otherwise, it will be reported as a success. diff --git a/src/content/docs/workers/runtime-apis/nodejs/index.mdx b/src/content/docs/workers/runtime-apis/nodejs/index.mdx index 9f59a50fc6be72..fe7e650dc157ba 100644 --- a/src/content/docs/workers/runtime-apis/nodejs/index.mdx +++ b/src/content/docs/workers/runtime-apis/nodejs/index.mdx @@ -4,10 +4,9 @@ title: Node.js compatibility head: [] description: Implemented Node.js runtime APIs and enablement instructions for your Worker project. - --- -import { DirectoryListing } from "~/components" +import { DirectoryListing } from "~/components"; Most Workers import one or more packages of JavaScript or TypeScript code from [npm](https://www.npmjs.com/) as dependencies in `package.json`. Many of these packages rely on APIs from the [Node.js runtime](https://nodejs.org/en/about), and will not work unless these APIs are present. @@ -19,10 +18,10 @@ Node.js APIs are available under the `node:` prefix, and this prefix must be use ```js // Do this: -import { Buffer } from 'node:buffer'; +import { Buffer } from "node:buffer"; // Do not do this: -import { Buffer } from 'buffer'; +import { Buffer } from "buffer"; ``` Unless otherwise specified, implementations of Node.js APIs in Workers are intended to match the implementation in the [Current release of Node.js](https://github.com/nodejs/release#release-schedule). @@ -42,7 +41,7 @@ compatibility_flags = [ "nodejs_compat" ] To enable `nodejs_compat` in local development, pass the [`--compatibility-flags`](/workers/wrangler/commands/#dev-1) argument with the `nodejs_compat` flag to `wrangler pages dev`: ```sh -$ npx wrangler pages dev [] --compatibility-flags="nodejs_compat" +npx wrangler pages dev [] --compatibility-flags="nodejs_compat" ``` For additional options, refer to the list of [Pages-specific CLI commands](/workers/wrangler/commands/#dev-1). @@ -66,4 +65,4 @@ compatibility_flags = [ "nodejs_als" ] ## Related resources -* Write your Worker code in [ES modules syntax](/workers/reference/migrate-to-module-workers/) for an optimized experience. +- Write your Worker code in [ES modules syntax](/workers/reference/migrate-to-module-workers/) for an optimized experience. diff --git a/src/content/docs/workers/runtime-apis/rpc/lifecycle.mdx b/src/content/docs/workers/runtime-apis/rpc/lifecycle.mdx index 273228725fd805..37f0d87b37c038 100644 --- a/src/content/docs/workers/runtime-apis/rpc/lifecycle.mdx +++ b/src/content/docs/workers/runtime-apis/rpc/lifecycle.mdx @@ -7,7 +7,6 @@ head: - tag: title content: Workers RPC — Lifecycle description: Memory management, resource management, and the lifecycle of RPC stubs. 
- --- ## Lifetimes, Memory and Resource Management @@ -30,8 +29,8 @@ To ensure resources are properly disposed of, you should use [Explicit Resource Explicit Resource Management adds the following language features: -* The [`using` declaration](https://github.com/tc39/proposal-explicit-resource-management?tab=readme-ov-file#using-declarations) -* [`Symbol.dispose` and `Symbol.asyncDispose`](https://github.com/tc39/proposal-explicit-resource-management?tab=readme-ov-file#additions-to-symbol) +- The [`using` declaration](https://github.com/tc39/proposal-explicit-resource-management?tab=readme-ov-file#using-declarations) +- [`Symbol.dispose` and `Symbol.asyncDispose`](https://github.com/tc39/proposal-explicit-resource-management?tab=readme-ov-file#additions-to-symbol) If a variable is declared with `using`, when the variable is no longer in scope, the variable's disposer will be invoked. For example: @@ -51,7 +50,7 @@ function sendEmail(id, message) { Because it has not yet landed in V8, the `using` keyword is not yet available directly in the Workers runtime. To use it in your code, you must use a prerelease version of the [Wrangler CLI](/workers/wrangler/) to run and deploy your Worker: ```sh -$ npx wrangler@using-keyword-experimental dev +npx wrangler@using-keyword-experimental dev ``` This version of Wrangler will transpile `using` into direct calls to `Symbol.dispose()`, before running your code or deploying it to Cloudflare. @@ -60,9 +59,9 @@ The following code: ```js { - using counter = await env.COUNTER_SERVICE.newCounter(); - await counter.increment(2); - await counter.increment(4); + using counter = await env.COUNTER_SERVICE.newCounter(); + await counter.increment(2); + await counter.increment(4); } ``` @@ -70,13 +69,13 @@ The following code: ```js { - const counter = await env.COUNTER_SERVICE.newCounter(); - try { - await counter.increment(2); - await counter.increment(4); - } finally { - counter[Symbol.dispose](); - } + const counter = await env.COUNTER_SERVICE.newCounter(); + try { + await counter.increment(2); + await counter.increment(4); + } finally { + counter[Symbol.dispose](); + } } ``` @@ -96,15 +95,17 @@ For example, the Worker below does not make use of the `using` declaration, but ```js export default { - async fetch(request, env, ctx) { - let authResult = await env.AUTH_SERVICE.checkCookie(req.headers.get("Cookie")); - if (!authResult.authorized) { - return new Response("Not authorized", {status: 403}); - } - let profile = await authResult.user.getProfile(); - - return new Response(`Hello, ${profile.name}!`); - }, + async fetch(request, env, ctx) { + let authResult = await env.AUTH_SERVICE.checkCookie( + req.headers.get("Cookie"), + ); + if (!authResult.authorized) { + return new Response("Not authorized", { status: 403 }); + } + let profile = await authResult.user.getProfile(); + + return new Response(`Hello, ${profile.name}!`); + }, }; ``` @@ -134,9 +135,9 @@ A class that extends [`RpcTarget`](/workers/runtime-apis/rpc/) can optionally im ```js class Foo extends RpcTarget { - [Symbol.dispose]() { - // ... - } + [Symbol.dispose]() { + // ... + } } ``` @@ -168,14 +169,14 @@ In order to avoid this situation, you can manually create a stub locally, and th import { RpcTarget, RpcStub } from "cloudflare:workers"; class Foo extends RpcTarget { - // ... + // ... 
} let obj = new Foo(); let stub = new RpcStub(obj); -await rpc1(stub.dup()); // sends a dup of `stub` -await rpc2(stub.dup()); // sends another dup of `stub` -stub[Symbol.dispose](); // disposes the original stub +await rpc1(stub.dup()); // sends a dup of `stub` +await rpc2(stub.dup()); // sends another dup of `stub` +stub[Symbol.dispose](); // disposes the original stub // obj's disposer will be called when the other two stubs // are disposed remotely. diff --git a/src/content/docs/workers/runtime-apis/webassembly/javascript.mdx b/src/content/docs/workers/runtime-apis/webassembly/javascript.mdx index e3d8443373b3a4..d8ad8e0697cfd5 100644 --- a/src/content/docs/workers/runtime-apis/webassembly/javascript.mdx +++ b/src/content/docs/workers/runtime-apis/webassembly/javascript.mdx @@ -6,7 +6,6 @@ sidebar: head: - tag: title content: Wasm in JavaScript - --- Wasm can be used from within a Worker written in JavaScript or TypeScript by importing a Wasm module, @@ -43,7 +42,7 @@ Review the following example module (`;;` denotes a comment): Using [`wat2wasm`](https://github.com/WebAssembly/wabt), convert the WAT format to WebAssembly Binary Format: ```sh -$ wat2wasm src/simple.wat -o src/simple.wasm +wat2wasm src/simple.wat -o src/simple.wasm ``` ## Bundling @@ -55,15 +54,15 @@ Wrangler will bundle any Wasm module that ends in `.wasm` or `.wasm?module`, so After you have converted the WAT format to WebAssembly Binary Format, import and use the Wasm module in your existing JavaScript or TypeScript Worker: ```typescript -import mod from './simple.wasm' +import mod from "./simple.wasm"; // Define imports available to Wasm instance. const importObject = { - imports: { - imported_func: (arg: number) => { - console.log(`Hello from JavaScript: ${arg}`) - } - }, + imports: { + imported_func: (arg: number) => { + console.log(`Hello from JavaScript: ${arg}`); + }, + }, }; // Create instance of WebAssembly Module `mod`, supplying @@ -72,14 +71,14 @@ const importObject = { const instance = await WebAssembly.instantiate(mod, importObject); export default { - async fetch() { - // Invoke the `exported_func` from our Wasm Instance with - // an argument. - const retval = instance.exports.exported_func(42); - // Return the return value! - return new Response(`Success: ${retval}`); - } -} + async fetch() { + // Invoke the `exported_func` from our Wasm Instance with + // an argument. + const retval = instance.exports.exported_func(42); + // Return the return value! + return new Response(`Success: ${retval}`); + }, +}; ``` When invoked, this Worker should log `Hello from JavaScript: 42` and return `Success: 42`, demonstrating the ability to invoke Wasm methods with arguments from JavaScript and vice versa. diff --git a/src/content/docs/workers/testing/local-development.mdx b/src/content/docs/workers/testing/local-development.mdx index bef3fea8b7ad2d..22eba5543b73db 100644 --- a/src/content/docs/workers/testing/local-development.mdx +++ b/src/content/docs/workers/testing/local-development.mdx @@ -5,7 +5,6 @@ sidebar: order: 2 head: [] description: Develop your Workers locally via Wrangler. - --- Cloudflare Workers and most connected resources can be fully developed and tested locally - providing confidence that the applications you build locally will work the same way in production. This allows you to be more efficient and effective by providing a faster feedback loop and removing the need to test against remote resources. 
Local development runs against the same production runtime used by Cloudflare Workers, [workerd](https://github.com/cloudflare/workerd). @@ -16,18 +15,16 @@ In addition to testing Workers locally with [`wrangler dev`](/workers/wrangler/c :::note - This guide assumes you are using [Wrangler v3.0](https://blog.cloudflare.com/wrangler3/) or later. Users new to Wrangler CLI and Cloudflare Workers should visit the [Wrangler Install/Update guide](/workers/wrangler/install-and-update) to install `wrangler`. - ::: Wrangler provides a [`dev`](/workers/wrangler/commands/#dev) command that starts a local server for developing your Worker. Make sure you have `npm` installed and run the following in the folder containing your Worker application: ```sh -$ npx wrangler dev +npx wrangler dev ``` `wrangler dev` will run the Worker directly on your local machine. `wrangler dev` uses a combination of `workerd` and [Miniflare](https://github.com/cloudflare/workers-sdk/tree/main/packages/miniflare), a simulator that allows you to test your Worker against additional resources like KV, Durable Objects, WebSockets, and more. @@ -36,20 +33,20 @@ $ npx wrangler dev | Product | Local Dev Supported | Remote Dev Supported | | ----------------------------------- | ------------------- | -------------------- | -| AI | ✅[^1] | ✅ | -| Analytics Engine | ❌ | ✅ | -| Browser Rendering | ❌ | ✅ | -| D1 | ✅ | ✅ | -| Durable Objects | ✅ | ✅ | -| Email Bindings | ❌ | ✅ | -| Hyperdrive | ✅ | ✅ | -| KV | ✅ | ✅ | -| mTLS | ❌ | ✅ | -| Queues | ✅ | ❌ | -| R2 | ✅ | ✅ | -| Rate Limiting | ✅ | ✅ | -| Service Bindings (multiple workers) | ✅ | ✅ | -| Vectorize | ❌ | ✅ | +| AI | ✅[^1] | ✅ | +| Analytics Engine | ❌ | ✅ | +| Browser Rendering | ❌ | ✅ | +| D1 | ✅ | ✅ | +| Durable Objects | ✅ | ✅ | +| Email Bindings | ❌ | ✅ | +| Hyperdrive | ✅ | ✅ | +| KV | ✅ | ✅ | +| mTLS | ❌ | ✅ | +| Queues | ✅ | ❌ | +| R2 | ✅ | ✅ | +| Rate Limiting | ✅ | ✅ | +| Service Bindings (multiple workers) | ✅ | ✅ | +| Vectorize | ❌ | ✅ | With any bindings that are not supported locally, you will need to use the `--remote` command in wrangler, such as `wrangler dev --remote`. @@ -68,17 +65,15 @@ When you run `wrangler dev` Wrangler stores local resources in a `.wrangler/stat If you prefer to specify a directory, you can use the [`--persist-to`](/workers/wrangler/commands/#dev) flag with `wrangler dev` like this: ```sh -$ npx wrangler dev --persist-to +npx wrangler dev --persist-to ``` Using this will write all local storage and cache to the specified directory instead of `.wrangler`. :::note - This local persistence folder should be added to your `.gitignore` file. - ::: ### Use `--local` flag @@ -95,7 +90,7 @@ The following [Wrangler commands](/workers/wrangler/commands/) have a `--local` If using `--persist-to` to specify a custom folder with `wrangler dev` you should also add `--persist-to` with the same directory name along with the `--local` flag when running the commands above. For example, to put a custom KV key into a local namespace via the CLI you would run: ```sh -$ npx wrangler kv:key put test 12345 --binding MY_KV_NAMESPACE --local --persist-to worker-local +npx wrangler kv:key put test 12345 --binding MY_KV_NAMESPACE --local --persist-to worker-local ``` Running `wrangler kv:key put` will create a new key `test` with a value of `12345` on the local namespace specified via the binding `MY_KV_NAMESPACE` in `wrangler.toml`. 
This example command sets the local persistence directory to `worker-local` using `--persist-to`, to ensure that the data is created in the correct location. If `--persist-to` was not set, it would create the data in the `.wrangler` folder. @@ -120,7 +115,7 @@ API_ACCOUNT_ID = "local_example_user" There may be times you want to develop against remote resources and bindings. To run `wrangler dev` in remote mode, add the `--remote` flag, which will run both your code and resources remotely: ```sh -$ npx wrangler dev --remote +npx wrangler dev --remote ``` For some products like KV and R2, remote resources used for `wrangler dev --remote` must be specified with preview ID/names in `wrangler.toml` such as `preview_id` for KV or `preview_bucket name` for R2. Resources used for remote mode (preview) can be different from resources used for production to prevent changing production data during development. To use production data in `wrangler dev --remote`, set the preview ID/name of the resource to the ID/name of your production resource. @@ -131,13 +126,11 @@ You can customize how `wrangler dev` works to fit your needs. Refer to [the `wra :::caution - There is a bug associated with how outgoing requests are handled when using `wrangler dev --remote`. For more information, read the [Known issues section](/workers/platform/known-issues/#wrangler-dev). - ::: ## Related resources -* [D1 local development](/d1/build-with-d1/local-development/) - The official D1 guide to local development and testing. -* [Debugging tools](/workers/testing/debugging-tools) - Tools to help you diagnose issues and gain insight into your Workers. +- [D1 local development](/d1/build-with-d1/local-development/) - The official D1 guide to local development and testing. +- [Debugging tools](/workers/testing/debugging-tools) - Tools to help you diagnose issues and gain insight into your Workers. diff --git a/src/content/docs/workers/testing/vitest-integration/get-started/migrate-from-miniflare-2.mdx b/src/content/docs/workers/testing/vitest-integration/get-started/migrate-from-miniflare-2.mdx index 3ecb005d080edf..1402e1cefe2451 100644 --- a/src/content/docs/workers/testing/vitest-integration/get-started/migrate-from-miniflare-2.mdx +++ b/src/content/docs/workers/testing/vitest-integration/get-started/migrate-from-miniflare-2.mdx @@ -7,7 +7,6 @@ head: [] description: Migrate from [Miniflare 2](https://github.com/cloudflare/miniflare?tab=readme-ov-file) to the Workers Vitest integration. - --- [Miniflare 2](https://github.com/cloudflare/miniflare?tab=readme-ov-file) provided custom environments for Jest and Vitest in the `jest-environment-miniflare` and `vitest-environment-miniflare` packages respectively. @@ -15,18 +14,14 @@ The `@cloudflare/vitest-pool-workers` package provides similar functionality usi :::caution - Cloudflare no longer provides a Jest testing environment for Workers. If you previously used Jest, you will need to [migrate to Vitest](https://vitest.dev/guide/migration.html#migrating-from-jest) first, then follow the rest of this guide. Vitest provides built-in support for TypeScript, ES modules, and hot-module reloading for tests out-of-the-box. - ::: :::caution - The Workers Vitest integration does not support testing Workers using the service worker format. [Migrate to ES modules format](/workers/reference/migrate-to-module-workers/) first. 
- ::: ## Install the Workers Vitest integration @@ -34,9 +29,9 @@ The Workers Vitest integration does not support testing Workers using the servic First, you will need to uninstall the old environment and install the new pool. Vitest environments can only customize the global scope, whereas pools can run tests using a completely different runtime. In this case, the pool runs your tests inside [`workerd`](https://github.com/cloudflare/workerd) instead of Node.js. ```sh -$ npm uninstall vitest-environment-miniflare -$ npm install --save-dev --save-exact vitest@1.5.0 -$ npm install --save-dev @cloudflare/vitest-pool-workers +npm uninstall vitest-environment-miniflare +npm install --save-dev --save-exact vitest@1.5.0 +npm install --save-dev @cloudflare/vitest-pool-workers ``` ## Update your Vitest configuration file @@ -95,11 +90,11 @@ If you are using TypeScript, add an ambient `.d.ts` declaration file defining a ```ts declare module "cloudflare:test" { - interface ProvidedEnv { - NAMESPACE: KVNamespace; - } - // ...or if you have an existing `Env` type... - interface ProvidedEnv extends Env {} + interface ProvidedEnv { + NAMESPACE: KVNamespace; + } + // ...or if you have an existing `Env` type... + interface ProvidedEnv extends Env {} } ``` @@ -133,8 +128,8 @@ The `new ExecutionContext()` constructor and `getMiniflareWaitUntil()` function The `getMiniflareFetchMock()` function has been replaced with the new `fetchMock` helper from the `cloudflare:test` module. `fetchMock` has the same type as the return type of `getMiniflareFetchMock()`. There are a couple of differences between `fetchMock` and the previous return value of `getMiniflareFetchMock()`: -* `fetchMock` is deactivated by default, whereas previously it would start activated. This deactivation prevents unnecessary buffering of request bodies if you are not using `fetchMock`. You will need to call `fetchMock.activate()` before calling `fetch()` to enable it. -* `fetchMock` is reset at the start of each test run, whereas previously, interceptors added in previous runs would apply to the current one. This ensures test runs are not affected by previous runs. +- `fetchMock` is deactivated by default, whereas previously it would start activated. This deactivation prevents unnecessary buffering of request bodies if you are not using `fetchMock`. You will need to call `fetchMock.activate()` before calling `fetch()` to enable it. +- `fetchMock` is reset at the start of each test run, whereas previously, interceptors added in previous runs would apply to the current one. This ensures test runs are not affected by previous runs. ```diff import { beforeAll, afterAll } from "vitest"; @@ -186,7 +181,7 @@ The `getMiniflareDurableObjectStorage()`, `getMiniflareDurableObjectState()`, `g }); ``` -The `flushMiniflareDurableObjectAlarms()` function has been replaced with the `runDurableObjectAlarm()` function from the `cloudflare:test` module. The `runDurableObjectAlarm()` function accepts a single `DurableObjectStub` and returns a `Promise` that resolves to `true` if an alarm was scheduled and the `alarm()` handler was executed, or `false` otherwise. To "flush" multiple instances' alarms, call `runDurableObjectAlarm()` in a loop. +The `flushMiniflareDurableObjectAlarms()` function has been replaced with the `runDurableObjectAlarm()` function from the `cloudflare:test` module. 
The `runDurableObjectAlarm()` function accepts a single `DurableObjectStub` and returns a `Promise` that resolves to `true` if an alarm was scheduled and the `alarm()` handler was executed, or `false` otherwise. To "flush" multiple instances' alarms, call `runDurableObjectAlarm()` in a loop. ```diff + import { env, runDurableObjectAlarm } from "cloudflare:test"; diff --git a/src/content/docs/workers/testing/vitest-integration/get-started/write-your-first-test.mdx b/src/content/docs/workers/testing/vitest-integration/get-started/write-your-first-test.mdx index 3ea9875438f55c..5a366f1dd6fad9 100644 --- a/src/content/docs/workers/testing/vitest-integration/get-started/write-your-first-test.mdx +++ b/src/content/docs/workers/testing/vitest-integration/get-started/write-your-first-test.mdx @@ -5,37 +5,34 @@ sidebar: order: 1 head: [] description: Write unit tests against Workers. - --- -import { TabItem, Tabs } from "~/components" +import { TabItem, Tabs } from "~/components"; This guide will instruct you through installing and setting up the `@cloudflare/vitest-pool-workers` package. This will help you get started writing tests against your Workers using Vitest. The `@cloudflare/vitest-pool-workers` package works by running code inside a Cloudflare Worker that Vitest would usually run inside a [Node.js worker thread](https://nodejs.org/api/worker_threads.html). For examples of tests using `@cloudflare/vitest-pool-workers`, refer to [Recipes](/workers/testing/vitest-integration/recipes/). ## Prerequisites -* Open the root directory of your Worker or [create a new Worker](/workers/get-started/guide/#1-create-a-new-worker-project). +- Open the root directory of your Worker or [create a new Worker](/workers/get-started/guide/#1-create-a-new-worker-project). -* Make sure that your Worker is developed using the ES modules format. To migrate from the service worker format to the ES modules format, refer to the [Migrate to the ES modules format](/workers/reference/migrate-to-module-workers/) guide. +- Make sure that your Worker is developed using the ES modules format. To migrate from the service worker format to the ES modules format, refer to the [Migrate to the ES modules format](/workers/reference/migrate-to-module-workers/) guide. -* In your project's `wrangler.toml` configuration file, define a [compatibility date](/workers/configuration/compatibility-dates/) of `2022-10-31` or higher, and include `nodejs_compat` in your [compatibility flags](/workers/wrangler/configuration/#use-runtime-apis-directly). +- In your project's `wrangler.toml` configuration file, define a [compatibility date](/workers/configuration/compatibility-dates/) of `2022-10-31` or higher, and include `nodejs_compat` in your [compatibility flags](/workers/wrangler/configuration/#use-runtime-apis-directly). ## Install Vitest and `@cloudflare/vitest-pool-workers` Open a terminal window and make sure you are in your project's root directory. Once you have confirmed that, run: ```sh -$ npm install vitest@1.5.0 --save-dev --save-exact -$ npm install @cloudflare/vitest-pool-workers --save-dev +npm install vitest@1.5.0 --save-dev --save-exact +npm install @cloudflare/vitest-pool-workers --save-dev ``` The above commands will add the packages to your `package.json` file and install them as dev dependencies. :::note - -Currently, the `@cloudflare/vitest-pool-workers` package *only* works with Vitest 1.5.0. - +Currently, the `@cloudflare/vitest-pool-workers` package _only_ works with Vitest 1.5.0. 
::: @@ -48,22 +45,20 @@ You can reference a `wrangler.toml` file to leverage its `main` entry point, [co import { defineWorkersConfig } from "@cloudflare/vitest-pool-workers/config"; export default defineWorkersConfig({ - test: { - poolOptions: { - workers: { - wrangler: { configPath: "./wrangler.toml" }, - }, - }, - }, + test: { + poolOptions: { + workers: { + wrangler: { configPath: "./wrangler.toml" }, + }, + }, + }, }); ``` :::note - For a full list of available configuration options, refer to [Configuration](/workers/testing/vitest-integration/configuration/). - ::: ### Add configuration options via Miniflare @@ -74,16 +69,16 @@ For example, to add bindings that will be used in tests, you can add `miniflare` ```js null {6-8} export default defineWorkersConfig({ - test: { - poolOptions: { - workers: { - main: "./src/index.ts", - miniflare: { - kvNamespaces: ["TEST_NAMESPACE"], - }, - }, - }, - }, + test: { + poolOptions: { + workers: { + main: "./src/index.ts", + miniflare: { + kvNamespaces: ["TEST_NAMESPACE"], + }, + }, + }, + }, }); ``` @@ -91,10 +86,8 @@ This configuration would add a KV namespace `TEST_NAMESPACE` that was only acces :::note - For a full list of available Miniflare options, refer to the [Miniflare `WorkersOptions` API documentation](https://github.com/cloudflare/workers-sdk/tree/main/packages/miniflare#interface-workeroptions). - ::: ## Define types @@ -125,9 +118,9 @@ If you created a basic Worker via the guide listed above, you should have the fo ```js export default { - async fetch(request, env, ctx) { - return new Response("Hello World!"); - }, + async fetch(request, env, ctx) { + return new Response("Hello World!"); + }, }; ``` @@ -148,28 +141,36 @@ This Worker receives a request, and returns a response of `"Hello World!"`. 
In o ```js -import { env, createExecutionContext, waitOnExecutionContext } from "cloudflare:test"; +import { + env, + createExecutionContext, + waitOnExecutionContext, +} from "cloudflare:test"; import { describe, it, expect } from "vitest"; // Could import any other source file/function here import worker from "../src"; describe("Hello World worker", () => { - it("responds with Hello World!", async () => { - const request = new Request("http://example.com"); - // Create an empty context to pass to `worker.fetch()` - const ctx = createExecutionContext(); - const response = await worker.fetch(request, env, ctx); - // Wait for all `Promise`s passed to `ctx.waitUntil()` to settle before running test assertions - await waitOnExecutionContext(ctx); - expect(await response.text()).toBe("Hello World!"); - }); + it("responds with Hello World!", async () => { + const request = new Request("http://example.com"); + // Create an empty context to pass to `worker.fetch()` + const ctx = createExecutionContext(); + const response = await worker.fetch(request, env, ctx); + // Wait for all `Promise`s passed to `ctx.waitUntil()` to settle before running test assertions + await waitOnExecutionContext(ctx); + expect(await response.text()).toBe("Hello World!"); + }); }); ``` ```ts -import { env, createExecutionContext, waitOnExecutionContext } from "cloudflare:test"; +import { + env, + createExecutionContext, + waitOnExecutionContext, +} from "cloudflare:test"; import { describe, it, expect } from "vitest"; // Could import any other source file/function here import worker from "../src"; @@ -179,15 +180,15 @@ import worker from "../src"; const IncomingRequest = Request; describe("Hello World worker", () => { - it("responds with Hello World!", async () => { - const request = new IncomingRequest("http://example.com"); - // Create an empty context to pass to `worker.fetch()` - const ctx = createExecutionContext(); - const response = await worker.fetch(request, env, ctx); - // Wait for all `Promise`s passed to `ctx.waitUntil()` to settle before running test assertions - await waitOnExecutionContext(ctx); - expect(await response.text()).toBe("Hello World!"); - }); + it("responds with Hello World!", async () => { + const request = new IncomingRequest("http://example.com"); + // Create an empty context to pass to `worker.fetch()` + const ctx = createExecutionContext(); + const response = await worker.fetch(request, env, ctx); + // Wait for all `Promise`s passed to `ctx.waitUntil()` to settle before running test assertions + await waitOnExecutionContext(ctx); + expect(await response.text()).toBe("Hello World!"); + }); }); ``` @@ -199,15 +200,15 @@ Add functionality to handle a `404` path on the Worker. 
This functionality will ```js export default { - async fetch(request, env, ctx) { - const { pathname } = new URL(request.url); + async fetch(request, env, ctx) { + const { pathname } = new URL(request.url); - if (pathname === "/404") { - return new Response("Not found", { status: 404 }); - } + if (pathname === "/404") { + return new Response("Not found", { status: 404 }); + } - return new Response("Hello World!"); - }, + return new Response("Hello World!"); + }, }; ``` @@ -216,14 +217,14 @@ export default { ```ts export default { async fetch(request, env, ctx): Promise { - const { pathname } = new URL(request.url); + const { pathname } = new URL(request.url); - if(pathname === "/404") { - return new Response("Not found", { status: 404 }); - } + if (pathname === "/404") { + return new Response("Not found", { status: 404 }); + } return new Response("Hello World!"); - } + }, } satisfies ExportedHandler; ``` @@ -235,14 +236,14 @@ To test this, add the following to your test file: ```js it("responds with not found and proper status for /404", async () => { - const request = new Request("http://example.com/404"); - // Create an empty context to pass to `worker.fetch()` - const ctx = createExecutionContext(); - const response = await worker.fetch(request, env, ctx); - // Wait for all `Promise`s passed to `ctx.waitUntil()` to settle before running test assertions - await waitOnExecutionContext(ctx); - expect(await response.status).toBe(404); - expect(await response.text()).toBe("Not found"); + const request = new Request("http://example.com/404"); + // Create an empty context to pass to `worker.fetch()` + const ctx = createExecutionContext(); + const response = await worker.fetch(request, env, ctx); + // Wait for all `Promise`s passed to `ctx.waitUntil()` to settle before running test assertions + await waitOnExecutionContext(ctx); + expect(await response.status).toBe(404); + expect(await response.text()).toBe("Not found"); }); ``` @@ -250,14 +251,14 @@ it("responds with not found and proper status for /404", async () => { ```ts it("responds with not found and proper status for /404", async () => { - const request = new IncomingRequest("http://example.com/404"); - // Create an empty context to pass to `worker.fetch()` - const ctx = createExecutionContext(); - const response = await worker.fetch(request, env, ctx); - // Wait for all `Promise`s passed to `ctx.waitUntil()` to settle before running test assertions - await waitOnExecutionContext(ctx); - expect(await response.status).toBe(404); - expect(await response.text()).toBe("Not found"); + const request = new IncomingRequest("http://example.com/404"); + // Create an empty context to pass to `worker.fetch()` + const ctx = createExecutionContext(); + const response = await worker.fetch(request, env, ctx); + // Wait for all `Promise`s passed to `ctx.waitUntil()` to settle before running test assertions + await waitOnExecutionContext(ctx); + expect(await response.status).toBe(404); + expect(await response.text()).toBe("Not found"); }); ``` @@ -265,4 +266,4 @@ it("responds with not found and proper status for /404", async () => { ## Related resources -* [`@cloudflare/vitest-pool-workers` GitHub repository](https://github.com/cloudflare/workers-sdk/tree/main/fixtures/vitest-pool-workers-examples) - Examples of tests using the `@cloudflare/vitest-pool-workers` package. 
+- [`@cloudflare/vitest-pool-workers` GitHub repository](https://github.com/cloudflare/workers-sdk/tree/main/fixtures/vitest-pool-workers-examples) - Examples of tests using the `@cloudflare/vitest-pool-workers` package. diff --git a/src/content/docs/workers/tutorials/build-a-jamstack-app/index.mdx b/src/content/docs/workers/tutorials/build-a-jamstack-app/index.mdx index 319b63f959dfc9..07f98eba98e8af 100644 --- a/src/content/docs/workers/tutorials/build-a-jamstack-app/index.mdx +++ b/src/content/docs/workers/tutorials/build-a-jamstack-app/index.mdx @@ -8,12 +8,9 @@ products: - KV languages: - JavaScript - --- - - -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; In this tutorial, you will build a todo list application using HTML, CSS, and JavaScript. The application data will be stored in [Workers KV](/kv/api/). @@ -32,14 +29,22 @@ If you would like to see the finished code for this project, find the [project o First, use the [`create-cloudflare`](https://www.npmjs.com/package/create-cloudflare) CLI tool to create a new Cloudflare Workers project named `todos`. In this tutorial, you will use the default `Hello World` template to create a Workers project. - + - + Move into your newly created directory: ```sh -$ cd todos +cd todos ``` Inside of your new `todos` Worker project directory, `index.js` represents the entry point to your Cloudflare Workers application. @@ -48,9 +53,9 @@ All incoming HTTP requests to a Worker are passed to the [`fetch()` handler](/wo ```js export default { - async fetch(request, env, ctx) { - return new Response('Hello World!'); - }, + async fetch(request, env, ctx) { + return new Response("Hello World!"); + }, }; ``` @@ -79,7 +84,7 @@ To begin, you need to understand how to populate your todo list with actual data To get started with KV, set up a namespace. All of your cached data will be stored inside that namespace and, with configuration, you can access that namespace inside the Worker with a predefined variable. Use Wrangler to create a new namespace called `TODOS` with the [`kv:namespace create` command](/workers/wrangler/commands/#create-3) and get the associated namespace ID by running the following command in your terminal: ```sh title="Create a new KV namespace" -$ npx wrangler kv:namespace create "TODOS" --preview +npx wrangler kv:namespace create "TODOS" --preview ``` The associated namespace can be combined with a `--preview` flag to interact with a preview namespace instead of a production namespace. Namespaces can be added to your application by defining them inside your Wrangler configuration. 
Copy your newly created namespace ID, and in your `wrangler.toml`, define a `kv_namespaces` key to set up your namespace: @@ -96,19 +101,19 @@ Start storing data by defining an initial set of data, which you will put inside ```js export default { - async fetch(request, env, ctx) { - const defaultData = { - todos: [ - { - id: 1, - name: 'Finish the Cloudflare Workers blog post', - completed: false, - }, - ], - }; - await env.TODOS.put('data', JSON.stringify(defaultData)); - return new Response('Hello World!'); - }, + async fetch(request, env, ctx) { + const defaultData = { + todos: [ + { + id: 1, + name: "Finish the Cloudflare Workers blog post", + completed: false, + }, + ], + }; + await env.TODOS.put("data", JSON.stringify(defaultData)); + return new Response("Hello World!"); + }, }; ``` @@ -118,31 +123,31 @@ Given the presence of data in the cache and the assumption that your cache is ev ```js export default { - async fetch(request, env, ctx) { - const defaultData = { - todos: [ - { - id: 1, - name: 'Finish the Cloudflare Workers blog post', - completed: false, - }, - ], - }; - const setCache = data => env.TODOS.put('data', data); - const getCache = () => env.TODOS.get('data'); - - let data; - - const cache = await getCache(); - if (!cache) { - await setCache(JSON.stringify(defaultData)); - data = defaultData; - } else { - data = JSON.parse(cache); - } - - return new Response(JSON.stringify(data)); - }, + async fetch(request, env, ctx) { + const defaultData = { + todos: [ + { + id: 1, + name: "Finish the Cloudflare Workers blog post", + completed: false, + }, + ], + }; + const setCache = (data) => env.TODOS.put("data", data); + const getCache = () => env.TODOS.get("data"); + + let data; + + const cache = await getCache(); + if (!cache) { + await setCache(JSON.stringify(defaultData)); + data = defaultData; + } else { + data = JSON.parse(cache); + } + + return new Response(JSON.stringify(data)); + }, }; ``` @@ -226,7 +231,7 @@ Your static page can take in `window.todos` and render HTML based on it, but you First, your `html` variable will change to a function. The function will take in a `todos` argument, which will populate the `window.todos` variable in the above code sample: ```js null {1,6} -const html = todos => ` +const html = (todos) => ` @@ -260,21 +265,21 @@ Add this new functionality in `fetch`: if the request method is a PUT, it will t ```js null {5,6,7,8,9,10,11,12,13,14} export default { - async fetch(request, env, ctx) { - const setCache = data => env.TODOS.put('data', data); - - if (request.method === 'PUT') { - const body = await request.text(); - try { - JSON.parse(body); - await setCache(body); - return new Response(body, { status: 200 }); - } catch (err) { - return new Response(err, { status: 500 }); - } - } - // previous code - }, + async fetch(request, env, ctx) { + const setCache = (data) => env.TODOS.put("data", data); + + if (request.method === "PUT") { + const body = await request.text(); + try { + JSON.parse(body); + await setCache(body); + return new Response(body, { status: 200 }); + } catch (err) { + return new Response(err, { status: 500 }); + } + } + // previous code + }, }; ``` @@ -283,7 +288,7 @@ Check that the request is a `PUT` and wrap the remainder of the code in a `try/c With this script, you can now add some dynamic functionality to your HTML page to actually hit this route. First, create an input for your todo name and a button for submitting the todo. 
```js null {5,6,7,8} -const html = todos => ` +const html = (todos) => ` @@ -299,7 +304,7 @@ const html = todos => ` Given that input and button, add a corresponding JavaScript function to watch for clicks on the button — once the button is clicked, the browser will `PUT` to `/` and submit the todo. ```js null {8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23} -const html = todos => ` +const html = (todos) => ` @@ -330,7 +335,7 @@ const html = todos => ` This code updates the cache. Remember that the KV cache is eventually consistent — even if you were to update your Worker to read from the cache and return it, you have no guarantees it will actually be up to date. Instead, update the list of todos locally, by taking your original code for rendering the todo list, making it a reusable function called `populateTodos`, and calling it when the page loads and when the cache request has finished: ```js null {6,7,8,9,10,11,12,13,14,15,16} -const html = todos => ` +const html = (todos) => ` @@ -380,7 +385,7 @@ Luckily, a great deal of the infrastructure for this work is already in place. Y To start, the `populateTodos` function can be updated to generate a `div` for each todo. In addition, move the name of the todo into a child element of that `div`: ```js null {11,12,13} -const html = todos => ` +const html = (todos) => ` @@ -405,7 +410,7 @@ You have designed the client-side part of this code to handle an array of todos To start, it would be useful to attach the ID of each todo in the HTML. By doing this, you can then refer to the element later in order to correspond it to the todo in the JavaScript part of your code. Data attributes and the corresponding `dataset` method in JavaScript are a perfect way to implement this. When you generate your `div` element for each todo, you can attach a data attribute called todo to each `div`: ```js null {11} -const html = todos => ` +const html = (todos) => ` @@ -438,7 +443,7 @@ Inside your HTML, each `div` for a todo now has an attached data attribute, whic You can now generate a checkbox for each todo element. This checkbox will default to unchecked for new todos but you can mark it as checked as the element is rendered in the window: ```js null {13,14,15,17} -const html = todos => ` +const html = (todos) => ` @@ -465,7 +470,7 @@ const html = todos => ` The checkbox is set up to correctly reflect the value of completed on each todo but it does not yet update when you actually check the box. To do this, attach the `completeTodo` function as an event listener on the `click` event. Inside the function, inspect the checkbox element, find its parent (the todo `div`), and use its `todo` data attribute to find the corresponding todo in the data array. You can toggle the completed status, update its properties, and rerender the UI: ```js null {9,13,14,15,16,17,18,19,20,21,22} -const html = todos => ` +const html = (todos) => ` @@ -503,51 +508,51 @@ You may also want to add user-specific caching. 
Right now, the cache key is alwa ```js null {15,16,22,33} export default { - async fetch(request, env, ctx) { - const defaultData = { - todos: [ - { - id: 1, - name: 'Finish the Cloudflare Workers blog post', - completed: false, - }, - ], - }; - const setCache = (key, data) => env.TODOS.put(key, data); - const getCache = key => env.TODOS.get(key); - - const ip = request.headers.get('CF-Connecting-IP'); - const myKey = `data-${ip}`; - - if (request.method === 'PUT') { - const body = await request.text(); - try { - JSON.parse(body); - await setCache(myKey, body); - return new Response(body, { status: 200 }); - } catch (err) { - return new Response(err, { status: 500 }); - } - } - - let data; - - const cache = await getCache(); - if (!cache) { - await setCache(myKey, JSON.stringify(defaultData)); - data = defaultData; - } else { - data = JSON.parse(cache); - } - - const body = html(JSON.stringify(data.todos).replace(/ env.TODOS.put(key, data); + const getCache = (key) => env.TODOS.get(key); + + const ip = request.headers.get("CF-Connecting-IP"); + const myKey = `data-${ip}`; + + if (request.method === "PUT") { + const body = await request.text(); + try { + JSON.parse(body); + await setCache(myKey, body); + return new Response(body, { status: 200 }); + } catch (err) { + return new Response(err, { status: 500 }); + } + } + + let data; + + const cache = await getCache(); + if (!cache) { + await setCache(myKey, JSON.stringify(defaultData)); + data = defaultData; + } else { + data = JSON.parse(cache); + } + + const body = html(JSON.stringify(data.todos).replace(/ ` +const html = (todos) => ` @@ -639,51 +644,51 @@ const html = todos => ` `; export default { - async fetch(request, env, ctx) { - const defaultData = { - todos: [ - { - id: 1, - name: 'Finish the Cloudflare Workers blog post', - completed: false, - }, - ], - }; - const setCache = (key, data) => env.TODOS.put(key, data); - const getCache = key => env.TODOS.get(key); - - const ip = request.headers.get('CF-Connecting-IP'); - const myKey = `data-${ip}`; - - if (request.method === 'PUT') { - const body = await request.text(); - try { - JSON.parse(body); - await setCache(myKey, body); - return new Response(body, { status: 200 }); - } catch (err) { - return new Response(err, { status: 500 }); - } - } - - let data; - - const cache = await getCache(); - if (!cache) { - await setCache(myKey, JSON.stringify(defaultData)); - data = defaultData; - } else { - data = JSON.parse(cache); - } - - const body = html(JSON.stringify(data.todos).replace(/ env.TODOS.put(key, data); + const getCache = (key) => env.TODOS.get(key); + + const ip = request.headers.get("CF-Connecting-IP"); + const myKey = `data-${ip}`; + + if (request.method === "PUT") { + const body = await request.text(); + try { + JSON.parse(body); + await setCache(myKey, body); + return new Response(body, { status: 200 }); + } catch (err) { + return new Response(err, { status: 500 }); + } + } + + let data; + + const cache = await getCache(); + if (!cache) { + await setCache(myKey, JSON.stringify(defaultData)); + data = defaultData; + } else { + data = JSON.parse(cache); + } + + const body = html(JSON.stringify(data.todos).replace(/ - - + + + Then, move into your newly created directory: ```sh -$ cd qr-code-generator +cd qr-code-generator ``` Inside of your new `qr-code-generator` Worker project directory, `index.js` represents the entry point to your Cloudflare Workers application. 
@@ -40,7 +49,7 @@ All Cloudflare Workers applications start by listening for `fetch` events, which ```js export default { async fetch(request, env, ctx) { - return new Response('Hello Worker!'); + return new Response("Hello Worker!"); }, }; ``` @@ -60,8 +69,8 @@ At this point in the tutorial, your Worker function can receive requests and ret ```js null {2,3,4} export default { async fetch(request, env, ctx) { - if (request.method === 'POST') { - return new Response('Hello Worker!'); + if (request.method === "POST") { + return new Response("Hello Worker!"); } }, }; @@ -72,13 +81,13 @@ Currently, if an incoming request is not a `POST`, the function will return `und ```js null {7,8} export default { async fetch(request, env, ctx) { - if (request.method === 'POST') { - return new Response('Hello Worker!'); + if (request.method === "POST") { + return new Response("Hello Worker!"); } - return new Response('Expected POST request', { + return new Response("Expected POST request", { status: 405, - }) + }); }, }; ``` @@ -87,14 +96,15 @@ You have established the basic flow of the request. You will now set up a respon ```js null {7,8,9,10} export default { - async fetch(request, env, ctx) { - if (request.method === "POST") {} - } -} + async fetch(request, env, ctx) { + if (request.method === "POST") { + } + }, +}; async function generateQRCode(request) { - // TODO: Include QR code generation - return new Response("Hello worker!") + // TODO: Include QR code generation + return new Response("Hello worker!"); } ``` @@ -102,12 +112,12 @@ With the `generateQRCode` function filled out, call it within `fetch` function a ```js null {4} export default { - async fetch(request, env, ctx) { - if (request.method === "POST") { - return generateQRCode(request) - } - } -} + async fetch(request, env, ctx) { + if (request.method === "POST") { + return generateQRCode(request); + } + }, +}; ``` ## 3. Build a QR code generator @@ -115,17 +125,17 @@ export default { All projects deployed to Cloudflare Workers support npm packages. This support makes it easy to rapidly build out functionality in your Workers. The [`qr-image`](https://github.com/alexeyten/qr-image) package is a great way to take text and encode it into a QR code. The package supports generating the QR codes in a number of file formats (such as PNG, the default, and SVG) and configuring other aspects of the generated QR code. In the command line, install and save `qr-image` to your project’s `package.json`: ```sh title="Installing the qr-image package" -$ npm install --save qr-image +npm install --save qr-image ``` In `index.js`, import the `qr-image` package as the variable `qr`. In the `generateQRCode` function, parse the incoming request as JSON using `request.json`, and generate a QR code from `text` using `qr.imageSync`: ```js null {1,2,3,4,5,6} -const qr = require("qr-image") +const qr = require("qr-image"); async function generateQRCode(request) { - const { text } = await request.json() - const qr_png = qr.imageSync(text || "https://workers.dev") + const { text } = await request.json(); + const qr_png = qr.imageSync(text || "https://workers.dev"); } ``` @@ -133,10 +143,10 @@ By default, the QR code is generated as a PNG. 
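The tutorial keeps the PNG default, but `qr-image` also accepts an options object as its second argument. As a rough sketch, the `type` option below reflects the package's documented support for other output formats; verify it against the `qr-image` README before relying on it:

```js
const qr = require("qr-image");

// Hypothetical variation: ask qr-image for an SVG instead of a PNG.
// `type: "svg"` is taken from the package's documented options.
const qr_svg = qr.imageSync("https://workers.dev", { type: "svg" });

// An SVG body would also need a matching content type when returned:
// new Response(qr_svg, { headers: { "Content-Type": "image/svg+xml" } });
```

The rest of this tutorial continues with the PNG default.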
Construct a new instance of `Resp ```js null {3,4,5} async function generateQRCode(request) { - const { text } = await request.json() - const headers = { "Content-Type": "image/png" } - const qr_png = qr.imageSync(text || "https://workers.dev") - return new Response(qr_png, { headers }) + const { text } = await request.json(); + const headers = { "Content-Type": "image/png" }; + const qr_png = qr.imageSync(text || "https://workers.dev"); + return new Response(qr_png, { headers }); } ``` @@ -152,23 +162,23 @@ The Worker will work if a user sends a `POST` request to a route, but it would b ```js null {23-54} export default { - async fetch(request, env, ctx) { - if (request.method === 'POST') { - return generateQRCode(request) - } - - return new Response(landing, { - headers: { - "Content-Type": "text/html" - } - }) -}, -} + async fetch(request, env, ctx) { + if (request.method === "POST") { + return generateQRCode(request); + } + + return new Response(landing, { + headers: { + "Content-Type": "text/html", + }, + }); + }, +}; async function generateQRCode(request) { - const { text } = await request.json() - const headers = { "Content-Type": "image/png" } - const qr_png = qr.imageSync(text || "https://workers.dev") + const { text } = await request.json(); + const headers = { "Content-Type": "image/png" }; + const qr_png = qr.imageSync(text || "https://workers.dev"); return new Response(qr_png, { headers }); } @@ -197,7 +207,7 @@ const landing = ` }) } -` +`; ``` The `landing` variable, which is a static HTML string, sets up an `input` tag and a corresponding `button`, which calls the `generateQRCode` function. This function will make an HTTP `POST` request back to your Worker, allowing you to see the corresponding QR code image returned on the page. @@ -205,26 +215,26 @@ The `landing` variable, which is a static HTML string, sets up an `input` tag an With the above steps complete, your Worker is ready. The full version of the code looks like this: ```js -const qr = require('qr-image'); +const qr = require("qr-image"); export default { async fetch(request, env, ctx) { - if (request.method === 'POST') { - return generateQRCode(request) + if (request.method === "POST") { + return generateQRCode(request); } return new Response(landing, { headers: { - "Content-Type": "text/html" - } - }) + "Content-Type": "text/html", + }, + }); }, }; async function generateQRCode(request) { - const { text } = await request.json() - const headers = { "Content-Type": "image/png" } - const qr_png = qr.imageSync(text || "https://workers.dev") + const { text } = await request.json(); + const headers = { "Content-Type": "image/png" }; + const qr_png = qr.imageSync(text || "https://workers.dev"); return new Response(qr_png, { headers }); } @@ -253,7 +263,7 @@ const landing = ` }) } -` +`; ``` ## 5. Deploy your Worker @@ -263,7 +273,7 @@ With all the above steps complete, you have written the code for a QR code gener Wrangler has built-in support for bundling, uploading, and releasing your Cloudflare Workers application. To do this, run `npx wrangler deploy`, which will build and deploy your code. 
```sh title="Deploy your Worker project" -$ npx wrangler deploy +npx wrangler deploy ``` ## Related resources diff --git a/src/content/docs/workers/tutorials/build-a-slackbot/index.mdx b/src/content/docs/workers/tutorials/build-a-slackbot/index.mdx index e100e88d69a9a6..da3d8388def79d 100644 --- a/src/content/docs/workers/tutorials/build-a-slackbot/index.mdx +++ b/src/content/docs/workers/tutorials/build-a-slackbot/index.mdx @@ -8,12 +8,9 @@ tags: - Hono languages: - TypeScript - --- - - -import { Render, TabItem, Tabs, PackageManagers } from "~/components" +import { Render, TabItem, Tabs, PackageManagers } from "~/components"; In this tutorial, you will build a [Slack](https://slack.com) bot using [Cloudflare Workers](/workers/). Your bot will make use of GitHub webhooks to send messages to a Slack channel when issues are updated or created, and allow users to write a command to look up GitHub issues from inside Slack. @@ -23,7 +20,7 @@ This tutorial is recommended for people who are familiar with writing web applic If you would like to review the code or how the bot works in an actual Slack channel before proceeding with this tutorial, you can access the final version of the codebase [on GitHub](https://github.com/yusukebe/workers-slack-bot). From GitHub, you can add your own Slack API keys and deploy it to your own Slack channels for testing. -*** +--- @@ -99,24 +96,24 @@ When your webhook is created, it will attempt to send a test payload to your app To initiate the project, use the command line interface [C3 (create-cloudflare-cli)](https://github.com/cloudflare/workers-sdk/tree/main/packages/create-cloudflare). - + Follow these steps to create a Hono project. -* For *What would you like to start with*?, select `Framework Starter`. -* For *Which development framework do you want to use?*, select `Hono`. -* For, *Do you want to deploy your application?*, select `No`. +- For _What would you like to start with_?, select `Framework Starter`. +- For _Which development framework do you want to use?_, select `Hono`. +- For, _Do you want to deploy your application?_, select `No`. Go to the `slack-bot` directory: ```sh -$ cd slack-bot +cd slack-bot ``` Open `src/index.ts` in an editor to find the following code. ```ts -import { Hono } from 'hono'; +import { Hono } from "hono"; type Bindings = { [key in keyof CloudflareBindings]: CloudflareBindings[key]; @@ -124,8 +121,8 @@ type Bindings = { const app = new Hono<{ Bindings: Bindings }>(); -app.get('/', (c) => { - return c.text('Hello Hono!'); +app.get("/", (c) => { + return c.text("Hello Hono!"); }); export default app; @@ -138,13 +135,13 @@ To run the application on your local machine, execute the following command. ```sh title="Run your application locally" -$ npm run dev +npm run dev ``` ```sh title="Run your application locally" -$ yarn dev +yarn dev ``` @@ -164,12 +161,12 @@ You can create your application in several files instead of writing all endpoint For example, imagine the following Web API application. ```ts -import { Hono } from 'hono'; +import { Hono } from "hono"; const app = new Hono(); -app.get('/posts', (c) => c.text('Posts!')); -app.post('/posts', (c) => c.text('Created!', 201)); +app.get("/posts", (c) => c.text("Posts!")); +app.post("/posts", (c) => c.text("Created!", 201)); export default app; ``` @@ -177,12 +174,12 @@ export default app; You can add the routes under `/api/v1`. 
```ts null {2,6} -import { Hono } from 'hono'; -import api from './api'; +import { Hono } from "hono"; +import api from "./api"; const app = new Hono(); -app.route('/api/v1', api); +app.route("/api/v1", api); export default app; ``` @@ -198,22 +195,22 @@ The Slack bot will have two child applications called "route" each. Create the route files in a directory named `routes`. ```sh title="Create new folders and files" -$ mkdir -p src/routes -$ touch src/routes/lookup.ts -$ touch src/routes/webhook.ts +mkdir -p src/routes +touch src/routes/lookup.ts +touch src/routes/webhook.ts ``` Then update the main application. ```ts null {2,3,7,8} -import { Hono } from 'hono'; -import lookup from './routes/lookup'; -import webhook from './routes/webhook'; +import { Hono } from "hono"; +import lookup from "./routes/lookup"; +import webhook from "./routes/webhook"; const app = new Hono(); -app.route('/lookup', lookup); -app.route('/webhook', webhook); +app.route("/lookup", lookup); +app.route("/webhook", webhook); export default app; ``` @@ -224,24 +221,24 @@ Before implementing the actual functions, you need to define the TypeScript type ```ts export type Bindings = { - SLACK_WEBHOOK_URL: string -} + SLACK_WEBHOOK_URL: string; +}; export type Issue = { - html_url: string - title: string - body: string - state: string - created_at: string - number: number - user: User -} + html_url: string; + title: string; + body: string; + state: string; + created_at: string; + number: number; + user: User; +}; type User = { - html_url: string - login: string - avatar_url: string -} + html_url: string; + login: string; + avatar_url: string; +}; ``` ### Creating the lookup route @@ -249,7 +246,7 @@ type User = { Start creating the lookup route in `src/routes/lookup.ts`. ```ts -import { Hono } from 'hono'; +import { Hono } from "hono"; const app = new Hono(); @@ -285,13 +282,13 @@ With Slack slash commands, you can respond to a slash command by returning struc To begin, the `lookup` route should parse the messages coming from Slack. As previously mentioned, the Slack API sends an HTTP POST in URL Encoded format. You can get the variable `text` by parsing it with `c.req.json()`. ```ts null {5,6,7,8,9,10} -import { Hono } from 'hono'; +import { Hono } from "hono"; const app = new Hono(); -app.post('/', async (c) => { +app.post("/", async (c) => { const { text } = await c.req.parseBody(); - if (typeof text !== 'string') { + if (typeof text !== "string") { return c.notFound(); } }); @@ -304,25 +301,26 @@ Given a `text` variable, that contains text like `cloudflare/wrangler#1`, you sh To do this, create a new file in your application, at `src/utils/github.ts`. This file will contain a number of “utility” functions for working with GitHub’s API. The first of these will be a string parser, called `parseGhIssueString`: ```ts -const ghIssueRegex = /(?[\w.-]*)\/(?[\w.-]*)\#(?\d*)/; +const ghIssueRegex = + /(?[\w.-]*)\/(?[\w.-]*)\#(?\d*)/; export const parseGhIssueString = (text: string) => { const match = text.match(ghIssueRegex); - return match ? match.groups ?? {} : {}; + return match ? (match.groups ?? {}) : {}; }; ``` `parseGhIssueString` takes in a `text` input, matches it against `ghIssueRegex`, and if a match is found, returns the `groups` object from that match, making use of the `owner`, `repo`, and `issue_number` capture groups defined in the regex. 
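To make the parser's behavior concrete, here is a small usage sketch. This snippet is illustrative rather than part of the tutorial's files, and the return values follow from the regex and the capture groups named above:

```ts
import { parseGhIssueString } from "./utils/github"; // adjust the path to wherever you try this

// A well-formed reference produces the three named capture groups:
parseGhIssueString("cloudflare/wrangler#1");
// => { owner: "cloudflare", repo: "wrangler", issue_number: "1" }

// Input that does not match the pattern yields an empty object:
parseGhIssueString("not an issue reference");
// => {}
```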
By exporting this function from `src/utils/github.ts`, you can make use of it back in `src/handlers/lookup.ts`: ```ts null {2,12} -import { Hono } from 'hono'; -import { parseGhIssueString } from '../utils/github'; +import { Hono } from "hono"; +import { parseGhIssueString } from "../utils/github"; const app = new Hono(); -app.post('/', async (c) => { +app.post("/", async (c) => { const { text } = await c.req.parseBody(); - if (typeof text !== 'string') { + if (typeof text !== "string") { return c.notFound(); } @@ -337,32 +335,37 @@ export default app; With this data, you can make your first API lookup to GitHub. Again, make a new function in `src/utils/github.ts`, to make a `fetch` request to the GitHub API for the issue data: ```ts null {8,9,10,11,12} -const ghIssueRegex = /(?[\w.-]*)\/(?[\w.-]*)\#(?\d*)/; +const ghIssueRegex = + /(?[\w.-]*)\/(?[\w.-]*)\#(?\d*)/; export const parseGhIssueString = (text: string) => { - const match = text.match(ghIssueRegex) - return match ? match.groups ?? {} : {} -} - -export const fetchGithubIssue = (owner: string, repo: string, issue_number: string) => { - const url = `https://api.github.com/repos/${owner}/${repo}/issues/${issue_number}` - const headers = { 'User-Agent': 'simple-worker-slack-bot' } - return fetch(url, { headers }) -} + const match = text.match(ghIssueRegex); + return match ? (match.groups ?? {}) : {}; +}; + +export const fetchGithubIssue = ( + owner: string, + repo: string, + issue_number: string, +) => { + const url = `https://api.github.com/repos/${owner}/${repo}/issues/${issue_number}`; + const headers = { "User-Agent": "simple-worker-slack-bot" }; + return fetch(url, { headers }); +}; ``` Back in `src/handlers/lookup.ts`, use `fetchGitHubIssue` to make a request to GitHub’s API, and parse the response: ```ts null {2,3,14,15} -import { Hono } from 'hono'; -import { fetchGithubIssue, parseGhIssueString } from '../utils/github'; -import { Issue } from '../types'; +import { Hono } from "hono"; +import { fetchGithubIssue, parseGhIssueString } from "../utils/github"; +import { Issue } from "../types"; const app = new Hono(); -app.post('/', async (c) => { +app.post("/", async (c) => { const { text } = await c.req.parseBody(); - if (typeof text !== 'string') { + if (typeof text !== "string") { return c.notFound(); } @@ -392,12 +395,12 @@ The previously mentioned [Block Kit](https://api.slack.com/block-kit) framework Create another file, `src/utils/slack.ts`, to contain the function `constructGhIssueSlackMessage`, a function for taking issue data, and turning it into a collection of blocks. Blocks are JavaScript objects that Slack will use to format the message: ```ts -import { Issue } from '../types'; +import { Issue } from "../types"; export const constructGhIssueSlackMessage = ( issue: Issue, issue_string: string, - prefix_text?: string + prefix_text?: string, ) => { const issue_link = `<${issue.html_url}|${issue_string}>`; const user_link = `<${issue.user.html_url}|${issue.user.login}>`; @@ -425,35 +428,39 @@ With those variables in place, `text_lines` is an array of each line of text for With the text constructed, you can finally construct your Slack message, returning an array of blocks for Slack’s [Block Kit](https://api.slack.com/block-kit). In this case, there is only have one block: a [section](https://api.slack.com/reference/messaging/blocks#section) block with Markdown text, and an accessory image of the user who created the issue. 
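For orientation, the single block built here comes out shaped roughly as follows. Every value is a made-up illustration for a hypothetical issue, not output from a real API call:

```ts
const exampleBlock = {
  type: "section",
  text: {
    type: "mrkdwn",
    text: "*Fix the build - <https://github.com/example/repo/issues/1|example/repo#1>*\nThe build fails on Node 20.\n*open* - Created by <https://github.com/octocat|octocat> on 1/1/2024",
  },
  accessory: {
    type: "image",
    image_url: "https://avatars.githubusercontent.com/u/583231", // illustrative avatar URL
    alt_text: "octocat",
  },
};
```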
Return that single block inside of an array, to complete the `constructGhIssueSlackMessage` function: ```ts null {15,16,17,18,19,20,21,22,23,24,25,26,27,28} -import { Issue } from '../types' - -export const constructGhIssueSlackMessage = (issue: Issue, issue_string: string, prefix_text?: string) => { - const issue_link = `<${issue.html_url}|${issue_string}>` - const user_link = `<${issue.user.html_url}|${issue.user.login}>` - const date = new Date(Date.parse(issue.created_at)).toLocaleDateString() - - const text_lines = [ - prefix_text, - `*${issue.title} - ${issue_link}*`, - issue.body, - `*${issue.state}* - Created by ${user_link} on ${date}` - ] - - return [ - { - type: 'section', - text: { - type: 'mrkdwn', - text: text_lines.join('\n') - }, - accessory: { - type: 'image', - image_url: issue.user.avatar_url, - alt_text: issue.user.login - } - } - ] -} +import { Issue } from "../types"; + +export const constructGhIssueSlackMessage = ( + issue: Issue, + issue_string: string, + prefix_text?: string, +) => { + const issue_link = `<${issue.html_url}|${issue_string}>`; + const user_link = `<${issue.user.html_url}|${issue.user.login}>`; + const date = new Date(Date.parse(issue.created_at)).toLocaleDateString(); + + const text_lines = [ + prefix_text, + `*${issue.title} - ${issue_link}*`, + issue.body, + `*${issue.state}* - Created by ${user_link} on ${date}`, + ]; + + return [ + { + type: "section", + text: { + type: "mrkdwn", + text: text_lines.join("\n"), + }, + accessory: { + type: "image", + image_url: issue.user.avatar_url, + alt_text: issue.user.login, + }, + }, + ]; +}; ``` #### Finishing the lookup route @@ -461,16 +468,16 @@ export const constructGhIssueSlackMessage = (issue: Issue, issue_string: string, In `src/handlers/lookup.ts`, use `constructGhIssueSlackMessage` to construct `blocks`, and return them as a new response with `c.json()` when the slash command is called: ```ts null {3,17,18,19,20,21,22} -import { Hono } from 'hono'; -import { fetchGithubIssue, parseGhIssueString } from '../utils/github'; -import { constructGhIssueSlackMessage } from '../utils/slack'; -import { Issue } from '../types'; +import { Hono } from "hono"; +import { fetchGithubIssue, parseGhIssueString } from "../utils/github"; +import { constructGhIssueSlackMessage } from "../utils/slack"; +import { Issue } from "../types"; const app = new Hono(); -app.post('/', async (c) => { +app.post("/", async (c) => { const { text } = await c.req.parseBody(); - if (typeof text !== 'string') { + if (typeof text !== "string") { return c.notFound(); } @@ -481,7 +488,7 @@ app.post('/', async (c) => { return c.json({ blocks, - response_type: 'in_channel', + response_type: "in_channel", }); }); @@ -497,38 +504,38 @@ If you would like the messages to remain private, remove the `response_type` lin The `lookup` route is almost complete, but there are a number of errors that can occur in the route, such as parsing the body from Slack, getting the issue from GitHub, or constructing the Slack message itself. Although Hono applications can handle errors without having to do anything, you can customize the response returned in the following way. 
```ts null {25,26,27,28,29,30} -import { Hono } from 'hono' -import { fetchGithubIssue, parseGhIssueString } from '../utils/github' -import { constructGhIssueSlackMessage } from '../utils/slack' -import { Issue } from '../types' - -const app = new Hono() - -app.post('/', async (c) => { - const { text } = await c.req.parseBody() - if (typeof text !== 'string') { - return c.notFound() - } - - const { owner, repo, issue_number } = parseGhIssueString(text) - const response = await fetchGithubIssue(owner, repo, issue_number) - const issue = await response.json() - const blocks = constructGhIssueSlackMessage(issue, text) - - return c.json({ - blocks, - response_type: 'in_channel' - }) -}) +import { Hono } from "hono"; +import { fetchGithubIssue, parseGhIssueString } from "../utils/github"; +import { constructGhIssueSlackMessage } from "../utils/slack"; +import { Issue } from "../types"; + +const app = new Hono(); + +app.post("/", async (c) => { + const { text } = await c.req.parseBody(); + if (typeof text !== "string") { + return c.notFound(); + } + + const { owner, repo, issue_number } = parseGhIssueString(text); + const response = await fetchGithubIssue(owner, repo, issue_number); + const issue = await response.json(); + const blocks = constructGhIssueSlackMessage(issue, text); + + return c.json({ + blocks, + response_type: "in_channel", + }); +}); app.onError((_e, c) => { return c.text( "Uh-oh! We couldn't find the issue you provided. " + - 'We can only find public issues in the following format: `owner/repo#issue_number`.' + "We can only find public issues in the following format: `owner/repo#issue_number`.", ); }); -export default app +export default app; ``` ### Creating the webhook route @@ -540,8 +547,8 @@ At the beginning of this tutorial, you configured a GitHub webhook to track any In `src/routes/webhook.ts`, define a blank Hono application. The difference from the `lookup` route is that the `Bindings` is passed as a generics for the `new Hono()`. This is necessary to give the appropriate TypeScript type to `SLACK_WEBHOOK_URL` which will be used later. ```ts -import { Hono } from 'hono'; -import { Bindings } from '../types'; +import { Hono } from "hono"; +import { Bindings } from "../types"; const app = new Hono<{ Bindings: Bindings }>(); @@ -559,12 +566,12 @@ Compare this message format to the format returned when a user uses the `/issue` To start filling out the route, parse the request body formatted JSON into an object and construct some helper variables: ```ts null {2,6,7,8,9,10} -import { Hono } from 'hono'; -import { constructGhIssueSlackMessage } from '../utils/slack'; +import { Hono } from "hono"; +import { constructGhIssueSlackMessage } from "../utils/slack"; const app = new Hono(); -app.post('/', async (c) => { +app.post("/", async (c) => { const { action, issue, repository } = await c.req.json(); const prefix_text = `An issue was ${action}:`; const issue_string = `${repository.owner.login}/${repository.name}#${issue.number}`; @@ -582,17 +589,17 @@ Use `c.req.json()` to convert the payload body of the request from JSON into a p The messages your Slack bot sends back to your Slack channel from the `lookup` and `webhook` routes are incredibly similar. Because of this, you can re-use the existing `constructGhIssueSlackMessage` to continue populating `src/handlers/webhook.ts`. 
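For reference, the three fields destructured above come straight from GitHub's webhook JSON. The following is a heavily trimmed sketch of such a payload; real deliveries carry many more fields, and these values are illustrative only:

```ts
const examplePayload = {
  action: "opened", // GitHub also sends values like "edited" or "closed"
  issue: {
    html_url: "https://github.com/example/repo/issues/1",
    title: "Example issue title",
    body: "Example issue body",
    state: "open",
    created_at: "2024-01-01T00:00:00Z",
    number: 1,
    user: {
      html_url: "https://github.com/octocat",
      login: "octocat",
      avatar_url: "https://avatars.githubusercontent.com/u/583231",
    },
  },
  repository: {
    name: "repo",
    owner: { login: "example" },
  },
};
```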
Import the function from `src/utils/slack.ts`, and pass the issue data into it: ```ts null {10} -import { Hono } from 'hono' -import { constructGhIssueSlackMessage } from '../utils/slack' +import { Hono } from "hono"; +import { constructGhIssueSlackMessage } from "../utils/slack"; -const app = new Hono() +const app = new Hono(); -app.post('/', async (c) => { - const { action, issue, repository } = await c.req.json() - const prefix_text = `An issue was ${action}:` - const issue_string = `${repository.owner.login}/${repository.name}#${issue.number}` - const blocks = constructGhIssueSlackMessage(issue, issue_string, prefix_text) -}) +app.post("/", async (c) => { + const { action, issue, repository } = await c.req.json(); + const prefix_text = `An issue was ${action}:`; + const issue_string = `${repository.owner.login}/${repository.name}#${issue.number}`; + const blocks = constructGhIssueSlackMessage(issue, issue_string, prefix_text); +}); export default app; ``` @@ -602,14 +609,14 @@ Importantly, the usage of `constructGhIssueSlackMessage` in this handler adds on Add a utility function, `compact`, which takes an array, and filters out any `null` or `undefined` values from it. This function will be used to remove `prefix_text` from `text_lines` if it has not actually been passed in to the function, such as when called from `src/handlers/lookup.ts`. The full (and final) version of the `src/utils/slack.ts` looks like this: ```ts null {3,26} -import { Issue } from '../types'; +import { Issue } from "../types"; const compact = (array: unknown[]) => array.filter((el) => el); export const constructGhIssueSlackMessage = ( issue: Issue, issue_string: string, - prefix_text?: string + prefix_text?: string, ) => { const issue_link = `<${issue.html_url}|${issue_string}>`; const user_link = `<${issue.user.html_url}|${issue.user.login}>`; @@ -624,13 +631,13 @@ export const constructGhIssueSlackMessage = ( return [ { - type: 'section', + type: "section", text: { - type: 'mrkdwn', - text: compact(text_lines).join('\n'), + type: "mrkdwn", + text: compact(text_lines).join("\n"), }, accessory: { - type: 'image', + type: "image", image_url: issue.user.avatar_url, alt_text: issue.user.login, }, @@ -642,13 +649,13 @@ export const constructGhIssueSlackMessage = ( Back in `src/handlers/webhook.ts`, the `blocks` that are returned from `constructGhIssueSlackMessage` become the body in a new `fetch` request, an HTTP POST request to a Slack webhook URL. 
Once that request completes, return a response with status code `200`, and the body text `"OK"`: ```ts null {13,14,15,16,17,18,19} -import { Hono } from 'hono'; -import { constructGhIssueSlackMessage } from '../utils/slack'; -import { Bindings } from '../types'; +import { Hono } from "hono"; +import { constructGhIssueSlackMessage } from "../utils/slack"; +import { Bindings } from "../types"; const app = new Hono<{ Bindings: Bindings }>(); -app.post('/', async (c) => { +app.post("/", async (c) => { const { action, issue, repository } = await c.req.json(); const prefix_text = `An issue was ${action}:`; const issue_string = `${repository.owner.login}/${repository.name}#${issue.number}`; @@ -656,11 +663,11 @@ app.post('/', async (c) => { const fetchResponse = await fetch(c.env.SLACK_WEBHOOK_URL, { body: JSON.stringify({ blocks }), - method: 'POST', - headers: { 'Content-Type': 'application/json' }, + method: "POST", + headers: { "Content-Type": "application/json" }, }); - return c.text('OK'); + return c.text("OK"); }); export default app; @@ -670,16 +677,17 @@ The constant `SLACK_WEBHOOK_URL` represents the Slack Webhook URL that you creat :::caution - Since this webhook allows developers to post directly to your Slack channel, keep it secret. - ::: To use this constant inside of your codebase, use the [`wrangler secret`](/workers/wrangler/commands/#secret) command: ```sh title="Set the SLACK_WEBHOOK_URL secret" -$ npx wrangler secret put SLACK_WEBHOOK_URL +npx wrangler secret put SLACK_WEBHOOK_URL +``` + +```sh output Enter a secret value: https://hooks.slack.com/services/abc123 ``` @@ -690,13 +698,13 @@ Similarly to the `lookup` route, the `webhook` route should include some basic e To do this, write the custom error handler with `app.onError()` and return a new response with a status code of `500`. 
The final version of `src/routes/webhook.ts` looks like this: ```ts null {24,25,26,27,28,29,30,31} -import { Hono } from 'hono'; -import { constructGhIssueSlackMessage } from '../utils/slack'; -import { Bindings } from '../types'; +import { Hono } from "hono"; +import { constructGhIssueSlackMessage } from "../utils/slack"; +import { Bindings } from "../types"; const app = new Hono<{ Bindings: Bindings }>(); -app.post('/', async (c) => { +app.post("/", async (c) => { const { action, issue, repository } = await c.req.json(); const prefix_text = `An issue was ${action}:`; const issue_string = `${repository.owner.login}/${repository.name}#${issue.number}`; @@ -704,25 +712,25 @@ app.post('/', async (c) => { const fetchResponse = await fetch(c.env.SLACK_WEBHOOK_URL, { body: JSON.stringify({ blocks }), - method: 'POST', - headers: { 'Content-Type': 'application/json' }, + method: "POST", + headers: { "Content-Type": "application/json" }, }); if (!fetchResponse.ok) throw new Error(); - return c.text('OK'); + return c.text("OK"); }); app.onError((_e, c) => { return c.json( { - message: 'Unable to handle webhook', + message: "Unable to handle webhook", }, - 500 + 500, ); }); -export default app +export default app; ``` ## Deploy @@ -734,13 +742,13 @@ Wrangler has built-in support for bundling, uploading, and releasing your Cloudf ```sh title="Deploy your application" -$ npm run deploy +npm run deploy ``` ```sh title="Deploy your application" -$ yarn deploy +yarn deploy ``` diff --git a/src/content/docs/workers/tutorials/connect-to-turso-using-workers/index.mdx b/src/content/docs/workers/tutorials/connect-to-turso-using-workers/index.mdx index a1a0f27345f84e..b58881006ab482 100644 --- a/src/content/docs/workers/tutorials/connect-to-turso-using-workers/index.mdx +++ b/src/content/docs/workers/tutorials/connect-to-turso-using-workers/index.mdx @@ -7,29 +7,26 @@ title: Connect to and query your Turso database using Workers languages: - TypeScript - SQL - --- -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; This tutorial will guide you on how to build globally distributed applications with Cloudflare Workers, and [Turso](https://chiselstrike.com/), an edge-hosted distributed database based on libSQL. By using Workers and Turso, you can create applications that are close to your end users without having to maintain or operate infrastructure in tens or hundreds of regions. :::note - For a more seamless experience, refer to the [Turso Database Integration guide](/workers/databases/native-integrations/turso/). The Turso Database Integration will connect your Worker to a Turso database by getting the right configuration from Turso and adding it as [secrets](/workers/configuration/secrets/) to your Worker. - ::: ## Prerequisites Before continuing with this tutorial, you should have: -* Successfully [created up your first Cloudflare Worker](/workers/get-started/guide/) and/or have deployed a Cloudflare Worker before. -* Installed [Wrangler](/workers/wrangler/install-and-update/), a command-line tool for building Cloudflare Workers. -* A [GitHub account](https://github.com/), required for authenticating to Turso. -* A basic familiarity with installing and using command-line interface (CLI) applications. +- Successfully [created up your first Cloudflare Worker](/workers/get-started/guide/) and/or have deployed a Cloudflare Worker before. 
+- Installed [Wrangler](/workers/wrangler/install-and-update/), a command-line tool for building Cloudflare Workers. +- A [GitHub account](https://github.com/), required for authenticating to Turso. +- A basic familiarity with installing and using command-line interface (CLI) applications. ## Install the Turso CLI @@ -37,16 +34,19 @@ You will need the Turso CLI to create and populate a database. Run either of the ```sh # On macOS or Linux with Homebrew -$ brew install chiselstrike/tap/turso +brew install chiselstrike/tap/turso # Manual scripted installation -$ curl -sSfL | bash +curl -sSfL | bash ``` After you have installed the Turso CLI, verify that the CLI is in your shell path: ```sh -$ turso --version +turso --version +``` + +```sh output # This should output your current Turso CLI version (your installed version may be higher): turso version v0.51.0 ``` @@ -56,7 +56,10 @@ turso version v0.51.0 Before you create your first Turso database, you need to log in to the CLI using your GitHub account by running: ```sh -$ turso auth login +turso auth login +``` + +```sh output Waiting for authentication... ✔ Success! Logged in as @@ -67,7 +70,10 @@ Waiting for authentication... After you have authenticated, you can create a database by running `turso db create `. Turso will automatically choose a location closest to you. ```sh -$ turso db create my-db +turso db create my-db +``` + +```sh output # Example: [===> ] Creating database my-db in Los Angeles, California (US) (lax) @@ -78,7 +84,7 @@ Created database my-db in Los Angeles, California (US) (lax) in 34 seconds. With your first database created, you can now connect to it directly and execute SQL against it: ```sh -$ turso db shell my-db +turso db shell my-db ``` To get started with your database, create and define a schema for your first table. In this example, you will create a `example_users` table with one column: `email` (of type `text`) and then populate it with one email address. @@ -100,22 +106,34 @@ The Workers command-line interface, [Wrangler](/workers/wrangler/install-and-upd To create a new Workers project (named `worker-turso-ts`), run the following: - - - + + + To start developing your Worker, `cd` into your new project directory: ```sh -$ cd worker-turso-ts +cd worker-turso-ts ``` In your project directory, you now have the following files: -* `wrangler.toml`: Your Wrangler configuration file. -* `src/index.ts`: A minimal Hello World Worker written in TypeScript -* `package.json`: A minimal Node dependencies configuration file. -* `tsconfig.json`: TypeScript configuration that includes Workers types. Only generated if indicated. +- `wrangler.toml`: Your Wrangler configuration file. +- `src/index.ts`: A minimal Hello World Worker written in TypeScript +- `package.json`: A minimal Node dependencies configuration file. +- `tsconfig.json`: TypeScript configuration that includes Workers types. Only generated if indicated. For this tutorial, only the `wrangler.toml` and `src/index.ts` files are relevant. You will not need to edit the other files, and they should be left as is. 
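Before wiring everything through Wrangler, it can help to see the connection shape the client library ultimately needs. The following is a minimal sketch with placeholder values; the URL and token are produced in the next steps, and the import path matches the Workers-compatible build used later in this tutorial:

```ts
import { createClient } from "@libsql/client/web";

const client = createClient({
  url: "libsql://my-db-<your-username>.turso.io", // placeholder: from `turso db show my-db --url`
  authToken: "<token>", // placeholder: from `turso db tokens create my-db -e none`
});

// Query the table created in the Turso shell above.
const rs = await client.execute("select * from example_users");
console.log(rs.rows); // e.g. [{ email: "foo@bar.com" }]
```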
@@ -128,11 +146,11 @@ The Turso client library requires two pieces of information to make a connection To get the URL for your database, run the following Turso CLI command, and copy the result: -```sh -$ turso db show my-db --url -# Output: +````sh +turso db show my-db --url +```sh output libsql://my-db-.turso.io -``` +```` Open `wrangler.toml` in your editor and at the bottom of the file, create a new `[vars]` section representing the [environment variables](/workers/configuration/environment-variables/) for your project: @@ -146,7 +164,7 @@ Save the changes to `wrangler.toml`. Next, create a long-lived authentication token for your Worker to use when connecting to your database. Run the following Turso CLI command, and copy the output to your clipboard: ```sh -$ turso db tokens create my-db -e none +turso db tokens create my-db -e none # Will output a long text string (an encoded JSON Web Token) ``` @@ -154,7 +172,7 @@ To keep this token secret: 1. You will create a `.dev.vars` file for local development. Do not commit this file to source control. You should add `.dev.vars to your `.gitignore\` file if you are using Git. -* You will also create a [secret](/workers/configuration/secrets/) to keep your authentication token confidential. +- You will also create a [secret](/workers/configuration/secrets/) to keep your authentication token confidential. First, create a new file called `.dev.vars` with the following structure. Paste your authentication token in the quotation marks: @@ -166,8 +184,10 @@ Save your changes to `.dev.vars`. Next, store the authentication token as a secr ```sh # Ensure you specify the secret name exactly: your Worker will need to reference it later. -$ npx wrangler secret put LIBSQL_DB_AUTH_TOKEN -------------------------------------------------------- +npx wrangler secret put LIBSQL_DB_AUTH_TOKEN +``` + +```sh output ? Enter a secret value: › ``` @@ -178,7 +198,7 @@ Select `` on your keyboard to save the token as a secret. Both `LIBSQL_DB Install the Turso client library and a router: ```sh -$ npm install @libsql/client itty-router +npm install @libsql/client itty-router ``` The `@libsql/client` library allows you to query a Turso database. The `itty-router` library is a lightweight router you will use to help handle incoming requests to the worker. @@ -198,78 +218,78 @@ import { Client as LibsqlClient, createClient } from "@libsql/client/web"; import { Router, RouterType } from "itty-router"; export interface Env { - // The environment variable containing your the URL for your Turso database. - LIBSQL_DB_URL?: string; - // The Secret that contains the authentication token for your Turso database. - LIBSQL_DB_AUTH_TOKEN?: string; - - // These objects are created before first use, then stashed here - // for future use - router?: RouterType; + // The environment variable containing your the URL for your Turso database. + LIBSQL_DB_URL?: string; + // The Secret that contains the authentication token for your Turso database. 
+ LIBSQL_DB_AUTH_TOKEN?: string; + + // These objects are created before first use, then stashed here + // for future use + router?: RouterType; } export default { - async fetch(request, env): Promise { - if (env.router === undefined) { - env.router = buildRouter(env); - } + async fetch(request, env): Promise { + if (env.router === undefined) { + env.router = buildRouter(env); + } - return env.router.handle(request); - }, + return env.router.handle(request); + }, } satisfies ExportedHandler; function buildLibsqlClient(env: Env): LibsqlClient { - const url = env.LIBSQL_DB_URL?.trim(); - if (url === undefined) { - throw new Error("LIBSQL_DB_URL env var is not defined"); - } + const url = env.LIBSQL_DB_URL?.trim(); + if (url === undefined) { + throw new Error("LIBSQL_DB_URL env var is not defined"); + } - const authToken = env.LIBSQL_DB_AUTH_TOKEN?.trim(); - if (authToken === undefined) { - throw new Error("LIBSQL_DB_AUTH_TOKEN env var is not defined"); - } + const authToken = env.LIBSQL_DB_AUTH_TOKEN?.trim(); + if (authToken === undefined) { + throw new Error("LIBSQL_DB_AUTH_TOKEN env var is not defined"); + } - return createClient({ url, authToken }); + return createClient({ url, authToken }); } function buildRouter(env: Env): RouterType { - const router = Router(); - - router.get("/users", async () => { - const client = buildLibsqlClient(env); - const rs = await client.execute("select * from example_users"); - return Response.json(rs); - }); - - router.get("/add-user", async (request) => { - const client = buildLibsqlClient(env); - const email = request.query.email; - if (email === undefined) { - return new Response("Missing email", { status: 400 }); - } - if (typeof email !== "string") { - return new Response("email must be a single string", { status: 400 }); - } - if (email.length === 0) { - return new Response("email length must be > 0", { status: 400 }); - } - - try { - await client.execute({ - sql: "insert into example_users values (?)", - args: [email], - }); - } catch (e) { - console.error(e); - return new Response("database insert failed"); - } - - return new Response("Added"); - }); - - router.all("*", () => new Response("Not Found.", { status: 404 })); - - return router; + const router = Router(); + + router.get("/users", async () => { + const client = buildLibsqlClient(env); + const rs = await client.execute("select * from example_users"); + return Response.json(rs); + }); + + router.get("/add-user", async (request) => { + const client = buildLibsqlClient(env); + const email = request.query.email; + if (email === undefined) { + return new Response("Missing email", { status: 400 }); + } + if (typeof email !== "string") { + return new Response("email must be a single string", { status: 400 }); + } + if (email.length === 0) { + return new Response("email length must be > 0", { status: 400 }); + } + + try { + await client.execute({ + sql: "insert into example_users values (?)", + args: [email], + }); + } catch (e) { + console.error(e); + return new Response("database insert failed"); + } + + return new Response("Added"); + }); + + router.all("*", () => new Response("Not Found.", { status: 404 })); + + return router; } ``` @@ -277,11 +297,11 @@ Save your `src/index.ts` file with your changes. Note: -* The libSQL client library import '@libsql/client/web' must be imported exactly as shown when working with Cloudflare workers. The non-web import will not work in the Workers environment. -* The `Env` interface contains the environment variable and secret you defined earlier. 
-* The `Env` interface also caches the libSQL client object and router, which are created on the first request to the Worker. -* The `/users` route fetches all rows from the `example_users` table you created in the Turso shell. It simply serializes the `ResultSet` object as JSON directly to the caller. -* The `/add-user` route inserts a new row using a value provided in the query string. +- The libSQL client library import '@libsql/client/web' must be imported exactly as shown when working with Cloudflare workers. The non-web import will not work in the Workers environment. +- The `Env` interface contains the environment variable and secret you defined earlier. +- The `Env` interface also caches the libSQL client object and router, which are created on the first request to the Worker. +- The `/users` route fetches all rows from the `example_users` table you created in the Turso shell. It simply serializes the `ResultSet` object as JSON directly to the caller. +- The `/add-user` route inserts a new row using a value provided in the query string. With your environment configured and your code ready, you will now test your Worker locally before you deploy. @@ -290,7 +310,7 @@ With your environment configured and your code ready, you will now test your Wor To run a local instance of our Worker (entirely on your machine), run the following command: ```sh -$ npx wrangler dev +npx wrangler dev ``` You should be able to review output similar to the following: @@ -320,15 +340,19 @@ Connect to it and validate your Worker returns the email address you inserted wh You should see JSON similar to the following containing the data from the `example_users` table: ```json -{"columns":["email"],"rows":[{"email":"foo@bar.com"}],"rowsAffected":0} +{ + "columns": ["email"], + "rows": [{ "email": "foo@bar.com" }], + "rowsAffected": 0 +} ``` :::caution If you see an error instead of a list of users, double check that: -* You have entered the correct value for your `LIBSQL_DB_URL` in `wrangler.toml`. -* You have set a secret called `LIBSQL_DB_AUTH_TOKEN` with your database authentication token. +- You have entered the correct value for your `LIBSQL_DB_URL` in `wrangler.toml`. +- You have set a secret called `LIBSQL_DB_AUTH_TOKEN` with your database authentication token. Both of these need to be present and match the variable names in your Worker's code. ::: @@ -344,7 +368,7 @@ Quit Wrangler by typing `q` into the shell where it was started. After you have validated that your Worker can connect to your Turso database, deploy your Worker. Run the following Wrangler command to deploy your Worker to the Cloudflare global network: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` The first time you run this command, it will launch a browser, ask you to sign in with your Cloudflare account, and grant permissions to Wrangler. @@ -367,12 +391,12 @@ You have now deployed a Worker that can connect to your Turso database, query it To clean up the resources you created as part of this tutorial: -* If you do not want to keep this Worker, run `npx wrangler delete worker-turso-ts` to delete the deployed Worker. -* You can also delete your Turso database via `turso db destroy my-db`. +- If you do not want to keep this Worker, run `npx wrangler delete worker-turso-ts` to delete the deployed Worker. +- You can also delete your Turso database via `turso db destroy my-db`. ## Related resources -* Find the [complete project source code on GitHub](https://github.com/cloudflare/workers-sdk/tree/main/templates/worker-turso-ts/). 
-* Understand how to [debug your Cloudflare Worker](/workers/observability/). -* Join the [Cloudflare Developer Discord](https://discord.cloudflare.com). -* Join the [ChiselStrike (Turso) Discord](https://discord.com/invite/4B5D7hYwub). +- Find the [complete project source code on GitHub](https://github.com/cloudflare/workers-sdk/tree/main/templates/worker-turso-ts/). +- Understand how to [debug your Cloudflare Worker](/workers/observability/). +- Join the [Cloudflare Developer Discord](https://discord.cloudflare.com). +- Join the [ChiselStrike (Turso) Discord](https://discord.com/invite/4B5D7hYwub). diff --git a/src/content/docs/workers/tutorials/create-finetuned-chatgpt-ai-models-with-r2/index.mdx b/src/content/docs/workers/tutorials/create-finetuned-chatgpt-ai-models-with-r2/index.mdx index d512b28703e2c4..208ae175b8e614 100644 --- a/src/content/docs/workers/tutorials/create-finetuned-chatgpt-ai-models-with-r2/index.mdx +++ b/src/content/docs/workers/tutorials/create-finetuned-chatgpt-ai-models-with-r2/index.mdx @@ -13,12 +13,9 @@ languages: - TypeScript sidebar: order: 1 - --- - - -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; In this tutorial, you will use the [OpenAI](https://openai.com) API and [Cloudflare R2](/r2) to create a [fine-tuned model](https://platform.openai.com/docs/guides/fine-tuning). @@ -38,24 +35,36 @@ To review the completed code for this application, refer to the [GitHub reposito Before you start, make sure you have: -* A Cloudflare account with access to R2. If you do not have a Cloudflare account, [sign up](https://dash.cloudflare.com/sign-up/workers-and-pages) before continuing. Then purchase R2 from your Cloudflare dashboard. -* An OpenAI API key. -* A fine-tune document, structured as [JSON Lines](https://jsonlines.org/). Use the [example document](https://github.com/kristianfreeman/openai-finetune-r2-example/blob/16ca53ca9c8589834abe317487eeedb8a24c7643/example_data.jsonl) in the source code. +- A Cloudflare account with access to R2. If you do not have a Cloudflare account, [sign up](https://dash.cloudflare.com/sign-up/workers-and-pages) before continuing. Then purchase R2 from your Cloudflare dashboard. +- An OpenAI API key. +- A fine-tune document, structured as [JSON Lines](https://jsonlines.org/). Use the [example document](https://github.com/kristianfreeman/openai-finetune-r2-example/blob/16ca53ca9c8589834abe317487eeedb8a24c7643/example_data.jsonl) in the source code. ## 1. Create a Worker application First, use the `c3` CLI to create a new Cloudflare Workers project. - - - + + + The above options will create the "Hello World" TypeScript project. Move into your newly created directory: ```sh -$ cd finetune-chatgpt-model +cd finetune-chatgpt-model ``` ## 2. Upload a fine-tune document to R2 @@ -65,7 +74,7 @@ Next, upload the fine-tune document to R2. R2 is a key-value store that allows y To create a new R2 bucket use the [`wrangler r2 bucket create`](/workers/wrangler/commands/#create-2) command. Note that you are logged in with your Cloudflare account. If not logged in via Wrangler, use the [`wrangler login`](/workers/wrangler/commands/#login) command. ```sh -$ npx wrangler r2 bucket create +npx wrangler r2 bucket create ``` Replace `` with your desired bucket name. Note that bucket names must be lowercase and can only contain dashes. @@ -73,7 +82,7 @@ Replace `` with your desired bucket name. 
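Before uploading anything, it is worth pinning down what a fine-tune document holds: each line of a JSON Lines file is one complete JSON object. For chat-model fine-tuning, OpenAI expects a `messages` array per line. The sketch below is illustrative, so check OpenAI's fine-tuning guide and the example document linked above for the authoritative shape:

```ts
// One training example per line of the .jsonl file.
const trainingExample = {
  messages: [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "What is Cloudflare R2?" },
    { role: "assistant", content: "R2 is Cloudflare's object storage." },
  ],
};

// JSON.stringify without extra whitespace keeps each object on a single
// line, which is exactly what the JSON Lines format requires.
const jsonlLine = JSON.stringify(trainingExample);
```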
Note that bucket names mu Next, upload a file using the [`wrangler r2 object put`](/workers/wrangler/commands/#put-2) command. ```sh -$ npx wrangler r2 object put -f +npx wrangler r2 object put -f ``` `` is the combined bucket and file path of the file you want to upload -- for example, `fine-tune-ai/finetune.jsonl`, where `fine-tune-ai` is the bucket name. Replace `` with the local filename of your fine-tune document. @@ -95,13 +104,13 @@ bucket_name = '' You will use [Hono](https://hono.dev/), a lightweight framework for building Cloudflare Workers applications. Hono provides an interface for defining routes and middleware functions. Inside your project directory, run the following command to install Hono: ```sh -$ npm install hono +npm install hono ``` You also need to install the [OpenAI Node API library](https://www.npmjs.com/package/openai). This library provides convenient access to the OpenAI REST API in a Node.js project. To install the library, execute the following command: ```sh -$ npm install openai +npm install openai ``` Next, open the `src/index.ts` file and replace the default code with the below code. Replace `` with the binding name you set in `wrangler.toml` file. @@ -136,7 +145,7 @@ app.onError((err, c) => { export default app; ``` -In the above code, you first import the required packages and define the types. Then, you initialize `app` as a new Hono instance. Using the `use` middleware function, you add the OpenAI API client to the context of all routes. This middleware function allows you to access the client from within any route handler. `onError()` defines an error handler to return any errors as a JSON response. +In the above code, you first import the required packages and define the types. Then, you initialize `app` as a new Hono instance. Using the `use` middleware function, you add the OpenAI API client to the context of all routes. This middleware function allows you to access the client from within any route handler. `onError()` defines an error handler to return any errors as a JSON response. ## 5. Read R2 files and upload them to OpenAI @@ -184,23 +193,23 @@ This section includes the `GET /models` route and the `createModel` function. Th ```typescript const createModel = async (c: Context, fileId: string) => { - const openai: OpenAI = c.get("openai") + const openai: OpenAI = c.get("openai"); const body = { training_file: fileId, model: "gpt-4o-mini", - } + }; - return openai.fineTuning.jobs.create(body) -} + return openai.fineTuning.jobs.create(body); +}; -app.get('/models', async c => { - const fileId = c.req.query("file_id") - if (!fileId) return c.text("Missing file ID query param", 400) +app.get("/models", async (c) => { + const fileId = c.req.query("file_id"); + if (!fileId) return c.text("Missing file ID query param", 400); - const model = await createModel(c, fileId) - return c.json(model) -}) + const model = await createModel(c, fileId); + return c.json(model); +}); ``` ## 7. 
List all fine-tune jobs @@ -209,15 +218,15 @@ This section describes the `GET /jobs` route and the corresponding `getJobs` fun ```typescript const getJobs = async (c: Context) => { - const openai: OpenAI = c.get("openai") - const resp = await openai.fineTuning.jobs.list() - return resp.data -} - -app.get('/jobs', async c => { - const jobs = await getJobs(c) - return c.json(jobs) -}) + const openai: OpenAI = c.get("openai"); + const resp = await openai.fineTuning.jobs.list(); + return resp.data; +}; + +app.get("/jobs", async (c) => { + const jobs = await getJobs(c); + return c.json(jobs); +}); ``` ## 8. Deploy your application @@ -227,7 +236,7 @@ After you have created your Worker application and added the required functions, Before you deploy, you must set the `OPENAI_API_KEY` [secret](/workers/configuration/secrets/) for your application. Do this by running the [`wrangler secret put`](/workers/wrangler/commands/#put-3) command: ```sh -$ npx wrangler secret put OPENAI_API_KEY +npx wrangler secret put OPENAI_API_KEY ``` To deploy your Worker application to the Cloudflare global network: @@ -235,7 +244,7 @@ To deploy your Worker application to the Cloudflare global network: 1. Make sure you are in your Worker project's directory, then run the [`wrangler deploy`](/workers/wrangler/commands/#deploy) command: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` 2. Wrangler will package and upload your code. @@ -247,13 +256,13 @@ $ npx wrangler deploy To use your application, create a new fine-tune job by making a request to the `/files` with a `file` query param matching the filename you uploaded earlier: ```sh -$ curl https://your-worker-url.com/files?file=finetune.jsonl +curl https://your-worker-url.com/files?file=finetune.jsonl ``` When the file is uploaded, issue another request to `/models`, passing the `file_id` query parameter. This should match the `id` returned as JSON from the `/files` route: ```sh -$ curl https://your-worker-url.com/models?file_id=file-abc123 +curl https://your-worker-url.com/models?file_id=file-abc123 ``` Finally, visit `/jobs` to see the status of your fine-tune jobs in OpenAI. Once the fine-tune job has completed, you can see the `fine_tuned_model` value, indicating a fine-tuned model has been created. @@ -268,8 +277,8 @@ Use it in any API requests you make to OpenAI's chat completions endpoints. For ```javascript openai.chat.completions.create({ - messages: [{ role: "system", content: "You are a helpful assistant." }], - model: "ft:gpt-4o-mini:my-org:custom_suffix:id", + messages: [{ role: "system", content: "You are a helpful assistant." }], + model: "ft:gpt-4o-mini:my-org:custom_suffix:id", }); ``` diff --git a/src/content/docs/workers/tutorials/deploy-a-realtime-chat-app/index.mdx b/src/content/docs/workers/tutorials/deploy-a-realtime-chat-app/index.mdx index 95c6dcd5124f51..074623f21b1dbf 100644 --- a/src/content/docs/workers/tutorials/deploy-a-realtime-chat-app/index.mdx +++ b/src/content/docs/workers/tutorials/deploy-a-realtime-chat-app/index.mdx @@ -8,12 +8,9 @@ products: - Durable Objects languages: - JavaScript - --- - - -import { Render } from "~/components" +import { Render } from "~/components"; In this tutorial, you will deploy a serverless, real-time chat application that runs using [Durable Objects](/durable-objects/). 
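If you have not used Durable Objects before, the coordination pattern behind the demo looks roughly like the sketch below. This is an illustrative outline only, not the demo's actual code; the `workers-chat-demo` repository you are about to clone implements a richer version with message history, rate limiting, and room management.

```js
// Illustrative sketch: one Durable Object instance coordinates one chat room.
export class ChatRoom {
  constructor(state, env) {
    this.state = state;
    this.sessions = []; // WebSockets currently connected to this room
  }

  async fetch(request) {
    // Accept the WebSocket upgrade and remember the connection.
    const [client, server] = Object.values(new WebSocketPair());
    server.accept();
    this.sessions.push(server);

    // Fan each incoming message out to every connected session.
    server.addEventListener("message", (event) => {
      for (const session of this.sessions) {
        session.send(event.data);
      }
    });

    return new Response(null, { status: 101, webSocket: client });
  }
}
```

Because each room maps to a single Durable Object instance, all messages for that room flow through one coordination point, which is what keeps the broadcast consistent without running a separate server.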
@@ -28,7 +25,7 @@ To continue with this tutorial, you must purchase the [Workers Paid plan](/worke Open your terminal and clone the [workers-chat-demo](https://github.com/cloudflare/workers-chat-demo) repository: ```sh -$ git clone https://github.com/cloudflare/workers-chat-demo.git +git clone https://github.com/cloudflare/workers-chat-demo.git ``` ## Authenticate Wrangler @@ -36,7 +33,7 @@ $ git clone https://github.com/cloudflare/workers-chat-demo.git After you have cloned the repository, authenticate Wrangler by running: ```sh -$ npx wrangler login +npx wrangler login ``` ## Deploy your project @@ -44,7 +41,7 @@ $ npx wrangler login When you are ready to deploy your application, run: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` Your application will be deployed to your `*.workers.dev` subdomain. @@ -73,7 +70,7 @@ routes = [ To test your live application: -1. Open your `edge-chat-demo..workers.dev` subdomain. Your subdomain can be found in the [Cloudflare dashboard](https://dash.cloudflare.com) > **Workers & Pages** > your Worker > **Triggers** > **Routes** > select the `edge-chat-demo..workers.dev` route. +1. Open your `edge-chat-demo..workers.dev` subdomain. Your subdomain can be found in the [Cloudflare dashboard](https://dash.cloudflare.com) > **Workers & Pages** > your Worker > **Triggers** > **Routes** > select the `edge-chat-demo..workers.dev` route. 2. Enter a name in the **your name** field. 3. Choose whether to enter a public room or create a private room. 4. Send the link to other participants. You will be able to view room participants on the right side of the screen. @@ -112,6 +109,6 @@ By completing this tutorial, you have deployed a real-time chat application with Continue building with other Cloudflare Workers tutorials below. -* [Build a Slackbot](/workers/tutorials/build-a-slackbot/) -* [Create SMS notifications for your GitHub repository using Twilio](/workers/tutorials/github-sms-notifications-using-twilio/) -* [Build a QR code generator](/workers/tutorials/build-a-qr-code-generator/) +- [Build a Slackbot](/workers/tutorials/build-a-slackbot/) +- [Create SMS notifications for your GitHub repository using Twilio](/workers/tutorials/github-sms-notifications-using-twilio/) +- [Build a QR code generator](/workers/tutorials/build-a-qr-code-generator/) diff --git a/src/content/docs/workers/tutorials/generate-youtube-thumbnails-with-workers-and-images/index.mdx b/src/content/docs/workers/tutorials/generate-youtube-thumbnails-with-workers-and-images/index.mdx index 9ae9e2f3392e72..58e2388219ddab 100644 --- a/src/content/docs/workers/tutorials/generate-youtube-thumbnails-with-workers-and-images/index.mdx +++ b/src/content/docs/workers/tutorials/generate-youtube-thumbnails-with-workers-and-images/index.mdx @@ -9,12 +9,9 @@ products: languages: - JavaScript - Rust - --- - - -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; In this tutorial, you will learn how to programmatically generate a custom YouTube thumbnail using Cloudflare Workers and Cloudflare Image Resizing. You may want to generate a custom YouTube thumbnail to customize the thumbnail's design, call-to-actions and images used to encourage more viewers to watch your video. @@ -28,9 +25,9 @@ To follow this tutorial, make sure you have Node, Cargo, and [Wrangler](/workers In this tutorial, you will learn how to: -* Upload Images to Cloudflare with the Cloudflare dashboard or API. -* Set up a Worker project with Wrangler. 
-* Manipulate images with image transformations in your Worker. +- Upload Images to Cloudflare with the Cloudflare dashboard or API. +- Set up a Worker project with Wrangler. +- Manipulate images with image transformations in your Worker. ## Upload your image @@ -52,7 +49,7 @@ To upload an image using the Cloudflare dashboard: To upload your image with the [Upload via URL](/images/upload-images/upload-url/) API, refer to the example below: ```sh -$ curl --request POST \ +curl --request POST \ --url https://api.cloudflare.com/client/v4/accounts//images/v1 \ --header 'Authorization: Bearer ' \ --form 'url=' \ @@ -61,30 +58,30 @@ $ curl --request POST \ ``` -* `ACCOUNT_ID`: The current user's account id which can be found in your account settings. -* `API_TOKEN`: Needs to be generated to scoping Images permission. -* `PATH_TO_IMAGE`: Indicates the URL for the image you want to upload. +- `ACCOUNT_ID`: The current user's account id which can be found in your account settings. +- `API_TOKEN`: Needs to be generated to scoping Images permission. +- `PATH_TO_IMAGE`: Indicates the URL for the image you want to upload. You will then receive a response similar to this: ```json { -"result": { - "id": "2cdc28f0-017a-49c4-9ed7-87056c83901", - "filename": "image.jpeg", - "metadata": { - "key": "value", - }, - "uploaded": "2022-01-31T16:39:28.458Z", - "requireSignedURLs": false, - "variants": [ - "https://imagedelivery.net/Vi7wi5KSItxGFsWRG2Us6Q/2cdc28f0-017a-49c4-9ed7-87056c83901/public","https://imagedelivery.net/Vi7wi5KSItxGFsWRG2Us6Q/2cdc28f0-017a-49c4-9ed7-87056c83901/thumbnail" - ] -}, -"success": true, -"errors": [], -"messages": [] - + "result": { + "id": "2cdc28f0-017a-49c4-9ed7-87056c83901", + "filename": "image.jpeg", + "metadata": { + "key": "value" + }, + "uploaded": "2022-01-31T16:39:28.458Z", + "requireSignedURLs": false, + "variants": [ + "https://imagedelivery.net/Vi7wi5KSItxGFsWRG2Us6Q/2cdc28f0-017a-49c4-9ed7-87056c83901/public", + "https://imagedelivery.net/Vi7wi5KSItxGFsWRG2Us6Q/2cdc28f0-017a-49c4-9ed7-87056c83901/thumbnail" + ] + }, + "success": true, + "errors": [], + "messages": [] } ``` @@ -96,17 +93,17 @@ After uploading your image, create a Worker that will enable you to transform te You will need the following before you begin: -* A recent version of [Rust](https://rustup.rs/). -* Access to the `cargo-generate` subcommand: +- A recent version of [Rust](https://rustup.rs/). +- Access to the `cargo-generate` subcommand: ```sh - $ cargo install cargo-generate + cargo install cargo-generate ``` Create a new Worker project using the `worker-rust` template: ```sh -$ cargo generate https://github.com/cloudflare/rustwasm-worker-template +cargo generate https://github.com/cloudflare/rustwasm-worker-template ``` You will now make a few changes to the files in your project directory. @@ -133,7 +130,7 @@ pub async fn main(req: Request, env: Env, _ctx: worker::Context) -> Result Result { After you have finished updating your project, start a local server for developing your Worker by running: ```sh -$ npx wrangler dev +npx wrangler dev ``` This should spin up a `localhost` instance with the image displayed: @@ -348,7 +345,7 @@ name = "worker-to-text" Then run the `npx wrangler deploy` command to deploy your Worker. ```sh -$ npx wrangler deploy +npx wrangler deploy ``` A `.workers.dev` domain will be generated for your Worker after running `wrangler deploy`. You will use this domain in the main thumbnail image. 
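Optionally, sanity-check the deployed Worker from the command line before referencing it in the next step. The subdomain below is a placeholder; substitute the `workers.dev` domain Wrangler printed for your deployment. The query string carries the text to render:

```sh
curl "https://worker-to-text.YOUR_SUBDOMAIN.workers.dev/?Hello%20World" --output rendered-text.png
```

If the saved file opens as an image containing your text, the Worker is ready to be drawn onto the thumbnail.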
@@ -357,31 +354,43 @@ A `.workers.dev` domain will be generated for your Worker after running `wrangle Create a Worker to serve the image you uploaded to Images by running: - - - + + + To start developing your Worker, `cd` into your new project directory: ```sh -$ cd thumbnail-image +cd thumbnail-image ``` This will create a new Worker project named `thumbnail-image`. In the `src/index.js` file, add the following code block: ```js export default { - async fetch(request, env) { - const url = new URL(request.url) - if (url.pathname === '/original-image') { - const image = await fetch( - `https://imagedelivery.net/${env.CLOUDFLARE_ACCOUNT_HASH}/${IMAGE_ID}/public` - ); - return image; - } - return new Response('Image Resizing with a Worker') - } -} + async fetch(request, env) { + const url = new URL(request.url); + if (url.pathname === "/original-image") { + const image = await fetch( + `https://imagedelivery.net/${env.CLOUDFLARE_ACCOUNT_HASH}/${IMAGE_ID}/public`, + ); + return image; + } + return new Response("Image Resizing with a Worker"); + }, +}; ``` Update `env.CLOUDFLARE_ACCOUNT_HASH` with your [Cloudflare account ID](/fundamentals/setup/find-account-and-zone-ids/). Update `env.IMAGE_ID` with your [image ID](/images/get-started/). @@ -394,57 +403,56 @@ You will now use [Cloudflare image transformations](/images/transform-images/), ```js null {11} export default { - async fetch(request, env) { - const url = new URL(request.url) - if (url.pathname === '/original-image') { - const image = await fetch( - `https://imagedelivery.net/${env.CLOUDFLARE_ACCOUNT_HASH}/${IMAGE_ID}/public` - ); - return image; - } - - if (url.pathname === '/thumbnail') { } - - return new Response('Image Resizing with a Worker') - } -} + async fetch(request, env) { + const url = new URL(request.url); + if (url.pathname === "/original-image") { + const image = await fetch( + `https://imagedelivery.net/${env.CLOUDFLARE_ACCOUNT_HASH}/${IMAGE_ID}/public`, + ); + return image; + } + + if (url.pathname === "/thumbnail") { + } + + return new Response("Image Resizing with a Worker"); + }, +}; ``` Next, use the `fetch` method to apply the image transformation changes on top of the background image. The overlay options are nested in `options.cf.image`. ```js null {12,13,14,15,16,17,18} export default { - async fetch(request, env) { - const url = new URL(request.url) - - if (url.pathname === '/original-image') { - const image = await fetch( - `https://imagedelivery.net/${env.CLOUDFLARE_ACCOUNT_HASH}/${IMAGE_ID}/public` - ); - return image; - } - - if (url.pathname === '/thumbnail') { - fetch(imageURL, { - cf: { - image: {} - } - }) - } - - return new Response('Image Resizing with a Worker') - } -} + async fetch(request, env) { + const url = new URL(request.url); + + if (url.pathname === "/original-image") { + const image = await fetch( + `https://imagedelivery.net/${env.CLOUDFLARE_ACCOUNT_HASH}/${IMAGE_ID}/public`, + ); + return image; + } + + if (url.pathname === "/thumbnail") { + fetch(imageURL, { + cf: { + image: {}, + }, + }); + } + + return new Response("Image Resizing with a Worker"); + }, +}; ``` The `imageURL` is the URL of the image you want to use as a background image. In the `cf.image` object, specify the options you want to apply to the background image. :::note - At time of publication, Cloudflare image transformations do not allow resizing images in a Worker that is stored in Cloudflare Images. 
Instead of using the image you served on the `/original-image` route, you will use the same image from a different source. - ::: Add your background image to an assets directory on GitHub and push your changes to GitHub. Copy the URL of the image upload by performing a left click on the image and selecting the **Copy Remote File Url** option. @@ -452,13 +460,14 @@ Add your background image to an assets directory on GitHub and push your changes Replace the `imageURL` value with the copied remote URL. ```js null {2,3} -if (url.pathname === '/thumbnail') { - const imageURL = "https://github.com/lauragift21/social-image-demo/blob/1ed9044463b891561b7438ecdecbdd9da48cdb03/assets/cover.png?raw=true" - fetch(imageURL, { - cf: { - image: {} - } - }) +if (url.pathname === "/thumbnail") { + const imageURL = + "https://github.com/lauragift21/social-image-demo/blob/1ed9044463b891561b7438ecdecbdd9da48cdb03/assets/cover.png?raw=true"; + fetch(imageURL, { + cf: { + image: {}, + }, + }); } ``` @@ -466,18 +475,18 @@ Next, add overlay options in the image object. Resize the image to the preferred ```js null {3,4,5,6,7,8,9,10,11,12} fetch(imageURL, { - cf: { - image: { - width: 1280, - height: 720, - draw: [ - { - url: 'https://text-to-image.examples.workers.dev', - left: 40, - }, - ], - }, - }, + cf: { + image: { + width: 1280, + height: 720, + draw: [ + { + url: "https://text-to-image.examples.workers.dev", + left: 40, + }, + ], + }, + }, }); ``` @@ -493,7 +502,7 @@ name = "thumbnail-image" Deploy your Worker by running: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` The command deploys your Worker to custom `workers.dev` subdomain. Go to your `.workers.dev` subdomain and go to the `/thumbnail` route. @@ -508,25 +517,25 @@ To add dynamic text, append any text attached to the `/thumbnail` URL using quer ```js for (const title of url.searchParams.values()) { - try { - const editedImage = await fetch(imageURL, { - cf: { - image: { - width: 1280, - height: 720, - draw: [ - { - url: `https://text-to-image.examples.workers.dev/?${title}`, - left: 50 - } - ], - }, - }, - }); - return editedImage; - } catch (error) { - console.log(error); - } + try { + const editedImage = await fetch(imageURL, { + cf: { + image: { + width: 1280, + height: 720, + draw: [ + { + url: `https://text-to-image.examples.workers.dev/?${title}`, + left: 50, + }, + ], + }, + }, + }); + return editedImage; + } catch (error) { + console.log(error); + } } ``` diff --git a/src/content/docs/workers/tutorials/github-sms-notifications-using-twilio/index.mdx b/src/content/docs/workers/tutorials/github-sms-notifications-using-twilio/index.mdx index ca8bf427e6b165..bd031d6e0a589a 100644 --- a/src/content/docs/workers/tutorials/github-sms-notifications-using-twilio/index.mdx +++ b/src/content/docs/workers/tutorials/github-sms-notifications-using-twilio/index.mdx @@ -6,24 +6,21 @@ pcx_content_type: tutorial title: GitHub SMS notifications using Twilio languages: - JavaScript - --- - - -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; In this tutorial, you will learn to build an SMS notification system on Workers to receive updates on a GitHub repository. Your Worker will send you a text update using Twilio when there is new activity on your repository. You will learn how to: -* Build webhooks using Workers. -* Integrate Workers with GitHub and Twilio. -* Use Worker secrets with Wrangler. +- Build webhooks using Workers. +- Integrate Workers with GitHub and Twilio. 
+- Use Worker secrets with Wrangler. ![Animated gif of receiving a text message on your phone after pushing changes to a repository](/images/workers/tutorials/github-sms/video-of-receiving-a-text-after-pushing-to-a-repo.gif) -*** +--- @@ -31,14 +28,26 @@ You will learn how to: Start by using `npm create cloudflare@latest` to create a Worker project in the command line: - - - + + + Make note of the URL that your application was deployed to. You will be using it when you configure your GitHub webhook. ```sh -$ cd github-twilio-notifications +cd github-twilio-notifications ``` Inside of your new `github-sms-notifications` directory, `src/index.js` represents the entry point to your Cloudflare Workers application. You will configure this file for most of the tutorial. @@ -49,7 +58,7 @@ First, create a webhook for your repository to post updates to your Worker. Insi You can reference the finished code at this [GitHub repository](https://github.com/rickyrobinett/workers-sdk/tree/main/templates/examples/github-sms-notifications-using-twilio). -*** +--- ## Configure GitHub @@ -59,7 +68,7 @@ To start, configure a GitHub webhook to post to your Worker when there is an upd 2. Set the Payload URL to the `/webhook` path on the Worker URL that you made note of when your application was first deployed. -3. In the **Content type** dropdown, select *application/json*. +3. In the **Content type** dropdown, select _application/json_. 4. In the **Secret** field, input a secret key of your choice. @@ -69,7 +78,7 @@ To start, configure a GitHub webhook to post to your Worker when there is an upd ![Following instructions to set up your webhook in the GitHub webhooks settings dashboard](~/assets/images/workers/tutorials/github-sms/github-config-screenshot.png) -*** +--- ## Parsing the response @@ -79,9 +88,9 @@ Initially, your generated `index.js` should look like this: ```js export default { - async fetch(request, env, ctx) { - return new Response('Hello World!'); - }, + async fetch(request, env, ctx) { + return new Response("Hello World!"); + }, }; ``` @@ -89,11 +98,11 @@ Use the `request.method` property of [`Request`](/workers/runtime-apis/request/) ```js export default { - async fetch(request, env, ctx) { - if(request.method !== 'POST') { - return new Response('Please send a POST request!'); - } - }, + async fetch(request, env, ctx) { + if (request.method !== "POST") { + return new Response("Please send a POST request!"); + } + }, }; ``` @@ -119,36 +128,38 @@ async fetch(request, env, ctx) { The `checkSignature` function will use the Node.js crypto library to hash the received payload with your known secret key to ensure it matches the request hash. GitHub uses an HMAC hexdigest to compute the hash in the SHA-256 format. You will place this function at the top of your `index.js` file, before your export. 
```js -import { createHmac, timingSafeEqual } from 'node:crypto'; -import { Buffer } from 'node:buffer'; +import { createHmac, timingSafeEqual } from "node:crypto"; +import { Buffer } from "node:buffer"; function checkSignature(text, headers, githubSecretToken) { - const hmac = createHmac('sha256', githubSecretToken); - hmac.update(text); - const expectedSignature = hmac.digest('hex'); - const actualSignature = headers.get('x-hub-signature-256'); - - const trusted = Buffer.from(`sha256=${expectedSignature}`, 'ascii'); - const untrusted = Buffer.from(actualSignature, 'ascii'); - - return trusted.byteLength == untrusted.byteLength - && timingSafeEqual(trusted, untrusted); -}; + const hmac = createHmac("sha256", githubSecretToken); + hmac.update(text); + const expectedSignature = hmac.digest("hex"); + const actualSignature = headers.get("x-hub-signature-256"); + + const trusted = Buffer.from(`sha256=${expectedSignature}`, "ascii"); + const untrusted = Buffer.from(actualSignature, "ascii"); + + return ( + trusted.byteLength == untrusted.byteLength && + timingSafeEqual(trusted, untrusted) + ); +} ``` To make this work, you need to use [`wrangler secret put`](/workers/wrangler/commands/#put-3) to set your `GITHUB_SECRET_TOKEN`. This token is the secret you picked earlier when configuring you GitHub webhook: ```sh -$ npx wrangler secret put GITHUB_SECRET_TOKEN +npx wrangler secret put GITHUB_SECRET_TOKEN ``` -Add the nodejs\_compat flag to your `wrangler.toml` file: +Add the nodejs_compat flag to your `wrangler.toml` file: ```toml compatibility_flags = ["nodejs_compat"] ``` -*** +--- ## Sending a text with Twilio @@ -160,37 +171,37 @@ Create a new function called `sendText()` that will handle making the request to ```js async function sendText(accountSid, authToken, message) { - const endpoint = `https://api.twilio.com/2010-04-01/Accounts/${accountSid}/Messages.json`; - - const encoded = new URLSearchParams({ - 'To': '%YOUR_PHONE_NUMBER%', - 'From': '%YOUR_TWILIO_NUMBER%', - 'Body': message - }); - - const token = btoa(`${accountSid}:${authToken}`); - - const request = { - body: encoded, - method: 'POST', - headers: { - 'Authorization': `Basic ${token}`, - 'Content-Type': 'application/x-www-form-urlencoded', - } - }; - - const response = await fetch(endpoint, request); - const result = await response.json(); - - return Response.json(result); -}; + const endpoint = `https://api.twilio.com/2010-04-01/Accounts/${accountSid}/Messages.json`; + + const encoded = new URLSearchParams({ + To: "%YOUR_PHONE_NUMBER%", + From: "%YOUR_TWILIO_NUMBER%", + Body: message, + }); + + const token = btoa(`${accountSid}:${authToken}`); + + const request = { + body: encoded, + method: "POST", + headers: { + Authorization: `Basic ${token}`, + "Content-Type": "application/x-www-form-urlencoded", + }, + }; + + const response = await fetch(endpoint, request); + const result = await response.json(); + + return Response.json(result); +} ``` To make this work, you need to set some secrets to hide your `ACCOUNT_SID` and `AUTH_TOKEN` from the source code. You can set secrets with [`wrangler secret put`](/workers/wrangler/commands/#put-3) in your command line. ```sh -$ npx wrangler secret put TWILIO_ACCOUNT_SID -$ npx wrangler secret put TWILIO_AUTH_TOKEN +npx wrangler secret put TWILIO_ACCOUNT_SID +npx wrangler secret put TWILIO_AUTH_TOKEN ``` Modify your `githubWebhookHandler` to send a text message using the `sendText` function you just made. 
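The unchanged handler body is elided from the diff context above, but the modification amounts to something like the sketch below. The payload fields used here (`repository.full_name` and `sender.login`) are only examples of what a GitHub push event includes; adapt the message to the events your webhook subscribes to:

```js
async function githubWebhookHandler(request, env) {
  if (request.method !== "POST") {
    return new Response("Please send a POST request!");
  }

  try {
    // Read the raw body once so the same text is used for signature
    // verification and for JSON parsing.
    const rawBody = await request.text();

    if (!checkSignature(rawBody, request.headers, env.GITHUB_SECRET_TOKEN)) {
      return new Response("Signature check failed", { status: 403 });
    }

    const payload = JSON.parse(rawBody);
    const message = `New activity on ${payload.repository.full_name} by ${payload.sender.login}`;

    return await sendText(
      env.TWILIO_ACCOUNT_SID,
      env.TWILIO_AUTH_TOKEN,
      message,
    );
  } catch (error) {
    return new Response(`Error: ${error}`, { status: 500 });
  }
}
```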
@@ -225,7 +236,7 @@ async fetch(request, env, ctx) { Run the `npx wrangler publish` command to redeploy your Worker project: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` ![Video of receiving a text after pushing to a repo](/images/workers/tutorials/github-sms/video-of-receiving-a-text-after-pushing-to-a-repo.gif) @@ -240,5 +251,5 @@ By completing this tutorial, you have learned how to build webhooks using Worker {/* */} -* [Build a JAMStack app](/workers/tutorials/build-a-jamstack-app/) -* [Build a QR code generator](/workers/tutorials/build-a-qr-code-generator/) +- [Build a JAMStack app](/workers/tutorials/build-a-jamstack-app/) +- [Build a QR code generator](/workers/tutorials/build-a-qr-code-generator/) diff --git a/src/content/docs/workers/tutorials/handle-form-submissions-with-airtable/index.mdx b/src/content/docs/workers/tutorials/handle-form-submissions-with-airtable/index.mdx index edae3715776aeb..c393366aafb281 100644 --- a/src/content/docs/workers/tutorials/handle-form-submissions-with-airtable/index.mdx +++ b/src/content/docs/workers/tutorials/handle-form-submissions-with-airtable/index.mdx @@ -8,12 +8,9 @@ tags: - Forms languages: - JavaScript - --- - - -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; In this tutorial, you will use [Cloudflare Workers](/workers/) and [Airtable](https://airtable.com) to persist form submissions from a front-end user interface. Airtable is a free-to-use spreadsheet solution that has an approachable API for developers. Workers will handle incoming form submissions and use Airtable's [REST API](https://airtable.com/api) to asynchronously persist the data in an Airtable base (Airtable's term for a spreadsheet) for later reference. @@ -85,14 +82,26 @@ To handle the form submission, create and deploy a Worker that parses the incomi Create a new `airtable-form-handler` Worker project: - - - + + + Then, move into the newly created directory: ```sh -$ cd airtable-form-handler +cd airtable-form-handler ``` ## 3. Configure an Airtable base @@ -118,20 +127,24 @@ Next, navigate to [Airtable's API page](https://airtable.com/api) and select you You will also need to create a **Personal access token** that you'll use to access your Airtable base. You can do so by visiting the [Personal access tokens](https://airtable.com/create/tokens) page on Airtable's website and creating a new token. Make sure that you configure the token in the following way: -* Scope: the `data.records:write` scope must be set on the token -* Access: access should be granted to the base you have been working with in this tutorial +- Scope: the `data.records:write` scope must be set on the token +- Access: access should be granted to the base you have been working with in this tutorial The results access token should now be set in your application. To make the token available in your codebase, use the [`wrangler secret`](/workers/wrangler/commands/#secret) command. The `secret` command encrypts and stores environment variables for use in your function, without revealing them to users. 
Run `wrangler secret put`, passing `AIRTABLE_ACCESS_TOKEN` as the name of your secret: -```sh title="Set the `AIRTABLE_ACCESS_TOKEN` secret with Wrangler" -$ npx wrangler secret put AIRTABLE_ACCESS_TOKEN +```sh title="Set the `AIRTABLE_ACCESS_TOKEN` secret with Wrangler" +npx wrangler secret put AIRTABLE_ACCESS_TOKEN + +```` + +```sh output Enter the secret text you would like assigned to the variable AIRTABLE_ACCESS_TOKEN on the script named airtable-form-handler: ****** 🌀 Creating the secret for script name airtable-form-handler ✨ Success! Uploaded secret AIRTABLE_ACCESS_TOKEN. -``` +```` Before you continue, review the keys that you should have from Airtable: @@ -147,50 +160,44 @@ In your Worker project's `index.js` file, replace the default code with a Worker ```js export default { - async fetch(request, env) { - const url = new URL(request.url) - if (url.pathname === "/submit") { - await submitHandler(request, env) - } - return new Response('Not found', { status: 404 }) - } -} + async fetch(request, env) { + const url = new URL(request.url); + if (url.pathname === "/submit") { + await submitHandler(request, env); + } + return new Response("Not found", { status: 404 }); + }, +}; ``` The `submitHandler` has two functions. First, it will parse the form data coming from your HTML5 form. Once the data is parsed, use the Airtable API to persist a new row (a new form submission) to your table: ```js -async function submitHandler (request, env) { - if (request.method !== "POST") { - return new Response("Method Not Allowed", { - status: 405 - }) - } - const body = await request.formData(); - - const { - first_name, - last_name, - email, - phone, - subject, - message - } = Object.fromEntries(body) - - // The keys in "fields" are case-sensitive, and - // should exactly match the field names you set up - // in your Airtable table, such as "First Name". - const reqBody = { - fields: { - "First Name": first_name, - "Last Name": last_name, - "Email": email, - "Phone Number": phone, - "Subject": subject, - "Message": message - } - } - await createAirtableRecord(env, reqBody) +async function submitHandler(request, env) { + if (request.method !== "POST") { + return new Response("Method Not Allowed", { + status: 405, + }); + } + const body = await request.formData(); + + const { first_name, last_name, email, phone, subject, message } = + Object.fromEntries(body); + + // The keys in "fields" are case-sensitive, and + // should exactly match the field names you set up + // in your Airtable table, such as "First Name". + const reqBody = { + fields: { + "First Name": first_name, + "Last Name": last_name, + Email: email, + "Phone Number": phone, + Subject: subject, + Message: message, + }, + }; + await createAirtableRecord(env, reqBody); } // Existing code @@ -205,19 +212,22 @@ Then you call `createAirtableRecord` (the function you will define next). 
The `c ```js async function createAirtableRecord(env, body) { - try { - const result = fetch(`https://api.airtable.com/v0/${env.AIRTABLE_BASE_ID}/${encodeURIComponent(env.AIRTABLE_TABLE_NAME)}`, { - method: 'POST', - body: JSON.stringify(body), - headers: { - Authorization: `Bearer ${env.AIRTABLE_ACCESS_TOKEN}`, - 'Content-Type': 'application/json', - } - }) - return result; - } catch (error) { - console.error(error); - } + try { + const result = fetch( + `https://api.airtable.com/v0/${env.AIRTABLE_BASE_ID}/${encodeURIComponent(env.AIRTABLE_TABLE_NAME)}`, + { + method: "POST", + body: JSON.stringify(body), + headers: { + Authorization: `Bearer ${env.AIRTABLE_ACCESS_TOKEN}`, + "Content-Type": "application/json", + }, + }, + ); + return result; + } catch (error) { + console.error(error); + } } // Existing code @@ -242,18 +252,18 @@ AIRTABLE_TABLE_NAME = "Form Submissions" With all these fields submitted, it is time to deploy your Workers serverless function and get your form communicating with it. First, publish your Worker: ```sh title="Deploy your Worker" -$ npx wrangler deploy +npx wrangler deploy ``` Your Worker project will deploy to a unique URL — for example, `https://workers-airtable-form.cloudflare.workers.dev`. This represents the first part of your front-end form's `action` attribute — the second part is the path for your form handler, which is `/submit`. In your front-end UI, configure your `form` tag as seen below: ```html
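<!--
  Reminder (not part of the original example): the submitHandler above reads
  the fields first_name, last_name, email, phone, subject, and message from
  the form body, so each input's "name" attribute must match those keys.
-->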
- +
``` @@ -267,7 +277,7 @@ With this tutorial completed, you have created a Worker that can accept form sub ## Related resources -* [Build a Slackbot](/workers/tutorials/build-a-slackbot) -* [Build a To-Do List Jamstack App](/workers/tutorials/build-a-jamstack-app) -* [Build a blog using Nuxt.js and Sanity.io on Cloudflare Pages](/pages/tutorials/build-a-blog-using-nuxt-and-sanity) -* [James Quick's video on building a Cloudflare Workers + Airtable integration](https://www.youtube.com/watch?v=tFQ2kbiu1K4) +- [Build a Slackbot](/workers/tutorials/build-a-slackbot) +- [Build a To-Do List Jamstack App](/workers/tutorials/build-a-jamstack-app) +- [Build a blog using Nuxt.js and Sanity.io on Cloudflare Pages](/pages/tutorials/build-a-blog-using-nuxt-and-sanity) +- [James Quick's video on building a Cloudflare Workers + Airtable integration](https://www.youtube.com/watch?v=tFQ2kbiu1K4) diff --git a/src/content/docs/workers/tutorials/openai-function-calls-workers/index.mdx b/src/content/docs/workers/tutorials/openai-function-calls-workers/index.mdx index ac2f27cc1a0f6a..b3804c1f3342e6 100644 --- a/src/content/docs/workers/tutorials/openai-function-calls-workers/index.mdx +++ b/src/content/docs/workers/tutorials/openai-function-calls-workers/index.mdx @@ -8,12 +8,9 @@ languages: - JavaScript tags: - AI - --- - - -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; In this tutorial, you will build a project that leverages [OpenAI's function calling](https://platform.openai.com/docs/guides/function-calling) feature, available in OpenAI's latest Chat Completions API models. @@ -21,13 +18,13 @@ The function calling feature allows the AI model to intelligently decide when to ## What you will learn -* How to use OpenAI's function calling feature. -* Integrating OpenAI's API in a Cloudflare Worker. -* Fetching and processing website content using Cheerio. -* Handling API responses and function calls in JavaScript. -* Storing API keys as secrets with Wrangler. +- How to use OpenAI's function calling feature. +- Integrating OpenAI's API in a Cloudflare Worker. +- Fetching and processing website content using Cheerio. +- Handling API responses and function calls in JavaScript. +- Storing API keys as secrets with Wrangler. -*** +--- @@ -35,14 +32,26 @@ The function calling feature allows the AI model to intelligently decide when to Create a Worker project in the command line: - - - + + + Go to your new `openai-function-calling-workers` Worker project: ```sh -$ cd openai-function-calling-workers +cd openai-function-calling-workers ``` Inside of your new `openai-function-calling-workers` directory, find the `src/index.js` file. You will configure this file for most of the tutorial. @@ -54,27 +63,26 @@ You will also need an OpenAI account and API key for this tutorial. If you do no With your Worker project created, make your first request to OpenAI. You will use the OpenAI node library to interact with the OpenAI API. 
In this project, you will also use the Cheerio library to handle processing the HTML content of websites ```sh -$ npm install openai cheerio +npm install openai cheerio ``` Now, define the structure of your Worker in `index.js`: ```js export default { - async fetch(request, env, ctx) { - // Initialize OpenAI API - // Handle incoming requests - return new Response('Hello World!'); - }, + async fetch(request, env, ctx) { + // Initialize OpenAI API + // Handle incoming requests + return new Response("Hello World!"); + }, }; - ``` Above `export default`, add the imports for `openai` and `cheerio`: ```js import OpenAI from "openai"; -import * as cheerio from 'cheerio'; +import * as cheerio from "cheerio"; ``` Within your `fetch` function, instantiate your `OpenAI` client: @@ -93,7 +101,7 @@ async fetch(request, env, ctx) { Use [`wrangler secret put`](/workers/wrangler/commands/#put-3) to set `OPENAI_API_KEY`. This [secret's](/workers/configuration/secrets/) value is the API key you created earlier in the OpenAI dashboard: ```sh -$ npx wrangler secret put +npx wrangler secret put ``` For local development, create a new file `.dev.vars` in your Worker project and add this line. Make sure to replace `OPENAI_API_KEY` with your own OpenAI API key: @@ -106,62 +114,66 @@ Now, make a request to the OpenAI [Chat Completions API](https://platform.openai ```js export default { - async fetch(request, env, ctx) { - const openai = new OpenAI({ - apiKey: env.OPENAI_API_KEY, - }); - - const url = new URL(request.url); - const message = url.searchParams.get('message'); - - const messages = [{ role: 'user', content: message ? message : "What's in the news today?" }]; - - const tools = [ - { - type: 'function', - function: { - name: 'read_website_content', - description: 'Read the content on a given website', - parameters: { - type: 'object', - properties: { - url: { - type: 'string', - description: 'The URL to the website to read', - }, - }, - required: ['url'], - }, - }, - }, - ]; - - const chatCompletion = await openai.chat.completions.create({ - model: 'gpt-4o-mini', - messages: messages, - tools: tools, - tool_choice: 'auto', - }); - - const assistantMessage = chatCompletion.choices[0].message; - console.log(assistantMessage); - - //Later you will continue handling the assistant's response here - return new Response(assistantMessage.content); - }, + async fetch(request, env, ctx) { + const openai = new OpenAI({ + apiKey: env.OPENAI_API_KEY, + }); + + const url = new URL(request.url); + const message = url.searchParams.get("message"); + + const messages = [ + { + role: "user", + content: message ? message : "What's in the news today?", + }, + ]; + + const tools = [ + { + type: "function", + function: { + name: "read_website_content", + description: "Read the content on a given website", + parameters: { + type: "object", + properties: { + url: { + type: "string", + description: "The URL to the website to read", + }, + }, + required: ["url"], + }, + }, + }, + ]; + + const chatCompletion = await openai.chat.completions.create({ + model: "gpt-4o-mini", + messages: messages, + tools: tools, + tool_choice: "auto", + }); + + const assistantMessage = chatCompletion.choices[0].message; + console.log(assistantMessage); + + //Later you will continue handling the assistant's response here + return new Response(assistantMessage.content); + }, }; - ``` Review the arguments you are passing to OpenAI: -* **model**: This is the model you want OpenAI to use for your request. In this case, you are using `gpt-4o-mini`. 
-* **messages**: This is an array containing all messages that are part of the conversation. Initially you provide a message from the user, and we later add the response from the model. The content of the user message is either the `message` query parameter from the request URL or the default "What's in the news today?". -* **tools**: An array containing the actions available to the AI model. In this example you only have one tool, `read_website_content`, which reads the content on a given website. - * **name**: The name of your function. In this case, it is `read_website_content`. - * **description**: A short description that lets the model know the purpose of the function. This is optional but helps the model know when to select the tool. - * **parameters**: A JSON Schema object which describes the function. In this case we request a response containing an object with the required property `url`. -* **tool\_choice**: This argument is technically optional as `auto` is the default. This argument indicates that either a function call or a normal message response can be returned by OpenAI. +- **model**: This is the model you want OpenAI to use for your request. In this case, you are using `gpt-4o-mini`. +- **messages**: This is an array containing all messages that are part of the conversation. Initially you provide a message from the user, and we later add the response from the model. The content of the user message is either the `message` query parameter from the request URL or the default "What's in the news today?". +- **tools**: An array containing the actions available to the AI model. In this example you only have one tool, `read_website_content`, which reads the content on a given website. + - **name**: The name of your function. In this case, it is `read_website_content`. + - **description**: A short description that lets the model know the purpose of the function. This is optional but helps the model know when to select the tool. + - **parameters**: A JSON Schema object which describes the function. In this case we request a response containing an object with the required property `url`. +- **tool_choice**: This argument is technically optional as `auto` is the default. This argument indicates that either a function call or a normal message response can be returned by OpenAI. ## 3. Building your `read_website_content()` function @@ -171,16 +183,16 @@ Add this code above the `export default` block in your `index.js` file: ```js async function read_website_content(url) { - console.log('reading website content'); - - const response = await fetch(url); - const body = await response.text(); - let cheerioBody = cheerio.load(body); - const resp = { - website_body: cheerioBody('p').text(), - url: url, - }; - return JSON.stringify(resp); + console.log("reading website content"); + + const response = await fetch(url); + const body = await response.text(); + let cheerioBody = cheerio.load(body); + const resp = { + website_body: cheerioBody("p").text(), + url: url, + }; + return JSON.stringify(resp); } ``` @@ -196,28 +208,28 @@ Modify the fetch method within the `export default` block as follows: // ... your previous code ... 
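// Note: `tool_calls` is only present when the model decided to call one of
// the functions declared in `tools`. Each entry names the function and
// carries JSON-encoded arguments; run the matching local function, append
// its output as a `role: "tool"` message, then ask the model again for a
// final natural-language answer.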
if (assistantMessage.tool_calls) { - for (const toolCall of assistantMessage.tool_calls) { - if (toolCall.function.name === 'read_website_content') { - const url = JSON.parse(toolCall.function.arguments).url; - const websiteContent = await read_website_content(url); - messages.push({ - role: 'tool', - tool_call_id: toolCall.id, - name: toolCall.function.name, - content: websiteContent, - }); - } - } - - const secondChatCompletion = await openai.chat.completions.create({ - model: 'gpt-4o-mini', - messages: messages, - }); - - return new Response(secondChatCompletion.choices[0].message.content); + for (const toolCall of assistantMessage.tool_calls) { + if (toolCall.function.name === "read_website_content") { + const url = JSON.parse(toolCall.function.arguments).url; + const websiteContent = await read_website_content(url); + messages.push({ + role: "tool", + tool_call_id: toolCall.id, + name: toolCall.function.name, + content: websiteContent, + }); + } + } + + const secondChatCompletion = await openai.chat.completions.create({ + model: "gpt-4o-mini", + messages: messages, + }); + + return new Response(secondChatCompletion.choices[0].message.content); } else { - // this is your existing return statement - return new Response(assistantMessage.content); + // this is your existing return statement + return new Response(assistantMessage.content); } ``` @@ -232,7 +244,7 @@ Test your code by running `npx wrangler dev` and open the provided url in your b To deploy your application, run the `npx wrangler deploy` command to deploy your Worker application: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` You can now preview your Worker at `..workers.dev`. Going to this URL will display the response from OpenAI. Optionally, add the `message` URL parameter to write a custom message: for example, `https://..workers.dev/?message=What is the weather in NYC today?`. diff --git a/src/content/docs/workers/tutorials/postgres/index.mdx b/src/content/docs/workers/tutorials/postgres/index.mdx index e7928562ec85f6..2ae9f16ad1b2d8 100644 --- a/src/content/docs/workers/tutorials/postgres/index.mdx +++ b/src/content/docs/workers/tutorials/postgres/index.mdx @@ -9,12 +9,9 @@ tags: languages: - TypeScript - SQL - --- - - -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; In this tutorial, you will learn how to create a Cloudflare Workers application and connect it to a PostgreSQL database using [TCP Sockets](/workers/runtime-apis/tcp-sockets/). The Workers application you create in this tutorial will interact with a product database inside of PostgreSQL. @@ -31,18 +28,30 @@ To continue: First, use the [`create-cloudflare` CLI](https://github.com/cloudflare/workers-sdk/tree/main/packages/create-cloudflare) to create a new Worker application. To do this, open a terminal window and run the following command: - + This will prompt you to install the [`create-cloudflare`](https://www.npmjs.com/package/create-cloudflare) package and lead you through a setup wizard. - + If you choose to deploy, you will be asked to authenticate (if not logged in already), and your project will be deployed. If you deploy, you can still modify your Worker code and deploy again at the end of this tutorial. Now, move into the newly created directory: ```sh -$ cd postgres-tutorial +cd postgres-tutorial ``` ### Enable Node.js compatibility @@ -58,7 +67,7 @@ node_compat = true To connect to a PostgreSQL database, you will need the `pg` library. 
In your Worker application directory, run the following command to install the library: ```sh -$ npm install pg +npm install pg ``` Make sure you are using `pg` (node-postgres) version `8.11.0` or higher, as earlier versions do not support the Cloudflare Workers [TCP Sockets API](/workers/runtime-apis/tcp-sockets/). @@ -83,7 +92,10 @@ Replace `username`, `password`, `host`, `port`, and `database` with the appropri Set your connection string as a [secret](/workers/configuration/secrets/) so that it is not stored as plain text. Use [`wrangler secret put`](/workers/wrangler/commands/#secret) with the example variable name `DB_URL`: ```sh -$ npx wrangler secret put DB_URL +npx wrangler secret put DB_URL +``` + +```sh output ➜ wrangler secret put DB_URL ------------------------------------------------------- ? Enter a secret value: › ******************** @@ -106,7 +118,10 @@ DB_NAME = "productsdb" To set your password as a [secret](/workers/configuration/secrets/) so that it is not stored as plain text, use [`wrangler secret put`](/workers/wrangler/commands/#secret). `DB_PASSWORD` is an example variable name for this secret to be accessed in your Worker: ```sh -$ npx wrangler secret put DB_PASSWORD +npx wrangler secret put DB_PASSWORD +``` + +```sh output ------------------------------------------------------- ? Enter a secret value: › ******************** ✨ Success! Uploaded secret DB_PASSWORD @@ -133,11 +148,11 @@ await client.connect(); ```typescript const client = new Client({ - user: env.DB_USERNAME, - password: env.DB_PASSWORD, - host: env.DB_HOST, - port: env.DB_PORT, - database: env.DB_NAME + user: env.DB_USERNAME, + password: env.DB_PASSWORD, + host: env.DB_HOST, + port: env.DB_PORT, + database: env.DB_NAME, }); await client.connect(); ``` @@ -148,7 +163,6 @@ To demonstrate how to interact with the products database, you will fetch data f :::note - If you are following along in your own PostgreSQL instance, set up the `products` using the following SQL `CREATE TABLE` statement. This statement defines the columns and their respective data types for the `products` table: ```sql @@ -160,29 +174,28 @@ CREATE TABLE products ( ); ``` - ::: Replace the existing code in your `worker.ts` file with the following code: ```typescript export default { - async fetch(request, env, ctx): Promise { - const client = new Client(env.DB_URL); - await client.connect(); - - // Query the products table - const result = await client.query("SELECT * FROM products"); - - // Return the result as JSON - const resp = new Response(JSON.stringify(result.rows), { - headers: { "Content-Type": "application/json" }, - }); - - // Clean up the client - ctx.waitUntil(client.end()); - return resp; - }, + async fetch(request, env, ctx): Promise { + const client = new Client(env.DB_URL); + await client.connect(); + + // Query the products table + const result = await client.query("SELECT * FROM products"); + + // Return the result as JSON + const resp = new Response(JSON.stringify(result.rows), { + headers: { "Content-Type": "application/json" }, + }); + + // Clean up the client + ctx.waitUntil(client.end()); + return resp; + }, } satisfies ExportedHandler; ``` @@ -193,7 +206,7 @@ This code establishes a connection to the PostgreSQL database within your Worker Run the following command to deploy your Worker: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` Your application is now live and accessible at `..workers.dev`. @@ -211,26 +224,26 @@ Add the following code snippet inside the `fetch` event handler in your `worker. 
```typescript const url = new URL(request.url); if (request.method === "POST" && url.pathname === "/products") { - // Parse the request's JSON payload - const productData = await request.json(); + // Parse the request's JSON payload + const productData = await request.json(); - // Insert the new product into the database - const insertQuery = ` + // Insert the new product into the database + const insertQuery = ` INSERT INTO products (name, description, price) VALUES ($1, $2, $3) RETURNING * `; - const values = [productData.name, productData.description, productData.price]; - const insertResult = await client.query(insertQuery, values); + const values = [productData.name, productData.description, productData.price]; + const insertResult = await client.query(insertQuery, values); - // Return the inserted row as JSON - const insertResp = new Response(JSON.stringify(insertResult.rows[0]), { - headers: { "Content-Type": "application/json" }, - }); + // Return the inserted row as JSON + const insertResp = new Response(JSON.stringify(insertResult.rows[0]), { + headers: { "Content-Type": "application/json" }, + }); - // Clean up the client - ctx.waitUntil(client.end()); - return insertResp; + // Clean up the client + ctx.waitUntil(client.end()); + return insertResp; } ``` @@ -248,25 +261,25 @@ Modify your existing Worker code to accommodate the new feature: ```typescript if (request.method === "POST" && url.pathname === "/products") { - // (Insert a new row as detailed in the code snippet above) + // (Insert a new row as detailed in the code snippet above) } else if (request.method === "GET" && url.pathname === "/products") { - // (Fetch data from the products table as detailed in Step 5) + // (Fetch data from the products table as detailed in Step 5) } ``` After making these changes, deploy the Worker again by running: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` You can now use your Cloudflare Worker to insert new rows into the `products` table. To test this functionality, send a `POST` request to your Worker's URL with the `/products` path, along with a JSON payload containing the new product data: ```json { - "name": "Sample Product", - "description": "This is a sample product", - "price": 19.99 + "name": "Sample Product", + "description": "This is a sample product", + "price": 19.99 } ``` diff --git a/src/content/docs/workers/tutorials/send-emails-with-postmark/index.mdx b/src/content/docs/workers/tutorials/send-emails-with-postmark/index.mdx index 9375ba1f5d9e0b..99443541bfc1a8 100644 --- a/src/content/docs/workers/tutorials/send-emails-with-postmark/index.mdx +++ b/src/content/docs/workers/tutorials/send-emails-with-postmark/index.mdx @@ -8,39 +8,36 @@ tags: - Email languages: - JavaScript - --- - - In this tutorial, you will learn how to send transactional emails from Workers using [Postmark](https://postmarkapp.com/). At the end of this tutorial, you’ll be able to: -* Create a Worker to send emails. -* Sign up and add a Cloudflare domain to Postmark. -* Send emails from your Worker using Postmark. -* Store API keys securely with secrets. +- Create a Worker to send emails. +- Sign up and add a Cloudflare domain to Postmark. +- Send emails from your Worker using Postmark. +- Store API keys securely with secrets. ## Prerequisites To continue with this tutorial, you’ll need: -* A [Cloudflare account](https://dash.cloudflare.com/sign-up/workers-and-pages), if you don’t already have one. -* A [registered](/registrar/get-started/register-domain/) domain. 
-* Installed [npm](https://docs.npmjs.com/getting-started). -* A [Postmark account](https://account.postmarkapp.com/sign_up). +- A [Cloudflare account](https://dash.cloudflare.com/sign-up/workers-and-pages), if you don’t already have one. +- A [registered](/registrar/get-started/register-domain/) domain. +- Installed [npm](https://docs.npmjs.com/getting-started). +- A [Postmark account](https://account.postmarkapp.com/sign_up). ## Create a Worker project Start by using [C3](/pages/get-started/c3/) to create a Worker project in the command line, then, answer the prompts: ```sh -$ npm create cloudflare@latest +npm create cloudflare@latest ``` Alternatively, you can use CLI arguments to speed things up: ```sh -$ npm create cloudflare@latest email-with-postmark -- --type=hello-world --ts=false --git=true --deploy=false +npm create cloudflare@latest email-with-postmark -- --type=hello-world --ts=false --git=true --deploy=false ``` This creates a simple hello-world Worker having the following content: @@ -48,10 +45,9 @@ This creates a simple hello-world Worker having the following content: ```js export default { async fetch(request, env, ctx) { - return new Response('Hello World!'); + return new Response("Hello World!"); }, }; - ``` ## Add your domain to Postmark @@ -68,7 +64,6 @@ Next, you’re presented with a list of DNS records to add to your Cloudflare do If you need more help adding DNS records in Cloudflare, refer to [Manage DNS records](/dns/manage-dns-records/how-to/create-dns-records/). - ::: When that’s done, head back to Postmark and click on the `Verify` buttons. If all records are properly configured, your domain status should be updated to `Verified`. @@ -85,23 +80,22 @@ The final step is putting it all together in a Worker. In your Worker, make a po [Postmark’s JavaScript library](https://www.npmjs.com/package/postmark) is currently not supported on Workers. Use the [email API](https://postmarkapp.com/developer/user-guide/send-email-with-api) instead. - ::: ```jsx export default { async fetch(request, env, ctx) { - return await fetch('https://api.postmarkapp.com/email', { - method: 'POST', + return await fetch("https://api.postmarkapp.com/email", { + method: "POST", headers: { - 'Content-Type': 'application/json', - 'X-Postmark-Server-Token': 'your_postmark_api_token_here', + "Content-Type": "application/json", + "X-Postmark-Server-Token": "your_postmark_api_token_here", }, body: JSON.stringify({ - From: 'hello@example.com', - To: 'someone@example.com', - Subject: 'Hello World', - HtmlBody: '
<html><body>Hello from Workers</body></html>
', + From: "hello@example.com", + To: "someone@example.com", + Subject: "Hello World", + HtmlBody: "
<html><body>Hello from Workers</body></html>
", }), }); }, @@ -111,7 +105,7 @@ export default { To test your code locally, run the following command and navigate to [http://localhost:8787/](http://localhost:8787/) in a browser: ```sh -$ npm start +npm start ``` Deploy your Worker with `npm run deploy`. @@ -129,7 +123,7 @@ POSTMARK_API_TOKEN=your_postmark_api_token_here Also ensure the secret is added to your deployed worker by running: ```sh title="Add secret to deployed Worker" -$ npx wrangler secret put POSTMARK_API_TOKEN +npx wrangler secret put POSTMARK_API_TOKEN ``` The added secret can be accessed on via the `env` parameter passed to your Worker’s fetch event handler: @@ -137,17 +131,17 @@ The added secret can be accessed on via the `env` parameter passed to your Worke ```jsx export default { async fetch(request, env, ctx) { - return await fetch('https://api.postmarkapp.com/email', { - method: 'POST', + return await fetch("https://api.postmarkapp.com/email", { + method: "POST", headers: { - 'Content-Type': 'application/json', - 'X-Postmark-Server-Token': env.POSTMARK_API_TOKEN, + "Content-Type": "application/json", + "X-Postmark-Server-Token": env.POSTMARK_API_TOKEN, }, body: JSON.stringify({ - From: 'hello@example.com', - To: 'someone@example.com', - Subject: 'Hello World', - HtmlBody: '
<html><body>Hello from Workers</body></html>
', + From: "hello@example.com", + To: "someone@example.com", + Subject: "Hello World", + HtmlBody: "
<html><body>Hello from Workers</body></html>
", }), }); }, @@ -158,6 +152,6 @@ And finally, deploy this update with `npm run deploy`. ## Related resources -* [Storing API keys and tokens with Secrets](/workers/configuration/secrets/). -* [Transferring your domain to Cloudflare](/registrar/get-started/transfer-domain-to-cloudflare/). -* [Send emails from Workers](/email-routing/email-workers/send-email-workers/) +- [Storing API keys and tokens with Secrets](/workers/configuration/secrets/). +- [Transferring your domain to Cloudflare](/registrar/get-started/transfer-domain-to-cloudflare/). +- [Send emails from Workers](/email-routing/email-workers/send-email-workers/) diff --git a/src/content/docs/workers/tutorials/send-emails-with-resend/index.mdx b/src/content/docs/workers/tutorials/send-emails-with-resend/index.mdx index 28c41241c1559a..5f729571c182c0 100644 --- a/src/content/docs/workers/tutorials/send-emails-with-resend/index.mdx +++ b/src/content/docs/workers/tutorials/send-emails-with-resend/index.mdx @@ -9,39 +9,36 @@ tags: - Resend languages: - JavaScript - --- - - In this tutorial, you will learn how to send transactional emails from Workers using [Resend](https://resend.com/). At the end of this tutorial, you’ll be able to: -* Create a Worker to send emails. -* Sign up and add a Cloudflare domain to Resend. -* Send emails from your Worker using Resend. -* Store API keys securely with secrets. +- Create a Worker to send emails. +- Sign up and add a Cloudflare domain to Resend. +- Send emails from your Worker using Resend. +- Store API keys securely with secrets. ## Prerequisites To continue with this tutorial, you’ll need: -* A [Cloudflare account](https://dash.cloudflare.com/sign-up/workers-and-pages), if you don’t already have one. -* A [registered](/registrar/get-started/register-domain/) domain. -* Installed [npm](https://docs.npmjs.com/getting-started). -* A [Resend account](https://resend.com/signup). +- A [Cloudflare account](https://dash.cloudflare.com/sign-up/workers-and-pages), if you don’t already have one. +- A [registered](/registrar/get-started/register-domain/) domain. +- Installed [npm](https://docs.npmjs.com/getting-started). +- A [Resend account](https://resend.com/signup). ## Create a Worker project Start by using [C3](/pages/get-started/c3/) to create a Worker project in the command line, then, answer the prompts: ```sh -$ npm create cloudflare@latest +npm create cloudflare@latest ``` Alternatively, you can use CLI arguments to speed things up: ```sh -$ npm create cloudflare@latest email-with-resend -- --type=hello-world --ts=false --git=true --deploy=false +npm create cloudflare@latest email-with-resend -- --type=hello-world --ts=false --git=true --deploy=false ``` This creates a simple hello-world Worker having the following content: @@ -49,10 +46,9 @@ This creates a simple hello-world Worker having the following content: ```js export default { async fetch(request, env, ctx) { - return new Response('Hello World!'); + return new Response("Hello World!"); }, }; - ``` ## Add your domain to Resend @@ -65,10 +61,8 @@ Next, you’re presented with a list of DNS records to add to your Cloudflare do :::note - If you need more help adding DNS records in Cloudflare, refer to [Manage DNS records](/dns/manage-dns-records/how-to/create-dns-records/). - ::: When that’s done, head back to Resend and click on the `Verify DNS Records` button. If all records are properly configured, your domain status should be updated to `Verified`. 
@@ -82,23 +76,23 @@ Lastly, navigate to `API Keys` with the side menu, to create an API key. Give yo The final step is putting it all together in a Worker. Open up a terminal in the directory of the Worker you created earlier. Then, install the Resend SDK: ```sh -$ npm i resend +npm i resend ``` In your Worker, import and use the Resend library like so: ```jsx -import { Resend } from 'resend'; +import { Resend } from "resend"; export default { async fetch(request, env, ctx) { - const resend = new Resend('your_resend_api_key'); + const resend = new Resend("your_resend_api_key"); const { data, error } = await resend.emails.send({ - from: 'hello@example.com', - to: 'someone@example.com', - subject: 'Hello World', - html: '
<html><body>Hello from Workers</body></html>
', + from: "hello@example.com", + to: "someone@example.com", + subject: "Hello World", + html: "
<html><body>Hello from Workers</body></html>
", }); return Response.json({ data, error }); @@ -109,7 +103,7 @@ export default { To test your code locally, run the following command and navigate to [http://localhost:8787/](http://localhost:8787/) in a browser: ```sh -$ npm start +npm start ``` Deploy your Worker with `npm run deploy`. @@ -127,23 +121,23 @@ RESEND_API_KEY=your_resend_api_key Also ensure the secret is added to your deployed worker by running: ```sh title="Add secret to deployed Worker" -$ npx wrangler secret put RESEND_API_KEY +npx wrangler secret put RESEND_API_KEY ``` The added secret can be accessed on via the `env` parameter passed to your Worker’s fetch event handler: ```jsx -import { Resend } from 'resend'; +import { Resend } from "resend"; export default { async fetch(request, env, ctx) { const resend = new Resend(env.RESEND_API_KEY); const { data, error } = await resend.emails.send({ - from: 'hello@example.com', - to: 'someone@example.com', - subject: 'Hello World', - html: '
<html><body>Hello from Workers</body></html>
', + from: "hello@example.com", + to: "someone@example.com", + subject: "Hello World", + html: "
<html><body>Hello from Workers</body></html>
", }); return Response.json({ data, error }); @@ -155,6 +149,6 @@ And finally, deploy this update with `npm run deploy`. ## Related resources -* [Storing API keys and tokens with Secrets](/workers/configuration/secrets/). -* [Transferring your domain to Cloudflare](/registrar/get-started/transfer-domain-to-cloudflare/). -* [Send emails from Workers](/email-routing/email-workers/send-email-workers/) +- [Storing API keys and tokens with Secrets](/workers/configuration/secrets/). +- [Transferring your domain to Cloudflare](/registrar/get-started/transfer-domain-to-cloudflare/). +- [Send emails from Workers](/email-routing/email-workers/send-email-workers/) diff --git a/src/content/docs/workers/tutorials/store-data-with-fauna/index.mdx b/src/content/docs/workers/tutorials/store-data-with-fauna/index.mdx index 607685e040574c..51d2b873405fe1 100644 --- a/src/content/docs/workers/tutorials/store-data-with-fauna/index.mdx +++ b/src/content/docs/workers/tutorials/store-data-with-fauna/index.mdx @@ -8,29 +8,26 @@ tags: - Hono languages: - TypeScript - --- - - -import { Render, TabItem, Tabs, PackageManagers } from "~/components" +import { Render, TabItem, Tabs, PackageManagers } from "~/components"; In this tutorial, you learn how to store and retrieve data in your Cloudflare Workers applications by building a REST API that manages an inventory catalog using [Fauna](https://fauna.com/) as its data layer. ## Learning goals -* How to store and retrieve data from Fauna in Workers. -* How to use Wrangler to store secrets securely. -* How to use [Hono](https://hono.dev) as a web framework for your Workers. +- How to store and retrieve data from Fauna in Workers. +- How to use Wrangler to store secrets securely. +- How to use [Hono](https://hono.dev) as a web framework for your Workers. Building with Fauna, Workers, and Hono enables you to create a globally distributed, strongly consistent, fully serverless REST API in a single repository. Fauna is a document-based database with a flexible schema. This allows you to define the structure of your data – whatever it may be – and store documents that adhere to that structure. In this tutorial, you will build a product inventory, where each `product` document must contain the following properties: -* **title** - A human-friendly string that represents the title or name of a product. -* **serialNumber** - A machine-friendly string that uniquely identifies the product. -* **weightLbs** - A floating point number that represents the weight in pounds of the product. -* **quantity** A non-negative integer that represents how many items of a particular product there are in the inventory. +- **title** - A human-friendly string that represents the title or name of a product. +- **serialNumber** - A machine-friendly string that uniquely identifies the product. +- **weightLbs** - A floating point number that represents the weight in pounds of the product. +- **quantity** A non-negative integer that represents how many items of a particular product there are in the inventory. Documents are stored in a [collection](https://docs.fauna.com/fauna/current/reference/schema_entities/collection/). Collections in document databases are groups of related documents. @@ -46,10 +43,8 @@ Open the [Fauna dashboard](https://dashboard.fauna.com/) in your browser and log :::note[Fauna Account] - If you do not have a Fauna account, [sign up](https://dashboard.fauna.com/register) and deploy this template using the free tier. 
- ::: In the Fauna dashboard: @@ -64,7 +59,7 @@ In the Fauna dashboard: To create a collection named **Products**, enter the FQL query in the **SHELL** window on right side of the screen. ```js title="Create a new collection" -Collection.create({ name: "Products" }) +Collection.create({ name: "Products" }); ``` Select **Run**. You will see an output similar to the following. @@ -93,10 +88,8 @@ The Fauna dashboard displays the key's secret. Copy and save this server key to :::caution[Protect your keys] - Server keys can read and write all documents in all collections and can call all [user-defined functions](https://docs.fauna.com/fauna/current/cookbook/data_model/user_defined_functions) (UDFs). Protect server keys and do not commit them to source control repositories. - ::: ## Manage your inventory with Workers @@ -105,18 +98,18 @@ Server keys can read and write all documents in all collections and can call all Create a new project by using [C3](https://github.com/cloudflare/workers-sdk/tree/main/packages/create-cloudflare). - + To continue with this guide: -* For *What would you like to start with*?, select `Framework Starter`. -* For *Which development framework do you want to use?*, select `Hono`. -* For, *Do you want to deploy your application?*, select `No`. +- For _What would you like to start with_?, select `Framework Starter`. +- For _Which development framework do you want to use?_, select `Hono`. +- For, _Do you want to deploy your application?_, select `No`. Then, move into your newly created directory: ```sh -$ cd fauna-workers +cd fauna-workers ``` Update the `wrangler.toml` file to set the name for the Worker. @@ -140,7 +133,7 @@ FAUNA_SECRET= For production, store your secret safely with [`wrangler secret put` command](/workers/wrangler/commands/#put-3): ```sh title="Store your Fauna secret" -$ npx wrangler secret put FAUNA_SECRET +npx wrangler secret put FAUNA_SECRET ``` When prompted, paste the Fauna server secret you obtained earlier. 
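If you deploy from a script or CI, you can also pipe the value in instead of typing it at the prompt. This sketch assumes the server key is exposed as a `FAUNA_SERVER_KEY` environment variable in your shell:

```sh title="Pipe the secret from an environment variable"
echo "$FAUNA_SERVER_KEY" | npx wrangler secret put FAUNA_SECRET
```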
@@ -154,13 +147,13 @@ Install [the Fauna JavaScript driver](https://github.com/fauna/fauna-js) in your ```sh title="Install the Fauna driver" -$ npm install fauna +npm install fauna ``` ```sh title="Install the Fauna driver" -$ yarn add fauna +yarn add fauna ``` @@ -170,37 +163,37 @@ $ yarn add fauna Replace the contents of your `src/index.ts` file with the skeleton of your API: ```ts title="src/index.ts" -import { Hono } from 'hono'; -import { Client, fql, ServiceError } from 'fauna'; +import { Hono } from "hono"; +import { Client, fql, ServiceError } from "fauna"; type Bindings = { - FAUNA_SECRET: string; + FAUNA_SECRET: string; }; type Variables = { - faunaClient: Client; + faunaClient: Client; }; type Product = { - id: string; - serialNumber: number; - title: string; - weightLbs: number; - quantity: number; + id: string; + serialNumber: number; + title: string; + weightLbs: number; + quantity: number; }; const app = new Hono<{ Bindings: Bindings; Variables: Variables }>(); -app.use('*', async (c, next) => { - const faunaClient = new Client({ - secret: c.env.FAUNA_SECRET, - }); - c.set('faunaClient', faunaClient); - await next(); +app.use("*", async (c, next) => { + const faunaClient = new Client({ + secret: c.env.FAUNA_SECRET, + }); + c.set("faunaClient", faunaClient); + await next(); }); -app.get('/', (c) => { - return c.text('Hello World'); +app.get("/", (c) => { + return c.text("Hello World"); }); export default app; @@ -209,12 +202,12 @@ export default app; This is custom middleware to initialize the Fauna client and set the instance with `c.set()` for later use in another handler: ```js title="Custom middleware for the Fauna Client" -app.use('*', async (c, next) => { - const faunaClient = new Client({ - secret: c.env.FAUNA_SECRET, - }); - c.set('faunaClient', faunaClient); - await next(); +app.use("*", async (c, next) => { + const faunaClient = new Client({ + secret: c.env.FAUNA_SECRET, + }); + c.set("faunaClient", faunaClient); + await next(); }); ``` @@ -225,28 +218,26 @@ You can access the `FAUNA_SECRET` environment variable from `c.env.FAUNA_SECRET` Add your first Hono handler to the `src/index.ts` file. This route accepts `POST` requests to the `/products` endpoint: ```ts title="Create product documents" -app.post('/products', async (c) => { - const { serialNumber, title, weightLbs } = await c.req.json>(); - const query = fql`Products.create({ +app.post("/products", async (c) => { + const { serialNumber, title, weightLbs } = + await c.req.json>(); + const query = fql`Products.create({ serialNumber: ${serialNumber}, title: ${title}, weightLbs: ${weightLbs}, quantity: 0 })`; - const result = await c.var.faunaClient.query(query); - return c.json(result.data); + const result = await c.var.faunaClient.query(query); + return c.json(result.data); }); - ``` :::caution[Handler order] - In Hono, you should place your handler below the custom middleware. This is because middleware and handlers are executed in sequence from top to bottom. If you place the handler first, you cannot retrieve the instance of the Fauna client using `c.var.faunaClient`. - ::: This route applied an FQL query in the `fql` function that creates a new document in the **Products** collection: @@ -257,18 +248,18 @@ fql`Products.create({ title: ${title}, weightLbs: ${weightLbs}, quantity: 0 -})` +})`; ``` To review what a document looks like, run the following query. 
In the Fauna dashboard, go to **Explorer** > Region name > Database name like a `cloudflare_rest_api` > the **SHELL** window: ```js title="Create query in pure FQL" Products.create({ - serialNumber: "A48432348", - title: "Gaming Console", - weightLbs: 5, - quantity: 0 -}) + serialNumber: "A48432348", + title: "Gaming Console", + weightLbs: 5, + quantity: 0, +}); ``` Fauna returns the created document: @@ -289,7 +280,7 @@ Examining the route you create, when the query is successful, the data newly cre ```js title="Return the new document data" return c.json({ - productId: result.data, + productId: result.data, }); ``` @@ -299,18 +290,18 @@ If Fauna returns any error, an exception is raised by the client. You can catch ```ts title="Handle errors" app.onError((e, c) => { - if (e instanceof ServiceError) { - return c.json( - { - status: e.httpStatus, - code: e.code, - message: e.message, - }, - e.httpStatus - ); - } - console.trace(e); - return c.text('Internal Server Error', 500); + if (e instanceof ServiceError) { + return c.json( + { + status: e.httpStatus, + code: e.code, + message: e.message, + }, + e.httpStatus, + ); + } + console.trace(e); + return c.text("Internal Server Error", 500); }); ``` @@ -321,18 +312,18 @@ Next, create a route that reads a single document from the **Products** collecti Add the following handler to your `src/index.ts` file. This route accepts `GET` requests at the `/products/:productId` endpoint: ```ts title="Retrieve product documents" -app.get('/products/:productId', async (c) => { - const productId = c.req.param('productId'); - const query = fql`Products.byId(${productId})`; - const result = await c.var.faunaClient.query(query); - return c.json(result.data); +app.get("/products/:productId", async (c) => { + const productId = c.req.param("productId"); + const query = fql`Products.byId(${productId})`; + const result = await c.var.faunaClient.query(query); + return c.json(result.data); }); ``` The FQL query uses the [`byId()`](https://docs.fauna.com/fauna/current/reference/schema_entities/collection/instance-byid) method to retrieve a full document from the **Productions** collection: ```js title="Retrieve a document by ID in FQL inside JavaScript" -fql`Products.byId(productId)` +fql`Products.byId(productId)`; ``` If the document exists, return it in the response body: @@ -348,11 +339,11 @@ If not, an error is returned. The logic to delete product documents is similar to the logic for retrieving products. 
Add the following route to your `src/index.ts` file: ```ts title="Delete product documents" -app.delete('/products/:productId', async (c) => { - const productId = c.req.param('productId'); - const query = fql`Products.byId(${productId})!.delete()`; - const result = await c.var.faunaClient.query(query); - return c.json(result.data); +app.delete("/products/:productId", async (c) => { + const productId = c.req.param("productId"); + const query = fql`Products.byId(${productId})!.delete()`; + const result = await c.var.faunaClient.query(query); + return c.json(result.data); }); ``` @@ -367,13 +358,13 @@ Before deploying your Worker, test it locally by using Wrangler's [`dev`](/worke ```sh title="Develop your Worker" -$ npm run dev +npm run dev ``` ```sh title="Develop your Worker" -$ yarn dev +yarn dev ``` @@ -383,7 +374,7 @@ Once the development server is up and running, start making HTTP requests to you First, create a new product: ```sh title="Create a new product" -$ curl \ +curl \ --data '{"serialNumber": "H56N33834", "title": "Bluetooth Headphones", "weightLbs": 0.5}' \ --header 'Content-Type: application/json' \ --request POST \ @@ -394,22 +385,20 @@ You should receive a `200` response similar to the following: ```json title="Create product response" { - "productId": "" + "productId": "" } ``` :::note - Copy the `productId` value for use in the remaining test queries. - ::: Next, read the document you created: ```sh title="Read a document" -$ curl \ +curl \ --header 'Content-Type: application/json' \ --request GET \ http://127.0.0.1:8787/products/ @@ -419,17 +408,17 @@ The response should be the new document serialized to JSON: ```json title="Read product response" { - "coll": { - "name": "Products" - }, - "id": "", - "ts": { - "isoString": "" - }, - "serialNumber": "H56N33834", - "title": "Bluetooth Headphones", - "weightLbs": 0.5, - "quantity": 0 + "coll": { + "name": "Products" + }, + "id": "", + "ts": { + "isoString": "" + }, + "serialNumber": "H56N33834", + "title": "Bluetooth Headphones", + "weightLbs": 0.5, + "quantity": 0 } ``` @@ -438,13 +427,13 @@ Finally, deploy your Worker using the [`wrangler deploy`](/workers/wrangler/comm ```sh title="Deploy your Worker" -$ npm run deploy +npm run deploy ``` ```sh title="Deploy your Worker" -$ yarn deploy +yarn deploy ``` @@ -460,12 +449,13 @@ This will present a problem. To calculate the total quantity of a product, you f Add the following route to your `src/index.ts` file. 
This route responds to HTTP `PATCH` requests on the `/products/:productId/add-quantity` URL endpoint: ```ts title="Update inventory quantity" -app.patch('/products/:productId/add-quantity', async (c) => { - const productId = c.req.param('productId'); - const { quantity } = await c.req.json>(); - const query = fql`Products.byId(${productId}){ quantity : .quantity + ${quantity}}`; - const result = await c.var.faunaClient.query>(query); - return c.json(result.data); +app.patch("/products/:productId/add-quantity", async (c) => { + const productId = c.req.param("productId"); + const { quantity } = await c.req.json>(); + const query = fql`Products.byId(${productId}){ quantity : .quantity + ${quantity}}`; + const result = + await c.var.faunaClient.query>(query); + return c.json(result.data); }); ``` @@ -477,16 +467,14 @@ fql`Products.byId(${productId}){ quantity : .quantity + ${quantity}}`; :::note[Consistency guarantees in Fauna] - -Even if multiple Workers update this quantity from different parts of the world, Fauna guarantees the consistency of the data across all Fauna regions. This article on [consistency](https://fauna.com/blog/consistency-without-clocks-faunadb-transaction-protocol?utm_source=Cloudflare\&utm_medium=referral\&utm_campaign=Q4_CF_2021) explains how Fauna's distributed protocol works without the need for atomic clocks. - +Even if multiple Workers update this quantity from different parts of the world, Fauna guarantees the consistency of the data across all Fauna regions. This article on [consistency](https://fauna.com/blog/consistency-without-clocks-faunadb-transaction-protocol?utm_source=Cloudflare&utm_medium=referral&utm_campaign=Q4_CF_2021) explains how Fauna's distributed protocol works without the need for atomic clocks. ::: Test your update route: ```sh title="Update product inventory" -$ curl \ +curl \ --data '{"quantity": 5}' \ --header 'Content-Type: application/json' \ --request PATCH \ @@ -497,7 +485,7 @@ The response should be the entire updated document with five additional items in ```json title="Update product response" { - "quantity": 5 + "quantity": 5 } ``` @@ -506,13 +494,13 @@ Update your Worker by deploying it to Cloudflare. ```sh title="Update your Worker in Cloudflare" -$ npm run deploy +npm run deploy ``` ```sh title="Update your Worker in Cloudflare" -$ yarn deploy +yarn deploy ``` diff --git a/src/content/docs/workers/tutorials/upload-assets-with-r2/index.mdx b/src/content/docs/workers/tutorials/upload-assets-with-r2/index.mdx index 367bca61e04b20..4261d3b9070af6 100644 --- a/src/content/docs/workers/tutorials/upload-assets-with-r2/index.mdx +++ b/src/content/docs/workers/tutorials/upload-assets-with-r2/index.mdx @@ -8,12 +8,9 @@ products: - R2 languages: - TypeScript - --- - - -import { Render, PackageManagers } from "~/components" +import { Render, PackageManagers } from "~/components"; This tutorial explains how to create a TypeScript-based Cloudflare Workers project that can securely access files from and upload files to a [Cloudflare R2](/r2/) bucket. Cloudflare R2 allows developers to store large amounts of unstructured data without the costly egress bandwidth fees associated with typical cloud storage services. @@ -29,14 +26,26 @@ To continue: First, use the [`create-cloudflare` CLI](https://github.com/cloudflare/workers-sdk/tree/main/packages/create-cloudflare) to create a new Worker. 
To do this, open a terminal window and run the following command: - - - + + + Move into your newly created directory: ```sh -$ cd upload-r2-assets +cd upload-r2-assets ``` ## Create an R2 bucket @@ -44,13 +53,13 @@ $ cd upload-r2-assets Before you integrate R2 bucket access into your Worker application, an R2 bucket must be created: ```sh -$ npx wrangler r2 bucket create +npx wrangler r2 bucket create ``` Replace `` with the name you want to assign to your bucket. List your account's R2 buckets to verify that a new bucket has been added: ```sh -$ npx wrangler r2 bucket list +npx wrangler r2 bucket list ``` ## Configure access to an R2 bucket @@ -77,28 +86,28 @@ To fetch files from the R2 bucket, use the `BINDING.get` function. In the below ```ts interface Env { - MY_BUCKET: R2Bucket; + MY_BUCKET: R2Bucket; } export default { - async fetch(request, env): Promise { - // For example, the request URL my-worker.account.workers.dev/image.png - const url = new URL(request.url); - const key = url.pathname.slice(1); - // Retrieve the key "image.png" - const object = await env.MY_BUCKET.get(key); - - if (object === null) { - return new Response('Object Not Found', { status: 404 }); - } - - const headers = new Headers(); - object.writeHttpMetadata(headers); - headers.set('etag', object.httpEtag); - - return new Response(object.body, { - headers, - }); - }, + async fetch(request, env): Promise { + // For example, the request URL my-worker.account.workers.dev/image.png + const url = new URL(request.url); + const key = url.pathname.slice(1); + // Retrieve the key "image.png" + const object = await env.MY_BUCKET.get(key); + + if (object === null) { + return new Response("Object Not Found", { status: 404 }); + } + + const headers = new Headers(); + object.writeHttpMetadata(headers); + headers.set("etag", object.httpEtag); + + return new Response(object.body, { + headers, + }); + }, } satisfies ExportedHandler; ``` @@ -111,36 +120,36 @@ Next, you will add the ability to upload to your R2 bucket using authentication. Create a secret value of your choice -- for instance, a random string or password. Using the Wrangler CLI, add the secret to your project as `AUTH_SECRET`: ```sh -$ npx wrangler secret put AUTH_SECRET +npx wrangler secret put AUTH_SECRET ``` Now, add a new code path that handles a `PUT` HTTP request. This new code will check that the previously uploaded secret is correctly used for authentication, and then upload to R2 using `MY_BUCKET.put(key, data)`: ```ts interface Env { - MY_BUCKET: R2Bucket; - AUTH_SECRET: string; + MY_BUCKET: R2Bucket; + AUTH_SECRET: string; } export default { - async fetch(request, env): Promise { - if (request.method === 'PUT') { - // Note that you could require authentication for all requests - // by moving this code to the top of the fetch function. - const auth = request.headers.get('Authorization'); - const expectedAuth = `Bearer ${env.AUTH_SECRET}`; - - if (!auth || auth !== expectedAuth) { - return new Response('Unauthorized', { status: 401 }); - } - - const url = new URL(request.url); - const key = url.pathname.slice(1); - await env.MY_BUCKET.put(key, request.body); - return new Response(`Object ${key} uploaded successfully!`); - } - - // include the previous code here... - }, + async fetch(request, env): Promise { + if (request.method === "PUT") { + // Note that you could require authentication for all requests + // by moving this code to the top of the fetch function. 
+ const auth = request.headers.get("Authorization"); + const expectedAuth = `Bearer ${env.AUTH_SECRET}`; + + if (!auth || auth !== expectedAuth) { + return new Response("Unauthorized", { status: 401 }); + } + + const url = new URL(request.url); + const key = url.pathname.slice(1); + await env.MY_BUCKET.put(key, request.body); + return new Response(`Object ${key} uploaded successfully!`); + } + + // include the previous code here... + }, } satisfies ExportedHandler; ``` @@ -151,7 +160,7 @@ This approach ensures that only clients who provide a valid bearer token, via th After completing your Cloudflare Worker project, deploy it to Cloudflare. Make sure you are in your Worker application directory that you created for this tutorial, then run: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` Your application is now live and accessible at `..workers.dev`. diff --git a/src/content/docs/workers/tutorials/workers-kv-from-rust/index.mdx b/src/content/docs/workers/tutorials/workers-kv-from-rust/index.mdx index a2be8cb15e7fe4..90718cabc4cf4b 100644 --- a/src/content/docs/workers/tutorials/workers-kv-from-rust/index.mdx +++ b/src/content/docs/workers/tutorials/workers-kv-from-rust/index.mdx @@ -8,12 +8,9 @@ products: - KV languages: - Rust - --- - - -import { Render } from "~/components" +import { Render } from "~/components"; This tutorial will teach you how to read and write to KV directly from Rust using [workers-rs](https://github.com/cloudflare/workers-rs). @@ -24,13 +21,13 @@ using [workers-rs](https://github.com/cloudflare/workers-rs). To complete this tutorial, you will need: -* [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). -* [Wrangler](/workers/wrangler/) CLI. -* The [Rust](https://www.rust-lang.org/tools/install) toolchain. -* And `cargo-generate` sub-command by running: +- [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). +- [Wrangler](/workers/wrangler/) CLI. +- The [Rust](https://www.rust-lang.org/tools/install) toolchain. +- And `cargo-generate` sub-command by running: ```sh -$ cargo install cargo-generate +cargo install cargo-generate ``` ## 1. Create your Worker project in Rust @@ -38,7 +35,7 @@ $ cargo install cargo-generate Open a terminal window, and run the following command to generate a Worker project template in Rust: ```sh -$ cargo generate cloudflare/workers-rs +cargo generate cloudflare/workers-rs ``` Then select `template/hello-world-http` template, give your project a descriptive name and select enter. A new project should be created in your directory. Open the project in your editor and run `npx wrangler dev` to compile and run your project. @@ -50,7 +47,7 @@ In this tutorial, you will use Workers KV from Rust to build an app to store and In the terminal, use Wrangler to create a KV namespace for `cities`. This generates a configuration to be added to the project: ```sh -$ npx wrangler kv:namespace create cities +npx wrangler kv:namespace create cities ``` To add this configuration to your project, open the `wrangler.toml` file and create an entry for `kv_namespaces` above the build command: @@ -117,7 +114,7 @@ For the post handler, you will retrieve the country name from the path and the c Save the file and make a `POST` request to test this endpoint: ```sh -$ curl --json '{"city": "Paris"}' http://localhost:8787/France +curl --json '{"city": "Paris"}' http://localhost:8787/France ``` ## 4. 
Read data from KV @@ -139,7 +136,7 @@ To retrieve cities stored in KV, write a `GET` route that pulls the country name Save and make a curl request to test the endpoint: ```sh -$ curl http://localhost:8787/France +curl http://localhost:8787/France ``` ## 5. Deploy your project @@ -191,10 +188,10 @@ async fn fetch(req: Request, env: Env, _ctx: Context) -> Result { To deploy your Worker, run the following command: ```sh -$ npx wrangler deploy +npx wrangler deploy ``` ## Related resources -* [Rust support in Workers](/workers/languages/rust/). -* [Using KV in Workers](/kv/get-started/). +- [Rust support in Workers](/workers/languages/rust/). +- [Using KV in Workers](/kv/get-started/). diff --git a/src/content/docs/workers/wrangler/commands.mdx b/src/content/docs/workers/wrangler/commands.mdx index 19c361805b117a..e1f71f5a787363 100644 --- a/src/content/docs/workers/wrangler/commands.mdx +++ b/src/content/docs/workers/wrangler/commands.mdx @@ -5,65 +5,58 @@ head: - tag: title content: Commands - Wrangler description: Create, develop, and deploy your Cloudflare Workers with Wrangler commands. - --- -import { TabItem, Tabs } from "~/components" +import { TabItem, Tabs } from "~/components"; Wrangler offers a number of commands to manage your Cloudflare Workers. -* [`docs`](#docs) - Open this page in your default browser. -* [`init`](#init) - Create a new project from a variety of web frameworks and templates. (Deprecated — use `npm create cloudflare@latest` instead) -* [`generate`](#generate) - Create a Wrangler project using an existing [Workers template](https://github.com/cloudflare/worker-template). -* [`d1`](#d1) - Interact with D1. -* [`vectorize`](#vectorize) - Interact with Vectorize indexes. -* [`hyperdrive`](#hyperdrive) - Manage your Hyperdrives. -* [`deploy`](#deploy) - Deploy your Worker to Cloudflare. -* [`dev`](#dev) - Start a local server for developing your Worker. -* [`publish`](#publish) - Publish your Worker to Cloudflare. -* [`delete`](#delete-3) - Delete your Worker from Cloudflare. -* [`kv namespace`](#kv-namespace) - Manage Workers KV namespaces. -* [`kv key`](#kv-key) - Manage key-value pairs within a Workers KV namespace. -* [`kv bulk`](#kv-bulk) - Manage multiple key-value pairs within a Workers KV namespace in batches. -* [`r2 bucket`](#r2-bucket) - Manage Workers R2 buckets. -* [`r2 object`](#r2-object) - Manage Workers R2 objects. -* [`secret`](#secret) - Manage the secret variables for a Worker. -* [`secret:bulk`](#secretbulk) - Manage multiple secret variables for a Worker. -* [`tail`](#tail) - Start a session to livestream logs from a deployed Worker. -* [`pages`](#pages) - Configure Cloudflare Pages. -* [`queues`](#queues) - Configure Workers Queues. -* [`login`](#login) - Authorize Wrangler with your Cloudflare account using OAuth. -* [`logout`](#logout) - Remove Wrangler’s authorization for accessing your account. -* [`whoami`](#whoami) - Retrieve your user information and test your authentication configuration. -* [`versions`](#versions) - Retrieve details for recent versions. -* [`deployments`](#deployments) - Retrieve details for recent deployments. -* [`rollback`](#rollback) - Rollback to a recent deployment. -* [`dispatch-namespace`](#dispatch-namespace) - Interact with a [dispatch namespace](/cloudflare-for-platforms/workers-for-platforms/reference/how-workers-for-platforms-works/#dispatch-namespace). -* [`mtls-certificate`](#mtls-certificate) - Manage certificates used for mTLS connections. 
-* [`types`](#types) - Generate types from bindings and module rules in configuration. +- [`docs`](#docs) - Open this page in your default browser. +- [`init`](#init) - Create a new project from a variety of web frameworks and templates. (Deprecated — use `npm create cloudflare@latest` instead) +- [`generate`](#generate) - Create a Wrangler project using an existing [Workers template](https://github.com/cloudflare/worker-template). +- [`d1`](#d1) - Interact with D1. +- [`vectorize`](#vectorize) - Interact with Vectorize indexes. +- [`hyperdrive`](#hyperdrive) - Manage your Hyperdrives. +- [`deploy`](#deploy) - Deploy your Worker to Cloudflare. +- [`dev`](#dev) - Start a local server for developing your Worker. +- [`publish`](#publish) - Publish your Worker to Cloudflare. +- [`delete`](#delete-3) - Delete your Worker from Cloudflare. +- [`kv namespace`](#kv-namespace) - Manage Workers KV namespaces. +- [`kv key`](#kv-key) - Manage key-value pairs within a Workers KV namespace. +- [`kv bulk`](#kv-bulk) - Manage multiple key-value pairs within a Workers KV namespace in batches. +- [`r2 bucket`](#r2-bucket) - Manage Workers R2 buckets. +- [`r2 object`](#r2-object) - Manage Workers R2 objects. +- [`secret`](#secret) - Manage the secret variables for a Worker. +- [`secret:bulk`](#secretbulk) - Manage multiple secret variables for a Worker. +- [`tail`](#tail) - Start a session to livestream logs from a deployed Worker. +- [`pages`](#pages) - Configure Cloudflare Pages. +- [`queues`](#queues) - Configure Workers Queues. +- [`login`](#login) - Authorize Wrangler with your Cloudflare account using OAuth. +- [`logout`](#logout) - Remove Wrangler’s authorization for accessing your account. +- [`whoami`](#whoami) - Retrieve your user information and test your authentication configuration. +- [`versions`](#versions) - Retrieve details for recent versions. +- [`deployments`](#deployments) - Retrieve details for recent deployments. +- [`rollback`](#rollback) - Rollback to a recent deployment. +- [`dispatch-namespace`](#dispatch-namespace) - Interact with a [dispatch namespace](/cloudflare-for-platforms/workers-for-platforms/reference/how-workers-for-platforms-works/#dispatch-namespace). +- [`mtls-certificate`](#mtls-certificate) - Manage certificates used for mTLS connections. +- [`types`](#types) - Generate types from bindings and module rules in configuration. :::note - The following global flags work on every command, with some exceptions for `pages` commands. - - -* `--help` boolean - * Show help. -* `--version` boolean - * Show version number. -* `--config` string (not supported by Pages) - * Path to `.toml` configuration file. -* `--experimental-json-config` boolean (not supported by Pages) - * ⚠️ This is an experimental command. Read configuration from a `wrangler.json` file, instead of `wrangler.toml`. `wrangler.json` is a [JSONC](https://code.visualstudio.com/docs/languages/json#_json-with-comments) file. - - - +- `--help` boolean + - Show help. +- `--version` boolean + - Show version number. +- `--config` string (not supported by Pages) + - Path to `.toml` configuration file. +- `--experimental-json-config` boolean (not supported by Pages) + - ⚠️ This is an experimental command. Read configuration from a `wrangler.json` file, instead of `wrangler.toml`. `wrangler.json` is a [JSONC](https://code.visualstudio.com/docs/languages/json#_json-with-comments) file. 
::: -*** +--- ## How to run Wrangler commands @@ -78,19 +71,19 @@ Since Cloudflare recommends [installing Wrangler locally](/workers/wrangler/inst ```sh -$ npx wrangler [PARAMETERS] [OPTIONS] +npx wrangler [PARAMETERS] [OPTIONS] ``` ```sh -$ yarn wrangler [PARAMETERS] [OPTIONS] +yarn wrangler [PARAMETERS] [OPTIONS] ``` ```sh -$ pnpm wrangler [PARAMETERS] [OPTIONS] +pnpm wrangler [PARAMETERS] [OPTIONS] ``` @@ -113,24 +106,24 @@ You can then run them using your package manager of choice: ```sh -$ npm run deploy +npm run deploy ``` ```sh -$ yarn run deploy +yarn run deploy ``` ```sh -$ pnpm run deploy +pnpm run deploy ``` -*** +--- ## `docs` @@ -140,12 +133,8 @@ Open the Cloudflare developer documentation in your default browser. wrangler docs [] ``` - - -* `COMMAND` string optional - * The Wrangler command you want to learn more about. This opens your default browser to the section of the documentation that describes the command. - - +- `COMMAND` string optional + - The Wrangler command you want to learn more about. This opens your default browser to the section of the documentation that describes the command. ## `init` @@ -155,27 +144,22 @@ Create a new project via the [create-cloudflare-cli (C3) tool](/workers/get-star wrangler init [] [OPTIONS] ``` +- `NAME` string optional (default: name of working directory) + - The name of the Workers project. This is both the directory name and `name` property in the generated `wrangler.toml` [configuration](/workers/wrangler/configuration/) file. +- `--yes` boolean optional + - Answer yes to any prompts for new projects. +- `--from-dash` string optional + - Fetch a Worker initialized from the dashboard. This is done by passing the flag and the Worker name. `wrangler init --from-dash `. + - The `--from-dash` command will not automatically sync changes made to the dashboard after the command is used. Therefore, it is recommended that you continue using the CLI. - -* `NAME` string optional (default: name of working directory) - * The name of the Workers project. This is both the directory name and `name` property in the generated `wrangler.toml` [configuration](/workers/wrangler/configuration/) file. -* `--yes` boolean optional - * Answer yes to any prompts for new projects. -* `--from-dash` string optional - * Fetch a Worker initialized from the dashboard. This is done by passing the flag and the Worker name. `wrangler init --from-dash `. - * The `--from-dash` command will not automatically sync changes made to the dashboard after the command is used. Therefore, it is recommended that you continue using the CLI. - - -*** +--- ## `generate` :::note - This command has been deprecated as of [Wrangler v3](/workers/wrangler/migration/update-v2-to-v3/) and will be removed in a future version. - ::: Create a new project using an existing [Workers template](https://github.com/cloudflare/workers-sdk/tree/main/templates/worker). @@ -184,16 +168,12 @@ Create a new project using an existing [Workers template](https://github.com/clo wrangler generate [] [TEMPLATE] ``` +- `NAME` string optional (default: name of working directory) + - The name of the Workers project. This is both the directory name and `name` property in the generated `wrangler.toml` [configuration](/workers/wrangler/configuration/) file. +- `TEMPLATE` string optional + - The URL of a GitHub template, with a default [worker-template](https://github.com/cloudflare/worker-template). 
Browse a list of available templates on the [cloudflare/workers-sdk](https://github.com/cloudflare/workers-sdk/tree/main/templates#usage) repository. - -* `NAME` string optional (default: name of working directory) - * The name of the Workers project. This is both the directory name and `name` property in the generated `wrangler.toml` [configuration](/workers/wrangler/configuration/) file. -* `TEMPLATE` string optional - * The URL of a GitHub template, with a default [worker-template](https://github.com/cloudflare/worker-template). Browse a list of available templates on the [cloudflare/workers-sdk](https://github.com/cloudflare/workers-sdk/tree/main/templates#usage) repository. - - - -*** +--- ## `d1` @@ -212,14 +192,11 @@ Creates a new D1 database, and provides the binding and UUID that you will put i wrangler d1 create [OPTIONS] ``` - - -* `DATABASE_NAME` string required - * The name of the new D1 database. -* `--location` string optional - * Provide an optional [location hint](/d1/configuration/data-location/) for your database leader. - * Available options include `weur` (Western Europe), `eeur` (Eastern Europe), `apac` (Asia Pacific), `oc` (Oceania), `wnam` (Western North America), and `enam` (Eastern North America). - +- `DATABASE_NAME` string required + - The name of the new D1 database. +- `--location` string optional + - Provide an optional [location hint](/d1/configuration/data-location/) for your database leader. + - Available options include `weur` (Western Europe), `eeur` (Eastern Europe), `apac` (Asia Pacific), `oc` (Oceania), `wnam` (Western North America), and `enam` (Eastern North America). ### `info` @@ -229,14 +206,10 @@ Get information about a D1 database, including the current database size and sta wrangler d1 info [OPTIONS] ``` - - -* `DATABASE_NAME` string required - * The name of the D1 database to get information about. -* `--json` boolean optional - * Return output as JSON rather than a table. - - +- `DATABASE_NAME` string required + - The name of the D1 database to get information about. +- `--json` boolean optional + - Return output as JSON rather than a table. ### `list` @@ -246,12 +219,8 @@ List all D1 databases in your account. wrangler d1 list [OPTIONS] ``` - - -* `--json` boolean optional - * Return output as JSON rather than a table. - - +- `--json` boolean optional + - Return output as JSON rather than a table. ### `delete` @@ -261,14 +230,10 @@ Delete a D1 database. wrangler d1 delete [OPTIONS] ``` - - -* `DATABASE_NAME` string required - * The name of the D1 database to delete. -* `-y, --skip-confirmation` boolean optional - * Skip deletion confirmation prompt. - - +- `DATABASE_NAME` string required + - The name of the D1 database to delete. +- `-y, --skip-confirmation` boolean optional + - Skip deletion confirmation prompt. ### `execute` @@ -280,36 +245,30 @@ wrangler d1 execute [OPTIONS] :::note - You must provide either `--command` or `--file` for this command to run successfully. - ::: - - -* `DATABASE_NAME` string required - * The name of the D1 database to execute a query on. -* `--command` string optional - * The SQL query you wish to execute. -* `--file` string optional - * Path to the SQL file you wish to execute. -* `-y, --yes` boolean optional - * Answer `yes` to any prompts. -* `--local` boolean(default: true) optional - * Execute commands/files against a local database for use with [wrangler dev](#dev). 
-* `--remote` boolean (default: false) optional - * Execute commands/files against a remote D1 database for use with [wrangler dev --remote](#dev). -* `--persist-to` string optional - * Specify directory to use for local persistence (for use in combination with `--local`). -* `--json` boolean optional - * Return output as JSON rather than a table. -* `--preview` boolean optional - * Execute commands/files against a preview D1 database (as defined by `preview_database_id` in [Wrangler.toml](/workers/wrangler/configuration/#d1-databases)). -* `--batch-size` number optional - * Number of queries to send in a single batch. - - +- `DATABASE_NAME` string required + - The name of the D1 database to execute a query on. +- `--command` string optional + - The SQL query you wish to execute. +- `--file` string optional + - Path to the SQL file you wish to execute. +- `-y, --yes` boolean optional + - Answer `yes` to any prompts. +- `--local` boolean(default: true) optional + - Execute commands/files against a local database for use with [wrangler dev](#dev). +- `--remote` boolean (default: false) optional + - Execute commands/files against a remote D1 database for use with [wrangler dev --remote](#dev). +- `--persist-to` string optional + - Specify directory to use for local persistence (for use in combination with `--local`). +- `--json` boolean optional + - Return output as JSON rather than a table. +- `--preview` boolean optional + - Execute commands/files against a preview D1 database (as defined by `preview_database_id` in [Wrangler.toml](/workers/wrangler/configuration/#d1-databases)). +- `--batch-size` number optional + - Number of queries to send in a single batch. ### `export` @@ -326,22 +285,18 @@ Export a D1 database or table's schema and/or content to a `.sql` file. wrangler d1 export [OPTIONS] ``` - - -* `DATABASE_NAME` string required - * The name of the D1 database to export. -* `--remote` boolean (default: false) optional - * Execute commands/files against a remote D1 database for use with [wrangler dev --remote](#dev). -* `--output` string optional - * Path to the SQL file for your export. -* `--table` string optional - * The name of the table within a D1 database to export. -* `--no-data` boolean (default: false) optional - * Controls whether export SQL file contains database data. Note that `--no-data=true` is not recommended due to a known wrangler limitation that intreprets the value as false. -* `--no-schema` boolean (default: false) optional - * Controls whether export SQL file contains database schema. Note that `--no-schema=true` is not recommended due to a known wrangler limitation that intreprets the value as false. - - +- `DATABASE_NAME` string required + - The name of the D1 database to export. +- `--remote` boolean (default: false) optional + - Execute commands/files against a remote D1 database for use with [wrangler dev --remote](#dev). +- `--output` string optional + - Path to the SQL file for your export. +- `--table` string optional + - The name of the table within a D1 database to export. +- `--no-data` boolean (default: false) optional + - Controls whether export SQL file contains database data. Note that `--no-data=true` is not recommended due to a known wrangler limitation that intreprets the value as false. +- `--no-schema` boolean (default: false) optional + - Controls whether export SQL file contains database schema. Note that `--no-schema=true` is not recommended due to a known wrangler limitation that intreprets the value as false. 
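As a sketch of how these flags combine, assuming a database named `my-database`:

```sh
# Export the full schema and data of the remote database
npx wrangler d1 export my-database --remote --output=./backup.sql

# Export only the schema, passing the bare flag rather than --no-data=true
npx wrangler d1 export my-database --remote --no-data --output=./schema.sql
```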
### `time-travel restore` @@ -351,18 +306,14 @@ Restore a database to a specific point-in-time using [Time Travel](/d1/reference wrangler d1 time-travel restore [OPTIONS] ``` - - -* `DATABASE_NAME` string required - * The name of the D1 database to execute a query on. -* `--bookmark` string optional - * A D1 bookmark representing the state of a database at a specific point in time. -* `--timestamp` string optional - * A UNIX timestamp or JavaScript date-time `string` within the last 30 days. -* `--json` boolean optional - * Return output as JSON rather than a table. - - +- `DATABASE_NAME` string required + - The name of the D1 database to execute a query on. +- `--bookmark` string optional + - A D1 bookmark representing the state of a database at a specific point in time. +- `--timestamp` string optional + - A UNIX timestamp or JavaScript date-time `string` within the last 30 days. +- `--json` boolean optional + - Return output as JSON rather than a table. ### `time-travel info` @@ -372,16 +323,12 @@ Inspect the current state of a database for a specific point-in-time using [Time wrangler d1 time-travel info [OPTIONS] ``` - - -* `DATABASE_NAME` string required - * The name of the D1 database to execute a query on. -* `--timestamp` string optional - * A UNIX timestamp or JavaScript date-time `string` within the last 30 days. -* `--json` boolean optional - * Return output as JSON rather than a table. - - +- `DATABASE_NAME` string required + - The name of the D1 database to execute a query on. +- `--timestamp` string optional + - A UNIX timestamp or JavaScript date-time `string` within the last 30 days. +- `--json` boolean optional + - Return output as JSON rather than a table. ### `backup create` @@ -398,12 +345,8 @@ Initiate a D1 backup. wrangler d1 backup create ``` - - -* `DATABASE_NAME` string required - * The name of the D1 database to backup. - - +- `DATABASE_NAME` string required + - The name of the D1 database to backup. ### `backup list` @@ -420,12 +363,8 @@ List all available backups. wrangler d1 backup list ``` - - -* `DATABASE_NAME` string required - * The name of the D1 database to list the backups of. - - +- `DATABASE_NAME` string required + - The name of the D1 database to list the backups of. ### `backup restore` @@ -442,14 +381,10 @@ Restore a backup into a D1 database. wrangler d1 backup restore ``` - - -* `DATABASE_NAME` string required - * The name of the D1 database to restore the backup into. -* `BACKUP_ID` string required - * The ID of the backup you wish to restore. - - +- `DATABASE_NAME` string required + - The name of the D1 database to restore the backup into. +- `BACKUP_ID` string required + - The ID of the backup you wish to restore. ### `backup download` @@ -466,16 +401,12 @@ Download existing data to your local machine. wrangler d1 backup download ``` - - -* `DATABASE_NAME` string required - * The name of the D1 database you wish to download the backup of. -* `BACKUP_ID` string required - * The ID of the backup you wish to download. -* `--output` string optional - * The `.sqlite3` file to write to (defaults to `'..sqlite3'`). - - +- `DATABASE_NAME` string required + - The name of the D1 database you wish to download the backup of. +- `BACKUP_ID` string required + - The ID of the backup you wish to download. +- `--output` string optional + - The `.sqlite3` file to write to (defaults to `'..sqlite3'`). 
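For point-in-time recovery, a typical Time Travel flow might look like the following sketch, assuming a database named `my-database` and a UNIX timestamp within the 30-day window:

```sh
# Inspect which bookmark corresponds to the chosen point in time
npx wrangler d1 time-travel info my-database --timestamp=1683570504

# Restore the database to that point in time
npx wrangler d1 time-travel restore my-database --timestamp=1683570504
```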
### `migrations create` @@ -491,14 +422,10 @@ The filename will include a version number and the migration name you specify be wrangler d1 migrations create ``` - - -* `DATABASE_NAME` string required - * The name of the D1 database you wish to create a migration for. -* `MIGRATION_NAME` string required - * A descriptive name for the migration you wish to create. - - +- `DATABASE_NAME` string required + - The name of the D1 database you wish to create a migration for. +- `MIGRATION_NAME` string required + - A descriptive name for the migration you wish to create. ### `migrations list` @@ -508,18 +435,14 @@ View a list of unapplied migration files. wrangler d1 migrations list [OPTIONS] ``` - - -* `DATABASE_NAME` string required - * The name of the D1 database you wish to list unapplied migrations for. -* `--local` boolean optional - * Show the list of unapplied migration files on your locally persisted D1 database. -* `--persist-to` string optional - * Specify directory to use for local persistence (for use in combination with `--local`). -* `--preview` boolean optional - * Show the list of unapplied migration files on your preview D1 database (as defined by `preview_database_id` in [`wrangler.toml`](/workers/wrangler/configuration/#d1-databases)). - - +- `DATABASE_NAME` string required + - The name of the D1 database you wish to list unapplied migrations for. +- `--local` boolean optional + - Show the list of unapplied migration files on your locally persisted D1 database. +- `--persist-to` string optional + - Specify directory to use for local persistence (for use in combination with `--local`). +- `--preview` boolean optional + - Show the list of unapplied migration files on your preview D1 database (as defined by `preview_database_id` in [`wrangler.toml`](/workers/wrangler/configuration/#d1-databases)). ### `migrations apply` @@ -537,24 +460,20 @@ If applying a migration results in an error, this migration will be rolled back, wrangler d1 migrations apply [OPTIONS] ``` +- `DATABASE_NAME` string required + - The name of the D1 database you wish to apply your migrations on. +- `--local` boolean (default: true)optional + - Execute any unapplied migrations on your locally persisted D1 database. +- `--remote` boolean (default: false) optional + - Execute any unapplied migrations on your remote D1 database. +- `--persist-to` string optional + - Specify directory to use for local persistence (for use in combination with `--local`). +- `--preview` boolean optional + - Execute any unapplied migrations on your preview D1 database (as defined by `preview_database_id` in [`wrangler.toml`](/workers/wrangler/configuration/#d1-databases)). +- `--batch-size` number optional + - Number of queries to send in a single batch. - -* `DATABASE_NAME` string required - * The name of the D1 database you wish to apply your migrations on. -* `--local` boolean (default: true)optional - * Execute any unapplied migrations on your locally persisted D1 database. -* `--remote` boolean (default: false) optional - * Execute any unapplied migrations on your remote D1 database. -* `--persist-to` string optional - * Specify directory to use for local persistence (for use in combination with `--local`). -* `--preview` boolean optional - * Execute any unapplied migrations on your preview D1 database (as defined by `preview_database_id` in [`wrangler.toml`](/workers/wrangler/configuration/#d1-databases)). -* `--batch-size` number optional - * Number of queries to send in a single batch. 
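Putting the migration commands together, a typical workflow might look like this sketch, assuming a database named `my-database`:

```sh
# Create a migration file, then edit the generated .sql file with your schema change
npx wrangler d1 migrations create my-database create_users_table

# Review unapplied migrations and apply them to the local database first
npx wrangler d1 migrations list my-database --local
npx wrangler d1 migrations apply my-database --local

# Once verified locally, apply them to the remote database
npx wrangler d1 migrations apply my-database --remote
```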
- - - -*** +--- ## `hyperdrive` @@ -568,36 +487,32 @@ Create a new Hyperdrive configuration. wrangler hyperdrive create [OPTIONS] ``` - - -* `ID` string required - * The ID of the Hyperdrive configuration to create. -* `--connection-string` string optional - * The database connection string in the form `postgres://user:password@hostname:port/database`. -* `--host` string optional - * The hostname or IP address Hyperdrive should connect to. -* `--port` number optional - * The database port to connect to. -* `--scheme` string optional - * The scheme used to connect to the origin database - e.g. postgresql or postgres. -* `--database` string optional - * The database (name) to connect to. For example, Postgres or defaultdb. -* `--user` string optional - * The username used to authenticate to the database. -* `--password` string optional - * The password used to authenticate to the database. -* `--access-client-id` string optional - * The Client ID of the Access token to use when connecting to the origin database, must be set with a Client Access Secret. Mutually exclusive with `port`. -* `--access-client-secret` string optional - * The Client Secret of the Access token to use when connecting to the origin database, must be set with a Client Access ID. Mutually exclusive with `port`. -* `--caching-disabled` boolean optional - * Disables the caching of SQL responses. -* `--max-age` number optional - * Specifies max duration for which items should persist in the cache, cannot be set when caching is disabled. -* `--swr` number optional - * Stale While Revalidate - Indicates the number of seconds cache may serve the response after it becomes stale, cannot be set when caching is disabled. - - +- `ID` string required + - The ID of the Hyperdrive configuration to create. +- `--connection-string` string optional + - The database connection string in the form `postgres://user:password@hostname:port/database`. +- `--host` string optional + - The hostname or IP address Hyperdrive should connect to. +- `--port` number optional + - The database port to connect to. +- `--scheme` string optional + - The scheme used to connect to the origin database - e.g. postgresql or postgres. +- `--database` string optional + - The database (name) to connect to. For example, Postgres or defaultdb. +- `--user` string optional + - The username used to authenticate to the database. +- `--password` string optional + - The password used to authenticate to the database. +- `--access-client-id` string optional + - The Client ID of the Access token to use when connecting to the origin database, must be set with a Client Access Secret. Mutually exclusive with `port`. +- `--access-client-secret` string optional + - The Client Secret of the Access token to use when connecting to the origin database, must be set with a Client Access ID. Mutually exclusive with `port`. +- `--caching-disabled` boolean optional + - Disables the caching of SQL responses. +- `--max-age` number optional + - Specifies max duration for which items should persist in the cache, cannot be set when caching is disabled. +- `--swr` number optional + - Stale While Revalidate - Indicates the number of seconds cache may serve the response after it becomes stale, cannot be set when caching is disabled. ### `update` @@ -607,34 +522,30 @@ Update an existing Hyperdrive configuration. wrangler hyperdrive update [OPTIONS] ``` - - -* `ID` string required - * The ID of the Hyperdrive configuration to update. 
-* `--name` string optional
-  * The new name of the Hyperdrive configuration.
-* `--origin-host` string optional
-  * The new database hostname or IP address Hyperdrive should connect to.
-* `--origin-port` string optional
-  * The new database port to connect to.
-* `--database` string optional
-  * The new database (name) to connect to. For example, Postgres or defaultdb.
-* `--origin-user` string optional
-  * The new username used to authenticate to the database.
-* `--origin-password` string optional
-  * The new password used to authenticate to the database.
-* `--access-client-id` string optional
-  * The Client ID of the Access token to use when connecting to the origin database, must be set with a Client Access Secret. Mutually exclusive with `origin-port`.
-* `--access-client-secret` string optional
-  * The Client Secret of the Access token to use when connecting to the origin database, must be set with a Client Access ID. Mutually exclusive with `origin-port`.
-* `--caching-disabled` boolean optional
-  * Disables the caching of SQL responses.
-* `--max-age` number optional
-  * Specifies max duration for which items should persist in the cache, cannot be set when caching is disabled.
-* `--swr` number optional
-  * Stale While Revalidate - Indicates the number of seconds cache may serve the response after it becomes stale, cannot be set when caching is disabled.
-
-
+- `ID` string required
+  - The ID of the Hyperdrive configuration to update.
+- `--name` string optional
+  - The new name of the Hyperdrive configuration.
+- `--origin-host` string optional
+  - The new database hostname or IP address Hyperdrive should connect to.
+- `--origin-port` string optional
+  - The new database port to connect to.
+- `--database` string optional
+  - The new name of the database to connect to, for example, `postgres` or `defaultdb`.
+- `--origin-user` string optional
+  - The new username used to authenticate to the database.
+- `--origin-password` string optional
+  - The new password used to authenticate to the database.
+- `--access-client-id` string optional
+  - The Client ID of the Access token to use when connecting to the origin database. Must be set together with a Client Access Secret. Mutually exclusive with `origin-port`.
+- `--access-client-secret` string optional
+  - The Client Secret of the Access token to use when connecting to the origin database. Must be set together with a Client Access ID. Mutually exclusive with `origin-port`.
+- `--caching-disabled` boolean optional
+  - Disables the caching of SQL responses.
+- `--max-age` number optional
+  - The maximum duration items should persist in the cache. Cannot be set when caching is disabled.
+- `--swr` number optional
+  - Stale-while-revalidate: the number of seconds the cache may continue to serve a response after it becomes stale. Cannot be set when caching is disabled.

### `list`

@@ -652,12 +563,8 @@ Delete an existing Hyperdrive configuration.

```sh
wrangler hyperdrive delete <ID>
```

-
-
-* `ID` string required
-  * The name of the Hyperdrive configuration to delete.
-
-
+- `ID` string required
+  - The ID of the Hyperdrive configuration to delete.

### `get`

@@ -667,14 +574,10 @@ Get an existing Hyperdrive configuration.

```sh
wrangler hyperdrive get <ID>
```

+- `ID` string required
+  - The ID of the Hyperdrive configuration to get.

-
-* `ID` string required
-  * The name of the Hyperdrive configuration to get.
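+
+As a sketch of how these subcommands fit together (the configuration name, credentials, and hostname below are placeholder values, and `<ID>` stands for the configuration ID that `create` prints on success):
+
+```sh
+# Create a configuration from a full connection string.
+wrangler hyperdrive create my-hyperdrive-config \
+  --connection-string="postgres://user:password@db.example.com:5432/defaultdb"
+
+# Inspect the configuration, passing the ID returned by the create command.
+wrangler hyperdrive get <ID>
+```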
-
-
-
-***
+---

## `vectorize`

Interact with a [Vectorize](/vectorize/) vector database.

### `create`

@@ -693,16 +596,12 @@ Creates a new vector index, and provides the binding and name that you will put

```sh
wrangler vectorize create <INDEX_NAME> [--dimensions=<DIMENSIONS>] [--metric=<METRIC>]
```

-
-
-* `INDEX_NAME` string required
-  * The name of the new index to create. Cannot be changed.
-* `--dimensions` number optional
-  * The vector dimension width to configure the index for. Cannot be changed after creation.
-* `--metric` string optional
-  * The distance metric to use for calculating vector distance. Must be one of `cosine`, `euclidean`, or `dot-product`.
-
-
+- `INDEX_NAME` string required
+  - The name of the new index to create. Cannot be changed.
+- `--dimensions` number optional
+  - The vector dimension width to configure the index for. Cannot be changed after creation.
+- `--metric` string optional
+  - The distance metric to use for calculating vector distance. Must be one of `cosine`, `euclidean`, or `dot-product`.

### `get`

@@ -712,12 +611,8 @@ Get details about an individual index, including its configuration.

```sh
wrangler vectorize get <INDEX_NAME>
```

-
-
-* `INDEX_NAME` string required
-  * The name of the index to fetch details for.
-
-
+- `INDEX_NAME` string required
+  - The name of the index to fetch details for.

### `list`

@@ -735,14 +630,10 @@ Delete a Vectorize index.

```sh
wrangler vectorize delete <INDEX_NAME> [OPTIONS]
```

-
-
-* `INDEX_NAME` string required
-  * The name of the Vectorize index to delete.
-* `--force` boolean optional
-  * Skip confirmation when deleting the index (Note: This is not a recoverable operation).
-
-
+- `INDEX_NAME` string required
+  - The name of the Vectorize index to delete.
+- `--force` boolean optional
+  - Skip confirmation when deleting the index (Note: This is not a recoverable operation).

### `insert`

@@ -752,18 +643,14 @@ Insert vectors into an index.

```sh
wrangler vectorize insert <INDEX_NAME> [OPTIONS]
```

+- `INDEX_NAME` string required
+  - The name of the Vectorize index to insert vectors into.
+- `--file` string required
+  - A file containing the vectors to insert in newline-delimited JSON (NDJSON) format.
+- `--batch-size` number optional
+  - The number of vectors to insert at a time (default: `5000`).

-
-* `INDEX_NAME` string required
-  * The name of the Vectorize index to delete.
-* `--file` string required
-  * A file containing the vectors to insert in newline-delimited JSON (JSON) format.
-* `--batch-size` number optional
-  * The number of vectors to insert at a time (default: `5000`).
-
-
-
-***
+---

## `dev`

@@ -775,101 +662,93 @@

wrangler dev [