diff --git a/.github/actions/label-products/index.ts b/.github/actions/label-products/index.ts index ad48385071a9df..bc42cc30abdd45 100644 --- a/.github/actions/label-products/index.ts +++ b/.github/actions/label-products/index.ts @@ -1,115 +1,113 @@ // NOTE: This is the source file! // ~> Run `npm run build` to produce `index.js` -import * as github from '@actions/github'; -import * as core from '@actions/core'; +import * as github from "@actions/github"; +import * as core from "@actions/core"; function getTopLevelFolder(path: string): string { - const parts = path.split('/'); - return parts[2]; + const parts = path.split("/"); + return parts[2]; } - function getSubFolder(path: string): string { - const parts = path.split('/'); - return parts[3]; + const parts = path.split("/"); + return parts[3]; } function getChangedSubFolders(files: any[]): string[] { - const changedFolders = new Set(); + const changedFolders = new Set(); - for (const file of files) { - const path = file.filename; - const topLevelFolder = getTopLevelFolder(path); + for (const file of files) { + const path = file.filename; + const topLevelFolder = getTopLevelFolder(path); - // Check if the file is within the top-level /content folder - if (topLevelFolder === 'docs') { - const subFolder = getSubFolder(path); - changedFolders.add(subFolder); - } - } + // Check if the file is within the top-level /content folder + if (topLevelFolder === "docs") { + const subFolder = getSubFolder(path); + changedFolders.add(subFolder); + } + } - return Array.from(changedFolders); + return Array.from(changedFolders); } async function run(): Promise { - try { - const ctx = github.context; - const token = core.getInput('GITHUB_TOKEN', { required: true }); - const octokit = github.getOctokit(token); - const pr = ctx.payload.pull_request; - const prNumber = pr.number; - const files = await octokit.paginate(octokit.rest.pulls.listFiles, { - ...ctx.repo, - pull_number: pr.number, - per_page: 100, - }); - - // Get the changed sub-folders within the top-level /content folder - const changedFolders = getChangedSubFolders(files); - - // ... - - // Label the PR based on the changed sub-folders - await labelPRSubFolders(octokit, ctx.repo, prNumber, changedFolders); - - // ... - } catch (error) { - console.error('An error occurred:', error); - process.exit(1); - } + try { + const ctx = github.context; + const token = core.getInput("GITHUB_TOKEN", { required: true }); + const octokit = github.getOctokit(token); + const pr = ctx.payload.pull_request; + const prNumber = pr.number; + const files = await octokit.paginate(octokit.rest.pulls.listFiles, { + ...ctx.repo, + pull_number: pr.number, + per_page: 100, + }); + + // Get the changed sub-folders within the top-level /content folder + const changedFolders = getChangedSubFolders(files); + + // ... + + // Label the PR based on the changed sub-folders + await labelPRSubFolders(octokit, ctx.repo, prNumber, changedFolders); + + // ... 
+ } catch (error) { + console.error("An error occurred:", error); + process.exit(1); + } } async function labelPRSubFolders( - octokit: any, - repo: any, - prNumber: number, - changedFolders: string[] + octokit: any, + repo: any, + prNumber: number, + changedFolders: string[], ): Promise { - const labelPrefix = 'product:'; - const labelsToRemove: string[] = []; - const labelsToAdd: string[] = []; - - for (const label of github.context.payload.pull_request.labels) { - if (label.name.startsWith(labelPrefix)) { - labelsToRemove.push(label.name); - } - } - - for (const folder of changedFolders) { - const label = labelPrefix + folder; - labelsToAdd.push(label); - } - - const currentLabels = new Set( - github.context.payload.pull_request.labels.map((label: any) => label.name) - ); - - for (const labelToRemove of labelsToRemove) { - if (!labelsToAdd.includes(labelToRemove)) { - await octokit.rest.issues.removeLabel({ - ...repo, - issue_number: prNumber, - name: labelToRemove, - }); - } - } - - let newLabels: string[] = []; - for (const labelToAdd of labelsToAdd) { - if (!currentLabels.has(labelToAdd)) { - newLabels.push(labelToAdd); - } - } - - if (newLabels.length > 0) { - await octokit.rest.issues.addLabels({ - ...repo, - issue_number: prNumber, - labels: newLabels, - }); - } + const labelPrefix = "product:"; + const labelsToRemove: string[] = []; + const labelsToAdd: string[] = []; + + for (const label of github.context.payload.pull_request.labels) { + if (label.name.startsWith(labelPrefix)) { + labelsToRemove.push(label.name); + } + } + + for (const folder of changedFolders) { + const label = labelPrefix + folder; + labelsToAdd.push(label); + } + + const currentLabels = new Set( + github.context.payload.pull_request.labels.map((label: any) => label.name), + ); + + for (const labelToRemove of labelsToRemove) { + if (!labelsToAdd.includes(labelToRemove)) { + await octokit.rest.issues.removeLabel({ + ...repo, + issue_number: prNumber, + name: labelToRemove, + }); + } + } + + let newLabels: string[] = []; + for (const labelToAdd of labelsToAdd) { + if (!currentLabels.has(labelToAdd)) { + newLabels.push(labelToAdd); + } + } + + if (newLabels.length > 0) { + await octokit.rest.issues.addLabels({ + ...repo, + issue_number: prNumber, + labels: newLabels, + }); + } } run(); - diff --git a/.github/actions/label-size/index.ts b/.github/actions/label-size/index.ts index 2f27ea7fb338f6..0b265ea53e808a 100644 --- a/.github/actions/label-size/index.ts +++ b/.github/actions/label-size/index.ts @@ -1,67 +1,68 @@ -import * as core from '@actions/core'; -import * as github from '@actions/github'; +import * as core from "@actions/core"; +import * as github from "@actions/github"; async function run(): Promise { - try { - const ctx = github.context; - const token = core.getInput('GITHUB_TOKEN', { required: true }); - const octokit = github.getOctokit(token); - const pr = ctx.payload.pull_request; - const files = await octokit.paginate(octokit.rest.pulls.listFiles, { - ...ctx.repo, - pull_number: pr.number, - per_page: 100 - }); + try { + const ctx = github.context; + const token = core.getInput("GITHUB_TOKEN", { required: true }); + const octokit = github.getOctokit(token); + const pr = ctx.payload.pull_request; + const files = await octokit.paginate(octokit.rest.pulls.listFiles, { + ...ctx.repo, + pull_number: pr.number, + per_page: 100, + }); - const changes = files.reduce((total, file) => total + file.changes, 0); + const changes = files.reduce((total, file) => total + file.changes, 0); - let label: string | 
undefined; + let label: string | undefined; - switch (true) { - case changes <= 10: - label = 'size/xs'; - break; - case changes <= 100: - label = 'size/s'; - break; - case changes <= 500: - label = 'size/m'; - break; - case changes <= 1000: - label = 'size/l'; - break; - default: - label = 'size/xl'; - break; - } + switch (true) { + case changes <= 10: + label = "size/xs"; + break; + case changes <= 100: + label = "size/s"; + break; + case changes <= 500: + label = "size/m"; + break; + case changes <= 1000: + label = "size/l"; + break; + default: + label = "size/xl"; + break; + } - const currentLabels = pr.labels.map((label) => label.name); - const sizeLabels = ['size/xs', 'size/s', 'size/m', 'size/l', 'size/xl']; + const currentLabels = pr.labels.map((label) => label.name); + const sizeLabels = ["size/xs", "size/s", "size/m", "size/l", "size/xl"]; - // Remove previous size-related labels that are different from the current label - const labelsToRemove = currentLabels.filter( - (currentLabel) => sizeLabels.includes(currentLabel) && currentLabel !== label - ); + // Remove previous size-related labels that are different from the current label + const labelsToRemove = currentLabels.filter( + (currentLabel) => + sizeLabels.includes(currentLabel) && currentLabel !== label, + ); - for (const labelToRemove of labelsToRemove) { - await octokit.rest.issues.removeLabel({ - ...ctx.repo, - issue_number: pr.number, - name: labelToRemove - }); - } + for (const labelToRemove of labelsToRemove) { + await octokit.rest.issues.removeLabel({ + ...ctx.repo, + issue_number: pr.number, + name: labelToRemove, + }); + } - // Add the current label - if (!currentLabels.includes(label)) { - await octokit.rest.issues.addLabels({ - ...ctx.repo, - issue_number: pr.number, - labels: [label] - }); - } - } catch (error) { - core.setFailed(error.message); - } + // Add the current label + if (!currentLabels.includes(label)) { + await octokit.rest.issues.addLabels({ + ...ctx.repo, + issue_number: pr.number, + labels: [label], + }); + } + } catch (error) { + core.setFailed(error.message); + } } run(); diff --git a/bin/crawl.ts b/bin/crawl.ts deleted file mode 100644 index cbe05d7d73ee6d..00000000000000 --- a/bin/crawl.ts +++ /dev/null @@ -1,251 +0,0 @@ -// @ts-nocheck TODO: refactor this for modern Astro, or remove -/** - * 1. Crawl the `/public` directory (HTML files) and assert: - * - all anchor tags () do not point to broken links - * - all images () do not have broken sources - * NOTE: Requires `npm run build` first! 
- */ -import * as http from "http"; -import * as https from "https"; -import { existsSync } from "fs"; -import * as fs from "fs/promises"; -import { join, resolve, extname } from "path"; -import { parse } from "node-html-parser"; - -let WARNS = 0; -let ERRORS = 0; -let REDIRECT_ERRORS: string[] = []; - -const ROOT = resolve("."); -const PUBDIR = join(ROOT, "public"); -const REDIRECT_FILE = join(ROOT, "content/_redirects"); -const VERBOSE = process.argv.includes("--verbose"); -const EXTERNALS = process.argv.includes("--externals"); - -async function walk(dir: string) { - let files = await fs.readdir(dir); - await Promise.all( - files.map(async (name) => { - let abs = join(dir, name); - if (name.endsWith(".html")) return task(abs); - let stats = await fs.stat(abs); - if (stats.isDirectory()) return walk(abs); - }), - ); -} - -let CACHE = new Map(); - -function HEAD(url: string): Promise { - let value = CACHE.has(url); - if (value != null) return Promise.resolve(value); - - let options: https.RequestOptions = { - method: "HEAD", - headers: { - "user-agent": "dev-docs", - }, - }; - - if (url.startsWith("http://")) { - options.agent = http.globalAgent; - } - - let req = https.request(url, options); - - return new Promise((r) => { - req.on("error", (err) => { - console.log(url, err); - CACHE.set(url, false); - return r(false); - }); - - req.on("response", (res) => { - let bool = res.statusCode > 199 && res.statusCode < 400; - console.log({ url, bool }); - CACHE.set(url, bool); - return r(bool); - }); - - req.end(); - }); -} - -interface Message { - type: "error" | "warn"; - html?: string; - value?: string; - text?: string; -} - -async function testREDIRECTS(file: string) { - const textPlaceholder = await fs.readFile(file, "utf-8"); - const destinationURLRegex = new RegExp(/\/.*\/*? 
(\/.*\/)/); - - for (const line of textPlaceholder.split(/[\r\n]+/)) { - let exists = false; - if (!line.startsWith("#")) { - const result = line.match(destinationURLRegex); - if (result !== null) { - const match = result[1]; - if (match.startsWith("/api/")) { - return; - } else { - let local = join(PUBDIR, match); - exists = existsSync(local); - - if (!exists) { - REDIRECT_ERRORS.push(`\n ✘ ${result[0]}`); - } - } - } - } - } -} - -async function task(file: string) { - let html = await fs.readFile(file, "utf8"); - - let document = parse(html, { - comment: false, - blockTextElements: { - script: false, - noscript: false, - style: false, - pre: false, - }, - }); - - let placeholder = "http://foo.io"; - // build this file's URL; without "index.html" at end - let self = file - .substring(PUBDIR.length, file.length - 10) - .replace(/\\+/g, "/"); - let url = new URL(self, placeholder); - - let messages: Message[] = []; - let items = document.querySelectorAll("a[href],img[src]"); - - await Promise.all( - items.map(async (item) => { - let content = item.outerHTML; - let target = item.getAttribute("src") || item.getAttribute("href"); - - if (!target && item.rawTagName === "a") { - // parsing error; this is actually ` - if (/logo-link/.test(item.classNames)) return; - return messages.push({ - type: "warn", - html: content, - text: `Missing "href" value`, - }); - } - - if (target && (target.startsWith("/api/") || target === "/api")) { - return; - } - - if (target && target.includes("discord.gg/cloudflaredev")) { - return messages.push({ - type: "error", - html: content, - text: "Use 'https://discord.cloudflare.com' instead of 'https://discord.gg/cloudflaredev'.", - }); - } - - let exists: boolean; - let external = false; - let resolved = new URL(target, url); - - if (!/https?/.test(resolved.protocol)) return; - - if ((external = resolved.origin !== placeholder)) { - // only fetch external URLs with `--externals` flag - exists = EXTERNALS ? await HEAD(target) : true; - } - - if (!external) { - let local = join(PUBDIR, resolved.pathname); - - // is this HTML page? eg; "/foo/" - if (extname(local).length === 0) { - // TODO? 
log warning about no trailing slash - if (!local.endsWith("/")) local += "/"; - local += "index.html"; - } - - exists = existsSync(local); - } - - if (!exists) { - messages.push({ - type: "error", - html: content, - value: target, - }); - } - }), - ); - - if (messages.length > 0) { - let output = file.substring(PUBDIR.length); - - messages.forEach((msg) => { - if (msg.type === "error") { - output += "\n ✘"; - ERRORS++; - } else { - output += "\n ⚠"; - WARNS++; - } - output += " " + (msg.text || msg.value); - if (VERBOSE) output += "\n " + msg.html; - }); - - console.log(output + "\n"); - } -} - -try { - await walk(PUBDIR); - - if (!ERRORS && !WARNS) { - console.log("\n~> Regular files DONE~!\n\n"); - } else { - let msg = "\n~> Regular files DONE with:"; - if (ERRORS > 0) { - process.exitCode = 1; - msg += "\n - " + ERRORS.toLocaleString() + " error(s)"; - } - if (WARNS > 0) { - msg += "\n - " + WARNS.toLocaleString() + " warning(s)"; - } - console.log(msg + "\n\n"); - } -} catch (err) { - console.error(err.stack || err); - process.exit(1); -} - -try { - await testREDIRECTS(REDIRECT_FILE); - if (REDIRECT_ERRORS.length == 0) { - console.log("\n~> /content/_redirects file DONE~!\n\n"); - } else { - let msg = "\n~> /content/_redirects file DONE with:"; - process.exitCode = 1; - msg += - "\n - " + - REDIRECT_ERRORS.length.toLocaleString() + - " error(s)" + - " (due to bad destination URLs)" + - "\n\n"; - for (let i = 0; i < REDIRECT_ERRORS.length; i++) { - msg += REDIRECT_ERRORS[i]; - } - console.log(msg + "\n\n"); - } -} catch (err) { - console.error(err.stack || err); - process.exit(1); -} diff --git a/src/assets/images/analytics/sentinel-diagram.png b/src/assets/images/analytics/sentinel-diagram.png new file mode 100644 index 00000000000000..c1ab058b581096 Binary files /dev/null and b/src/assets/images/analytics/sentinel-diagram.png differ diff --git a/src/content/changelogs/page-shield.yaml b/src/content/changelogs/page-shield.yaml index d35caf98f9677e..de6a4d4bb3f024 100644 --- a/src/content/changelogs/page-shield.yaml +++ b/src/content/changelogs/page-shield.yaml @@ -5,6 +5,16 @@ productLink: "/page-shield/" productArea: Application security productAreaLink: /fundamentals/reference/changelog/security/ entries: + - publish_date: "2024-09-18" + title: Page Shield's script monitor now available in Free plan + description: |- + The Page Shield's script monitor feature is now available to all users, including users in the Free plan. + + - publish_date: "2024-09-18" + title: Page Shield policy changes now available in audit logs + description: |- + Cloudflare [Audit Logs](/fundamentals/setup/account/account-security/review-audit-logs/) now include entries for any changes to Page Shield's policies. 
+ - publish_date: "2024-06-18" title: Cookie Monitor now available description: |- diff --git a/src/content/docs/1.1.1.1/encryption/dns-over-https/index.mdx b/src/content/docs/1.1.1.1/encryption/dns-over-https/index.mdx index 964c88c2c2da42..3d29acdca746c7 100644 --- a/src/content/docs/1.1.1.1/encryption/dns-over-https/index.mdx +++ b/src/content/docs/1.1.1.1/encryption/dns-over-https/index.mdx @@ -2,7 +2,8 @@ pcx_content_type: reference title: DNS over HTTPS slug: 1.1.1.1/encryption/dns-over-https - +sidebar: + label: About DoH --- import { DirectoryListing } from "~/components" diff --git a/src/content/docs/1.1.1.1/infrastructure/index.mdx b/src/content/docs/1.1.1.1/infrastructure/index.mdx index 4946f1a69c0b90..7ca2f3cbfb2754 100644 --- a/src/content/docs/1.1.1.1/infrastructure/index.mdx +++ b/src/content/docs/1.1.1.1/infrastructure/index.mdx @@ -3,6 +3,8 @@ pcx_content_type: navigation title: Infrastructure sidebar: order: 4 + group: + hideIndex: true slug: 1.1.1.1/infrastructure --- diff --git a/src/content/docs/1.1.1.1/other-ways-to-use-1.1.1.1/index.mdx b/src/content/docs/1.1.1.1/other-ways-to-use-1.1.1.1/index.mdx index d32a242c5cf07e..e2112bc7ad9c3a 100644 --- a/src/content/docs/1.1.1.1/other-ways-to-use-1.1.1.1/index.mdx +++ b/src/content/docs/1.1.1.1/other-ways-to-use-1.1.1.1/index.mdx @@ -3,6 +3,8 @@ pcx_content_type: navigation title: Other ways to use 1.1.1.1 sidebar: order: 5 + group: + hideIndex: true slug: 1.1.1.1/other-ways-to-use-1.1.1.1 --- diff --git a/src/content/docs/1.1.1.1/setup/index.mdx b/src/content/docs/1.1.1.1/setup/index.mdx index 094bc3c138aa14..4c8e2137871131 100644 --- a/src/content/docs/1.1.1.1/setup/index.mdx +++ b/src/content/docs/1.1.1.1/setup/index.mdx @@ -3,6 +3,7 @@ pcx_content_type: reference title: Set up sidebar: order: 2 + label: General head: - tag: title content: Set up Cloudflare 1.1.1.1 resolver diff --git a/src/content/docs/analytics/analytics-integrations/sentinel.mdx b/src/content/docs/analytics/analytics-integrations/sentinel.mdx new file mode 100644 index 00000000000000..ec6f4f48425060 --- /dev/null +++ b/src/content/docs/analytics/analytics-integrations/sentinel.mdx @@ -0,0 +1,18 @@ +--- +pcx_content_type: how-to +title: Sentinel +sidebar: + order: 105 + +--- + +Microsoft has developed a Cloudflare connector that allows their customers to integrate [Cloudflare Logs](/logs/) with Microsoft Sentinel. + +## How it works + +[Logpush](/logs/get-started/enable-destinations/azure/) sends logs from Cloudflare to Azure Blob Storage. From there, the Cloudflare connector, a Microsoft function, ingests these logs into Azure Log Analytics Workspace, making them available for monitoring and analysis in Microsoft Sentinel. + +![Sentinel integrations steps](~/assets/images/analytics/sentinel-diagram.png) + +For more details, refer to the Microsoft documentation [Cloudflare connector for Microsoft Sentinel](https://learn.microsoft.com/en-us/azure/sentinel/data-connectors/cloudflare). + diff --git a/src/content/docs/automatic-platform-optimization/troubleshooting/faq.mdx b/src/content/docs/automatic-platform-optimization/troubleshooting/faq.mdx index 4046198fccab90..6b127b1abfc6ff 100644 --- a/src/content/docs/automatic-platform-optimization/troubleshooting/faq.mdx +++ b/src/content/docs/automatic-platform-optimization/troubleshooting/faq.mdx @@ -15,7 +15,7 @@ No. APO ignores Origin Cache Control for caching on the Edge, but APO serves ori ## Why are my browser cache control headers missing with APO? 
-The browser cache control headers may be missing with APO if you set your browser to cache TTL to "respect existing headers." For example: +The browser cache control headers may be missing with APO if you set your **Browser Cache TTL** to **Respect Existing Headers**. For example: ```sh curl --silent --verbose --output /dev/null https://example.com/ --header 'Accept: text/html' 2>&1 | grep cache-control @@ -27,7 +27,7 @@ curl --silent --verbose --output /dev/null https://example.com/ --header 'Accept ## Is the stale-if-error directive still needed with APO? -No, the stale-if-error directive is not needed because the feature is built into APO. +No, the `stale-if-error` directive is not needed because the feature is built into APO. ## When I check the posts and homepage cache status, the response header shows `cf-cache-status: BYPASS`. Is APO working? diff --git a/src/content/docs/d1/get-started.mdx b/src/content/docs/d1/get-started.mdx index 5a99000765c5cf..6ad50bd209576e 100644 --- a/src/content/docs/d1/get-started.mdx +++ b/src/content/docs/d1/get-started.mdx @@ -6,7 +6,7 @@ sidebar: order: 2 --- -import { Render, PackageManagers, Steps, FileTree } from "~/components"; +import { Render, PackageManagers, Steps, FileTree, Tabs, TabItem } from "~/components"; This guide instructs you through: @@ -14,20 +14,18 @@ This guide instructs you through: - Creating a schema and querying your database via the command-line. - Connecting a [Cloudflare Worker](/workers/) to your D1 database to query your D1 database programmatically. -## Prerequisites +You can perform these tasks through the CLI or through the Cloudflare dashboard. -To continue: +## Prerequisites - -1. Sign up for a [Cloudflare account](https://dash.cloudflare.com/sign-up/workers-and-pages) if you have not already. -2. Install [`npm`](https://docs.npmjs.com/getting-started). -3. Install [`Node.js`](https://nodejs.org/en/). Use a Node version manager like [Volta](https://volta.sh/) or [nvm](https://github.com/nvm-sh/nvm) to avoid permission issues and change Node.js versions. [Wrangler](/workers/wrangler/install-and-update/) requires a Node version of `16.17.0` or later. - + ## 1. Create a Worker Create a new Worker as the means to query your database. + + 1. Create a new project named `d1-tutorial` by running: @@ -40,7 +38,7 @@ Create a new Worker as the means to query your database. one: "Hello World example", two: "Hello World Worker", three: "TypeScript", - }} + }}z /> This creates a new `d1-tutorial` directory as illustrated below. @@ -73,12 +71,26 @@ For example: `CI=true npm create cloudflare@latest d1-tutorial --type=simple --g ::: + + + +1. Log in to your [Cloudflare dashboard](https://dash.cloudflare.com/) and select your account. +2. Go to your account > **Workers & Pages** > **Overview**. +3. Select **Create**. +4. Select **Create Worker**. +5. Name for your Worker. For this tutorial, name your Worker `d1-tutorial`. +6. Select **Deploy**. + + + ## 2. Create a database A D1 database is conceptually similar to many other databases: a database may contain one or more tables, the ability to query those tables, and optional indexes. D1 uses the familiar [SQL query language](https://www.sqlite.org/lang.html) (as used by SQLite). To create your first D1 database: + + 1. Change into the directory you just created for your Workers project: @@ -112,6 +124,20 @@ The `wrangler` command-line interface is Cloudflare's tool for managing and depl ::: + + + +1. Go to **Storage & Databases** > **D1**. +2. Select **Create**. +3. 
Name your database. For this tutorial, name your D1 database `prod-d1-tutorial`. +4. (Optional) Provide a location hint. Location hint is an optional parameter you can provide to indicate your desired geographical location for your database. Refer to [Provide a location hint](/d1/configuration/data-location/#provide-a-location-hint) for more information. +5. Select **Create**. + + + + + + :::note For reference, a good database name: @@ -124,11 +150,16 @@ For reference, a good database name: ## 3. Bind your Worker to your D1 database -You must create a binding for your Worker to connect to your D1 database. [Bindings](/workers/runtime-apis/bindings/) allow your Workers to access resources, like D1, on the Cloudflare developer platform. You create bindings by updating your `wrangler.toml` file. +You must create a binding for your Worker to connect to your D1 database. [Bindings](/workers/runtime-apis/bindings/) allow your Workers to access resources, like D1, on the Cloudflare developer platform. To bind your D1 database to your Worker: + + +You create bindings by updating your `wrangler.toml` file. + + 1. Copy the lines obtained from [step 2](/d1/get-started/#2-create-a-database) from your terminal. 2. Add them to the end of your `wrangler.toml` file. @@ -155,10 +186,33 @@ When you execute the `wrangler d1 create` command, the client API package (which You can also bind your D1 database to a [Pages Function](/pages/functions/). For more information, refer to [Functions Bindings for D1](/pages/functions/bindings/#d1-databases). + + +You create bindings by adding them to the Worker you have created. + + +1. Go to **Workers & Pages** > **Overview**. +2. Select the `d1-tutorial` Worker you created in [step 1](/d1/get-started/#1-create-a-worker). +3. Select **Settings**. +4. Scroll to **Bindings**, then select **Add**. +5. Select **D1 database**. +6. Name your binding in **Variable name**, then select the `prod-d1-tutorial` D1 database you created in [step 2](/d1/get-started/#2-create-a-database) from the dropdown menu. For this tutorial, name your binding `DB`. +7. Select **Deploy** to deploy your binding. When deploying, there are two options: + - **Deploy:** Immediately deploy the binding to 100% of your audience. + - **Save version:** Save a version of the binding which you can deploy in the future. + + For this tutorial, select **Deploy**. + + + + + ## 4. Run a query against your D1 database ### Configure your D1 database + + With `wrangler.toml` configured properly, set up your database. Use the example `schema.sql` file below to initialize your database. @@ -200,10 +254,36 @@ With `wrangler.toml` configured properly, set up your database. Use the example + + +Use the Dashboard to create a table and populate it with data. + + +1. Go to **Storage & Databases** > **D1**. +2. Select the `prod-d1-tutorial` database you created in [step 2](/d1/get-started/#2-create-a-database). +3. Select **Console**. +4. Paste the following SQL snippet. + + ```sql + DROP TABLE IF EXISTS Customers; + CREATE TABLE IF NOT EXISTS Customers (CustomerId INTEGER PRIMARY KEY, CompanyName TEXT, ContactName TEXT); + INSERT INTO Customers (CustomerID, CompanyName, ContactName) VALUES (1, 'Alfreds Futterkiste', 'Maria Anders'), (4, 'Around the Horn', 'Thomas Hardy'), (11, 'Bs Beverages', 'Victoria Ashworth'), (13, 'Bs Beverages', 'Random Name'); + ``` + +5. Select **Execute**. This creates a table called `Customers` in your `prod-d1-tutorial` database. +6. 
Select **Tables**, then select the `Customers` table to view the contents of the table. + + + + + + ### Write queries within your Worker After you have set up your database, run an SQL query from within your Worker. + + 1. Navigate to your `d1-tutorial` Worker and open the `index.ts` file. The `index.ts` file is where you configure your Worker's interactions with D1. 2. Clear the content of `index.ts`. @@ -249,31 +329,52 @@ After you have set up your database, run an SQL query from within your Worker. After configuring your Worker, you can test your project locally before you deploy globally. -## 5. Develop locally with Wrangler + -While in your project directory, test your database locally. +You can query your D1 database using your Worker. -1. Run `wrangler dev`: +1. Go to **Workers & Pages** > **Overview**. +2. Select the `d1-tutorial` Worker you created. +3. Select **Edit Code**. +4. Clear the contents of the `worker.js` file, then paste the following code: - ```sh - npx wrangler dev - ``` - - When you run `wrangler dev`, Wrangler provides a URL (most likely `localhost:8787`) to review your Worker. + ```js + export default { + async fetch(request, env) { + const { pathname } = new URL(request.url); -2. Navigate to the URL. + if (pathname === "/api/beverages") { + // If you did not use `DB` as your binding name, change it here + const { results } = await env.DB.prepare( + "SELECT * FROM Customers WHERE CompanyName = ?" + ) + .bind("Bs Beverages") + .all(); + return new Response(JSON.stringify(results), { + headers: { 'Content-Type': 'application/json' } + }); + } - The page displays `Call /api/beverages to see everyone who works at Bs Beverages`. + return new Response( + "Call /api/beverages to see everyone who works at Bs Beverages" + ); + }, + }; + ``` +5. Select **Save**. -3. Test your database is running successfully. Add `/api/beverages` to the provided Wrangler URL. For example, `localhost:8787/api/beverages`. + + -If successful, the browser displays your data. +## 5. Deploy your database + +Deploy your database on Cloudflare's global network. -## 6. Deploy your database + -To deploy your Worker to production, you must first repeat the [database bootstrapping](/d1/get-started/#configure-your-d1-database) steps after replacing the `--local` flag with the `--remote` flag to give your Worker data to read. This creates the database tables and imports the data into the production version of your database, running on Cloudflare's global network. +To deploy your Worker to production using Wrangler, you must first repeat the [database configuration](/d1/get-started/#configure-your-d1-database) steps after replacing the `--local` flag with the `--remote` flag to give your Worker data to read. This creates the database tables and imports the data into the production version of your database. 1. Bootstrap your database with the `schema.sql` file you created in step 4: @@ -297,26 +398,102 @@ To deploy your Worker to production, you must first repeat the [database bootstr ```sh output Outputs: https://d1-tutorial..workers.dev ``` + + You can now visit the URL for your newly created project to query your live database. + + For example, if the URL of your new Worker is `d1-tutorial..workers.dev`, accessing `https://d1-tutorial..workers.dev/api/beverages` sends a request to your Worker that queries your live database directly. + +4. Test your database is running successfully. Add `/api/beverages` to the provided Wrangler URL. For example, `https://d1-tutorial..workers.dev/api/beverages`. 
+ + + + + + +1. Go to **Workers & Pages** > **Overview**. +2. Select your `d1-tutorial` Worker. +3. Select **Deployments**. +4. From the **Version History** table, select **Deploy version**. +5. From the **Deploy version** page, select **Deploy**. + + + +This deploys the latest version of the Worker code to production. + + + +## 6. (Optional) Develop locally with Wrangler + +If you are using D1 with Wrangler, you can test your database locally. While in your project directory: + + +1. Run `wrangler dev`: + + ```sh + npx wrangler dev + ``` + + When you run `wrangler dev`, Wrangler provides a URL (most likely `localhost:8787`) to review your Worker. + +2. Go to the URL. + + The page displays `Call /api/beverages to see everyone who works at Bs Beverages`. + +3. Test your database is running successfully. Add `/api/beverages` to the provided Wrangler URL. For example, `localhost:8787/api/beverages`. -You can now visit the URL for your newly created project to query your live database. +If successful, the browser displays your data. -For example, if the URL of your new Worker is `d1-tutorial..workers.dev`, accessing `https://d1-tutorial..workers.dev/api/beverages` sends a request to your Worker that queries your live database directly. +:::note +You can only develop locally if you are using Wrangler. You cannot develop locally through the Cloudflare dashboard. +::: ## 7. (Optional) Delete your database -To delete your database, run: +To delete your database: + + + +Run: ```sh npx wrangler d1 delete prod-d1-tutorial ``` -If you want to delete your Worker, run: + + + +1. Go to **Storages & Databases** > **D1**. +2. Select your `prod-d1-tutorial` D1 database. +3. Select **Settings**. +4. Select **Delete**. +5. Type the name of the database (`prod-d1-tutorial`) to confirm the deletion. + + + + +If you want to delete your Worker: + + + +Run: ```sh npx wrangler delete d1-tutorial ``` + + + +1. Go to **Workers & Pages** > **Overview**. +2. Select your `d1-tutorial` Worker. +3. Select **Settings**. +4. Scroll to the bottom of the page, then select **Delete**. +5. Type the name of the Worker (`d1-tutorial`) to confirm the deletion. + + + + ## Summary In this tutorial, you have: diff --git a/src/content/docs/docs-guide/manage-content/redirects/best-practices.mdx b/src/content/docs/docs-guide/manage-content/redirects/best-practices.mdx index e54d65b473f174..4cdea6b4ac4c7d 100644 --- a/src/content/docs/docs-guide/manage-content/redirects/best-practices.mdx +++ b/src/content/docs/docs-guide/manage-content/redirects/best-practices.mdx @@ -9,7 +9,7 @@ head: --- -Beyond [how Cloudflare uses redirects](/docs-guide/manage-content/redirects/process/) and having a [maintenance plan](/docs-guide/manage-content/redirects/maintenance/), we follow these best practices. +Beyond [how Cloudflare uses redirects](/docs-guide/manage-content/redirects/process/), we follow these best practices. ## Organize your redirects diff --git a/src/content/docs/docs-guide/manage-content/redirects/maintenance.mdx b/src/content/docs/docs-guide/manage-content/redirects/maintenance.mdx deleted file mode 100644 index c766abd73c05b0..00000000000000 --- a/src/content/docs/docs-guide/manage-content/redirects/maintenance.mdx +++ /dev/null @@ -1,41 +0,0 @@ ---- -pcx_content_type: how-to -title: Maintenance -sidebar: - order: 2 -head: - - tag: title - content: Redirects | Maintenance - ---- - -There are two parts to redirect maintenance, keeping current links up to date and cleaning up your redirects file. 
- -## Link maintenance - -In our main [Compiles check](https://github.com/cloudflare/cloudflare-docs/blob/production/.github/workflows/ci.yml), we call [a script](https://github.com/cloudflare/cloudflare-docs/blob/production/bin/crawl.ts) that makes sure all internal links exist in our current build. - -This means that the check will error if it encounters any broken links, even those that have redirects set. - -We highly recommend this approach because: - -* The behavior of each link is much clearer. -* You can troubleshoot issues more easily. -* It simplifies any future migration. - -## Redirect maintenance - -We prune unused redirects in our `_redirects` file every couple months. This process helps us stay under the [limit for Pages redirects](/pages/configuration/redirects/#surpass-_redirects-limits), as well as keeps our file cleaner and more navigable. - -1. We check out the `_redirects` file from 6 months ago, which helps us avoid deleting recently added redirects. - -2. Using a script, we extract all of the target paths from our `_redirects` file into a CSV. - -3. Using that CSV, we join together data from a [Logpush job](/logs/about/) that stores a sample of `301` and `404` requests to our docs site. - -4. Then, we evaluate redirects: - - * **Static redirects**: Compare to all `301` responses and - if the traffic threshold is low enough - remove that specific line from `_redirects`. - * **Dynamic redirects**: Using a `contains` operator, review the amount of traffic reaching all subpaths. If the traffic is minimal, remove the dynamic redirect entirely. If the traffic is just going to one or two paths, swap out for a static redirect. - -5. Afterwards, we monitor the `404` traffic to our docs site using the same Logpush job. This step helps us identify whether we need to re-add any of the redirects. diff --git a/src/content/docs/magic-wan/configuration/connector/configure-hardware-connector/index.mdx b/src/content/docs/magic-wan/configuration/connector/configure-hardware-connector/index.mdx index 988457c35a5522..6aaeb62fbc8a2e 100644 --- a/src/content/docs/magic-wan/configuration/connector/configure-hardware-connector/index.mdx +++ b/src/content/docs/magic-wan/configuration/connector/configure-hardware-connector/index.mdx @@ -10,26 +10,7 @@ import { GlossaryTooltip, Render, Card } from "~/components"; ## Before you begin -You can install up to two Magic WAN Connectors for redundancy at each of your sites. If one of your Connectors fail, traffic will fail over to the other Connector ensuring that you never lose connectivity to that site. - -In this type of high availability (HA) configuration, you will choose a reliable LAN interface as the HA link which will be used to monitor the health of the peer connector. HA links can be dedicated links or can be shared with other LAN traffic. - -You must decide the type of configuration you want for your site from the beginning: no redundancy or with redundancy. You cannot add redundancy after finishing configuring your dashboard settings. If, at a later stage, you decide to enable redundancy, you will need to delete your site configuration in the Cloudflare dashboard, and start again. - - - -- If you need a high availability configuration for your premises, refer to - [About high availability configurations](#about-high-availability-configurations) for more information - and learn how to configure your Connector in this mode. 
- -- If you do not need a high availability configuration for you premises, proceed to [Configure - Cloudflare dashboard settings](#configure-cloudflare-dashboard-settings). - - - -:::caution[Warning] -You cannot enable high availability for an existing site. To add high availability to an existing site in the Cloudflare dashboard, you need to delete the site and start again. Plan accordingly to create a high availability configuration from the start if needed. -::: + --- @@ -51,7 +32,7 @@ Refer to [SFP+ port information](/magic-wan/configuration/connector/configure-ha --- -## Configure Cloudflare dashboard settings +## Set up Cloudflare dashboard **Add Connector**. -5. Back on the previous screen, select **Add secondary Connector**. -6. From the list, choose your second Connector > **Add Connector**. -7. Select **Next** to [Create a WAN](#2-create-a-wan). If you are configuring a static IP, configure the IP for the primary node as the static address, and the IP for the secondary node as the secondary static address. -8. To create a LAN, follow the steps mentioned above in [Create a LAN](#3-create-a-lan) up until step 4. -9. In **Static address**, enter the IP for the primary node in your site. For example, `192.168.10.1/24`. -10. In **Secondary static address**, enter the IP for the secondary node in your site. For example, `192.168.10.2/24`. -11. In **Virtual static address**, enter the IP that the LAN south of the Connector will forward traffic to. For example, `192.168.10.3/24`. -12. Select **Save**. -13. From the **High availability probing link** drop-down menu, select the port that should be used to monitor the node’s health. Cloudflare recommends you choose a reliable interface as the HA probing link. The primary and secondary node’s probing link should be connected over a switch, and cannot be a direct connection. -14. Follow the instructions in [Set up your Magic WAN Connector](#set-up-your-magic-wan-connector) and [Activate connector](#activate-connector) to finish setting up your Connectors. + --- diff --git a/src/content/docs/magic-wan/configuration/connector/configure-virtual-connector.mdx b/src/content/docs/magic-wan/configuration/connector/configure-virtual-connector.mdx index 9f6ef03708a6d0..4910badcab5449 100644 --- a/src/content/docs/magic-wan/configuration/connector/configure-virtual-connector.mdx +++ b/src/content/docs/magic-wan/configuration/connector/configure-virtual-connector.mdx @@ -3,16 +3,19 @@ pcx_content_type: how-to title: Configure virtual Connector sidebar: order: 4 - --- -import { Render } from "~/components" +import { Render } from "~/components"; Virtual Magic WAN Connector (Virtual Connector) is a virtual appliance alternative to the hardware based Magic WAN Connector appliance. These two versions of Connector are identical otherwise. +## Before you begin + + + ## Prerequisites -Before you can install Virtual Connector, you need an Enterprise account with Magic WAN enabled. Additionally, you need to have a VMware host with sufficient compute, memory, and storage to run the virtual machine with Virtual Connector. This includes: +Before you can install Virtual Connector, you need an Enterprise account with Magic WAN enabled. Additionally, you need to have a VMware host with sufficient compute, memory, and storage to run the virtual machine with Virtual Connector. 
This includes: - Intel x86 CPU architecture - ESXi hypervisor 7.0U1 or higher @@ -24,7 +27,7 @@ Before you can install Virtual Connector, you need an Enterprise account with Ma Refer to [VMware's documentation](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.esxi.install.doc/GUID-B2F01BF5-078A-4C7E-B505-5DFFED0B8C38.html) for more information on how to install ESXi and configure a virtual machine. -## 1. Obtain the Virtual Connector image +## Obtain the Virtual Connector image Contact your account team at Cloudflare to obtain the Virtual Connector [OVA package](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-AE61948B-C2EE-436E-BAFB-3C7209088552.html) and license keys. The OVA image includes the files required to install and configure the virtual machine (VM) for Virtual Connector with the appropriate settings. @@ -32,7 +35,7 @@ This image can be deployed multiple times to create several instances of a Virtu You will consume one license key for each instance created. For example, if you want to deploy 10 Virtual Connectors you should request 10 license keys, and your account team will create 10 Connector instances in your Cloudflare dashboard. -## 2. Deploy the Virtual Connector on VMware +## Deploy the Virtual Connector on VMware The following instructions assume you already have VMware ESXi hypervisor installed with sufficient resources. Refer to [Prerequisites](#prerequisites) for more information. @@ -73,6 +76,7 @@ Refer to [VMWare's documentation](https://kb.vmware.com/s/article/1003825) for m 7. Select where you want to save the files extracted from the OVA image > **Next**. 8. In **Networking mappings**, select assignments for your desired topology according to the port groups you set up previously: + 1. For example, map `eno1` port to `VM Network` to create your WAN, and `eno2` to `LAN0` to act as your LAN port. 2. Allocate any unused ports to the `null` port group. 3. Take note of your configuration. You will need this information to configure your network in the Cloudflare dashboard. @@ -93,17 +97,41 @@ You cannot use the same license key twice, or reuse a key once the virtual machi ::: 12. Select **Save** to finish configuring your Virtual Connector. -13. Continue set up in your [Cloudflare dashboard](#3-set-up-cloudflare-dashboard). +13. Continue set up in your [Cloudflare dashboard](#set-up-cloudflare-dashboard). + +## Set up Cloudflare dashboard + + -## 3. Set up Cloudflare dashboard +## Activate Connector - + -## 4. Activate Connector +### WAN with a static IP address - +After activating your Virtual Connector, you can use it in a network configuration with the WAN interface set to a static IP address — that is, an Internet configuration that is not automatically set by DHCP. -## 5. Boot your Virtual Connector +To use your Virtual Connector on a network configuration with a static IP: + +1. Wait 60 seconds after activating your Connector. +2. Modify your Port Groups as needed to change the source from which the WAN port obtains its IP address. +3. [Adjust the WAN settings](#2-create-a-wan) in the Cloudflare dashboard to reflect the new static IP address configuration. +4. Reboot your virtual machine. + +## Boot your Virtual Connector 1. Go to **Virtual Machines** in VMware, and boot up Virtual Connector's VM. 2. The Virtual Connector will make a request to Cloudflare. This is the step where Virtual Connector registers your provided license key and [downloads the site configuration](#1-create-a-site) for its connected site. 
@@ -113,6 +141,15 @@ You cannot use the same license key twice, or reuse a key once the virtual machi --- + + +--- + ## IP sec tunnels and static routes diff --git a/src/content/docs/magic-wan/configuration/connector/troubleshooting.mdx b/src/content/docs/magic-wan/configuration/connector/troubleshooting.mdx index 86e109c35a44da..a0beafc6e442a5 100644 --- a/src/content/docs/magic-wan/configuration/connector/troubleshooting.mdx +++ b/src/content/docs/magic-wan/configuration/connector/troubleshooting.mdx @@ -3,7 +3,6 @@ pcx_content_type: troubleshooting title: Troubleshooting sidebar: order: 10 - --- If you are experiencing difficulties with your Magic WAN Connector, refer to the following tips to troubleshoot what might be happening. @@ -14,7 +13,7 @@ Make sure that you have [activated your Connector](/magic-wan/configuration/conn ## I have tried to activate Magic WAN Connector, but it is still not working -Check if your Magic WAN Connector is connected to the Internet via a port that can serve DHCP. This is required the first time a Connector boots up so that it can reach the Cloudflare global network and download the required configurations that you set up in the Site configuration step. Refer to [Device activation](/magic-wan/configuration/connector/configure-hardware-connector/#activate-connector) for more details. +Check if your Magic WAN Connector is connected to the Internet via a port that can serve DHCP. This is required the first time a Connector boots up so that it can reach the Cloudflare global network and download the required configurations that you set up in the Site configuration step. Refer to [Activate Connector](/magic-wan/configuration/connector/configure-hardware-connector/#activate-connector) for more details. If you have a firewall deployed upstream of the Magic WAN Connector, [check your firewall settings](/magic-wan/configuration/connector/configure-hardware-connector/#firewall-settings-required). You might need to configure your firewall to allow traffic in specific ports for the Connector to work properly. diff --git a/src/content/docs/page-shield/detection/monitor-connections-scripts.mdx b/src/content/docs/page-shield/detection/monitor-connections-scripts.mdx index c1f92087a553be..408fdbb20219df 100644 --- a/src/content/docs/page-shield/detection/monitor-connections-scripts.mdx +++ b/src/content/docs/page-shield/detection/monitor-connections-scripts.mdx @@ -6,7 +6,6 @@ sidebar: head: - tag: title content: Monitor resources and cookies - --- Once you [activate Page Shield](/page-shield/get-started/), the **Monitors** dashboard will show which resources (scripts and connections) are running on your domain, as well as the cookies recently detected in HTTP traffic. @@ -15,7 +14,9 @@ If you notice unexpected scripts or connections on the dashboard, check them for :::note -If you recently activated Page Shield, you may see a delay in reporting. +- Users in Free and Pro plans only have access to the script monitor. +- If you recently activated Page Shield, you may see a delay in reporting. + ::: ## Use the Monitors dashboard @@ -28,13 +29,13 @@ To review the resources and cookies detected by Page Shield: 3. Under **Monitors**, review the list of scripts, connections, and cookies for your domain. To apply a filter, select **Add filter** and use one or more of the available options: - * **Script**: Filter scripts by their URL. - * **Connection**: Filter connections by their target URL. 
Depending on your [configuration](/page-shield/reference/settings/#connection-target-details), it may search only by target hostname. - * **Host**: Look for scripts appearing on specific hostnames, or connections made in a specific hostname. - * **Page** (requires a Business or Enterprise plan): Look for scripts appearing in a specific page, or for connections made in a specific page. Searches the first page where the script was loaded (or where the connection was made) and the latest occurrences list. - * **Status**: Filter scripts or connections by [status](/page-shield/reference/script-statuses/). - * **Type**: Filter cookies according to their type: first-party cookies or unknown. - * Cookie property: Filter by a cookie property such as **Name**, **Domain**, **Path**, **Same site**, **HTTP only**, and **Secure**. + - **Script**: Filter scripts by their URL. + - **Connection**: Filter connections by their target URL. Depending on your [configuration](/page-shield/reference/settings/#connection-target-details), it may search only by target hostname. + - **Host**: Look for scripts appearing on specific hostnames, or connections made in a specific hostname. + - **Page** (requires a Business or Enterprise plan): Look for scripts appearing in a specific page, or for connections made in a specific page. Searches the first page where the script was loaded (or where the connection was made) and the latest occurrences list. + - **Status**: Filter scripts or connections by [status](/page-shield/reference/script-statuses/). + - **Type**: Filter cookies according to their type: first-party cookies or unknown. + - Cookie property: Filter by a cookie property such as **Name**, **Domain**, **Path**, **Same site**, **HTTP only**, and **Secure**. 4. Depending on your plan, you may be able to [view the details of each item](#view-details). @@ -53,54 +54,52 @@ You can filter the data in these dashboards using different criteria, and print ## View details :::note - -Only available to customers on Business and Enterprise plans. +Only available to customers on Business and Enterprise plans. ::: To view the details of an item, select **Details** next to the item. The details of each connection or script include: -* **Last seen**: How long ago the resource was last detected (in the last 30 days). -* **First seen at**: The date and time when the resource was first detected. -* **Page URLs**: The most recent pages where the resource was detected (up to ten pages). -* **First page URL**: The page where the resource was first detected. -* **Host**: The host where the script is being loaded or the connection is being made. +- **Last seen**: How long ago the resource was last detected (in the last 30 days). +- **First seen at**: The date and time when the resource was first detected. +- **Page URLs**: The most recent pages where the resource was detected (up to ten pages). +- **First page URL**: The page where the resource was first detected. +- **Host**: The host where the script is being loaded or the connection is being made. The details of each cookie include: -* **Type**: A cookie can have the following types: +- **Type**: A cookie can have the following types: - * **First-party**: Cookies set by the origin server through a `set-cookie` HTTP response header. - * **Unknown**: All other detected cookies. + - **First-party**: Cookies set by the origin server through a `set-cookie` HTTP response header. + - **Unknown**: All other detected cookies. -* **Domain**: The value of the `Domain` cookie attribute. 
When not set or unknown, this value is derived from the host. +- **Domain**: The value of the `Domain` cookie attribute. When not set or unknown, this value is derived from the host. -* **Path**: The value of the `Path` cookie attribute. When not set or unknown, this value is derived from the most recent page where the cookie was detected. +- **Path**: The value of the `Path` cookie attribute. When not set or unknown, this value is derived from the most recent page where the cookie was detected. -* **Last seen**: How long ago the resource was last detected (in the last 30 days). +- **Last seen**: How long ago the resource was last detected (in the last 30 days). -* **First seen at**: The date and time when the cookie was first detected. +- **First seen at**: The date and time when the cookie was first detected. -* **Seen on host**: The host where the cookie was first detected. +- **Seen on host**: The host where the cookie was first detected. -* **Seen on pages**: The most recent pages where the cookie was detected (up to ten pages). +- **Seen on pages**: The most recent pages where the cookie was detected (up to ten pages). -* Additional cookie attributes (only available to Enterprise customers with a paid add-on): - * **Max age**: The value of the `Max-Age` cookie attribute. - * **Expires**: The value of the `Expires` cookie attribute. - * **Lifetime**: The approximate cookie lifetime, based on the `Max-Age` and `Expires` cookie attributes. - * **HTTP only**: The value of the `HttpOnly` cookie attribute. - * **Secure**: The value of the `Secure` cookie attribute. - * **Same site**: The value of the `SameSite` cookie attribute. +- Additional cookie attributes (only available to Enterprise customers with a paid add-on): + - **Max age**: The value of the `Max-Age` cookie attribute. + - **Expires**: The value of the `Expires` cookie attribute. + - **Lifetime**: The approximate cookie lifetime, based on the `Max-Age` and `Expires` cookie attributes. + - **HTTP only**: The value of the `HttpOnly` cookie attribute. + - **Secure**: The value of the `Secure` cookie attribute. + - **Same site**: The value of the `SameSite` cookie attribute. Except for **Domain** and **Path**, [standard cookie attributes](https://developer.mozilla.org/en-US/docs/Web/HTTP/Cookies) are only available for first-party cookies, where Cloudflare detected the `set-cookie` HTTP response header in HTTP traffic. ## Export data :::note - -Only available to Enterprise customers with a paid add-on. +Only available to Enterprise customers with a paid add-on. ::: Use this feature to extract data from Page Shield that you can review and annotate. The data in the exported file will honor any filters you configure in the dashboard. diff --git a/src/content/docs/page-shield/index.mdx b/src/content/docs/page-shield/index.mdx index 300ce1fa64e892..d7fa1b7f7cec20 100644 --- a/src/content/docs/page-shield/index.mdx +++ b/src/content/docs/page-shield/index.mdx @@ -9,48 +9,75 @@ head: description: Page Shield is a comprehensive client-side security and privacy solution that allows you to ensure the safety of your website visitors' browsing environment. - --- -import { Description, Feature, FeatureTable, Plan } from "~/components" +import { Description, Feature, FeatureTable, Plan } from "~/components"; -Ensures the safety and privacy of your website visitors' browsing environment. +Ensures the safety and privacy of your website visitors' browsing environment. 
+ - + Page Shield helps manage resources loaded by your website visitors — including scripts, their connections, and cookies — and triggers alert notifications when resources change or are considered malicious. Learn how to [get started](/page-shield/get-started/). -*** +--- ## Features - -Displays information about loaded scripts in your domain's pages and the connections they make. + + Displays information about loaded scripts in your domain's pages and the + connections they make. - -Find in which page a resource first appeared, and view a list of the latest occurrences of the resource in your pages. + + Find in which page a resource first appeared, and view a list of the latest + occurrences of the resource in your pages. - -Detects malicious scripts in your pages using threat intelligence and machine learning. + + Detects malicious scripts in your pages using threat intelligence and machine + learning. - -Detects any changes in the scripts loaded in your pages. + + Detects any changes in the scripts loaded in your pages. - -Receive notifications about newly detected scripts, scripts loaded from unknown domains, new scripts considered malicious, or code changes in your existing scripts. + + Receive notifications about newly detected scripts, scripts loaded from + unknown domains, new scripts considered malicious, or code changes in your + existing scripts. -Policies define allowed resources on your websites. Use policies to enforce an allowlist of resources, effectively blocking resources not included in your policies. + Policies define allowed resources on your websites. Use policies to enforce an + allowlist of resources, effectively blocking resources not included in your + policies. ## Availability diff --git a/src/content/docs/page-shield/reference/page-shield-api.mdx b/src/content/docs/page-shield/reference/page-shield-api.mdx index 443ff9cced9002..68a82bbd797f00 100644 --- a/src/content/docs/page-shield/reference/page-shield-api.mdx +++ b/src/content/docs/page-shield/reference/page-shield-api.mdx @@ -3,10 +3,9 @@ pcx_content_type: reference title: Page Shield API sidebar: order: 6 - --- -import { GlossaryTooltip } from "~/components" +import { GlossaryTooltip } from "~/components"; You can enable and disable Page Shield, configure its settings, and fetch information about detected scripts and connections using the [Page Shield API](/api/operations/page-shield-get-settings). @@ -14,7 +13,7 @@ To authenticate API requests you need an [API token](/fundamentals/api/get-start :::note -Refer to [API deprecations](/fundamentals/api/reference/deprecations/#page-shield) for details on Page Shield API changes. +Refer to [API deprecations](/fundamentals/api/reference/deprecations/) for details on Page Shield API changes. ::: ## Endpoints @@ -50,36 +49,24 @@ The following table summarizes the available operations: | [Delete a Page Shield policy][13] | `DELETE zones/{zone_id}/page_shield/policies/{policy_id}` | Deletes an existing CSP policy. 
| [1]: /api/operations/page-shield-get-settings - [2]: /api/operations/page-shield-update-settings - [3]: /api/operations/page-shield-list-scripts - [4]: /api/operations/page-shield-get-script - [5]: /api/operations/page-shield-list-connections - [6]: /api/operations/page-shield-get-connection - [7]: /api/operations/page-shield-list-cookies - [8]: /api/operations/page-shield-get-cookie - [9]: /api/operations/page-shield-list-policies - [10]: /api/operations/page-shield-get-policy - [11]: /api/operations/page-shield-create-policy - [12]: /api/operations/page-shield-update-policy - [13]: /api/operations/page-shield-delete-policy ## API notes -* The malicious script classification (`Malicious` or `Not malicious`) is not directly available in the API. To determine this classification, compare the script's `js_integrity_score` value with the classification threshold, which is currently set to 50. Scripts with a score value lower than the threshold are considered malicious. +- The malicious script classification (`Malicious` or `Not malicious`) is not directly available in the API. To determine this classification, compare the script's `js_integrity_score` value with the classification threshold, which is currently set to 50. Scripts with a score value lower than the threshold are considered malicious. -* The API provides two separate properties for malicious script/connection categories: `malicious_domain_categories` and `malicious_url_categories`, related to the `domain_reported_malicious` and `url_reported_malicious` properties, respectively. The Cloudflare dashboard displays all the categories in a single **Malicious category** field. For more information, refer to [Malicious script and connection categories](/page-shield/how-it-works/malicious-script-detection/#malicious-script-and-connection-categories). +- The API provides two separate properties for malicious script/connection categories: `malicious_domain_categories` and `malicious_url_categories`, related to the `domain_reported_malicious` and `url_reported_malicious` properties, respectively. The Cloudflare dashboard displays all the categories in a single **Malicious category** field. For more information, refer to [Malicious script and connection categories](/page-shield/how-it-works/malicious-script-detection/#malicious-script-and-connection-categories). 
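Because the `Malicious` / `Not malicious` classification is derived rather than returned by the API, the sketch below shows how a client could fetch scripts and apply the documented threshold of 50, and how it could merge the two category properties the way the dashboard does. The zone ID, API token, and the trimmed-down interface are placeholders, not part of the documented API surface.

```ts
// Sketch: list Page Shield scripts for a zone and derive the dashboard's
// Malicious / Not malicious classification from js_integrity_score.
const CLASSIFICATION_THRESHOLD = 50; // currently documented threshold

interface PageShieldScript {
  id: string;
  url: string;
  js_integrity_score?: number;
  malicious_domain_categories?: string[];
  malicious_url_categories?: string[];
}

function classifyScript(script: PageShieldScript): "Malicious" | "Not malicious" {
  // Scripts with a score lower than the threshold are considered malicious.
  if (
    script.js_integrity_score !== undefined &&
    script.js_integrity_score < CLASSIFICATION_THRESHOLD
  ) {
    return "Malicious";
  }
  return "Not malicious";
}

function maliciousCategories(script: PageShieldScript): string[] {
  // The dashboard shows a single "Malicious category" field; the API splits it
  // into domain-based and URL-based categories.
  return [
    ...(script.malicious_domain_categories ?? []),
    ...(script.malicious_url_categories ?? []),
  ];
}

async function listScripts(
  zoneId: string,
  apiToken: string,
): Promise<PageShieldScript[]> {
  const response = await fetch(
    `https://api.cloudflare.com/client/v4/zones/${zoneId}/page_shield/scripts?per_page=100`,
    { headers: { Authorization: `Bearer ${apiToken}` } },
  );
  const body = (await response.json()) as {
    success: boolean;
    result: PageShieldScript[];
  };
  if (!body.success) throw new Error("Page Shield API request failed");
  return body.result;
}
```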
## Common API calls @@ -94,15 +81,15 @@ curl "https://api.cloudflare.com/client/v4/zones/{zone_id}/page_shield" \ ```json title="Response" { - "result": { - "enabled": true, - "updated_at": "2023-05-14T11:47:55.677555Z", - "use_cloudflare_reporting_endpoint": true, - "use_connection_url_path": false - }, - "success": true, - "errors": [], - "messages": [] + "result": { + "enabled": true, + "updated_at": "2023-05-14T11:47:55.677555Z", + "use_cloudflare_reporting_endpoint": true, + "use_connection_url_path": false + }, + "success": true, + "errors": [], + "messages": [] } ``` @@ -120,13 +107,13 @@ curl --request PUT \ ```json title="Response" { - "result": { - "enabled": true, - "updated_at": "2023-05-14T11:50:41.756996Z" - }, - "success": true, - "errors": [], - "messages": [] + "result": { + "enabled": true, + "updated_at": "2023-05-14T11:50:41.756996Z" + }, + "success": true, + "errors": [], + "messages": [] } ``` @@ -143,38 +130,38 @@ curl "https://api.cloudflare.com/api/v4/zones/{zone_id}/page_shield/scripts?host ```json title="Response" { - "result": [ - { - "id": "8337233faec2357ff84465a919534e4d", - "url": "https://malicious.example.com/badscript.js", - "added_at": "2023-05-18T10:51:10.09615Z", - "first_seen_at": "2023-05-18T10:51:08Z", - "last_seen_at": "2023-05-22T09:57:54Z", - "host": "example.net", - "domain_reported_malicious": false, - "url_reported_malicious": true, - "malicious_url_categories": ["Malware"], - "first_page_url": "http://malicious.example.com/page_one.html", - "status": "active", - "url_contains_cdn_cgi_path": false, - "hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - "js_integrity_score": 10, - "obfuscation_score": 10, - "dataflow_score": 8, - "fetched_at": "2023-05-21T16:58:07Z" - }, - // (...) - ], - "success": true, - "errors": [], - "messages": [], - "result_info": { - "page": 1, - "per_page": 15, - "count": 15, - "total_count": 24, - "total_pages": 2 - } + "result": [ + { + "id": "8337233faec2357ff84465a919534e4d", + "url": "https://malicious.example.com/badscript.js", + "added_at": "2023-05-18T10:51:10.09615Z", + "first_seen_at": "2023-05-18T10:51:08Z", + "last_seen_at": "2023-05-22T09:57:54Z", + "host": "example.net", + "domain_reported_malicious": false, + "url_reported_malicious": true, + "malicious_url_categories": ["Malware"], + "first_page_url": "http://malicious.example.com/page_one.html", + "status": "active", + "url_contains_cdn_cgi_path": false, + "hash": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "js_integrity_score": 10, + "obfuscation_score": 10, + "dataflow_score": 8, + "fetched_at": "2023-05-21T16:58:07Z" + } + // (...) 
+ ], + "success": true, + "errors": [], + "messages": [], + "result_info": { + "page": 1, + "per_page": 15, + "count": 15, + "total_count": 24, + "total_pages": 2 + } } ``` @@ -193,37 +180,37 @@ curl "https://api.cloudflare.com/api/v4/zones/{zone_id}/page_shield/scripts?stat ```json title="Response" { - "result": [ - { - "id": "83c8da2267394ce8465b74c299658fea", - "url": "https://scripts.example.com/anotherbadscript.js", - "added_at": "2023-05-17T13:16:03.419619Z", - "first_seen_at": "2023-05-17T13:15:23Z", - "last_seen_at": "2023-05-18T09:05:20Z", - "host": "example.net", - "domain_reported_malicious": false, - "url_reported_malicious": false, - "first_page_url": "http://malicious.example.com/page_one.html", - "status": "infrequent", - "url_contains_cdn_cgi_path": false, - "hash": "9245aad577e846dd9b990b1b32425a3fae4aad8b8a28441a8b80084b6bb75a45", - "js_integrity_score": 48, - "obfuscation_score": 49, - "dataflow_score": 45, - "fetched_at": "2023-05-18T03:58:07Z" - }, - // (...) - ], - "success": true, - "errors": [], - "messages": [], - "result_info": { - "page": 1, - "per_page": 15, - "count": 15, - "total_count": 17, - "total_pages": 2 - } + "result": [ + { + "id": "83c8da2267394ce8465b74c299658fea", + "url": "https://scripts.example.com/anotherbadscript.js", + "added_at": "2023-05-17T13:16:03.419619Z", + "first_seen_at": "2023-05-17T13:15:23Z", + "last_seen_at": "2023-05-18T09:05:20Z", + "host": "example.net", + "domain_reported_malicious": false, + "url_reported_malicious": false, + "first_page_url": "http://malicious.example.com/page_one.html", + "status": "infrequent", + "url_contains_cdn_cgi_path": false, + "hash": "9245aad577e846dd9b990b1b32425a3fae4aad8b8a28441a8b80084b6bb75a45", + "js_integrity_score": 48, + "obfuscation_score": 49, + "dataflow_score": 45, + "fetched_at": "2023-05-18T03:58:07Z" + } + // (...) 
+ ], + "success": true, + "errors": [], + "messages": [], + "result_info": { + "page": 1, + "per_page": 15, + "count": 15, + "total_count": 17, + "total_pages": 2 + } } ``` @@ -242,40 +229,40 @@ curl "https://api.cloudflare.com/api/v4/zones/{zone_id}/page_shield/scripts/8337 ```json title="Response" { - "result": { - "id": "8337233faec2357ff84465a919534e4d", - "url": "https://malicious.example.com/badscript.js", - "added_at": "2023-05-18T10:51:10.09615Z", - "first_seen_at": "2023-05-18T10:51:08Z", - "last_seen_at": "2023-05-22T09:57:54Z", - "host": "example.net", - "domain_reported_malicious": false, - "url_reported_malicious": true, - "malicious_url_categories": ["Malware"], - "first_page_url": "http://malicious.example.com/page_one.html", - "status": "active", - "url_contains_cdn_cgi_path": false, - "hash": "9245aad577e846dd9b990b1b32425a3fae4aad8b8a28441a8b80084b6bb75a45", - "js_integrity_score": 48, - "obfuscation_score": 49, - "dataflow_score": 45, - "fetched_at": "2023-05-21T16:58:07Z", - "page_urls": [ - "http://malicious.example.com/page_two.html", - "http://malicious.example.com/page_three.html", - "http://malicious.example.com/page_four.html" - ], - "versions": [ - { - "hash": "9245aad577e846dd9b990b1b32425a3fae4aad8b8a28441a8b80084b6bb75a45", - "js_integrity_score": 50, - "fetched_at": "2023-05-21T16:58:07Z" - } - ] - }, - "success": true, - "errors": [], - "messages": [] + "result": { + "id": "8337233faec2357ff84465a919534e4d", + "url": "https://malicious.example.com/badscript.js", + "added_at": "2023-05-18T10:51:10.09615Z", + "first_seen_at": "2023-05-18T10:51:08Z", + "last_seen_at": "2023-05-22T09:57:54Z", + "host": "example.net", + "domain_reported_malicious": false, + "url_reported_malicious": true, + "malicious_url_categories": ["Malware"], + "first_page_url": "http://malicious.example.com/page_one.html", + "status": "active", + "url_contains_cdn_cgi_path": false, + "hash": "9245aad577e846dd9b990b1b32425a3fae4aad8b8a28441a8b80084b6bb75a45", + "js_integrity_score": 48, + "obfuscation_score": 49, + "dataflow_score": 45, + "fetched_at": "2023-05-21T16:58:07Z", + "page_urls": [ + "http://malicious.example.com/page_two.html", + "http://malicious.example.com/page_three.html", + "http://malicious.example.com/page_four.html" + ], + "versions": [ + { + "hash": "9245aad577e846dd9b990b1b32425a3fae4aad8b8a28441a8b80084b6bb75a45", + "js_integrity_score": 50, + "fetched_at": "2023-05-21T16:58:07Z" + } + ] + }, + "success": true, + "errors": [], + "messages": [] } ``` @@ -294,34 +281,34 @@ curl "https://api.cloudflare.com/api/v4/zones/{zone_id}/page_shield/connections? ```json title="Response" { - "result": [ - { - "id": "0a7bb628776f4e50a50d8594c4a01740", - "url": "https://malicious.example.com", - "added_at": "2022-09-18T10:51:10.09615Z", - "first_seen_at": "2022-09-18T10:51:08Z", - "last_seen_at": "2022-09-02T09:57:54Z", - "host": "example.net", - "domain_reported_malicious": true, - "malicious_domain_categories": ["Malware", "Spyware"], - "url_reported_malicious": false, - "malicious_url_categories": [], - "first_page_url": "https://example.net/one.html", - "status": "active", - "url_contains_cdn_cgi_path": false - }, - // (...) 
- ], - "success": true, - "errors": [], - "messages": [], - "result_info": { - "page": 1, - "per_page": 15, - "count": 15, - "total_count": 16, - "total_pages": 2 - } + "result": [ + { + "id": "0a7bb628776f4e50a50d8594c4a01740", + "url": "https://malicious.example.com", + "added_at": "2022-09-18T10:51:10.09615Z", + "first_seen_at": "2022-09-18T10:51:08Z", + "last_seen_at": "2022-09-02T09:57:54Z", + "host": "example.net", + "domain_reported_malicious": true, + "malicious_domain_categories": ["Malware", "Spyware"], + "url_reported_malicious": false, + "malicious_url_categories": [], + "first_page_url": "https://example.net/one.html", + "status": "active", + "url_contains_cdn_cgi_path": false + } + // (...) + ], + "success": true, + "errors": [], + "messages": [], + "result_info": { + "page": 1, + "per_page": 15, + "count": 15, + "total_count": 16, + "total_pages": 2 + } } ``` @@ -338,24 +325,24 @@ curl "https://api.cloudflare.com/api/v4/zones/{zone_id}/page_shield/connections/ ```json title="Response" { - "result": { - "id": "0a7bb628776f4e50a50d8594c4a01740", - "url": "https://malicious.example.com", - "added_at": "2022-09-18T10:51:10.09615Z", - "first_seen_at": "2022-09-18T10:51:08Z", - "last_seen_at": "2022-09-02T09:57:54Z", - "host": "example.net", - "domain_reported_malicious": true, - "malicious_domain_categories": ["Malware", "Spyware"], - "url_reported_malicious": false, - "malicious_url_categories": [], - "first_page_url": "https://example.net/one.html", - "status": "active", - "url_contains_cdn_cgi_path": false - }, - "success": true, - "errors": [], - "messages": [] + "result": { + "id": "0a7bb628776f4e50a50d8594c4a01740", + "url": "https://malicious.example.com", + "added_at": "2022-09-18T10:51:10.09615Z", + "first_seen_at": "2022-09-18T10:51:08Z", + "last_seen_at": "2022-09-02T09:57:54Z", + "host": "example.net", + "domain_reported_malicious": true, + "malicious_domain_categories": ["Malware", "Spyware"], + "url_reported_malicious": false, + "malicious_url_categories": [], + "first_page_url": "https://example.net/one.html", + "status": "active", + "url_contains_cdn_cgi_path": false + }, + "success": true, + "errors": [], + "messages": [] } ``` @@ -372,38 +359,36 @@ curl "https://api.cloudflare.com/api/v4/zones/{zone_id}/page_shield/cookies?page ```json title="Response" { - "result": [ - { - "id": "beee03ada7e047e79f076785d8cd8b8e", - "type": "first_party", - "name": "PHPSESSID", - "host": "example.net", - "domain_attribute": "example.net", - "expires_attribute": "2024-10-21T12:28:20Z", - "http_only_attribute": true, - "max_age_attribute": null, - "path_attribute": "/store", - "same_site_attribute": "strict", - "secure_attribute": true, - "first_seen_at": "2024-05-06T10:51:08Z", - "last_seen_at": "2024-05-07T11:56:01Z", - "first_page_url": "example.net/store/products", - "page_urls": [ - "example.net/store/products/1" - ] - }, - // (...) 
- ], - "success": true, - "errors": [], - "messages": [], - "result_info": { - "page": 1, - "per_page": 15, - "count": 15, - "total_count": 16, - "total_pages": 2 - } + "result": [ + { + "id": "beee03ada7e047e79f076785d8cd8b8e", + "type": "first_party", + "name": "PHPSESSID", + "host": "example.net", + "domain_attribute": "example.net", + "expires_attribute": "2024-10-21T12:28:20Z", + "http_only_attribute": true, + "max_age_attribute": null, + "path_attribute": "/store", + "same_site_attribute": "strict", + "secure_attribute": true, + "first_seen_at": "2024-05-06T10:51:08Z", + "last_seen_at": "2024-05-07T11:56:01Z", + "first_page_url": "example.net/store/products", + "page_urls": ["example.net/store/products/1"] + } + // (...) + ], + "success": true, + "errors": [], + "messages": [], + "result_info": { + "page": 1, + "per_page": 15, + "count": 15, + "total_count": 16, + "total_pages": 2 + } } ``` @@ -420,39 +405,37 @@ curl "https://api.cloudflare.com/api/v4/zones/{zone_id}/page_shield/cookies/beee ```json title="Response" { - "result": { - "id": "beee03ada7e047e79f076785d8cd8b8e", - "type": "first_party", - "name": "PHPSESSID", - "host": "example.net", - "domain_attribute": "example.net", - "expires_attribute": "2024-10-21T12:28:20Z", - "http_only_attribute": true, - "max_age_attribute": null, - "path_attribute": "/store", - "same_site_attribute": "strict", - "secure_attribute": true, - "first_seen_at": "2024-05-06T10:51:08Z", - "last_seen_at": "2024-05-07T11:56:01Z", - "first_page_url": "example.net/store/products", - "page_urls": [ - "example.net/store/products/1" - ] - }, - "success": true, - "errors": [], - "messages": [] + "result": { + "id": "beee03ada7e047e79f076785d8cd8b8e", + "type": "first_party", + "name": "PHPSESSID", + "host": "example.net", + "domain_attribute": "example.net", + "expires_attribute": "2024-10-21T12:28:20Z", + "http_only_attribute": true, + "max_age_attribute": null, + "path_attribute": "/store", + "same_site_attribute": "strict", + "secure_attribute": true, + "first_seen_at": "2024-05-06T10:51:08Z", + "last_seen_at": "2024-05-07T11:56:01Z", + "first_page_url": "example.net/store/products", + "page_urls": ["example.net/store/products/1"] + }, + "success": true, + "errors": [], + "messages": [] } ``` ### Create a policy -This `POST` request creates a Page Shield policy with *Log* action, defining the following scripts as allowed based on where they are hosted: +This `POST` request creates a Page Shield policy with _Log_ action, defining the following scripts as allowed based on where they are hosted: -* Scripts hosted in `myapp.example.com` (which does not include scripts in `example.com`). -* Scripts hosted in `cdnjs.cloudflare.com`. -* The Google Analytics script using its full URL. -* All scripts in the same origin (same HTTP or HTTPS scheme and hostname). +- Scripts hosted in `myapp.example.com` (which does not include scripts in `example.com`). +- Scripts hosted in `cdnjs.cloudflare.com`. +- The Google Analytics script using its full URL. +- All scripts in the same origin (same HTTP or HTTPS scheme and hostname). All other scripts would trigger a policy violation, but those scripts would not be blocked. @@ -460,7 +443,7 @@ For more information on Co :::note -For a list of CSP directives and keywords supported by Page Shield policies, refer to [CSP directives supported by policies](/page-shield/policies/csp-directives/). 
+For a list of CSP directives and keywords supported by Page Shield policies, refer to [CSP directives supported by policies](/page-shield/policies/csp-directives/). ::: ```bash title="Request" @@ -478,18 +461,18 @@ curl "https://api.cloudflare.com/api/v4/zones/{zone_id}/page_shield/policies" \ ```json title="Response" { - "success": true, - "errors": [], - "messages": [], - "result": { - "id": "", - "description": "My first policy in log mode", - "action": "log", - "expression": "http.host eq myapp.example.com", - "enabled": "true", - "value": "script-src myapp.example.com cdnjs.cloudflare.com https://www.google-analytics.com/analytics.js 'self'" - } + "success": true, + "errors": [], + "messages": [], + "result": { + "id": "", + "description": "My first policy in log mode", + "action": "log", + "expression": "http.host eq myapp.example.com", + "enabled": "true", + "value": "script-src myapp.example.com cdnjs.cloudflare.com https://www.google-analytics.com/analytics.js 'self'" + } } ``` -To create a policy with an *Allow* action instead of *Log*, use `"action": "allow"` in the request body. In the case of such policy, all scripts not allowed by the policy would be blocked. +To create a policy with an _Allow_ action instead of _Log_, use `"action": "allow"` in the request body. In the case of such policy, all scripts not allowed by the policy would be blocked. diff --git a/src/content/docs/page-shield/troubleshooting.mdx b/src/content/docs/page-shield/troubleshooting.mdx index 21c07fb0c20568..6175aa61e78bce 100644 --- a/src/content/docs/page-shield/troubleshooting.mdx +++ b/src/content/docs/page-shield/troubleshooting.mdx @@ -4,10 +4,9 @@ source: https://support.cloudflare.com/hc/en-us/articles/360059485272-Troublesho title: Troubleshooting sidebar: order: 10 - --- -import { GlossaryTooltip } from "~/components" +import { GlossaryTooltip } from "~/components"; ## Why do I not see scripts after I activated Page Shield? @@ -39,7 +38,7 @@ You can safely ignore these warnings, since they are related to the reports that Policy violations reported via CSP's [report-only directive](/page-shield/reference/csp-header/) do not take into consideration any redirects or redirect HTTP status codes. This is [by design](https://www.w3.org/TR/CSP3/#create-violation-for-request) for security reasons. -Some third-party services you may want to cover in your Page Shield allow policies perform redirects. An example of such a service is Google Ads, which [does not work well with CSP policies](https://support.google.com/adsense/thread/102839782?hl=en\&msgid=103611259). +Some third-party services you may want to cover in your Page Shield allow policies perform redirects. An example of such a service is Google Ads, which [does not work well with CSP policies](https://support.google.com/adsense/thread/102839782?hl=en&msgid=103611259). For example, if you add the `adservice.google.com` domain to an allow policy, you could get policy violation reports for this domain due to redirects to a different domain (not present in your allow policy). In this case, the violation report would still mention the original domain, and not the domain of the redirected destination, which can cause some confusion. @@ -47,7 +46,7 @@ To try to solve this issue, add the domain of the redirected destination to your ## Do I have access to Page Shield? -Some customers do. For more details, refer to [Availability](/page-shield/#availability). +Yes, Page Shield is available on all plans. 
For details on the available features per plan, refer to [Availability](/page-shield/#availability). ### How do I set up Page Shield? diff --git a/src/content/docs/reference-architecture/architectures/sase.mdx b/src/content/docs/reference-architecture/architectures/sase.mdx index f114dc170a2901..7834e1cace8e92 100644 --- a/src/content/docs/reference-architecture/architectures/sase.mdx +++ b/src/content/docs/reference-architecture/architectures/sase.mdx @@ -1,12 +1,9 @@ --- -title: Secure Access Service Edge (SASE) +title: Evolving to a SASE architecture with Cloudflare pcx_content_type: reference-architecture sidebar: order: 1 -head: - - tag: title - content: "Reference Architecture: Evolving to a SASE architecture with Cloudflare" - + label: Secure Access Service Edge (SASE) --- import { Render } from "~/components" diff --git a/src/content/docs/speed/optimization/content/brotli/content-compression.mdx b/src/content/docs/speed/optimization/content/brotli/content-compression.mdx index 1fa2f65afcaea9..8420da4c33a5fb 100644 --- a/src/content/docs/speed/optimization/content/brotli/content-compression.mdx +++ b/src/content/docs/speed/optimization/content/brotli/content-compression.mdx @@ -36,7 +36,7 @@ linkStyle 1,2 stroke-width: 1px ``` :::note -Zstandard compression is enabled by default on Free plans. Customers in other plans can enable Zstandard compression through [Compression Rules](/rules/compression-rules/). +Customers can enable Zstandard compression through [Compression Rules](/rules/compression-rules/). ::: If supported by visitors' web browsers, Cloudflare will return Gzip, Brotli, or Zstandard-encoded responses for the following content types: diff --git a/src/content/docs/src/env.d.ts b/src/content/docs/src/env.d.ts index 9bc5cb41c24efc..e16c13c6952a6f 100644 --- a/src/content/docs/src/env.d.ts +++ b/src/content/docs/src/env.d.ts @@ -1 +1 @@ -/// \ No newline at end of file +/// diff --git a/src/content/docs/turnstile/extensions/google-firebase.mdx b/src/content/docs/turnstile/extensions/google-firebase.mdx new file mode 100644 index 00000000000000..62eccaa6b9852b --- /dev/null +++ b/src/content/docs/turnstile/extensions/google-firebase.mdx @@ -0,0 +1,125 @@ +--- +pcx_content_type: how-to +title: Implement Turnstile with Google Firebase +sidebar: + order: 2 + label: Google Firebase + +--- + +Turnstile is [available as an extension](https://extensions.dev/extensions/cloudflare/cloudflare-turnstile-app-check-provider) with [Google's Firebase](https://firebase.google.com/) platform as an [App Check](https://firebase.google.com/docs/app-check) provider. You can leverage Cloudflare Turnstile's bot detection and challenge capabilities to ensure that requests to your Firebase backend services are verified and only authentic human visitors can interact with your application. + +Google Firebase is a comprehensive app development platform that provides a variety of tools and services to help developers build, improve, and grow their mobile and web applications. + +Firebase App Check helps protect Firebase resources like Cloud Firestore, Realtime Database, Cloud Storage, and Functions from abuse, such as automated fraud attacks and denial of service (DoS) attacks, by ensuring that incoming requests are from legitimate visitors and trusted sources. + +## Set up a Google Firebase project + +1. Create a Firebase project by going to the [Firebase Console](https://console.firebase.google.com/). +2. Select **Add Project** and follow the prompts to create a new project. +3. 
Add an app to your project by selecting your project. +4. In the project overview, select **Add App** and choose the platform: **Web**. +5. [Register your app](https://firebase.google.com/docs/web/setup?hl=en&authuser=0#register-app) and follow the guide to get your Firebase configuration. + +:::note + +It is important to register your web app first to connect it with Turnstile later. +::: + +## Set up Cloudflare Turnstile + +1. Create a Cloudflare Turnstile site by going to the [Cloudflare Turnstile dashboard](https://dash.cloudflare.com/?to=/:account/turnstile). +2. Create a new widget and get the [sitekey and secret key](/turnstile/get-started/#get-a-sitekey-and-secret-key). + - The domain you configure with the Turnstile widget should be the domain of your web app. + - The [widget mode](/turnstile/concepts/widget/) must be **Invisible**. + +## Integrate Firebase App Check with Turnstile + +### Enable App Check in Firebase + +1. Go to [Cloudflare Turnstile in the Firebase Extensions hub](https://extensions.dev/extensions/cloudflare/cloudflare-turnstile-app-check-provider). +2. Install the Cloudflare Turnstile extension to your Firebase project. +3. Enable [Cloud Functions](https://cloud.google.com/functions?hl=en), [Artifact Registry](https://cloud.google.com/artifact-registry), and [Secret Manager](https://cloud.google.com/security/products/secret-manager?hl=en). +4. Enter the secret key from Cloudflare Turnstile and your Firebase App ID. +5. Select **Install extension**. + +### Grant access to the Cloudflare extension + +1. Grant access to the Cloudflare extension under the IAM section of your project by selecting **Grant Access** under **View by Principals**. +2. Select `ext-cloudflare-turnstile` from the dropdown menu. +3. When filtering the token, select **Service Account Token Creator**. + +### Configure Firebase in your app with Turnstile + +1. Create an `index.ts` file. +2. Add your Firebase configuration. + +```js +import { initializeApp } from "firebase/app"; +import { getAppCheck, initializeAppCheck } from "firebase/app-check"; +import { + CloudflareProviderOptions, +} from '@cloudflare/turnstile-firebase-app-check'; + +const firebaseConfig = { +apiKey: "YOUR_API_KEY", +authDomain: "YOUR_PROJECT_ID.firebaseapp.com", +projectId: "YOUR_PROJECT_ID", +storageBucket: "YOUR_PROJECT_ID.appspot.com", +messagingSenderId: "YOUR_MESSAGING_SENDER_ID", +appId: "YOUR_APP_ID", +}; + +const app = initializeApp(firebaseConfig); + +// Initialize App Check +const siteKey = 'YOUR-SITEKEY'; +const HTTP_ENDPOINT = '${function:ext-cloudflare-turnstile-app-check-provider-tokenExchange.url}'; + +const cpo = new CloudflareProviderOptions(HTTP_ENDPOINT, siteKey); +const provider = new CustomProvider(cpo); + +initializeAppCheck(app, { provider }); + +// retrieve App Check token from Cloudflare Turnstile +cpo.getToken().then(({ token }) => { + document.getElementById('app-check-token').innerHTML = token; +}); + +``` + +### Verify the App Check token in your web application + +To verify the App Check token in your web application, refer to Firebase's [Token Verification guide](https://firebase.google.com/docs/app-check/custom-resource-backend?hl=en#verification). 
+ +```js +import express from "express"; +import { initializeApp } from "firebase-admin/app"; +import { getAppCheck } from "firebase-admin/app-check"; + +const expressApp = express(); +const firebaseApp = initializeApp(); + +const appCheckVerification = async (req, res, next) => { + const appCheckToken = req.header("X-Firebase-AppCheck"); + + if (!appCheckToken) { + res.status(401); + return next("Unauthorized"); + } + + try { + const appCheckClaims = await getAppCheck().verifyToken(appCheckToken); + + // If verifyToken() succeeds, continue with the next middleware function in the stack. + return next(); + } catch (err) { + res.status(401); + return next("Unauthorized"); + } +} + +expressApp.get("/yourApiEndpoint", [appCheckVerification], (req, res) => { + // Handle request. +}); +``` \ No newline at end of file diff --git a/src/content/docs/turnstile/tutorials/index.mdx b/src/content/docs/turnstile/tutorials/index.mdx index 10c55da70ba60f..411240246535bd 100644 --- a/src/content/docs/turnstile/tutorials/index.mdx +++ b/src/content/docs/turnstile/tutorials/index.mdx @@ -1,6 +1,7 @@ --- title: Tutorials pcx_content_type: navigation +hideChildren: true sidebar: order: 5 diff --git a/src/content/docs/vectorize/get-started/intro.mdx b/src/content/docs/vectorize/get-started/intro.mdx index 882615729082e5..fd54886f076d76 100644 --- a/src/content/docs/vectorize/get-started/intro.mdx +++ b/src/content/docs/vectorize/get-started/intro.mdx @@ -132,7 +132,7 @@ Specifically: ## 4. [Optional] Create metadata indexes -Vectorize allows you to add metadata along with vectors into your index, and also provides the ability to to filter on the vector metadata while querying vectors. To do so you would need to specify a metadata field as a "metadata index" for your Vectorize index. +Vectorize allows you to add up to 10KiB of metadata per vector into your index, and also provides the ability to filter on that metadata while querying vectors. To do so you would need to specify a metadata field as a "metadata index" for your Vectorize index. :::note[When to create metadata indexes?] @@ -168,6 +168,14 @@ npx wrangler vectorize list-metadata-index tutorial-index └──────────────┴────────┘ ``` +You can create up to 10 metadata indexes per Vectorize index. + +For metadata indexes of type `number`, the indexed number precision is that of float64. + +For metadata indexes of type `string`, each vector indexes the first 64B of the string data truncated on UTF-8 character boundaries to the longest well-formed UTF-8 substring within that limit, so vectors are filterable on the first 64B of their value for each indexed property. + +See [Vectorize Limits](/vectorize/platform/limits/) for a complete list of limits. + ## 5. Insert vectors Before you can query a vector database, you need to insert vectors for it to query against. These vectors would be generated from data (such as text or images) you pass to a machine learning model. However, this tutorial will define static vectors to illustrate how vector search works on its own. 
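As a sketch of what inserting static vectors and filtering on indexed metadata can look like from a Worker: the `VECTORIZE` binding name, the 3-dimensional vectors, and the `category` metadata field below are assumptions for illustration; the index must already exist and have a matching metadata index declared before insertion for the filter to apply.

```ts
// Sketch only: binding name, dimensions, and metadata field are illustrative.
export interface Env {
  VECTORIZE: VectorizeIndex;
}

export default {
  async fetch(request: Request, env: Env): Promise<Response> {
    // Static vectors stand in for embeddings produced by a model.
    await env.VECTORIZE.insert([
      { id: "1", values: [0.12, 0.45, 0.89], metadata: { category: "docs" } },
      { id: "2", values: [0.75, 0.11, 0.32], metadata: { category: "blog" } },
    ]);

    // Metadata filtering only works on fields declared as metadata indexes
    // before the vectors were inserted.
    const matches = await env.VECTORIZE.query([0.13, 0.44, 0.9], {
      topK: 5,
      filter: { category: { $eq: "docs" } },
    });

    return Response.json(matches);
  },
};
```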
diff --git a/src/content/docs/vectorize/platform/limits.mdx b/src/content/docs/vectorize/platform/limits.mdx index 2801e2d34ad266..0a6afded6547be 100644 --- a/src/content/docs/vectorize/platform/limits.mdx +++ b/src/content/docs/vectorize/platform/limits.mdx @@ -27,6 +27,8 @@ The following limits apply to accounts, indexes and vectors (as specified): | Maximum namespaces per index | 1000 namespaces beta | | Maximum namespace name length | 64 bytes | | Maximum vectors upload size | 100 MB | +| Maximum metadata indexes per Vectorize index | 10 | +| Maximum indexed data per metadata index per vector | 64 bytes | beta This limit is beta only and is expected to increase over time. diff --git a/src/content/docs/vectorize/reference/metadata-filtering.mdx b/src/content/docs/vectorize/reference/metadata-filtering.mdx index ddfe160f9dde72..ae85af17c5ae8b 100644 --- a/src/content/docs/vectorize/reference/metadata-filtering.mdx +++ b/src/content/docs/vectorize/reference/metadata-filtering.mdx @@ -9,7 +9,7 @@ import { Render, PackageManagers } from "~/components"; :::note[Enable metadata filtering] -Vectorize requires metadata indexes to be specified before vectors are inserted to support metadata filtering. Please refer to [Create metadata indexes](/vectorize/get-started/intro/#4-optional-create-metadata-indexes) for details. +Vectorize requires metadata indexes to be specified before vectors are inserted to support metadata filtering. `string`, `number` and `boolean` metadata indexes are supported. Please refer to [Create metadata indexes](/vectorize/get-started/intro/#4-optional-create-metadata-indexes) for details. Vectorize supports [namespace](/vectorize/best-practices/insert-vectors/#namespaces) filtering by default. @@ -19,6 +19,16 @@ In addition to providing an input vector to your query, you can also filter by [ By using metadata filtering to limit the scope of a query, you can filter by specific customer IDs, tenant, product category or any other metadata you associate with your vectors. +## Limits + +You can store up to 10KiB of metadata per vector, and create up to 10 metadata indexes per Vectorize index. + +For metadata indexes of type `number`, the indexed number precision is that of float64. + +For metadata indexes of type `string`, each vector indexes the first 64B of the string data truncated on UTF-8 character boundaries to the longest well-formed UTF-8 substring within that limit, so vectors are filterable on the first 64B of their value for each indexed property. + +See [Vectorize Limits](/vectorize/platform/limits/) for a complete list of limits. + ## Supported operations Optional `filter` property on `query()` method specifies metadata filter: diff --git a/src/content/docs/waf/custom-rules/custom-rulesets/create-api.mdx b/src/content/docs/waf/custom-rules/custom-rulesets/create-api.mdx index b187b39cb6c56c..253ee53eddce69 100644 --- a/src/content/docs/waf/custom-rules/custom-rulesets/create-api.mdx +++ b/src/content/docs/waf/custom-rules/custom-rulesets/create-api.mdx @@ -6,16 +6,17 @@ sidebar: head: - tag: title content: Work with custom rulesets using the API - --- Use the [Rulesets API](/ruleset-engine/rulesets-api/) to work with custom rulesets using an API. :::note -Account-level WAF configuration requires an Enterprise plan with a paid add-on. +Account-level WAF configuration requires an Enterprise plan with a paid add-on. ::: +## Deploy a custom ruleset using the API + To deploy a custom ruleset in your account, follow these general steps: 1. 
Create a custom ruleset, if it does not exist. @@ -23,12 +24,10 @@ To deploy a custom ruleset in your account, follow these general steps: For more information, refer to [Work with custom rulesets](/ruleset-engine/custom-rulesets/) in the Ruleset Engine documentation. -:::caution[Important] - - -* Deployed custom rulesets will only apply to incoming traffic of Enterprise domains. Regarding the expression of the rule deploying the custom ruleset, you must use parentheses to enclose any custom conditions and end your expression with `and cf.zone.plan eq "ENT"` or else the API operation will fail. +:::caution -* Currently, you can only deploy custom rulesets to a phase at the account level. +- Deployed custom rulesets will only apply to incoming traffic of Enterprise domains. Regarding the expression of the rule deploying the custom ruleset, you must use parentheses to enclose any custom conditions and end your expression with `and cf.zone.plan eq "ENT"` or else the API operation will fail. +- Currently, you can only deploy custom rulesets to a phase at the account level. ::: diff --git a/src/content/docs/waf/custom-rules/custom-rulesets/create-dashboard.mdx b/src/content/docs/waf/custom-rules/custom-rulesets/create-dashboard.mdx index 49c9811cb8254b..14bc1ad7510c16 100644 --- a/src/content/docs/waf/custom-rules/custom-rulesets/create-dashboard.mdx +++ b/src/content/docs/waf/custom-rules/custom-rulesets/create-dashboard.mdx @@ -11,10 +11,9 @@ head: import { Render } from "~/components"; -Create and deploy custom rulesets in Account Home > **WAF** > **Custom rulesets**. +Custom rulesets are configured at the account level. To work with custom rulesets, go to Account Home > **WAF** > **Custom rulesets**. :::note - Account-level WAF configuration requires an Enterprise plan with a paid add-on. ::: @@ -36,7 +35,6 @@ Account-level WAF configuration requires an Enterprise plan with a paid add-on. - Select **Custom filter expression** to define a custom expression that defines when to execute the custom ruleset. Use the **Field** drop-down list to choose an HTTP property. For each request, the value of the property you choose for **Field** is compared to the value you specify for **Value** using the operator selected in **Operator**. Alternatively, select **Edit expression** to define your expression using the [Expression Editor](/ruleset-engine/rules-language/expressions/edit-expressions/#expression-editor). :::caution - Deployed custom rulesets will only apply to incoming traffic of Enterprise domains. The Expression Builder will automatically include this filter. If you define a custom expression for the ruleset using the Expression Editor, you must use parentheses to enclose any custom conditions and end your expression with `and cf.zone.plan eq "ENT"` so that the rule only applies to domains on an Enterprise plan. ::: @@ -56,6 +54,31 @@ Account-level WAF configuration requires an Enterprise plan with a paid add-on. 13. Select **Create**. +## Edit a custom ruleset + +1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/) and select your account. + +2. Go to Account Home > **WAF** > **Custom rulesets**. + + ![Custom rulesets page in the Cloudflare dashboard](~/assets/images/waf/account/custom-rulesets-dashboard.png) + +3. To edit a custom ruleset, select the three dots next to it > **Edit**. + +4. Make any desired changes to the ruleset by selecting **Edit** next to the items you want to change. + +5. When you are done, select **Back to rulesets list**. 
+ +:::caution +Deployed custom rulesets will only apply to incoming traffic of Enterprise domains. The Expression Builder will automatically include this filter. If you define a custom expression for the ruleset using the Expression Editor, you must use parentheses to enclose any custom conditions and end your expression with `and cf.zone.plan eq "ENT"` so that the rule only applies to domains on an Enterprise plan. +::: + +## Delete a custom ruleset + +1. Log in to the [Cloudflare dashboard](https://dash.cloudflare.com/) and select your account. +2. Go to Account Home > **WAF** > **Custom rulesets**. +3. To delete a custom ruleset, select the three dots next to it > **Delete**. +4. To confirm the delete operation, select **Delete**. + ## Configure a custom response for blocked requests + +- If you need a high availability configuration for your premises, refer to + [About high availability configurations](#about-high-availability-configurations) for more information + and learn how to configure your Connector in this mode. + +- If you do not need a high availability configuration for you premises, proceed to [Set up Cloudflare dashboard](#set-up-cloudflare-dashboard). + + + +:::caution[Warning] +You cannot enable high availability for an existing site. To add high availability to an existing site in the Cloudflare dashboard, you need to delete the site and start again. Plan accordingly to create a high availability configuration from the start if needed. +::: diff --git a/src/content/partials/magic-wan/connector/create-site.mdx b/src/content/partials/magic-wan/connector/create-site.mdx index 11a02245c2862e..209b467acb5e5f 100644 --- a/src/content/partials/magic-wan/connector/create-site.mdx +++ b/src/content/partials/magic-wan/connector/create-site.mdx @@ -78,7 +78,7 @@ Take note of the site `id` parameter, as you will need it to create WANs and LAN 3. **Interface number** 4. In **VLAN ID**, enter a number between `0` and `4094` to specify a [VLAN ID](/magic-wan/configuration/connector/reference/#vlan-id). 5. In **Priority**, choose the priority for your WAN. Lower numbers have higher priority. Refer to Traffic steering to learn more about how Cloudflare calculates priorities. -6. **Addressing**: Specify whether the WAN IP is fetched from a DHCP server or if it is a static IP. If you choose a static IP, you also need to specify the static IP and gateway addresses. +6. **Addressing**: If this is the first time you are setting up your Magic WAN Connector, select **DHCP**. After you activate your Connector, you can then choose to continue fetching the WAN IP address from a DHCP server, or change it to a static IP. If you choose a static IP, you also need to specify the static IP and gateway addresses.
diff --git a/src/content/partials/magic-wan/connector/ha-configs.mdx b/src/content/partials/magic-wan/connector/ha-configs.mdx new file mode 100644 index 00000000000000..91c01fc33ce485 --- /dev/null +++ b/src/content/partials/magic-wan/connector/ha-configs.mdx @@ -0,0 +1,45 @@ +--- +inputParameters: finalInstructions +--- + +import { Markdown } from "~/components"; + +## About high availability configurations + +When you set up a site in high availability, the WANs and LANs in your Connectors have the same configuration but are replicated on two nodes. In case of failure of a Connector, the other Connector becomes the active node, taking over configuration of the LAN gateway IP and allowing traffic to continue without disruption. + +Because Connectors in high availability configurations share a single site, you need to set up: + +- **Static address**: The IP for the primary node in your site. +- **Secondary static address**: The IP for the secondary node in your site. +- **Virtual static address**: The IP that the LAN south of the Connector will forward traffic to, which is the LAN's gateway IP. + +Make sure all IPs are part of the same subnet. + +:::note + +- Failure conditions include Connector down or software restart, LAN or WAN link down, tunnel health down. +- High availability (HA) is run in non-preempt mode, which means either the primary or the secondary node can come up as active through an election process which includes node health parameters. +- In the case of a failover where a Connector is acting as a DHCP server, DHCP leases will be synchronized. + ::: + +### ​​Create a high availability configuration + +You cannot enable high availability for an existing site. To add high availability to an existing site in the Cloudflare dashboard, you need to delete the site and start again. + +To set up a high availability configuration: + +1. Follow the steps in [Create a site](#1-create-a-site) up until step 4. +2. After naming your site, select **Enable high availability**. +3. Under **Connector**, select **Add Connector**. +4. From the list, choose your first Connector > **Add Connector**. +5. Back on the previous screen, select **Add secondary Connector**. +6. From the list, choose your second Connector > **Add Connector**. +7. Select **Next** to [Create a WAN](#2-create-a-wan). If you are configuring a static IP, configure the IP for the primary node as the static address, and the IP for the secondary node as the secondary static address. +8. To create a LAN, follow the steps mentioned above in [Create a LAN](#3-create-a-lan) up until step 4. +9. In **Static address**, enter the IP for the primary node in your site. For example, `192.168.10.1/24`. +10. In **Secondary static address**, enter the IP for the secondary node in your site. For example, `192.168.10.2/24`. +11. In **Virtual static address**, enter the IP that the LAN south of the Connector will forward traffic to. For example, `192.168.10.3/24`. +12. Select **Save**. +13. From the **High availability probing link** drop-down menu, select the port that should be used to monitor the node's health. Cloudflare recommends you choose a reliable interface as the HA probing link. The primary and secondary node's probing link should be connected over a switch, and cannot be a direct connection. +14. 
diff --git a/src/content/plans/index.json b/src/content/plans/index.json index 07bc2b05de1e18..2bd55b857f3d57 100644 --- a/src/content/plans/index.json +++ b/src/content/plans/index.json @@ -1676,8 +1676,8 @@ "properties": { "availability": { "title": "Availability", - "summary": "Pro and above", - "free": "No", + "summary": "Available on all plans", + "free": "Yes", "pro": "Yes", "biz": "Yes", "ent": "Yes", @@ -1685,8 +1685,8 @@ }, "b_script_monitor": { "title": "Script monitor", - "summary": "Pro and above", - "free": "No", + "summary": "Available on all plans", + "free": "Yes", "pro": "Yes", "biz": "Yes", "ent": "Yes", diff --git a/src/content/videos/index.yaml b/src/content/videos/index.yaml index 9707210fdfa5cf..418eb42e78d0c9 100644 --- a/src/content/videos/index.yaml +++ b/src/content/videos/index.yaml @@ -169,3 +169,29 @@ entries: difficulty: Intermediate content_type: 🎥 Video pcx_content_type: tutorial + - link: https://youtu.be/doKt9wWQF9A + title: AI meets Maps | Using Cloudflare AI, Langchain, Mapbox, Folium and Streamlit + description: Welcome to RouteMe, a smart tool that helps you plan the most efficient route between landmarks in any city. Powered by Cloudflare Workers AI, Langchain and Mapbox. This Streamlit webapp uses LLMs and Mapbox off my scripts API to solve the classic traveling salesman problem, turning your sightseeing into an optimized adventure! + tags: [Workers, Workers AI] + languages: [Python] + products: [Workers, Workers AI] + cloudflare: true + stream_id: f610dee9aa20ec843b0e451b3014540a + author: lizzie + updated: 2024-09-16 + difficulty: Intermediate + content_type: 🎥 Video + pcx_content_type: tutorial + - link: https://youtu.be/WhG5dfsOUtM + title: Welcome to the Cloudflare Developer Channel + description: Welcome to the Cloudflare Developers YouTube channel. We've got tutorials and working demos and everything you need to level up your projects. Whether you're working on your next big thing or just dorking around with some side projects, we've got you covered! So why don't you come hang out, subscribe to our developer channel and together we'll build something awesome. You're gonna love it. 
+ tags: [Workers, AI, Vectorize, RAG, Hono, KV, D1, R2, AI Gateway] + languages: [TypeScript, JavaScript, Python] + products: [Workers, Workers AI, Vectorize, RAG, Hono, KV, D1, R2, AI Gateway] + cloudflare: true + stream_id: 0fe3acaf4dc09ba0285b75af6602c362 + author: craig + updated: 2024-09-18 + difficulty: Beginner + content_type: 🎥 Video + pcx_content_type: tutorial diff --git a/src/env.d.ts b/src/env.d.ts index 9bc5cb41c24efc..e16c13c6952a6f 100644 --- a/src/env.d.ts +++ b/src/env.d.ts @@ -1 +1 @@ -/// \ No newline at end of file +/// diff --git a/src/pages/pages/platform/build-configuration.json.ts b/src/pages/pages/platform/build-configuration.json.ts index 209ed9351e625a..0928b6786ee5d7 100644 --- a/src/pages/pages/platform/build-configuration.json.ts +++ b/src/pages/pages/platform/build-configuration.json.ts @@ -1,9 +1,11 @@ import { getEntry } from "astro:content"; export async function GET() { - const entries = await getEntry("pages-framework-presets", "index"); + const entries = await getEntry("pages-framework-presets", "index"); - const sorted = Object.fromEntries(Object.entries(entries.data.build_configs).sort()) + const sorted = Object.fromEntries( + Object.entries(entries.data.build_configs).sort(), + ); - return Response.json(sorted); -} \ No newline at end of file + return Response.json(sorted); +} diff --git a/src/util/container.ts b/src/util/container.ts index 746c77546d261c..13262407b41e3b 100644 --- a/src/util/container.ts +++ b/src/util/container.ts @@ -19,5 +19,5 @@ export async function entryToString(entry: CollectionEntry<"docs">) { params: { slug: entry.slug }, }); - return html; + return html; } diff --git a/src/util/description.ts b/src/util/description.ts index 658790d7fd2bc7..da762f7efa42fb 100644 --- a/src/util/description.ts +++ b/src/util/description.ts @@ -1,6 +1,6 @@ import type { CollectionEntry } from "astro:content"; import { parse } from "node-html-parser"; -import { entryToString } from "./container" +import { entryToString } from "./container"; /* 1. If there is a `description` property in the frontmatter, return that. 2. If there is a `

<p>...</p>

` element in the HTML, return that. diff --git a/src/util/sidebar.ts b/src/util/sidebar.ts index f747c0b90377bf..ab2cb411c0610c 100644 --- a/src/util/sidebar.ts +++ b/src/util/sidebar.ts @@ -1,7 +1,8 @@ export function sortBySidebarOrder(a: any, b: any): number { - const collator = new Intl.Collator("en"); + const collator = new Intl.Collator("en"); - if (a.data.sidebar.order !== b.data.sidebar.order) return a.data.sidebar.order - b.data.sidebar.order; + if (a.data.sidebar.order !== b.data.sidebar.order) + return a.data.sidebar.order - b.data.sidebar.order; - return collator.compare(a.data.title, b.data.title); -} \ No newline at end of file + return collator.compare(a.data.title, b.data.title); +}
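For context on the `sortBySidebarOrder` change above, a hypothetical usage sketch of the resulting ordering (by `sidebar.order` first, then alphabetically by title via the English collator); the entry objects include only the fields the comparator reads.

```ts
// Hypothetical entries shaped like the objects sortBySidebarOrder compares.
const entries = [
  { data: { title: "Zebra", sidebar: { order: 2 } } },
  { data: { title: "Banana", sidebar: { order: 1 } } },
  { data: { title: "Apple", sidebar: { order: 1 } } },
];

entries.sort(sortBySidebarOrder);
// => Apple (order 1), Banana (order 1), Zebra (order 2)
```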