diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..05927b1 --- /dev/null +++ b/404.html @@ -0,0 +1 @@ +404: This page could not be found

404

This page could not be found.

\ No newline at end of file diff --git a/CNAME b/CNAME new file mode 100644 index 0000000..7aa6326 --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +sqlite.holt.courses \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/index.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/index.json new file mode 100644 index 0000000..d366129 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/index.json @@ -0,0 +1 @@ +{"pageProps":{"sections":[{"icon":"info-circle","title":"Welcome","slug":"welcome","lessons":[{"slug":"intro","fullSlug":"/lessons/welcome/intro","title":"Intro","order":"01A","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/01-welcome/A-intro.md","description":""},{"slug":"what-is-sqlite","fullSlug":"/lessons/welcome/what-is-sqlite","title":"What is SQLite?","order":"01B","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/01-welcome/B-what-is-sqlite.md","description":""}],"order":"01"},{"icon":"person-running","title":"Running SQLite","slug":"running-sqlite","lessons":[{"slug":"installing-sqlite","fullSlug":"/lessons/running-sqlite/installing-sqlite","title":"Installing SQLite","order":"02A","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/02-running-sqlite/A-installing-sqlite.md","description":""},{"slug":"getting-started","fullSlug":"/lessons/running-sqlite/getting-started","title":"Getting Started","order":"02B","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/02-running-sqlite/B-getting-started.md","description":""}],"order":"02"},{"icon":"code","title":"Basic 
SQL","slug":"basic-sql","lessons":[{"slug":"select","fullSlug":"/lessons/basic-sql/select","title":"SELECT","order":"03A","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/03-basic-sql/A-select.md","description":""},{"slug":"insert","fullSlug":"/lessons/basic-sql/insert","title":"INSERT","order":"03B","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/03-basic-sql/B-insert.md","description":""},{"slug":"tables","fullSlug":"/lessons/basic-sql/tables","title":"Tables","order":"03C","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/03-basic-sql/C-tables.md","description":""}],"order":"03"},{"icon":"table","title":"Intermediate SQL","slug":"intermediate-sql","lessons":[{"slug":"relational-data","fullSlug":"/lessons/intermediate-sql/relational-data","title":"Relational Data","order":"04A","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/04-intermediate-sql/A-relational-data.md","description":""},{"slug":"other-types-of-joins","fullSlug":"/lessons/intermediate-sql/other-types-of-joins","title":"Other Types of Joins","order":"04B","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/04-intermediate-sql/B-other-types-of-joins.md","description":""},{"slug":"foreign-keys","fullSlug":"/lessons/intermediate-sql/foreign-keys","title":"Foreign 
Keys","order":"04C","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/04-intermediate-sql/C-foreign-keys.md","description":""},{"slug":"aggregation","fullSlug":"/lessons/intermediate-sql/aggregation","title":"Aggregation","order":"04D","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/04-intermediate-sql/D-aggregation.md","description":""},{"slug":"subqueries","fullSlug":"/lessons/intermediate-sql/subqueries","title":"Subqueries","order":"04E","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/04-intermediate-sql/E-subqueries.md","description":""}],"order":"04"},{"icon":"circle-nodes","title":"Build a Project with Node.js and SQLite","slug":"build-a-project-with-nodejs-and-sqlite","lessons":[{"slug":"the-example-app","fullSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/the-example-app","title":"The Example App","order":"05A","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/05-build-a-project-with-nodejs-and-sqlite/A-the-example-app.md","description":""},{"slug":"sqlite3","fullSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/sqlite3","title":"sqlite3","order":"05B","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/05-build-a-project-with-nodejs-and-sqlite/B-sqlite3.md","description":""},{"slug":"project","fullSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/project","title":"Project","order":"05C","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/05-build-a-project-with-nodejs-and-sqlite/C-project.md","description":""},{"slug":"alternatives-to-sqlite3","fullSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/alternatives-to-sqlite3","title":"Alternatives to 
sqlite3","order":"05D","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/05-build-a-project-with-nodejs-and-sqlite/D-alternatives-to-sqlite3.md","description":""}],"order":"05"},{"icon":"fingerprint","title":"What is Unique to SQLite","slug":"what-is-unique-to-sqlite","lessons":[{"slug":"flexible-typing","fullSlug":"/lessons/what-is-unique-to-sqlite/flexible-typing","title":"Flexible Typing","order":"06A","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/06-what-is-unique-to-sqlite/A-flexible-typing.md","description":""},{"slug":"limits-of-sqlite","fullSlug":"/lessons/what-is-unique-to-sqlite/limits-of-sqlite","title":"Limits of SQLite","order":"06B","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/06-what-is-unique-to-sqlite/B-limits-of-sqlite.md","description":""},{"slug":"views","fullSlug":"/lessons/what-is-unique-to-sqlite/views","title":"Views","order":"06C","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/06-what-is-unique-to-sqlite/C-views.md","description":""}],"order":"06"},{"icon":"info-circle","title":"Performance","slug":"performance","lessons":[{"slug":"explain","fullSlug":"/lessons/performance/explain","title":"EXPLAIN","order":"07A","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/07-performance/A-explain.md","description":""},{"slug":"indexes","fullSlug":"/lessons/performance/indexes","title":"Indexes","order":"07B","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/07-performance/B-indexes.md","description":""}],"order":"07"},{"icon":"package","title":"JSON","slug":"json","lessons":[{"slug":"sqlite-extensions","fullSlug":"/lessons/json/sqlite-extensions","title":"SQLite 
Extensions","order":"08A","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/08-json/A-sqlite-extensions.md","description":""},{"slug":"querying","fullSlug":"/lessons/json/querying","title":"JSON","order":"08B","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/08-json/B-querying.md","description":""},{"slug":"jsonb","fullSlug":"/lessons/json/jsonb","title":"Jsonb","order":"08C","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/08-json/C-jsonb.md","description":""},{"slug":"more-advance-queries","fullSlug":"/lessons/json/more-advance-queries","title":"More Advance Queries","order":"08D","path":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/08-json/D-more-advance-queries.md","description":""}],"order":"08"}]},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/basic-sql/insert.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/basic-sql/insert.json new file mode 100644 index 0000000..53fa52b --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/basic-sql/insert.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"INSERT"},"html":"

We've seen how to read from tables using SELECT. Let's see how to insert new data into tables using INSERT.

\n
INSERT INTO Artist (name) VALUES ('Radiohead');\nSELECT * from Artist WHERE name = 'Radiohead';\n
\n

This inserts a new artist into the Artist table. That table will have one column in it, name and that value is going to be 'Radiohead' (notice single quotes again.) Notice we did not give it an ArtistId. That ID is autogenerated by SQLite for us and is guaranteed unique. If you had multiple columns, you would just make sure it's the same order on both sides

\n
-- Not a valid query for our database, just to show you\nINSERT INTO food (name, food_group, color) VALUES ('carrot', 'vegetable', 'orange'); -- notice the order is the same\n
\n

Also, note that here you could use double quotes (though I typically won't.)

\n
INSERT INTO "Artist" ("name") VALUES ('Radiohead');\n
\n

UPDATE

\n

Let's say you didn't intend to insert the band Radiohead but instead wanted to insert Daft Punk. You could do this.

\n
UPDATE Artist SET name = 'Daft Punk' WHERE name = 'Radiohead';\nSELECT * from Artist WHERE name = 'Daft Punk';\n
\n

Notice the IDs are the same. You also could have selected by the ArtistId instead of the name (and probably would have been a safer practice.)

\n
\n

We'll talk about how to upsert in a bit. We need to talk about table constraint before we talk about upserts. Just wanted you to know I'm not leaving it out as I knew some of you would be wondering!

\n
\n

RETURNING

\n

One more update, let's change it to a different French techno group

\n
UPDATE Artist SET name = 'Justice' WHERE name = 'Daft Punk' RETURNING *;\n
\n

The returning allows you to SELECT the rows you're updating so you can see what changed.

\n

DELETE

\n

Very similar to UPDATEs. RETURNING also works here if you want to see what gets deleted.

\n
DELETE FROM Artist WHERE name = 'Justice'; -- Feel free to put RETURNING * at the end\n
\n","markdown":"\nWe've seen how to read from tables using SELECT. Let's see how to insert new data into tables using INSERT.\n\n```sql\nINSERT INTO Artist (name) VALUES ('Radiohead');\nSELECT * from Artist WHERE name = 'Radiohead';\n```\n\nThis inserts a new artist into the Artist table. That table will have one column in it, `name` and that value is going to be `'Radiohead'` (notice single quotes again.) Notice we did not give it an ArtistId. That ID is autogenerated by SQLite for us and is guaranteed unique. If you had multiple columns, you would just make sure it's the same order on both sides\n\n```sql\n-- Not a valid query for our database, just to show you\nINSERT INTO food (name, food_group, color) VALUES ('carrot', 'vegetable', 'orange'); -- notice the order is the same\n```\n\nAlso, note that here you could use double quotes (though I typically won't.)\n\n```sql\nINSERT INTO \"Artist\" (\"name\") VALUES ('Radiohead');\n```\n\n## UPDATE\n\nLet's say you didn't intend to insert the band Radiohead but instead wanted to insert Daft Punk. You could do this.\n\n```sql\nUPDATE Artist SET name = 'Daft Punk' WHERE name = 'Radiohead';\nSELECT * from Artist WHERE name = 'Daft Punk';\n```\n\nNotice the IDs are the same. You also could have selected by the ArtistId instead of the name (and probably would have been a safer practice.)\n\n> We'll talk about how to upsert in a bit. We need to talk about table constraint before we talk about upserts. Just wanted you to know I'm not leaving it out as I knew some of you would be wondering!\n\n## RETURNING\n\nOne more update, let's change it to a different French techno group\n\n```sql\nUPDATE Artist SET name = 'Justice' WHERE name = 'Daft Punk' RETURNING *;\n```\n\nThe returning allows you to SELECT the rows you're updating so you can see what changed.\n\n## DELETE\n\nVery similar to UPDATEs. 
RETURNING also works here if you want to see what gets deleted.\n\n```sql\nDELETE FROM Artist WHERE name = 'Justice'; -- Feel free to put RETURNING * at the end\n```\n","slug":"insert","title":"INSERT","section":"Basic SQL","icon":"code","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/03-basic-sql/B-insert.md","nextSlug":"/lessons/basic-sql/tables","prevSlug":"/lessons/basic-sql/select"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/basic-sql/select.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/basic-sql/select.json new file mode 100644 index 0000000..845c495 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/basic-sql/select.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"SELECT"},"html":"

I like to learn SQL by doing rather than some contrived explanation of how grammar is broken down and blah blah blah. I like putting fingers on keyboards and seeing what happens when we do things. Once we start to experience what SQL can do for us then we can get a bit more fine-grained in terms of what is doing what and why.

\n
\n

Note, this is meant to be a very quick intro to general SQL. I've taught [the Complete Intro to SQL][sql] before and it's more in-depth. This class uses Postgres but 95% of the grammar applies to both. I'll highlight where there's differences.

\n
\n

SELECT

\n

You should already have a database session open and have Chinook loaded. If not, look back at the previous lesson on how to do that.

\n

SELECT is the name of the "command" part of the query. It's what the command is going to do. In our case, we're going to be reading from the database which is a SELECT command.

\n
SELECT * FROM Artist;\n
\n
\n

Capitalization of SELECT and FROM isn't important. I usually do it out of habit because everyone used to do it that way. It's not as common now and you'll see me do both.

\n
\n

This will return about 275 rows from the database. We asked for every single artist in the Artist table and we got it! The * just means that we wanted every single bit of info available for every artist. If we only wanted some of the info, we could ask for that like this.

\n
SELECT name from Artist;\n
\n

Notice the number is gone (which was the ArtistId) and we just have the name for each band. We could also ask for both like this

\n
SELECT name, ArtistId from Artist;\n
\n

Notice the columns came back in the order you asked for them too.

\n
\n

If you need to get the name of columns, .schema <TABLE_NAME> is helpful.

\n
\n

WHERE

\n

In this case, we're querying all rows without any filtering whatsoever. Sometimes this is useful but normally you have an idea of some subset of rows you want. Let's say we wanted to get the ID of one of my favorite bands, The Postal Service. How would we do that?

\n
SELECT ArtistID FROM Artist WHERE name = 'The Postal Service';\n
\n
\n

Very important you use single quotes here. Single quotes mean a value or string literal. Double quotes in SQL refer to he name of columns and so putting double quotes would not be valid SQL.

\n
\n

This will just return 174 because that's all we asked for. Likewise, we could do

\n
SELECT name FROM Artist WHERE ArtistId = 174;\n
\n

This one doesn't need quotes because it's the literal number 174.

\n

LIKE

\n

Let's say you didn't know if The Postal Service was listed under "The Postal Service" or just "Postal Service". This is where LIKE can help.

\n
\n

SQLite does not have ILIKE, just LIKE. LIKE is case insensitive.

\n
\n
SELECT ArtistID FROM Artist WHERE name LIKE '%Postal Service';\n
\n

This will match "The Postal Service", "Postal Service", "Definitely Postal Service" and anything that has text before "Postal Service". The % means "give anything that matches 0 to many characters before this".

\n

You can use multiple too. Let's say we wanted to know every band that had "Orchestra" in the name. We can do that with

\n
SELECT name FROM Artist WHERE name LIKE '%orchestra%';\n
\n

Pretty cool, right?

\n

There are more comparator operators that you can discover for yourself as well

\n
SELECT * FROM Artist WHERE ArtistId <= 10;\n
\n

LIMIT and OFFSET

\n

Sometimes you just want a few responses. I do this a lot to just see what's in a table.

\n
SELECT * FROM Artist LIMIT 5;\nSELECT * FROM Artist LIMIT 5 OFFSET 5;\nSELECT * FROM Artist LIMIT 5 OFFSET 10;\n
\n

This will allow you to page through results as well.

\n

ORDER BY

\n

Frequently you will want to change how things are ordered as well.

\n
SELECT * FROM Artist ORDER BY name LIMIT 5;\nSELECT * FROM Artist ORDER BY name ASC LIMIT 5; -- Same as above query. ASC is implied if left out.\nSELECT * FROM Artist ORDER BY name DESC LIMIT 5;\n
\n","markdown":"\nI like to learn SQL by doing rather than some contrived explanation of how grammar is broken down and blah blah blah. I like putting fingers on keyboards and seeing what happens when we do things. Once we start to experience what SQL can do for us then we can get a bit more fine-grained in terms of what is doing what and why.\n\n> Note, this is meant to be a very quick intro to general SQL. I've taught [the Complete Intro to SQL][sql] before and it's more in-depth. This class uses Postgres but 95% of the grammar applies to both. I'll highlight where there's differences.\n\n## SELECT\n\nYou should already have a database session open and have Chinook loaded. If not, look back at the previous lesson on how to do that.\n\nSELECT is the name of the \"command\" part of the query. It's what the command is going to do. In our case, we're going to be reading from the database which is a SELECT command.\n\n```sql\nSELECT * FROM Artist;\n```\n\n> Capitalization of SELECT and FROM isn't important. I usually do it out of habit because _everyone_ used to do it that way. It's not as common now and you'll see me do both.\n\nThis will return about 275 rows from the database. We asked for _every_ single artist in the Artist table and we got it! The `*` just means that we wanted every single bit of info available for every artist. If we only wanted _some_ of the info, we could ask for that like this.\n\n```sql\nSELECT name from Artist;\n```\n\nNotice the number is gone (which was the `ArtistId`) and we just have the name for each band. We could also ask for both like this\n\n```sql\nSELECT name, ArtistId from Artist;\n```\n\nNotice the columns came back in the order you asked for them too.\n\n> If you need to get the name of columns, `.schema ` is helpful.\n\n## WHERE\n\nIn this case, we're querying all rows without any filtering whatsoever. Sometimes this is useful but normally you have an idea of some subset of rows you want. 
Let's say we wanted to get the ID of one of my favorite bands, The Postal Service. How would we do that?\n\n```sql\nSELECT ArtistID FROM Artist WHERE name = 'The Postal Service';\n```\n\n> Very important you use single quotes here. Single quotes mean a value or string literal. Double quotes in SQL refer to he name of columns and so putting double quotes would not be valid SQL.\n\nThis will just return `174` because that's all we asked for. Likewise, we could do\n\n```sql\nSELECT name FROM Artist WHERE ArtistId = 174;\n```\n\nThis one doesn't need quotes because it's the literal number 174.\n\n## LIKE\n\nLet's say you didn't know if The Postal Service was listed under \"**The** Postal Service\" or just \"Postal Service\". This is where LIKE can help.\n\n> SQLite does not have ILIKE, just LIKE. LIKE is case insensitive.\n\n```sql\nSELECT ArtistID FROM Artist WHERE name LIKE '%Postal Service';\n```\n\nThis will match \"The Postal Service\", \"Postal Service\", \"Definitely Postal Service\" and anything that has text before \"Postal Service\". The `%` means \"give anything that matches 0 to many characters before this\".\n\nYou can use multiple too. Let's say we wanted to know every band that had \"Orchestra\" in the name. We can do that with\n\n```sql\nSELECT name FROM Artist WHERE name LIKE '%orchestra%';\n```\n\nPretty cool, right?\n\nThere are more comparator operators that you can discover for yourself as well\n\n```sql\nSELECT * FROM Artist WHERE ArtistId <= 10;\n```\n\n## LIMIT and OFFSET\n\nSometimes you just want a few responses. 
I do this a lot to just see what's in a table.\n\n```sql\nSELECT * FROM Artist LIMIT 5;\nSELECT * FROM Artist LIMIT 5 OFFSET 5;\nSELECT * FROM Artist LIMIT 5 OFFSET 10;\n```\n\nThis will allow you to page through results as well.\n\n## ORDER BY\n\nFrequently you will want to change how things are ordered as well.\n\n```SQL\nSELECT * FROM Artist ORDER BY name LIMIT 5;\nSELECT * FROM Artist ORDER BY name ASC LIMIT 5; -- Same as above query. ASC is implied if left out.\nSELECT * FROM Artist ORDER BY name DESC LIMIT 5;\n```\n","slug":"select","title":"SELECT","section":"Basic SQL","icon":"code","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/03-basic-sql/A-select.md","nextSlug":"/lessons/basic-sql/insert","prevSlug":"/lessons/running-sqlite/getting-started"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/basic-sql/tables.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/basic-sql/tables.json new file mode 100644 index 0000000..f6f2cd3 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/basic-sql/tables.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"

Let's create our first table, the BandMember table.

\n
CREATE TABLE BandMember (\n  id INTEGER PRIMARY KEY,\n  name TEXT UNIQUE NOT NULL,\n  role TEXT VARCHAR\n);\n
\n
\n

SQLite has only four real data types: INTEGER, REAL, TEXT, and BLOB. It is a dynamically typed system. Therefore you can give datatypes like TINYINT but SQLite will just treat it like an INTEGER and likewise you can give a type of VARCHAR(255) but it will just treat it like TEXT (and therefore won't truncate it.) See more here.

\n
\n

To see the table you created, run .schema BandMember in your psql instance to see it and the sequence that you created. The sequence stores the id counter.

\n

We now have a table. A table is the actual repository of data. Think of a database like a folder and a table like a spreadsheet. You can have many spreadsheets in a folder. Same with tables.

\n

We now have a table, ingredients. Our table has two fields in it, an incrementing ID and a string that is the the title of the ingredients. You can think of fields like columns in a spreadsheet.

\n

A table contains records. A record can be thought of as a row in a spreadsheet. Every time we insert a new record into a table, we're adding another row to our spreadsheet.

\n
\n

The spreadsheet analogy isn't just theoretical. You can essentially use Google Sheets as a database (appropriate for small, infrequent use.)

\n
\n

Let's add a record to our table.

\n
INSERT INTO BandMember (name, role) VALUES ('Thom Yorke', 'singer') RETURNING *;\n
\n

This adds one row with the name of Thom Yorke and the role of singer. Where is the id? Since we made it PRIMARY KEY it gets created automatically. Since this is the first item in our database, its ID will be 1. As you have likely guessed already, the next item in the table will be 2.

\n

Let's see the record.

\n
SELECT * FROM BandMember;\n
\n

You should see something like

\n
1|Thom Yorke|singer\n
\n

Amazing! We now have a table with a record in it.

\n

Let's add multiple.

\n
INSERT INTO\n    BandMember\n    (name, role)\nVALUES\n    ('Jonny Greenwood', 'guitarist'),\n    ('Colin Greenwood', 'bassist'),\n    ('Ed O''Brien', 'guitarist'),\n    ('Philip Selway', 'drummer')\nRETURNING *;\n
\n

You can add multiple add a time as long as you comma separate them.

\n

ALTER TABLE

\n

Okay so now we have a table again. What happens if we want to add a third field to our table? Let's add an image field that will point to a URL of an image of the person.

\n
ALTER TABLE BandMember ADD COLUMN image TEXT;\n
\n

Likewise we can drop it too:

\n
ALTER TABLE BandMember DROP COLUMN image;\n
\n

There are a lot of ways to alter a table. You can make it UNIQUE like we did or NOT NULL. You can also change the data type. For now, let's add back our extra column.

\n
ALTER TABLE BandMember\nADD COLUMN nationality TEXT NOT NULL DEFAULT 'UK';\n
\n

Specifying a DEFAULT when using a NOT NULL constraint will prevent errors if the column has existing null values. In this case, we're saying "add a new non-null column, and for those that exist give them the value of 'UK'."

\n
\n

SQLite does not allow you to do multiple alterations in one statement. If you want to add multiple columns, you have to do multiple alter tables commands.

\n
\n

Dropping a table

\n

What if we messed up and we didn't want an BandMember table?

\n
DROP TABLE BandMember;\n
\n

Pretty simple, right? That's it! Do be careful with this command. Like rm in bash, it's not one you can recover from. Once a table is dropped, it is dropped.

\n","markdown":"Let's create our first table, the `BandMember` table.\n\n```sql\nCREATE TABLE BandMember (\n id INTEGER PRIMARY KEY,\n name TEXT UNIQUE NOT NULL,\n role TEXT VARCHAR\n);\n```\n\n> SQLite has only four real data types: INTEGER, REAL, TEXT, and BLOB. It is a dynamically typed system. Therefore you can give datatypes like TINYINT but SQLite will just treat it like an INTEGER and likewise you can give a type of VARCHAR(255) but it will just treat it like TEXT (and therefore won't truncate it.) [See more here][types].\n\nTo see the table you created, run `.schema BandMember` in your psql instance to see it and the sequence that you created. The sequence stores the `id` counter.\n\nWe now have a table. A table is the actual repository of data. Think of a database like a folder and a table like a spreadsheet. You can have many spreadsheets in a folder. Same with tables.\n\nWe now have a table, ingredients. Our table has two fields in it, an incrementing ID and a string that is the the title of the ingredients. You can think of fields like columns in a spreadsheet.\n\nA table contains records. A record can be thought of as a row in a spreadsheet. Every time we insert a new record into a table, we're adding another row to our spreadsheet.\n\n> The spreadsheet analogy isn't just theoretical. [You can essentially use Google Sheets as a database][sheets] (appropriate for small, infrequent use.)\n\nLet's add a record to our table.\n\n```sql\nINSERT INTO BandMember (name, role) VALUES ('Thom Yorke', 'singer') RETURNING *;\n```\n\nThis adds one row with the name of Thom Yorke and the role of singer. Where is the id? Since we made it `PRIMARY KEY` it gets created automatically. Since this is the first item in our database, its ID will be `1`. 
As you have likely guessed already, the next item in the table will be `2`.\n\nLet's see the record.\n\n```sql\nSELECT * FROM BandMember;\n```\n\nYou should see something like\n\n```plaintext\n1|Thom Yorke|singer\n```\n\nAmazing! We now have a table with a record in it.\n\nLet's add multiple.\n\n```sql\nINSERT INTO\n BandMember\n (name, role)\nVALUES\n ('Jonny Greenwood', 'guitarist'),\n ('Colin Greenwood', 'bassist'),\n ('Ed O''Brien', 'guitarist'),\n ('Philip Selway', 'drummer')\nRETURNING *;\n```\n\nYou can add multiple add a time as long as you comma separate them.\n\n## ALTER TABLE\n\nOkay so now we have a table again. What happens if we want to add a third field to our table? Let's add an `image` field that will point to a URL of an image of the person.\n\n```sql\nALTER TABLE BandMember ADD COLUMN image TEXT;\n```\n\nLikewise we can drop it too:\n\n```sql\nALTER TABLE BandMember DROP COLUMN image;\n```\n\nThere are a lot of ways to alter a table. You can make it UNIQUE like we did or NOT NULL. You can also change the data type. For now, let's add back our extra column.\n\n```sql\nALTER TABLE BandMember\nADD COLUMN nationality TEXT NOT NULL DEFAULT 'UK';\n```\n\nSpecifying a DEFAULT when using a NOT NULL constraint will prevent errors if the column has existing null values. In this case, we're saying \"add a new non-null column, and for those that exist give them the value of 'UK'.\"\n\n> SQLite does not allow you to do multiple alterations in one statement. If you want to add multiple columns, you have to do multiple alter tables commands.\n\n## Dropping a table\n\nWhat if we messed up and we didn't want an BandMember table?\n\n```sql\nDROP TABLE BandMember;\n```\n\nPretty simple, right? That's it! Do be careful with this command. Like `rm` in bash, it's not one you can recover from. 
Once a table is dropped, it is dropped.\n\n[sheets]: https://www.npmjs.com/package/google-spreadsheet\n[types]: https://www.sqlite.org/datatype3.html\n","slug":"tables","title":"Tables","section":"Basic SQL","icon":"code","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/03-basic-sql/C-tables.md","nextSlug":"/lessons/intermediate-sql/relational-data","prevSlug":"/lessons/basic-sql/insert"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/alternatives-to-sqlite3.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/alternatives-to-sqlite3.json new file mode 100644 index 0000000..ca747ae --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/alternatives-to-sqlite3.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"Alternatives to sqlite3"},"html":"

Now that you've experienced the OG SQLite client for Node.js, let's look at a few of the alternatives.

\n

Node.js's built-in client

\n

⛓️ Link

\n

This is the one we'll all eventually use in the future. Bun started shipping their own builtin clients so Node.js followed suit. This is unstable and under active development so expect to be able to use it some time in the future.

\n

better-sqlite3

\n

⛓️ Link

\n

As it states, it is an attempt to be sqlite3 but better. This SDK adds support for features like a synchronous API, better transaction support, better performance, and other things that are not supported in sqlite3.

\n

promised-sqlite3

\n

⛓️ Link

\n

There's a few different versions of this that people have done, but generally speaking they take sqlite3 and make it easier to use with promises. You can also do this yourself with Node.js's promisify function and the normal sqlite3 library (this is normally what I do.)

\n

Prisma, Drizzle, Sequelize

\n\n

All of these are what you would call an ORM, object relational mapping. It is a library that abstracts away the actual writing of SQL. You call various functions and methods in the library and the library will generate and send SQL to your database for you.

\n

I have a love/hate relationship with ORMs (having mostly used a lot of Django's ORM when I worked at Reddit). On one hand, it makes writing code frequently very easy when you're doing straightforward stuff. They also frequently can handle things like data migrations for you which can be nice.

\n

However, I have found that ORMs can frequently make things harder too. Once you want to do something that ORM doesn't have or doesn't want you to do, it makes everything harder. It can sometimes be slower.

\n

That said, these three ORMs are quite popular right now and I think they've made great strides in making them more easy to use and more performant. While I typically don't use them myself, I think it's a much more defendable thing to do than it used to be.

\n","markdown":"\nNow that you've experienced the OG SQLite client for Node.js, let's look at a few of the alternatives.\n\n## Node.js's built-in client\n\n[⛓️ Link][nodejs]\n\nThis is the one we'll all eventually use in the future. [Bun][bun] started shipping their own builtin clients so Node.js followed suit. This is unstable and under active development so expect to be able to use it some time in the future.\n\n## better-sqlite3\n\n[⛓️ Link][better-sqlite3]\n\nAs it states, it is an attempt to be sqlite3 but better. This SDK adds support for features like a synchronous API, better transaction support, better performance, and other things that are not supported in sqlite3.\n\n## promised-sqlite3\n\n[⛓️ Link][promised-sqlite3]\n\nThere's a few different versions of this that people have done, but generally speaking they take sqlite3 and make it easier to use with promises. You can also do this yourself with [Node.js's promisify function][promisify] and the normal sqlite3 library (this is normally what I do.)\n\n## Prisma, Drizzle, Sequelize\n\n- [⛓️ Prisma][prisma]\n- [⛓️ Drizzle][drizzle]\n- [⛓️ Sequelize][sequelize]\n\nAll of these are what you would call an ORM, object relational mapping. It is a library that abstracts away the actual writing of SQL. You call various functions and methods in the library and the library will generate and send SQL to your database for you.\n\nI have a love/hate relationship with ORMs (having mostly used a lot of [Django's ORM][django] when I worked at Reddit). On one hand, it makes writing code frequently very easy when you're doing straightforward stuff. They also frequently can handle things like data migrations for you which can be nice.\n\nHowever, I have found that ORMs can frequently make things harder too. Once you want to do something that ORM doesn't have or doesn't want you to do, it makes everything harder. 
It can sometimes be slower.\n\nThat said, these three ORMs are quite popular right now and I think they've made great strides in making them more easy to use and more performant. While I typically don't use them myself, I think it's a much more defendable thing to do than it used to be.\n\n[nodejs]: https://nodejs.org/api/sqlite.html\n[bun]: https://bun.sh/docs/api/sqlite\n[better-sqlite3]: https://github.com/WiseLibs/better-sqlite3\n[promised-sqlite3]: https://github.com/tguichaoua/promised-sqlite3\n[promisify]: https://nodejs.org/api/util.html#utilpromisifyoriginal\n[drizzle]: https://orm.drizzle.team/docs/get-started-sqlite\n[prisma]: https://www.prisma.io/docs/getting-started/quickstart\n[sequelize]: https://sequelize.org/docs/v7/databases/sqlite/\n[django]: https://docs.djangoproject.com/en/5.0/topics/db/queries/\n","slug":"alternatives-to-sqlite3","title":"Alternatives to sqlite3","section":"Build a Project with Node.js and SQLite","icon":"circle-nodes","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/05-build-a-project-with-nodejs-and-sqlite/D-alternatives-to-sqlite3.md","nextSlug":"/lessons/what-is-unique-to-sqlite/flexible-typing","prevSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/project"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/project.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/project.json new file mode 100644 index 0000000..85e0ca0 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/project.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"

You are going to write the invoice.js API route for our invoice viewer app.

\n

Click here to see the file you'll edit.

\n

This is a Fastify app but that shouldn't really matter. You should really only need to write SQLite code.

\n

All of the frontend and Fastify code should just work with no need for you to modify it, but feel free to. It's written in HTMX.

\n

This doesn't need to be the most optimized code. You can nest callbacks if you want to, you can make multiple queries, or you can try to optimize the hell out of it.

\n

I installed sqlite3 for you already, but if you're so inclined, feel free to use other SQLite libraries.

\n

The point here is to learn and experiment.

\n

The version I wrote (which is definitely not the optimal solution) is here.

\n","markdown":"You are going to write the invoice.js API route for our invoice viewer app.\n\n[Click here][file] to see the file you'll edit.\n\nThis is a Fastify app but that shouldn't really matter. You should really only need to write SQLite code.\n\nAll of the frontend and Fastify code should just work with no need for you to modify it, but feel free to. It's written in HTMX.\n\nThis doesn't need to be the most optimized code. You can nest callbacks if you want to, you can make multiple queries, or you can try to optimize the hell out of it.\n\nI installed sqlite3 for you already, but if you're so inclined, feel free to use other SQLite libraries.\n\nThe point here is to learn and experiment.\n\nThe version I wrote (which is definitely _not_ the optimal solution) is [here][solution].\n\n[file]: https://github.com/btholt/sqlite-app/blob/main/invoice.js\n[solution]: https://github.com/btholt/sqlite-app/blob/main/invoice-complete.js\n","slug":"project","title":"Project","section":"Build a Project with Node.js and SQLite","icon":"circle-nodes","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/05-build-a-project-with-nodejs-and-sqlite/C-project.md","nextSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/alternatives-to-sqlite3","prevSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/sqlite3"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/sqlite3.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/sqlite3.json new file mode 100644 index 0000000..9abbd6f --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/sqlite3.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"sqlite3"},"html":"

There are so many libraries for connecting to SQLite. It's a very ubiquitous piece of software and so lots of people have opinions on how to build with it.

\n

I am going to show you the standard one, the first one, the one that everyone knows. I will say that when I build stuff with SQLite, it is not the one I normally choose. However I think it's good for you to get exposure to the OG and then you can branch out and choose other libraries once you know what you're getting away from.

\n

[Click here][sqlite3] to go to the sqlite3 docs.

\n
\n

Why is it sqlite3? It's because it's for version 3 of SQLite, the third major version.

\n
\n

Connecting to the database

\n

Connecting to a SQLite database from Node.js is a snap. Import the sqlite3 library and create a new database.

\n
import sqlite3 from "sqlite3";\n\nconst db = new sqlite3.Database("./my-database.db");\n
\n

Again, keep in mind that SQLite databases are files. You need to give it a file path to be able to open the database and to start writing to it.

\n

Querying the database

\n

Once you have your db Database instance, you can then start running queries. There's a few functions to be aware of. We'll talk about the parameters to the functions after this.

\n\n

Node.js style callbacks (a.k.a. "Nodebacks")

\n

Some of you whippersnappers may be too young to remember writing JavaScript before promises, async-await, generators, etc. Before the magical time of ES2015 / ES6, we only had one way to deal with asynchronous code, callbacks. These are just functions that are invoked when something async completes. JS has long since moved past this (nearly a decade as of writing) and it's to the point that it feels weird to write callbacks as we've been writing promises so long.

\n

sqlite3 never updated to use promises so you still have to use Node.js-style callbacks (or Nodebacks, as they were sometimes called.) They are Node.js callbacks as they always have the signature of function myCallback(error, data). If that error is populated with anything, it means that something went wrong. Otherwise it succeeded. That's it. That's what makes a "Nodeback".

\n

So with .all(), your code will look like this

\n
db.all(\n  `SELECT * FROM Track WHERE name LIKE '%live%'`,\n  [], // we'll talk about this array in a sec\n  function (err, rows) {\n    if (err) {\n      // do error stuff here\n      return;\n    }\n\n    // do stuff here with rows\n  }\n);\n
\n

Parameters and SQL Injection

\n

Let's take this example:

\n
const id = getIdFromUser();\ndb.get(`SELECT * FROM Track WHERE TrackId=${id}`, [], function (err, row) {\n  // do stuff\n});\n
\n

What's wrong with this? There's a major problem with this. If that id is coming from a user, it means they can put anything there, right? So they could in theory put valid SQL in there. What if they put 15; DROP TABLE Customer? Then your SQL statement would be SELECT * FROM Track WHERE TrackId=15; DROP TABLE Customer. Uh oh, goodbye to all your customer data. They could also do even more nefarious things like steal your data. This is called SQL injection. This is why you never, ever, ever, ever put user input directly into a SQL statement. You always let the library do it for you. No exceptions.

\n

So how do we fix this? sqlite3 gives us that array to handle just that.

\n
const id = getIdFromUser();\ndb.get(`SELECT * FROM Track WHERE TrackId=?`, [id], function (err, row) {\n  // do stuff\n});\n
\n

Done! By replacing it with the question mark and providing the user input in the array, sqlite3 will guarantee you that it's safe to use that no matter where it came from. You can also use an object notation.

\n
const id = getIdFromUser();\ndb.get(\n  `SELECT * FROM Track WHERE TrackId=$id`,\n  { $id: id },\n  function (err, row) {\n    // do stuff\n  }\n);\n
\n

Both are fine. Question marks rely on order, object notation relies on names matching.

\n

This should be enough of an intro for you to write your sample app.

\n","markdown":"\nThere are so many libraries for connecting to SQLite. It's a very ubiquitious piece of software and so lots of people have opinions on how to build with it.\n\nI am going to show you the standard one, the first one, the one that everyone knows. I will say that when I build stuff with SQLite, it is _not_ the one I normally choose. However I think it's good for you to get exposure to the OG and then you can branch out and choose other libraries once you know what you're getting away from.\n\n[Click here][sqlite3] to go to the sqlite3 docs.\n\n> Why is it sqlite3? It's because it's for version 3 of SQLite, the third major version.\n\n## Connecting to the database\n\nConnecting to a SQLite database from Node.js is a snap. Import the the sqlite3 library and create a `new` database.\n\n```javascript\nimport sqlite3 from \"sqlite3\";\n\nconst db = new sqlite3.Database(\"./my-database.db\");\n```\n\nAgain, keep in mind that SQLite databases are files. You need to give it a file path to be able to open the database and to start writing to it.\n\n## Querying the database\n\nOnce you have your `db` Database instance, you can then start running queries. There's a few functions to be aware of. We'll talk about the parameters to the functions after this.\n\n- `db.run(sql, params, callback)` – This will run a query and not care about the results. You generally use this to run `UPDATE` and `DELETE` commands as you're just updating and not necessarily caring about what the database has to say back to you.\n- `db.get(sql, params, callback)` – Runs a query and only gives you back the first one. Some times all you need is one result (like when you're querying a unique ID). This simplifies your code a bit because you don't need an array of one result.\n- `db.all(sql, params, callback)` – Like get, but it gives you all results back that match the query instead of just one. It always returns an array. 
If you got no results, you get an empty array\n- `db.each(sql, params, callback, complete)` – Like all, but instead of one big array of results, your callback will get called once for each row in the set. Then the complete function will be called to let you know it's done. This is nice if you have some action you want to take on each row as it's basically a `.map()` of the result set instead of an array.\n\n## Node.js style callbacks (a.k.a. \"Nodebacks\")\n\nSome of you whippersnappers may be too young to remember writing JavaScript before promises, async-await, generators, etc. Before the magical time of ES2015 / ES6, we only had one way to deal with asynchronous code, callbacks. These are just functions that are invoked when something async completes. JS has since long moved past this (nearly a decade as of writing) and it's to the point that it feels weird to write callbacks as we've been writing promises so long.\n\nsqlite3 never updated to use promises so you still have to use Node.js-style callbacks (or Nodebacks, as they were sometimes called.) They are Node.js callbacks as they always have the signature of `function myCallback(error, data)`. If that `error` is populated with anything, it means that something went wrong. Otherwise it succeeded. That's it. That's what makes a \"Nodeback\".\n\nSo with `.all()`, your code will look like this\n\n```javascript\ndb.rows(\n `SELECT * FROM Track WHERE name LIKE '%live%'`,\n [], // we'll talk about this array in a sec\n function (err, rows) {\n if (err) {\n // do error stuff here\n return;\n }\n\n // do stuff here with rows\n }\n);\n```\n\n## Parameters and SQL Injection\n\nLet's take this example:\n\n```javascript\nconst id = getIdFromUser();\ndb.get(`SELECT * FROM Track WHERE TrackId=${id}`, [], function (err, row) {\n // do stuff\n});\n```\n\nWhat's wrong with this? There's a **major** problem with this. If that id is coming from a user, it means they can put _anything_ there, right? 
So they could in theory put valid SQL in there. What if they put `15; DROP TABLE Customer`? Then your SQL statement would be `SELECT * FROM Track WHERE id=15; DROP TABLE Customer`. Uh oh, good bye to all your customer data. They could also do even more nefarious things like steal your data. This is called SQL injection. This is why you **never, ever, ever, ever** put user input directly into a SQL statement. You _always_ let the library do it for you. No exceptions.\n\nSo how do we fix this? sqlite3 gives us that array to handle just that.\n\n```javascript\nconst id = getIdFromUser();\ndb.get(`SELECT * FROM Track WHERE TrackId=?`, [id], function (err, row) {\n // do stuff\n});\n```\n\nDone! By replacing it with the question mark and providing the user input in the array, sqlite3 will guarantee you that it's safe to use that no matter where it came from. You can also use an object notation.\n\n```javascript\nconst id = getIdFromUser();\ndb.get(\n `SELECT * FROM Track WHERE TrackId=$id`,\n { $id: id },\n function (err, row) {\n // do stuff\n }\n);\n```\n\nBoth are fine. 
Question marks rely on order, objection notation relies on names matching.\n\nThis should be enough of an intro for you to write your sample app.\n","slug":"sqlite3","title":"sqlite3","section":"Build a Project with Node.js and SQLite","icon":"circle-nodes","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/05-build-a-project-with-nodejs-and-sqlite/B-sqlite3.md","nextSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/project","prevSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/the-example-app"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/the-example-app.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/the-example-app.json new file mode 100644 index 0000000..6083f49 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/build-a-project-with-nodejs-and-sqlite/the-example-app.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"
\n

🚨 Go clone the example app repo from GitHub. Once cloned, run npm install to install all the necessary files and then run npm run dev to start the development server. Please use Node.js version 20+.

\n
\n

We are going to build an app frontend to the Chinook database. Since SQLite is file-based, I went ahead and included a fresh copy for you to use.

\n

This app uses Fastify, HTMX, and Handlebars to make an app to render invoices for users. You won't need to touch the HTMX nor Handlebars at all, and you'll only need to write a minimal amount of Fastify.

\n

The only file you'll be working on is invoice.js. This has a bare minimal amount of Fastify code to get you started working on the route. You'll be able to try the frontend code at http://localhost:8080 and to hit the route directly at http://localhost:8080/invoice?id=1. You do not need to modify any other files.

\n

I have also included my code at invoice-complete.js. You can see the result of that function call at http://localhost:8080/invoice-complete?id=1.

\n
\n

I would strongly suggest you attempt to write the code yourself first before you look at how I did it. I know it can be a struggle but I find that I learn the most in those moments of struggle where I know my destination and I'm unsure of how to navigate it and I have to chart the course myself.

\n
\n","markdown":"> 🚨 Go clone the example [app repo from GitHub][app]. Once cloned, run `npm install` to install all the necessary files and then run `npm run dev` to start the development server. Please use Node.js version 20+.\n\nWe are going to build an app frontend to the Chinook database. Since SQLite is file-based, I went ahead and included a fresh copy for you to use.\n\nThis app uses [Fastify][fastify], [HTMX][htmx], and [Handlebars][handlebars] to make an app to render invoices for users. You won't need to touch the HTMX nor Handlebars at all, and you'll only need to write a minimal amount of Fastify.\n\nThe only file you'll be working on is invoice.js. This has a bare minimal amount of Fastify code to get you started working on the route. You'll be able to try the frontend code at [http://localhost:8080](http://localhost:8080) and to hit the route directly at [http://localhost:8080/invoice?id=1](http://localhost:8080/invoice?id=1). You do not need to modify any other files.\n\nI have also included my code at invoice-complete.js. You can see the result of that function call at [http://localhost:8080/invoice-complete?id=1](http://localhost:8080/invoice-complete?id=1).\n\n> I would strongly suggest you attempt to write the code yourself first before you look how I did it. 
I know it can be a struggle but I find that I learn the most in those moments of struggle where I know my destination and I'm unsure of how to navigate it and I have to chart the course myself.\n\n[app]: https://github.com/btholt/sqlite-app\n[fastify]: https://fastify.dev/\n[htmx]: https://htmx.org/\n[handlebars]: https://handlebarsjs.com/\n","slug":"the-example-app","title":"The Example App","section":"Build a Project with Node.js and SQLite","icon":"circle-nodes","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/05-build-a-project-with-nodejs-and-sqlite/A-the-example-app.md","nextSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/sqlite3","prevSlug":"/lessons/intermediate-sql/subqueries"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/aggregation.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/aggregation.json new file mode 100644 index 0000000..5b3faa2 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/aggregation.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"description":""},"html":"

Occasionally you need to query for macro statistics about your tables, not just query for individual rows.

\n

Let's use what we've already used before, COUNT. What if we want to know how many tracks we have overall in our track table?

\n
SELECT COUNT(*) FROM Track;\n
\n

COUNT is an aggregation function. Giving it the * says "count everything and don't remove nulls or duplicates of any variety".

\n

What if we wanted to count how many distinct genres of tracks we have in the Track table?

\n
SELECT COUNT(DISTINCT GenreId) FROM Track;\n
\n

This is going to tell us how many different genres we have in the Track table. Keep in mind the query to see what the distinct genres are.

\n
SELECT DISTINCT GenreId FROM Track;\n
\n

The first query gives you the number, the count of how many distinct things in the list. The second query gives you what those distinct things are with no indication of how many of each there are. There could be 1 fruit and 10,000 vegetables and you'd have no indication of that.

\n

Okay, so you want to see both at the same time? Let's see that.

\n
SELECT\n  GenreId, COUNT(GenreId)\nFROM\n  Track\nGROUP BY\n  GenreId;\n
\n

This is combining both of what we saw plus a new thing, GROUP BY. This allows us to specify what things we want to aggregate together: the type. Keep in mind if you want to SELECT for something with a GROUP BY clause, you do need to put them in the GROUP BY clause.

\n

Now what if we want to include the actual genre names?

\n
SELECT\n  Track.GenreId, Genre.Name, COUNT(Track.GenreId)\nFROM\n  Track\nJOIN\n    Genre\nON\n    Genre.GenreId = Track.GenreId\nGROUP BY\n  Track.GenreId; -- you can also have Genre.GenreId here, no difference\n
\n

This one can be a bit of a mind trip. Remember the aggregation happens at the end. So after all your selects happen, then on the row set, it goes and runs the aggregation function here using the GROUP BY. So we get a bunch of rows with their Genre.Name attached, and then we count those up.

\n

What if we wanted to find the biggest or smallest TrackId within each genre? (Doesn't seem that useful but I'll still show you how.)

\n
SELECT\n  Track.GenreId, Genre.Name, MAX(Track.TrackId) -- MIN(Track.TrackId)\nFROM\n  Track\nJOIN\n    Genre\nON\n    Genre.GenreId = Track.GenreId\nGROUP BY\n  Track.GenreId;\n
\n

HAVING

\n

What if you only want genres that have more than 300 tracks?

\n
\n

The following query does not work.

\n
\n
SELECT\n  Track.GenreId, Genre.Name, COUNT(Track.GenreId)\nFROM\n  Track\nJOIN\n    Genre\nON\n    Genre.GenreId = Track.GenreId\nWHERE\n    COUNT(Track.GenreId) > 500\nGROUP BY\n  Track.GenreId;\n
\n

You can't use WHERE because that applies to the initial result set. You could filter out all rock songs or only select tracks with a certain length. But you can't filter based on the aggregated values because that happens after WHERE happens. This is why HAVING is useful.

\n
SELECT\n  Track.GenreId, Genre.Name, COUNT(Track.GenreId)\nFROM\n  Track\nJOIN\n    Genre\nON\n    Genre.GenreId = Track.GenreId\nGROUP BY\n  Track.GenreId\nHAVING\n    COUNT(Track.GenreId) > 300;\n
\n

Using HAVING we can filter on the aggregated set. Keep that in mind if you ever need to do that.

\n","markdown":"\nOccasionally you need to query for macro statistics about your tables, not just query for individual rows.\n\nLet's use what we've already used before, `COUNT`. What if we want to know how many tracks we have overall in our track table?\n\n```sql\nSELECT COUNT(*) FROM Track;\n```\n\n`COUNT` is an aggregation function. We give the `*` is saying \"count everything and don't remove nulls or duplicates of any variety\".\n\nWhat if we wanted to count how many distinct `genre`s of tracks we have in the Track table?\n\n```sql\nSELECT COUNT(DISTINCT GenreId) FROM Track;\n```\n\nThis is going to tell how many different `type`s we have in the ingredients table. Keep in mind the query to see _what_ the distinct ingredients are.\n\n```sql\nSELECT DISTINCT GenreId FROM Track;\n```\n\nThe first query gives you the number, the count of many distinct things in the list. The second query gives you what those distinct things are with no indication of how many of each there are. There could be 1 fruit and 10,000 vegetables and you'd not indicate that.\n\nOkay, so you want to see both at the same time? Let's see that.\n\n```sql\nSELECT\n GenreId, COUNT(GenreId)\nFROM\n Track\nGROUP BY\n GenreId;\n```\n\nThis is combining both of what we saw plus a new thing, `GROUP BY`. This allows us to specify what things we want to aggregate together: the type. Keep in mind if you want to SELECT for something with a GROUP BY clause, you do need to put them in the GROUP BY clause.\n\nNow what if we want to include the actual genre names?\n\n```sql\nSELECT\n Track.GenreId, Genre.Name, COUNT(Track.GenreId)\nFROM\n Track\nJOIN\n Genre\nON\n Genre.GenreId = Track.GenreId\nGROUP BY\n Track.GenreId; -- you can also have Genre.GenreId here, no difference\n```\n\nThis one can be a bit of a mind trip. Remember the aggregation happens at the end. So after all your selects happen, then on the row set, it goes and runs the aggregation function here using the GROUP BY. 
So we get a bunch of rows with their Genre.Name attached, and then we count those up.\n\nWhat if we wanted to find the biggest or smallest TrackId with each genre? (Doesn't seem that useful but I'll still show you how.)\n\n```sql\nSELECT\n Track.GenreId, Genre.Name, MAX(Track.TrackId) -- MIN(Track.TrackId)\nFROM\n Track\nJOIN\n Genre\nON\n Genre.GenreId = Track.GenreId\nGROUP BY\n Track.GenreId;\n```\n\n## HAVING\n\nWhat if you only want genres that have more than 300 tracks?\n\n> The following query does not work.\n\n```sql\nSELECT\n Track.GenreId, Genre.Name, COUNT(Track.GenreId)\nFROM\n Track\nJOIN\n Genre\nON\n Genre.GenreId = Track.GenreId\nWHERE\n COUNT(Track.GenreId) > 500\nGROUP BY\n Track.GenreId;\n```\n\nYou can't use WHERE because that applies to the initial result set. You could filter out all rock songs or only select tracks with a certain length. But you can't filter based on the aggregated values because that happens after WHERE happens. This is why HAVING is useful.\n\n```sql\nSELECT\n Track.GenreId, Genre.Name, COUNT(Track.GenreId)\nFROM\n Track\nJOIN\n Genre\nON\n Genre.GenreId = Track.GenreId\nGROUP BY\n Track.GenreId\nHAVING\n COUNT(Track.GenreId) > 300;\n```\n\nUsing HAVING we can filter on the aggregated set. 
Keep that mind if you ever need to do that.\n","slug":"aggregation","title":"Aggregation","section":"Intermediate SQL","icon":"table","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/04-intermediate-sql/D-aggregation.md","nextSlug":"/lessons/intermediate-sql/subqueries","prevSlug":"/lessons/intermediate-sql/foreign-keys"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/foreign-keys.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/foreign-keys.json new file mode 100644 index 0000000..0912ce1 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/foreign-keys.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"

We can actually make that relationship between IDs explicit. We can make it so AlbumId isn't just an implicit relationship but an actual relationship tracked by SQLite itself. This relationship is called a foreign key: a key that references foreign information in another table. Our Chinook database already has these relationships built in. Type .schema Track and notice the FOREIGN KEY part on MediaTypeId, GenreId, and AlbumId.

\n
\n

SQLite does not enforce foreign key rules by default. Every time you open the database connection you need to tell it to respect them with PRAGMA foreign_keys=on;. Yes, it's by connection, and no, there's no way to always turn it on. Probably my biggest complaint about SQLite. It's done for historical / compat reasons.

\n
\n
PRAGMA foreign_keys=on;\n\nINSERT INTO\n    Track\n    (Name, AlbumId, MediaTypeId, Composer, Milliseconds, Bytes, UnitPrice)\nVALUES\n    ('lol', 99999, 99999, 99999, 99999, 99999, 99999);\n\nDELETE FROM\n    Genre\nWHERE\n    GenreId=24;\n
\n
\n

Both of those queries should fail due to foreign key constraints.

\n
\n\n","markdown":"We can actually make that relationship between IDs explicit. We can make it so `AlbumId` isn't just an implicit relationship but an actual relationship tracked by SQLite itself. This relationship is called a foreign key: a key that references foreign information in another table. Our Chinook database already has these relationships built in. Type `.schema Track` and notice the FOREIGN KEY part on MediaTypeId, GenreId, and AlbumId.\n\n> SQLite does not enforce foreign key rules by default. Every time you open the database connection you need to tell it respect it with `PRAGMA foreign_keys=on;`. Yes, it's by connection, and no, there's no way to always turn it on. Probably my biggest complaint about SQLite. It's done for historical / compat reasons.\n\n```sql\nPRAGMA foreign_keys=on;\n\nINSERT INTO\n Track\n (Name, AlbumId, MediaTypeId, Composer, Milliseconds, Bytes, UnitPrice)\nVALUES\n ('lol', 99999, 99999, 99999, 99999, 99999, 99999);\n\nDELETE FROM\n Genre\nWHERE\n GenreId=24;\n```\n\n> Both of those queries should fail due to foreign key constraints.\n\n- PRAGMAs are basically policies you can tell SQLite to respect. In this case, we are saying for _this_ connection, please enforce rules around foreign keys. You need to do this for every connection to SQLite. There's a bunch but I tend not to use too many of them.\n- You can actually set the foreign key pragma in the connection string when you connect in code. I'll show you how later.\n- Notice it won't let us neither insert with violations of the constraint nor delete.\n- You can also do `ON UPDATE` constraints as well.\n- We did a `NO ACTION` constraint, but there are others as well. NO ACTION means that if a foreign key relationship would be severed and thus leaving orphan rows, error out the query. `RESTRICT` does this as well (but has some minuet difference that has never been important to me.) `ON DELETE CASCADE` will delete any affected rows. 
So if I delete \"rock\" from the Genre table, it will go delete every rock track from the Track table. You can also do `ON DELETE SET NULL` and `ON DELETE SET DEFAULT` to just change the value.\n","slug":"foreign-keys","title":"Foreign Keys","section":"Intermediate SQL","icon":"table","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/04-intermediate-sql/C-foreign-keys.md","nextSlug":"/lessons/intermediate-sql/aggregation","prevSlug":"/lessons/intermediate-sql/other-types-of-joins"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/other-types-of-joins.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/other-types-of-joins.json new file mode 100644 index 0000000..4c35b6d --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/other-types-of-joins.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"

We looked at several ways of doing joins in the last section by using the JOIN keyword. One key thing about that is we were doing INNER JOINs. If you don't specify what type of join to do, it is implicitly an INNER JOIN. In practice it didn't matter because our queries matched up 1:1:

\n
SELECT\n    B.Name, A.Title\nFROM\n    Album A\nINNER JOIN\n    Artist B ON\n        A.ArtistId = B.ArtistId\nLIMIT\n    5;\n
\n
\n

INNER JOIN or just JOIN are the same thing.

\n
\n

\"diagram

\n

Our INNER JOIN is giving us the inner section on the Venn Diagram between the two tables. Let's say that we have an artist in the artist table that we don't have any albums for. If we run the following two queries:

\n
SELECT\n    B.Name, A.Title\nFROM\n    Album A\nINNER JOIN\n    Artist B ON\n        A.ArtistId = B.ArtistId\nWHERE\n    B.Name = 'Snow Patrol';\n\nSELECT\n    B.Name, A.Title\nFROM\n    Album A\nRIGHT JOIN\n    Artist B ON\n        A.ArtistId = B.ArtistId\nWHERE\n    B.Name = 'Snow Patrol';\n
\n

Notice the first query doesn't give us any results. That's because it's an inner join – it will only give us things from rows in both tables. Now if we run the second query, it'll give anything that's in the inner part (which is nothing, as we just saw) and it will give us anything that just exists in the Artist Table (the "right" table in this case.)

\n
\n

LEFT refers to the FROM clause table, RIGHT refers to what comes from the JOIN.

\n
\n

What if we wanted to see all artists without an album in the album table?

\n
SELECT\n    B.Name, A.Title\nFROM\n    Album A\nRIGHT OUTER JOIN\n    Artist B ON\n        A.ArtistId = B.ArtistId;\n
\n

That OUTER part means only take things that don't have anything in the Album table, so it will only give us artists with no albums in the albums table.

\n

NATURAL JOIN

\n
SELECT\n    B.Name, A.Title\nFROM\n    Album A\nNATURAL JOIN\n    Artist B\nLIMIT\n    5;\n\nSELECT\n    B.Name, A.Title, C.Name\nFROM\n    Album A\nNATURAL JOIN\n    Artist B\nNATURAL JOIN\n    Track C\nLIMIT\n    5;\n
\n

I don't really like NATURAL JOIN but I thought I'd mention it because you'll see it from time to time. By saying NATURAL JOIN in this case, we're saying "Hey, I have columns in both tables that are named the same thing. Using that, join these tables."

\n

It might seem convenient (and if you structure your tables well like Chinook is, it's easy) but it's so implicit. I don't like magic code and this feels magic and brittle. If you rename a column or drop it or anything like that, you can break your queries or, even worse, get the wrong data back. I'd say steer clear.

\n","markdown":"We looked at several ways of doing joins in the last section by using the `JOIN` keyword. One key thing about that is we were doing `INNER JOIN`s. If you don't specify what type of join to do, it is implicitly an INNER JOIN. In practice it didn't matter because our queries matched up 1:1:\n\n```sql\nSELECT\n B.Name, A.Title\nFROM\n Album A\nINNER JOIN\n Artist B ON\n A.ArtistId = B.ArtistId\nLIMIT\n 5;\n```\n\n> `INNER JOIN` or just `JOIN` are the same thing.\n\n[![diagram of SQL joins](/images/SQL_Joins.png)](https://commons.wikimedia.org/wiki/File:SQL_Joins.svg)\n\nOur INNER JOIN is giving us the inner section on the Venn Diagram between the two tables. Let's say that we have an artist in the artist table that we don't have any albums for. If we just\n\n```sql\nSELECT\n B.Name, A.Title\nFROM\n Album A\nINNER JOIN\n Artist B ON\n A.ArtistId = B.ArtistId\nWHERE\n B.Name = 'Snow Patrol';\n\nSELECT\n B.Name, A.Title\nFROM\n Album A\nRIGHT JOIN\n Artist B ON\n A.ArtistId = B.ArtistId\nWHERE\n B.Name = 'Snow Patrol';\n```\n\nNotice the first query doesn't give us any results. That's because it's an inner join – it will only give us things from rows in _both_ tables. 
Now if we run the second query, it'll give anything that's in the inner part (which is nothing, as we just saw) _and_ it will give us anything that _just_ exists in the Artist Table (the \"right\" table in this case.)\n\n> LEFT refers to the FROM clause table, RIGHT refers to what comes from the JOIN.\n\nWhat if we wanted to see all artists without an album in the album table?\n\n```sql\nSELECT\n B.Name, A.Title\nFROM\n Album A\nRIGHT OUTER JOIN\n Artist B ON\n A.ArtistId = B.ArtistId;\n```\n\nThat OUTER part means _only_ take things that don't have anything in the Album table, so it will only give us artists with no albums in the albums table.\n\n## NATURAL JOIN\n\n```sql\nSELECT\n B.Name, A.Title\nFROM\n Album A\nNATURAL JOIN\n Artist B\nLIMIT\n 5;\n\nSELECT\n B.Name, A.Title, C.Name\nFROM\n Album A\nNATURAL JOIN\n Artist B\nNATURAL JOIN\n Track C\nLIMIT\n 5;\n```\n\nI don't really like NATURAL JOIN but I thought I'd mention it because you'll see it from time to time. By saying NATURAL JOIN in this case, we're saying \"Hey, I have columns in both tables that are named the same thing. Using that, join these tables.\"\n\nIt might seem convenient (and if you structure your tables well like Chinook is, it's easy) but it's so implicit. I don't like magic code and this feels magic and brittle. If you rename a column or drop it or anything like that, you can break your queries or, even worse, get the wrong data back. 
I'd say steer clear.\n","slug":"other-types-of-joins","title":"Other Types of Joins","section":"Intermediate SQL","icon":"table","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/04-intermediate-sql/B-other-types-of-joins.md","nextSlug":"/lessons/intermediate-sql/foreign-keys","prevSlug":"/lessons/intermediate-sql/relational-data"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/relational-data.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/relational-data.json new file mode 100644 index 0000000..fd891c1 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/relational-data.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"

So far we've done a one-to-one matching of records. We've used a record in a database to represent one item: one band member, band, etc.

\n

Now we're going to get into records that can relate to each other. Let's think about recipes. A recipe has multiple ingredients. That word has is key here. It means there is a relationship. A single recipe has many ingredients. An ingredient can also be in many recipes. A tomato is both in pizza sauce and in a BLT. This is called a many-to-many relationship.

\n

There are also one-to-many relationships, like imagine if we had multiple photos of each of our ingredients. A single ingredient will have five photos. And those photos will only belong to one ingredient. A photo of a green pepper doesn't make sense to belong to anything besides the green pepper ingredient.

\n

There can also exist one-to-one relationships but in general, you would just make those the same record altogether. You could split up the type and title into two tables, but why would you? Then you have a data sync problem. What if a band renames themselves? Ex: On a Friday → Radiohead, Prince → The Artist Formerly Known as Prince, The Quarrymen → The Beatles. Anyone who has ever tried to manually keep data in sync in two+ places knows eventually you will have issues.

\n

Luckily we can use relational data to have one table of bands and one table of albums.

\n
SELECT * FROM Album LIMIT 5;\n
\n

Notice that we're just getting ArtistId (a number) instead of the actual name of the band. That's not what we want, we want to see the name of the album and the name of the band. Enter JOINs. This allows us to join two tables based on common data. Let's see how to do that.

\n
SELECT\n    Artist.Name, Album.Title\nFROM\n    Album\nJOIN\n    Artist ON\n        Album.ArtistId = Artist.ArtistId\nLIMIT 5;\n
\n
\n

Once we start getting into longer queries (especially when in code) I start spacing it out using new lines to make it easier to read. Makes it easier to understand at a glance.

\n
\n

We JOIN'd the Artist table to the Album table based on them sharing a common ArtistId. Pretty cool, right? Let's look at a few more tricks here.

\n

Table Aliases

\n
SELECT\n    b.Name, a.Title\nFROM\n    Album a\nJOIN\n    Artist b ON\n        a.ArtistId = b.ArtistId\nLIMIT 5;\n
\n

You can give tables aliases so they're easier to refer to. Some of the table names can get quite long so this can make it more readable. In this case, I'd argue that the single-letter variable names make it less readable, that's up to you.

\n

You can use WHERE!

\n
SELECT\n    Artist.Name, Album.Title\nFROM\n    Album\nJOIN\n    Artist ON\n        Album.ArtistId = Artist.ArtistId\nWHERE\n    Artist.Name = 'Nirvana';\n\nSELECT\n    Artist.Name, Album.Title\nFROM\n    Album\nJOIN\n    Artist ON\n        Album.ArtistId = Artist.ArtistId\nWHERE\n    Album.Title = 'IV';\n\nSELECT\n    Artist.Name, Album.Title\nFROM\n    Album\nJOIN\n    Artist ON\n        Album.ArtistId = Artist.ArtistId\nWHERE\n    Album.Title LIKE '%live%';\n
\n

As you can see, once you set up the JOIN and how you want to join the tables together, you can start filtering your results based on either table. In the first query we asked for only Nirvana's albums, the second is just albums named IV, and the third we're asking for any album with live in it.

\n

Joining more than two tables

\n
SELECT\n    Artist.Name, Album.Title, Track.Name\nFROM\n    Album\nJOIN\n    Artist ON\n        Album.ArtistId = Artist.ArtistId\nJOIN\n    Track ON\n        Track.AlbumId = Album.AlbumId\nWHERE\n    Album.Title = 'IV';\n
\n

As you can see, you can just keep joining tables as long as you have ways to join the tables.

\n
SELECT\n    Artist.Name, Album.Title, Track.Name, Genre.Name\nFROM\n    Album\nJOIN\n    Artist ON\n        Album.ArtistId = Artist.ArtistId\nJOIN\n    Track ON\n        Track.AlbumId = Album.AlbumId\nJOIN\n    Genre ON\n        Track.GenreId = Genre.GenreId\nWHERE\n    Artist.Name = 'Foo Fighters';\n
\n

You can just keep joining as you need to.

\n","markdown":"So far we've done a one-to-one matching of records. We've used a record in a database to represent one item: one band member, band, etc.\n\nNow we're going to get into records that can relate to each other. Let's think about albums. A recipe has multiple ingredients. That **has** word is key here. It means there is a relationship. A single recipe has many ingredients. An ingredient can also be in many recipes. A tomato is both in pizza sauce and in a BLT. This is called a many-to-many relationship.\n\nThere are also one-to-many relationships, like imagine if we had multiple photos of each of our ingredients. A single ingredient will have five photos. And those photos will only will only belong to one ingredient. A photo of a green pepper doesn't make sense to belong to anything besides the green pepper ingredient.\n\nThere can also exist one-to-one relationships but in general, you would just make those the same record altogether. You could split up the type and title into two tables, but why would you? Then you have a data sync problem. What if a band renames themselves? Ex: On a Friday → Radiohead, Prince → The Artist Formerly Known as Prince, The Quarrymen → The Beatles. Anyone who has ever tried to manually keep data in sync in two+ places knows eventually you will have issues.\n\nLuckily we can use relational data to have one table of bands and one table of albums.\n\n```sql\nSELECT * FROM Album LIMIT 5;\n```\n\nNotice that we're just getting ArtistId (a number) instead of the actual name of the band. That's not what we want, we want to see the name of the album and the name of the band. Enter JOINs. This allows us to join two tables based on common data. Let's see how to do that.\n\n```sql\nSELECT\n Artist.Name, Album.Title\nFROM\n Album\nJOIN\n Artist ON\n Album.ArtistId = Artist.ArtistId\nLIMIT 5;\n```\n\n> Once we start getting into longer queries (especially when in code) I start spacing it out using new lines to make it easier to read. 
Makes it easier to understand at a glance.\n\nWe JOIN'd the Artist table to the Album table based on them sharing a common ArtistId. Pretty cool, right? Let's look at a few more tricks here.\n\n## Table Aliases\n\n```sql\nSELECT\n b.Name, a.Title\nFROM\n Album a\nJOIN\n Artist b ON\n a.ArtistId = b.ArtistId\nLIMIT 5;\n```\n\nYou can give tables aliases so they're easier to refer to. Some of the table names can get quite long so this can make it more readable. In this case, I'd argue that the single-letter variable names make it _less_ readable, that's up to you.\n\n## You can use WHERE!\n\n```sql\nSELECT\n Artist.Name, Album.Title\nFROM\n Album\nJOIN\n Artist ON\n Album.ArtistId = Artist.ArtistId\nWHERE\n Artist.Name = 'Nirvana';\n\nSELECT\n Artist.Name, Album.Title\nFROM\n Album\nJOIN\n Artist ON\n Album.ArtistId = Artist.ArtistId\nWHERE\n Album.Title = 'IV';\n\nSELECT\n Artist.Name, Album.Title\nFROM\n Album\nJOIN\n Artist ON\n Album.ArtistId = Artist.ArtistId\nWHERE\n Album.Title LIKE '%live%';\n```\n\nAs you can see, once you set up the JOIN and how you want to join the tables together, you can start filtering your results based on either table. 
The first query we asked for only Nirvana's albums, the second is just albums named `IV`, and the third we're asking for any album that `live` in it.\n\n## Joining more than two tables\n\n```sql\nSELECT\n Artist.Name, Album.Title, Track.Name\nFROM\n Album\nJOIN\n Artist ON\n Album.ArtistId = Artist.ArtistId\nJOIN\n Track ON\n Track.AlbumId = Album.AlbumId\nWHERE\n Album.Title = 'IV';\n```\n\nAs you can see, you can just keep joining tables as long you have ways to join the table.\n\n```sql\nSELECT\n Artist.Name, Album.Title, Track.Name, Genre.Name\nFROM\n Album\nJOIN\n Artist ON\n Album.ArtistId = Artist.ArtistId\nJOIN\n Track ON\n Track.AlbumId = Album.AlbumId\nJOIN\n Genre ON\n Track.GenreId = Genre.GenreId\nWHERE\n Artist.Name = 'Foo Fighters';\n```\n\nYou can just keep joining as you need to.\n","slug":"relational-data","title":"Relational Data","section":"Intermediate SQL","icon":"table","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/04-intermediate-sql/A-relational-data.md","nextSlug":"/lessons/intermediate-sql/other-types-of-joins","prevSlug":"/lessons/basic-sql/tables"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/subqueries.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/subqueries.json new file mode 100644 index 0000000..a2d3769 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/intermediate-sql/subqueries.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"

Okay, what if you want to find all customer invoices from a certain email address? Using joins, we could accomplish this:

\n
SELECT\n *\nFROM\n  Invoice i\n\nJOIN\n  Customer c\nON\n  c.CustomerId = i.CustomerId\n\nWHERE\n  c.Email = 'hholy@gmail.com';\n
\n

This totally works and if you like this, roll with it. No issues here. However I'm going to show you a second way (mostly to demonstrate subqueries!)

\n
SELECT\n  *\nFROM\n  Invoice\nWHERE\n  CustomerId = (\n    SELECT CustomerId FROM Customer WHERE Email='hholy@gmail.com'\n  );\n
\n

If you put parens in, you can do a subquery. This query is run first and its results can be fed into the parent query. In this case, we use a subquery to find the ID of the email. In this case, for a one-off query, the performance difference isn't important. If this was a thing run constantly in production and it was slow, I'd analyze the performance of both and pick the better one given our data and indexes (stuff we'll talk about later.)

\n

In this case you can use JOINs to work around using subqueries but it's not always possible. It's a good little tool in your SQL tool belt to have.

\n","markdown":"Okay, what if you want to find all customer invoices from a certain email address? Using joins, we could accomplish\n\n```sql\nSELECT\n *\nFROM\n Invoice i\n\nJOIN\n Customer c\nON\n c.CustomerId = i.CustomerId\n\nWHERE\n c.Email = 'hholy@gmail.com';\n```\n\nThis totally works and if you like this, roll with it. No issues here. However I'm going to show you a second way (mostly to demonstrate subqueries!)\n\n```sql\nSELECT\n *\nFROM\n Invoice\nWHERE\n CustomerId = (\n SELECT CustomerId FROM Customer WHERE Email='hholy@gmail.com'\n );\n```\n\nIf you put parens in, you can do a subquery. This query is run first and its results can be fed into the parent query. In this case, we use a subquery to find the ID of the email. In this case, for a one-off query, the performance difference isn't important. If this was a thing run constantly in production and it was slow, I'd analyze the performance of both and pick the better one given our data and indexes (stuff we'll talk about later.)\n\nIn thise case you can use JOINs to work around using subqueries but it's not always possible. It's a good little tool in your SQL tool belt to have.\n","slug":"subqueries","title":"Subqueries","section":"Intermediate SQL","icon":"table","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/04-intermediate-sql/E-subqueries.md","nextSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/the-example-app","prevSlug":"/lessons/intermediate-sql/aggregation"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/jsonb.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/jsonb.json new file mode 100644 index 0000000..06d7c89 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/jsonb.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"

So we've been doing everything with the JSON functions but in reality we really want to use JSONB. It's a more compact way to represent JSON and it's faster to use. It makes everything a little harder to read but otherwise is just better.

\n
-- notice the b in jsonb\nSELECT jsonb('{"username": "btholt", "favorites":["Daft Punk", "Radiohead"]}');\n-- ?1?usernamegbtholt?favorites??Daft Punk?Radiohead\n
\n

It comes out a little hard to read but ultimately we don't want to read it until it's out of the database so I'd suggest always using JSONB.

\n

Given that, let's make a table, insert some stuff into it, and write some queries.

\n
CREATE TABLE users (email, data);\n\nINSERT INTO\n  users\n  (email, data)\nVALUES\n  ('brian@example.com', jsonb('{"favorites":["Daft Punk", "Radiohead"], "name": {"first": "Brian", "last": "Holt"}}')),\n  ('bob@example.com', jsonb('{"favorites":["Daft Punk"], "name": {"first": "Bob", "last": "Smith"}}')),\n  ('alice@example.com', jsonb('{"admin": true, "favorites":["The Beatles", "Queen"], "name": {"first": "Alice", "last": "Johnson"}}')),\n  ('charlie@example.com', jsonb('{"favorites":["Nirvana", "Pearl Jam"], "name": {"first": "Charlie", "last": "Brown"}}')),\n  ('dave@example.com', jsonb('{"favorites":["Pink Floyd", "Led Zeppelin"], "name": {"first": "Dave", "last": "Wilson"}}')),\n  ('eve@example.com', jsonb('{"favorites":["Madonna", "Michael Jackson"], "name": {"first": "Eve", "last": "Davis"}}')),\n  ('frank@example.com', jsonb('{"favorites":["Queen", "David Bowie"], "name": {"first": "Frank", "last": "Miller"}}')),\n  ('grace@example.com', jsonb('{"favorites":["Radiohead", "Led Zeppelin"], "name": {"first": "Grace", "last": "Lee"}}')),\n  ('hank@example.com', jsonb('{"favorites":["U2", "Radiohead"], "name": {"first": "Hank", "last": "Taylor"}}')),\n  ('ivy@example.com', jsonb('{"favorites":["Adele", "Beyoncé"], "name": {"first": "Ivy", "last": "Anderson"}}')),\n  ('jack@example.com', jsonb('{"favorites":["Radiohead", "Muse"], "name": {"first": "Jack", "last": "Thomas"}}')),\n  ('kate@example.com', jsonb('{"favorites":["Taylor Swift", "Madonna"], "name": {"first": "Kate", "last": "Martinez"}}')),\n  ('leo@example.com', jsonb('{"favorites":["Nirvana", "Daft Punk"], "name": {"first": "Leo", "last": "Garcia"}}'));\n\n-- it's readable but hard to. **never** modify this directly, always let SQLite do it\nSELECT data from users;\n\n-- get nested data\nSELECT data -> 'name' ->> 'first', data -> 'name' ->> 'last' FROM users;\n\nSELECT data -> 'name' ->> 'first', data -> 'name' ->> 'last' FROM users WHERE json_array_length(data, '$.favorites') < 2;\n
\n

Pretty straightforward here. Getting data out of JSON is very similar to just normal fields.

\n

For the second one, we are asking for all users that have less than two favorites. You can use these functions anywhere.

\n","markdown":"So we've been doing everything with the JSON functions but in reality we really want to use JSONB. It's a more compact way to represent JSON and it's faster to use. It makes everything a little harder to read but otherwise is just better.\n\n```sql\n-- notice the b in jsonb\nSELECT jsonb('{\"username\": \"btholt\", \"favorites\":[\"Daft Punk\", \"Radiohead\"]}');\n-- ?1?usernamegbtholt?favorites??Daft Punk?Radiohead\n```\n\nIt comes out a little hard to read but ultimately we don't want to read it until it's out of the database so I'd suggest always using JSONB.\n\nGiven that, let's make a table, insert some stuff into it, and write some queries.\n\n```sql\nCREATE TABLE users (email, data);\n\nINSERT INTO\n users\n (email, data)\nVALUES\n ('brian@example.com', jsonb('{\"favorites\":[\"Daft Punk\", \"Radiohead\"], \"name\": {\"first\": \"Brian\", \"last\": \"Holt\"}}')),\n ('bob@example.com', jsonb('{\"favorites\":[\"Daft Punk\"], \"name\": {\"first\": \"Bob\", \"last\": \"Smith\"}}')),\n ('alice@example.com', jsonb('{\"admin\": true, \"favorites\":[\"The Beatles\", \"Queen\"], \"name\": {\"first\": \"Alice\", \"last\": \"Johnson\"}}')),\n ('charlie@example.com', jsonb('{\"favorites\":[\"Nirvana\", \"Pearl Jam\"], \"name\": {\"first\": \"Charlie\", \"last\": \"Brown\"}}')),\n ('dave@example.com', jsonb('{\"favorites\":[\"Pink Floyd\", \"Led Zeppelin\"], \"name\": {\"first\": \"Dave\", \"last\": \"Wilson\"}}')),\n ('eve@example.com', jsonb('{\"favorites\":[\"Madonna\", \"Michael Jackson\"], \"name\": {\"first\": \"Eve\", \"last\": \"Davis\"}}')),\n ('frank@example.com', jsonb('{\"favorites\":[\"Queen\", \"David Bowie\"], \"name\": {\"first\": \"Frank\", \"last\": \"Miller\"}}')),\n ('grace@example.com', jsonb('{\"favorites\":[\"Radiohead\", \"Led Zeppelin\"], \"name\": {\"first\": \"Grace\", \"last\": \"Lee\"}}')),\n ('hank@example.com', jsonb('{\"favorites\":[\"U2\", \"Radiohead\"], \"name\": {\"first\": \"Hank\", \"last\": \"Taylor\"}}')),\n 
('ivy@example.com', jsonb('{\"favorites\":[\"Adele\", \"Beyoncé\"], \"name\": {\"first\": \"Ivy\", \"last\": \"Anderson\"}}')),\n ('jack@example.com', jsonb('{\"favorites\":[\"Radiohead\", \"Muse\"], \"name\": {\"first\": \"Jack\", \"last\": \"Thomas\"}}')),\n ('kate@example.com', jsonb('{\"favorites\":[\"Taylor Swift\", \"Madonna\"], \"name\": {\"first\": \"Kate\", \"last\": \"Martinez\"}}')),\n ('leo@example.com', jsonb('{\"favorites\":[\"Nirvana\", \"Daft Punk\"], \"name\": {\"first\": \"Leo\", \"last\": \"Garcia\"}}'));\n\n-- it's readable but hard to. **never** modify this directly, always let SQLite do it\nSELECT data from users;\n\n-- get nested data\nSELECT data -> 'name' ->> 'first', data -> 'name' ->> 'last' FROM users;\n\nSELECT data -> 'name' ->> 'first', data -> 'name' ->> 'last' FROM users WHERE json_array_length(data, '$.favorites') < 2;\n```\n\nPretty straightforward here. Getting data out of JSON is very similar to just normal fields.\n\nFor the second one, we are asking for all users that have less than two favorites. You can use these functions anywhere.\n","slug":"jsonb","title":"Jsonb","section":"JSON","icon":"package","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/08-json/C-jsonb.md","nextSlug":"/lessons/json/more-advance-queries","prevSlug":"/lessons/json/querying"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/more-advance-queries.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/more-advance-queries.json new file mode 100644 index 0000000..ae0aff1 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/more-advance-queries.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"
-- this is counting the most favorited bands\nSELECT\n  COUNT(f.value) AS count, f.value\nFROM\n  users, json_each(data ->> 'favorites') f\nGROUP BY\n  f.value\nORDER BY\n  count DESC;\n
\n

This query finds the most favorited bands. We are using aggregation and something called a table valued function. We're using it to make a virtual table of all of the values out of the JSON arrays and then summing those into a count of the most commonly favorited bands.

\n

In general table valued functions aren't something you'll use too commonly, but here one is useful. Essentially it allows you to give a table to a function and that will generate a virtual table out of values (usually with more or fewer rows than what is in the table).

\n

Updating JSON

\n
-- this is how you update json\nUPDATE\n  users\nSET\n  data = json_insert(\n    (SELECT data FROM users WHERE email ='brian@example.com'),\n    '$.favorites[#]',\n    'The xx'\n  )\nWHERE\n  email ='brian@example.com';\n
\n

Updating the JSON can be a bit more difficult. We need a copy of the JSON to update (hence the subquery) and then we need to use a JSON method to update it, and then to set the whole thing as the new value. Because there's no real JSON type and it's really just a string at the end of the day, we have to set it holistically each time.

\n

The [#] at the end of '$.favorites[#]' is a special syntax that means "add to the end". You can put a number in there if you want to update a specific place.

\n","markdown":"```sql\n-- this is counting the most favorited bands\nSELECT\n COUNT(f.value) AS count, f.value\nFROM\n users, json_each(data ->> 'favorites') f\nGROUP BY\n f.value\nORDER BY\n count DESC;\n```\n\nThis query finds the most favorited bands. We are using aggregation and something called a [table valued functions][table-valued-function]. We're using it to make a virtual table of all of the values out of the JSON arrays and then summing those into a most commonly favorited bands.\n\nIn general this isn't something too common to use table valued functions but here it is useful. Essentially it allows you to give a table to a function and that will generate a virtual table out of values (with usually more or less rows than what in the table).\n\n## Updating JSON\n\n```sql\n-- this is how you update json\nUPDATE\n users\nSET\n data = json_insert(\n (SELECT data FROM users WHERE email ='brian@example.com'),\n '$.favorites[#]',\n 'The xx'\n )\nWHERE\n email ='brian@example.com';\n```\n\nUpdating the JSON can be a bit more difficult. We need a copy of the JSON to update (hence the subquery) and then we need to use a JSON method to update it, and then to set the whole thing as the new value. Because there's no real JSON type and it's really just a string at the end of the day, we have to set it holistically each time.\n\nThe `[#]` at the end of `'$.favorites[#]'` is a special syntax that means \"add to the end\". 
You can put a number in there if you want to update a specific place.\n\n[table-valued-function]: https://www.sqlite.org/vtab.html#tabfunc2\n","slug":"more-advance-queries","title":"More Advance Queries","section":"JSON","icon":"package","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/08-json/D-more-advance-queries.md","nextSlug":null,"prevSlug":"/lessons/json/jsonb"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/querying.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/querying.json new file mode 100644 index 0000000..a7a5c34 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/querying.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"JSON"},"html":"

The JSON extension (json1) for SQLite is the most useful one to me, and one that I load nearly any time I do a project with SQLite. It is extremely useful for application development. It allows you to read and write arbitrary data. It allows you to treat SQLite essentially like a document-based database like MongoDB where you don't have to define your schema up front and can do so on the fly. It allows you to have arrays inside of rows instead of having to do one-to-many relations across tables. There are still reasons you'd want to define schema and have relations across table, but this makes it easy for simple use cases where you don't need all the SQL rigor.

\n

Let's install it.

\n
sqlpkg install sqlite/json1\nsqlpkg which sqlite/json1 # copy the output of this\n
\n

Now load your database and run this

\n
.load /Users/my-user/.sqlpkg/sqlite/json1/json1.dylib\n\nSELECT json('{"username": "btholt", "favorites":["Daft Punk", "Radiohead"]}');\n
\n

This should return back to you a JSON object of the string we passed in. As you can see, we can now operate with JSON inside of SQLite! Pretty cool, right? Let's try a few more.

\n
-- create an array\nSELECT json_array(1, 2, 3);\n\n-- get the length of an array\nSELECT json_array_length('{"username": "btholt", "favorites":["Daft Punk", "Radiohead"]}', '$.favorites');\n\n-- get the type of a field in an object\nSELECT json_type('{"username": "btholt", "favorites":["Daft Punk", "Radiohead"]}', '$.username');\n\n-- construct a new object using pairs\nSELECT json_object('username', 'btholt', 'favorites', json_array('Daft Punk', 'Radiohead'));\n
\n

These are a bunch of helper methods to help you interact with JSON objects in SQLite. Let's see how to manipulate it.

\n
-- add a new field\nSELECT json_insert('{"username": "btholt", "favorites":["Daft Punk", "Radiohead"]}', '$.city', 'Sacramento');\n\n-- remove a field\nSELECT json_remove('{"username": "btholt", "favorites":["Daft Punk", "Radiohead"]}', '$.favorites');\n\n-- update a field\nSELECT json_replace('{"username": "btholt", "favorites":["Daft Punk", "Radiohead"]}', '$.username', 'holtbt');\n
\n

-> and ->>

\n

SQLite provides two convenience operators that it copied from MySQL and Postgres to keep the syntaxes compatible. They allow you to extract specific values from JSON.

\n
SELECT json('{"username": "btholt", "favorites":["Daft Punk", "Radiohead"]}') -> 'username';\nSELECT json('{"username": "btholt", "name": { "first": "Brian" }, "favorites":["Daft Punk", "Radiohead"]}') -> 'name';\nSELECT json('{"username": "btholt", "name": { "first": "Brian" }, "favorites":["Daft Punk", "Radiohead"]}') -> 'name' -> 'first';\n
\n

Notice you can do multiple levels of extraction. Also notice that anything that comes back from the -> operator has double quotes. That's because it's still treating it as JSON so that we can keep using -> to dig further into objects. If we want it to be returned as text or an integer, we use ->>

\n
SELECT json('{"username": "btholt", "name": { "first": "Brian" }, "favorites":["Daft Punk", "Radiohead"]}') -> 'name' ->> 'first';\n
\n

->> lets you get the actual value out and not JSON. Use it for your last extraction to actually get the data out.

\n","markdown":"\nThe JSON extension ([json1][json1]) for SQLite is the most useful one to me, and one that I load nearly any time I do a project with SQLite. It is extremely useful for application development. It allows you to read and write arbitrary data. It allows you to treat SQLite essentially like a document-based database like MongoDB where you don't have to define your schema up front and can do so on the fly. It allows you to have arrays inside of rows instead of having to do one-to-many relations across tables. There are still reasons you'd want to define schema and have relations across table, but this makes it easy for simple use cases where you don't need all the SQL rigor.\n\nLet's install it.\n\n```bash\nsqlpkg install sqlite/json1\nsqlpkg which sqlite/json1 # copy the out output of this\n```\n\nNow load your database and run this\n\n```sql\n.load /Users/my-user/.sqlpkg/sqlite/json1/json1.dylib\n\nSELECT json('{\"username\": \"btholt\", \"favorites\":[\"Daft Punk\", \"Radiohead\"]}');\n```\n\nThis should return back to you a JSON object of the string we passed in. As you can see, we can now operate with JSON inside of SQLite! Pretty cool, right? Let's try a few more.\n\n```sql\n-- create an array\nSELECT json_array(1, 2, 3);\n\n-- get the length of an array\nSELECT json_array_length('{\"username\": \"btholt\", \"favorites\":[\"Daft Punk\", \"Radiohead\"]}', '$.favorites');\n\n-- get the type of a field in an object\nSELECT json_type('{\"username\": \"btholt\", \"favorites\":[\"Daft Punk\", \"Radiohead\"]}', '$.username');\n\n-- construct a new object using pairs\nSELECT json_object('username', 'btholt', 'favorites', json_array('Daft Punk', 'Radiohead'));\n```\n\nThese are a bunch of helper methods to help you interact with JSON objects in SQLite. 
Let's see how to manipulate it.\n\n```sql\n-- add a new field\nSELECT json_insert('{\"username\": \"btholt\", \"favorites\":[\"Daft Punk\", \"Radiohead\"]}', '$.city', 'Sacramento');\n\n-- remove a field\nSELECT json_remove('{\"username\": \"btholt\", \"favorites\":[\"Daft Punk\", \"Radiohead\"]}', '$.favorites');\n\n-- update a field\nSELECT json_replace('{\"username\": \"btholt\", \"favorites\":[\"Daft Punk\", \"Radiohead\"]}', '$.username', 'holtbt');\n```\n\n## -> and ->>\n\nSQLite provides two convenience operators that it copied from MySQL and Postgres to keep the syntaxes compatible. It allows you to extract specific values from JSON.\n\n```sql\nSELECT json('{\"username\": \"btholt\", \"favorites\":[\"Daft Punk\", \"Radiohead\"]}') -> 'username';\nSELECT json('{\"username\": \"btholt\", \"name\": { \"first\": \"Brian\" }, \"favorites\":[\"Daft Punk\", \"Radiohead\"]}') -> 'name';\nSELECT json('{\"username\": \"btholt\", \"name\": { \"first\": \"Brian\" }, \"favorites\":[\"Daft Punk\", \"Radiohead\"]}') -> 'name' -> 'first';\n```\n\nNotice you can do multiple levels of extraction. Also notice that anything that coming back from with -> operator has double quotes. That's because it's still treating it as JSON so that we can keep using -> to dig further into objects. If we want it to return it as text or integer, we use ->>\n\n```sql\nSELECT json('{\"username\": \"btholt\", \"name\": { \"first\": \"Brian\" }, \"favorites\":[\"Daft Punk\", \"Radiohead\"]}') -> 'name' ->> 'first';\n```\n\n->> lets you get the actual value out and not JSON. 
Use it for your last extraction to actually get the data out.\n\n[json1]: https://sqlite.org/json1.html\n","slug":"querying","title":"JSON","section":"JSON","icon":"package","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/08-json/B-querying.md","nextSlug":"/lessons/json/jsonb","prevSlug":"/lessons/json/sqlite-extensions"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/sqlite-extensions.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/sqlite-extensions.json new file mode 100644 index 0000000..93f0225 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/json/sqlite-extensions.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"SQLite Extensions"},"html":"

SQLite intentionally maintains a tight set of features and ruthlessly cuts out anything that doesn't contribute to that core use case of a small SQL database. But that said, SQLite has a rich ecosystem of extensions to fill in those gaps of other functionality you may want and many of those are written by the developers of SQLite themselves.

\n

Click here to see the unofficial SQLite package manager, sqlpkg.

\n

As of writing there are 102 extensions in here that have been indexed, and more than that exist. If there's some core database thing you want to do that SQLite doesn't currently do, there likely exists an extension for it.

\n

If you're curious how they're written, Alex Garcia (who has written many SQLite extensions, including the vector search we're about to use) wrote one called hello that is the minimum viable extension.

\n

So how do we load an extension? Let's load the hello extension. I'm going to use sqlpkg CLI but you can also do it manually. If you're following along with me here are the instructions.

\n

Once installed, do:

\n
sqlpkg install asg017/hello\nsqlpkg which asg017/hello # copy the path this gives you\n
\n

Now that you've done that, load into the SQLite CLI and run

\n
.load /Users/my-user/.sqlpkg/asg017/hello/hello0.dylib\nSELECT hello('Brian');\n
\n

That's it! You use the .load syntax to load an extension and then you can start using it right away. Keep in mind you need to load the extension every time you open the file because, again, it's not a server, it's a library that's writing to a file.

\n
\n

hello() is just a function that was added by the extension. SQLite lets you query from these. Try SELECT max(1,2,3,4,5,100);. max() is built into SQLite

\n
\n","markdown":"\nSQLite intentionally maintains a tight set of features and ruthlessly cuts out anything that doesn't contribute to that core use case of a small SQL database. But that said, SQLite has a rich ecosystem of extensions to fill in those gaps of other functionality you may want and many of those are written by the developers of SQLite themselves.\n\n[Click here to see the unofficial SQLite package manager, sqlpkg][sqlpkg].\n\nAs of writing there are 102 extensions in here that have been indexed, and more than that exist. If there's some core database thing you want to do that SQLite doesn't currently do, there likely exists and extension for it.\n\nIf you're curious how they're written, Alex Garcia (who has written many SQLite extensions, including the vector search we're about to use) wrote one called [hello][hello] that is the minimum viable extension.\n\nSo how do we load an extension? Let's load the hello extension. I'm going to use sqlpkg CLI but you can also do it manually. If you're following along with me [here are the instructions][install].\n\nOnce installed, do:\n\n```bash\nsqlpkg install asg017/hello\nsqlpkg which asg017/hello # copy the path this gives you\n```\n\nNow that you've done that, load into the SQLite CLI and run\n\n```sql\n.load /Users/my-user/.sqlpkg/asg017/hello/hello0.dylib\nSELECT hello('Brian');\n```\n\nThat's it! You use the .load syntax to load an extension and then you can start using it right away. Keep in mind you need to load the extension every time you open the file because, again, it's not a server, it's a library that's writing to a file.\n\n> hello() is just a function that was added by the extension. SQLite let's you query from these. Try `SELECT max(1,2,3,4,5,100);`. 
max() is built into SQLite\n\n[sqlpkg]: https://sqlpkg.org/\n[hello]: https://github.com/asg017/sqlite-hello\n[cli]: https://github.com/nalgeon/sqlpkg-cli\n[install]: https://github.com/nalgeon/sqlpkg-cli?tab=readme-ov-file#download-and-install-preferred-method\n","slug":"sqlite-extensions","title":"SQLite Extensions","section":"JSON","icon":"package","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/08-json/A-sqlite-extensions.md","nextSlug":"/lessons/json/querying","prevSlug":"/lessons/performance/indexes"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/performance/explain.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/performance/explain.json new file mode 100644 index 0000000..13e6167 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/performance/explain.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"EXPLAIN"},"html":"
SELECT * FROM Track Where name ='Black Dog';\nPRAGMA index_list('Track');\nEXPLAIN SELECT * FROM Track Where name ='Black Dog';\nEXPLAIN QUERY PLAN SELECT * FROM Track Where name ='Black Dog';\nCREATE INDEX idx_track_name ON Track (Name);\nEXPLAIN QUERY PLAN SELECT * FROM Track Where name ='Black Dog';\nPRAGMA index_list('Track');\n
\n

Generally speaking, SQLite is extremely fast even on large datasets. It does a great job of working with large amounts of data even on complicated queries. However occasionally it can use some help when you have heavy queries that you run frequently. Let's first understand how to look at queries using some fun features built into SQLite.

\n
SELECT * FROM Track Where name ='Black Dog';\n
\n

Let's say we are building an interface that frequently needs to look up tracks by their names. You'd be running queries like this frequently. Right now our Track database has some 3,000 rows in it but imagine if you had Spotify's database of music. Spotify says it has over 100,000,000 tracks on it, so that query would get very slow. Let's see a few of the ways that SQLite gives you to inspect your queries.

\n
EXPLAIN SELECT * FROM Track Where name ='Black Dog';\n
\n
addr  opcode         p1    p2    p3    p4             p5  comment\n----  -------------  ----  ----  ----  -------------  --  -------------\n0     Init           0     17    0                    0   Start at 17\n1     OpenRead       0     409   0     9              0   root=409 iDb=0; Track\n2     Rewind         0     16    0                    0\n3       Column         0     1     1                    0   r[1]= cursor 0 column 1\n4       Ne             2     15    1     BINARY-8       82  if r[1]!=r[2] goto 15\n5       Rowid          0     3     0                    0   r[3]=Track.rowid\n6       Column         0     1     4                    0   r[4]= cursor 0 column 1\n7       Column         0     2     5                    0   r[5]= cursor 0 column 2\n8       Column         0     3     6                    0   r[6]= cursor 0 column 3\n9       Column         0     4     7                    0   r[7]= cursor 0 column 4\n10      Column         0     5     8                    0   r[8]= cursor 0 column 5\n11      Column         0     6     9                    0   r[9]= cursor 0 column 6\n12      Column         0     7     10                   0   r[10]= cursor 0 column 7\n13      Column         0     8     11                   0   r[11]= cursor 0 column 8\n14      ResultRow      3     9     0                    0   output=r[3..11]\n15    Next           0     3     0                    1\n16    Halt           0     0     0                    0\n17    Transaction    0     0     66    0              1   usesStmtJournal=0\n18    String8        0     2     0     Black Dog      0   r[2]='Black Dog'\n19    Goto           0     1     0                    0\n
\n

I'll be honest, I understand like zero of this. This is what SQLite is doing under the hood. I found the output from Postgres's explain to be much more readable and actionable. In any case, it's there and you can analyze it if you want to. I never look at this so I just wanted to show you that it's there.

\n

Instead, I use this

\n
EXPLAIN QUERY PLAN SELECT * FROM Track Where name ='Black Dog';\n-- `--SCAN Track\n
\n

Critically, the word SCAN here lets you know that this query is going to look at every row in the table. Now, if you only have 3,000 rows or you only run this query infrequently, who cares, a SCAN is fine. However, if you're Spotify and you're scanning 100,000,000 rows every search, then yes, you need to do something about this.

\n

One more fun trick if you're playing around in the CLI:

\n
.eqp on\nSELECT * FROM Track Where name ='Black Dog';\n
\n

If you run .eqp on, for the rest of your session it will always show you the query plan when it runs a query. Can be kinda cool to see how SQLite chooses to plan queries.

\n","markdown":"\n```sql\nSELECT * FROM Track Where name ='Black Dog';\nPRAGMA index_list('Track');\nEXPLAIN SELECT * FROM Track Where name ='Black Dog';\nEXPLAIN QUERY PLAN SELECT * FROM Track Where name ='Black Dog';\nCREATE INDEX idx_track_name ON Track (Name);\nEXPLAIN QUERY PLAN SELECT * FROM Track Where name ='Black Dog';\nPRAGMA index_list('Track');\n```\n\nGenerally speaking, SQLite is extremely fast even on large datasets. It does a great job of working with large amounts of data even on complicated queries. However occasionally it can use some help when you have heavy queries that you run frequently. Let's first understand how to look at queries using some fun features built into SQLite.\n\n```sql\nSELECT * FROM Track Where name ='Black Dog';\n```\n\nLet's say we are building an interface that frequently needs to look up tracks by their names. You'd be running queries like this frequently. Right now our Track database has some 3,000 rows in it but imagine if you had Spotify's database of music. Spotify says it has over 100,000,000 tracks on it, so that query get very slow. 
Let's see a few of the ways that SQLite gives you to inspect your queries.\n\n```sql\nEXPLAIN SELECT * FROM Track Where name ='Black Dog';\n```\n\n```\naddr opcode p1 p2 p3 p4 p5 comment\n---- ------------- ---- ---- ---- ------------- -- -------------\n0 Init 0 17 0 0 Start at 17\n1 OpenRead 0 409 0 9 0 root=409 iDb=0; Track\n2 Rewind 0 16 0 0\n3 Column 0 1 1 0 r[1]= cursor 0 column 1\n4 Ne 2 15 1 BINARY-8 82 if r[1]!=r[2] goto 15\n5 Rowid 0 3 0 0 r[3]=Track.rowid\n6 Column 0 1 4 0 r[4]= cursor 0 column 1\n7 Column 0 2 5 0 r[5]= cursor 0 column 2\n8 Column 0 3 6 0 r[6]= cursor 0 column 3\n9 Column 0 4 7 0 r[7]= cursor 0 column 4\n10 Column 0 5 8 0 r[8]= cursor 0 column 5\n11 Column 0 6 9 0 r[9]= cursor 0 column 6\n12 Column 0 7 10 0 r[10]= cursor 0 column 7\n13 Column 0 8 11 0 r[11]= cursor 0 column 8\n14 ResultRow 3 9 0 0 output=r[3..11]\n15 Next 0 3 0 1\n16 Halt 0 0 0 0\n17 Transaction 0 0 66 0 1 usesStmtJournal=0\n18 String8 0 2 0 Black Dog 0 r[2]='Black Dog'\n19 Goto 0 1 0 0\n```\n\nI'll be honest, I understand like zero of this. This is what SQLite is doing under the hood. I found the input from Postgres's explain to be much more readable and actionable. In any case, it's there and you can anaylze it if you want to. I never look at this so I just wanted to show you that it's there.\n\nInstead, I use this\n\n```sql\nEXPLAIN QUERY PLAN SELECT * FROM Track Where name ='Black Dog';\n-- `--SCAN Track\n```\n\nCritically, the word `SCAN` here lets you know that this query is going to look at _every_ row in the table. Now, if you only have 3,000 rows or you only run this query infrequently, who cares, a SCAN is fine. 
However, if you're Spotify and you're scanning 100,000,000 rows every search, then yes, you need to do something about this.\n\nOne more fun trick if you're playing around in the CLI:\n\n```sql\n.eqp on\nSELECT * FROM Track Where name ='Black Dog';\n```\n\nIf you run `.eqp on`, for the rest of your session it will always show you the query plan when it runs a query. Can be kinda cool to see how SQLite chooses to plan queries.\n","slug":"explain","title":"EXPLAIN","section":"Performance","icon":"info-circle","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/07-performance/A-explain.md","nextSlug":"/lessons/performance/indexes","prevSlug":"/lessons/what-is-unique-to-sqlite/views"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/performance/indexes.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/performance/indexes.json new file mode 100644 index 0000000..a86917a --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/performance/indexes.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"

Okay, so now we know we have a query we need to optimize. How do we do it? With an index!

\n

An index makes a B-Tree (which stands for balanced tree) to make look ups much faster. Instead of taking O(n) to look up items, it takes O(log n) since it can use a tree to find the item instead of scanning every item in the database.

\n
CREATE INDEX idx_track_name ON Track (Name);\n\n-- see it's there now\nPRAGMA index_list('Track');\n\nEXPLAIN QUERY PLAN SELECT * FROM Track Where name ='Black Dog';\n-- `--SEARCH Track USING INDEX idx_track_name (Name=?)\n
\n

Notice it's a SEARCH now instead of a SCAN. This means it was able to use an index and only look at a subset of the table instead of every row. Hooray!

\n
EXPLAIN QUERY PLAN SELECT * FROM Track Where name ='Black Dog';\n
\n
-- w/ index\naddr  opcode         p1    p2    p3    p4             p5  comment\n----  -------------  ----  ----  ----  -------------  --  -------------\n0     Init           0     19    0                    0   Start at 19\n1     OpenRead       0     409   0     9              0   root=409 iDb=0; Track\n2     OpenRead       1     2     0     k(2,,)         2   root=2 iDb=0; idx_track_name\n3     String8        0     1     0     Black Dog      0   r[1]='Black Dog'\n4     SeekGE         1     18    1     1              0   key=r[1]\n5       IdxGT          1     18    1     1              0   key=r[1]\n6       DeferredSeek   1     0     0                    0   Move 0 to 1.rowid if needed\n7       IdxRowid       1     2     0                    0   r[2]=rowid; Track.rowid\n8       Column         1     0     3                    0   r[3]= cursor 1 column 0\n9       Column         0     2     4                    0   r[4]= cursor 0 column 2\n10      Column         0     3     5                    0   r[5]= cursor 0 column 3\n11      Column         0     4     6                    0   r[6]= cursor 0 column 4\n12      Column         0     5     7                    0   r[7]= cursor 0 column 5\n13      Column         0     6     8                    0   r[8]= cursor 0 column 6\n14      Column         0     7     9                    0   r[9]= cursor 0 column 7\n15      Column         0     8     10                   0   r[10]= cursor 0 column 8\n16      ResultRow      2     9     0                    0   output=r[2..10]\n17    Next           1     5     1                    0\n18    Halt           0     0     0                    0\n19    Transaction    0     0     65    0              1   usesStmtJournal=0\n20    Goto           0     1     0                    0\n
\n

Again, I have a hard time reading this. You can see that it refers to the index in the comments so that's positive.

\n

Okay so let's talk a little bit more about why you may not want to index everything. I've heard the saying that indexes are like aspirin – they're a great help when you have a problem but if you use too many they become a problem.

\n

Every time you insert into a table that has indexes, it has to do some rebuilding of the indexes to accommodate this information. Likewise, if you delete, it has to move its nodes around its B-tree to keep it balanced. B-trees also take up space, and on large tables it can be non-trivial amounts of space. The trade-off here is that indexes help with reads but slow down updates, deletes, and inserts as well as take up space. In general I wait for a query to become a problem first before I try to index it, and even then I try to index only what I need to solve my problem. Premature optimization is generally a bad thing to do because as developers we're pretty bad at guessing what's going to go wrong.

\n","markdown":"Okay, so now we know we have a query we need to optimize. How do we do it? With an index!\n\nAn index makes a B-Tree (which stands for balanced tree) to make look ups much faster. Instead of taking O(n) to do a lookup items, it takes O(log n) since it can use a tree to find the item instead of scanning every item in the database.\n\n```sql\nCREATE INDEX idx_track_name ON Track (Name);\n\n-- see it's there now\nPRAGMA index_list('Track');\n\nEXPLAIN QUERY PLAN SELECT * FROM Track Where name ='Black Dog';\n-- `--SEARCH Track USING INDEX idx_track_name (Name=?)\n```\n\nNotice it's a SEARCH now instead of a SCAN. This means it was able to use an index and only look at a subset of the table instead of every row. Hooray!\n\n```sql\nEXPLAIN QUERY PLAN SELECT * FROM Track Where name ='Black Dog';\n```\n\n```\n-- w/ index\naddr opcode p1 p2 p3 p4 p5 comment\n---- ------------- ---- ---- ---- ------------- -- -------------\n0 Init 0 19 0 0 Start at 19\n1 OpenRead 0 409 0 9 0 root=409 iDb=0; Track\n2 OpenRead 1 2 0 k(2,,) 2 root=2 iDb=0; idx_track_name\n3 String8 0 1 0 Black Dog 0 r[1]='Black Dog'\n4 SeekGE 1 18 1 1 0 key=r[1]\n5 IdxGT 1 18 1 1 0 key=r[1]\n6 DeferredSeek 1 0 0 0 Move 0 to 1.rowid if needed\n7 IdxRowid 1 2 0 0 r[2]=rowid; Track.rowid\n8 Column 1 0 3 0 r[3]= cursor 1 column 0\n9 Column 0 2 4 0 r[4]= cursor 0 column 2\n10 Column 0 3 5 0 r[5]= cursor 0 column 3\n11 Column 0 4 6 0 r[6]= cursor 0 column 4\n12 Column 0 5 7 0 r[7]= cursor 0 column 5\n13 Column 0 6 8 0 r[8]= cursor 0 column 6\n14 Column 0 7 9 0 r[9]= cursor 0 column 7\n15 Column 0 8 10 0 r[10]= cursor 0 column 8\n16 ResultRow 2 9 0 0 output=r[2..10]\n17 Next 1 5 1 0\n18 Halt 0 0 0 0\n19 Transaction 0 0 65 0 1 usesStmtJournal=0\n20 Goto 0 1 0 0\n```\n\nAgain, I have a hard time reading this. You can see that it refers to the index in the comments so that's positive.\n\nOkay so let's talk a little bit more about why you may not to index everything. 
I've heard the saying that indexes are like aspirin – they're a great help when you have a problem but if you use too many they become a problem.\n\nEvery time you insert into a table that has indexes, it has to do some rebuilding of the indexes to accommodate this information. Likewise, if you delete, it has to move its nodes around its B-tree to keep it balanced. B-trees also take up space, and on large tables it can be non-trivial amounts of space. The trade-off here is that indexes help with reads but slow down updates, deletes, and inserts as well as take up space. In general I wait for a query to become a problem first before I try to index it, and even then I try to index only what I need to solve my problem. Pre-mature optimization generally a bad thing to do because as developers we're pretty bad at guessing what's going to go wrong.\n","slug":"indexes","title":"Indexes","section":"Performance","icon":"info-circle","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/07-performance/B-indexes.md","nextSlug":"/lessons/json/sqlite-extensions","prevSlug":"/lessons/performance/explain"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/running-sqlite/getting-started.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/running-sqlite/getting-started.json new file mode 100644 index 0000000..4bea589 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/running-sqlite/getting-started.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"

So now you have SQLite on your computer, let's just get the most basic session started. In your terminal type sqlite3 (or whatever you're using as your alias for it.) You should see something like

\n
SQLite version 3.46.0 2024-05-23 13:25:27\nEnter ".help" for usage hints.\nConnected to a transient in-memory database.\nUse ".open FILENAME" to reopen on a persistent database.\nsqlite>\n
\n

Importantly, you'll see that we're connected to an in-memory database which means whatever we do in this session will be thrown away at the end. If you give a filename as an argument (e.g. sqlite3 my-data.db) then it'll either open that file if it exists or create it if it doesn't.

\n

Dot Commands

\n

Type .help (note the leading period) into your session and hit enter. You should see a long list of possible commands you can run. The dot in front signifies that it's a dot command which are special administrative commands you can pass to SQLite to do something or get some information. This will be things like exporting your database to CSV, reporting potential bugs, opening a file, etc. In other words, it's anything that isn't a query. The .help one is super useful for you to know if you need to be reminded of what's available.

\n

Stopping SQLite

\n

The easiest way is CTRL + D. .exit is a valid dot command that works too.

\n

Let's get it running

\n

Let's save everything to a file. I'll be saving my database to my desktop. Do the following.

\n
cd ~/Desktop\nsqlite3 ./my-chinook.db\n\n## Inside sqlite\n.read ./Chinook_Sqlite.sql # or wherever you downloaded this file\n.tables # you should see the tables you imported from Chinook\n
\n

Once you're here we're ready to start writing queries!

\n","markdown":"So now you have SQLite on your computer, let's just get the most basic session started. In your terminal type `sqlite3` (or whatever you're using to get as your alias for it.) You should see something like\n\n```bash\nSQLite version 3.46.0 2024-05-23 13:25:27\nEnter \".help\" for usage hints.\nConnected to a transient in-memory database.\nUse \".open FILENAME\" to reopen on a persistent database.\nsqlite>\n```\n\nImportantly, you'll see that we're connected to an in-memory database which means whatever we do in this session will be thrown away at the end. If you give a filename as an argument (e.g. `sqlite3 my-data.db`) then it'll either open that file if it exists or create it if it doesn't.\n\n## Dot Commands\n\nType `.help` (note the leading period) into your session and hit enter. You should see a long list of possible commands you can run. The dot in front signifies that it's a dot command which are special administrative commands you can pass to SQLite to do something or get some information. This will be things like exporting your database to CSV, reporting potential bugs, opening a file, etc. In other words, it's anything that isn't a query. The `.help` one is super useful for you to know if you need to be reminded of what's available.\n\n## Stopping SQLite\n\nThe easiest way is CTRL + D. `.exit` is a valid dot command that works too.\n\n## Let's get it running\n\nLet's save everything to a file. I'll be saving my database to my desktop. 
Do the following.\n\n```bash\ncd ~/Desktop\nsqlite3 ./my-chinook.db\n\n## Inside sqlite\n.read ./Chinook_Sqlite.sql # or where-ever you downloaded this file\n.tables # you should the tables you imported from Chinook\n```\n\nOnce you're here we're ready to start writing queries!\n","slug":"getting-started","title":"Getting Started","section":"Running SQLite","icon":"person-running","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/02-running-sqlite/B-getting-started.md","nextSlug":"/lessons/basic-sql/select","prevSlug":"/lessons/running-sqlite/installing-sqlite"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/running-sqlite/installing-sqlite.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/running-sqlite/installing-sqlite.json new file mode 100644 index 0000000..1a070c8 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/running-sqlite/installing-sqlite.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"Installing SQLite"},"html":"

What's great is that SQLite is probably already on your device. For Windows you may have to look for it but in MacOS and Linux, you should be able to run sqlite3 and it should get you into a temporary session.

\n

Windows

\n

For Windows, feel free also to just click here to download a fresh version of SQLite. In all likelihood you need the Precompiled Binaries for Windows, 64-bit. Here's the 3.46 link.

\n

For the sake of this course, I'll be using what is the most current, 3.46. But honestly SQLite changes so little that it's likely unimportant which version you choose. I installed 3.46 via Homebrew.

\n

MacOS

\n
brew install sqlite\n
\n

I added this alias to my zshrc to not have to type out the path all the time.

\n
alias edu-sqlite3="echo 'This is just an alias that Brian Holt is using for his homebrew installed sqlite3. Look at the first version of the class to see how I set up the alias' && /opt/homebrew/opt/sqlite/bin/sqlite3"\n
\n

I added the echo so that people watching the video have a visual warning of what I'm doing with that command, you don't need the added echo.

\n

If you need an alternative way to that (or need to get 3.46.0 specifically like I did) click here. This will have the binary as part of a zip you can download.

\n

Chinook

\n

We'll also be using Chinook, a sample set of data. Please download the v1.4.5 Chinook_Sqlite.sql file as well. I'll teach you how to use it in a bit, but suffice to say it's a dataset about music that we'll use to have a quick intro to querying.

\n

Start my server

\n","markdown":"\nWhat's great is that SQLite is probably already on your device. For Windows you may have to look for it but in MacOS and Linux, you should be able to run `sqlite3` and it should get you into a temporary session.\n\n### Windows\n\nFor Windows, feel free also to just [click here to download][download] a fresh version of SQLite. In all likelihood you need the Precompile Binaries for Windows, 64-bit. [Here's the 3.46 link][windows].\n\nFor the sake of this course, I'll be using what is the most current, 3.46. But honestly SQLite changes so little that it's likely unimportant which version you choose. I installed 3.46 via Homebrew.\n\n### MacOS\n\n```bash\nbrew install sqlite\n```\n\nI added this alias to my zshrc to not have to type out the path all the time.\n\n```bash\nalias edu-sqlite3=\"echo 'This is just an alias that Brian Holt is using for his homebrew installed sqlite3. Look at the first version of the class to see how I set up the alias' && /opt/homebrew/opt/sqlite/bin/sqlite3\"\n```\n\nI added the echo so that people watching the video have a visual warning of what I'm doing with that command, you don't need the added echo.\n\nIf you need an alternative way to that (or need to get 3.46.0 specifically like I did) [click here][macos]. This will have the binary as part of a zip you can download.\n\n## Chinook\n\nWe'll also be using [Chinook][chinook], a sample set of data. Please download the v1.4.5 Chinook_Sqlite.sql file as well. 
I'll teach you how to use it in a bit, but suffice to say it's a dataset about movies that we'll use to have a quick intro to querying.\n\n## Start my server\n\n[download]: https://sqlite.org/download.html\n[windows]: https://sqlite.org/2024/sqlite-dll-win-x64-3460000.zip\n[macos]: https://sqlite.org/2024/sqlite-tools-osx-x64-3460000.zip\n[chinook]: https://github.com/lerocha/chinook-database/releases/tag/v1.4.5\n","slug":"installing-sqlite","title":"Installing SQLite","section":"Running SQLite","icon":"person-running","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/02-running-sqlite/A-installing-sqlite.md","nextSlug":"/lessons/running-sqlite/getting-started","prevSlug":"/lessons/welcome/what-is-sqlite"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/welcome/intro.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/welcome/intro.json new file mode 100644 index 0000000..f65f9b3 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/welcome/intro.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"
0930: Introduction\n1000: Getting started with SQLite\n1100: SQL Basics\n1200: Lunch\n1300: Data types\n1400: Managing SQLite tables and databases\n1500: Indexing\n1600: Transactions\\\n0930: Extensions\n1000: Scaling SQLite\n1100: SQLite vs other databases\n1200: Lunch\n1300: Building real-world apps with SQLite\n1400: Wrap-up\n
\n

Agenda:

\n

Intro to SQLite Conceptual

\n\n

Intro to SQLite Execution

\n\n

SQL Basics

\n\n

A bit more advanced

\n\n

Node.js Exercise: Show movies

\n

Particular to SQLite

\n\n

Extensions

\n\n

Growing SQLite

\n\n

TODO – subqueries

\n","markdown":"\n```\n0930: Introduction\n1000: Getting started with SQLite\n1100: SQL Basics\n1200: Lunch\n1300: Data types\n1400: Managing SQLite tables and databases\n1500: Indexing\n1600: Transactions\\\n0930: Extensions\n1000: Scaling SQLite\n1100: SQLite vs other databases\n1200: Lunch\n1300: Building real-world apps with SQLite\n1400: Wrap-up\n```\n\nAgenda:\n\nIntro to SQLite Conceptual\n\n- Brief history\n- It's everywhere\n- It runs on everything\n- Now it can scale up\n\nIntro to SQLite Execution\n\n- It's a file\n- It's still SQL\n- https://www.sqlite.org/omitted.html\n- https://www.sqlite.org/quirks.html\n- CLI\n- Chinook: https://github.com/lerocha/chinook-database\n\nSQL Basics\n\n- Selects\n- Inserts\n- Updates\n- Deletes\n- Order\n- Tables\n\nA bit more advanced\n\n- Relationships\n- Joins\n- Foreign Keys\n- Functions / Aggregations / Group\n- Query Performance\n- Transactions\n\nNode.js Exercise: Show movies\n\nParticular to SQLite\n\n- Dynamic Data Types\n- Limits\n- Triggers\n- Views\n\nExtensions\n\n- JSON\n- Full Text Search\n- Vector\n\nGrowing SQLite\n\n- Litestream / Backups\n- LiteFS / Network Access\n- SQLite vs libSQL\n\nTODO – subqueries\n","slug":"intro","title":"Intro","section":"Welcome","icon":"info-circle","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/01-welcome/A-intro.md","nextSlug":"/lessons/welcome/what-is-sqlite","prevSlug":null}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/welcome/what-is-sqlite.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/welcome/what-is-sqlite.json new file mode 100644 index 0000000..7428dde --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/welcome/what-is-sqlite.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"What is SQLite?"},"html":"

A Brief History

\n

Let's take a look back at where SQLite came from, as that can help us understand why SQLite was created in the first place and what place it has in the world of development. SQLite was written by Dr. D. Richard Hipp in 2000 while he was working on a contract for the United States Navy writing software for destroyers. He evaluated other database engines but ended up designing his own implementation.

\n

Now SQLite is open source (GitHub mirror) and released to the public domain but it is not open to contributions. Dr. Hipp keeps tight control on the software and only lets a few people contribute. Notably, SQLite does not use Git but instead Fossil. This is a nice little summary of how they differ. Fossil was created specifically by Dr. Hipp for the maintenance of SQLite (much like Git was for Linux.)

\n

In practice you don't really need to care. The thing to take away is that Dr. Hipp and the SQLite crew take immense care of the project and are very thoughtful in maintaining an amazing project.

\n

The Most Widely Deployed and Used Database Engine

\n

SQLite is the most used database engine by a huge margin.

\n

According to their website, SQLite is used in:

\n\n

Many of these will actually contain hundreds of individual SQLite databases. This leads them to estimate that there are over 1,000,000,000,000 (trillion) SQLite databases!

\n

Suffice to say, it's one of (if not the) most battle-tested piece of software. It runs everywhere and incredibly reliably so.

\n

What is SQLite not?

\n

SQLite is not a server. It is not a standalone piece of software but instead, a library designed to be attached to a running program. Importantly for you, especially if you've used something like Postgres, MySQL, MongoDB, or any other piece of database software before, you won't have a database server running. SQLite will just create a database file locally on your computer. Your SDK, CLI, or whatever method of accessing your database will just be pointed at that file and that's how you'll read and write to your database.

\n

Importantly SQLite has no network access. In and of itself, it doesn't handle ports, connections, etc. unlike most other database engines. SQLite was designed to work on the same computer as the app using it.

\n

SQLite is a single node. Whereas other databases have replication and consensus, SQLite is designed to be a node of one. You can still do backups and other things like that, but there's no concept of primary/leader and secondary/follower.

\n

SQLite is unbelievably fast. Because we don't have replication or networks to deal with, queries take microseconds instead of milliseconds.

\n

Can I use SQLite in production?

\n

Yes and no. No replication is a tight bottleneck and no network access means you need more tools to use SQLite as a distributed database. So, yes, you can, but you need more tools.

\n

The startup I work at, SQLite Cloud is a paid service that can do this for you (with a generous free tier). LiteFS is an open-source way to do it yourself.

\n","markdown":"\n## A Brief History\n\nLet's take a look back at where SQLite came from, as that can help us understand why SQLite was created in the first place and what place it has in the world of development. SQLite was written by Dr. D. Richard Hipp in 2000 while he was working on a contract for the United States Navy writing software for destroyers. He evaluated other database engines but ended up designing his own implementation.\n\nNow SQLite is [open source][timeline] ([GitHub mirror][gh]) and released to the public domain but it is not open to contributions. Dr. Hipp keeps tight control on the software and only lets a few people contribute. Notably, SQLite does not use Git but instead Fossil. [This is a nice little summary of how they differ][diff]. Fossil was created specifically by Dr. Hipp for the maintenance of SQLite (much like Git was for Linux.)\n\nIn practice you don't really need to care. The thing to take away is that Dr. Hipp and the SQLite crew take immense care of the project and are very thoughtful in maintaining an amazing project.\n\n## The Most Widely Deployed and Used Database Engine\n\n[SQLite is the most used database engine by a huge margin.][most-used]\n\nAccording to their website, SQLite is used in:\n\n- Every Android device\n- Every iPhone and iOS device\n- Every Mac\n- Every Windows10 machine\n- Every Firefox, Chrome, and Safari web browser\n- Every instance of Skype\n- Every instance of iTunes\n- Every Dropbox client\n- Every TurboTax and QuickBooks\n- PHP and Python\n- Most television sets and set-top cable boxes\n- Most automotive multimedia systems\n- Countless millions of other applications\n\nMany of these will continue actually hundreds of individual SQLite databases. This leads them to estimate that there are over 1,000,000,000,000 (trillion) SQLite databases!\n\nSuffice to say, it's one of (if not **the**) most battle-tested piece of software. 
It runs everywhere and incredibly reliably so.\n\n## What is SQLite not?\n\nSQLite is not a server. It is not a standalone piece of software but instead, a library designed to be attached to a running program. Importantly for you, especially if you've used something like Postgres, MySQL, MongoDB, or any other piece of database software before, you won't have a database _server_ running. SQLite will just create a database file locally on your computer. Your SDK, CLI, or whatever method of accessing your database will just be pointed at that file and that's how you'll read and write to your database.\n\nImportantly **SQLite has no network access**. In and of itself, it doesn't handle ports, connections, etc. unlike most other database engines. SQLite was designed to work on the same computer as the app using it.\n\nSQLite is a single node. Whereas other databases have replication and consensus, SQLite is designed to be a node of one. You can still do backups and other things like that, but there's no concept of primary/leader and secondary/follower.\n\nSQLite is unbelievably fast. Because we don't have replication or networks to deal with, queries take microseconds instead of milliseconds.\n\n## Can I use SQLite in production?\n\nYes and no. No replication is a tight bottleneck and no network access means you need more tools to use SQLite as a distributed database. So, yes, you can, but you need more tools.\n\nThe startup I work at, [SQLite Cloud][cloud] is a paid service that can do this for you (with a generous free tier). 
[LiteFS][litefs] is an open-source way to do it yourself.\n\n[timeline]: https://sqlite.org/src/timeline\n[gh]: https://github.com/sqlite/sqlite?tab=readme-ov-file\n[diff]: https://www.fossil-scm.org/home/doc/trunk/www/fossil-v-git.wiki#devorg\n[most-used]: https://sqlite.org/mostdeployed.html\n[cloud]: https://sqlitecloud.io?ref=fem\n[litefs]: https://github.com/superfly/litefs\n","slug":"what-is-sqlite","title":"What is SQLite?","section":"Welcome","icon":"info-circle","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/01-welcome/B-what-is-sqlite.md","nextSlug":"/lessons/running-sqlite/installing-sqlite","prevSlug":"/lessons/welcome/intro"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/what-is-unique-to-sqlite/flexible-typing.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/what-is-unique-to-sqlite/flexible-typing.json new file mode 100644 index 0000000..8c88699 --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/what-is-unique-to-sqlite/flexible-typing.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"

This one is controversial but SQLite is not rigid at all about what data it will store in what columns. It really only has four data types, and it then coerces all of the allowed SQL data types into those data types.

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
Column DatatypeTypes Allowed In That Column
INTEGERINTEGER, REAL, TEXT, BLOB
REALREAL, TEXT, BLOB
TEXTTEXT, BLOB
BLOBINTEGER, REAL, TEXT, BLOB
\n

What is indeed nice about this is that SQLite will take most SQL queries written for other databases (like MySQL, Postgres, etc.) and it will work for SQLite as SQLite will just coerce most things to a TEXT data type.

\n

A common one in other databases is VARCHAR and SQLite will happily accept that and just make it a text field. VARCHAR usually comes with a text limit (like this field can only be 255 characters long) and SQLite will not respect those limits. So you can create a table with VARCHAR(255) and then just insert a whole page's worth of text into it, SQLite does not care.

\n

That said, SQLite did add a STRICT table feature in 2021 that allows SQLite to be slightly more mindful of types. However the core team strongly believes this to be unnecessary.

\n

If you want to read more, the core team wrote their thoughts out here.

\n

Lacked data types

\n

Critically, there is no date type which can prove challenging sometimes. You'll either store some sort of standard date string or just UNIX time.

\n

There's also no Boolean. You'll represent these as INTEGERS 0 and 1. TRUE and FALSE are just aliases of 0 and 1.

\n","markdown":"This one is controversial but SQLite is not rigid at all about what data it will store in what columns. It really only has four data types, and it then it coerces all of the allowed SQL data types into those data types.\n\n| Column Datatype | Types Allowed In That Column |\n| --------------- | ---------------------------- |\n| INTEGER | INTEGER, REAL, TEXT, BLOB |\n| REAL | REAL, TEXT, BLOB |\n| TEXT | TEXT, BLOB |\n| BLOB | INTEGER, REAL, TEXT, BLOB |\n\nWhat is indeed nice about this is that SQLite will take most SQL queries written for other databases (like MySQL, Postgres, etc.) and it will work for SQLite as SQLite will just coerce most things to a TEXT data type.\n\nA common one in other databases is VARCHAR and SQLite will happily accept that and just make it a text field. VARCHAR usually comes with a text limit (like this field can only be 255 characters long) and SQLite will not respect those limits. So you can create a table with VARCHAR(255) and then just insert a whole page's worth of text into it, SQLite does not care.\n\nThat said, SQLite did add a STRICT table feature in 2021 that allows SQLite to be slightly more mindful of types. However the core strongly believes this to be not the case.\n\nIf you want to read more, [the core team wrote their thoughts out here][sqlite].\n\n## Lacked data types\n\nCritically, there is no date type which can prove challenging sometimes. You'll either store some sort of date standard string of just UNIX time.\n\nThere's also no Boolean. You'll represent these as INTEGERS 0 and 1. 
TRUE and FALSE are just aliases of 0 and 1.\n\n[sqlite]: https://www.sqlite.org/flextypegood.html\n","slug":"flexible-typing","title":"Flexible Typing","section":"What is Unique to SQLite","icon":"fingerprint","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/06-what-is-unique-to-sqlite/A-flexible-typing.md","nextSlug":"/lessons/what-is-unique-to-sqlite/limits-of-sqlite","prevSlug":"/lessons/build-a-project-with-nodejs-and-sqlite/alternatives-to-sqlite3"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/what-is-unique-to-sqlite/limits-of-sqlite.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/what-is-unique-to-sqlite/limits-of-sqlite.json new file mode 100644 index 0000000..5a679ba --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/what-is-unique-to-sqlite/limits-of-sqlite.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{"title":"Limits of SQLite"},"html":"

SQLite has unique limits in what it can and can't do. Nearly all of them are overly generous and you should never hit their ceiling and if you do, you are doing something wrong.

\n

Row size

\n

Max length of text: 1GB

\n

Each row can be 1 GB in size. If you're reaching anything remotely close to this, you need a different storage strategy, probably using something like S3 to store large objects and then store the metadata using SQLite.

\n

Columns on a table

\n

Max amount of columns on a table: 2,000 (and up to 32,000) columns

\n

You can have 2,000 columns on a table but man, you can probably architect your data differently if you have 2,000 columns. A JSON column could help a lot or just move to a document based database like MongoDB.

\n

If you want to, you can recompile SQLite yourself and remove the limit and have up to 32,767 columns but please, please do not do that.

\n

Length of SQL query

\n

Max length of a SQL query, 1,000,000,000 characters

\n

Look, if you can write a query that long, I'm just in awe. At that point you're just coding your whole app as SQL.

\n

Tables in a join

\n

Max number of tables in a join: 64 tables

\n

This actually feels like one you could hit though it'd be weird. SQLite uses 8 bytes to do its lookup table for joins and therefore it has a hard limit of 64 tables that it can reference. If you have a query doing 64 joins, you can rearchitect your data to not need so many joins or just do two queries.

\n

Length of LIKE terms

\n

Max length of LIKE globs: 50,000 characters

\n

These "globs" (or the terms you give to LIKE) can have some performance implications if they're too long so they limit these to 50,000 characters. Still seems plenty long but good advice here is to limit how long of a term users can give to SQLite so as not to slow down your database.

\n

Attached databases

\n

Max number of attached databases: 10 databases

\n

We haven't done this, but what if you have two database files and want to query them like one database? SQLite lets you with ATTACH. This does make it hard to do things like have an IoT database per device and to address with ATTACH as it will only support 10. You're better off using some sort of data ingestion pipeline to something like BigQuery or Snowflake and then querying that.

\n

Size of database / rows in a database

\n\n

In theory the database can grow to 281TB of data (which is effectively unlimited for all but the biggest problems in data.) Likewise with number of rows. The database system can support 2^64 rows but in practice that isn't possible because with the smallest amount of data possible you'll run out of space around 2e+13 rows. Or, in other words, more than enough.

\n

To read through more of the limits, check here. I just wanted to go through these with you to demonstrate that SQLite can scale its data capabilities as much as you can. It is powerful enough to tackle just about any problem.

\n","markdown":"\nSQLite has unique limits in what it can and can't do. Nearly all of them are overly generous and you should _never_ hit their ceiling and if you do, you are doing something wrong.\n\n## Row size\n\nMax length of text: 1GB\n\nEach row can be a 1 GB of size. If you're reaching anything remotely close to this, you need a different storage strategy, probably using something like [S3][s3] to store large object and then store the metadata using SQLite.\n\n## Columns on a table\n\nMax amount of columns on a table: 2,000 (and up to 32,000) columns\n\nYou can have 2,000 columns on a table but man, you can probably architect your data differently if you have 2,000 columns. A JSON column could help a lot or just move to a document based database like MongoDB.\n\nIf you want to, you can recompile SQLite yourself and remove the limit and have up to 32,767 columns but please, please do not do that.\n\n## Length of SQL query\n\nMax length of a SQL query, 1,000,000,000 characters\n\nLook, if you can write a query that long, I'm just in awe. At that point you're just coding your whole app as SQL.\n\n## Tables in a join\n\nMax number of tables in a join: 64 tables\n\nThis actually feels like one you _could_ hit though it'd be weird. SQLite uses 8 bytes to do its lookup table for joins and therefore it has a hard limit of 64 tables that it can reference. If you have a query doing 64 joins, you can rearchitect your data to not need so many joins or just do two queries.\n\n## Length of LIKE terms\n\nMax length of LIKE globs: 50,000 characters\n\nWrite \"globs\" (or the terms you give to LIKE) can have some perfomance implications if they're too long so they limt these to 50,000 characters. 
Still seems plenty long but good advice here is to limit how long of term users can give to SQLite so not to slow down your database.\n\n## Attached databases\n\nMax number of attached database: 10 databases\n\nWe haven't done this, but what if you have two database files and want to query them like one database? SQLite lets you with ATTACH. This does make it hard to do things like have an IoT database per device and to address with ATTACH as it will only support 10. You're better off using some sort of data ingestion pipeline to something like BigQuery or Snowflake and then querying that.\n\n## Size of database / rows in a database\n\n- Size of a database: 281TB\n- Rows in a database: 1.8e+19 rows (theoretically)\n- Rows in a database: 2e+13 rows (effectively)\n\nIn theory the database can grow to 281TB of data (which is effectively unlimited for all but the biggest problems in data.) Likewise with number of rows. The database systems can support 2^64 rows but in practice that isn't possible becauase with the smallest amount of data possible you'll run out of space around 2e+13 rows. Or, in other words, more than enough.\n\nTo read through more of the limits, [check here][limits]. I just wanted to go through these with you to demonstrate that SQLite can scale its data capabilities as much as you can. 
It is powerful enough to tackle just about any problem.\n\n[limits]: https://www.sqlite.org/limits.html\n[s3]: https://aws.amazon.com/s3/\n","slug":"limits-of-sqlite","title":"Limits of SQLite","section":"What is Unique to SQLite","icon":"fingerprint","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/06-what-is-unique-to-sqlite/B-limits-of-sqlite.md","nextSlug":"/lessons/what-is-unique-to-sqlite/views","prevSlug":"/lessons/what-is-unique-to-sqlite/flexible-typing"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/what-is-unique-to-sqlite/views.json b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/what-is-unique-to-sqlite/views.json new file mode 100644 index 0000000..62f31fc --- /dev/null +++ b/_next/data/9VsfZMKTXY2A0On9IrHwO/lessons/what-is-unique-to-sqlite/views.json @@ -0,0 +1 @@ +{"pageProps":{"post":{"attributes":{},"html":"

Views are not unique to SQLite but SQLite certainly has its own version of them.

\n

The idea of a view is that it is an abstract or virtual table that makes use of other data in the table. A really good use case for us is that getting a list of track names with artists and albums requires two joins right now. Any time we want to get that information, we have to write a query with two joins.

\n
SELECT\n  t.TrackId as id,\n  ar.Name as artist,\n  al.Title as album,\n  t.Name as track\nFROM\n  Track t\n\nJOIN\n  Album al\nON\n  t.AlbumId = al.AlbumId\n\nJOIN\n  Artist ar\nON\n  ar.ArtistId = al.ArtistId\n\nLIMIT 5;\n
\n

This is a super usable set of data for us to list out all the tracks in a database. Now if it only wasn't so burdensome to query. Well, spoilers, it doesn't have to be. We can make a view that automatically does this for us and presents it as a pretty table.

\n
CREATE VIEW\n  easy_tracks\nAS\n\nSELECT\n  t.TrackId as id,\n  ar.Name as artist,\n  al.Title as album,\n  t.Name as track\nFROM\n  Track t\n\nJOIN\n  Album al\nON\n  t.AlbumId = al.AlbumId\n\nJOIN\n  Artist ar\nON\n  ar.ArtistId = al.ArtistId;\n
\n

Now go ahead and SELECT * FROM easy_tracks LIMIT 15; to see what we did. Cool, right? We can even start doing things like joins to this table as well. If you find yourself constantly doing the same joins (like we have this whole course) views can be your friend.

\n
\n

SQLite does not have materialized views like Postgres does. That is to say, we cannot tell SQLite "run this query and store the results" like you can in Postgres. SQLite is always querying the live data underneath.

\n

SQLite also does not support inserting into views like other databases do.

\n
\n","markdown":"Views are not unique to SQLite but SQLite certainly has its own version of them.\n\nThe idea of a view is that it is an abstract or virtual table that makes use of other data in the table. A really good use case for us is that getting a list of track names with artists and albums requires two joins right now. Any time we want to get that information, we have two to write a query with two joins.\n\n```sql\nSELECT\n t.TrackId as id,\n ar.Name as artist,\n al.Title as album,\n t.Name as track\nFROM\n Track t\n\nJOIN\n Album al\nON\n t.AlbumId = al.AlbumId\n\nJOIN\n Artist ar\nON\n ar.ArtistId = al.ArtistId\n\nLIMIT 5;\n```\n\nThis is a super usable set of data for us to list out all the tracks in a database. Now if it only wasn't so burdensome to query. Well, spoilers, it doesn't have to be. We can make a view that automatically does this for us and presents it as a pretty table.\n\n```sql\nCREATE VIEW\n easy_tracks\nAS\n\nSELECT\n t.TrackId as id,\n ar.Name as artist,\n al.Title as album,\n t.Name as track\nFROM\n Track t\n\nJOIN\n Album al\nON\n t.AlbumId = al.AlbumId\n\nJOIN\n Artist ar\nON\n ar.ArtistId = al.ArtistId;\n```\n\nNow go ahead and `SELECT * FROM easy_tracks LIMIT 15;` to see what we did. Cool, right? We can even start doing things like joins to this table as well. If you find yourself constantly doing the same joins (like we have this whole course) views can your friend.\n\n> SQLite does not materialized views like Postgres. That is to say, we cannot tell SQLite \"run this query and store the results\" like you can in Postgres. 
SQLite is always querying the live data underneath.\n>\n> SQLite also does not support inserting into views like other databases do.\n","slug":"views","title":"Views","section":"What is Unique to SQLite","icon":"fingerprint","filePath":"/home/runner/work/complete-intro-to-sqlite/complete-intro-to-sqlite/lessons/06-what-is-unique-to-sqlite/C-views.md","nextSlug":"/lessons/performance/explain","prevSlug":"/lessons/what-is-unique-to-sqlite/limits-of-sqlite"}},"__N_SSG":true} \ No newline at end of file diff --git a/_next/static/9VsfZMKTXY2A0On9IrHwO/_buildManifest.js b/_next/static/9VsfZMKTXY2A0On9IrHwO/_buildManifest.js new file mode 100644 index 0000000..d544382 --- /dev/null +++ b/_next/static/9VsfZMKTXY2A0On9IrHwO/_buildManifest.js @@ -0,0 +1 @@ +self.__BUILD_MANIFEST={__rewrites:{afterFiles:[],beforeFiles:[],fallback:[]},"/":["static/chunks/pages/index-2a6b1d427e655e1b.js"],"/_error":["static/chunks/pages/_error-77823ddac6993d35.js"],"/lessons/[section]/[slug]":["static/chunks/pages/lessons/[section]/[slug]-f133ec3e69124099.js"],sortedPages:["/","/_app","/_error","/lessons/[section]/[slug]"]},self.__BUILD_MANIFEST_CB&&self.__BUILD_MANIFEST_CB(); \ No newline at end of file diff --git a/_next/static/9VsfZMKTXY2A0On9IrHwO/_ssgManifest.js b/_next/static/9VsfZMKTXY2A0On9IrHwO/_ssgManifest.js new file mode 100644 index 0000000..10f162a --- /dev/null +++ b/_next/static/9VsfZMKTXY2A0On9IrHwO/_ssgManifest.js @@ -0,0 +1 @@ +self.__SSG_MANIFEST=new Set(["\u002F","\u002Flessons\u002F[section]\u002F[slug]"]);self.__SSG_MANIFEST_CB&&self.__SSG_MANIFEST_CB() \ No newline at end of file diff --git a/_next/static/chunks/framework-ecc4130bc7a58a64.js b/_next/static/chunks/framework-ecc4130bc7a58a64.js new file mode 100644 index 0000000..3b13c66 --- /dev/null +++ b/_next/static/chunks/framework-ecc4130bc7a58a64.js @@ -0,0 +1,33 @@ +"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[774],{4448:function(e,n,t){/** + * @license React + * react-dom.production.min.js 
+ * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */var r,l,a,u,o,i,s=t(7294),c=t(3840);function f(e){for(var n="https://reactjs.org/docs/error-decoder.html?invariant="+e,t=1;t