diff --git a/.gitignore b/.gitignore index 14de9c9..ec63472 100644 --- a/.gitignore +++ b/.gitignore @@ -7,10 +7,13 @@ paths.txt .DS_Store secret -# Llama2 models +# llama2 models models stories260K +# llama2 metrics +etl + # Windows .vs CppProperties.json diff --git a/icpp_llama2/requirements.txt b/icpp_llama2/requirements.txt index d27719e..2052ff2 100644 --- a/icpp_llama2/requirements.txt +++ b/icpp_llama2/requirements.txt @@ -1,2 +1,3 @@ +-r scripts/requirements.txt icpp-pro ic-py diff --git a/icpp_llama2/scripts/llama2_metrics_dashboards.ipynb b/icpp_llama2/scripts/llama2_metrics_dashboards.ipynb new file mode 100644 index 0000000..59b6184 --- /dev/null +++ b/icpp_llama2/scripts/llama2_metrics_dashboards.ipynb @@ -0,0 +1,480 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "56c2ba74-395f-4da7-a235-3ba403a35efa", + "metadata": {}, + "source": [ + "# llama2 metrics dashboard" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "145c289a-c35a-4525-b06b-ab766d563b97", + "metadata": {}, + "source": [ + "---\n", + "Verify we are in the Conda environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5cab4874-3ba3-4e94-9c85-6ae4498c8345", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "print(sys.executable)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "d8d5cade-f5da-45f4-a822-ba99fb50aad7", + "metadata": {}, + "source": [ + "# Setup" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "efc84945-5417-4393-8c42-a947109a67e0", + "metadata": {}, + "source": [ + "---\n", + "## Import python packages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "503de8df-1a29-4a74-90ee-423bf7a33502", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "import pandas as pd\n", + "import jupyter_black\n", + "import matplotlib.pyplot as plt\n", + "import seaborn 
as sns" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "8b406b6f-f39c-4af5-b6dc-dbbe4a1a738e", + "metadata": {}, + "source": [ + "---\n", + "Load [jupyter_black](https://github.com/n8henrie/jupyter-black), so every cell formats itself with black when you run it" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21aca708-e41f-449d-a527-61ae4a0bf8e0", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "jupyter_black.load()" + ] + }, + { + "cell_type": "markdown", + "id": "1ecd91b1", + "metadata": {}, + "source": [ + "---\n", + "## Metadata of model & canister" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7efa948a", + "metadata": {}, + "outputs": [], + "source": [ + "# Some metadata about the model\n", + "data_set = \"TinyStories\"\n", + "model_size = \"15M\"\n", + "finetuned = \"LLM\"\n", + "\n", + "# The canister information\n", + "canister_name = \"llama2\"\n", + "candid_path = \"../src/llama2.did\"\n", + "network = \"ic\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "032af488-f915-4cce-ae6d-dcd5444b0574", + "metadata": { + "tags": [] + }, + "source": [ + "---\n", + "## Read the data from disk" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6f448ecc-20bc-473e-9ab4-93de2a7331ee", + "metadata": {}, + "outputs": [], + "source": [ + "sorted(Path(\".\").glob(\"etl/*\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ec669862-e85f-4fd4-936c-653eed358706", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Read the latest data\n", + "\n", + "etl_path = sorted(Path(\".\").glob(\"etl/*\"))[-1]\n", + "\n", + "print(f\"Reading data from: {etl_path}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6fb43493-93ca-48c7-b219-cb3864151086", + "metadata": {}, + "outputs": [], + "source": [ + "df_user_ids = pd.read_parquet(\n", + " etl_path\n", + " / 
f\"{canister_name}_{data_set}_{model_size}_{finetuned}_user_ids.parquet.gzip\",\n", + " engine=\"fastparquet\",\n", + ")\n", + "df_chats = pd.read_parquet(\n", + " etl_path\n", + " / f\"{canister_name}_{data_set}_{model_size}_{finetuned}_chats.parquet.gzip\",\n", + " engine=\"fastparquet\",\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "0ac1a41d-270a-4e55-84b1-3a5e86548109", + "metadata": {}, + "source": [ + "# Metrics" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "8ab4f4d6-dcd5-4f86-a279-e64699091bb7", + "metadata": { + "toc-hr-collapsed": true + }, + "source": [ + "## Totals" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d69d065e-132e-4ae3-b654-fd7957e582b9", + "metadata": {}, + "outputs": [], + "source": [ + "user_count = df_user_ids.shape[0]\n", + "chat_count = df_chats.shape[0]\n", + "\n", + "df_totals = pd.DataFrame(\n", + " {\n", + " \"Totals\": [\n", + " user_count,\n", + " chat_count,\n", + " ]\n", + " },\n", + " index=[\n", + " \"user_count\",\n", + " \"chat_count\",\n", + " ],\n", + ")\n", + "\n", + "pd.set_option(\"display.max_rows\", None)\n", + "\n", + "df_totals" + ] + }, + { + "cell_type": "markdown", + "id": "1a7c422d", + "metadata": {}, + "source": [ + "---\n", + "## Setup dark background for the plots" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "76dc68f7", + "metadata": {}, + "outputs": [], + "source": [ + "%matplotlib inline\n", + "\n", + "# Setting dark background for the plots\n", + "sns.set_theme(\n", + " style=\"darkgrid\", rc={\"axes.facecolor\": \"#282828\", \"grid.color\": \"#282828\"}\n", + ")\n", + "plt.rcParams[\"axes.facecolor\"] = \"black\"\n", + "plt.rcParams[\"figure.facecolor\"] = \"black\"\n", + "plt.rcParams[\"text.color\"] = \"white\"\n", + "plt.rcParams[\"axes.labelcolor\"] = \"white\"\n", + "plt.rcParams[\"xtick.color\"] = \"white\"\n", + "plt.rcParams[\"ytick.color\"] = \"white\"" + ] + }, + { + "cell_type": 
"markdown", + "id": "85850402", + "metadata": {}, + "source": [ + "---\n", + "## Copy df_chats into df" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f229266c", + "metadata": {}, + "outputs": [], + "source": [ + "df = df_chats.copy()" + ] + }, + { + "cell_type": "markdown", + "id": "15901415", + "metadata": {}, + "source": [ + "---\n", + "## Convert time in ns to more human-readable datetime object" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a4d15b0", + "metadata": {}, + "outputs": [], + "source": [ + "# Convert time in ns to more human-readable datetime object\n", + "df[\"chat_start_datetime\"] = pd.to_datetime(df[\"chat_start_time\"], unit=\"ns\")" + ] + }, + { + "cell_type": "markdown", + "id": "e1cb83df", + "metadata": {}, + "source": [ + "---\n", + "## Number of chats over time (line plot)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "486e6697", + "metadata": {}, + "outputs": [], + "source": [ + "# Group by date and count the number of chats started per day\n", + "daily_usage = df.groupby(\"chat_start_date\").size()\n", + "\n", + "# Calculate accumulated sum of chats\n", + "cumulative_usage = daily_usage.cumsum()\n", + "\n", + "# Plot\n", + "plt.figure(figsize=(12, 6))\n", + "cumulative_usage.plot()\n", + "plt.title(\"Cumulative Usage Over Time\")\n", + "plt.xlabel(\"Date\")\n", + "plt.ylabel(\"Total Number of Chats Started\")\n", + "plt.tight_layout()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "44c8f37a", + "metadata": {}, + "source": [ + "---\n", + "## Number of users over time (bar plot)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7ef69308", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(15, 7))\n", + "\n", + "# Grouping by date and counting distinct users\n", + "users_per_day = df.groupby(\"chat_start_date\")[\"user_id\"].nunique().reset_index()\n", + "\n", + "# Plotting with consistent color\n", + 
"sns.barplot(data=users_per_day, x=\"chat_start_date\", y=\"user_id\", color=\"skyblue\")\n", + "plt.xticks(rotation=45)\n", + "plt.ylabel(\"Number of Users\")\n", + "plt.title(\"Number of Users Over Time\")\n", + "plt.tight_layout()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "829956d3", + "metadata": {}, + "source": [ + "---\n", + "## Number of chats over time" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "acc9bf22", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(15, 7))\n", + "\n", + "# Extracting just the date from the datetime for daily aggregation (if not done previously)\n", + "df[\"chat_start_date\"] = df[\"chat_start_datetime\"].dt.date\n", + "\n", + "# Plotting with consistent color\n", + "sns.countplot(\n", + " data=df,\n", + " x=\"chat_start_date\",\n", + " order=sorted(df[\"chat_start_date\"].unique()),\n", + " color=\"skyblue\",\n", + ")\n", + "plt.xticks(rotation=45)\n", + "plt.title(\"Number of Chats Over Time\")\n", + "plt.tight_layout()\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "9fe2312e", + "metadata": {}, + "source": [ + "---\n", + "## Total steps per chat over time" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e85cacc8", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(15, 7))\n", + "\n", + "# Grouping by date and summing the total chat steps\n", + "chat_steps_per_day = (\n", + " df.groupby(\"chat_start_date\")[\"chat_total_steps\"].sum().reset_index()\n", + ")\n", + "\n", + "# Plotting\n", + "sns.lineplot(data=chat_steps_per_day, x=\"chat_start_date\", y=\"chat_total_steps\")\n", + "plt.xticks(rotation=45)\n", + "plt.title(\"Total Chat Steps Over Time\")\n", + "plt.tight_layout()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f0bc73c6", + "metadata": {}, + "outputs": [], + "source": [ + "# Set the style of seaborn for better visualization\n", + 
"sns.set_style(\"whitegrid\")\n", + "\n", + "df = df_chats.copy()\n", + "\n", + "\n", + "\n", + "# Extract the date from the datetime column for grouping\n", + "df[\"chat_start_date\"] = df[\"chat_start_datetime\"].dt.date\n", + "\n", + "# Group by date and count the number of chats started per day\n", + "daily_usage = df.groupby(\"chat_start_date\").size()\n", + "\n", + "# Plot\n", + "plt.figure(figsize=(12, 6))\n", + "daily_usage.plot()\n", + "plt.title(\"Usage Over Time\")\n", + "plt.xlabel(\"Date\")\n", + "plt.ylabel(\"Number of Chats Started\")\n", + "plt.tight_layout()\n", + "plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "zendesk-with-python", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + }, + "toc-autonumbering": false, + "toc-showcode": false, + "vscode": { + "interpreter": { + "hash": "b140d43f3ce11cc3fea086c71f194b3ccd428305c7a6ae48363426b648cd63fd" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/icpp_llama2/scripts/llama2_metrics_etl.ipynb b/icpp_llama2/scripts/llama2_metrics_etl.ipynb new file mode 100644 index 0000000..722c809 --- /dev/null +++ b/icpp_llama2/scripts/llama2_metrics_etl.ipynb @@ -0,0 +1,410 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "56c2ba74-395f-4da7-a235-3ba403a35efa", + "metadata": {}, + "source": [ + "# llama2 metrics etl" + ] + }, + { + "cell_type": "markdown", + "id": "145c289a-c35a-4525-b06b-ab766d563b97", + "metadata": {}, + "source": [ + "---\n", + "Verify we are in the Conda environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5cab4874-3ba3-4e94-9c85-6ae4498c8345", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "print(sys.executable)" 
+ ] + }, + { + "cell_type": "markdown", + "id": "efc84945-5417-4393-8c42-a947109a67e0", + "metadata": {}, + "source": [ + "---\n", + "## Import python packages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "503de8df-1a29-4a74-90ee-423bf7a33502", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "import shutil\n", + "from datetime import datetime\n", + "import pandas as pd\n", + "import jupyter_black\n", + "import pprint" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "69747068", + "metadata": {}, + "outputs": [], + "source": [ + "from ic_py_canister import get_canister" + ] + }, + { + "cell_type": "markdown", + "id": "8b406b6f-f39c-4af5-b6dc-dbbe4a1a738e", + "metadata": {}, + "source": [ + "---\n", + "Load [jupyter_black](https://github.com/n8henrie/jupyter-black), so every cell formats itself with black when you run it" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21aca708-e41f-449d-a527-61ae4a0bf8e0", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "jupyter_black.load()" + ] + }, + { + "cell_type": "markdown", + "id": "950e8fbd", + "metadata": {}, + "source": [ + "---\n", + "## Metadata of model & canister" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6d92482", + "metadata": {}, + "outputs": [], + "source": [ + "# Some metadata about the model\n", + "data_set = \"TinyStories\"\n", + "model_size = \"15M\"\n", + "finetuned = \"LLM\"\n", + "\n", + "# The canister information\n", + "canister_name = \"llama2\"\n", + "candid_path = \"../src/llama2.did\"\n", + "network = \"ic\"" + ] + }, + { + "cell_type": "markdown", + "id": "c5a16a30-927a-452b-8eb1-0795a5ae8a93", + "metadata": { + "tags": [] + }, + "source": [ + "---\n", + "## Get ic-py based Canister instance" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "88c4790f-cf9e-4d92-9ff1-1c56aacf989c", + "metadata": {}, + 
"outputs": [], + "source": [ + "canister_llama2 = get_canister(canister_name, candid_path, network)" + ] + }, + { + "cell_type": "markdown", + "id": "236918ea", + "metadata": {}, + "source": [ + "---\n", + "## check health (liveness)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1bcf2f18", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"--\\nChecking liveness of canister (did we deploy it!)\")\n", + "response = canister_llama2.health()\n", + "if response == [True]:\n", + " print(\"Ok!\")\n", + "else:\n", + " print(\"Not OK, response is:\")\n", + " print(response)" + ] + }, + { + "cell_type": "markdown", + "id": "032af488-f915-4cce-ae6d-dcd5444b0574", + "metadata": { + "tags": [] + }, + "source": [ + "---\n", + "## Extract" + ] + }, + { + "cell_type": "markdown", + "id": "27d7acfe", + "metadata": {}, + "source": [ + "### Number of users" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a2b1524b", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"--\\nGet total number of users\")\n", + "response = canister_llama2.get_user_count()\n", + "\n", + "user_count = response[0]\n", + "print(f\"user_count : {user_count}\")" + ] + }, + { + "cell_type": "markdown", + "id": "99bda590", + "metadata": {}, + "source": [ + "### user ids" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ac32437", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"--\\nGet the user ids\")\n", + "response = canister_llama2.get_user_ids()\n", + "\n", + "df_user_ids = pd.DataFrame(response[0], columns=[\"user_id\"])\n", + "\n", + "pd.set_option(\"display.max_rows\", None)\n", + "pd.set_option(\"display.max_colwidth\", 80)\n", + "df_user_ids" + ] + }, + { + "cell_type": "markdown", + "id": "2eaed990", + "metadata": {}, + "source": [ + "### user metadata" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bb0301c4", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"--\\nGet 
the user metadata\")\n", +    "\n", +    "# Get the raw data & store it in a list of dictionaries\n", +    "data_list = []\n", +    "\n", +    "for user_id in df_user_ids[\"user_id\"]:\n", +    "    response = canister_llama2.get_user_metadata(user_id)\n", +    "    data_list.append({\"user_id\": user_id, \"data\": response})\n", +    "\n", +    "pprint.pprint(data_list[:3])" +   ] +  }, +  { +   "cell_type": "markdown", +   "id": "8b97d1b0", +   "metadata": {}, +   "source": [ +    "## Transform" +   ] +  }, +  { +   "cell_type": "code", +   "execution_count": null, +   "id": "d8dbc076", +   "metadata": {}, +   "outputs": [], +   "source": [ +    "# Transform the data into 'long format':\n", +    "# -> each chat ends up on its own row, with user_id as a column\n", +    "def transform_data(data_list):\n", +    "    normalized_data = []\n", +    "\n", +    "    for item in data_list:\n", +    "        user_id = item[\"user_id\"]\n", +    "        chat_start_times = item[\"data\"][0]\n", +    "        chat_total_steps = item[\"data\"][1]\n", +    "\n", +    "        # Ensure that data is paired correctly by zipping the lists\n", +    "        # If one list is longer than the other, it will be truncated to match the shorter list\n", +    "        for start_time, total_steps in zip(chat_start_times, chat_total_steps):\n", +    "            normalized_data.append(\n", +    "                {\n", +    "                    \"user_id\": user_id,\n", +    "                    \"chat_start_time\": start_time,\n", +    "                    \"chat_total_steps\": total_steps,\n", +    "                }\n", +    "            )\n", +    "\n", +    "    return normalized_data\n", +    "\n", +    "\n", +    "normalized_data = transform_data(data_list)\n", +    "\n", +    "print(f\"Total number of chats : {len(normalized_data)}\")\n", +    "pprint.pprint(normalized_data[:3])" +   ] +  }, +  { +   "cell_type": "code", +   "execution_count": null, +   "id": "f9c292c3", +   "metadata": {}, +   "outputs": [], +   "source": [ +    "# Store it in a dataframe where each row represents a chat\n", +    "df_chats = pd.DataFrame(normalized_data)\n", +    "\n", +    "# Add columns for the llama2 model used\n", +    "df_chats[\"data_set\"] = data_set\n", +    "df_chats[\"model_size\"] = model_size\n", +    "df_chats[\"finetuned\"] 
= finetuned\n", + "\n", + "# Print it\n", + "print(f\"Total number of chats : {df_chats.shape[0]}\")\n", + "pd.set_option(\"display.max_rows\", None)\n", + "pd.set_option(\"display.max_colwidth\", 80)\n", + "df_chats" + ] + }, + { + "cell_type": "markdown", + "id": "9deaca86-43f9-44ab-ba96-6f7d3ff23cd7", + "metadata": {}, + "source": [ + "## Load" + ] + }, + { + "cell_type": "markdown", + "id": "b4537f33-608c-4556-80ea-7685ceada4d5", + "metadata": {}, + "source": [ + "### Create etl path" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "722be854-3ec4-46b7-967e-f2aa89ad36a2", + "metadata": {}, + "outputs": [], + "source": [ + "etl_path = Path(f\"etl/etl-{datetime.today().strftime('%Y-%m-%d')}\")\n", + "\n", + "if etl_path.exists():\n", + " print(f\"Removing etl path: {etl_path}\")\n", + " shutil.rmtree(etl_path)\n", + "\n", + "print(f\"Creating etl path: {etl_path}\")\n", + "etl_path.mkdir(exist_ok=False)" + ] + }, + { + "cell_type": "markdown", + "id": "f7a12ca8-cfa0-4ea8-8e01-7e9828be9df4", + "metadata": {}, + "source": [ + "### Save to disk" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1a7272b3-fcac-43b1-a471-18c0ba38c6cd", + "metadata": {}, + "outputs": [], + "source": [ + "dfs = [df_user_ids, df_chats]\n", + "\n", + "stems = [\"user_ids\", \"chats\"]\n", + "\n", + "for df, stem in zip(dfs, stems):\n", + " csv = etl_path / f\"{canister_name}_{data_set}_{model_size}_{finetuned}_{stem}.csv\"\n", + " print(f\"Writing {csv}\")\n", + " df.to_csv(csv)\n", + "\n", + " parquet = (\n", + " etl_path\n", + " / f\"{canister_name}_{data_set}_{model_size}_{finetuned}_{stem}.parquet.gzip\"\n", + " )\n", + " print(f\"Writing {parquet}\")\n", + " df.to_parquet(parquet, engine=\"fastparquet\", compression=\"gzip\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "zendesk-with-python", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + 
}, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + }, + "toc-autonumbering": false, + "toc-showcode": false, + "vscode": { + "interpreter": { + "hash": "b140d43f3ce11cc3fea086c71f194b3ccd428305c7a6ae48363426b648cd63fd" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/icpp_llama2/scripts/requirements.txt b/icpp_llama2/scripts/requirements.txt new file mode 100644 index 0000000..9a677c3 --- /dev/null +++ b/icpp_llama2/scripts/requirements.txt @@ -0,0 +1,16 @@ +requests +pandas +pandas-stubs +jupyterlab +jupyterlab-lsp +jupyter-black +python-lsp-server[all] +python-dotenv +tabulate +black +mypy +pylint==2.13.9 +matplotlib +fastparquet +openpyxl +seaborn \ No newline at end of file diff --git a/icpp_llama2/scripts/upload.py b/icpp_llama2/scripts/upload.py index 93aca3c..2386c0c 100644 --- a/icpp_llama2/scripts/upload.py +++ b/icpp_llama2/scripts/upload.py @@ -74,7 +74,7 @@ def main() -> int: ) # --------------------------------------------------------------------------- - # geth ic-py based Canister instance + # get ic-py based Canister instance canister_llama2 = get_canister(canister_name, candid_path, network) # check health (liveness)