From 324715ccaf8f6df1e9afd12b6f79e802923ebe60 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 13 Feb 2024 00:54:31 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 notebooks/Figure-1.ipynb               |  7 +--
 notebooks/Figure-3.ipynb               |  9 ++--
 notebooks/Figure-5.ipynb               | 34 +++++++++----
 notebooks/Figure-6.ipynb               | 38 +++++++++-----
 notebooks/Inline-Statistics.ipynb      | 69 ++++++++++++++++++--------
 notebooks/Statistics.ipynb             | 22 +++++---
 notebooks/Supplementary-Figure-1.ipynb | 17 ++++---
 notebooks/Supplementary-Figure-2.ipynb |  8 +--
 notebooks/Supplementary-Table-1.ipynb  |  9 ++--
 notebooks/Supplementary-Table-2.ipynb  | 14 +++---
 10 files changed, 145 insertions(+), 82 deletions(-)

diff --git a/notebooks/Figure-1.ipynb b/notebooks/Figure-1.ipynb
index 6c74580..ae3c1a1 100644
--- a/notebooks/Figure-1.ipynb
+++ b/notebooks/Figure-1.ipynb
@@ -15,11 +15,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import json\n",
     "import os\n",
+    "\n",
     "import fsspec\n",
-    "import json\n",
-    "import numpy as np\n",
-    "import matplotlib.pyplot as plt"
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np"
    ]
   },
   {
diff --git a/notebooks/Figure-3.ipynb b/notebooks/Figure-3.ipynb
index 7803e7f..cc1eceb 100644
--- a/notebooks/Figure-3.ipynb
+++ b/notebooks/Figure-3.ipynb
@@ -15,13 +15,14 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import json\n",
     "import os\n",
+    "\n",
     "import fsspec\n",
-    "import json\n",
-    "import pandas as pd\n",
+    "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
-    "import statsmodels.formula.api as smf\n",
-    "import matplotlib.pyplot as plt"
+    "import pandas as pd\n",
+    "import statsmodels.formula.api as smf"
    ]
   },
   {
diff --git a/notebooks/Figure-5.ipynb b/notebooks/Figure-5.ipynb
index fd06de5..c933acb 100644
--- a/notebooks/Figure-5.ipynb
+++ b/notebooks/Figure-5.ipynb
@@ -15,12 +15,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import json\n",
     "import os\n",
+    "\n",
     "import fsspec\n",
-    "import json\n",
-    "import pandas as pd\n",
-    "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import pandas as pd\n",
     "from matplotlib import ticker"
    ]
   },
@@ -93,9 +94,15 @@
     "    np.percentile(data[key][\"delta_arbocs\"], 75) / get(key)[\"arbocs\"][\"issuance\"]\n",
     "    for key in data.keys()\n",
     "]\n",
-    "df[\"absolute_low\"] = [np.percentile(data[key][\"delta_arbocs\"], 5) for key in data.keys()]\n",
-    "df[\"absolute_med\"] = [np.percentile(data[key][\"delta_arbocs\"], 50) for key in data.keys()]\n",
-    "df[\"absolute_high\"] = [np.percentile(data[key][\"delta_arbocs\"], 95) for key in data.keys()]\n",
+    "df[\"absolute_low\"] = [\n",
+    "    np.percentile(data[key][\"delta_arbocs\"], 5) for key in data.keys()\n",
+    "]\n",
+    "df[\"absolute_med\"] = [\n",
+    "    np.percentile(data[key][\"delta_arbocs\"], 50) for key in data.keys()\n",
+    "]\n",
+    "df[\"absolute_high\"] = [\n",
+    "    np.percentile(data[key][\"delta_arbocs\"], 95) for key in data.keys()\n",
+    "]\n",
     "df[\"project_size\"] = [get(key)[\"arbocs\"][\"issuance\"] for key in data.keys()]\n",
     "df[\"cp_slag\"] = [get(key)[\"carbon\"][\"common_practice\"][\"value\"] for key in data.keys()]\n",
     "df[\"alternate_slag\"] = [np.percentile(data[key][\"alt_slag\"], 50) for key in data.keys()]\n",
@@ -117,8 +124,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def format_si(num, precision=0, suffixes=[\"\", \"K\", \"M\", \"G\", \"T\", \"P\"], show_suffix=False):\n",
-    "    m = sum([abs(num / 1000.0 ** x) >= 1 for x in range(1, len(suffixes))])\n",
+    "def format_si(\n",
+    "    num, precision=0, suffixes=[\"\", \"K\", \"M\", \"G\", \"T\", \"P\"], show_suffix=False\n",
+    "):\n",
+    "    m = sum([abs(num / 1000.0**x) >= 1 for x in range(1, len(suffixes))])\n",
     "    if show_suffix:\n",
     "        return f\"{num/1000.0**m:.{precision}f}{suffixes[m]}\"\n",
     "    else:\n",
@@ -154,7 +163,10 @@
     "        total.append(np.nansum([data[key][\"delta_arbocs\"][i] for key in data.keys()]))\n",
     "    total_arbocs = np.percentile(total, [5, 50, 95])\n",
     "    total_percentage = np.percentile(total, [5, 50, 95]) / np.sum(\n",
-    "        [[x for x in db if x[\"id\"] == key][0][\"arbocs\"][\"issuance\"] for key in data.keys()]\n",
+    "        [\n",
+    "            [x for x in db if x[\"id\"] == key][0][\"arbocs\"][\"issuance\"]\n",
+    "            for key in data.keys()\n",
+    "        ]\n",
     "    )"
    ]
   },
@@ -284,7 +296,9 @@
     "axs[1].vlines(df[\"id\"], -1, 1.5, color=(0.95, 0.95, 0.95), linewidth=1)\n",
     "axs[1].hlines([0], [0], [len(df) - 1], color=(0.75, 0.75, 0.75), linewidth=2)\n",
     "axs[1].plot(df[\"id\"], df[\"percent_med\"], \".\", color=\"#7EB36A\", markersize=12)\n",
-    "axs[1].vlines(df[\"id\"], df[\"percent_low\"], df[\"percent_high\"], color=\"black\", linewidth=1.25)\n",
+    "axs[1].vlines(\n",
+    "    df[\"id\"], df[\"percent_low\"], df[\"percent_high\"], color=\"black\", linewidth=1.25\n",
+    ")\n",
     "axs[1].set_xticks([])\n",
     "axs[1].set_ylim([-1.1, 1.1])\n",
     "axs[1].set_ylabel(\"Crediting error (%)\")\n",
diff --git a/notebooks/Figure-6.ipynb b/notebooks/Figure-6.ipynb
index ed696e6..9e0deec 100644
--- a/notebooks/Figure-6.ipynb
+++ b/notebooks/Figure-6.ipynb
@@ -15,9 +15,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import json\n",
     "import os\n",
+    "\n",
     "import fsspec\n",
-    "import json\n",
     "import geopandas\n",
     "import matplotlib as mpl\n",
     "import matplotlib.pyplot as plt\n",
@@ -26,7 +27,6 @@
     "from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n",
     "from shapely.geometry import Point\n",
     "\n",
-    "\n",
     "crs = \"+proj=aea +lat_0=23 +lon_0=-96 +lat_1=29.5 +lat_2=45.5 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs=True\""
    ]
   },
@@ -100,7 +100,9 @@
    "outputs": [],
    "source": [
     "proj_centroids = {\n",
-    "    project[\"opr_id\"]: Point(project[\"shape_centroid\"][0][0], project[\"shape_centroid\"][0][1])\n",
+    "    project[\"opr_id\"]: Point(\n",
+    "        project[\"shape_centroid\"][0][0], project[\"shape_centroid\"][0][1]\n",
+    "    )\n",
     "    for project in db\n",
     "    if (79 in project[\"supersection_ids\"])\n",
     "    and (\n",
@@ -119,7 +121,9 @@
    "outputs": [],
    "source": [
     "proj_points = geopandas.GeoDataFrame(\n",
-    "    data=list(proj_centroids.keys()), geometry=list(proj_centroids.values()), crs=\"epsg:4326\"\n",
+    "    data=list(proj_centroids.keys()),\n",
+    "    geometry=list(proj_centroids.values()),\n",
+    "    crs=\"epsg:4326\",\n",
     ")"
    ]
   },
@@ -214,9 +218,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "vmin, vmax = arbitrage_df.mean_local_slag.quantile(0.025), arbitrage_df.mean_local_slag.quantile(\n",
-    "    0.975\n",
-    ")\n",
+    "vmin, vmax = arbitrage_df.mean_local_slag.quantile(\n",
+    "    0.025\n",
+    "), arbitrage_df.mean_local_slag.quantile(0.975)\n",
     "\n",
     "norm = mpl.colors.Normalize(vmin, vmax)"
    ]
@@ -276,9 +280,13 @@
    "outputs": [],
    "source": [
     "supersection_sections = (\n",
-    "    ecomap_sections.loc[ecomap_sections[\"MAP_UNIT_S\"].isin(ecosections)].to_crs(crs).copy()\n",
+    "    ecomap_sections.loc[ecomap_sections[\"MAP_UNIT_S\"].isin(ecosections)]\n",
+    "    .to_crs(crs)\n",
+    "    .copy()\n",
+    ")\n",
+    "supersection_sections[\"slag\"] = supersection_sections[\"MAP_UNIT_S\"].map(\n",
+    "    slag_per_section\n",
     ")\n",
-    "supersection_sections[\"slag\"] = supersection_sections[\"MAP_UNIT_S\"].map(slag_per_section)\n",
     "supersection_outline = supersection_sections.dissolve(\"PROJECT\")"
    ]
   },
@@ -327,10 +335,14 @@
     "    vmax=55,\n",
     "    legend_kwds={\"label\": \"\", \"orientation\": \"vertical\"},\n",
     ")\n",
-    "cax.set_ylabel(\"$\\Delta$ Carbon\\n(tCO2e / acre)\", loc=\"center\", labelpad=-75, fontsize=12)\n",
+    "cax.set_ylabel(\n",
+    "    \"$\\\\Delta$ Carbon\\n(tCO2e / acre)\", loc=\"center\", labelpad=-75, fontsize=12\n",
+    ")\n",
     "cax.yaxis.set_ticks_position(\"none\")\n",
     "\n",
-    "proj_points.to_crs(crs).plot(ax=ax[0], marker=\"^\", color=\"k\", markersize=100, edgecolor=None)\n",
+    "proj_points.to_crs(crs).plot(\n",
+    "    ax=ax[0], marker=\"^\", color=\"k\", markersize=100, edgecolor=None\n",
+    ")\n",
     "supersection_outline.plot(ax=ax[0], edgecolor=\"k\", lw=0.2, color=\"None\")\n",
     "\n",
     "supersection_sections.plot(\n",
@@ -342,7 +354,9 @@
     "    vmin=-55,\n",
     "    vmax=55,\n",
     ")\n",
-    "proj_points.to_crs(crs).plot(ax=ax[1], marker=\"^\", color=\"k\", markersize=100, edgecolor=None)\n",
+    "proj_points.to_crs(crs).plot(\n",
+    "    ax=ax[1], marker=\"^\", color=\"k\", markersize=100, edgecolor=None\n",
+    ")\n",
     "\n",
     "\n",
     "xys = {\"M261D\": (0.7, 0.25), \"M261A\": (0.15, 0.8), \"M261B\": (0.4, 0.15)}\n",
diff --git a/notebooks/Inline-Statistics.ipynb b/notebooks/Inline-Statistics.ipynb
index 32047be..11c79e1 100644
--- a/notebooks/Inline-Statistics.ipynb
+++ b/notebooks/Inline-Statistics.ipynb
@@ -16,20 +16,18 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from collections import Counter\n",
-    "import fsspec\n",
     "import json\n",
-    "import os\n",
     "\n",
-    "import matplotlib.pyplot as plt\n",
+    "import fsspec\n",
     "import numpy as np\n",
     "import pandas as pd\n",
     "import seaborn as sns\n",
-    "from sklearn.metrics import mean_squared_error\n",
-    "\n",
-    "from carbonplan_forest_offsets.analysis.project_crediting_error import get_slag_to_total_scalar\n",
+    "from carbonplan_forest_offsets.analysis.project_crediting_error import (\n",
+    "    get_slag_to_total_scalar,\n",
+    ")\n",
     "from carbonplan_forest_offsets.data import cat\n",
-    "from carbonplan_forest_offsets.load.issuance import load_issuance_table, ifm_opr_ids"
+    "from carbonplan_forest_offsets.load.issuance import ifm_opr_ids, load_issuance_table\n",
+    "from sklearn.metrics import mean_squared_error"
    ]
   },
   {
@@ -133,7 +131,8 @@
     "subsets = {\n",
     "    \"all\": np.tile(True, len(df)),\n",
     "    \"all_forest\": df[\"project_type\"] == \"forest\",\n",
-    "    \"compliance_ifm\": (df[\"opr_id\"].isin(ifm_opr_ids)) & (df[\"Early Action/ Compliance\"] == \"COP\"),\n",
+    "    \"compliance_ifm\": (df[\"opr_id\"].isin(ifm_opr_ids))\n",
+    "    & (df[\"Early Action/ Compliance\"] == \"COP\"),\n",
     "    \"non_graduated_compliance_ifms\": (df[\"opr_id\"].isin(compliance_opr_ids))\n",
     "    & (df[\"Early Action/ Compliance\"] == \"COP\"),\n",
     "    \"upfront_ifm\": (df[\"opr_id\"].isin(upfront_opr_ids)) & (df[\"arb_rp_id\"].isin([\"A\"])),\n",
@@ -202,7 +201,9 @@
    "source": [
     "sc_data = cat.rfia_all(assessment_area_id=297).read()\n",
     "\n",
-    "sc_data = sc_data[sc_data[\"YEAR\"] == 2010].copy()  # use 2010 because comporable to CP data\n",
+    "sc_data = sc_data[\n",
+    "    sc_data[\"YEAR\"] == 2010\n",
+    "].copy()  # use 2010 because comporable to CP data\n",
     "\n",
     "\n",
     "sc_data[\"CARB_ACRE\"] = sc_data[\"CARB_ACRE\"] * 44 / 12 * 0.907185"
@@ -237,7 +238,9 @@
    "source": [
     "standing_carbon = {}\n",
     "for k, v in fortyps_of_interest.items():\n",
-    "    standing_carbon[k] = round(sc_data.loc[sc_data[\"FORTYPCD\"] == v, \"CARB_ACRE\"].item(), 1)\n",
+    "    standing_carbon[k] = round(\n",
+    "        sc_data.loc[sc_data[\"FORTYPCD\"] == v, \"CARB_ACRE\"].item(), 1\n",
+    "    )\n",
     "display(standing_carbon)"
    ]
   },
@@ -258,7 +261,9 @@
    "source": [
     "# ak has three assessment areas but lets summarize across all to report inline value\n",
     "ak_assessment_areas = [285, 286, 287]\n",
-    "ak_all = pd.concat([cat.rfia_all(assessment_area_id=aa_id).read() for aa_id in ak_assessment_areas])\n",
+    "ak_all = pd.concat(\n",
+    "    [cat.rfia_all(assessment_area_id=aa_id).read() for aa_id in ak_assessment_areas]\n",
+    ")\n",
     "\n",
     "ak_all = ak_all[ak_all[\"YEAR\"] == 2013].copy()  # 2013 to match what used in CP\n",
     "\n",
@@ -340,7 +345,9 @@
     "    ).round(1)\n",
     "\n",
     "    as_frac = crediting_error / project[\"arbocs\"][\"calculated\"]\n",
-    "    print(f\"{project['opr_id']} has a {crediting_error[1]} crediting error ({as_frac[1].round(3)})\")\n",
+    "    print(\n",
+    "        f\"{project['opr_id']} has a {crediting_error[1]} crediting error ({as_frac[1].round(3)})\"\n",
+    "    )\n",
     "    print(f\"CI: {crediting_error[0]}, {crediting_error[2]}\")\n",
     "    print(f\"% CI: {as_frac[0].round(3)}, {as_frac[2].round(3)}\")"
    ]
@@ -886,7 +893,8 @@
     "projects = [\n",
     "    x\n",
     "    for x in db\n",
-    "    if x[\"carbon\"][\"initial_carbon_stock\"][\"value\"] > x[\"carbon\"][\"common_practice\"][\"value\"]\n",
+    "    if x[\"carbon\"][\"initial_carbon_stock\"][\"value\"]\n",
+    "    > x[\"carbon\"][\"common_practice\"][\"value\"]\n",
     "]"
    ]
   },
@@ -937,7 +945,7 @@
     }
    ],
    "source": [
-    "sum((cp_df[\"baseline\"] <= cp_df[\"cp\"] * 1.05)) / len(cp_df.dropna())"
+    "sum(cp_df[\"baseline\"] <= cp_df[\"cp\"] * 1.05) / len(cp_df.dropna())"
    ]
   },
   {
@@ -955,7 +963,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "fn = f\"https://carbonplan.blob.core.windows.net/carbonplan-forests/offsets/archive/results/common-practice-verification.json\"\n",
+    "fn = \"https://carbonplan.blob.core.windows.net/carbonplan-forests/offsets/archive/results/common-practice-verification.json\"\n",
     "with fsspec.open(fn, mode=\"r\") as f:\n",
     "    cp_verification = json.load(f)"
    ]
@@ -1000,7 +1008,9 @@
     }
    ],
    "source": [
-    "data = pd.DataFrame(cp_verification[\"projects\"])[[\"opr_id\", \"recalculated\", \"project_reported\"]]\n",
+    "data = pd.DataFrame(cp_verification[\"projects\"])[\n",
+    "    [\"opr_id\", \"recalculated\", \"project_reported\"]\n",
+    "]\n",
     "mean_squared_error(data[\"recalculated\"], data[\"project_reported\"]) ** 0.5"
    ]
   },
@@ -1011,7 +1021,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "data[\"diff\"] = (data[\"recalculated\"] - data[\"project_reported\"]) / (data[\"project_reported\"])\n",
+    "data[\"diff\"] = (data[\"recalculated\"] - data[\"project_reported\"]) / (\n",
+    "    data[\"project_reported\"]\n",
+    ")\n",
     "data = data[np.isfinite(data[\"diff\"])]  # CAR1186 = infite bc original CP = 0."
    ]
   },
@@ -1188,7 +1200,9 @@
     "\n",
     "crediting_df = pd.DataFrame({k: v[\"delta_arbocs\"] for k, v in crediting_error.items()})\n",
     "\n",
-    "median_crediting_error = {k: np.median(v[\"delta_arbocs\"]) for k, v in crediting_error.items()}"
+    "median_crediting_error = {\n",
+    "    k: np.median(v[\"delta_arbocs\"]) for k, v in crediting_error.items()\n",
+    "}"
    ]
   },
   {
@@ -1199,7 +1213,10 @@
    "outputs": [],
    "source": [
     "tp = pd.concat(\n",
-    "    [pd.Series(median_crediting_error).rename(\"crediting_error\"), error_cp0.rename(\"cp\")],\n",
+    "    [\n",
+    "        pd.Series(median_crediting_error).rename(\"crediting_error\"),\n",
+    "        error_cp0.rename(\"cp\"),\n",
+    "    ],\n",
     "    axis=1,\n",
     ")"
    ]
@@ -1332,7 +1349,9 @@
    "outputs": [],
    "source": [
     "ak_assessment_areas = [285, 286, 287]\n",
-    "ak_all = pd.concat([cat.rfia_all(assessment_area_id=aa_id).read() for aa_id in ak_assessment_areas])"
+    "ak_all = pd.concat(\n",
+    "    [cat.rfia_all(assessment_area_id=aa_id).read() for aa_id in ak_assessment_areas]\n",
+    ")"
    ]
   },
   {
@@ -1388,7 +1407,13 @@
     }
    ],
    "source": [
-    "sum([project[\"arbocs\"][\"issuance\"] for project in db if 287 in project[\"supersection_ids\"]])"
+    "sum(\n",
+    "    [\n",
+    "        project[\"arbocs\"][\"issuance\"]\n",
+    "        for project in db\n",
+    "        if 287 in project[\"supersection_ids\"]\n",
+    "    ]\n",
+    ")"
    ]
   },
   {
diff --git a/notebooks/Statistics.ipynb b/notebooks/Statistics.ipynb
index b6fad79..21a6a2c 100644
--- a/notebooks/Statistics.ipynb
+++ b/notebooks/Statistics.ipynb
@@ -7,11 +7,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
-    "import fsspec\n",
     "import json\n",
-    "import pandas as pd\n",
-    "import numpy as np"
+    "\n",
+    "import fsspec\n",
+    "import numpy as np\n",
+    "import pandas as pd"
    ]
   },
   {
@@ -112,8 +112,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def format_si(num, precision=1, suffixes=[\"\", \"K\", \"M\", \"G\", \"T\", \"P\"], hide_suffix=False):\n",
-    "    m = sum([abs(num / 1000.0 ** x) >= 1 for x in range(1, len(suffixes))])\n",
+    "def format_si(\n",
+    "    num, precision=1, suffixes=[\"\", \"K\", \"M\", \"G\", \"T\", \"P\"], hide_suffix=False\n",
+    "):\n",
+    "    m = sum([abs(num / 1000.0**x) >= 1 for x in range(1, len(suffixes))])\n",
     "    if hide_suffix:\n",
     "        return f\"{num/1000.0**m:.{precision}f}\"\n",
     "    else:\n",
@@ -252,7 +254,9 @@
     }
    ],
    "source": [
-    "condition = lambda x: \"Finite Carbon\" in x[\"developers\"] or \"Finite Carbon\" in x[\"owners\"]\n",
+    "condition = (\n",
+    "    lambda x: \"Finite Carbon\" in x[\"developers\"] or \"Finite Carbon\" in x[\"owners\"]\n",
+    ")\n",
     "get_overcrediting(condition=condition, percentage=True, display=True)"
    ]
   },
@@ -315,7 +319,9 @@
     "df = pd.DataFrame()\n",
     "df[\"Developer\"] = developers\n",
     "results = [\n",
-    "    get_overcrediting(condition=lambda x: d in x[\"developers\"] or d in x[\"owners\"], percentage=True)\n",
+    "    get_overcrediting(\n",
+    "        condition=lambda x: d in x[\"developers\"] or d in x[\"owners\"], percentage=True\n",
+    "    )\n",
     "    for d in developers\n",
     "]\n",
     "df[\"Count\"] = [d[\"count\"] for d in results]\n",
diff --git a/notebooks/Supplementary-Figure-1.ipynb b/notebooks/Supplementary-Figure-1.ipynb
index 9e77ff5..91321c2 100644
--- a/notebooks/Supplementary-Figure-1.ipynb
+++ b/notebooks/Supplementary-Figure-1.ipynb
@@ -15,12 +15,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import json\n",
     "import os\n",
+    "\n",
     "import fsspec\n",
-    "import json\n",
-    "import pandas as pd\n",
-    "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "import pandas as pd\n",
     "from matplotlib import ticker"
    ]
   },
@@ -73,8 +74,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def format_si(num, precision=0, suffixes=[\"\", \"K\", \"M\", \"G\", \"T\", \"P\"], show_suffix=False):\n",
-    "    m = sum([abs(num / 1000.0 ** x) >= 1 for x in range(1, len(suffixes))])\n",
+    "def format_si(\n",
+    "    num, precision=0, suffixes=[\"\", \"K\", \"M\", \"G\", \"T\", \"P\"], show_suffix=False\n",
+    "):\n",
+    "    m = sum([abs(num / 1000.0**x) >= 1 for x in range(1, len(suffixes))])\n",
     "    if show_suffix:\n",
     "        return f\"{num/1000.0**m:.{precision}f}{suffixes[m]}\"\n",
     "    else:\n",
@@ -139,7 +142,9 @@
     "    xycoords=\"axes points\",\n",
     ")\n",
     "ax.annotate(\n",
-    "    f'MAE: {np.mean((df[\"calculated\"] - df[\"issued\"])):.0f}', xy=(200, 30), xycoords=\"axes points\"\n",
+    "    f'MAE: {np.mean((df[\"calculated\"] - df[\"issued\"])):.0f}',\n",
+    "    xy=(200, 30),\n",
+    "    xycoords=\"axes points\",\n",
     ")\n",
     "\n",
     "fname = \"Supplementary-Figure-1.svg\"\n",
diff --git a/notebooks/Supplementary-Figure-2.ipynb b/notebooks/Supplementary-Figure-2.ipynb
index 6b21ca9..111740a 100644
--- a/notebooks/Supplementary-Figure-2.ipynb
+++ b/notebooks/Supplementary-Figure-2.ipynb
@@ -15,13 +15,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "import json\n",
     "import os\n",
+    "\n",
     "import fsspec\n",
-    "import json\n",
-    "import pandas as pd\n",
-    "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
-    "from matplotlib import ticker"
+    "import numpy as np\n",
+    "import pandas as pd"
    ]
   },
   {
diff --git a/notebooks/Supplementary-Table-1.ipynb b/notebooks/Supplementary-Table-1.ipynb
index 0b4da58..5c28035 100644
--- a/notebooks/Supplementary-Table-1.ipynb
+++ b/notebooks/Supplementary-Table-1.ipynb
@@ -15,13 +15,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
-    "import fsspec\n",
     "import json\n",
-    "import pandas as pd\n",
-    "import numpy as np\n",
-    "import random\n",
-    "import matplotlib.pyplot as plt"
+    "\n",
+    "import fsspec\n",
+    "import pandas as pd"
    ]
   },
   {
diff --git a/notebooks/Supplementary-Table-2.ipynb b/notebooks/Supplementary-Table-2.ipynb
index 73a5e8f..c4dbb5c 100644
--- a/notebooks/Supplementary-Table-2.ipynb
+++ b/notebooks/Supplementary-Table-2.ipynb
@@ -15,13 +15,10 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import os\n",
-    "import fsspec\n",
     "import json\n",
-    "import pandas as pd\n",
-    "import numpy as np\n",
-    "import random\n",
-    "import matplotlib.pyplot as plt"
+    "\n",
+    "import fsspec\n",
+    "import pandas as pd"
    ]
   },
   {
@@ -90,7 +87,10 @@
     "]\n",
     "df[\"Classification\"] = [\n",
     "    \"\\n\".join(\n",
-    "        [str(s[0]).capitalize() + \" : \" + \"%.1f\" % (s[1] * 100) + \"%\" for s in d[\"classification\"]]\n",
+    "        [\n",
+    "            str(s[0]).capitalize() + \" : \" + \"%.1f\" % (s[1] * 100) + \"%\"\n",
+    "            for s in d[\"classification\"]\n",
+    "        ]\n",
     "    )\n",
     "    for d in data\n",
     "]"