diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 937468d1..6804a642 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,7 +17,7 @@ repos:
- id: check-json
- id: pretty-format-json
args: [ '--autofix', '--no-ensure-ascii', '--no-sort-keys' ]
- # exclude: .ipynb
+ exclude: .ipynb
- id: check-yaml
args: [ '--allow-multiple-documents' ]
- repo: https://github.com/pre-commit/pygrep-hooks
diff --git a/docs/index.rst b/docs/index.rst
index 3f2923f5..9aac9939 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -4,6 +4,7 @@ Welcome to xHydro's documentation!
.. toctree::
:maxdepth: 2
:caption: Contents:
+ :hidden:
readme
installation
@@ -13,8 +14,8 @@ Welcome to xHydro's documentation!
authors
history
-Indices and tables
-==================
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
+.. Indices and tables
+.. ==================
+.. * :ref:`genindex`
+.. * :ref:`modindex`
+.. * :ref:`search`
diff --git a/docs/notebooks/local_frequency_analysis.ipynb b/docs/notebooks/local_frequency_analysis.ipynb
new file mode 100644
index 00000000..6835e867
--- /dev/null
+++ b/docs/notebooks/local_frequency_analysis.ipynb
@@ -0,0 +1,4000 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%load_ext autoreload\n",
+ "%autoreload 2"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Frequency analysis \n",
+ "\n",
+ "Text\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "application/javascript": [
+ "(function(root) {\n",
+ " function now() {\n",
+ " return new Date();\n",
+ " }\n",
+ "\n",
+ " var force = true;\n",
+ " var py_version = '3.2.2'.replace('rc', '-rc.').replace('.dev', '-dev.');\n",
+ " var is_dev = py_version.indexOf(\"+\") !== -1 || py_version.indexOf(\"-\") !== -1;\n",
+ " var reloading = false;\n",
+ " var Bokeh = root.Bokeh;\n",
+ " var bokeh_loaded = Bokeh != null && (Bokeh.version === py_version || (Bokeh.versions !== undefined && Bokeh.versions.has(py_version)));\n",
+ "\n",
+ " if (typeof (root._bokeh_timeout) === \"undefined\" || force) {\n",
+ " root._bokeh_timeout = Date.now() + 5000;\n",
+ " root._bokeh_failed_load = false;\n",
+ " }\n",
+ "\n",
+ " function run_callbacks() {\n",
+ " try {\n",
+ " root._bokeh_onload_callbacks.forEach(function(callback) {\n",
+ " if (callback != null)\n",
+ " callback();\n",
+ " });\n",
+ " } finally {\n",
+ " delete root._bokeh_onload_callbacks;\n",
+ " }\n",
+ " console.debug(\"Bokeh: all callbacks have finished\");\n",
+ " }\n",
+ "\n",
+ " function load_libs(css_urls, js_urls, js_modules, js_exports, callback) {\n",
+ " if (css_urls == null) css_urls = [];\n",
+ " if (js_urls == null) js_urls = [];\n",
+ " if (js_modules == null) js_modules = [];\n",
+ " if (js_exports == null) js_exports = {};\n",
+ "\n",
+ " root._bokeh_onload_callbacks.push(callback);\n",
+ "\n",
+ " if (root._bokeh_is_loading > 0) {\n",
+ " console.debug(\"Bokeh: BokehJS is being loaded, scheduling callback at\", now());\n",
+ " return null;\n",
+ " }\n",
+ " if (js_urls.length === 0 && js_modules.length === 0 && Object.keys(js_exports).length === 0) {\n",
+ " run_callbacks();\n",
+ " return null;\n",
+ " }\n",
+ " if (!reloading) {\n",
+ " console.debug(\"Bokeh: BokehJS not loaded, scheduling load and callback at\", now());\n",
+ " }\n",
+ "\n",
+ " function on_load() {\n",
+ " root._bokeh_is_loading--;\n",
+ " if (root._bokeh_is_loading === 0) {\n",
+ " console.debug(\"Bokeh: all BokehJS libraries/stylesheets loaded\");\n",
+ " run_callbacks()\n",
+ " }\n",
+ " }\n",
+ " window._bokeh_on_load = on_load\n",
+ "\n",
+ " function on_error() {\n",
+ " console.error(\"failed to load \" + url);\n",
+ " }\n",
+ "\n",
+ " var skip = [];\n",
+ " if (window.requirejs) {\n",
+ " window.requirejs.config({'packages': {}, 'paths': {'jspanel': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/jspanel', 'jspanel-modal': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/modal/jspanel.modal', 'jspanel-tooltip': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/tooltip/jspanel.tooltip', 'jspanel-hint': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/hint/jspanel.hint', 'jspanel-layout': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/layout/jspanel.layout', 'jspanel-contextmenu': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/contextmenu/jspanel.contextmenu', 'jspanel-dock': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/dock/jspanel.dock', 'gridstack': 'https://cdn.jsdelivr.net/npm/gridstack@7.2.3/dist/gridstack-all', 'notyf': 'https://cdn.jsdelivr.net/npm/notyf@3/notyf.min'}, 'shim': {'jspanel': {'exports': 'jsPanel'}, 'gridstack': {'exports': 'GridStack'}}});\n",
+ " require([\"jspanel\"], function(jsPanel) {\n",
+ "\twindow.jsPanel = jsPanel\n",
+ "\ton_load()\n",
+ " })\n",
+ " require([\"jspanel-modal\"], function() {\n",
+ "\ton_load()\n",
+ " })\n",
+ " require([\"jspanel-tooltip\"], function() {\n",
+ "\ton_load()\n",
+ " })\n",
+ " require([\"jspanel-hint\"], function() {\n",
+ "\ton_load()\n",
+ " })\n",
+ " require([\"jspanel-layout\"], function() {\n",
+ "\ton_load()\n",
+ " })\n",
+ " require([\"jspanel-contextmenu\"], function() {\n",
+ "\ton_load()\n",
+ " })\n",
+ " require([\"jspanel-dock\"], function() {\n",
+ "\ton_load()\n",
+ " })\n",
+ " require([\"gridstack\"], function(GridStack) {\n",
+ "\twindow.GridStack = GridStack\n",
+ "\ton_load()\n",
+ " })\n",
+ " require([\"notyf\"], function() {\n",
+ "\ton_load()\n",
+ " })\n",
+ " root._bokeh_is_loading = css_urls.length + 9;\n",
+ " } else {\n",
+ " root._bokeh_is_loading = css_urls.length + js_urls.length + js_modules.length + Object.keys(js_exports).length;\n",
+ " }\n",
+ "\n",
+ " var existing_stylesheets = []\n",
+ " var links = document.getElementsByTagName('link')\n",
+ " for (var i = 0; i < links.length; i++) {\n",
+ " var link = links[i]\n",
+ " if (link.href != null) {\n",
+ "\texisting_stylesheets.push(link.href)\n",
+ " }\n",
+ " }\n",
+ " for (var i = 0; i < css_urls.length; i++) {\n",
+ " var url = css_urls[i];\n",
+ " if (existing_stylesheets.indexOf(url) !== -1) {\n",
+ "\ton_load()\n",
+ "\tcontinue;\n",
+ " }\n",
+ " const element = document.createElement(\"link\");\n",
+ " element.onload = on_load;\n",
+ " element.onerror = on_error;\n",
+ " element.rel = \"stylesheet\";\n",
+ " element.type = \"text/css\";\n",
+ " element.href = url;\n",
+ " console.debug(\"Bokeh: injecting link tag for BokehJS stylesheet: \", url);\n",
+ " document.body.appendChild(element);\n",
+ " } if (((window['jsPanel'] !== undefined) && (!(window['jsPanel'] instanceof HTMLElement))) || window.requirejs) {\n",
+ " var urls = ['https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/jspanel.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/modal/jspanel.modal.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/tooltip/jspanel.tooltip.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/hint/jspanel.hint.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/layout/jspanel.layout.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/contextmenu/jspanel.contextmenu.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/dock/jspanel.dock.js'];\n",
+ " for (var i = 0; i < urls.length; i++) {\n",
+ " skip.push(urls[i])\n",
+ " }\n",
+ " } if (((window['GridStack'] !== undefined) && (!(window['GridStack'] instanceof HTMLElement))) || window.requirejs) {\n",
+ " var urls = ['https://cdn.holoviz.org/panel/1.2.1/dist/bundled/gridstack/gridstack@7.2.3/dist/gridstack-all.js'];\n",
+ " for (var i = 0; i < urls.length; i++) {\n",
+ " skip.push(urls[i])\n",
+ " }\n",
+ " } if (((window['Notyf'] !== undefined) && (!(window['Notyf'] instanceof HTMLElement))) || window.requirejs) {\n",
+ " var urls = ['https://cdn.holoviz.org/panel/1.2.1/dist/bundled/notificationarea/notyf@3/notyf.min.js'];\n",
+ " for (var i = 0; i < urls.length; i++) {\n",
+ " skip.push(urls[i])\n",
+ " }\n",
+ " } var existing_scripts = []\n",
+ " var scripts = document.getElementsByTagName('script')\n",
+ " for (var i = 0; i < scripts.length; i++) {\n",
+ " var script = scripts[i]\n",
+ " if (script.src != null) {\n",
+ "\texisting_scripts.push(script.src)\n",
+ " }\n",
+ " }\n",
+ " for (var i = 0; i < js_urls.length; i++) {\n",
+ " var url = js_urls[i];\n",
+ " if (skip.indexOf(url) !== -1 || existing_scripts.indexOf(url) !== -1) {\n",
+ "\tif (!window.requirejs) {\n",
+ "\t on_load();\n",
+ "\t}\n",
+ "\tcontinue;\n",
+ " }\n",
+ " var element = document.createElement('script');\n",
+ " element.onload = on_load;\n",
+ " element.onerror = on_error;\n",
+ " element.async = false;\n",
+ " element.src = url;\n",
+ " console.debug(\"Bokeh: injecting script tag for BokehJS library: \", url);\n",
+ " document.head.appendChild(element);\n",
+ " }\n",
+ " for (var i = 0; i < js_modules.length; i++) {\n",
+ " var url = js_modules[i];\n",
+ " if (skip.indexOf(url) !== -1 || existing_scripts.indexOf(url) !== -1) {\n",
+ "\tif (!window.requirejs) {\n",
+ "\t on_load();\n",
+ "\t}\n",
+ "\tcontinue;\n",
+ " }\n",
+ " var element = document.createElement('script');\n",
+ " element.onload = on_load;\n",
+ " element.onerror = on_error;\n",
+ " element.async = false;\n",
+ " element.src = url;\n",
+ " element.type = \"module\";\n",
+ " console.debug(\"Bokeh: injecting script tag for BokehJS library: \", url);\n",
+ " document.head.appendChild(element);\n",
+ " }\n",
+ " for (const name in js_exports) {\n",
+ " var url = js_exports[name];\n",
+ " if (skip.indexOf(url) >= 0 || root[name] != null) {\n",
+ "\tif (!window.requirejs) {\n",
+ "\t on_load();\n",
+ "\t}\n",
+ "\tcontinue;\n",
+ " }\n",
+ " var element = document.createElement('script');\n",
+ " element.onerror = on_error;\n",
+ " element.async = false;\n",
+ " element.type = \"module\";\n",
+ " console.debug(\"Bokeh: injecting script tag for BokehJS library: \", url);\n",
+ " element.textContent = `\n",
+ " import ${name} from \"${url}\"\n",
+ " window.${name} = ${name}\n",
+ " window._bokeh_on_load()\n",
+ " `\n",
+ " document.head.appendChild(element);\n",
+ " }\n",
+ " if (!js_urls.length && !js_modules.length) {\n",
+ " on_load()\n",
+ " }\n",
+ " };\n",
+ "\n",
+ " function inject_raw_css(css) {\n",
+ " const element = document.createElement(\"style\");\n",
+ " element.appendChild(document.createTextNode(css));\n",
+ " document.body.appendChild(element);\n",
+ " }\n",
+ "\n",
+ " var js_urls = [\"https://cdn.bokeh.org/bokeh/release/bokeh-3.2.2.min.js\", \"https://cdn.bokeh.org/bokeh/release/bokeh-gl-3.2.2.min.js\", \"https://cdn.bokeh.org/bokeh/release/bokeh-widgets-3.2.2.min.js\", \"https://cdn.bokeh.org/bokeh/release/bokeh-tables-3.2.2.min.js\", \"https://cdn.holoviz.org/panel/1.2.1/dist/panel.min.js\"];\n",
+ " var js_modules = [];\n",
+ " var js_exports = {};\n",
+ " var css_urls = [];\n",
+ " var inline_js = [ function(Bokeh) {\n",
+ " Bokeh.set_log_level(\"info\");\n",
+ " },\n",
+ "function(Bokeh) {} // ensure no trailing comma for IE\n",
+ " ];\n",
+ "\n",
+ " function run_inline_js() {\n",
+ " if ((root.Bokeh !== undefined) || (force === true)) {\n",
+ " for (var i = 0; i < inline_js.length; i++) {\n",
+ " inline_js[i].call(root, root.Bokeh);\n",
+ " }\n",
+ " // Cache old bokeh versions\n",
+ " if (Bokeh != undefined && !reloading) {\n",
+ "\tvar NewBokeh = root.Bokeh;\n",
+ "\tif (Bokeh.versions === undefined) {\n",
+ "\t Bokeh.versions = new Map();\n",
+ "\t}\n",
+ "\tif (NewBokeh.version !== Bokeh.version) {\n",
+ "\t Bokeh.versions.set(NewBokeh.version, NewBokeh)\n",
+ "\t}\n",
+ "\troot.Bokeh = Bokeh;\n",
+ " }} else if (Date.now() < root._bokeh_timeout) {\n",
+ " setTimeout(run_inline_js, 100);\n",
+ " } else if (!root._bokeh_failed_load) {\n",
+ " console.log(\"Bokeh: BokehJS failed to load within specified timeout.\");\n",
+ " root._bokeh_failed_load = true;\n",
+ " }\n",
+ " root._bokeh_is_initializing = false\n",
+ " }\n",
+ "\n",
+ " function load_or_wait() {\n",
+ " // Implement a backoff loop that tries to ensure we do not load multiple\n",
+ " // versions of Bokeh and its dependencies at the same time.\n",
+ " // In recent versions we use the root._bokeh_is_initializing flag\n",
+ " // to determine whether there is an ongoing attempt to initialize\n",
+ " // bokeh, however for backward compatibility we also try to ensure\n",
+ " // that we do not start loading a newer (Panel>=1.0 and Bokeh>3) version\n",
+ " // before older versions are fully initialized.\n",
+ " if (root._bokeh_is_initializing && Date.now() > root._bokeh_timeout) {\n",
+ " root._bokeh_is_initializing = false;\n",
+ " root._bokeh_onload_callbacks = undefined;\n",
+ " console.log(\"Bokeh: BokehJS was loaded multiple times but one version failed to initialize.\");\n",
+ " load_or_wait();\n",
+ " } else if (root._bokeh_is_initializing || (typeof root._bokeh_is_initializing === \"undefined\" && root._bokeh_onload_callbacks !== undefined)) {\n",
+ " setTimeout(load_or_wait, 100);\n",
+ " } else {\n",
+ " Bokeh = root.Bokeh;\n",
+ " bokeh_loaded = Bokeh != null && (Bokeh.version === py_version || (Bokeh.versions !== undefined && Bokeh.versions.has(py_version)));\n",
+ " root._bokeh_is_initializing = true\n",
+ " root._bokeh_onload_callbacks = []\n",
+ " if (!reloading && (!bokeh_loaded || is_dev)) {\n",
+ "\troot.Bokeh = undefined;\n",
+ " }\n",
+ " load_libs(css_urls, js_urls, js_modules, js_exports, function() {\n",
+ "\tconsole.debug(\"Bokeh: BokehJS plotting callback run at\", now());\n",
+ "\trun_inline_js();\n",
+ " });\n",
+ " }\n",
+ " }\n",
+ " // Give older versions of the autoload script a head-start to ensure\n",
+ " // they initialize before we start loading newer version.\n",
+ " setTimeout(load_or_wait, 100)\n",
+ "}(window));"
+ ],
+ "application/vnd.holoviews_load.v0+json": "(function(root) {\n function now() {\n return new Date();\n }\n\n var force = true;\n var py_version = '3.2.2'.replace('rc', '-rc.').replace('.dev', '-dev.');\n var is_dev = py_version.indexOf(\"+\") !== -1 || py_version.indexOf(\"-\") !== -1;\n var reloading = false;\n var Bokeh = root.Bokeh;\n var bokeh_loaded = Bokeh != null && (Bokeh.version === py_version || (Bokeh.versions !== undefined && Bokeh.versions.has(py_version)));\n\n if (typeof (root._bokeh_timeout) === \"undefined\" || force) {\n root._bokeh_timeout = Date.now() + 5000;\n root._bokeh_failed_load = false;\n }\n\n function run_callbacks() {\n try {\n root._bokeh_onload_callbacks.forEach(function(callback) {\n if (callback != null)\n callback();\n });\n } finally {\n delete root._bokeh_onload_callbacks;\n }\n console.debug(\"Bokeh: all callbacks have finished\");\n }\n\n function load_libs(css_urls, js_urls, js_modules, js_exports, callback) {\n if (css_urls == null) css_urls = [];\n if (js_urls == null) js_urls = [];\n if (js_modules == null) js_modules = [];\n if (js_exports == null) js_exports = {};\n\n root._bokeh_onload_callbacks.push(callback);\n\n if (root._bokeh_is_loading > 0) {\n console.debug(\"Bokeh: BokehJS is being loaded, scheduling callback at\", now());\n return null;\n }\n if (js_urls.length === 0 && js_modules.length === 0 && Object.keys(js_exports).length === 0) {\n run_callbacks();\n return null;\n }\n if (!reloading) {\n console.debug(\"Bokeh: BokehJS not loaded, scheduling load and callback at\", now());\n }\n\n function on_load() {\n root._bokeh_is_loading--;\n if (root._bokeh_is_loading === 0) {\n console.debug(\"Bokeh: all BokehJS libraries/stylesheets loaded\");\n run_callbacks()\n }\n }\n window._bokeh_on_load = on_load\n\n function on_error() {\n console.error(\"failed to load \" + url);\n }\n\n var skip = [];\n if (window.requirejs) {\n window.requirejs.config({'packages': {}, 'paths': {'jspanel': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/jspanel', 'jspanel-modal': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/modal/jspanel.modal', 'jspanel-tooltip': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/tooltip/jspanel.tooltip', 'jspanel-hint': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/hint/jspanel.hint', 'jspanel-layout': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/layout/jspanel.layout', 'jspanel-contextmenu': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/contextmenu/jspanel.contextmenu', 'jspanel-dock': 'https://cdn.jsdelivr.net/npm/jspanel4@4.12.0/dist/extensions/dock/jspanel.dock', 'gridstack': 'https://cdn.jsdelivr.net/npm/gridstack@7.2.3/dist/gridstack-all', 'notyf': 'https://cdn.jsdelivr.net/npm/notyf@3/notyf.min'}, 'shim': {'jspanel': {'exports': 'jsPanel'}, 'gridstack': {'exports': 'GridStack'}}});\n require([\"jspanel\"], function(jsPanel) {\n\twindow.jsPanel = jsPanel\n\ton_load()\n })\n require([\"jspanel-modal\"], function() {\n\ton_load()\n })\n require([\"jspanel-tooltip\"], function() {\n\ton_load()\n })\n require([\"jspanel-hint\"], function() {\n\ton_load()\n })\n require([\"jspanel-layout\"], function() {\n\ton_load()\n })\n require([\"jspanel-contextmenu\"], function() {\n\ton_load()\n })\n require([\"jspanel-dock\"], function() {\n\ton_load()\n })\n require([\"gridstack\"], function(GridStack) {\n\twindow.GridStack = GridStack\n\ton_load()\n })\n require([\"notyf\"], function() {\n\ton_load()\n })\n root._bokeh_is_loading = css_urls.length + 
9;\n } else {\n root._bokeh_is_loading = css_urls.length + js_urls.length + js_modules.length + Object.keys(js_exports).length;\n }\n\n var existing_stylesheets = []\n var links = document.getElementsByTagName('link')\n for (var i = 0; i < links.length; i++) {\n var link = links[i]\n if (link.href != null) {\n\texisting_stylesheets.push(link.href)\n }\n }\n for (var i = 0; i < css_urls.length; i++) {\n var url = css_urls[i];\n if (existing_stylesheets.indexOf(url) !== -1) {\n\ton_load()\n\tcontinue;\n }\n const element = document.createElement(\"link\");\n element.onload = on_load;\n element.onerror = on_error;\n element.rel = \"stylesheet\";\n element.type = \"text/css\";\n element.href = url;\n console.debug(\"Bokeh: injecting link tag for BokehJS stylesheet: \", url);\n document.body.appendChild(element);\n } if (((window['jsPanel'] !== undefined) && (!(window['jsPanel'] instanceof HTMLElement))) || window.requirejs) {\n var urls = ['https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/jspanel.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/modal/jspanel.modal.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/tooltip/jspanel.tooltip.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/hint/jspanel.hint.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/layout/jspanel.layout.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/contextmenu/jspanel.contextmenu.js', 'https://cdn.holoviz.org/panel/1.2.1/dist/bundled/floatpanel/jspanel4@4.12.0/dist/extensions/dock/jspanel.dock.js'];\n for (var i = 0; i < urls.length; i++) {\n skip.push(urls[i])\n }\n } if (((window['GridStack'] !== undefined) && (!(window['GridStack'] instanceof HTMLElement))) || window.requirejs) {\n var urls = ['https://cdn.holoviz.org/panel/1.2.1/dist/bundled/gridstack/gridstack@7.2.3/dist/gridstack-all.js'];\n for (var i = 0; i < urls.length; i++) {\n skip.push(urls[i])\n }\n } if (((window['Notyf'] !== undefined) && (!(window['Notyf'] instanceof HTMLElement))) || window.requirejs) {\n var urls = ['https://cdn.holoviz.org/panel/1.2.1/dist/bundled/notificationarea/notyf@3/notyf.min.js'];\n for (var i = 0; i < urls.length; i++) {\n skip.push(urls[i])\n }\n } var existing_scripts = []\n var scripts = document.getElementsByTagName('script')\n for (var i = 0; i < scripts.length; i++) {\n var script = scripts[i]\n if (script.src != null) {\n\texisting_scripts.push(script.src)\n }\n }\n for (var i = 0; i < js_urls.length; i++) {\n var url = js_urls[i];\n if (skip.indexOf(url) !== -1 || existing_scripts.indexOf(url) !== -1) {\n\tif (!window.requirejs) {\n\t on_load();\n\t}\n\tcontinue;\n }\n var element = document.createElement('script');\n element.onload = on_load;\n element.onerror = on_error;\n element.async = false;\n element.src = url;\n console.debug(\"Bokeh: injecting script tag for BokehJS library: \", url);\n document.head.appendChild(element);\n }\n for (var i = 0; i < js_modules.length; i++) {\n var url = js_modules[i];\n if (skip.indexOf(url) !== -1 || existing_scripts.indexOf(url) !== -1) {\n\tif (!window.requirejs) {\n\t on_load();\n\t}\n\tcontinue;\n }\n var element = document.createElement('script');\n element.onload = on_load;\n element.onerror = on_error;\n element.async = false;\n element.src = url;\n element.type = \"module\";\n console.debug(\"Bokeh: 
injecting script tag for BokehJS library: \", url);\n document.head.appendChild(element);\n }\n for (const name in js_exports) {\n var url = js_exports[name];\n if (skip.indexOf(url) >= 0 || root[name] != null) {\n\tif (!window.requirejs) {\n\t on_load();\n\t}\n\tcontinue;\n }\n var element = document.createElement('script');\n element.onerror = on_error;\n element.async = false;\n element.type = \"module\";\n console.debug(\"Bokeh: injecting script tag for BokehJS library: \", url);\n element.textContent = `\n import ${name} from \"${url}\"\n window.${name} = ${name}\n window._bokeh_on_load()\n `\n document.head.appendChild(element);\n }\n if (!js_urls.length && !js_modules.length) {\n on_load()\n }\n };\n\n function inject_raw_css(css) {\n const element = document.createElement(\"style\");\n element.appendChild(document.createTextNode(css));\n document.body.appendChild(element);\n }\n\n var js_urls = [\"https://cdn.bokeh.org/bokeh/release/bokeh-3.2.2.min.js\", \"https://cdn.bokeh.org/bokeh/release/bokeh-gl-3.2.2.min.js\", \"https://cdn.bokeh.org/bokeh/release/bokeh-widgets-3.2.2.min.js\", \"https://cdn.bokeh.org/bokeh/release/bokeh-tables-3.2.2.min.js\", \"https://cdn.holoviz.org/panel/1.2.1/dist/panel.min.js\"];\n var js_modules = [];\n var js_exports = {};\n var css_urls = [];\n var inline_js = [ function(Bokeh) {\n Bokeh.set_log_level(\"info\");\n },\nfunction(Bokeh) {} // ensure no trailing comma for IE\n ];\n\n function run_inline_js() {\n if ((root.Bokeh !== undefined) || (force === true)) {\n for (var i = 0; i < inline_js.length; i++) {\n inline_js[i].call(root, root.Bokeh);\n }\n // Cache old bokeh versions\n if (Bokeh != undefined && !reloading) {\n\tvar NewBokeh = root.Bokeh;\n\tif (Bokeh.versions === undefined) {\n\t Bokeh.versions = new Map();\n\t}\n\tif (NewBokeh.version !== Bokeh.version) {\n\t Bokeh.versions.set(NewBokeh.version, NewBokeh)\n\t}\n\troot.Bokeh = Bokeh;\n }} else if (Date.now() < root._bokeh_timeout) {\n setTimeout(run_inline_js, 100);\n } else if (!root._bokeh_failed_load) {\n console.log(\"Bokeh: BokehJS failed to load within specified timeout.\");\n root._bokeh_failed_load = true;\n }\n root._bokeh_is_initializing = false\n }\n\n function load_or_wait() {\n // Implement a backoff loop that tries to ensure we do not load multiple\n // versions of Bokeh and its dependencies at the same time.\n // In recent versions we use the root._bokeh_is_initializing flag\n // to determine whether there is an ongoing attempt to initialize\n // bokeh, however for backward compatibility we also try to ensure\n // that we do not start loading a newer (Panel>=1.0 and Bokeh>3) version\n // before older versions are fully initialized.\n if (root._bokeh_is_initializing && Date.now() > root._bokeh_timeout) {\n root._bokeh_is_initializing = false;\n root._bokeh_onload_callbacks = undefined;\n console.log(\"Bokeh: BokehJS was loaded multiple times but one version failed to initialize.\");\n load_or_wait();\n } else if (root._bokeh_is_initializing || (typeof root._bokeh_is_initializing === \"undefined\" && root._bokeh_onload_callbacks !== undefined)) {\n setTimeout(load_or_wait, 100);\n } else {\n Bokeh = root.Bokeh;\n bokeh_loaded = Bokeh != null && (Bokeh.version === py_version || (Bokeh.versions !== undefined && Bokeh.versions.has(py_version)));\n root._bokeh_is_initializing = true\n root._bokeh_onload_callbacks = []\n if (!reloading && (!bokeh_loaded || is_dev)) {\n\troot.Bokeh = undefined;\n }\n load_libs(css_urls, js_urls, js_modules, js_exports, function() 
{\n\tconsole.debug(\"Bokeh: BokehJS plotting callback run at\", now());\n\trun_inline_js();\n });\n }\n }\n // Give older versions of the autoload script a head-start to ensure\n // they initialize before we start loading newer version.\n setTimeout(load_or_wait, 100)\n}(window));"
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "\n",
+ "if ((window.PyViz === undefined) || (window.PyViz instanceof HTMLElement)) {\n",
+ " window.PyViz = {comms: {}, comm_status:{}, kernels:{}, receivers: {}, plot_index: []}\n",
+ "}\n",
+ "\n",
+ "\n",
+ " function JupyterCommManager() {\n",
+ " }\n",
+ "\n",
+ " JupyterCommManager.prototype.register_target = function(plot_id, comm_id, msg_handler) {\n",
+ " if (window.comm_manager || ((window.Jupyter !== undefined) && (Jupyter.notebook.kernel != null))) {\n",
+ " var comm_manager = window.comm_manager || Jupyter.notebook.kernel.comm_manager;\n",
+ " comm_manager.register_target(comm_id, function(comm) {\n",
+ " comm.on_msg(msg_handler);\n",
+ " });\n",
+ " } else if ((plot_id in window.PyViz.kernels) && (window.PyViz.kernels[plot_id])) {\n",
+ " window.PyViz.kernels[plot_id].registerCommTarget(comm_id, function(comm) {\n",
+ " comm.onMsg = msg_handler;\n",
+ " });\n",
+ " } else if (typeof google != 'undefined' && google.colab.kernel != null) {\n",
+ " google.colab.kernel.comms.registerTarget(comm_id, (comm) => {\n",
+ " var messages = comm.messages[Symbol.asyncIterator]();\n",
+ " function processIteratorResult(result) {\n",
+ " var message = result.value;\n",
+ " console.log(message)\n",
+ " var content = {data: message.data, comm_id};\n",
+ " var buffers = []\n",
+ " for (var buffer of message.buffers || []) {\n",
+ " buffers.push(new DataView(buffer))\n",
+ " }\n",
+ " var metadata = message.metadata || {};\n",
+ " var msg = {content, buffers, metadata}\n",
+ " msg_handler(msg);\n",
+ " return messages.next().then(processIteratorResult);\n",
+ " }\n",
+ " return messages.next().then(processIteratorResult);\n",
+ " })\n",
+ " }\n",
+ " }\n",
+ "\n",
+ " JupyterCommManager.prototype.get_client_comm = function(plot_id, comm_id, msg_handler) {\n",
+ " if (comm_id in window.PyViz.comms) {\n",
+ " return window.PyViz.comms[comm_id];\n",
+ " } else if (window.comm_manager || ((window.Jupyter !== undefined) && (Jupyter.notebook.kernel != null))) {\n",
+ " var comm_manager = window.comm_manager || Jupyter.notebook.kernel.comm_manager;\n",
+ " var comm = comm_manager.new_comm(comm_id, {}, {}, {}, comm_id);\n",
+ " if (msg_handler) {\n",
+ " comm.on_msg(msg_handler);\n",
+ " }\n",
+ " } else if ((plot_id in window.PyViz.kernels) && (window.PyViz.kernels[plot_id])) {\n",
+ " var comm = window.PyViz.kernels[plot_id].connectToComm(comm_id);\n",
+ " comm.open();\n",
+ " if (msg_handler) {\n",
+ " comm.onMsg = msg_handler;\n",
+ " }\n",
+ " } else if (typeof google != 'undefined' && google.colab.kernel != null) {\n",
+ " var comm_promise = google.colab.kernel.comms.open(comm_id)\n",
+ " comm_promise.then((comm) => {\n",
+ " window.PyViz.comms[comm_id] = comm;\n",
+ " if (msg_handler) {\n",
+ " var messages = comm.messages[Symbol.asyncIterator]();\n",
+ " function processIteratorResult(result) {\n",
+ " var message = result.value;\n",
+ " var content = {data: message.data};\n",
+ " var metadata = message.metadata || {comm_id};\n",
+ " var msg = {content, metadata}\n",
+ " msg_handler(msg);\n",
+ " return messages.next().then(processIteratorResult);\n",
+ " }\n",
+ " return messages.next().then(processIteratorResult);\n",
+ " }\n",
+ " }) \n",
+ " var sendClosure = (data, metadata, buffers, disposeOnDone) => {\n",
+ " return comm_promise.then((comm) => {\n",
+ " comm.send(data, metadata, buffers, disposeOnDone);\n",
+ " });\n",
+ " };\n",
+ " var comm = {\n",
+ " send: sendClosure\n",
+ " };\n",
+ " }\n",
+ " window.PyViz.comms[comm_id] = comm;\n",
+ " return comm;\n",
+ " }\n",
+ " window.PyViz.comm_manager = new JupyterCommManager();\n",
+ " \n",
+ "\n",
+ "\n",
+ "var JS_MIME_TYPE = 'application/javascript';\n",
+ "var HTML_MIME_TYPE = 'text/html';\n",
+ "var EXEC_MIME_TYPE = 'application/vnd.holoviews_exec.v0+json';\n",
+ "var CLASS_NAME = 'output';\n",
+ "\n",
+ "/**\n",
+ " * Render data to the DOM node\n",
+ " */\n",
+ "function render(props, node) {\n",
+ " var div = document.createElement(\"div\");\n",
+ " var script = document.createElement(\"script\");\n",
+ " node.appendChild(div);\n",
+ " node.appendChild(script);\n",
+ "}\n",
+ "\n",
+ "/**\n",
+ " * Handle when a new output is added\n",
+ " */\n",
+ "function handle_add_output(event, handle) {\n",
+ " var output_area = handle.output_area;\n",
+ " var output = handle.output;\n",
+ " if ((output.data == undefined) || (!output.data.hasOwnProperty(EXEC_MIME_TYPE))) {\n",
+ " return\n",
+ " }\n",
+ " var id = output.metadata[EXEC_MIME_TYPE][\"id\"];\n",
+ " var toinsert = output_area.element.find(\".\" + CLASS_NAME.split(' ')[0]);\n",
+ " if (id !== undefined) {\n",
+ " var nchildren = toinsert.length;\n",
+ " var html_node = toinsert[nchildren-1].children[0];\n",
+ " html_node.innerHTML = output.data[HTML_MIME_TYPE];\n",
+ " var scripts = [];\n",
+ " var nodelist = html_node.querySelectorAll(\"script\");\n",
+ " for (var i in nodelist) {\n",
+ " if (nodelist.hasOwnProperty(i)) {\n",
+ " scripts.push(nodelist[i])\n",
+ " }\n",
+ " }\n",
+ "\n",
+ " scripts.forEach( function (oldScript) {\n",
+ " var newScript = document.createElement(\"script\");\n",
+ " var attrs = [];\n",
+ " var nodemap = oldScript.attributes;\n",
+ " for (var j in nodemap) {\n",
+ " if (nodemap.hasOwnProperty(j)) {\n",
+ " attrs.push(nodemap[j])\n",
+ " }\n",
+ " }\n",
+ " attrs.forEach(function(attr) { newScript.setAttribute(attr.name, attr.value) });\n",
+ " newScript.appendChild(document.createTextNode(oldScript.innerHTML));\n",
+ " oldScript.parentNode.replaceChild(newScript, oldScript);\n",
+ " });\n",
+ " if (JS_MIME_TYPE in output.data) {\n",
+ " toinsert[nchildren-1].children[1].textContent = output.data[JS_MIME_TYPE];\n",
+ " }\n",
+ " output_area._hv_plot_id = id;\n",
+ " if ((window.Bokeh !== undefined) && (id in Bokeh.index)) {\n",
+ " window.PyViz.plot_index[id] = Bokeh.index[id];\n",
+ " } else {\n",
+ " window.PyViz.plot_index[id] = null;\n",
+ " }\n",
+ " } else if (output.metadata[EXEC_MIME_TYPE][\"server_id\"] !== undefined) {\n",
+ " var bk_div = document.createElement(\"div\");\n",
+ " bk_div.innerHTML = output.data[HTML_MIME_TYPE];\n",
+ " var script_attrs = bk_div.children[0].attributes;\n",
+ " for (var i = 0; i < script_attrs.length; i++) {\n",
+ " toinsert[toinsert.length - 1].childNodes[1].setAttribute(script_attrs[i].name, script_attrs[i].value);\n",
+ " }\n",
+ " // store reference to server id on output_area\n",
+ " output_area._bokeh_server_id = output.metadata[EXEC_MIME_TYPE][\"server_id\"];\n",
+ " }\n",
+ "}\n",
+ "\n",
+ "/**\n",
+ " * Handle when an output is cleared or removed\n",
+ " */\n",
+ "function handle_clear_output(event, handle) {\n",
+ " var id = handle.cell.output_area._hv_plot_id;\n",
+ " var server_id = handle.cell.output_area._bokeh_server_id;\n",
+ " if (((id === undefined) || !(id in PyViz.plot_index)) && (server_id !== undefined)) { return; }\n",
+ " var comm = window.PyViz.comm_manager.get_client_comm(\"hv-extension-comm\", \"hv-extension-comm\", function () {});\n",
+ " if (server_id !== null) {\n",
+ " comm.send({event_type: 'server_delete', 'id': server_id});\n",
+ " return;\n",
+ " } else if (comm !== null) {\n",
+ " comm.send({event_type: 'delete', 'id': id});\n",
+ " }\n",
+ " delete PyViz.plot_index[id];\n",
+ " if ((window.Bokeh !== undefined) & (id in window.Bokeh.index)) {\n",
+ " var doc = window.Bokeh.index[id].model.document\n",
+ " doc.clear();\n",
+ " const i = window.Bokeh.documents.indexOf(doc);\n",
+ " if (i > -1) {\n",
+ " window.Bokeh.documents.splice(i, 1);\n",
+ " }\n",
+ " }\n",
+ "}\n",
+ "\n",
+ "/**\n",
+ " * Handle kernel restart event\n",
+ " */\n",
+ "function handle_kernel_cleanup(event, handle) {\n",
+ " delete PyViz.comms[\"hv-extension-comm\"];\n",
+ " window.PyViz.plot_index = {}\n",
+ "}\n",
+ "\n",
+ "/**\n",
+ " * Handle update_display_data messages\n",
+ " */\n",
+ "function handle_update_output(event, handle) {\n",
+ " handle_clear_output(event, {cell: {output_area: handle.output_area}})\n",
+ " handle_add_output(event, handle)\n",
+ "}\n",
+ "\n",
+ "function register_renderer(events, OutputArea) {\n",
+ " function append_mime(data, metadata, element) {\n",
+ " // create a DOM node to render to\n",
+ " var toinsert = this.create_output_subarea(\n",
+ " metadata,\n",
+ " CLASS_NAME,\n",
+ " EXEC_MIME_TYPE\n",
+ " );\n",
+ " this.keyboard_manager.register_events(toinsert);\n",
+ " // Render to node\n",
+ " var props = {data: data, metadata: metadata[EXEC_MIME_TYPE]};\n",
+ " render(props, toinsert[0]);\n",
+ " element.append(toinsert);\n",
+ " return toinsert\n",
+ " }\n",
+ "\n",
+ " events.on('output_added.OutputArea', handle_add_output);\n",
+ " events.on('output_updated.OutputArea', handle_update_output);\n",
+ " events.on('clear_output.CodeCell', handle_clear_output);\n",
+ " events.on('delete.Cell', handle_clear_output);\n",
+ " events.on('kernel_ready.Kernel', handle_kernel_cleanup);\n",
+ "\n",
+ " OutputArea.prototype.register_mime_type(EXEC_MIME_TYPE, append_mime, {\n",
+ " safe: true,\n",
+ " index: 0\n",
+ " });\n",
+ "}\n",
+ "\n",
+ "if (window.Jupyter !== undefined) {\n",
+ " try {\n",
+ " var events = require('base/js/events');\n",
+ " var OutputArea = require('notebook/js/outputarea').OutputArea;\n",
+ " if (OutputArea.prototype.mime_types().indexOf(EXEC_MIME_TYPE) == -1) {\n",
+ " register_renderer(events, OutputArea);\n",
+ " }\n",
+ " } catch(err) {\n",
+ " }\n",
+ "}\n"
+ ],
+ "application/vnd.holoviews_load.v0+json": "\nif ((window.PyViz === undefined) || (window.PyViz instanceof HTMLElement)) {\n window.PyViz = {comms: {}, comm_status:{}, kernels:{}, receivers: {}, plot_index: []}\n}\n\n\n function JupyterCommManager() {\n }\n\n JupyterCommManager.prototype.register_target = function(plot_id, comm_id, msg_handler) {\n if (window.comm_manager || ((window.Jupyter !== undefined) && (Jupyter.notebook.kernel != null))) {\n var comm_manager = window.comm_manager || Jupyter.notebook.kernel.comm_manager;\n comm_manager.register_target(comm_id, function(comm) {\n comm.on_msg(msg_handler);\n });\n } else if ((plot_id in window.PyViz.kernels) && (window.PyViz.kernels[plot_id])) {\n window.PyViz.kernels[plot_id].registerCommTarget(comm_id, function(comm) {\n comm.onMsg = msg_handler;\n });\n } else if (typeof google != 'undefined' && google.colab.kernel != null) {\n google.colab.kernel.comms.registerTarget(comm_id, (comm) => {\n var messages = comm.messages[Symbol.asyncIterator]();\n function processIteratorResult(result) {\n var message = result.value;\n console.log(message)\n var content = {data: message.data, comm_id};\n var buffers = []\n for (var buffer of message.buffers || []) {\n buffers.push(new DataView(buffer))\n }\n var metadata = message.metadata || {};\n var msg = {content, buffers, metadata}\n msg_handler(msg);\n return messages.next().then(processIteratorResult);\n }\n return messages.next().then(processIteratorResult);\n })\n }\n }\n\n JupyterCommManager.prototype.get_client_comm = function(plot_id, comm_id, msg_handler) {\n if (comm_id in window.PyViz.comms) {\n return window.PyViz.comms[comm_id];\n } else if (window.comm_manager || ((window.Jupyter !== undefined) && (Jupyter.notebook.kernel != null))) {\n var comm_manager = window.comm_manager || Jupyter.notebook.kernel.comm_manager;\n var comm = comm_manager.new_comm(comm_id, {}, {}, {}, comm_id);\n if (msg_handler) {\n comm.on_msg(msg_handler);\n }\n } else if ((plot_id in window.PyViz.kernels) && (window.PyViz.kernels[plot_id])) {\n var comm = window.PyViz.kernels[plot_id].connectToComm(comm_id);\n comm.open();\n if (msg_handler) {\n comm.onMsg = msg_handler;\n }\n } else if (typeof google != 'undefined' && google.colab.kernel != null) {\n var comm_promise = google.colab.kernel.comms.open(comm_id)\n comm_promise.then((comm) => {\n window.PyViz.comms[comm_id] = comm;\n if (msg_handler) {\n var messages = comm.messages[Symbol.asyncIterator]();\n function processIteratorResult(result) {\n var message = result.value;\n var content = {data: message.data};\n var metadata = message.metadata || {comm_id};\n var msg = {content, metadata}\n msg_handler(msg);\n return messages.next().then(processIteratorResult);\n }\n return messages.next().then(processIteratorResult);\n }\n }) \n var sendClosure = (data, metadata, buffers, disposeOnDone) => {\n return comm_promise.then((comm) => {\n comm.send(data, metadata, buffers, disposeOnDone);\n });\n };\n var comm = {\n send: sendClosure\n };\n }\n window.PyViz.comms[comm_id] = comm;\n return comm;\n }\n window.PyViz.comm_manager = new JupyterCommManager();\n \n\n\nvar JS_MIME_TYPE = 'application/javascript';\nvar HTML_MIME_TYPE = 'text/html';\nvar EXEC_MIME_TYPE = 'application/vnd.holoviews_exec.v0+json';\nvar CLASS_NAME = 'output';\n\n/**\n * Render data to the DOM node\n */\nfunction render(props, node) {\n var div = document.createElement(\"div\");\n var script = document.createElement(\"script\");\n node.appendChild(div);\n node.appendChild(script);\n}\n\n/**\n * 
Handle when a new output is added\n */\nfunction handle_add_output(event, handle) {\n var output_area = handle.output_area;\n var output = handle.output;\n if ((output.data == undefined) || (!output.data.hasOwnProperty(EXEC_MIME_TYPE))) {\n return\n }\n var id = output.metadata[EXEC_MIME_TYPE][\"id\"];\n var toinsert = output_area.element.find(\".\" + CLASS_NAME.split(' ')[0]);\n if (id !== undefined) {\n var nchildren = toinsert.length;\n var html_node = toinsert[nchildren-1].children[0];\n html_node.innerHTML = output.data[HTML_MIME_TYPE];\n var scripts = [];\n var nodelist = html_node.querySelectorAll(\"script\");\n for (var i in nodelist) {\n if (nodelist.hasOwnProperty(i)) {\n scripts.push(nodelist[i])\n }\n }\n\n scripts.forEach( function (oldScript) {\n var newScript = document.createElement(\"script\");\n var attrs = [];\n var nodemap = oldScript.attributes;\n for (var j in nodemap) {\n if (nodemap.hasOwnProperty(j)) {\n attrs.push(nodemap[j])\n }\n }\n attrs.forEach(function(attr) { newScript.setAttribute(attr.name, attr.value) });\n newScript.appendChild(document.createTextNode(oldScript.innerHTML));\n oldScript.parentNode.replaceChild(newScript, oldScript);\n });\n if (JS_MIME_TYPE in output.data) {\n toinsert[nchildren-1].children[1].textContent = output.data[JS_MIME_TYPE];\n }\n output_area._hv_plot_id = id;\n if ((window.Bokeh !== undefined) && (id in Bokeh.index)) {\n window.PyViz.plot_index[id] = Bokeh.index[id];\n } else {\n window.PyViz.plot_index[id] = null;\n }\n } else if (output.metadata[EXEC_MIME_TYPE][\"server_id\"] !== undefined) {\n var bk_div = document.createElement(\"div\");\n bk_div.innerHTML = output.data[HTML_MIME_TYPE];\n var script_attrs = bk_div.children[0].attributes;\n for (var i = 0; i < script_attrs.length; i++) {\n toinsert[toinsert.length - 1].childNodes[1].setAttribute(script_attrs[i].name, script_attrs[i].value);\n }\n // store reference to server id on output_area\n output_area._bokeh_server_id = output.metadata[EXEC_MIME_TYPE][\"server_id\"];\n }\n}\n\n/**\n * Handle when an output is cleared or removed\n */\nfunction handle_clear_output(event, handle) {\n var id = handle.cell.output_area._hv_plot_id;\n var server_id = handle.cell.output_area._bokeh_server_id;\n if (((id === undefined) || !(id in PyViz.plot_index)) && (server_id !== undefined)) { return; }\n var comm = window.PyViz.comm_manager.get_client_comm(\"hv-extension-comm\", \"hv-extension-comm\", function () {});\n if (server_id !== null) {\n comm.send({event_type: 'server_delete', 'id': server_id});\n return;\n } else if (comm !== null) {\n comm.send({event_type: 'delete', 'id': id});\n }\n delete PyViz.plot_index[id];\n if ((window.Bokeh !== undefined) & (id in window.Bokeh.index)) {\n var doc = window.Bokeh.index[id].model.document\n doc.clear();\n const i = window.Bokeh.documents.indexOf(doc);\n if (i > -1) {\n window.Bokeh.documents.splice(i, 1);\n }\n }\n}\n\n/**\n * Handle kernel restart event\n */\nfunction handle_kernel_cleanup(event, handle) {\n delete PyViz.comms[\"hv-extension-comm\"];\n window.PyViz.plot_index = {}\n}\n\n/**\n * Handle update_display_data messages\n */\nfunction handle_update_output(event, handle) {\n handle_clear_output(event, {cell: {output_area: handle.output_area}})\n handle_add_output(event, handle)\n}\n\nfunction register_renderer(events, OutputArea) {\n function append_mime(data, metadata, element) {\n // create a DOM node to render to\n var toinsert = this.create_output_subarea(\n metadata,\n CLASS_NAME,\n EXEC_MIME_TYPE\n );\n 
this.keyboard_manager.register_events(toinsert);\n // Render to node\n var props = {data: data, metadata: metadata[EXEC_MIME_TYPE]};\n render(props, toinsert[0]);\n element.append(toinsert);\n return toinsert\n }\n\n events.on('output_added.OutputArea', handle_add_output);\n events.on('output_updated.OutputArea', handle_update_output);\n events.on('clear_output.CodeCell', handle_clear_output);\n events.on('delete.Cell', handle_clear_output);\n events.on('kernel_ready.Kernel', handle_kernel_cleanup);\n\n OutputArea.prototype.register_mime_type(EXEC_MIME_TYPE, append_mime, {\n safe: true,\n index: 0\n });\n}\n\nif (window.Jupyter !== undefined) {\n try {\n var events = require('base/js/events');\n var OutputArea = require('notebook/js/outputarea').OutputArea;\n if (OutputArea.prototype.mime_types().indexOf(EXEC_MIME_TYPE) == -1) {\n register_renderer(events, OutputArea);\n }\n } catch(err) {\n }\n}\n"
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "ERROR 1: PROJ: proj_create_from_database: Open of /home/slanglois/mambaforge/envs/xhydro/share/proj failed\n"
+ ]
+ }
+ ],
+ "source": [
+ "import xarray as xr\n",
+ "import xhydro as xh\n",
+ "import numpy as np\n",
+ "import xdatasets as xd"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "## Prepare the data"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To conduct frequency analysis on historical time series from various sites, we begin by obtaining a dataset comprising hydrological information. \n",
+ "\n",
+ "Here, we use the [xdataset](https://hydrologie.github.io/xdatasets/notebooks/getting_started.html) library to acquire hydrological data from the [Ministère de l'Environnement, de la Lutte contre les changements climatiques, de la Faune et des Parcs](https://www.cehq.gouv.qc.ca/atlas-hydroclimatique/stations-hydrometriques/index.htm). Specifically, our query focuses on stations with IDs beginning with `02`, possessing a natural flow pattern and limited to streamflow data. \n",
+ "\n",
+ "Users may prefer to generate their own `xarray.DataArray` using their individual dataset. At a minimum, the `xarray.DataArray` used for frequency analysis needs to have an `id` and a `time` dimension."
+ ]
+ },
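+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a minimal sketch (not part of the original dataset), a hand-built `xarray.DataArray` with only `id` and `time` dimensions would also satisfy those requirements; station names and values below are placeholders:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "# Hypothetical two-station, one-year daily record; values are random placeholders.\n",
+ "minimal_da = xr.DataArray(\n",
+ "    np.random.rand(2, 365),\n",
+ "    dims=(\"id\", \"time\"),\n",
+ "    coords={\n",
+ "        \"id\": [\"stationA\", \"stationB\"],\n",
+ "        \"time\": pd.date_range(\"2000-01-01\", periods=365, freq=\"D\"),\n",
+ "    },\n",
+ "    name=\"streamflow\",\n",
+ ")"
+ ]
+ },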
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "
<xarray.Dataset>\n",
+ "Dimensions: (id: 37, time: 20454)\n",
+ "Coordinates: (12/15)\n",
+ " drainage_area (id) float32 1.09e+03 647.0 59.8 ... 227.0 2.163e+03 48.1\n",
+ " end_date (id) datetime64[ns] 2006-10-13 2023-06-14 ... 2023-06-14\n",
+ " * id (id) object '020302' '020404' '020502' ... '024014' '024015'\n",
+ " latitude (id) float32 48.77 48.81 48.98 48.98 ... 46.05 46.2 46.18\n",
+ " longitude (id) float32 -64.52 -64.92 -64.43 ... -71.45 -72.1 -71.75\n",
+ " name (id) object 'Saint' 'York' ... 'Bécancour' 'Bourbon'\n",
+ " ... ...\n",
+ " spatial_agg <U9 'watershed'\n",
+ " start_date (id) datetime64[ns] 1989-08-12 1980-10-01 ... 2006-07-24\n",
+ " * time (time) datetime64[ns] 1970-01-01 1970-01-02 ... 2025-12-31\n",
+ " time_agg <U4 'mean'\n",
+ " timestep <U1 'D'\n",
+ " variable <U10 'streamflow'\n",
+ "Data variables:\n",
+ " streamflow (id, time) float32 nan nan nan nan nan ... nan nan nan nan
drainage_area
(id)
float32
1.09e+03 647.0 ... 2.163e+03 48.1
- long_name :
- drainage_area
- units :
- km2
array([1090. , 647. , 59.8 , 626. , 1200. , 772. , 721. ,\n",
+ " 1655. , 223. , 494. , 98.6 , 1615. , 930. , 267. ,\n",
+ " 515. , 1042. , 796. , 61. , 191. , 821. , 1152. ,\n",
+ " 708. , 5820. , 696. , 781. , 806. , 3085. , 154. ,\n",
+ " 356. , 385. , 914. , 100.87, 2330. , 25.7 , 227. ,\n",
+ " 2163. , 48.1 ], dtype=float32)
end_date
(id)
datetime64[ns]
2006-10-13 ... 2023-06-14
array(['2006-10-13T00:00:00.000000000', '2023-06-14T00:00:00.000000000',\n",
+ " '1997-06-08T00:00:00.000000000', '2023-06-14T00:00:00.000000000',\n",
+ " '1996-08-13T00:00:00.000000000', '2023-06-10T00:00:00.000000000',\n",
+ " '1996-08-14T00:00:00.000000000', '2023-06-10T00:00:00.000000000',\n",
+ " '2023-06-14T00:00:00.000000000', '1998-05-25T00:00:00.000000000',\n",
+ " '2023-06-14T00:00:00.000000000', '2023-06-10T00:00:00.000000000',\n",
+ " '2023-06-12T00:00:00.000000000', '2023-06-10T00:00:00.000000000',\n",
+ " '2023-06-10T00:00:00.000000000', '2023-06-10T00:00:00.000000000',\n",
+ " '2023-06-10T00:00:00.000000000', '2023-06-10T00:00:00.000000000',\n",
+ " '2023-06-11T00:00:00.000000000', '2023-06-10T00:00:00.000000000',\n",
+ " '2023-06-10T00:00:00.000000000', '2023-06-10T00:00:00.000000000',\n",
+ " '2023-06-10T00:00:00.000000000', '2023-06-10T00:00:00.000000000',\n",
+ " '2023-06-10T00:00:00.000000000', '2023-06-09T00:00:00.000000000',\n",
+ " '2023-06-10T00:00:00.000000000', '2023-06-09T00:00:00.000000000',\n",
+ " '2007-05-24T00:00:00.000000000', '2023-06-14T00:00:00.000000000',\n",
+ " '2023-06-12T00:00:00.000000000', '2023-06-12T00:00:00.000000000',\n",
+ " '2001-10-01T00:00:00.000000000', '1995-10-25T00:00:00.000000000',\n",
+ " '2010-11-19T00:00:00.000000000', '2023-06-11T00:00:00.000000000',\n",
+ " '2023-06-14T00:00:00.000000000'], dtype='datetime64[ns]')
id
(id)
object
'020302' '020404' ... '024015'
array(['020302', '020404', '020502', '020602', '020802', '021407', '021502',\n",
+ " '021601', '021702', '021915', '021916', '022003', '022301', '022505',\n",
+ " '022507', '022513', '022704', '023002', '023004', '023106', '023303',\n",
+ " '023401', '023402', '023422', '023427', '023428', '023429', '023432',\n",
+ " '023701', '023702', '024003', '024004', '024007', '024010', '024013',\n",
+ " '024014', '024015'], dtype=object)
latitude
(id)
float32
48.77 48.81 48.98 ... 46.2 46.18
- long_name :
- latitude
- standard_name :
- latitude
- units :
- decimal_degrees
array([48.769165, 48.806946, 48.982224, 48.97778 , 49.202778, 49.04361 ,\n",
+ " 49.055557, 48.773613, 48.766945, 48.517776, 48.419724, 48.412777,\n",
+ " 48.08917 , 47.650833, 47.611946, 47.821945, 47.38111 , 46.7075 ,\n",
+ " 46.815834, 46.82 , 46.691387, 46.656944, 46.586945, 46.166943,\n",
+ " 45.573055, 46.060276, 46.09639 , 46.54028 , 46.500557, 46.540554,\n",
+ " 46.30611 , 46.22 , 46.19472 , 46.175556, 46.045277, 46.19889 ,\n",
+ " 46.184166], dtype=float32)
longitude
(id)
float32
-64.52 -64.92 ... -72.1 -71.75
- long_name :
- longitude
- standard_name :
- longitude
- units :
- decimal_degrees
array([-64.51583 , -64.916664, -64.42694 , -64.69972 , -65.29472 ,\n",
+ " -66.47583 , -66.66917 , -67.540276, -67.666115, -68.15972 ,\n",
+ " -68.35611 , -68.555 , -69.195274, -69.51222 , -69.64472 ,\n",
+ " -69.51667 , -69.95389 , -70.960556, -70.90056 , -70.756386,\n",
+ " -71.068054, -71.28889 , -71.21361 , -70.63917 , -70.880554,\n",
+ " -70.5325 , -70.65444 , -71.34 , -72.10833 , -72.09333 ,\n",
+ " -71.450554, -71.77695 , -72.28333 , -71.45722 , -71.44722 ,\n",
+ " -72.098335, -71.754166], dtype=float32)
name
(id)
object
'Saint' 'York' ... 'Bourbon'
array(['Saint', 'York', 'Au Renard', 'Dartmouth', 'Madeleine', 'Sainte',\n",
+ " 'Cap', 'Matane', 'Blanche', 'Neigette', 'Petite rivière Neigette',\n",
+ " 'Rimouski', 'Des Trois', 'Fourchue', 'Du Loup', 'Du Loup',\n",
+ " 'Ouelle', 'Boyer Sud', 'Boyer', 'Du Sud', 'Etchemin', 'Beaurivage',\n",
+ " 'Chaudière', 'Famine', 'Chaudière', 'Du Loup', 'Chaudière',\n",
+ " "Bras d'Henri", 'Petite rivière du Chêne',\n",
+ " 'Petite rivière du Chêne', 'Bécancour', 'Bourbon', 'Bécancour',\n",
+ " 'Bullard', 'Bécancour', 'Bécancour', 'Bourbon'], dtype=object)
province
(id)
object
'QC' 'QC' 'QC' ... 'QC' 'QC' 'QC'
array(['QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC',\n",
+ " 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC',\n",
+ " 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC',\n",
+ " 'QC', 'QC', 'QC', 'QC'], dtype=object)
regulated
(id)
object
'Natural' 'Natural' ... 'Natural'
array(['Natural', 'Natural', 'Natural', 'Natural', 'Natural', 'Natural',\n",
+ " 'Natural', 'Influenced (daily)', 'Influenced (daily)', 'Natural',\n",
+ " 'Natural', 'Influenced (daily)', 'Influenced (daily)',\n",
+ " 'Influenced (monthly)', 'Natural', 'Influenced (monthly)',\n",
+ " 'Natural', 'Natural', 'Natural', 'Influenced (daily)', 'Natural',\n",
+ " 'Natural', 'Influenced (daily)', 'Natural', 'Influenced (monthly)',\n",
+ " 'Natural', 'Influenced (monthly)', 'Natural', 'Natural', 'Natural',\n",
+ " 'Natural', 'Influenced (daily)', 'Natural', 'Natural', 'Natural',\n",
+ " 'Natural', 'Natural'], dtype=object)
source
()
<U102
'Ministère de l’Environnement, d...
array('Ministère de l’Environnement, de la Lutte contre les changements climatiques, de la Faune et des Parcs',\n",
+ " dtype='<U102')
spatial_agg
()
<U9
'watershed'
array('watershed', dtype='<U9')
start_date
(id)
datetime64[ns]
1989-08-12 ... 2006-07-24
array(['1989-08-12T00:00:00.000000000', '1980-10-01T00:00:00.000000000',\n",
+ " '1977-07-10T00:00:00.000000000', '1970-10-01T00:00:00.000000000',\n",
+ " '1970-01-01T00:00:00.000000000', '1973-06-20T00:00:00.000000000',\n",
+ " '1970-01-01T00:00:00.000000000', '1970-01-01T00:00:00.000000000',\n",
+ " '1970-01-01T00:00:00.000000000', '1974-11-07T00:00:00.000000000',\n",
+ " '2000-09-01T00:00:00.000000000', '1970-01-01T00:00:00.000000000',\n",
+ " '1970-01-01T00:00:00.000000000', '1970-01-01T00:00:00.000000000',\n",
+ " '1978-09-09T00:00:00.000000000', '1970-01-01T00:00:00.000000000',\n",
+ " '1982-10-01T00:00:00.000000000', '1993-06-30T00:00:00.000000000',\n",
+ " '1996-05-29T00:00:00.000000000', '1970-01-01T00:00:00.000000000',\n",
+ " '1980-10-01T00:00:00.000000000', '1970-01-01T00:00:00.000000000',\n",
+ " '1970-01-01T00:00:00.000000000', '1970-01-01T00:00:00.000000000',\n",
+ " '1976-10-19T00:00:00.000000000', '1970-01-01T00:00:00.000000000',\n",
+ " '1970-01-01T00:00:00.000000000', '1972-06-28T00:00:00.000000000',\n",
+ " '1972-08-24T00:00:00.000000000', '2007-01-11T00:00:00.000000000',\n",
+ " '1970-01-01T00:00:00.000000000', '1970-01-01T00:00:00.000000000',\n",
+ " '1970-01-01T00:00:00.000000000', '1977-11-01T00:00:00.000000000',\n",
+ " '1979-05-12T00:00:00.000000000', '1999-11-12T00:00:00.000000000',\n",
+ " '2006-07-24T00:00:00.000000000'], dtype='datetime64[ns]')
time
(time)
datetime64[ns]
1970-01-01 ... 2025-12-31
array(['1970-01-01T00:00:00.000000000', '1970-01-02T00:00:00.000000000',\n",
+ " '1970-01-03T00:00:00.000000000', ..., '2025-12-29T00:00:00.000000000',\n",
+ " '2025-12-30T00:00:00.000000000', '2025-12-31T00:00:00.000000000'],\n",
+ " dtype='datetime64[ns]')
time_agg
()
<U4
'mean'
array('mean', dtype='<U4')
timestep
()
<U1
'D'
variable
()
<U10
'streamflow'
array('streamflow', dtype='<U10')
PandasIndex
PandasIndex(Index(['020302', '020404', '020502', '020602', '020802', '021407', '021502',\n",
+ " '021601', '021702', '021915', '021916', '022003', '022301', '022505',\n",
+ " '022507', '022513', '022704', '023002', '023004', '023106', '023303',\n",
+ " '023401', '023402', '023422', '023427', '023428', '023429', '023432',\n",
+ " '023701', '023702', '024003', '024004', '024007', '024010', '024013',\n",
+ " '024014', '024015'],\n",
+ " dtype='object', name='id'))
PandasIndex
PandasIndex(DatetimeIndex(['1970-01-01', '1970-01-02', '1970-01-03', '1970-01-04',\n",
+ " '1970-01-05', '1970-01-06', '1970-01-07', '1970-01-08',\n",
+ " '1970-01-09', '1970-01-10',\n",
+ " ...\n",
+ " '2025-12-22', '2025-12-23', '2025-12-24', '2025-12-25',\n",
+ " '2025-12-26', '2025-12-27', '2025-12-28', '2025-12-29',\n",
+ " '2025-12-30', '2025-12-31'],\n",
+ " dtype='datetime64[ns]', name='time', length=20454, freq=None))
"
+ ],
+ "text/plain": [
+ "\n",
+ "Dimensions: (id: 37, time: 20454)\n",
+ "Coordinates: (12/15)\n",
+ " drainage_area (id) float32 1.09e+03 647.0 59.8 ... 227.0 2.163e+03 48.1\n",
+ " end_date (id) datetime64[ns] 2006-10-13 2023-06-14 ... 2023-06-14\n",
+ " * id (id) object '020302' '020404' '020502' ... '024014' '024015'\n",
+ " latitude (id) float32 48.77 48.81 48.98 48.98 ... 46.05 46.2 46.18\n",
+ " longitude (id) float32 -64.52 -64.92 -64.43 ... -71.45 -72.1 -71.75\n",
+ " name (id) object 'Saint' 'York' ... 'Bécancour' 'Bourbon'\n",
+ " ... ...\n",
+ " spatial_agg \n",
+ " \n",
+ "\n",
+ ""
+ ],
+ "text/plain": [
+ "Column\n",
+ " [0] HoloViews(DynamicMap, height=300, sizing_mode='fixed', widget_location='bottom', width=700)\n",
+ " [1] WidgetBox(align=('center', 'end'))\n",
+ " [0] Select(margin=(20, 20, 20, 20), name='id', options=['020302', '020404', ...], value='020302', width=250)"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {
+ "application/vnd.holoviews_exec.v0+json": {
+ "id": "p1002"
+ }
+ },
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "(data\n",
+ " .streamflow\n",
+ " .dropna('time', 'all')\n",
+ " .hvplot(x='time',grid=True, widget_location='bottom', groupby='id')\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Customize the analysis settings\n",
+ "\n",
+ "With a collection of hydrological data now at our disposal, we can provide the `xarray.Dataset` to the `Data` object. This step allows us to fine-tune certain configurations before proceeding with the frequency analysis."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from xhydro.frequency_analysis.local import Data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "xfa = Data(data)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### a) Define the seasons\n",
+ "We can define seasons by supplying a season's name along with a range of Julian days."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fall_start = xh.get_julian_day(month=9, day = 1)\n",
+ "fall_end = xh.get_julian_day(month=12, day=1)\n",
+ "\n",
+ "spring_start = xh.get_julian_day(month=2, day=11)\n",
+ "spring_end = xh.get_julian_day(month=6, day=19)"
+ ]
+ },
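+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For reference, `get_julian_day` is assumed here to return the day of year, so with the values above `fall_start` would be 244 (September 1) and `fall_end` 335 (December 1) in a non-leap year."
+ ]
+ },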
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/home/slanglois/PycharmProjects/xhydro/xhydro/frequency_analysis/local.py:191: UserWarning: Warning, fall overlapping with Fall\n",
+ " warnings.warn(\"Warning, \" + name + \" overlapping with \" + season)\n",
+ "/home/slanglois/PycharmProjects/xhydro/xhydro/frequency_analysis/local.py:191: UserWarning: Warning, spring overlapping with Spring\n",
+ " warnings.warn(\"Warning, \" + name + \" overlapping with \" + season)\n"
+ ]
+ }
+ ],
+ "source": [
+ "xfa.season = ['fall', fall_start, fall_end]\n",
+ "xfa.season = ['spring', spring_start, spring_end]\n",
+ "xfa.season = ['annual', 1, 365]"
+ ]
+ },
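+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The warnings above simply flag that the newly defined `fall` and `spring` seasons overlap with the predefined `Fall` and `Spring` ones; all of them remain registered, as the listing below shows."
+ ]
+ },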
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['Fall', 'Spring', 'spring_custom', 'fall', 'spring', 'annual']"
+ ]
+ },
+ "execution_count": 25,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "xfa.get_seasons()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If a season is no longer required, it can readily be remove like this : "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['Fall', 'Spring', 'spring_custom', 'fall', 'spring']"
+ ]
+ },
+ "execution_count": 27,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "xfa.rm_season('annual')\n",
+ "xfa.get_seasons()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "In cases where distinct catchments necessitate individualized Julian Day ranges for each year, users can explicitly define these ranges"
+ ]
+ },
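+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The cell below displays such a Dataset of per-year ranges. Here is a minimal sketch of how one could be built, assuming a single catchment and the same [70, 139] Julian-day range for every year:\n",
+ "\n",
+ "```python\n",
+ "import numpy as np\n",
+ "import xarray as xr\n",
+ "\n",
+ "years = np.arange(1910, 2023)\n",
+ "ranges = np.empty((1, len(years)), dtype=object)\n",
+ "for i in range(len(years)):\n",
+ "    ranges[0, i] = [70, 139]  # [start, end] in Julian days\n",
+ "\n",
+ "dates_ds = xr.Dataset(\n",
+ "    {'value': (('id', 'year'), ranges)},\n",
+ "    coords={'id': ['020302'], 'year': years},\n",
+ ")\n",
+ "```"
+ ]
+ },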
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "
<xarray.Dataset>\n",
+ "Dimensions: (year: 113, id: 1)\n",
+ "Coordinates:\n",
+ " * year (year) int64 1910 1911 1912 1913 1914 ... 2018 2019 2020 2021 2022\n",
+ " * id (id) <U6 '020302'\n",
+ "Data variables:\n",
+ " value (id, year) object [70, 139] [70, 139] ... [70, 139] [70, 139]
PandasIndex
PandasIndex(Index([1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919,\n",
+ " ...\n",
+ " 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022],\n",
+ " dtype='int64', name='year', length=113))
PandasIndex
PandasIndex(Index(['020302'], dtype='object', name='id'))
"
+ ],
+ "text/plain": [
+ "\n",
+ "Dimensions: (year: 113, id: 1)\n",
+ "Coordinates:\n",
+ " * year (year) int64 1910 1911 1912 1913 1914 ... 2018 2019 2020 2021 2022\n",
+ " * id (id) \n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " | \n",
+ " season | \n",
+ " year | \n",
+ " id | \n",
+ " Maxima | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 83 | \n",
+ " spring_custom | \n",
+ " 1993 | \n",
+ " 020302 | \n",
+ " 135.600006 | \n",
+ "
\n",
+ " \n",
+ " 84 | \n",
+ " spring_custom | \n",
+ " 1994 | \n",
+ " 020302 | \n",
+ " 501.000000 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ " season year id Maxima\n",
+ "83 spring_custom 1993 020302 135.600006\n",
+ "84 spring_custom 1994 020302 501.000000"
+ ]
+ },
+ "execution_count": 52,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "coords_to_drop = set(maxima1.coords )- set(maxima1.dims)\n",
+ "maxima1.drop(coords_to_drop).to_dataframe(name='Maxima').reset_index().dropna(how='any')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 53,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " season | \n",
+ " year | \n",
+ " id | \n",
+ " Maxima | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 4 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 020802 | \n",
+ " 266.000000 | \n",
+ "
\n",
+ " \n",
+ " 6 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 021502 | \n",
+ " 204.000000 | \n",
+ "
\n",
+ " \n",
+ " 7 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 021601 | \n",
+ " 413.000000 | \n",
+ "
\n",
+ " \n",
+ " 8 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 021702 | \n",
+ " 54.700001 | \n",
+ "
\n",
+ " \n",
+ " 11 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 022003 | \n",
+ " 309.000000 | \n",
+ "
\n",
+ " \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ "
\n",
+ " \n",
+ " 1990 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 023702 | \n",
+ " 84.209999 | \n",
+ "
\n",
+ " \n",
+ " 1991 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 024003 | \n",
+ " 208.600006 | \n",
+ "
\n",
+ " \n",
+ " 1992 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 024004 | \n",
+ " 17.190001 | \n",
+ "
\n",
+ " \n",
+ " 1996 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 024014 | \n",
+ " 405.500000 | \n",
+ "
\n",
+ " \n",
+ " 1997 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 024015 | \n",
+ " 11.680000 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
1302 rows × 4 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " season year id Maxima\n",
+ "4 Spring 1970 020802 266.000000\n",
+ "6 Spring 1970 021502 204.000000\n",
+ "7 Spring 1970 021601 413.000000\n",
+ "8 Spring 1970 021702 54.700001\n",
+ "11 Spring 1970 022003 309.000000\n",
+ "... ... ... ... ...\n",
+ "1990 Spring 2023 023702 84.209999\n",
+ "1991 Spring 2023 024003 208.600006\n",
+ "1992 Spring 2023 024004 17.190001\n",
+ "1996 Spring 2023 024014 405.500000\n",
+ "1997 Spring 2023 024015 11.680000\n",
+ "\n",
+ "[1302 rows x 4 columns]"
+ ]
+ },
+ "execution_count": 53,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "coords_to_drop = set(maxima2.coords )- set(maxima2.dims)\n",
+ "maxima2.drop(coords_to_drop).to_dataframe(name='Maxima').reset_index().dropna(how='any')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "On calcul les volume de crues à dates fixes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " end_date | \n",
+ " start_date | \n",
+ " volume | \n",
+ "
\n",
+ " \n",
+ " year | \n",
+ " id | \n",
+ " variable | \n",
+ " spatial_agg | \n",
+ " timestep | \n",
+ " time_agg | \n",
+ " source | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 1970 | \n",
+ " 020302 | \n",
+ " streamflow | \n",
+ " watershed | \n",
+ " 0 | \n",
+ " mean | \n",
+ " 0 | \n",
+ " 1970-02-05 | \n",
+ " 1970-02-04 | \n",
+ " NaN | \n",
+ "
\n",
+ " \n",
+ " 020404 | \n",
+ " streamflow | \n",
+ " watershed | \n",
+ " 0 | \n",
+ " mean | \n",
+ " 0 | \n",
+ " 1970-02-05 | \n",
+ " 1970-02-04 | \n",
+ " NaN | \n",
+ "
\n",
+ " \n",
+ " 020502 | \n",
+ " streamflow | \n",
+ " watershed | \n",
+ " 0 | \n",
+ " mean | \n",
+ " 0 | \n",
+ " 1970-02-05 | \n",
+ " 1970-02-04 | \n",
+ " NaN | \n",
+ "
\n",
+ " \n",
+ " 020602 | \n",
+ " streamflow | \n",
+ " watershed | \n",
+ " 0 | \n",
+ " mean | \n",
+ " 0 | \n",
+ " 1970-02-05 | \n",
+ " 1970-02-04 | \n",
+ " NaN | \n",
+ "
\n",
+ " \n",
+ " 020802 | \n",
+ " streamflow | \n",
+ " watershed | \n",
+ " 0 | \n",
+ " mean | \n",
+ " 0 | \n",
+ " 1970-02-05 | \n",
+ " 1970-02-04 | \n",
+ " 6.851520 | \n",
+ "
\n",
+ " \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ "
\n",
+ " \n",
+ " 2023 | \n",
+ " 024007 | \n",
+ " streamflow | \n",
+ " watershed | \n",
+ " 0 | \n",
+ " mean | \n",
+ " 0 | \n",
+ " 2023-02-05 | \n",
+ " 2023-02-04 | \n",
+ " NaN | \n",
+ "
\n",
+ " \n",
+ " 024010 | \n",
+ " streamflow | \n",
+ " watershed | \n",
+ " 0 | \n",
+ " mean | \n",
+ " 0 | \n",
+ " 2023-02-05 | \n",
+ " 2023-02-04 | \n",
+ " NaN | \n",
+ "
\n",
+ " \n",
+ " 024013 | \n",
+ " streamflow | \n",
+ " watershed | \n",
+ " 0 | \n",
+ " mean | \n",
+ " 0 | \n",
+ " 2023-02-05 | \n",
+ " 2023-02-04 | \n",
+ " NaN | \n",
+ "
\n",
+ " \n",
+ " 024014 | \n",
+ " streamflow | \n",
+ " watershed | \n",
+ " 0 | \n",
+ " mean | \n",
+ " 0 | \n",
+ " 2023-02-05 | \n",
+ " 2023-02-04 | \n",
+ " 2.327616 | \n",
+ "
\n",
+ " \n",
+ " 024015 | \n",
+ " streamflow | \n",
+ " watershed | \n",
+ " 0 | \n",
+ " mean | \n",
+ " 0 | \n",
+ " 2023-02-05 | \n",
+ " 2023-02-04 | \n",
+ " 0.172256 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
1998 rows × 3 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " end_date \\\n",
+ "year id variable spatial_agg timestep time_agg source \n",
+ "1970 020302 streamflow watershed 0 mean 0 1970-02-05 \n",
+ " 020404 streamflow watershed 0 mean 0 1970-02-05 \n",
+ " 020502 streamflow watershed 0 mean 0 1970-02-05 \n",
+ " 020602 streamflow watershed 0 mean 0 1970-02-05 \n",
+ " 020802 streamflow watershed 0 mean 0 1970-02-05 \n",
+ "... ... \n",
+ "2023 024007 streamflow watershed 0 mean 0 2023-02-05 \n",
+ " 024010 streamflow watershed 0 mean 0 2023-02-05 \n",
+ " 024013 streamflow watershed 0 mean 0 2023-02-05 \n",
+ " 024014 streamflow watershed 0 mean 0 2023-02-05 \n",
+ " 024015 streamflow watershed 0 mean 0 2023-02-05 \n",
+ "\n",
+ " start_date \\\n",
+ "year id variable spatial_agg timestep time_agg source \n",
+ "1970 020302 streamflow watershed 0 mean 0 1970-02-04 \n",
+ " 020404 streamflow watershed 0 mean 0 1970-02-04 \n",
+ " 020502 streamflow watershed 0 mean 0 1970-02-04 \n",
+ " 020602 streamflow watershed 0 mean 0 1970-02-04 \n",
+ " 020802 streamflow watershed 0 mean 0 1970-02-04 \n",
+ "... ... \n",
+ "2023 024007 streamflow watershed 0 mean 0 2023-02-04 \n",
+ " 024010 streamflow watershed 0 mean 0 2023-02-04 \n",
+ " 024013 streamflow watershed 0 mean 0 2023-02-04 \n",
+ " 024014 streamflow watershed 0 mean 0 2023-02-04 \n",
+ " 024015 streamflow watershed 0 mean 0 2023-02-04 \n",
+ "\n",
+ " volume \n",
+ "year id variable spatial_agg timestep time_agg source \n",
+ "1970 020302 streamflow watershed 0 mean 0 NaN \n",
+ " 020404 streamflow watershed 0 mean 0 NaN \n",
+ " 020502 streamflow watershed 0 mean 0 NaN \n",
+ " 020602 streamflow watershed 0 mean 0 NaN \n",
+ " 020802 streamflow watershed 0 mean 0 6.851520 \n",
+ "... ... \n",
+ "2023 024007 streamflow watershed 0 mean 0 NaN \n",
+ " 024010 streamflow watershed 0 mean 0 NaN \n",
+ " 024013 streamflow watershed 0 mean 0 NaN \n",
+ " 024014 streamflow watershed 0 mean 0 2.327616 \n",
+ " 024015 streamflow watershed 0 mean 0 0.172256 \n",
+ "\n",
+ "[1998 rows x 3 columns]"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "vol = xfa.calculate_volume(dates=[35, 36])\n",
+ "vol.volume.to_dataframe()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "On calcul les volumes de crues avec un DataSet"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " | \n",
+ " units | \n",
+ " start_date | \n",
+ " end_date | \n",
+ " volume | \n",
+ "
\n",
+ " \n",
+ " year | \n",
+ " id | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ " | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 1993 | \n",
+ " 020302 | \n",
+ " hm³ | \n",
+ " 1993-03-11 | \n",
+ " 1993-05-19 | \n",
+ " 22411.681368 | \n",
+ "
\n",
+ " \n",
+ " 1994 | \n",
+ " 020302 | \n",
+ " hm³ | \n",
+ " 1994-03-11 | \n",
+ " 1994-05-19 | \n",
+ " 49109.812832 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " units start_date end_date volume\n",
+ "year id \n",
+ "1993 020302 hm³ 1993-03-11 1993-05-19 22411.681368\n",
+ "1994 020302 hm³ 1994-03-11 1994-05-19 49109.812832"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "sub_set_example = xfa.select_catchments(['020302'])\n",
+ "vol = sub_set_example.calculate_volume(dates=dates_ds)\n",
+ "vol.to_dataframe().dropna()\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " year | \n",
+ " id | \n",
+ " units | \n",
+ " start_date | \n",
+ " end_date | \n",
+ " volume | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 1993 | \n",
+ " 020302 | \n",
+ " hm³ | \n",
+ " 1993-03-11 | \n",
+ " 1993-05-19 | \n",
+ " 22411.681368 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 1994 | \n",
+ " 020302 | \n",
+ " hm³ | \n",
+ " 1994-03-11 | \n",
+ " 1994-05-19 | \n",
+ " 49109.812832 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ " year id units start_date end_date volume\n",
+ "0 1993 020302 hm³ 1993-03-11 1993-05-19 22411.681368\n",
+ "1 1994 020302 hm³ 1994-03-11 1994-05-19 49109.812832"
+ ]
+ },
+ "execution_count": 14,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "vol.to_dataframe().dropna().reset_index()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "Use get_maximum to get the maximums per season for selected catcment, if no period selected, anual maxmaximum will be fectch"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " id | \n",
+ " season | \n",
+ " year | \n",
+ " start_date | \n",
+ " end_date | \n",
+ " streamflow | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 3 | \n",
+ " 020602 | \n",
+ " Whole year | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 37.900002 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 020802 | \n",
+ " Whole year | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 266.000000 | \n",
+ "
\n",
+ " \n",
+ " 6 | \n",
+ " 021502 | \n",
+ " Whole year | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 204.000000 | \n",
+ "
\n",
+ " \n",
+ " 7 | \n",
+ " 021601 | \n",
+ " Whole year | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 413.000000 | \n",
+ "
\n",
+ " \n",
+ " 8 | \n",
+ " 021702 | \n",
+ " Whole year | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 54.700001 | \n",
+ "
\n",
+ " \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ "
\n",
+ " \n",
+ " 1990 | \n",
+ " 023702 | \n",
+ " Whole year | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 84.209999 | \n",
+ "
\n",
+ " \n",
+ " 1991 | \n",
+ " 024003 | \n",
+ " Whole year | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 208.600006 | \n",
+ "
\n",
+ " \n",
+ " 1992 | \n",
+ " 024004 | \n",
+ " Whole year | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 17.190001 | \n",
+ "
\n",
+ " \n",
+ " 1996 | \n",
+ " 024014 | \n",
+ " Whole year | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 405.500000 | \n",
+ "
\n",
+ " \n",
+ " 1997 | \n",
+ " 024015 | \n",
+ " Whole year | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 11.680000 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
1412 rows × 6 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " id season year start_date end_date streamflow\n",
+ "3 020602 Whole year 1970 02-11 06-19 37.900002\n",
+ "4 020802 Whole year 1970 02-11 06-19 266.000000\n",
+ "6 021502 Whole year 1970 02-11 06-19 204.000000\n",
+ "7 021601 Whole year 1970 02-11 06-19 413.000000\n",
+ "8 021702 Whole year 1970 02-11 06-19 54.700001\n",
+ "... ... ... ... ... ... ...\n",
+ "1990 023702 Whole year 2023 02-11 06-19 84.209999\n",
+ "1991 024003 Whole year 2023 02-11 06-19 208.600006\n",
+ "1992 024004 Whole year 2023 02-11 06-19 17.190001\n",
+ "1996 024014 Whole year 2023 02-11 06-19 405.500000\n",
+ "1997 024015 Whole year 2023 02-11 06-19 11.680000\n",
+ "\n",
+ "[1412 rows x 6 columns]"
+ ]
+ },
+ "execution_count": 15,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "xfa.get_maximum(tolerence=.85)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " id | \n",
+ " season | \n",
+ " year | \n",
+ " start_date | \n",
+ " end_date | \n",
+ " streamflow | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 4 | \n",
+ " 020802 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 266.000000 | \n",
+ "
\n",
+ " \n",
+ " 6 | \n",
+ " 021502 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 204.000000 | \n",
+ "
\n",
+ " \n",
+ " 7 | \n",
+ " 021601 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 413.000000 | \n",
+ "
\n",
+ " \n",
+ " 8 | \n",
+ " 021702 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 54.700001 | \n",
+ "
\n",
+ " \n",
+ " 11 | \n",
+ " 022003 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 309.000000 | \n",
+ "
\n",
+ " \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ "
\n",
+ " \n",
+ " 1990 | \n",
+ " 023702 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 84.209999 | \n",
+ "
\n",
+ " \n",
+ " 1991 | \n",
+ " 024003 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 208.600006 | \n",
+ "
\n",
+ " \n",
+ " 1992 | \n",
+ " 024004 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 17.190001 | \n",
+ "
\n",
+ " \n",
+ " 1996 | \n",
+ " 024014 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 405.500000 | \n",
+ "
\n",
+ " \n",
+ " 1997 | \n",
+ " 024015 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 11.680000 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
1302 rows × 6 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " id season year start_date end_date streamflow\n",
+ "4 020802 Spring 1970 02-11 06-19 266.000000\n",
+ "6 021502 Spring 1970 02-11 06-19 204.000000\n",
+ "7 021601 Spring 1970 02-11 06-19 413.000000\n",
+ "8 021702 Spring 1970 02-11 06-19 54.700001\n",
+ "11 022003 Spring 1970 02-11 06-19 309.000000\n",
+ "... ... ... ... ... ... ...\n",
+ "1990 023702 Spring 2023 02-11 06-19 84.209999\n",
+ "1991 024003 Spring 2023 02-11 06-19 208.600006\n",
+ "1992 024004 Spring 2023 02-11 06-19 17.190001\n",
+ "1996 024014 Spring 2023 02-11 06-19 405.500000\n",
+ "1997 024015 Spring 2023 02-11 06-19 11.680000\n",
+ "\n",
+ "[1302 rows x 6 columns]"
+ ]
+ },
+ "execution_count": 16,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "xfa.get_maximum(tolerence=0.15, seasons=['Spring'])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# CLass Local()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "Init local with a data Ds"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 57,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from xhydro.frequency_analysis.local import Local"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 58,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "return_period = np.array([2, 5, 10, 20, 50, 100, 200, 1000, 2000, 10000])\n",
+ "dist_list = ['expon', 'gamma', 'genextreme', 'gennorm', 'gumbel_r', 'pearson3', 'weibull_min']\n",
+ "\n",
+ "fa = Local(data_ds=xfa,\n",
+ " return_period=return_period,\n",
+ " dist_list=dist_list,\n",
+ " tolerence=0.15,\n",
+ " seasons=['Spring'],\n",
+ " min_year=15,\n",
+ " vars_of_interest=['max'])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 59,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "
<xarray.Dataset>\n",
+ "Dimensions: (id: 33, season: 1, scipy_dist: 7,\n",
+ " return_period: 10, time: 54, dparams: 6)\n",
+ "Coordinates: (12/19)\n",
+ " * id (id) object '020404' '020502' ... '024014' '024015'\n",
+ " * season (season) <U6 'Spring'\n",
+ " * scipy_dist (scipy_dist) <U11 'expon' 'gamma' ... 'weibull_min'\n",
+ " drainage_area (id) float32 647.0 59.8 626.0 ... 2.163e+03 48.1\n",
+ " end_date <U5 '06-19'\n",
+ " latitude (id) float32 48.81 48.98 48.98 ... 46.05 46.2 46.18\n",
+ " ... ...\n",
+ " time_agg <U4 'mean'\n",
+ " timestep <U1 'D'\n",
+ " variable <U10 'streamflow'\n",
+ " * return_period (return_period) float64 2.0 5.0 10.0 ... 2e+03 1e+04\n",
+ " * time (time) int64 1970 1971 1972 1973 ... 2021 2022 2023\n",
+ " * dparams (dparams) <U5 'a' 'beta' 'c' 'loc' 'scale' 'skew'\n",
+ "Data variables:\n",
+ " value_criterions (season, id, scipy_dist) object {'aic': 473.117809...\n",
+ " streamflow_quantiles (scipy_dist, season, return_period, id) float64 11...\n",
+ " streamflow (season, time, id) float32 nan nan ... 405.5 11.68\n",
+ " streamflow_parameters (scipy_dist, season, dparams, id) float64 nan ... nan
- id: 33
- season: 1
- scipy_dist: 7
- return_period: 10
- time: 54
- dparams: 6
id
(id)
object
'020404' '020502' ... '024015'
array(['020404', '020502', '020602', '020802', '021407', '021502', '021601',\n",
+ " '021702', '021915', '021916', '022003', '022301', '022505', '022507',\n",
+ " '022513', '022704', '023002', '023004', '023106', '023303', '023401',\n",
+ " '023402', '023422', '023427', '023429', '023432', '023701', '023702',\n",
+ " '024003', '024007', '024013', '024014', '024015'], dtype=object)
season
(season)
<U6
'Spring'
array(['Spring'], dtype='<U6')
scipy_dist
(scipy_dist)
<U11
'expon' 'gamma' ... 'weibull_min'
array(['expon', 'gamma', 'genextreme', 'gennorm', 'gumbel_r', 'pearson3',\n",
+ " 'weibull_min'], dtype='<U11')
drainage_area
(id)
float32
647.0 59.8 626.0 ... 2.163e+03 48.1
array([ 647. , 59.8, 626. , 1200. , 772. , 721. , 1655. , 223. ,\n",
+ " 494. , 98.6, 1615. , 930. , 267. , 515. , 1042. , 796. ,\n",
+ " 61. , 191. , 821. , 1152. , 708. , 5820. , 696. , 781. ,\n",
+ " 3085. , 154. , 356. , 385. , 914. , 2330. , 227. , 2163. ,\n",
+ " 48.1], dtype=float32)
end_date
()
<U5
'06-19'
array('06-19', dtype='<U5')
latitude
(id)
float32
48.81 48.98 48.98 ... 46.2 46.18
array([48.806946, 48.982224, 48.97778 , 49.202778, 49.04361 , 49.055557,\n",
+ " 48.773613, 48.766945, 48.517776, 48.419724, 48.412777, 48.08917 ,\n",
+ " 47.650833, 47.611946, 47.821945, 47.38111 , 46.7075 , 46.815834,\n",
+ " 46.82 , 46.691387, 46.656944, 46.586945, 46.166943, 45.573055,\n",
+ " 46.09639 , 46.54028 , 46.500557, 46.540554, 46.30611 , 46.19472 ,\n",
+ " 46.045277, 46.19889 , 46.184166], dtype=float32)
longitude
(id)
float32
-64.92 -64.43 ... -72.1 -71.75
array([-64.916664, -64.42694 , -64.69972 , -65.29472 , -66.47583 ,\n",
+ " -66.66917 , -67.540276, -67.666115, -68.15972 , -68.35611 ,\n",
+ " -68.555 , -69.195274, -69.51222 , -69.64472 , -69.51667 ,\n",
+ " -69.95389 , -70.960556, -70.90056 , -70.756386, -71.068054,\n",
+ " -71.28889 , -71.21361 , -70.63917 , -70.880554, -70.65444 ,\n",
+ " -71.34 , -72.10833 , -72.09333 , -71.450554, -72.28333 ,\n",
+ " -71.44722 , -72.098335, -71.754166], dtype=float32)
name
(id)
object
'York' 'Au Renard' ... 'Bourbon'
array(['York', 'Au Renard', 'Dartmouth', 'Madeleine', 'Sainte', 'Cap',\n",
+ " 'Matane', 'Blanche', 'Neigette', 'Petite rivière Neigette',\n",
+ " 'Rimouski', 'Des Trois', 'Fourchue', 'Du Loup', 'Du Loup',\n",
+ " 'Ouelle', 'Boyer Sud', 'Boyer', 'Du Sud', 'Etchemin', 'Beaurivage',\n",
+ " 'Chaudière', 'Famine', 'Chaudière', 'Chaudière', "Bras d'Henri",\n",
+ " 'Petite rivière du Chêne', 'Petite rivière du Chêne', 'Bécancour',\n",
+ " 'Bécancour', 'Bécancour', 'Bécancour', 'Bourbon'], dtype=object)
province
(id)
object
'QC' 'QC' 'QC' ... 'QC' 'QC' 'QC'
array(['QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC',\n",
+ " 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC',\n",
+ " 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC', 'QC'],\n",
+ " dtype=object)
regulated
(id)
object
'Natural' 'Natural' ... 'Natural'
array(['Natural', 'Natural', 'Natural', 'Natural', 'Natural', 'Natural',\n",
+ " 'Influenced (daily)', 'Influenced (daily)', 'Natural', 'Natural',\n",
+ " 'Influenced (daily)', 'Influenced (daily)', 'Influenced (monthly)',\n",
+ " 'Natural', 'Influenced (monthly)', 'Natural', 'Natural', 'Natural',\n",
+ " 'Influenced (daily)', 'Natural', 'Natural', 'Influenced (daily)',\n",
+ " 'Natural', 'Influenced (monthly)', 'Influenced (monthly)',\n",
+ " 'Natural', 'Natural', 'Natural', 'Natural', 'Natural', 'Natural',\n",
+ " 'Natural', 'Natural'], dtype=object)
source
()
<U102
'Ministère de l’Environnement, d...
array('Ministère de l’Environnement, de la Lutte contre les changements climatiques, de la Faune et des Parcs',\n",
+ " dtype='<U102')
spatial_agg
()
<U9
'watershed'
array('watershed', dtype='<U9')
start_date
()
<U5
'02-11'
array('02-11', dtype='<U5')
time_agg
()
<U4
'mean'
array('mean', dtype='<U4')
timestep
()
<U1
'D'
variable
()
<U10
'streamflow'
array('streamflow', dtype='<U10')
return_period
(return_period)
float64
2.0 5.0 10.0 ... 1e+03 2e+03 1e+04
array([2.e+00, 5.e+00, 1.e+01, 2.e+01, 5.e+01, 1.e+02, 2.e+02, 1.e+03, 2.e+03,\n",
+ " 1.e+04])
time
(time)
int64
1970 1971 1972 ... 2021 2022 2023
array([1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981,\n",
+ " 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993,\n",
+ " 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,\n",
+ " 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017,\n",
+ " 2018, 2019, 2020, 2021, 2022, 2023])
dparams
(dparams)
<U5
'a' 'beta' 'c' 'loc' 'scale' 'skew'
array(['a', 'beta', 'c', 'loc', 'scale', 'skew'], dtype='<U5')
value_criterions
(season, id, scipy_dist)
object
{'aic': 473.11780986942864, 'bic...
array([[[{'aic': 473.11780986942864, 'bic': 476.64021010081575, 'aicc': 473.41780986942865},\n",
+ " {'aic': 458.2985841195104, 'bic': 463.58218446659106, 'aicc': 458.91396873489504},\n",
+ " {'aic': 457.5956462573534, 'bic': 462.87924660443406, 'aicc': 458.21103087273804},\n",
+ " {'aic': 458.6848317031879, 'bic': 463.9684320502686, 'aicc': 459.30021631857255},\n",
+ " {'aic': 455.63163990069773, 'bic': 459.15404013208484, 'aicc': 455.93163990069775},\n",
+ " {'aic': 458.2985784132112, 'bic': 463.58217876029187, 'aicc': 458.91396302859584},\n",
+ " {'aic': 459.5333847180589, 'bic': 464.8169850651396, 'aicc': 460.14876933344357}],\n",
+ " [{'aic': 137.48047244218606, 'bic': 139.26121595797838, 'aicc': 138.28047244218607},\n",
+ " {'aic': 137.75356631454824, 'bic': 140.42468158823672, 'aicc': 139.46785202883396},\n",
+ " {'aic': 136.8654215598545, 'bic': 139.53653683354298, 'aicc': 138.57970727414022},\n",
+ " {'aic': 134.53673193984034, 'bic': 137.20784721352882, 'aicc': 136.25101765412606},\n",
+ " {'aic': 135.50196162177616, 'bic': 137.28270513756848, 'aicc': 136.30196162177617},\n",
+ " {'aic': 137.75356403379647, 'bic': 140.42467930748495, 'aicc': 139.4678497480822},\n",
+ " {'aic': 138.02489617831452, 'bic': 140.696011452003, 'aicc': 139.73918189260024}],\n",
+ " [{'aic': 624.3630092927904, 'bic': 628.3035931198946, 'aicc': 624.6030092927904},\n",
+ " {'aic': 586.9471403610117, 'bic': 592.8580161016681, 'aicc': 587.4369362793791},\n",
+ " {'aic': 586.6564878404336, 'bic': 592.5673635810899, 'aicc': 587.1462837588009},\n",
+ " {'aic': 589.6958604474956, 'bic': 595.606736188152, 'aicc': 590.185656365863},\n",
+ " {'aic': 585.7242141775773, 'bic': 589.6647980046815, 'aicc': 585.9642141775773},\n",
+ " {'aic': inf, 'bic': inf, 'aicc': inf},\n",
+ "...\n",
+ " {'aic': 202.47393852687838, 'bic': 205.88042117466583, 'aicc': 203.73709642161523},\n",
+ " {'aic': 193.80010833894926, 'bic': 197.2065909867367, 'aicc': 195.0632662336861},\n",
+ " {'aic': 200.57031958968292, 'bic': 202.84130802154124, 'aicc': 201.17031958968292},\n",
+ " {'aic': 182.86976493393976, 'bic': 186.2762475817272, 'aicc': 184.1329228286766},\n",
+ " {'aic': 192.4003234937207, 'bic': 195.80680614150813, 'aicc': 193.66348138845754}],\n",
+ " [{'aic': 309.1924956653078, 'bic': 311.5486033260037, 'aicc': 309.76392423673633},\n",
+ " {'aic': 307.20391074649456, 'bic': 310.7380722375384, 'aicc': 308.40391074649455},\n",
+ " {'aic': 307.600772792574, 'bic': 311.13493428361784, 'aicc': 308.800772792574},\n",
+ " {'aic': 309.834483455967, 'bic': 313.36864494701086, 'aicc': 311.034483455967},\n",
+ " {'aic': 305.6362220893123, 'bic': 307.9923297500082, 'aicc': 306.20765066074085},\n",
+ " {'aic': 307.2039064774573, 'bic': 310.7380679685011, 'aicc': 308.40390647745727},\n",
+ " {'aic': 306.66172427226934, 'bic': 310.1958857633132, 'aicc': 307.8617242722693}],\n",
+ " [{'aic': 99.08375778278946, 'bic': 100.7501844709019, 'aicc': 99.94090063993232},\n",
+ " {'aic': 98.00805512555614, 'bic': 100.50769515772478, 'aicc': 99.85420897170998},\n",
+ " {'aic': 102.98921985069077, 'bic': 105.48885988285943, 'aicc': 104.83537369684461},\n",
+ " {'aic': 107.10226730923392, 'bic': 109.60190734140258, 'aicc': 108.94842115538776},\n",
+ " {'aic': 101.55648839938996, 'bic': 103.2229150875024, 'aicc': 102.41363125653282},\n",
+ " {'aic': 95.20549363483296, 'bic': 97.7051336670016, 'aicc': 97.0516474809868},\n",
+ " {'aic': 101.01183131846669, 'bic': 103.51147135063533, 'aicc': 102.85798516462053}]]],\n",
+ " dtype=object)
streamflow_quantiles
(scipy_dist, season, return_period, id)
float64
110.2 18.59 ... 1.198e+03 52.72
- long_name :
- expon quantiles
- description :
- Quantiles estimated by the expon distribution
- method :
- ML
- estimator :
- Maximum likelihood
- scipy_dist :
- expon
- units :
- history :
- [2023-08-25 19:18:07] fit: Estimate distribution parameters by maximum likelihood method along dimension time. - xclim version: 0.44.0\n",
+ "[2023-08-25 19:18:07] parametric_quantile: Compute parametric quantiles from distribution parameters - xclim version: 0.44.0
- cell_methods :
- dparams: ppf
array([[[[ 110.21113366, 18.59451115, 153.73287271, ...,\n",
+ " 51.89495989, 400.4811277 , 12.73898431],\n",
+ " [ 189.05242425, 32.33530772, 271.07100632, ...,\n",
+ " 72.64256731, 595.0439914 , 18.26330051],\n",
+ " [ 248.69355822, 42.72981906, 359.83387781, ...,\n",
+ " 88.33752644, 742.22511605, 22.4422844 ],\n",
+ " ...,\n",
+ " [ 644.94067526, 111.78945757, 949.561631 , ...,\n",
+ " 192.61257781, 1720.07534204, 50.20685236],\n",
+ " [ 704.58180922, 122.18396891, 1038.32450249, ...,\n",
+ " 208.30753694, 1867.25646668, 54.38583625],\n",
+ " [ 843.06423378, 146.31927682, 1244.42550759, ...,\n",
+ " 244.75010349, 2209.00045503, 64.08913634]]],\n",
+ "\n",
+ "\n",
+ " [[[ 128.7783143 , 20.92413964, 187.28919282, ...,\n",
+ " 46.28603169, 440.91659175, 11.13242962],\n",
+ " [ 175.10263601, 31.0907859 , 240.90671671, ...,\n",
+ " 70.48110046, 572.22471177, 15.7136441 ],\n",
+ " [ 204.13713176, 37.8489556 , 272.34180883, ...,\n",
+ "...\n",
+ " 221.87596511, 1122.42881603, 50.21448017],\n",
+ " [ 382.56745993, 82.68033148, 469.9186434 , ...,\n",
+ " 241.52873181, 1186.62666263, 54.50586709],\n",
+ " [ 430.89964241, 95.38453665, 523.27680773, ...,\n",
+ " 287.30385074, 1332.70781459, 64.48551237]]],\n",
+ "\n",
+ "\n",
+ " [[[ 129.45070937, 20.66122563, 188.4162542 , ...,\n",
+ " 47.88623494, 442.15724326, 13.11895674],\n",
+ " [ 178.32464317, 31.64565212, 245.10679678, ...,\n",
+ " 68.10287468, 576.10175634, 18.14281077],\n",
+ " [ 206.96509585, 38.78137703, 275.86986073, ...,\n",
+ " 85.09474758, 656.90593121, 21.68382551],\n",
+ " ...,\n",
+ " [ 339.54567013, 76.85579449, 404.37233237, ...,\n",
+ " 217.36464342, 1046.03235919, 42.90284809],\n",
+ " [ 355.30303669, 81.84008197, 418.56721529, ...,\n",
+ " 239.2403129 , 1093.56479399, 45.8977128 ],\n",
+ " [ 389.5527219 , 92.96238633, 448.83075319, ...,\n",
+ " 291.48310284, 1197.64266376, 52.72355079]]]])
streamflow
(season, time, id)
float32
nan nan nan ... nan 405.5 11.68
array([[[ nan, nan, nan, ..., nan, nan, nan],\n",
+ " [ nan, nan, 241. , ..., nan, nan, nan],\n",
+ " [ nan, nan, 223. , ..., nan, nan, nan],\n",
+ " ...,\n",
+ " [ 69.66, nan, 131.3 , ..., nan, 470.6 , 8.56],\n",
+ " [136. , nan, 195.4 , ..., nan, 325.2 , 12.75],\n",
+ " [ 80.52, nan, 250.4 , ..., nan, 405.5 , 11.68]]],\n",
+ " dtype=float32)
streamflow_parameters
(scipy_dist, season, dparams, id)
float64
nan nan nan nan ... nan nan nan nan
- method :
- ML
- estimator :
- Maximum likelihood
- units :
array([[[[ nan, nan, nan, ...,\n",
+ " nan, nan, nan],\n",
+ " [ nan, nan, nan, ...,\n",
+ " nan, nan, nan],\n",
+ " [ nan, nan, nan, ...,\n",
+ " nan, nan, nan],\n",
+ " [ 5.05699997e+01, 8.19999981e+00, 6.49700012e+01, ...,\n",
+ " 3.62000008e+01, 2.53300003e+02, 8.56000042e+00],\n",
+ " [ 8.60439682e+01, 1.49961100e+01, 1.28057755e+02, ...,\n",
+ " 2.26430397e+01, 2.12337479e+02, 6.02899933e+00],\n",
+ " [ nan, nan, nan, ...,\n",
+ " nan, nan, nan]]],\n",
+ "\n",
+ "\n",
+ " [[[ 4.44675107e+00, 2.43446858e+00, 1.18928192e+01, ...,\n",
+ " 5.38363498e-01, 3.54418700e+00, 7.17556632e-01],\n",
+ " [ nan, nan, nan, ...,\n",
+ " nan, nan, nan],\n",
+ " [ nan, nan, nan, ...,\n",
+ " nan, nan, nan],\n",
+ "...\n",
+ " [ 1.36616121e+02, 2.31973493e+01, 1.94996604e+02, ...,\n",
+ " 5.97643853e+01, 4.65624593e+02, 1.42167225e+01],\n",
+ " [ 5.03876330e+01, 1.09402406e+01, 5.71821538e+01, ...,\n",
+ " 2.61676491e+01, 1.42163323e+02, 5.95445934e+00],\n",
+ " [ 9.48882989e-01, 1.28173480e+00, 9.05252787e-01, ...,\n",
+ " 2.22094909e+00, 1.06243389e+00, 2.10526847e+00]]],\n",
+ "\n",
+ "\n",
+ " [[[ nan, nan, nan, ...,\n",
+ " nan, nan, nan],\n",
+ " [ nan, nan, nan, ...,\n",
+ " nan, nan, nan],\n",
+ " [ 1.82950969e+00, 1.37451012e+00, 2.41899296e+00, ...,\n",
+ " 8.38804235e-01, 1.67425667e+00, 1.14582866e+00],\n",
+ " [ 4.58751127e+01, 7.67302614e+00, 5.23295870e+01, ...,\n",
+ " 3.61999966e+01, 2.37323643e+02, 8.49239163e+00],\n",
+ " [ 1.02113560e+02, 1.69571753e+01, 1.58349816e+02, ...,\n",
+ " 1.80899918e+01, 2.54960537e+02, 6.37052474e+00],\n",
+ " [ nan, nan, nan, ...,\n",
+ " nan, nan, nan]]]])
PandasIndex
PandasIndex(Index(['020404', '020502', '020602', '020802', '021407', '021502', '021601',\n",
+ " '021702', '021915', '021916', '022003', '022301', '022505', '022507',\n",
+ " '022513', '022704', '023002', '023004', '023106', '023303', '023401',\n",
+ " '023402', '023422', '023427', '023429', '023432', '023701', '023702',\n",
+ " '024003', '024007', '024013', '024014', '024015'],\n",
+ " dtype='object', name='id'))
PandasIndex
PandasIndex(Index(['Spring'], dtype='object', name='season'))
PandasIndex
PandasIndex(Index(['expon', 'gamma', 'genextreme', 'gennorm', 'gumbel_r', 'pearson3',\n",
+ " 'weibull_min'],\n",
+ " dtype='object', name='scipy_dist'))
PandasIndex
PandasIndex(Index([ 2.0, 5.000000000000001, 10.000000000000002,\n",
+ " 19.999999999999982, 49.99999999999996, 99.99999999999991,\n",
+ " 199.99999999999983, 999.9999999999991, 2000.0000000002203,\n",
+ " 10000.0000000011],\n",
+ " dtype='float64', name='return_period'))
PandasIndex
PandasIndex(Index([1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981,\n",
+ " 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993,\n",
+ " 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,\n",
+ " 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017,\n",
+ " 2018, 2019, 2020, 2021, 2022, 2023],\n",
+ " dtype='int64', name='time'))
PandasIndex
PandasIndex(Index(['a', 'beta', 'c', 'loc', 'scale', 'skew'], dtype='object', name='dparams'))
"
+ ],
+ "text/plain": [
+ "\n",
+ "Dimensions: (id: 33, season: 1, scipy_dist: 7,\n",
+ " return_period: 10, time: 54, dparams: 6)\n",
+ "Coordinates: (12/19)\n",
+ " * id (id) object '020404' '020502' ... '024014' '024015'\n",
+ " * season (season) \n",
+ "\n",
+ "\n",
+ " \n",
+ " \n",
+ " | \n",
+ " id | \n",
+ " season | \n",
+ " time | \n",
+ " start_date | \n",
+ " end_date | \n",
+ " streamflow | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 3 | \n",
+ " 020802 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 266.000000 | \n",
+ "
\n",
+ " \n",
+ " 5 | \n",
+ " 021502 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 204.000000 | \n",
+ "
\n",
+ " \n",
+ " 6 | \n",
+ " 021601 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 413.000000 | \n",
+ "
\n",
+ " \n",
+ " 7 | \n",
+ " 021702 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 54.700001 | \n",
+ "
\n",
+ " \n",
+ " 10 | \n",
+ " 022003 | \n",
+ " Spring | \n",
+ " 1970 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 309.000000 | \n",
+ "
\n",
+ " \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ "
\n",
+ " \n",
+ " 1774 | \n",
+ " 023432 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 66.580002 | \n",
+ "
\n",
+ " \n",
+ " 1776 | \n",
+ " 023702 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 84.209999 | \n",
+ "
\n",
+ " \n",
+ " 1777 | \n",
+ " 024003 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 208.600006 | \n",
+ "
\n",
+ " \n",
+ " 1780 | \n",
+ " 024014 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 405.500000 | \n",
+ "
\n",
+ " \n",
+ " 1781 | \n",
+ " 024015 | \n",
+ " Spring | \n",
+ " 2023 | \n",
+ " 02-11 | \n",
+ " 06-19 | \n",
+ " 11.680000 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "1287 rows × 6 columns
\n",
+ ""
+ ],
+ "text/plain": [
+ " id season time start_date end_date streamflow\n",
+ "3 020802 Spring 1970 02-11 06-19 266.000000\n",
+ "5 021502 Spring 1970 02-11 06-19 204.000000\n",
+ "6 021601 Spring 1970 02-11 06-19 413.000000\n",
+ "7 021702 Spring 1970 02-11 06-19 54.700001\n",
+ "10 022003 Spring 1970 02-11 06-19 309.000000\n",
+ "... ... ... ... ... ... ...\n",
+ "1774 023432 Spring 2023 02-11 06-19 66.580002\n",
+ "1776 023702 Spring 2023 02-11 06-19 84.209999\n",
+ "1777 024003 Spring 2023 02-11 06-19 208.600006\n",
+ "1780 024014 Spring 2023 02-11 06-19 405.500000\n",
+ "1781 024015 Spring 2023 02-11 06-19 11.680000\n",
+ "\n",
+ "[1287 rows x 6 columns]"
+ ]
+ },
+ "execution_count": 19,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "fa.view_values('max')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " id | \n",
+ " season | \n",
+ " scipy_dist | \n",
+ " value_criterions | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 020404 | \n",
+ " Spring | \n",
+ " expon | \n",
+ " {'aic': 473.11780986942864, 'bic': 476.6402101... | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 020404 | \n",
+ " Spring | \n",
+ " gamma | \n",
+ " {'aic': 458.2985841195104, 'bic': 463.58218446... | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 020404 | \n",
+ " Spring | \n",
+ " genextreme | \n",
+ " {'aic': 457.5956462573534, 'bic': 462.87924660... | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 020404 | \n",
+ " Spring | \n",
+ " gennorm | \n",
+ " {'aic': 458.6848317031879, 'bic': 463.96843205... | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 020404 | \n",
+ " Spring | \n",
+ " gumbel_r | \n",
+ " {'aic': 455.63163990069773, 'bic': 459.1540401... | \n",
+ "
\n",
+ " \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ "
\n",
+ " \n",
+ " 226 | \n",
+ " 024015 | \n",
+ " Spring | \n",
+ " genextreme | \n",
+ " {'aic': 102.98921985069077, 'bic': 105.4888598... | \n",
+ "
\n",
+ " \n",
+ " 227 | \n",
+ " 024015 | \n",
+ " Spring | \n",
+ " gennorm | \n",
+ " {'aic': 107.10226730923392, 'bic': 109.6019073... | \n",
+ "
\n",
+ " \n",
+ " 228 | \n",
+ " 024015 | \n",
+ " Spring | \n",
+ " gumbel_r | \n",
+ " {'aic': 101.55648839938996, 'bic': 103.2229150... | \n",
+ "
\n",
+ " \n",
+ " 229 | \n",
+ " 024015 | \n",
+ " Spring | \n",
+ " pearson3 | \n",
+ " {'aic': 95.20549363483296, 'bic': 97.705133667... | \n",
+ "
\n",
+ " \n",
+ " 230 | \n",
+ " 024015 | \n",
+ " Spring | \n",
+ " weibull_min | \n",
+ " {'aic': 101.01183131846669, 'bic': 103.5114713... | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
231 rows × 4 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " id season scipy_dist \\\n",
+ "0 020404 Spring expon \n",
+ "1 020404 Spring gamma \n",
+ "2 020404 Spring genextreme \n",
+ "3 020404 Spring gennorm \n",
+ "4 020404 Spring gumbel_r \n",
+ ".. ... ... ... \n",
+ "226 024015 Spring genextreme \n",
+ "227 024015 Spring gennorm \n",
+ "228 024015 Spring gumbel_r \n",
+ "229 024015 Spring pearson3 \n",
+ "230 024015 Spring weibull_min \n",
+ "\n",
+ " value_criterions \n",
+ "0 {'aic': 473.11780986942864, 'bic': 476.6402101... \n",
+ "1 {'aic': 458.2985841195104, 'bic': 463.58218446... \n",
+ "2 {'aic': 457.5956462573534, 'bic': 462.87924660... \n",
+ "3 {'aic': 458.6848317031879, 'bic': 463.96843205... \n",
+ "4 {'aic': 455.63163990069773, 'bic': 459.1540401... \n",
+ ".. ... \n",
+ "226 {'aic': 102.98921985069077, 'bic': 105.4888598... \n",
+ "227 {'aic': 107.10226730923392, 'bic': 109.6019073... \n",
+ "228 {'aic': 101.55648839938996, 'bic': 103.2229150... \n",
+ "229 {'aic': 95.20549363483296, 'bic': 97.705133667... \n",
+ "230 {'aic': 101.01183131846669, 'bic': 103.5114713... \n",
+ "\n",
+ "[231 rows x 4 columns]"
+ ]
+ },
+ "execution_count": 20,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "fa.view_criterions('max')"
+ ]
+ },
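+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`view_criterions` returns the goodness-of-fit criteria (AIC, BIC and AICc) computed for each catchment and each fitted distribution. These can be used to pick the best-fitting distribution before reading off its quantiles below."
+ ]
+ },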
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ " \n",
+ " \n",
+ " | \n",
+ " id | \n",
+ " season | \n",
+ " scipy_dist | \n",
+ " return_period | \n",
+ " streamflow_quantiles | \n",
+ "
\n",
+ " \n",
+ " \n",
+ " \n",
+ " 0 | \n",
+ " 020404 | \n",
+ " Spring | \n",
+ " expon | \n",
+ " 2.0 | \n",
+ " 110.0 | \n",
+ "
\n",
+ " \n",
+ " 1 | \n",
+ " 020502 | \n",
+ " Spring | \n",
+ " expon | \n",
+ " 2.0 | \n",
+ " 19.0 | \n",
+ "
\n",
+ " \n",
+ " 2 | \n",
+ " 020602 | \n",
+ " Spring | \n",
+ " expon | \n",
+ " 2.0 | \n",
+ " 154.0 | \n",
+ "
\n",
+ " \n",
+ " 3 | \n",
+ " 020802 | \n",
+ " Spring | \n",
+ " expon | \n",
+ " 2.0 | \n",
+ " 255.0 | \n",
+ "
\n",
+ " \n",
+ " 4 | \n",
+ " 021407 | \n",
+ " Spring | \n",
+ " expon | \n",
+ " 2.0 | \n",
+ " 195.0 | \n",
+ "
\n",
+ " \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ " ... | \n",
+ "
\n",
+ " \n",
+ " 2305 | \n",
+ " 024003 | \n",
+ " Spring | \n",
+ " weibull_min | \n",
+ " 10000.0 | \n",
+ " 528.0 | \n",
+ "
\n",
+ " \n",
+ " 2306 | \n",
+ " 024007 | \n",
+ " Spring | \n",
+ " weibull_min | \n",
+ " 10000.0 | \n",
+ " 1180.0 | \n",
+ "
\n",
+ " \n",
+ " 2307 | \n",
+ " 024013 | \n",
+ " Spring | \n",
+ " weibull_min | \n",
+ " 10000.0 | \n",
+ " 291.0 | \n",
+ "
\n",
+ " \n",
+ " 2308 | \n",
+ " 024014 | \n",
+ " Spring | \n",
+ " weibull_min | \n",
+ " 10000.0 | \n",
+ " 1198.0 | \n",
+ "
\n",
+ " \n",
+ " 2309 | \n",
+ " 024015 | \n",
+ " Spring | \n",
+ " weibull_min | \n",
+ " 10000.0 | \n",
+ " 53.0 | \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
2310 rows × 5 columns
\n",
+ "
"
+ ],
+ "text/plain": [
+ " id season scipy_dist return_period streamflow_quantiles\n",
+ "0 020404 Spring expon 2.0 110.0\n",
+ "1 020502 Spring expon 2.0 19.0\n",
+ "2 020602 Spring expon 2.0 154.0\n",
+ "3 020802 Spring expon 2.0 255.0\n",
+ "4 021407 Spring expon 2.0 195.0\n",
+ "... ... ... ... ... ...\n",
+ "2305 024003 Spring weibull_min 10000.0 528.0\n",
+ "2306 024007 Spring weibull_min 10000.0 1180.0\n",
+ "2307 024013 Spring weibull_min 10000.0 291.0\n",
+ "2308 024014 Spring weibull_min 10000.0 1198.0\n",
+ "2309 024015 Spring weibull_min 10000.0 53.0\n",
+ "\n",
+ "[2310 rows x 5 columns]"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "fa.view_quantiles('max')"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "xhydro",
+ "language": "python",
+ "name": "xhydro"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.16"
+ },
+ "vscode": {
+ "interpreter": {
+ "hash": "e28391989cdb8b31df72dd917935faad186df3329a743c813090fc6af977a1ca"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/xhydro/__init__.py b/xhydro/__init__.py
index 1f718d19..ef963d2a 100644
--- a/xhydro/__init__.py
+++ b/xhydro/__init__.py
@@ -6,3 +6,5 @@
__author__ = """Thomas-Charles Fortier Filion"""
__email__ = "tcff_hydro@outlook.com"
__version__ = "0.1.9"
+
+from .utils import get_julian_day, get_timestep
diff --git a/xhydro/frequency_analysis/__init__.py b/xhydro/frequency_analysis/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/xhydro/frequency_analysis/local.py b/xhydro/frequency_analysis/local.py
new file mode 100644
index 00000000..abdc12ca
--- /dev/null
+++ b/xhydro/frequency_analysis/local.py
@@ -0,0 +1,939 @@
+import copy
+import fnmatch
+import warnings
+from typing import Union
+
+import numpy as np
+import pandas as pd
+import scipy.stats
+import xarray as xr
+from statsmodels.tools import eval_measures
+from xclim.indices.stats import fit, parametric_quantile
+
+import xhydro as xh
+
+__all__ = [
+ "Data",
+ "Local",
+]
+
+
+class Data:
+ def __init__(self, ds: xr.Dataset):
+ """init function takes a dataset as input and initialize an empty
+ season dictionary and a list of catchments from dimension id
+
+ Parameters
+ ----------
+ ds : xr.Dataset
+ _description_
+ """
+ self.data = ds
+ self._season = {}
+ self._catchments = self.data.id.to_numpy().tolist()
+
+ def _repr_html_(self):
+ """Function to show xr.Dataset._repr_html_ when looking at class Data
+
+ Returns
+ -------
+ xr.Dataset._repr_html_()
+ """
+ return self.data._repr_html_()
+
+ def copy(self):
+ """makes a copy of itself using copy library
+
+ Returns
+ -------
+ xhydro.frequency_analysis.local.Data()
+ """
+ return copy.copy(self)
+
+ def select_catchments(self, catchment_list: list):
+ """
+ Select specified catchments from the data attribute.
+ Also supports the use of a wildcard (*).
+
+ Parameters
+ ----------
+ catchment_list : List
+ List of catchments that will be selected along the id dimension
+
+ Returns
+ -------
+ ds : xarray.DataSet
+ New dataset with only specified catchments
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path =
+ '/datasets/xhydro/tests/cehq/zarr'
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> filtered_ds = donnees.select_catchments(
+ catchment_list = ['051001','051005'])
+ >>> filtered_ds = donnees.select_catchments(
+ catchment_list = ['05*','0234*', '023301'])
+ """
+
+ # Create a copy of the object
+ obj = self.copy()
+
+ def multi_filter(names, patterns: list):
+ # helper to expand wildcard patterns into the matching catchment names
+ return [
+ name
+ for name in names
+ for pattern in patterns
+ if fnmatch.fnmatch(name, pattern)
+ ]
+
+ # Getting the full list
+ catchment_list = multi_filter(obj._catchments, catchment_list)
+
+ # Setting the list as a class attribute
+ obj._catchments = catchment_list
+
+ # Filtering over the list
+ obj.data = obj.data.sel(id=self.data.id.isin(catchment_list))
+ return obj
+
+ def custom_group_by(self, beg: int, end: int):
+ """
+ A custom function to group data by year between specified Julian days.
+
+ Parameters
+ ----------
+ beg : Int
+ Julian day of the beginning of the period
+
+ end : Int
+ Julian day of the end of the period
+
+ Returns
+ -------
+ ds : xarray.DataSet
+ dataset with data grouped by time over specified dates
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path =
+ '/datasets/xhydro/tests/cehq/zarr'
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> grouped_ds = donnees.custom_group_by(1, 90)
+
+ """
+
+ if beg > end:
+ # TODO when beg > end, the period has to overlap two years,
+ # for example, from October to March
+ pass
+ else:
+ # +1 to include the end
+ return self.data.sel(
+ time=np.isin(self.data.time.dt.dayofyear, range(beg, end + 1))
+ ).groupby("time.year")
+
+ @property
+ def season(self):
+ return self._season
+
+ @season.setter
+ def season(self, liste: list):
+ """
+ The setter for the season property. Issues a warning
+ if a new season overlaps with an existing one.
+ Also issues a warning if the season name was already used,
+ and then overwrites it.
+
+ Parameters
+ ----------
+ liste : List
+ List of [name, beginning Julian day, end Julian day]
+
+ Returns
+ -------
+ None
+ Appends the dict self._season with the name as key
+ and the beginning and end as values
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path =
+ '/datasets/xhydro/tests/cehq/zarr'
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> donnees.season = ['Yearly', 1, 365]
+ """
+
+ # TODO Replace with dictionary instead
+ name = liste[0]
+ beg = liste[1]
+ end = liste[2]
+ for season, dates in self.season.items():
+ if not isinstance(dates, xr.Dataset):
+ # We don't check for overlap if the season is a dataset
+ if name == season:
+ warnings.warn(
+ "Warning, "
+ + name
+ + " was already defined and has been overwritten"
+ )
+ elif (
+ dates[0] <= beg
+ and dates[1] >= beg
+ or dates[0] <= end
+ and dates[1] >= end
+ ):
+ warnings.warn("Warning, " + name + " overlapping with " + season)
+
+ self._season[name] = [beg, end]
+
+ def rm_season(self, name: str):
+ """
+ Function to remove a season.
+ Prints a message if the name is not a valid season.
+
+ Parameters
+ ----------
+ name : Str
+ Name of the season to be removed
+
+ Returns
+ -------
+ None
+ The specified season is removed from self._season in place.
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path =
+ '/datasets/xhydro/tests/cehq/zarr'
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> donnees.season = ['Yearly', 1, 365]
+ >>> donnees.rm_season('Yearly')
+ """
+
+ try:
+ del self._season[name]
+ except KeyError:
+ print("No season named " + name)
+
+ def get_seasons(self):
+ """
+ Function to list the seasons.
+
+ Returns
+ -------
+ list : List
+ a list of the season names
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path =
+ '/datasets/xhydro/tests/cehq/zarr'
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> donnees.season = ['Yearly', 1, 365]
+ >>> donnees.season = ['Spring', 60, 120]
+ >>> donnees.get_seasons()
+ ['Yearly', 'Spring']
+ """
+ return list(self.season.keys())
+
+ def _get_season_values(self, season: str):
+ """Function to get the values of a given season
+
+ Parameters
+ ----------
+ season : str
+ name of a previously defined season
+
+ Returns
+ -------
+ list
+ Returns a list with the beginning and end Julian days of the season
+ """
+ return self._season[season]
+
+ def get_bool_over_tolerence(self, tol: float, season=None):
+ """
+ Function to check whether a season has enough values to be used.
+ For each year, True is returned if there is less missing data
+ than the specified tolerance.
+
+ Parameters
+ ----------
+ tol : Float
+ Tolerance in decimal form (0.15 for 15%)
+
+ season : String
+ Name of the season to be checked
+
+ Returns
+ -------
+ da : xr.DataArray
+ DataArray of boolean
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path =
+ '/datasets/xhydro/tests/cehq/zarr'
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> donnees.custom_group_by(1, 365).max().where(
+ donnees.get_bool_over_tolerence(0.15), drop=True)
+ """
+ ds = copy.copy(self)
+ if season is None:
+ # If no season is specified,
+ # tolerence will be based on 365 values per year
+ # TODO generalize for different time step
+ # keep a year when its count exceeds the expected count times (1 - tol)
+ tolerence = 365 * (1 - tol)
+ grouped_ds = ds.data.groupby("time.year").count()
+
+ else:
+ season_vals = ds._get_season_values(season)
+ season_size = season_vals[1] - season_vals[0] + 1
+ # TODO generalize for different time step
+ grouped_ds = ds.custom_group_by(season_vals[0], season_vals[1]).count()
+ tolerence = season_size * (1 - tol)
+
+ return (grouped_ds[list(grouped_ds.keys())[0]] > tolerence).load()
+
+ def get_maximum(self, tolerence: float = None, seasons=None):
+ """
+ Function to tidy _get_max results.
+
+ Parameters
+ ----------
+ tolerence : Float
+ Tolerance in decimal form (0.15 for 15%)
+
+ seasons : List
+ List of season's name to be checked
+
+ Returns
+ -------
+ df : pd.Dataframe
+ DataFrame organized with id, season, year and maxima
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path =
+ '/datasets/xhydro/tests/cehq/zarr'
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> donnees.get_maximum(tolerence=0.15, seasons=['Spring'])
+ >>> catchment_list = ['023301']
+ >>> sub_set = donnees.select_catchments(catchment_list = catchment_list)
+ >>> sub_set.season = ['Spring', 60, 182]
+ >>> sub_set.get_maximum(tolerence=0.15, seasons=['Spring'])
+ >>> id season year Maximums
+ 0 023301 Spring 1928 231.000000
+ 1 023301 Spring 1929 241.000000
+ 2 023301 Spring 1930 317.000000
+ ...
+ """
+ return (
+ self._get_max(tolerence=tolerence, seasons=seasons)
+ .to_dataframe()
+ .reset_index()[
+ [
+ "id",
+ "season",
+ "year",
+ "start_date",
+ "end_date",
+ list(self.data.keys())[0],
+ ]
+ ]
+ .dropna()
+ )
+
+ def _get_max(self, tolerence=None, seasons=[]):
+ """
+ Function to get the maximum value per season, according to a tolerance.
+
+ Parameters
+ ----------
+ tolerence : Float
+ Tolerance in decimal form (0.15 for 15%)
+
+ seasons : List
+ List of season's name to be checked
+
+ Returns
+ -------
+ da : xr.DataArray
+ DataArray of maximums
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path =
+ '/datasets/xhydro/tests/cehq/zarr'
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> donnees.get_maximum(tolerence=0.15, seasons=['Spring'])
+ >>> catchment_list = ['023301']
+ >>> sub_set = donnees.select_catchments(catchment_list = catchment_list)
+ >>> sub_set.season = ['Spring', 60, 182]
+ >>> sub_set._get_max(tolerence=0.15, seasons=['Spring'])
+ xarray.DataArray 'value' (season: 1, year: 52, id: 1)
+ """
+ grouped_ds = self.copy()
+
+ def max_over_one_season(grouped_ds, tolerence, season):
+ season_vals = grouped_ds._get_season_values(season)
+ if isinstance(season_vals, xr.Dataset):
+ years = np.unique(season_vals.year)
+ bvs = np.unique(season_vals.id)
+ max = np.empty((len(years), len(bvs)), dtype=object)
+ beg = np.empty((len(years), len(bvs)), dtype=object)
+ end = np.empty((len(years), len(bvs)), dtype=object)
+ for y, year in enumerate(years):
+ for b, bv in enumerate(bvs):
+ dd = season_vals.sel(year=year, id=bv).value.to_numpy().tolist()
+ beg[y, b] = pd.to_datetime(
+ str(year) + str(dd[0]), format="%Y%j"
+ )
+ end[y, b] = pd.to_datetime(
+ str(year) + str(dd[1]), format="%Y%j"
+ )
+ ds_year = grouped_ds.data.where(
+ grouped_ds.data.time.dt.year == year, drop=True
+ )
+ ds_year = ds_year.sel(id=bv)
+
+ # +1 to include end
+ ds_period = ds_year.sel(
+ time=np.isin(
+ ds_year.time.dt.dayofyear, range(dd[0], dd[1] + 1)
+ )
+ )
+
+ d = ds_period[list(ds_period.keys())[0]].values
+ timestep = xh.get_timestep(ds_year)
+ # timestep = float(
+ # ds_year.time.dt.dayofyear.timestep.values.tolist()
+ # )
+ nb_expected = (dd[1] + 1 - dd[0]) / timestep
+ # nb_expected is used to account for missing and nan
+ if np.count_nonzero(~np.isnan(d)) / nb_expected > (
+ 1 - tolerence
+ ):
+ max[y, b] = np.nanmax(d) # .tolist()
+ else:
+ max[y, b] = np.nan
+
+ max_ds = xr.Dataset()
+
+ max_ds.coords["year"] = xr.DataArray(years, dims=("year",))
+ max_ds.coords["id"] = xr.DataArray(bvs, dims=("id",))
+ max_ds.coords["start_date"] = xr.DataArray(beg, dims=("year", "id"))
+ max_ds.coords["end_date"] = xr.DataArray(end, dims=("year", "id"))
+ max_ds["value"] = xr.DataArray(max.astype(float), dims=("year", "id"))
+ # For each bv
+ # For each year
+ # check for tolerence
+ # get max
+ # create a DS
+ return max_ds
+ else:
+ # TODO add year from grouped_ds.data.dt.year
+ # and make full str start_date and end_date
+ grouped_ds.data.coords["start_date"] = pd.to_datetime(
+ str(season_vals[0]), format="%j"
+ ).strftime("%m-%d")
+ grouped_ds.data.coords["end_date"] = pd.to_datetime(
+ str(season_vals[1]), format="%j"
+ ).strftime("%m-%d")
+
+ return (
+ grouped_ds.custom_group_by(season_vals[0], season_vals[1])
+ .max()
+ .where(
+ grouped_ds.get_bool_over_tolerence(tolerence, season), drop=True
+ )
+ )
+
+ if seasons:
+ # Creating a new dimension of season and
+ # merging all Dataset from max_over_one_season
+ ds = xr.concat(
+ [
+ max_over_one_season(grouped_ds, tolerence, season)
+ .assign_coords(season=season)
+ .expand_dims("season")
+ for season in seasons
+ ],
+ dim="season",
+ )
+ return ds[list(ds.keys())[0]]
+
+ else:
+ # TODO tolerance is not applied when no season is defined
+ return (
+ grouped_ds.data.groupby("time.year")
+ .max()
+ .assign_coords(season="Whole year")
+ .expand_dims("season")
+ )
+
+ def calculate_volume(self, dates: Union[list, xr.Dataset] = None, tolerence=0.15):
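+ """
+ Compute flood volumes in hm³ over a set of dates.
+
+ Parameters
+ ----------
+ dates : Union[list, xr.Dataset]
+ Either a [beg, end] list of Julian days applied to every year,
+ or a Dataset of per-year and per-catchment [beg, end] ranges
+
+ tolerence : Float
+ Tolerance in decimal form (0.15 for 15%)
+
+ Returns
+ -------
+ ds : xr.Dataset
+ Dataset with the volume (in hm³) and the start and end dates
+ of each period
+
+ Examples
+ --------
+ >>> donnees = Data(ds)
+ >>> vol = donnees.calculate_volume(dates=[35, 36])
+ """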
+ ds = self.copy()
+
+ def conversion_factor_to_hm3(timestep):
+ # flow is in m³/s and we want m³, so we multiply by seconds
+ # TODO check if last date is included
+ return pd.to_timedelta(1, unit=timestep).total_seconds()
+
+ if dates is None:
+ # TODO use season dates
+ pass
+ elif isinstance(dates, list):
+ # TODO bool over tolerence takes season, generalise
+ with warnings.catch_warnings(): # Removes overlaping warning
+ warnings.simplefilter("ignore")
+ self.season = ["Volumes", dates[0], dates[1]]
+ grouped_ds = (
+ ds.custom_group_by(dates[0], dates[1])
+ .sum()
+ .where(ds.get_bool_over_tolerence(tolerence, "Volumes"), drop=True)
+ )
+ self.rm_season("Volumes")
+            # Transform to hm³
+ # TODO add start and end and clear other attributes
+ grouped_ds = (
+ grouped_ds
+ * xr.apply_ufunc(
+ conversion_factor_to_hm3,
+ grouped_ds["timestep"],
+ input_core_dims=[[]],
+ vectorize=True,
+ )
+ * (dates[1] - dates[0])
+ / 1000000 # from m³ to hm³
+ )
+
+ df = grouped_ds.year.to_dataframe()
+ df["beg"] = dates[0]
+ df["end"] = dates[1]
+
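+            # year * 1000 + day-of-year zero-pads the day so "%Y%j" parses:
+            # e.g. 2000 * 1000 + 60 -> 2000060 -> year 2000, day 060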
+ grouped_ds["start_date"] = pd.to_datetime(
+ df["year"] * 1000 + df["beg"], format="%Y%j"
+ )
+ grouped_ds["end_date"] = pd.to_datetime(
+ df["year"] * 1000 + df["end"], format="%Y%j"
+ )
+ grouped_ds["units"] = "hm³"
+
+ return grouped_ds.drop_vars(
+ [
+ "drainage_area",
+ "latitude",
+ "longitude",
+ "name",
+ "source",
+ "timestep",
+ "province",
+ "regulated",
+ ]
+ ).rename_vars({list(grouped_ds.keys())[0]: "volume"})
+        elif isinstance(dates, xr.Dataset):
+            # TODO make sure `dates` has the same dimensions as the target
+            years = np.unique(ds.data.time.dt.year)
+            vol = np.empty((len(years), len(ds.data.id)), dtype=object)
+            beg = np.empty((len(years), len(ds.data.id)), dtype=object)
+            end = np.empty((len(years), len(ds.data.id)), dtype=object)
+            for y, year in enumerate(years):
+ for b, bv in enumerate(ds.data.id):
+                    try:
+                        dd = dates.sel(year=year, id=bv).value.to_numpy().tolist()
+                    except KeyError:
+                        # KeyError can occur if `dates` is incomplete; skip this
+                        # station-year rather than silently reusing a stale dd
+                        vol[y, b] = np.nan
+                        continue
+                    beg[y, b] = pd.to_datetime(str(year) + str(dd[0]), format="%Y%j")
+                    end[y, b] = pd.to_datetime(str(year) + str(dd[1]), format="%Y%j")
+ ds_year = ds.data.where(ds.data.time.dt.year == year, drop=True)
+ ds_year = ds_year.sel(id=bv)
+                    # +1 to include the end
+                    # TODO a single day in ds_period gives a volume of 0
+                    # TODO check for tolerance
+ ds_period = ds_year.sel(
+ time=np.isin(ds_year.time.dt.dayofyear, range(dd[0], dd[1] + 1))
+ )
+                    # delta is in ns: convert to s (1e9), then m³ to hm³ (1e6)
+ delta = ds_period.time[-1] - ds_period.time[0]
+ delta = delta.to_numpy().tolist() / (1000000000 * 1000000)
+ vol[y, b] = (
+ sum(
+ ds_period.squeeze()[list(ds_period.keys())[0]].values
+ ).tolist()
+ * delta
+ )
+
+ vol_ds = xr.Dataset()
+
+ vol_ds.coords["year"] = xr.DataArray(
+ np.unique(ds.data.time.dt.year), dims=("year",)
+ )
+ vol_ds.coords["id"] = xr.DataArray(ds.data.id.to_numpy(), dims=("id",))
+ vol_ds.coords["units"] = "hm³"
+ vol_ds.coords["start_date"] = xr.DataArray(beg, dims=("year", "id"))
+ vol_ds.coords["end_date"] = xr.DataArray(end, dims=("year", "id"))
+ vol_ds["volume"] = xr.DataArray(vol, dims=("year", "id"))
+
+ return vol_ds
+
+
+class Local:
+ def __init__(
+ self,
+ data_ds,
+ return_period,
+ dates_vol=None,
+ dist_list=[
+ "expon",
+ "gamma",
+ "genextreme",
+ "genpareto",
+ "gumbel_r",
+ "pearson3",
+ "weibull_min",
+ ],
+ tolerence=0.15,
+ seasons=None,
+ min_year=15,
+ vars_of_interest=["max", "vol"],
+ calculated=False,
+ ):
+        # TODO if the data type is object instead of float it will crash; better
+        # to convert or to raise a warning?
+        try:
+            # an xarray object was provided: cast its values to float
+            data_ds = data_ds.astype(float)
+        except AttributeError:
+            # an xhydro.Data object was provided: cast the wrapped dataset
+            data_ds.data = data_ds.data.astype(float)
+        self.data = data_ds
+ self.return_period = return_period
+ self.dist_list = dist_list
+ self.dates_vol = dates_vol
+ self.tolerence = tolerence
+ self.seasons = seasons
+ self.min_year = min_year
+ self.analyse_max = None
+ self.analyse_vol = None
+        # TODO list(ds.keys())[0] is used multiple times; use self.var_name
+        # instead and generalize to all variables, not just [0]
+ self.var_name = list(self.data.data.keys())[0]
+
+ if "max" in vars_of_interest:
+ self.analyse_max = self._freq_analys(calculated, var_of_interest="max")
+ if "vol" in vars_of_interest:
+ self.analyse_vol = self._freq_analys(calculated, var_of_interest="vol")
+
+ def _freq_analys(self, calculated: bool, var_of_interest: str):
+ """
+ This function is executed upon initialization of calss Local. It performs multiple frequency analysis over the data provided.
+
+ Parameters
+ ----------
+ self.data : xhydro.Data
+ a dataset containing hydrological data
+
+ self.return_period : list
+ list of return periods as float
+
+ self.dist_list : list
+ list of distribution supported by scypy.stat
+
+ self.tolerence : float
+ percentage of missing value tolerence in decimal form (0.15 for 15%), if above within the season, the maximum for that year will be skipped
+
+ self.seasons : list
+ list of seasons names, begining and end of said seasons must have been set previously in the object xhydro.Data
+
+ self.min_year : int
+ Minimum number of year. If a station has less year than the minimum, for any given season, the station will be skipped
+
+ Returns
+ -------
+ self.analyse : xr.Dataset
+ A Dataset with dimensions id, season, scipy_dist and return_period indexes with variables Criterions and Quantiles
+
+ Examples
+ --------
+ >>> TODO Not sure how to set example here
+
+
+ """
+
+        def get_criterions(data, params, dist):
+            # Drop NaNs so the log-likelihood only uses observed values
+            data = data[~np.isnan(data)]
+
+            llh = dist.logpdf(data, *params).sum()  # log-likelihood
+
+            aic = eval_measures.aic(llf=llh, nobs=len(data), df_modelwc=len(params))
+            bic = eval_measures.bic(llf=llh, nobs=len(data), df_modelwc=len(params))
+            try:
+                aicc = eval_measures.aicc(
+                    llf=llh, nobs=len(data), df_modelwc=len(params)
+                )
+            except Exception:
+                # e.g. division by zero when nobs == df_modelwc + 1
+                aicc = np.nan
+
+            return {"aic": aic, "bic": bic, "aicc": aicc}
+
+        def fit_one_distrib(ds_max, dist):
+            # Fit one distribution and tag the result with its name along a new
+            # "scipy_dist" dimension
+            return (
+                fit(ds_max.chunk(dict(time=-1)), dist=dist)
+                .assign_coords(scipy_dist=dist)
+                .expand_dims("scipy_dist")
+            )
+
+ if calculated:
+ ds_calc = self.data.rename({"year": "time"}).load()
+ else:
+ if var_of_interest == "max":
+ ds_calc = (
+ self.data._get_max(self.tolerence, self.seasons)
+ .rename({"year": "time"})
+ .load()
+ )
+ elif var_of_interest == "vol":
+ ds_calc = (
+ self.data.calculate_volume(
+ tolerence=self.tolerence, dates=self.dates_vol
+ )
+ .rename({"year": "time", "volume": "value"})
+ .astype(float)
+ .load()
+ )
+ ds_calc = ds_calc.value
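+        # dropna with thresh=min_year keeps only stations (id) with at least
+        # min_year non-NaN yearly values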
+ ds_calc = ds_calc.dropna(dim="id", thresh=self.min_year)
+
+ quantiles = []
+ criterions = []
+ parameters = []
+ for dist in self.dist_list:
+ # FIXME .load() causes issues, but it was added to fix something
+
+ params = fit_one_distrib(ds_calc, dist).load()
+ parameters.append(params)
+ quantiles.append(
+ xr.merge(
+ [
+ parametric_quantile(params, q=1 - 1.0 / T)
+ for T in self.return_period
+ ]
+ )
+ )
+ dist_obj = getattr(scipy.stats, dist)
+
+ crit = xr.apply_ufunc(
+ get_criterions,
+ ds_calc,
+ params,
+ dist_obj,
+ input_core_dims=[["time"], ["dparams"], []],
+ vectorize=True,
+ )
+            # If crit is a DataArray, name it "value"; if it is a Dataset, it
+            # already carries variable names
+ if isinstance(crit, xr.DataArray):
+ crit.name = "value"
+ criterions.append(crit)
+
+        def append_var_names(ds, suffix):
+            try:
+                var_list = list(ds.keys())
+            except AttributeError:
+                # DataArray: rename it directly
+                return ds.rename(ds.name + suffix)
+            dict_names = dict(zip(var_list, [s + suffix for s in var_list]))
+            return ds.rename(dict_names)
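+        # e.g. append_var_names(ds, "_quantiles") renames a "streamflow" variable
+        # to "streamflow_quantiles" ("streamflow" is just an illustrative name)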
+
+        ds_parameters = xr.concat(
+            parameters, dim="scipy_dist", combine_attrs="drop_conflicts"
+        )
+        ds_parameters = append_var_names(ds_parameters, "_parameters")
+
+ ds_quantiles = xr.merge(quantiles)
+ ds_quantiles = append_var_names(ds_quantiles, "_quantiles")
+
+ ds_criterions = xr.merge(criterions)
+ ds_criterions = append_var_names(ds_criterions, "_criterions")
+
+        ds_quantiles = ds_quantiles.rename({"quantile": "return_period"})
+        # Quantiles were computed at q = 1 - 1/T, so invert to recover T
+        ds_quantiles["return_period"] = 1.0 / (1 - ds_quantiles.return_period)
+
+        return xr.merge([ds_criterions, ds_quantiles, ds_calc, ds_parameters])
+
+ def view_criterions(self, var_of_interest):
+ """
+ Fonction to get Criterions results from a xhydro.Local object. Output is rounded for easiser visualisation.
+
+ Returns
+ -------
+ df : pd.Dataframe
+ Dataframe organised with id, season, year, scipy_dist
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path = '/datasets/xhydro/tests/cehq/zarr'
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> donnees.get_maximum(tolerence=0.15, seasons=['Spring'])
+ >>> catchment_list = ['023301']
+ >>> sub_set = donnees.select_catchments(catchment_list = catchment_list)
+ >>> sub_set.season = ['Spring', 60, 182]
+ >>> return_period = np.array([1.01, 2, 2.33, 5, 10, 20, 50, 100, 200, 500, 1000, 10000])
+ >>> dist_list = ['expon', 'gamma', 'genextreme', 'genpareto', 'gumbel_r', 'pearson3', 'weibull_min']
+ >>> fa = xh.Local(data_ds = sub_set, return_period = return_period, dist_list = dist_list, tolerence = 0.15, seasons = ['Automne', 'Printemps'], min_year = 15)
+ >>> fa.view_quantiles()
+ >>> id season level_2 return_period Quantiles
+ 0 023301 Spring expon 1.01 22.157376
+ 1 023301 Spring expon 2.00 87.891419
+ 2 023301 Spring expon 2.33 102.585536
+ 3 023301 Spring expon 5.00 176.052678
+ 4 023301 Spring expon 10.00 242.744095
+ ...
+ """
+        # DataArray.to_dataframe uses the first dimension as a nameless index, so
+        # depending on its position in dim_order, a dimension gets named level_x
+        if var_of_interest == "vol":
+            var_list = [s for s in self.analyse_vol.keys() if "criterions" in s]
+            return (
+                self.analyse_vol[var_list]
+                .to_dataframe()
+                .reset_index()[["id", "scipy_dist"] + var_list]
+                .round()
+            )
+        elif var_of_interest == "max":
+            var_list = [s for s in self.analyse_max.keys() if "criterions" in s]
+            return (
+                self.analyse_max[var_list]
+                .to_dataframe()
+                .reset_index()[["id", "season", "scipy_dist"] + var_list]
+                .round()
+            )
+        else:
+            return print('use "vol" for volumes or "max" for maximums')
+
+ def view_quantiles(self, var_of_interest):
+ """
+ Fonction to get Quantiles results from a xhydro.Local object.
+
+ Returns
+ -------
+ df : pd.Dataframe
+ Dataframe organised with id, season, year, scipy_dist, return_period
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path = '/datasets/xhydro/tests/cehq/zarr'
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> donnees.get_maximum(tolerence=0.15, seasons=['Spring'])
+ >>> catchment_list = ['023301']
+ >>> sub_set = donnees.select_catchments(catchment_list = catchment_list)
+ >>> sub_set.season = ['Spring', 60, 182]
+ >>> return_period = np.array([1.01, 2, 2.33, 5, 10, 20, 50, 100, 200, 500, 1000, 10000])
+ >>> dist_list = ['expon', 'gamma', 'genextreme', 'genpareto', 'gumbel_r', 'pearson3', 'weibull_min']
+ >>> fa = xh.Local(data_ds = sub_set, return_period = return_period, dist_list = dist_list, tolerence = 0.15, seasons = ['Automne', 'Printemps'], min_year = 15)
+ >>> fa.view_criterions()
+ >>> id season level_2 Criterions
+ >>> 0 023301 Spring expon {'aic': 582.9252842821857, 'bic': 586.82777171...
+ >>> 1 023301 Spring gamma {'aic': 1739.77441499742, 'bic': 1745.62814615...
+ ...
+ """
+
+        if var_of_interest == "vol":
+            var_list = [s for s in self.analyse_vol.keys() if "quantiles" in s]
+            # Volumes carry no season dimension
+            return (
+                self.analyse_vol[var_list]
+                .to_dataframe()
+                .reset_index()[["id", "scipy_dist", "return_period"] + var_list]
+                .round()
+            )
+        elif var_of_interest == "max":
+            var_list = [s for s in self.analyse_max.keys() if "quantiles" in s]
+            return (
+                self.analyse_max[var_list]
+                .to_dataframe()
+                .reset_index()[
+                    ["id", "season", "scipy_dist", "return_period"] + var_list
+                ]
+                .round()
+            )
+        else:
+            return print('use "vol" for volumes or "max" for maximums')
+
+ def view_values(self, var_of_interest):
+ """
+ Fonction to get values results from a xhydro.Local object.
+
+ Returns
+ -------
+ df : pd.Dataframe
+ Dataframe organised with id, season, year, scipy_dist, return_period
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path = '/datasets/xhydro/tests/cehq/zarr'
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> donnees.get_maximum(tolerence=0.15, seasons=['Spring'])
+ >>> catchment_list = ['023301']
+ >>> sub_set = donnees.select_catchments(catchment_list = catchment_list)
+ >>> sub_set.season = ['Spring', 60, 182]
+ >>> return_period = np.array([1.01, 2, 2.33, 5, 10, 20, 50, 100, 200, 500, 1000, 10000])
+ >>> dist_list = ['expon', 'gamma', 'genextreme', 'genpareto', 'gumbel_r', 'pearson3', 'weibull_min']
+ >>> fa = xh.Local(data_ds = sub_set, return_period = return_period, dist_list = dist_list, tolerence = 0.15, seasons = ['Automne', 'Printemps'], min_year = 15)
+ >>> fa.view_criterions()
+ >>> id season level_2 Criterions
+ >>> 0 023301 Spring expon {'aic': 582.9252842821857, 'bic': 586.82777171...
+ >>> 1 023301 Spring gamma {'aic': 1739.77441499742, 'bic': 1745.62814615...
+ ...
+ """
+ # TODO Output as dict is ugly
+
+ if var_of_interest == "vol":
+ return self.analyse_vol[self.var_name].to_dataframe().dropna().reset_index()
+ elif var_of_interest == "max":
+ return (
+ self.analyse_max[self.var_name]
+ .to_dataframe()
+ .reset_index()[
+ ["id", "season", "time", "start_date", "end_date", self.var_name]
+ ]
+ .dropna()
+ )
+ else:
+            return print('use "vol" for volumes or "max" for maximums')
diff --git a/xhydro/utils.py b/xhydro/utils.py
new file mode 100644
index 00000000..22c2daaa
--- /dev/null
+++ b/xhydro/utils.py
@@ -0,0 +1,52 @@
+import datetime
+
+
+def get_julian_day(month, day, year=None):
+ """
+ Return julian day for a specified date, if year is not specified, uses curent year
+
+ Parameters
+ ----------
+ month : int
+ integer of the target month
+
+ day : int
+ integer of the target day
+
+ year : int
+ integer of the target year
+
+ Returns
+ -------
+ int
+ julian day (1 - 366)
+
+ Examples
+ --------
+ >>> import xarray as xr
+ >>> cehq_data_path = "/dbfs/mnt/devdlzxxkp01/datasets/xhydro/tests/cehq/zarr"
+ >>> ds = xr.open_zarr(cehq_data_path, consolidated=True)
+ >>> donnees = Data(ds)
+ >>> jj = donnees.get_julian_day(month=9, day=1)
+ >>> jj: 244
+ >>> jj = donnees.get_julian_day(month=9, day=1, year=2000)
+ >>> jj: 245
+ """
+ if year is None:
+ year = datetime.date.today().year
+
+ return datetime.datetime(year, month, day).timetuple().tm_yday
+
+
+def get_timestep(array_in):
+    """
+    Return the timestep of a time array as a numpy.timedelta64 in minutes, hours
+    or days, assuming evenly spaced values; returns 1 for an array with fewer
+    than two elements.
+    """
+    if len(array_in) < 2:
+        # A single element: fall back to a unitless step of 1
+        return 1
+    # Average spacing between the first and last timestamps, in minutes
+    timestep = ((array_in[-1] - array_in[0]) / (array_in.size - 1)).values.astype(
+        "timedelta64[m]"
+    )
+    # Promote to hours or days when the spacing allows it
+    if timestep >= 60 and timestep < 24 * 60:
+        timestep = timestep.astype("timedelta64[h]")
+    elif timestep >= 24 * 60:
+        timestep = timestep.astype("timedelta64[D]")
+ return timestep
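+
+
+# Usage sketch for get_timestep (illustrative data, not from the package):
+# >>> import pandas as pd
+# >>> import xarray as xr
+# >>> times = xr.DataArray(pd.date_range("2000-01-01", periods=48, freq="h"))
+# >>> get_timestep(times)
+# numpy.timedelta64(1,'h')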