Skip to content

Commit

Permalink
[ci skip] Publishing updated documentation for '' branch
Browse files Browse the repository at this point in the history
  • Loading branch information
tgsmith61591 committed Oct 23, 2023
1 parent 287bcc5 commit c7a3bdc
Show file tree
Hide file tree
Showing 447 changed files with 78,261 additions and 315 deletions.
4 changes: 4 additions & 0 deletions 2.0.4/.buildinfo
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 543e056b2d7a7e3deaa82c5a39e31702
tags: 645f666f9bcd5a90fca523b33c5a78b7
Empty file added 2.0.4/.nojekyll
Empty file.
Binary file added 2.0.4/_downloads/auto_examples_jupyter.zip
Binary file not shown.
Binary file added 2.0.4/_downloads/auto_examples_python.zip
Binary file not shown.
54 changes: 54 additions & 0 deletions 2.0.4/_downloads/example_add_new_samples.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Adding new observations to your model\n\n\n\nThis example demonstrates how to add new ground truth\nobservations to your model so that forecasting continues\nwith respect to true, observed values. This also slightly\nupdates the model parameters, taking several new steps from\nthe existing model parameters.\n\n.. raw:: html\n\n <br/>\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"print(__doc__)\n\n# Author: Taylor Smith <taylor.smith@alkaline-ml.com>\n\nimport pmdarima as pm\nfrom pmdarima import model_selection\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# #############################################################################\n# Load the data and split it into separate pieces\ndata = pm.datasets.load_lynx()\ntrain, test = model_selection.train_test_split(data, train_size=100)\n\n# #############################################################################\n# Fit with some validation (cv) samples\narima = pm.auto_arima(train, start_p=1, start_q=1, d=0, max_p=5, max_q=5,\n out_of_sample_size=10, suppress_warnings=True,\n stepwise=True, error_action='ignore')\n\n# Now plot the results and the forecast for the test set\npreds, conf_int = arima.predict(n_periods=test.shape[0],\n return_conf_int=True)\n\nfig, axes = plt.subplots(2, 1, figsize=(12, 8))\nx_axis = np.arange(train.shape[0] + preds.shape[0])\naxes[0].plot(x_axis[:train.shape[0]], train, alpha=0.75)\naxes[0].scatter(x_axis[train.shape[0]:], preds, alpha=0.4, marker='o')\naxes[0].scatter(x_axis[train.shape[0]:], test, alpha=0.4, marker='x')\naxes[0].fill_between(x_axis[-preds.shape[0]:], conf_int[:, 0], conf_int[:, 1],\n alpha=0.1, color='b')\n\n# fill the section where we \"held out\" samples in our model fit\n\naxes[0].set_title(\"Train samples & forecasted test samples\")\n\n# Now add the actual samples to the model and create NEW forecasts\narima.update(test)\nnew_preds, new_conf_int = arima.predict(n_periods=10, return_conf_int=True)\nnew_x_axis = np.arange(data.shape[0] + 10)\n\naxes[1].plot(new_x_axis[:data.shape[0]], data, alpha=0.75)\naxes[1].scatter(new_x_axis[data.shape[0]:], new_preds, alpha=0.4, marker='o')\naxes[1].fill_between(new_x_axis[-new_preds.shape[0]:],\n new_conf_int[:, 0],\n new_conf_int[:, 1],\n alpha=0.1, color='g')\naxes[1].set_title(\"Added new observed values with new forecasts\")\nplt.show()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.9"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
65 changes: 65 additions & 0 deletions 2.0.4/_downloads/example_add_new_samples.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
"""
=====================================
Adding new observations to your model
=====================================
This example demonstrates how to add new ground truth
observations to your model so that forecasting continues
with respect to true, observed values. This also slightly
updates the model parameters, taking several new steps from
the existing model parameters.
.. raw:: html
<br/>
"""
print(__doc__)

# Author: Taylor Smith <taylor.smith@alkaline-ml.com>

import pmdarima as pm
from pmdarima import model_selection
import matplotlib.pyplot as plt
import numpy as np

# #############################################################################
# Load the lynx dataset and reserve the first 100 observations for training
data = pm.datasets.load_lynx()
train, test = model_selection.train_test_split(data, train_size=100)

# #############################################################################
# Fit, holding out 10 of the training samples for internal validation (cv)
model = pm.auto_arima(train, start_p=1, start_q=1, d=0, max_p=5, max_q=5,
                      out_of_sample_size=10, suppress_warnings=True,
                      stepwise=True, error_action='ignore')

# Forecast over the test horizon, along with confidence intervals
forecasts, ci = model.predict(n_periods=test.shape[0],
                              return_conf_int=True)

fig, (ax_before, ax_after) = plt.subplots(2, 1, figsize=(12, 8))
n_train = train.shape[0]
timeline = np.arange(n_train + forecasts.shape[0])
ax_before.plot(timeline[:n_train], train, alpha=0.75)
ax_before.scatter(timeline[n_train:], forecasts, alpha=0.4, marker='o')
ax_before.scatter(timeline[n_train:], test, alpha=0.4, marker='x')
ax_before.fill_between(timeline[-forecasts.shape[0]:], ci[:, 0], ci[:, 1],
                       alpha=0.1, color='b')

# fill the section where we "held out" samples in our model fit

ax_before.set_title("Train samples & forecasted test samples")

# Fold the observed test values back into the model, then forecast again
model.update(test)
new_forecasts, new_ci = model.predict(n_periods=10, return_conf_int=True)
full_timeline = np.arange(data.shape[0] + 10)

ax_after.plot(full_timeline[:data.shape[0]], data, alpha=0.75)
ax_after.scatter(full_timeline[data.shape[0]:], new_forecasts,
                 alpha=0.4, marker='o')
ax_after.fill_between(full_timeline[-new_forecasts.shape[0]:],
                      new_ci[:, 0],
                      new_ci[:, 1],
                      alpha=0.1, color='g')
ax_after.set_title("Added new observed values with new forecasts")
plt.show()
54 changes: 54 additions & 0 deletions 2.0.4/_downloads/example_array_concatenation.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Array concatenation\n\n\n\nIn this example, we demonstrate pyramid's convenient ``c`` function, which is,\nin essence, the same as R's. It's nothing more than a convenience function in\nthe package, but one you should understand if you're contributing.\n\n.. raw:: html\n\n <br/>\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"print(__doc__)\n\n# Author: Taylor Smith <taylor.smith@alkaline-ml.com>\n\nimport pmdarima as pm\nimport numpy as np\n\n# #############################################################################\n# You can use the 'c' function to define an array from *args\narray1 = pm.c(1, 2, 3, 4, 5)\n\n# Or you can define an array from an existing iterable:\narray2 = pm.c([1, 2, 3, 4, 5])\nassert np.array_equal(array1, array2)\n\n# You can even use 'c' to flatten arrays:\narray_flat = pm.c(1, 2, 3, [4, 5])\nassert np.array_equal(array_flat, np.arange(5) + 1)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.9"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
32 changes: 32 additions & 0 deletions 2.0.4/_downloads/example_array_concatenation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
"""
===================
Array concatenation
===================
In this example, we demonstrate pyramid's convenient ``c`` function, which is,
in essence, the same as R's. It's nothing more than a convenience function in
the package, but one you should understand if you're contributing.
.. raw:: html
<br/>
"""
print(__doc__)

# Author: Taylor Smith <taylor.smith@alkaline-ml.com>

import pmdarima as pm
import numpy as np

# #############################################################################
# 'c' builds an array either from varargs...
from_args = pm.c(1, 2, 3, 4, 5)

# ...or from a single existing iterable; both produce the same result:
from_iterable = pm.c([1, 2, 3, 4, 5])
assert np.array_equal(from_args, from_iterable)

# Mixing scalars and iterables is fine too — 'c' flattens them together:
flattened = pm.c(1, 2, 3, [4, 5])
assert np.array_equal(flattened, np.arange(5) + 1)
54 changes: 54 additions & 0 deletions 2.0.4/_downloads/example_array_differencing.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Array differencing\n\n\n\nIn this example, we demonstrate pyramid's array differencing, and how it's used\nin conjunction with the ``d`` term to lag a time series.\n\n.. raw:: html\n\n <br/>\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"print(__doc__)\n\n# Author: Taylor Smith <taylor.smith@alkaline-ml.com>\n\nfrom pmdarima.utils import array\n\n# Build an array and show first order differencing results\nx = array.c(10, 4, 2, 9, 34)\nlag_1 = array.diff(x, lag=1, differences=1)\n\n# The result will be the same as: x[1:] - x[:-1]\nprint(lag_1) # [-6., -2., 7., 25.]\n\n# Note that lag and differences are not the same! If we crank diff up by one,\n# it performs the same differencing as above TWICE. Lag, therefore, controls\n# the number of steps backward the ts looks when it differences, and the\n# `differences` parameter controls how many times to repeat.\nprint(array.diff(x, lag=1, differences=2)) # [4., 9., 18.]\n\n# Conversely, when we set lag to 2, the array looks two steps back for its\n# differencing operation (only one).\nprint(array.diff(x, lag=2, differences=1)) # [-8., 5., 32.]\n\n# The lag parameter is controlled by `m`, which is the seasonal periodicity of\n# a time series. If your series is non-seasonal, lag will typically be 1."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.9"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
38 changes: 38 additions & 0 deletions 2.0.4/_downloads/example_array_differencing.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
"""
==================
Array differencing
==================
In this example, we demonstrate pyramid's array differencing, and how it's used
in conjunction with the ``d`` term to lag a time series.
.. raw:: html
<br/>
"""
print(__doc__)

# Author: Taylor Smith <taylor.smith@alkaline-ml.com>

from pmdarima.utils import array

# Construct a small series and take its first-order difference
series = array.c(10, 4, 2, 9, 34)
first_diff = array.diff(series, lag=1, differences=1)

# Equivalent to: series[1:] - series[:-1]
print(first_diff)  # [-6., -2., 7., 25.]

# Note that lag and differences are not the same! If we crank diff up by one,
# it performs the same differencing as above TWICE. Lag, therefore, controls
# the number of steps backward the ts looks when it differences, and the
# `differences` parameter controls how many times to repeat.
print(array.diff(series, lag=1, differences=2))  # [4., 9., 18.]

# Conversely, when we set lag to 2, the array looks two steps back for its
# differencing operation (only one).
print(array.diff(series, lag=2, differences=1))  # [-8., 5., 32.]

# The lag parameter is controlled by `m`, which is the seasonal periodicity of
# a time series. If your series is non-seasonal, lag will typically be 1.
54 changes: 54 additions & 0 deletions 2.0.4/_downloads/example_auto_arima.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Fitting an auto_arima model\n\n\n\nThis example demonstrates how we can use the ``auto_arima`` function to\nselect an optimal time series model. We'll be fitting our model on the lynx\ndataset available in the `datasets` submodule.\n\n.. raw:: html\n\n <br/>\n\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"print(__doc__)\n\n# Author: Taylor Smith <taylor.smith@alkaline-ml.com>\n\nimport pmdarima as pm\nfrom pmdarima import model_selection\nfrom sklearn.metrics import mean_squared_error\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# #############################################################################\n# Load the data and split it into separate pieces\ndata = pm.datasets.load_lynx()\ntrain, test = model_selection.train_test_split(data, train_size=90)\n\n# Fit a simple auto_arima model\nmodl = pm.auto_arima(train, start_p=1, start_q=1, start_P=1, start_Q=1,\n max_p=5, max_q=5, max_P=5, max_Q=5, seasonal=True,\n stepwise=True, suppress_warnings=True, D=10, max_D=10,\n error_action='ignore')\n\n# Create predictions for the future, evaluate on test\npreds, conf_int = modl.predict(n_periods=test.shape[0], return_conf_int=True)\n\n# Print the error:\nprint(\"Test RMSE: %.3f\" % np.sqrt(mean_squared_error(test, preds)))\n\n# #############################################################################\n# Plot the points and the forecasts\nx_axis = np.arange(train.shape[0] + preds.shape[0])\nx_years = x_axis + 1821 # Year starts at 1821\n\nplt.plot(x_years[x_axis[:train.shape[0]]], train, alpha=0.75)\nplt.plot(x_years[x_axis[train.shape[0]:]], preds, alpha=0.75) # Forecasts\nplt.scatter(x_years[x_axis[train.shape[0]:]], test,\n alpha=0.4, marker='x') # Test data\nplt.fill_between(x_years[x_axis[-preds.shape[0]:]],\n conf_int[:, 0], conf_int[:, 1],\n alpha=0.1, color='b')\nplt.title(\"Lynx forecasts\")\nplt.xlabel(\"Year\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.9"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
Loading

0 comments on commit c7a3bdc

Please sign in to comment.