From d806571066cf4a6b61008ae02ba2fa7ce35ae0ab Mon Sep 17 00:00:00 2001 From: perib Date: Wed, 27 Mar 2024 12:19:06 -0700 Subject: [PATCH 1/6] graph and tree random length initial pipeline --- tpot2/search_spaces/pipelines/graph.py | 30 ++++++++++++++++++++------ 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/tpot2/search_spaces/pipelines/graph.py b/tpot2/search_spaces/pipelines/graph.py index c8a5280f..0ebe7092 100644 --- a/tpot2/search_spaces/pipelines/graph.py +++ b/tpot2/search_spaces/pipelines/graph.py @@ -303,7 +303,7 @@ def _crossover_swap_branch(self, G2, rng=None): pair_gen = select_nodes_randomly(self.graph, G2.graph, rng=rng) for node1, node2 in pair_gen: - #TODO: if root is in inner_config_dict, then do use it? + #TODO: if root is in inner_search_space, then do use it? if node1 is self.root or node2 is G2.root: #dont want to add root as inner node continue @@ -354,7 +354,7 @@ def _crossover_take_branch(self, G2, rng=None): pair_gen = select_nodes_randomly(self.graph, G2.graph, rng=rng) for node1, node2 in pair_gen: - #TODO: if root is in inner_config_dict, then do use it? + #TODO: if root is in inner_search_space, then do use it? if node2 is G2.root: #dont want to add root as inner node continue @@ -365,7 +365,7 @@ def _crossover_take_branch(self, G2, rng=None): #icheck if node2 is graph individual # if isinstance(node2,GraphIndividual): - # if not ((isinstance(node2,GraphIndividual) and ("Recursive" in self.inner_config_dict or "Recursive" in self.leaf_search_space))): + # if not ((isinstance(node2,GraphIndividual) and ("Recursive" in self.inner_search_space or "Recursive" in self.leaf_search_space))): # continue #isolating the branch @@ -624,9 +624,8 @@ class GraphPipeline(SklearnIndividualGenerator): def __init__(self, root_search_space : SklearnIndividualGenerator, leaf_search_space : SklearnIndividualGenerator = None, inner_search_space : SklearnIndividualGenerator =None, - max_size: int = 10, - crossover_same_depth=False, - rng=None) -> None: + max_size: int = np.inf, + crossover_same_depth=False) -> None: """ Generates a directed acyclic graph of variable size. Search spaces for root, leaf, and inner nodes can be defined separately if desired. 
@@ -642,4 +641,21 @@ def __init__(self, root_search_space : SklearnIndividualGenerator, self.crossover_same_depth = crossover_same_depth def generate(self, rng=None): - return GraphPipelineIndividual(self.search_space, self.leaf_search_space, self.inner_search_space, self.max_size, self.crossover_same_depth, rng=rng) \ No newline at end of file + rng = np.random.default_rng(rng) + ind = GraphPipelineIndividual(self.search_space, self.leaf_search_space, self.inner_search_space, self.max_size, self.crossover_same_depth, rng=rng) + # if user specified limit, grab a random number between that limit + + n_nodes = min(rng.integers(1, self.max_size), 5) + starting_ops = [] + if self.inner_search_space is not None: + starting_ops.append(ind._mutate_insert_inner_node) + if self.leaf_search_space is not None or self.inner_search_space is not None: + starting_ops.append(ind._mutate_insert_leaf) + n_nodes -= 1 + + if len(starting_ops) > 0: + for _ in range(n_nodes-1): + func = rng.choice(starting_ops) + func(rng=rng) + + return ind \ No newline at end of file From 3e2a3c4d6007da7decd78774a8cd21a97b23f13f Mon Sep 17 00:00:00 2001 From: perib Date: Fri, 29 Mar 2024 15:57:44 -0700 Subject: [PATCH 2/6] flatten pipelines to graph, graphpipe params --- Tutorial/2_Search_Spaces.ipynb | 192 +++++++++++++--------- tpot2/graphsklearn.py | 4 +- tpot2/search_spaces/base.py | 104 +++++++++++- tpot2/search_spaces/pipelines/__init__.py | 4 +- tpot2/search_spaces/pipelines/graph.py | 142 +++++++++++++--- 5 files changed, 343 insertions(+), 103 deletions(-) diff --git a/Tutorial/2_Search_Spaces.ipynb b/Tutorial/2_Search_Spaces.ipynb index c4aa8ab2..8e0af2b9 100644 --- a/Tutorial/2_Search_Spaces.ipynb +++ b/Tutorial/2_Search_Spaces.ipynb @@ -31,7 +31,7 @@ "output_type": "stream", "text": [ "sampled hyperparameters\n", - "{'metric': 'euclidean', 'n_jobs': 1, 'n_neighbors': 5, 'p': 3, 'weights': 'uniform'}\n" + "{'metric': 'euclidean', 'n_jobs': 1, 'n_neighbors': 6, 'p': 3, 'weights': 'distance'}\n" ] } ], @@ -154,9 +154,9 @@ "output_type": "stream", "text": [ "sampled hyperparameters\n", - "{'metric': 'minkowski', 'n_jobs': 1, 'n_neighbors': 6, 'p': 2, 'weights': 'uniform'}\n", + "{'metric': 'minkowski', 'n_jobs': 1, 'n_neighbors': 9, 'p': 2, 'weights': 'distance'}\n", "mutated hyperparameters\n", - "{'metric': 'minkowski', 'n_jobs': 1, 'n_neighbors': 4, 'p': 3, 'weights': 'distance'}\n" + "{'metric': 'euclidean', 'n_jobs': 1, 'n_neighbors': 5, 'p': 1, 'weights': 'uniform'}\n" ] } ], @@ -187,14 +187,14 @@ "output_type": "stream", "text": [ "original hyperparameters for individual 1\n", - "{'metric': 'minkowski', 'n_jobs': 1, 'n_neighbors': 7, 'p': 1, 'weights': 'uniform'}\n", + "{'metric': 'euclidean', 'n_jobs': 1, 'n_neighbors': 9, 'p': 2, 'weights': 'uniform'}\n", "original hyperparameters for individual 2\n", - "{'metric': 'euclidean', 'n_jobs': 1, 'n_neighbors': 5, 'p': 2, 'weights': 'distance'}\n", + "{'metric': 'euclidean', 'n_jobs': 1, 'n_neighbors': 4, 'p': 1, 'weights': 'uniform'}\n", "\n", "post crossover hyperparameters for individual 1\n", - "{'metric': 'minkowski', 'n_jobs': 1, 'n_neighbors': 7, 'p': 2, 'weights': 'uniform'}\n", + "{'metric': 'euclidean', 'n_jobs': 1, 'n_neighbors': 9, 'p': 2, 'weights': 'uniform'}\n", "post crossover hyperparameters for individual 2\n", - "{'metric': 'euclidean', 'n_jobs': 1, 'n_neighbors': 5, 'p': 2, 'weights': 'distance'}\n" + "{'metric': 'euclidean', 'n_jobs': 1, 'n_neighbors': 4, 'p': 1, 'weights': 'uniform'}\n" ] } ], @@ -637,10 +637,10 @@ " /* fitted */\n", " 
background-color: var(--sklearn-color-fitted-level-3);\n", "}\n", - "
KNeighborsClassifier(n_jobs=1, n_neighbors=7)
" + "
KNeighborsClassifier(metric='euclidean', n_jobs=1, n_neighbors=9)
" ], "text/plain": [ - "KNeighborsClassifier(n_jobs=1, n_neighbors=7)" + "KNeighborsClassifier(metric='euclidean', n_jobs=1, n_neighbors=9)" ] }, "execution_count": 5, @@ -676,7 +676,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 6, @@ -1194,10 +1194,13 @@ " /* fitted */\n", " background-color: var(--sklearn-color-fitted-level-3);\n", "}\n", - "
KNeighborsClassifier(metric='euclidean', n_jobs=1, n_neighbors=3)
" + "
LogisticRegression(C=99.0450142669678, class_weight='balanced', dual=True,\n",
+       "                   max_iter=1000, n_jobs=1, solver='liblinear')
" ], "text/plain": [ - "KNeighborsClassifier(metric='euclidean', n_jobs=1, n_neighbors=3)" + "LogisticRegression(C=99.0450142669678, class_weight='balanced', dual=True,\n", + " max_iter=1000, n_jobs=1, solver='liblinear')" ] }, "execution_count": 7, @@ -1631,10 +1634,13 @@ " /* fitted */\n", " background-color: var(--sklearn-color-fitted-level-3);\n", "}\n", - "
KNeighborsClassifier(metric='euclidean', n_jobs=1, n_neighbors=1, p=1)
" + "
KNeighborsClassifier(metric='euclidean', n_jobs=1, n_neighbors=4, p=3,\n",
+       "                     weights='distance')
" ], "text/plain": [ - "KNeighborsClassifier(metric='euclidean', n_jobs=1, n_neighbors=1, p=1)" + "KNeighborsClassifier(metric='euclidean', n_jobs=1, n_neighbors=4, p=3,\n", + " weights='distance')" ] }, "execution_count": 8, @@ -2085,13 +2091,13 @@ " /* fitted */\n", " background-color: var(--sklearn-color-fitted-level-3);\n", "}\n", - "
DecisionTreeClassifier(criterion='entropy', max_depth=22, max_features=1.0,\n",
-       "                       min_samples_leaf=16, min_samples_split=20)
" + "
DecisionTreeClassifier(criterion='entropy', max_depth=2, max_features='log2',\n",
+       "                       min_samples_leaf=4, min_samples_split=10)
" ], "text/plain": [ - "DecisionTreeClassifier(criterion='entropy', max_depth=22, max_features=1.0,\n", - " min_samples_leaf=16, min_samples_split=20)" + "DecisionTreeClassifier(criterion='entropy', max_depth=2, max_features='log2',\n", + " min_samples_leaf=4, min_samples_split=10)" ] }, "execution_count": 9, @@ -2526,10 +2532,13 @@ " /* fitted */\n", " background-color: var(--sklearn-color-fitted-level-3);\n", "}\n", - "
KNeighborsClassifier(n_jobs=1, n_neighbors=4)
" + "
DecisionTreeClassifier(criterion='entropy', max_depth=25, max_features='log2',\n",
+       "                       min_samples_leaf=6, min_samples_split=13)
" ], "text/plain": [ - "KNeighborsClassifier(n_jobs=1, n_neighbors=4)" + "DecisionTreeClassifier(criterion='entropy', max_depth=25, max_features='log2',\n", + " min_samples_leaf=6, min_samples_split=13)" ] }, "execution_count": 10, @@ -2961,13 +2970,10 @@ " /* fitted */\n", " background-color: var(--sklearn-color-fitted-level-3);\n", "}\n", - "
MLPClassifier(alpha=0.09935758704160183,\n",
-       "              learning_rate_init=0.004466259151092733)
" + "
BernoulliNB(alpha=1.1043626639293316)
" ], "text/plain": [ - "MLPClassifier(alpha=0.09935758704160183,\n", - " learning_rate_init=0.004466259151092733)" + "BernoulliNB(alpha=1.1043626639293316)" ] }, "execution_count": 11, @@ -3402,13 +3408,10 @@ " /* fitted */\n", " background-color: var(--sklearn-color-fitted-level-3);\n", "}\n", - "
DecisionTreeClassifier(criterion='entropy', max_depth=11, max_features=1.0,\n",
-       "                       min_samples_leaf=12, min_samples_split=8)
" + "
SVC(C=0.007250294080496579, degree=2, max_iter=3000, probability=True)
" ], "text/plain": [ - "DecisionTreeClassifier(criterion='entropy', max_depth=11, max_features=1.0,\n", - " min_samples_leaf=12, min_samples_split=8)" + "SVC(C=0.007250294080496579, degree=2, max_iter=3000, probability=True)" ] }, "execution_count": 12, @@ -3849,26 +3852,19 @@ " /* fitted */\n", " background-color: var(--sklearn-color-fitted-level-3);\n", "}\n", - "
Pipeline(steps=[('selectfwe', SelectFwe(alpha=0.007682074361801758)),\n",
-       "                ('fastica', FastICA(n_components=64)),\n",
-       "                ('randomforestclassifier',\n",
-       "                 RandomForestClassifier(bootstrap=False, criterion='entropy',\n",
-       "                                        min_samples_leaf=10,\n",
-       "                                        min_samples_split=6))])
" + "
Pipeline(steps=[('selectfwe', SelectFwe(alpha=0.0004402567631974485)),\n",
+       "                ('rbfsampler',\n",
+       "                 RBFSampler(gamma=0.5507862784926447, n_components=4)),\n",
+       "                ('multinomialnb', MultinomialNB(alpha=0.019703201853925403))])
" ], "text/plain": [ - "Pipeline(steps=[('selectfwe', SelectFwe(alpha=0.007682074361801758)),\n", - " ('fastica', FastICA(n_components=64)),\n", - " ('randomforestclassifier',\n", - " RandomForestClassifier(bootstrap=False, criterion='entropy',\n", - " min_samples_leaf=10,\n", - " min_samples_split=6))])" + "Pipeline(steps=[('selectfwe', SelectFwe(alpha=0.0004402567631974485)),\n", + " ('rbfsampler',\n", + " RBFSampler(gamma=0.5507862784926447, n_components=4)),\n", + " ('multinomialnb', MultinomialNB(alpha=0.019703201853925403))])" ] }, "execution_count": 13, @@ -4308,27 +4304,79 @@ " background-color: var(--sklearn-color-fitted-level-3);\n", "}\n", "
Pipeline(steps=[('selectpercentile',\n",
-       "                 SelectPercentile(percentile=75.04535288452273)),\n",
+       "                 SelectPercentile(percentile=1.0089148758394795)),\n",
        "                ('nystroem',\n",
-       "                 Nystroem(gamma=0.4607961332716787, kernel='laplacian',\n",
-       "                          n_components=90)),\n",
-       "                ('bernoullinb',\n",
-       "                 BernoulliNB(alpha=2.4816194955956314, fit_prior=False))])
SelectPercentile(percentile=1.0089148758394795)
Nystroem(gamma=0.2371171340711561, kernel='cosine', n_components=73)
XGBClassifier(base_score=None, booster=None, callbacks=None,\n",
+       "              colsample_bylevel=None, colsample_bynode=None,\n",
+       "              colsample_bytree=None, device=None, early_stopping_rounds=None,\n",
+       "              enable_categorical=False, eval_metric=None, feature_types=None,\n",
+       "              gamma=None, grow_policy=None, importance_type=None,\n",
+       "              interaction_constraints=None, learning_rate=0.003591562007988768,\n",
+       "              max_bin=None, max_cat_threshold=None, max_cat_to_onehot=None,\n",
+       "              max_delta_step=None, max_depth=8, max_leaves=None,\n",
+       "              min_child_weight=1, missing=nan, monotone_constraints=None,\n",
+       "              multi_strategy=None, n_estimators=100, n_jobs=1,\n",
+       "              num_parallel_tree=None, random_state=None, ...)
" ], "text/plain": [ "Pipeline(steps=[('selectpercentile',\n", - " SelectPercentile(percentile=75.04535288452273)),\n", + " SelectPercentile(percentile=1.0089148758394795)),\n", " ('nystroem',\n", - " Nystroem(gamma=0.4607961332716787, kernel='laplacian',\n", - " n_components=90)),\n", - " ('bernoullinb',\n", - " BernoulliNB(alpha=2.4816194955956314, fit_prior=False))])" + " Nystroem(gamma=0.2371171340711561, kernel='cosine',\n", + " n_components=73)),\n", + " ('xgbclassifier',\n", + " XGBClassifier(base_score=None, booster=None, callbacks=None,\n", + " colsample_bylevel=None, colsample_bynode=None,\n", + " colsample_bytree=None, device=None,\n", + " early_stopping_rounds=None,\n", + " enab...\n", + " feature_types=None, gamma=None, grow_policy=None,\n", + " importance_type=None,\n", + " interaction_constraints=None,\n", + " learning_rate=0.003591562007988768, max_bin=None,\n", + " max_cat_threshold=None, max_cat_to_onehot=None,\n", + " max_delta_step=None, max_depth=8,\n", + " max_leaves=None, min_child_weight=1, missing=nan,\n", + " monotone_constraints=None, multi_strategy=None,\n", + " n_estimators=100, n_jobs=1,\n", + " num_parallel_tree=None, random_state=None, ...))])" ] }, "execution_count": 14, @@ -4359,11 +4407,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "Generation: 0%| | 0/5 [00:00
TPOTEstimator(classification=True, generations=5, max_eval_time_seconds=300,\n",
        "              population_size=10, scorers=['roc_auc'], scorers_weights=[1],\n",
-       "              search_space=<tpot2.search_spaces.pipelines.graph.GraphPipeline object at 0x77c026a110c0>,\n",
+       "              search_space=<tpot2.search_spaces.pipelines.graph.GraphPipeline object at 0x7544c5ab8f40>,\n",
        "              verbose=2)
" ], "text/plain": [ "TPOTEstimator(classification=True, generations=5, max_eval_time_seconds=300,\n", " population_size=10, scorers=['roc_auc'], scorers_weights=[1],\n", - " search_space=,\n", + " search_space=,\n", " verbose=2)" ] }, @@ -4845,7 +4889,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "auroc score 0.9876518024288388\n" + "auroc score 0.9501489525273881\n" ] } ], @@ -4865,7 +4909,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnYAAAHWCAYAAAD6oMSKAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAABeQ0lEQVR4nO3deVzUdeI/8NcwAzKAIIciiqgRGs3AcIgH3uuReaWZR1aiaVr5tXW11vxZ2fHw51rqul1q5UG7bet32yxWM/155H0gIHOkYoqAIAhyw3DM8fujmvrkESrwnuP1/G9fKLyo1nn5fjMzMqvVagUREREROTw30QWIiIiIqHlw2BERERE5CQ47IiIiIifBYUdERETkJDjsiIiIiJwEhx0RERGRk+CwIyIiInISHHZEREREToLDjoiIiMhJcNgREREROQkOOyIiIiInwWFHRERE5CQ47IiIiIicBIcdERERkZPgsCMiIiJyEhx2RERERE6Cw46IiIjISXDYERERETkJDjsiIiIiJ8FhR0REROQkOOyIiIiInASHHREREZGT4LAjIiIichIcdkREREROgsOOiIiIyElw2BERERE5CQ47IiIiIifBYUdERETkJBSiCxARNSez2YzS0lIUFRWhqKgIxYWFqDcaYTGb4SaXo41SifYdOyI4OBjBwcEICAiAXC4XXZuIqFnIrFarVXQJIqJ7VVZWhszMTOjS01FXUwOryQQfoxF+paVwN5ngZrXCIpOhUaFARUAAqpVKyBQKeHp7IyouDhqNBv7+/qK/DSKie8JhR0QOraCgAMeOHEH2hQtwr61FWG4eQkpL4VdTA3ez+Za/r1EuR4W3N64GBCA3rAsavbzQPSIC/QcOREhISCt+B0REzYfDjogckslkwtGjR5F69Ch8Skpwf04uQktKILdY7vhzmd3ccCUoCD90DUN1UBAS+vdH//79oVDwp1WIyLFw2BGRwyksLMTOlBSUXcnHAxcuICI/H27N8EeZRSbDhc6dcS4iAgGhnTF6/Hh07NixGRoTEbUODjsicig5OTnYvm0bvAquIv7sWfjW1jb716j08kJaZCRqO3XCxKlT0LVr12b/GkRELYHDjogcRk5ODv7z+ecIzMlF7++/h+Iurl2byuTmhpOqB1EaFoZJjz/OcUdEDoGvY0dEDqGwsBDbt21DQE4u+hoMLTrqAEBhsaCf3oCA3Fxs3/a/KCwsbNGvR0TUHDjsiMjumUwm7ExJgVfBVfT5/vtm+Xm6pnCzWtHH8D2UVwvwTUoKTCZTq3xdIqK7xWFHRHbv6NGjKLuSj/izZ1v8pO63FBYL4r8/i9L8fBw7dqxVvzYR0Z3isCMiu1ZQUIDUo0fxwIULLfJEiabwq61Fz6wLOHXkCK5evSqkAxFRU3DYEZFdO3bkCHxKShCRny+0R4/8fPiUlODokSNCexAR3Q6HHRHZrbKyMmRfuID7c3Jb7efqbsXNakV4Ti6ys7JQVlYmtAsR0a1w2BGR3crMzIR7bS1CS0pEVwEAdCkpgaK2FlqtVnQVIqKb4rAjIrtkNpuhS09HWG7eXb1NWEuQWyzompcHbVoazLd5H1oiIlE47IjILpWWlqKupgYhpaWiq0iEXP+xV6md9SIiAgC+wzUR2aWioiJYTSa0q66W5JFHDiPC2xtmqxXhSi+s6tEDSrkchfX1eOvSRZyrqYGfQoHQNp54LTwcQR4eAICXs7KQVVuDL2Nib/t1P8zNxbaiQhjNZpzq2++Gj/vV1MBqMqGoqAjt27dvvm+YiKgZ8MSOiOxSUVERfIzGG163rq1CgZTYOOyMi4e7mwyfF16F1WrFc99/jyH+AdjXKwFfxsTiqU6dUNrYCABosFhwsqIcDRYLcuuMt/26A/z98W9NzC0/7m42w8doRFFR0T1/j0REzY3DjojsUnFhIfx+57qzl68fco11OFZRDi+5GyZ37Gj7WIKfH3p4ewMAjpSVoZevH8a0b49vim//RIzotm3R4adTvlvxLS1DMd9ijIjsEIcdEdmleqMR7rd5Cy+T1YpDZaXo4e2Fi7W1UPn43PLXflNSjIeDgjAmqD2+KSm+524eJhMa6uru+fMQETU3DjsisksWs/mmr11XZTJhfEY6Hj2TgU5tPPFYcMeb/O5f1FssOFVRgQH+/ghTKqGQyXDpHt/Bws1qgZnvG0tEdohPniAiu+Qml8Mik92Q//wzdr8WrvTCnpLrN/0835WWotJkwkNppwEA1WYzvikpxv+Edb3rbhaZG+QK/vFJRPaHJ3ZEZJfaKJVobOJ4SmzXDtVmE7781RMaTldUIKumBt+UFGN1zwdwIKE3DiT0xn9iYvDNPb7gcYNCAQ9Pz3v6HERELYHDjojsUvuOHVERENCkXyuTyfBh5IP4f9evY9jpVIxOT8PfrxbAWy7HifJyDGjXzvZrwzyVkEOGrJqam36udTmXMfDUSVSaTBh46iQ251+54ddUBvijfcfbXwETEYkgs1oFvwEjEdFN6PV6fPPvf2PswUNwt6N3eWiUy7Fj8CCMnjwZarVadB0iIgme2BGRXQoODoZMoUDFTy9ZYi8qvL0hUygQHBwsugoR0Q34079EZJcCAgLg6e2NqwEBCKqsbPbP//rFH5D+m8/7UrfuGOjvf9vfdzXwx14BTbwmJiJqTRx2RGSX5HI5ouLicOb6dTyYmwv5b96B4l69Hn7/Hf8es5sbcrp0QVx8PORyebP2ISJqDryKJSK7pdFo0OjlhStBQbf9dY0mE64VX0PB1auorGr+072f5QUFweTlhejo6Bb7GkRE94LDjojslr+/P7pHROCHrmE3fU07ALBYrSgtLYXJZAJgRXV1NRpb4MWDLTIZLnYNQ/cePeD/O9e1RESicNgRkV3rP3AgqoOCcKFz55t+vLKyEmZzy78LRFbnzqgOCkL/AQNa/GsREd0tDjsismshISFI6N8f5yIiUOnlJflYXX09amulr0fn4dEG7nfxrhAWiwX1DQ242es/VXh54XyPCPQeMAAhISF3/LmJiFoLhx0R2b3+/fvDP7Qz0iIjYXL78Y8ti9WK8vJyya+TydzQ7lcvRtxUxro6FBYV4fr1EhRe
vYpao9H2MZObG9IejERA585ITEy8l2+DiKjFcdgRkd1TKBQYM348ajt1wknVg7DIZKioqIDFIn3hYl9fXyju4tmqVZWVwE9ndVZYUV5ehpLrJagzmXBS9SCMIZ0wevx4KPj+sERk5zjsiMghdOzYEROnTkFpWBiOPNAT1Q31ko+3aeMJ799c1TbZTZ6YYTSbcSjifmT7+SFxyGB05FuIEZED4LAjIofRtWtXDHv4YWR5e+PMoEGo9fUF8PMVrN9df942Hh6S/13j64szgwYj298fWz77DEOHDsX+/fvvqTsRUWvge8USkcOwWq2YPHkyDh8+jPFjxqCzvz8izp3Dg9eK4ePpedeft7a2FuUV5bDIZCjo0QMXHngA+aWlSPnmG1y7dg0AMGjQIBw8eLC5vhUiohbBHxghIofxr3/9C//5z38AAFs+/RSJiYmw9u+Pqvp6hOfkoktJyd29Q4WHB4q6dkXe/fejxMcHR1NTcezYMZjNv/wMX9u2bZvr2yAiajE8sSMih1BQUAC1Wo2ysjJbFhgYiO+++w7nzp5FdlYWFLW16JqXh5DrpfCrqYG72XzLz9col6PC2xtXAwNwOTQUJQ0NyMrOxtFjx1BYWCj5taGhoThw4ADuv//O34aMiKg18cSOiOye1WrF3LlzJaMOANavXw+1Wm0bfFqtFtq0NFysqYHVZIKP0Qjf0jJ4mExws1pgkbmhQaFAZYA/qpVKyBQKeHp7IzY2FpMnT8b169dv+vVVKhXCw8Nb41slIronPLEjIru3efNmzJ49W5JNnToV//rXv274tWazGaWlpSgqKkJRURGKCwvRUFcHs8kEuUIBD09PtO/YEcHBwQgODkZAQADkcjkSEhJw+vTp23aYNWtWs39vRETNicOOiOxaTk4OoqKiUFVVZcuCg4NhMBgQGBjYbF/n0KFDeOyxx1BeXo5JkyZh3759KC4utn3c19cXOp0OYWFhzfY1iYiaG4cdEdkti8WCkSNHYt++fZI8JSUF48aNa/avZ7VaUVdXB6VSiZSUFDzyyCOSjw8fPhx79uyB7Cave0dEZA/4OnZEZLc2bNhww6ibOXNmi4w6AJDJZFAqlQCA8ePHIykpSfLxvXv3YsOGDS3ytYmImgNP7IjILv3www/QaDSora21ZaGhodDr9fDzu/sXI74T5eXlUKvVyM/Pt2VeXl7QarV8MgUR2SWe2BGR3TGbzZg1a5Zk1AHApk2bWm3UAUC7du2wefNmSVZbW4tZs2ZJXuOOiMhecNgRkd1Zt24djhw5IsmeffZZjBw5stW7jBw5EvPmzZNkhw8fxt/+9rdW70JE9Ht4FUtEduXs2bOIjY1FfX29LevevTu0Wi18fHyEdKqqqoJGo0F2drYta9OmDTIyMhAZGSmkExHRzfDEjojshslkQlJSkmTUyWQybN26VdioA358O7EtW7ZIsvr6eiQlJcFkMglqRUR0Iw47IrIbq1atQmpqqiRbuHAhBg0aJKjRLwYPHoyFCxdKstTUVLz99ttiChER3QSvYonILmRmZiIhIQGNjY22rGfPnsjIyLC9BIloRqMRMTExyMrKsmXu7u5ITU2FRqMR2IyI6Ec8sSMi4RoaGjBjxgzJqHNzc0NycrLdjDoAUCqVSE5OhpvbL390NjY2IikpCQ0NDQKbERH9iMOOiIR78803odVqJdmSJUvQp08fQY1urW/fvvjzn/8syTIzM/HWW28JakRE9AtexRKRUKdOnUJiYqLkdeGioqKQmpqKNm3aCGx2a/X19ejVqxf0er0tk8vlOH78OBISEgQ2IyJXx2FHRMIYjUbExcXh3LlztkyhUCA1NRUxMTHiijVBRkYGevfuLXlWbGRkJNLT0+Hp6SmwGRG5Ml7FEpEwr776qmTUAcBrr71m96MOAGJjY/Hqq69KsrNnz96QERG1Jp7YEZEQhw8fxuDBg/HrP4Li4+Nx/PhxuLu7C2zWdI2NjejXrx/S0tJsmUwmw6FDhzBgwACBzYjIVXHYEVGrq66uhkajwaVLl2xZmzZtkJaWBpVKJbDZnTMYDIiLi5M8KzY8PByZmZnw9vYW2IyIXBGvYomo1S1ZskQy6gDgrbfecrhRBwAqleqGZ8RevHgRS5YsEdSIiFwZT+yIqFXt3bsXI0aMkGSJiYk4dOgQ5HK5oFb3xmw2Y+DAgTh+/Lgk37t3L4YNGyaoFRG5Ig47Imo1FRUViIqKQl5eni1TKpXIzMxERESEwGb37sKFC9BoNDAajbYsLCwMOp0Ovr6+ApsRkSvhVSwRtZpFixZJRh0AvP322w4/6gAgIiICq1atkmS5ublYtGiRoEZE5Ip4YkdErWLHjh0YN26cJBs6dCj27t0reYsuR2axWDB8+HAcOHBAku/YsQNjxowR1IqIXAmHHRG1uOvXr0OtVqOwsNCWtW3bFlqtFt26dRNXrAVcvnwZUVFRqK6utmUhISHQ6/UICAgQ2IyIXIFz/DWZiOzaggULJKMOANauXet0ow4AunXrhrVr10qyq1evYsGCBYIaEZEr4YkdEbWoL774ApMnT5ZkDz/8MHbu3AmZTCaoVcuyWq0YPXo0vv32W0n+xRdfYNKkSYJaEZEr4LAjohZz7do1qFQqlJSU2LJ27drBYDCgU6dOApu1vPz8fKjVapSXl9uyoKAgGAwGdOjQQVwxInJqvIolohZhtVoxb948yagDgPfff9/pRx0AdO7cGe+9954kKykpwXPPPQf+fZqIWgqHHRG1iM8++wxfffWVJJs4cSKmT58uppAATzzxBCZMmCDJvvzyS/zzn/8UU4iInB6vYomo2fEa8hdFRUVQq9UueR1NRK2PJ3ZE1KysVivmzJkjGXUAsGHDBpcbdQAQHByM9evXS7Ly8nLMmTOHV7JE1Ow47IioWW3atOmGZ4NOnz7dpZ8N+thjj+Hxxx+XZLt27cLmzZsFNSIiZ8WrWCJqNnxx3lsrLS2FSqW64UWadTodunbtKrAZETkTntgRUbOwWCx4+umnJaMOAD7++GOXH3UAEBAQgI8//liSVVVV4emnn4bFYhHUioicDYcdETWLDz744Ib3SJ09ezbfI/VXxo4di6efflqS7d+/Hx9++KGgRkTkbHgVS0T37MKFC9BoNDAajbYsLCwMOp0Ovr6+ApvZn4qKCkRFRSEvL8+WKZVKZGZmIiIiQmAzInIGPLEjontiNpuRlJQkGXUAsHnzZo66m/Dz87vhSRNGoxEzZ86E2WwW1IqInAWHHRHdkzVr1uD48eOSbP78+Rg2bJigRvZv+PDheP755yXZsWPHsHbtWkGNiMhZ8CqWiO6awWBAXFwcGhoabFl4eDgyMzPh7e0tsJn9q66uRkxMDC5evGjLPDw8kJ6eDpVKJbAZETkyntgR0V1pbGzEjBkzJKNOJpMhOTmZo64JfHx8sHXrVshkMlvW0NCApKQkNDY2CmxGRI6Mw46I7srKlSuRnp4uyRYvXoz+/fsLauR4BgwYgEWLFkmytLQ0/OUvfxHUiIgcHa9iieiOpaeno0+fPjCZTLYsMjIS6enp8PT0FNj
M8RiNRsTFxeHcuXO2TKFQ4NSpU4iNjRXYjIgcEU/siOiO1NfXIykpSTLq5HI5kpOTOeruglKpRHJyMuRyuS0zmUxISkpCfX29wGZE5Ig47Ijojrz++uvQ6/WSbOnSpUhISBDUyPH17t0bL7/8siTT6XR44403BDUiIkfFq1giarITJ06gf//+krfA0mg0OHXqFDw8PAQ2c3wNDQ1ISEiAVqu1ZW5ubjh27Bj69OkjsBkRORIOOyJqktraWsTGxiIrK8uWubu74/Tp04iOjhbYzHlkZmYiISFB8qzYnj17IiMjA0qlUmAzInIUvIoloiZZtmyZZNQBP17LctQ1H41Gg+XLl0uy8+fPY9myZYIaEZGj4YkdEf2ugwcPYsiQIZKsd+/eOHr0KBQKhZhSTspkMiExMRGpqam2TCaT4bvvvsOgQYMENiMiR8BhR0S3VVVVBY1Gg+zsbFvm6emJjIwMPPDAAwKbOa+zZ88iNjZW8qzY7t27Q6vVwsfHR2AzIrJ3vIolott66aWXJKMOAFasWMFR14IiIyOxYsUKSZadnY2XXnpJUCMichQ8sSOiW9q9ezdGjRolyQYOHIgDBw5IXneNmp/ZbMaQIUNw5MgRSb57926MHDlSUCsisnccdkR0U+Xl5VCr1cjPz7dlXl5e0Gq1CA8PF9jMdVy8eBHR0dGora21ZaGhodDpdGjXrp24YkRkt3gVS0Q3tXDhQsmoA4DVq1dz1LWi8PBwvPPOO5LsypUr+NOf/iSoERHZO57YEdENUlJS8Mgjj0iy4cOHY8+ePZDJZIJauSaLxYKRI0di3759kvzrr7/G+PHjBbUiInvFYUdEEiUlJVCr1SgqKrJlvr6+0Ol0CAsLE9jMdeXm5kKtVqOqqsqWBQcHw2AwIDAwUGAzIrI3vIolIon58+dLRh0ArFu3jqNOoLCwMKxbt06SFRUVYf78+WIKEZHd4okdEdls27YN06ZNk2Rjx45FSkoKr2AFs1qtGDduHHbu3CnJt23bhilTpghqRUT2hsOOiAAAhYWFUKlUKC0ttWX+/v4wGAwICQkR2Ix+dvXqVahUKpSVldmywMBAGAwGBAcHC2xGRPaCV7FEBKvVinnz5klGHQB8+OGHHHV2JCQkBB988IEku379OubOnQv+HZ2IAA47IgLw6aefIiUlRZI99thjmDp1qqBGdCvTpk3DpEmTJFlKSgr+/ve/C2pERPaEV7FELi4vLw9RUVGoqKiwZR06dIBer0f79u0FNqNbKS4uhkqlQnFxsS3z8/ODXq9HaGiowGZEJBpP7IhcmNVqxZw5cySjDgA2btzIUWfH2rdvj40bN0qyiooKzJ49m1eyRC6Ow47IhX300UfYs2ePJHvqqacwYcIEMYWoySZOnIgnn3xSku3Zswcff/yxoEZEZA94FUvkoi5duoTo6GjU1NTYsk6dOkGv18Pf319gM2qqsrIyqNVqFBQU2DJvb2/odDp0795dYDMiEoUndkQuyGKxYNasWZJRBwCbNm3iqHMg/v7++OSTTyRZTU0NZs2aBYvFIqgVEYnEYUfkgt59910cOnRIkj3zzDMYNWqUoEZ0tx5++GHMmTNHkh08eBDvvfeeoEZEJBKvYolczPnz5xETE4O6ujpb1q1bN2i1WrRt21ZgM7pblZWViI6ORk5Oji3z9PTEmTNn0LNnT4HNiKi18cSOyIWYTCYkJSVJRh0AbN68maPOgfn6+mLLli2SrK6uDjNnzoTZbBbUiohE4LAjciGrV6/GyZMnJdkLL7yAoUOHCmpEzWXo0KFYsGCBJDtx4gRWr14tqBERicCrWCIXodPpEB8fj8bGRlsWERGBM2fOwMvLS2Azai41NTWIiYnBDz/8YMs8PDyQlpYGtVotsBkRtRae2BG5gIaGBiQlJUlGnZubG7Zu3cpR50S8vb2RnJwMN7df/mhvaGjAjBkzJP/uich5cdgRuYAVK1YgIyNDkr344otITEwU1IhaSmJiIhYvXizJMjIysGLFCkGNiKg18SqWyMmlpaWhT58+kh+iV6lUOH36NDw9PQU2o5ZSV1eH+Ph4fP/997ZMoVDgxIkTiI+PF9iMiFoahx2RE7vZA7xcLsfJkyf5AO/kbjXo09LS0KZNG4HNiKgl8SqWyIktX75cMuoA4JVXXuGocwHx8fFYtmyZJDMYDFi+fLmgRkTUGnhiR+Skjh07hgEDBuDX/xePjY3FyZMn4e7uLrAZtZaGhgb06dMHZ86csWVubm44cuQI+vXrJ64YEbUYDjsiJ3Srl704ffo0oqKiBDaj1saXuSFyLbyKJXJCS5culYw6AHjjjTc46lxQVFQU3njjDUl24cIFLF26VFAjImpJPLEjcjIHDhzAH/7wB0nWt29fHD58GAqFQlArEslkMmHAgAE3vOvI/v37+a4jRE6Gw47IidzszeCVSiXOnDmDHj16CGxGop0/fx4xMTGS9wnu1q0btFot3yeYyInwKpbIiSxevFgy6gBg5cqVHHWEnj17YuXKlZLs8uXLePHFFwU1IqKWwBM7Iiexa9cujB49WpINHjwY+/fvl7zFFLkui8WCP/zhDzh48KAk37VrF0aNGiWoFRE1Jw47IidQVlYGtVqNgoICW+bj4wOtVovu3bsLbEb25tKlS4iOjkZNTY0t69SpE/R6Pfz9/QU2I6LmwL/GEzmBF154QTLqAGDNmjUcdXSD++67D2vWrJFkBQUF+OMf/yioERE1J57YETm47du349FHH5VkDz30EHbt2gWZTCaoFdkzq9WKUaNGYc+ePZJ8+/btmDBhgphSRNQsOOyIHFhxcTFUKhWKi4ttmZ+fH/R6PUJDQwU2I3uXl5eHqKgoVFRU2LIOHTpAr9ejffv2ApsR0b3gVSyRg7JarXjuueckow4A3n33XY46+l1dunTB3/72N0l27do1PP/88+Df94kcF0/siBzU559/junTp0uyRx55BNu3b+cVLDWJ1WrFhAkTkJKSIsk///xzTJs2TVArIroXHHZEDqigoABqtRplZWW2LDAwEAaDAcHBwQKbkaMpLCyESqVCaWmpLfP394fBYEBISIjAZkR0N3gVS+RgrFYr5s6dKxl1ALB+/XqOOrpjHTt2xPr16yVZWVkZ5s6dyytZIgfEYUfkYLZs2YKdO3dKsqlTp2Ly5MmCGpGjmzJlCqZMmSLJduzYga1bt4opRER3jVexRA4kJycHUVFRqKqqsmXBwcEwGAwIDAwU2IwcXUlJCdRqNYqKimyZr68vdDodwsLCBDYjojvBEzsiB2GxWDB79mzJqAOAjz/+mKOO7llQUBA++ugjSVZZWYnZs2fzSpbIgXDYETmIDRs2YN++fZJs5syZGDdunKBG5GzGjx+PpKQkSbZ3715s2LBBUCMiulO8iiVyAD/88AM0Gg1qa2ttWWhoKPR6Pfz8/AQ2I2dTXl4OtVqN/Px8W+bl5QWtVovw8HCBzYioKXhiR2TnzGYzZs2aJRl1ALBp0yaOOmp27dq1w+bNmyVZbW0tZs2aBbPZLKgVETUVhx2RnVu3bh2OHDkiyZ599l
mMHDlSUCNydiNHjsS8efMk2eHDh294pwoisj+8iiWyY2fPnkVsbCzq6+ttWffu3aHVauHj4yOwGTm7qqoqaDQaZGdn27I2bdogIyMDkZGRApsR0e3wxI7ITplMJiQlJUlGnUwmw9atWznqqMW1bdsWW7ZskWT19fVISkqCyWQS1IqIfg+HHZGdWrVqFVJTUyXZwoULMWjQIEGNyNUMHjwYCxculGSpqal4++23xRQiot/Fq1giO5SZmYmEhAQ0Njbasp49eyIjIwNKpVJgM3I1RqMRMTExyMrKsmXu7u5ITU2FRqMR2IyIboYndkR2pqGhATNmzJCMOjc3NyQnJ3PUUatTKpVITk6Gm9svDxeNjY1ISkpCQ0ODwGZEdDMcdkR25s0334RWq5VkS5YsQZ8+fQQ1IlfXt29f/PnPf5ZkmZmZeOuttwQ1IqJb4VUskR05deoUEhMTJa8XFhUVhdTUVLRp00ZgM3J19fX16NWrF/R6vS2Ty+U4fvw4EhISBDYjol/jsCOyE0ajEXFxcTh37pwtUygUSE1NRUxMjLhiRD/JyMhA7969Jc+KjYyMRHp6Ojw9PQU2I6Kf8SqWyE68+uqrklEHAK+99hpHHdmN2NhYvPrqq5Ls7NmzN2REJA5P7IjswOHDhzF48GD8+v+O8fHxOH78ONzd3QU2I5JqbGxEv379kJaWZstkMhkOHTqEAQMGCGxGRACHHZFw1dXV0Gg0uHTpki1r06YN0tLSoFKpBDYjujmDwYC4uDjJs2LDw8ORmZkJb29vgc2IiFexRIItWbJEMuoA4K233uKoI7ulUqlueEbsxYsXsWTJEkGNiOhnPLEjEmjv3r0YMWKEJEtMTMShQ4cgl8sFtSL6fWazGQMHDsTx48cl+d69ezFs2DBBrYiIw45IkIqKCkRFRSEvL8+WKZVKZGZmIiIiQmAzoqa5cOECNBoNjEajLQsLC4NOp4Ovr6/AZkSui1exRIIsWrRIMuoA4O233+aoI4cRERGBVatWSbLc3FwsWrRIUCMi4okdkQA7duzAuHHjJNnQoUOxd+9eyVs3Edk7i8WC4cOH48CBA5J8x44dGDNmjKBWRK6Lw46olV2/fh1qtRqFhYW2rG3bttBqtejWrZu4YkR36fLly4iKikJ1dbUtCwkJgV6vR0BAgMBmRK6HRwNErWzBggWSUQcAa9eu5agjh9WtWzesXbtWkl29ehULFiwQ1IjIdfHEjqgVffHFF5g8ebIke/jhh7Fz507IZDJBrYjundVqxejRo/Htt99K8i+++AKTJk0S1IrI9XDYEbWSa9euQaVSoaSkxJa1a9cOBoMBnTp1EtiMqHnk5+dDrVajvLzclgUFBcFgMKBDhw7iihG5EF7FErUCq9WKefPmSUYdALz//vscdeQ0OnfujPfee0+SlZSU4LnnngPPEIhaB4cdUSv47LPP8NVXX0myiRMnYvr06WIKEbWQJ554AhMmTJBkX375Jf75z3+KKUTkYngVS9TCeD1FrqaoqAhqtZo/dkAkAE/siFqQ1WrFnDlzJKMOADZs2MBRR04rODgY69evl2Tl5eWYM2cOr2SJWhiHHVEL2rRp0w3PEpw+fTqfJUhO77HHHsPjjz8uyXbt2oXNmzcLakTkGngVS9RC+KKt5OpKS0uhUqlueDFunU6Hrl27CmxG5Lx4YkfUAiwWC55++mnJqAOATz75hKOOXEZAQAA+/vhjSVZVVYWnn34aFotFUCsi58ZhR9QCPvjggxveO3P27NkYPXq0oEZEYowdOxazZs2SZPv378eHH34oqBGRc+NVLFEzy8rKQkxMDIxGoy0LCwuDTqeDr6+vwGZEYlRUVCAqKgp5eXm2zMvLC2fOnEFERITAZkTOhyd2RM3IbDZj5syZklEHAJs3b+aoI5fl5+d3w5MmamtrMXPmTJjNZkGtiJwThx1RM1qzZg2OHz8uyebPn49hw4YJakRkH4YPH47nn39ekh07dgx//etfBTUick68iiVqJgaDAXFxcWhoaLBl4eHhyMzMhLe3t8BmRPahuroaMTExuHjxoi3z8PBAeno6VCqVwGZEzoMndkTNoLGxETNmzJCMOplMhuTkZI46op/4+Phg69atkMlktqyhoQFJSUlobGwU2IzIeXDYETWDlStXIj09XZItXrwY/fv3F9SIyD4NGDAAixYtkmRpaWn4y1/+IqgRkXPhVSzRPUpPT0efPn1gMplsWWRkJNLT0+Hp6SmwGZF9MhqNiIuLw7lz52yZQqHAqVOnEBsbK7AZkePjiR3RPaivr0dSUpJk1MnlciQnJ3PUEd2CUqlEcnIy5HK5LTOZTEhKSkJ9fb3AZkSOj8OO6B68/vrr0Ov1kmzp0qVISEgQ1IjIMfTu3Rsvv/yyJNPpdHjjjTcENSJyDryKJbpLJ06cQP/+/SVvjaTRaHDq1Cl4eHgIbEbkGBoaGpCQkACtVmvL3NzccOzYMfTp00dgMyLHxWFHdBdqa2sRGxuLrKwsW+bu7o7Tp08jOjpaYDMix5KZmYmEhATJs2J79uyJjIwMKJVKgc2IHBOvYonuwrJlyySjDvjxWpajjujOaDQaLF++XJKdP38ey5YtE9SIyLHxxI7oDh08eBBDhgyRZL1798bRo0ehUCjElCJyYCaTCYmJiUhNTbVlMpkM3333HQYNGiSwGZHj4bAjugNVVVXQaDTIzs62ZZ6ensjIyMADDzwgsBmRYzt79ixiY2Mlz4rt3r07tFotfHx8BDYjciy8iiW6Ay+99JJk1AHAihUrOOqI7lFkZCRWrFghybKzs/HSSy8JakTkmHhiR9REu3fvxqhRoyTZwIEDceDAAcnrcRHR3TGbzRgyZAiOHDkiyXfv3o2RI0cKakXkWDjsiJqgvLwcarUa+fn5tszLywtarRbh4eECmxE5l4sXLyI6Ohq1tbW2LDQ0FDqdDu3atRNXjMhB8CqWqAkWLlwoGXUAsHr1ao46omYWHh6Od955R5JduXIFf/rTnwQ1InIsPLEj+h0pKSl45JFHJNnw4cOxZ88eyGQyQa2InJfFYsHIkSOxb98+Sf71119j/PjxgloROQYOO6LbKCkpgVqtRlFRkS3z9fWFTqdDWFiYwGZEzi03NxdqtRpVVVW2LDg4GAaDAYGBgQKbEdk3XsUS3cb8+fMlow4A1q1bx1FH1MLCwsKwbt06SVZUVIT58+eLKUTkIHhiR3QL27Ztw7Rp0yTZ2LFjkZKSwitYolZgtVoxbtw47Ny5U5Jv27YNU6ZMEdSKyL5x2BHdRGFhIVQqFUpLS22Zv78/DAYDQkJCBDYjci1Xr16FSqVCWVmZLQsMDITBYEBwcLDAZkT2iVexRL9htVoxb948yagDgA8//JCjjqiVhYSE4IMPPpBk169fx9y5c8FzCaIbcdgR/cann36KlJQUSfbYY49h6tSpghoRubZp06Zh0qRJkiwlJQV///vfBTUisl+8iiX6lby8PERFRaGiosKWdejQAXq9Hu3btxfYjMi1FRcXQ6VSobi42Jb5+flBr9cjNDRUYDMi+8ITO6KfWK1Wz
JkzRzLqAGDjxo0cdUSCtW/fHhs3bpRkFRUVmD17Nq9kiX6Fw47oJx999BH27NkjyZ566ilMmDBBTCEikpg4cSKefPJJSbZnzx58/PHHghoR2R9exRIBuHTpEqKjo1FTU2PLOnXqBL1eD39/f4HNiOjXysrKoFarUVBQYMu8vb2h0+nQvXt3gc2I7ANP7MjlWSwWzJo1SzLqAGDTpk0cdUR2xt/fH5988okkq6mpwaxZs2CxWAS1IrIfHHbk8t59910cOnRIkj3zzDMYNWqUoEZEdDsPP/ww5syZI8kOHjyI9957T1AjIvvBq1hyaefPn0dMTAzq6upsWbdu3aDVatG2bVuBzYjodiorKxEdHY2cnBxb5unpiTNnzqBnz54CmxGJxRM7clkmkwlJSUmSUQcAmzdv5qgjsnO+vr7YsmWLJKurq8PMmTNhNpsFtSISj8OOXNbq1atx8uRJSfbCCy9g6NChghoR0Z0YOnQoFixYIMlOnDiB1atXC2pEJB6vYskl6XQ6xMfHo7Gx0ZZFRETgzJkz8PLyEtiMiO5ETU0NYmJi8MMPP9gyDw8PpKWlQa1WC2xGJAZP7MjlNDQ0ICkpSTLq3NzcsHXrVo46Igfj7e2N5ORkuLn98nDW0NCAGTNmSP4/TuQqOOzI5axYsQIZGRmS7MUXX0RiYqKgRkR0LxITE7F48WJJlpGRgRUrVghqRCQOr2LJpaSlpaFPnz6SH65WqVQ4ffo0PD09BTYjontRV1eH+Ph4fP/997ZMoVDgxIkTiI+PF9iMqHVx2JHLuNkf/HK5HCdPnuQf/ERO4FZ/cUtLS0ObNm0ENiNqPbyKJZexfPlyyagDgFdeeYWjjshJxMfHY9myZZLMYDBg+fLlghoRtT6e2JFLOHbsGAYMGIBf/+ceGxuLkydPwt3dXWAzImpODQ0N6NOnD86cOWPL3NzccPjwYf4cLbkEDjtyerd6OYTTp08jKipKYDMiagl8OSNyZbyKJae3dOlSyagDgDfffJOjjshJRUVF4Y033pBkFy5cwNKlSwU1Imo9PLEjp7Z//34MGzZMkvXt2xdHjhyBXC4X1IqIWprJZMKAAQNueHeZ/fv3891lyKlx2JHTutmbhCuVSpw5cwY9evQQ2IyIWsP58+cRExMjeT/obt26QavV8v2gyWnxKpac1uLFiyWjDgBWrlzJUUfkInr27ImVK1dKssuXL+PFF18U1Iio5fHEjpzSrl27MHr0aEk2ePBg7N+/X/LWQ0Tk3CwWC/7whz/g4MGDknzXrl0YNWqUoFZELYfDjpxOWVkZ1Go1CgoKbJmPjw+0Wi26d+8usBkRiXDp0iVER0ejpqbGlnXu3Bk6nQ7+/v4CmxE1Px5dkNN54YUXJKMOANasWcNRR+Si7rvvPqxZs0aS5efn449//KOgRkQthyd25FS2b9+ORx99VJI99NBD2LVrF2QymaBWRCSa1WrFqFGjsGfPHkm+fft2TJgwQUwpohbAYUdOo7i4GCqVCsXFxbbMz88Per0eoaGhApsRkT3Iy8tDVFQUKioqbFmHDh2g1+vRvn17gc2Img+vYskpWK1WPPfcc5JRBwDvvvsuRx0RAQC6dOmCv/3tb5Ls2rVreP7558EzDnIWPLEjp/D5559j+vTpkuyRRx7B9u3beQVLRDZWqxUTJkxASkqKJP/8888xbdo0Qa2Img+HHTm8goICqNVqlJWV2bLAwEAYDAYEBwcLbEZE9qiwsBAqlQqlpaW2zN/fHwaDASEhIQKbEd07XsWSQ7NarZg7d65k1AHA+vXrOeqI6KY6duyI9evXS7KysjLMnTuXV7Lk8DjsyKFt2bIFO3fulGRTp07F5MmTBTUiIkcwZcoUTJkyRZLt2LEDW7duFVOIqJnwKpYcVk5ODqKiolBVVWXLgoODYTAYEBgYKLAZETmCkpISqNVqFBUV2TJfX1/odDqEhYUJbEZ093hiRw7JYrFg9uzZklEHAB9//DFHHRE1SVBQED766CNJVllZidmzZ/NKlhwWhx05pA0bNmDfvn2SbObMmRg3bpygRkTkiMaPH4+kpCRJtnfvXmzYsEFQI6J7w6tYcjg//PADNBoNamtrbVloaCj0ej38/PwENiMiR1ReXg61Wo38/Hxb5uXlBa1Wi/DwcIHNiO4cT+zIoZjNZsyaNUsy6gBg06ZNHHVEdFfatWuHzZs3S7La2lrMmjULZrNZUCuiu8NhRw5l3bp1OHLkiCR79tlnMXLkSEGNiMgZjBw5EvPmzZNkhw8fvuGdKojsHa9iyWGcPXsWsbGxqK+vt2Xdu3eHVquFj4+PwGZE5Ayqqqqg0WiQnZ1ty9q0aYOMjAxERkYKbEbUdDyxI4dgMpmQlJQkGXUymQxbt27lqCOiZtG2bVts2bJFktXX1yMpKQkmk0lQK6I7w2FHDmHVqlVITU2VZAsXLsSgQYMENSIiZzR48GAsXLhQkqWmpuLtt98WU4joDvEqluxeZmYmEhIS0NjYaMt69uyJjIwMKJVKgc2IyBkZjUbExMQgKyvLlrm7uyM1NRUajUZgM6LfxxM7smsNDQ2YMWOGZNS5ubkhOTmZo46IWoRSqURycjLc3H55iGxsbERSUhIaGhoENiP6fRx2ZNfefPNNaLVaSbZkyRL06dNHUCMicgV9+/bFn//8Z0mWmZmJt956S1AjoqbhVSzZrVOnTiExMVHyOlJRUVFITU1FmzZtBDYjIldQX1+PXr16Qa/X2zK5XI7jx48jISFBYDOiW+OwI7tkNBoRFxeHc+fO2TKFQoHU1FTExMSIK0ZELiUjIwO9e/eWPCs2MjIS6enp8PT0FNiM6OZ4FUt26dVXX5WMOgB47bXXOOqIqFXFxsbi1VdflWRnz569ISOyFzyxI7tz+PBhDB48GL/+TzM+Ph7Hjx+Hu7u7wGZE5IoaGxvRr18/pKWl2TKZTIZDhw5hwIABApsR3YjDjuxKdXU1NBoNLl26ZMvatGmDtLQ0qFQqgc2IyJUZDAbExcVJnhUbHh6OzMxMeHt7C2xGJMWrWLIrS5YskYw6AHjrrbc46ohIKJVKdcMzYi9evIglS5YIakR0czyxI7uxd+9ejBgxQpIlJibi0KFDkMvlgloREf3IbDZj4MCBOH78uCTfu3cvhg0bJqgVkRSHHdmFiooKREVFIS8vz5YplUpkZmYiIiJCYDMiol9cuHABGo0GRqPRloWFhUGn08HX11dgM6If8SqW7MKiRYskow4A3n77bY46IrIrERERWLVqlSTLzc3FokWLBDUikuKJHQm3Y8cOjBs3TpINHToUe/fulbylDxGRPbBYLBg+fDgOHDggyXfs2IExY8YIakX0Iw47Eur69etQq9UoLCy0ZW3btoVWq0W3bt3EFSMiuo3Lly8jKioK1dXVtiwkJAR6vR4BAQECm5Gr43EICbVgwQLJqAOAtWvXctQRkV3r1q0b1q5dK8muXr2KBQsWCGpE9COe2JEwX3zxBSZPnizJHn74YezcuRMymUxQKyKiprFarRg9ejS+/fZbSf7FF19g0qRJglqRq+OwIyGu
XbsGlUqFkpISW9auXTsYDAZ06tRJYDMioqbLz8+HWq1GeXm5LQsKCoLBYECHDh3EFSOXxatYanVWqxXz5s2TjDoAeP/99znqiMihdO7cGe+9954kKykpwXPPPQeem5AIHHbU6j777DN89dVXkmzixImYPn26mEJERPfgiSeewIQJEyTZl19+iX/+859iCpFL41UstSpeWxCRMyoqKoJareaPl5BwPLGjVmO1WjFnzhzJqAOAjRs3ctQRkUMLDg7G+vXrJVl5eTnmzJnDK1lqVRx21Go2bdp0w7PHpk+fjkcffVRQIyKi5vPYY4/h8ccfl2S7du3C5s2bBTUiV8SrWGoVfDFPInIFpaWlUKlUN7zouk6nQ9euXQU2I1fBEztqcRaLBbNmzZKMOgD45JNPOOqIyKkEBATg448/lmRVVVV4+umnYbFYBLUiV8JhRy3ugw8+wHfffSfJZs+ejdGjR4spRETUgsaOHYunn35aku3fvx8ffvihoEbkSngVSy0qKysLMTExMBqNtiwsLAw6nQ6+vr4CmxERtZyKigpERUUhLy/PlimVSmRmZiIiIkJgM3J2PLGjFmM2mzFz5kzJqAOAzZs3c9QRkVPz8/O74UkTRqMRM2fOhNlsFtSKXAGHHbWYNWvW4Pjx45Js/vz5GDZsmKBGREStZ/jw4Xj++ecl2bFjx7B27VpBjcgV8CqWWoTBYEBcXBwaGhpsWXh4ODIzM+Ht7S2wGRFR66murkZMTAwuXrxoyzw8PJCeng6VSiWwGTkrnthRs2tsbMSMGTMko04mkyE5OZmjjohcio+PD7Zu3QqZTGbLGhoakJSUhMbGRoHNyFlx2FGzW7lyJdLT0yXZ4sWL0b9/f0GNiIjEGTBgABYtWiTJ0tLS8Je//EVQI3JmvIqlZpWeno4+ffrAZDLZssjISKSnp8PT01NgMyIicYxGI+Li4nDu3DlbplAocOrUKcTGxgpsRs6GJ3bUbOrr65GUlCQZdXK5HMnJyRx1ROTSlEolkpOTIZfLbZnJZEJSUhLq6+sFNiNnw2FHzeb111+HXq+XZEuXLkVCQoKgRkRE9qN37954+eWXJZlOp8Mbb7whqBE5I17FUrM4ceIE+vfvL3nLHI1Gg1OnTsHDw0NgMyIi+9HQ0ICEhARotVpb5ubmhmPHjqFPnz4Cm5Gz4LCje1ZbW4vY2FhkZWXZMnd3d5w+fRrR0dECmxER2Z/MzEwkJCRInhXbs2dPZGRkQKlUCmxGzoBXsXTPli1bJhl1wI/Xshx1REQ30mg0WL58uSQ7f/48li1bJqgROROe2NE9OXjwIIYMGSLJevfujaNHj0KhUIgpRURk50wmExITE5GammrLZDIZvvvuOwwaNEhgM3J0HHZ016qqqqDRaJCdnW3LPD09kZGRgQceeEBgMyIi+3f27FnExsZKnhXbvXt3aLVa+Pj4CGxGjoxXsXTXXnrpJcmoA4AVK1Zw1BERNUFkZCRWrFghybKzs/HSSy8JakTOgCd2dFd2796NUaNGSbKBAwfiwIEDktdpIiKiWzObzRgyZAiOHDkiyXfv3o2RI0cKakWOjMOO7lh5eTnUajXy8/NtmZeXF7RaLcLDwwU2IyJyPBcvXkR0dDRqa2ttWWhoKHQ6Hdq1ayeuGDkkXsXSHVu4cKFk1AHA6tWrOeqIiO5CeHg43nnnHUl25coV/OlPfxLUiBwZT+zojqSkpOCRRx6RZMOHD8eePXsgk8kEtSIicmwWiwUjR47Evn37JPnXX3+N8ePHC2pFjojDjpqspKQEarUaRUVFtszX1xc6nQ5hYWECmxEROb7c3Fyo1WpUVVXZsuDgYBgMBgQGBgpsRo6EV7HUZPPnz5eMOgBYt24dRx0RUTMICwvDunXrJFlRURHmz58vphA5JJ7YUZNs27YN06ZNk2Rjx45FSkoKr2CJiJqJ1WrFuHHjsHPnTkm+bds2TJkyRVArciQcdvS7CgsLoVKpUFpaasv8/f1hMBgQEhIisBkRkfO5evUqVCoVysrKbFlgYCAMBgOCg4MFNiNHwKtYui2r1Yp58+ZJRh0AfPjhhxx1REQtICQkBB988IEku379OubOnQuexdDv4bCj2/r000+RkpIiyR577DFMnTpVUCMiIuc3bdo0TJo0SZKlpKTg73//u6BG5Ch4FUu3lJeXh6ioKFRUVNiyDh06QK/Xo3379gKbERE5v+LiYqhUKhQXF9syPz8/6PV6hIaGCmxG9owndnRTVqsVc+bMkYw6ANi4cSNHHRFRK2jfvj02btwoySoqKjB79mxeydItcdjRTX300UfYs2ePJHvqqacwYcIEMYWIiFzQxIkT8eSTT0qyPXv24OOPPxbUiOwdr2LpBpcuXUJ0dDRqampsWadOnaDX6+Hv7y+wGRGR6ykrK4NarUZBQYEt8/b2hk6nQ/fu3QU2I3vEEzuSsFgsmDVrlmTUAcCmTZs46oiIBPD398cnn3wiyWpqajBr1ixYLBZBrchecdiRxLvvvotDhw5JsmeeeQajRo0S1IiIiB5++GHMmTNHkh08eBDvvfeeoEZkr3gVSzbnz59HTEwM6urqbFm3bt2g1WrRtm1bgc2IiKiyshLR0dHIycmxZZ6enjhz5gx69uwpsBnZE57YEQDAZDIhKSlJMuoAYPPmzRx1RER2wNfXF1u2bJFkdXV1mDlzJsxms6BWZG847AgAsHr1apw8eVKSvfDCCxg6dKigRkRE9FtDhw7FggULJNmJEyewevVqQY3I3vAqlqDT6RAfH4/GxkZbFhERgTNnzsDLy0tgMyIi+q2amhrExMTghx9+sGUeHh5IS0uDWq0W2IzsAU/sXFxDQwOSkpIko87NzQ1bt27lqCMiskPe3t5ITk6Gm9svD+ENDQ2YMWOG5M9yck0cdi5uxYoVyMjIkGQvvvgiEhMTBTUiIqLfk5iYiMWLF0uyjIwMrFixQlAjshe8inVhaWlp6NOnj+SHblUqFU6fPg1PT0+BzYiI6PfU1dUhPj4e33//vS1TKBQ4ceIE4uPjBTYjkTjsXNTN/kCQy+U4efIk/0AgInIQt/oLelpaGtq0aSOwGYnCq1gXtXz5csmoA4BXXnmFo46IyIHEx8dj2bJlksxgMGD58uWCGpFoPLFzQceOHcOAAQPw63/1sbGxOHnyJNzd3QU2IyKiO9XQ0IC+fftKfl7azc0NR44cQb9+/QQ2IxE47FwMnyZPROR8+LJV9DNexbqYpUuXSkYdALz55pscdUREDiwqKgpvvvmmJLtw4QKWLl0qqBGJwhM7F7J//34MGzZMkvXt2xdHjhyBXC4X1IqIiJqDyWTCgAEDbngXof379/NdhFwIh52LuNmbRyuVSpw5cwY9evQQ2IyIiJrL+fPnERMTI3nf765du0Kn0/F9v10Er2JdxOLFiyWjDgBWrlzJUUdE5ER69uyJlStXSrKcnJwbXsyYnBdP7FzArl27MHr0aEk2ePBg7N+/X/KWNERE5PgsFgv+8Ic/4ODBg5J8165dGDVqlKBW1Fo47JxcWVkZ1Go1CgoKbJmPjw+0Wi2
6d+8usBkREbWUS5cuITo6GjU1NbasU6dO0Ov18Pf3F9iMWhqPa5zcCy+8IBl1ALBmzRqOOiIiJ3bfffdhzZo1kqygoAB//OMfBTWi1sITOye2fft2PProo5LsoYcewq5duyCTyQS1IiKi1mC1WjFq1Cjs2bNHkm/fvh0TJkwQU4paHIedkyouLoZKpUJxcbEt8/Pzg16vR2hoqMBmRETUWvLy8hAVFYWKigpb1qFDB+j1erRv315gM2opvIp1QlarFc8995xk1AHAu+++y1FHRORCunTpgr/97W+S7Nq1a3j++efBcx3nxBM7J/T5559j+vTpkuyRRx7B9u3beQVLRORirFYrJkyYgJSUFEn++eefY9q0aYJaUUvhsHMyBQUFUKvVKCsrs2WBgYEwGAwIDg4W2IyIiEQpLCyESqVCaWmpLfP394fBYEBISIjAZtTceBXrRKxWK+bOnSsZdQCwfv16jjoiIhfWsWNHrF+/XpKVlZVh7ty5vJJ1Mhx2TmTLli3YuXOnJJs6dSomT54sqBEREdmLKVOmYMqUKZJsx44d2Lp1q5hC1CJ4FeskcnJyEBUVhaqqKlsWHBwMg8GAwMBAgc2IiMhelJSUQK1Wo6ioyJb5+vpCp9MhLCxMYDNqLjyxcwIWiwWzZ8+WjDoA+PjjjznqiIjIJigoCB999JEkq6ysxOzZs3kl6yQ47JzAhg0bsG/fPkk2c+ZMjBs3TlAjIiKyV+PHj0dSUpIk27t3LzZs2CCoETUnXsU6uB9++AEajQa1tbW2LDQ0FHq9Hn5+fgKbERGRvSovL4darUZ+fr4t8/LyglarRXh4uMBmdK94YufAzGYzZs2aJRl1ALBp0yaOOiIiuqV27dph8+bNkqy2thazZs2C2WwW1IqaA4edA1u3bh2OHDkiyZ599lmMHDlSUCMiInIUI0eOxLx58yTZ4cOHb3inCnIsvIp1UGfPnkVsbCzq6+ttWffu3aHVauHj4yOwGREROYqqqipoNBpkZ2fbsjZt2iAjIwORkZECm9Hd4omdAzKZTEhKSpKMOplMhq1bt3LUERFRk7Vt2xZbtmyRZPX19UhKSoLJZBLUiu4Fh50DWrVqFVJTUyXZwoULMWjQIEGNiIjIUQ0ePBgLFy6UZKmpqXj77bfFFKJ7wqtYB5OZmYmEhAQ0Njbasp49eyIjIwNKpVJgMyIiclRGoxExMTHIysqyZe7u7khNTYVGoxHYjO4UT+wcSENDA2bMmCEZdW5ubkhOTuaoIyKiu6ZUKpGcnAw3t19mQWNjI5KSktDQ0CCwGd0pDjsH8uabb0Kr1UqyJUuWoE+fPoIaERGRs+jbty/+/Oc/S7LMzEy89dZbghrR3eBVrIM4deoUEhMTJa8vFBUVhdTUVLRp00ZgMyIichb19fXo1asX9Hq9LZPL5Th+/DgSEhIENqOm4rBzAEajEXFxcTh37pwtUygUSE1NRUxMjLhiRETkdDIyMtC7d2/Js2IjIyORnp4OT09Pgc2oKXgV6wBeffVVyagDgNdee42jjoiIml1sbCxeffVVSXb27NkbMrJPPLGzc4cPH8bgwYPx639N8fHxOH78ONzd3QU2IyIiZ9XY2Ih+/fohLS3NlslkMhw6dAgDBgwQ2Ix+D4edHauuroZGo8GlS5dsWZs2bZCWlgaVSiWwGREROTuDwYC4uDjJs2LDw8ORmZkJb29vgc3odngVa8eWLFkiGXUA8NZbb3HUERFRi1OpVDc8I/bixYtYsmSJoEbUFDyxs1N79+7FiBEjJFliYiIOHToEuVwuqBUREbkSs9mMgQMH4vjx45J87969GDZsmKBWdDscdnaooqICUVFRyMvLs2VKpRKZmZmIiIgQ2IyIiFzNhQsXoNFoYDQabVlYWBh0Oh18fX0FNqOb4VWsHVq0aJFk1AHA22+/zVFHREStLiIiAqtWrZJkubm5WLRokaBGdDs8sbMzO3bswLhx4yTZ0KFDsXfvXslbvRAREbUWi8WC4cOH48CBA5J8x44dGDNmjKBWdDMcdnbk+vXrUKvVKCwstGVt27aFVqtFt27dxBUjIiKXd/nyZURFRaG6utqWhYSEQK/XIyAgQGAz+jUeAdmRBQsWSEYdAKxdu5ajjoiIhOvWrRvWrl0rya5evYoFCxYIakQ3wxM7O/HFF19g8uTJkuzhhx/Gzp07IZPJBLUiIiL6hdVqxejRo/Htt99K8i+++AKTJk0S1Ip+jcPODly7dg0qlQolJSW2rF27djAYDOjUqZPAZkRERFL5+flQq9UoLy+3ZUFBQTAYDOjQoYO4YgSAV7HCWa1WzJs3TzLqAOD999/nqCMiIrvTuXNnvPfee5KspKQEzz33HHhWJB6HnWCfffYZvvrqK0n26KOPYvr06WIKERER/Y4nnngCEydOlGRffvkl/vnPfwpqRD/jVaxAPM4mIiJHdasfI9Lr9ejcubPAZq6NJ3aCWK1WzJkzRzLqAGDjxo0cdUREZPc6dOiA9evXS7Ly8nI888wzvJIViMNOkE8++eSGZxVNnz4djz76qKBGREREd+axxx7D448/Lsl27dqFTZs2CWpEvIoVgC/ySEREzqK0tBQqlUryOqw+Pj7Q6XR8HVYBeGLXyiwWC2bNmiUZdcCPJ3gcdURE5GgCAgLwySefSLLq6mo8/fTTsFgsglq5Lg67VvbBBx/gu+++k2SzZ8/G6NGjxRQiIiK6R2PGjMHTTz8tyQ4cOIAPP/xQUCPXxavYVpSVlYWYmBgYjUZbFhYWBp1OB19fX4HNiIiI7k1FRQWioqKQl5dny5RKJTIzMxERESGwmWvhiV0rMZvNmDlzpmTUAcDmzZs56oiIyOH5+flh8+bNksxoNGLmzJkwm82CWrkeDrtWsmbNGhw/flySzZ8/H8OGDRPUiIiIqHkNHz4czz//vCQ7duwY1q5dK6iR6+FVbCswGAyIi4tDQ0ODLQsPD0dmZia8vb0FNiMiImpe1dXViImJwcWLF22Zh4cH0tPToVKpBDZzDTyxa2GNjY2YMWOGZNTJZDIkJydz1BERkdPx8fHB1q1bIZPJbFlDQwOSkpLQ2NgosJlr4LBrYStXrkR6erokW7x4Mfr37y+oERERUcsaMGAAFi1aJMnS0tLwl7/8RVAj18Gr2BaUnp6OPn36wGQy2bLIyEikp6fD09NTYDMiIqKWZTQaERcXh3PnztkyhUKBU6dOITY2VmAz58YTuxZSX1+PpKQkyaiTy+VITk7mqCMiIqenVCqRnJwMuVxuy0wmE5KSklBfXy+wmXPjsGshr7/+OvR6vSRbunQpEhISBDUiIiJqXb1798bLL78syXQ6Hd544w1BjZwfr2JbwIkTJ9C/f3/JW6loNBqcOnUKHh4eApsRERG1roaGBiQkJECr1doyNzc3HDt2DH369BHYzDlx2DWz2tpaxMbGIisry5a5u7vj9OnTiI6OFtiMiIhIjMzMTCQkJEieFduzZ09kZGRAqVQKbOZ8eBXbzJYtWyYZdcCP17IcdURE5Ko0Gg2WL18uyc6fP49ly5YJauS8eGLXjA4ePI
ghQ4ZIst69e+Po0aNQKBRiShEREdkBk8mExMREpKam2jKZTIbvvvsOgwYNEtjMuXDYNZOqqipoNBpkZ2fbMk9PT2RkZOCBBx4Q2IyIiMg+nD17FrGxsZJnxXbv3h1arRY+Pj4CmzkPXsU2k5deekky6gBgxYoVHHVEREQ/iYyMxIoVKyRZdnY2XnrpJUGNnA9P7JrB7t27MWrUKEk2cOBAHDhwQPL6PURERK7ObDZjyJAhOHLkiCTfvXs3Ro4cKaiV8+Cwu0fl5eVQq9XIz8+3ZV5eXtBqtQgPDxfYjIiIyD5dvHgR0dHRqK2ttWWhoaHQ6XRo166duGJOgFex92jhwoWSUQcAq1ev5qgjIiK6hfDwcLzzzjuS7MqVK/jTn/4kqJHz4IndPUhJScEjjzwiyYYPH449e/ZAJpMJakVERGT/LBYLRo4ciX379knyr7/+GuPHjxfUyvFx2N2lkpISqNVqFBUV2TJfX1/odDqEhYUJbEZEROQYcnNzoVarUVVVZcuCg4NhMBgQGBgosJnj4lXsXZo/f75k1AHAunXrOOqIiIiaKCwsDOvWrZNkRUVFmD9/vphCToAndndh27ZtmDZtmiQbO3YsUlJSeAVLRER0B6xWK8aNG4edO3dK8m3btmHKlCmCWjkuDrs7VFhYCJVKhdLSUlvm7+8Pg8GAkJAQgc2IiIgc09WrV6FSqVBWVmbLAgMDYTAYEBwcLLCZ4+FV7B2wWq2YN2+eZNQBwIcffshRR0REdJdCQkLwwQcfSLLr169j7ty54PnTneGw+x0WiwVGoxEA8OmnnyIlJUXy8cceewxTp04VUY2IiMhpTJs2DZMmTZJkKSkp+Pvf/y6okWPiVextfPPNN3jiiSdgNBoxZcoUfP3116isrLR9vEOHDtDr9Wjfvr3AlkRERM6huLgYKpUKxcXFtszPzw96vR6hoaECmzkODrvbuP/++3Hx4sVbfnz79u2YMGFC6xUiIiJyctu3b8ejjz4qyUaOHIlvv/2WT1BsApcYdmazGaWlpSgqKkJRURGKCwtRbzTCYjbDTS5HG6US7Tt2RHBwMIKDgxEQEICqqir4+/vf8nM+/vjj+Oc//9mK3wUREZFreOqpp/CPf/xDkm3cuBFz586VZHfz+O7s7+Hu1MOurKwMmZmZ0KWno66mBlaTCT5GI/xKS+FuMsHNaoVFJkOjQoGKgABUK5WQKRTw9PZG+06d8Oyzz6KiouKmnzsiIgIHDhxA586dW/m7IiIicm5lZWVQq9UoKCiwZd7e3tDpdOjevfs9Pb5HxcVBo9Hc9vDGkTnlsCsoKMCxI0eQfeEC3GtrEZabh5DSUvjV1MDdbL7l72uUy1Hh7Y2rAQG41CkEpWYzLmRn48ixYygsLLzh1z/55JP8oU4iIqIWsGvXLowePVqSjRkzBklPPYXLP/xw14/vuWFd0Ojlhe4REeg/cKDTvaqFUw07k8mEo0ePIvXoUfiUlOD+nFyElpRAbrHc8eeqMNYi29cXuRERKPHxwdHUVBw7dgzmX/2H88gjj+Crr75qxu+AiIiIfvbMM8/gk08+gVwuR2JiIvonJKBTQwMiC67e9eO72c0NV4KC8EPXMFQHBSGhf3/0798fCoWiBb6D1uc0w66wsBA7U1JQdiUfD1y4gIj8fLjdw7dWVlYGY50RFpkMBT164MIDDyC/tBQp33yDa9euwc/PD3v27EHv3r2b8bsgIiKin1VWVmLw4MHoFRuLzv7+iDh3Dp2zLiA4KOieh5hFJsOFzp1xLiICAaGdMXr8eHTs2LGZmovjFMMuJycH27dtg1fBVcSfPQvf2tp7/pxFRUUwW345nav19cXZ+Hhc9fKC0WzGK6+84hT/ARAREdmrnJwc/OvTT+Gem4vItDR4/fSSYx7uHggMCkJzPEe20ssLaZGRqO3UCROnTkHXrl2b4bOK4/AvUJyTk4P/fP45/LMvY2BGRrOMOgD47dr1qqxEnxMnEFlXj/tCQ1FfX98sX4eIiIhu9PPje8eCq+h/+rRt1AFAQ2MDaqqrm+Xr+NbWYmBGBtpdzsZ/Pv8cOTk5zfJ5RXHoYVdYWIjt27YhICcXfQ0GKO7irv1WfNu2BX76u4BM5gZ//wB0aOeP/mfPIiA3F9u3/e9Nn1BBRERE9+a3j+/+3j6Qy6VXr5VVVWg0mZrl6yksFvTTG5zi8d1hh53JZMLOlBR4FVxFn++/v6efp7sZLy8vBAcHIzAwCB07doTS0xMA4Ga1oo/heyivFuCblBSYmuk/KiIiIrr547tMJoN/u3aA5PLVivLyshtu2O6Wszy+O+ywO3r0KMqu5CP+7NlmPan7NbmbG9p4eNxwh6+wWBD//VmU5ufj2LFjLfK1iYiIXNGtHt89PDzg4+0t+bWNjY1obGhotq/tDI/vDjnsCgoKkHr0KB64cKHZfqbuTvnV1qJn1gWcOnIEV69eFdKBiIjImfze43tb37ZQKNwlWXM/A9TRH98dctgdO3IEPiUliMjPF9qjR34+fEpKcPTIEaE9iIiInMHvPb7LIENAQADcFe4AZPDy8oaHh0ez93Dkx3eHezW+srIyZF+4gNic3Gb/ubo75Wa1IjwnF2cCA1FWVua0b09CRETU0pr6+K6Qy9G+ffsW7eLIj+8Od2KXmZkJ99pahJaUiK4CAOhSUgJFbS20Wq3oKkRERA6Lj+/Nw6GGndlshi49HWG5eXf1NiItQW6xoGteHrRpaZK3GyMiIqKm4eN787mjYffpp58iNjYWZWVlmDlzJrp37257OrBer8eQIUNu+/tTUlLw17/+9ba/5vXXX8f7779/Q/7dd99hwoQJqKupQUhp6Z3Uvq0qkwkvZ2XhD6mpePRMBmYb9Mg21uJkeTkWnP2+SZ8j5Hop6mpqUHqLXqdPn8ZLL70EACguLkafPn0QGxuLgwcP4oknnrjn7+HUqVPo1asX3N3dsWPHjnv+fERERM3lzTffhEqlQlRUFHr16oXs7Owbfk1p6Y+Po09v+9ddfY2t+flo+NUgHJp6CuPS0zA+Ix3jM9KRazTe1ee91eO7PT/uNvln7L788kusWrUKBw4csN01m0wmfP7553jqqaea9DnGjx9/dy1/Ul9fD6vJhHZ3+GrTFqsVbrKbv/HIkqws9PT2wr5evSCTyZBVU4OShsY7+vx+NTWwmkwoKiq66b1/r1690KtXLwDAvn37kJCQYBuvgwcPbvLXMZvNkMvlN+SdOnXCpk2bsGbNmjvqTURE1JKOHTuGAwcO4MyZM3B3d8eVK1fg/ZuXLAF+fBtPq8kE2V3+7HxyQT4md+yIXz+N4l+aGHjf5DHzTtzq8d2eH3ebfGL38ssv49tvv0WHDh1s2cKFC/HOO+/gt283azabsXjxYiQkJECj0eCzzz4DAGzduhUvvvgiACArKwu9evWCRqPBokWLbMMHAM6cOYNBgwbhvvvuw7/+9ct6LykpwdZPP8WY1FNYlX3Jln91rQhj09MwJj0Nn1y5AgC4UleHselpWHjuLB5OT0O1yYTZej3GpqdhbHoaDpeV4bLRiHM1NVgQ1
hWyn4ZfD29vJPj5Sb6fM5WVmJJ5BhMy0vGENhP5dXUAgBPl5RibnoZHU1PxwcaNKCoqgk6nQ1xcHGJiYhATE4Nr167hu+++w2OPPQadToc///nP+N///V/06tULly9ftn3ft/tn9uijj2LIkCGYPHnyTf/dhIaGQqPRwM3NoW7WiYjIyRUWFiIoKAju7j++REloaCj8/f2xe/du9OvXD7GxsXjyySdx5coV+PzmVO2jK3l49EwGxqWnYdNPj+0AsD4vF2PT0zAuPQ1b8vPxj4ICXGtowLTMM3j2e8Mtu8zS63D5p68x4NRJfHWtCAAw/+z3MFRXw2y1YuWlSz99zXSkXLsGd7MZPkYjioqKJJ/Lnh93m3xit3PnTnTp0kWS9ezZEz179sTXX3+N+++/35Zv2rQJISEhSE1NhdFoRN++fTFq1CjJ7124cCFeeeUVTJgwAa+88orkYxcvXsS+ffuQm5uLhx56CNOmTQMAfP/991g1YQJGXsnHDJ0WJ8vL0VWpxHu5ufiPJgZKuRxTM8+gbzs/tFO442JtLVb3fAAPeHtjd0kJ2rkrsEmthtVqRY3ZjJMVFXjA2/uWp3k/u9/LC59HayCXybDv+nV8mJeHFRER2JKfj6Xd70N/f38c6N4dxYWF+Oqrr/Dcc8/hmWeegdFolJywRUVF4c0334Rer8fq1atx+fLlJv0zy8zMREZGBnx9fZv6r4uIiEi4ESNGYPny5XjwwQcxYsQIPPXUU+jWrRveeecd7N+/H0qlEq+99hr+91//wjDFL5PkSFkZCuvr8R9NDCz4cZQN9PdHQX09jpeX48uYWHi4uaG8sRHt3N2xKf/KDSd00zLPQCaToYOHBz5RqRHv64u0ygq4yYD27h5Iq6zEhA7BOF9Tgwe8vfHvokJ08PDAlzGxqDObMTkzEwP9/eFbWoZiB3qLsSYPu3/84x944403bsiXLl2K5557Dps2bbJle/bsgV6vxz/+8Q8AQEVFBS5duiT5fWlpaXjkkUcAAFOnTsW3335r+9jYsWPh7u6O8PBwlJeX2/L7w8MR4ukJhUyGUUFBSKusRKXZhH5+7dDup78NPBQUhLSKSgwLDEQ3pRIP/HTk28PbCysuVeDt7GyMCAxE7B2MpAqTCS9lnUduXR0sViv8fnpxxDhfX6y+fBkXjbUIDQlBQ10d+vXrhzfffBPXr1/HlClTcN999zXpa9zun9lDDz3EUUdERA6nbdu2yMjIwIEDB7Bv3z6MGDECn376KbRaLfr16wfgxx+z6hYWBveQENvvO1Jehu9Ky3C6MgMAUGM2I9toRFplJSYFd4THTydlPz/238xvh168rx/+W3wNbpBhSseO+G/xNWQbaxHq6Qm5TIajZWXIqq3F18XXAADVZhPy6urgYTKh7qebOkfQ5GH39ddfo0uXLpgzZ44kj4uLg7+/P/bt22fLLBYLNm7ceMPPjxkMtz4i/bU2bdrc8mO/fm2b3zlog/JX/0K7K73wdWwcDpSWYmX2JYxr3wED/f1xvrbmtj+DBwB/y83B4IAATOsYgqyaGrx8IQsAMK9LFwzy98d3ZaV45ZudWBoXi0Uvv4zevXvjv//9L0aMGIF///vfTfqeb/fPzMvLq0mfg4iIyN4oFAqMGDECI0aMQFBQEP74xz9izJgx2LJli+3XbNm4EW6/egsvixX4n7AwPBocLPlcaZWVd90jpm1brLh0EXKZDDNCOuFQWSn2Xy9FXNsfD04sAN66/3709msn+X2ZVgvMDvS+sU2+HP7mm2/wf//v/8XOnTtv+Nj/+T//B6tXr7b975EjR+LDDz+0PT1Yr9ff8FThuLg4/Pe//wWAJo+fHy5eRHFtLUxWK/aUXEe8ry+ifdrieEU5KkyNaLBY8P+uX0ev3/yMHAAU1dfDSy7Ho8HBSOrUGWdrqtFNqUQPL298kJdr+znBCzU1OF1RIfm91SYzgj1+HJtfXvvlnj3XaESkjw+e6xKG0HbtUFpejkuXLiE8PBx/+tOfMHLkSHz/fdOeWduUf2ZERESO5Pz587h48SIAwGq1Qq/XY968eThw4ABycnIAAJWVlbheWgrLrw5YBvi3w7+LCmH86XHwSl0dqkwmJLZrh/8UFdqeAVve+OOTHb3lctT8zmOmUi6Hp5scGZWVuN/LC7G+vkguyEe834/DbkA7f3x29SrMP+2BrJoamK1WWGRukCsc5/0cmty0U6dO2LFjBx566CF8+eWXko8NGjQIYWFhtv/9zDPPIDs7G7GxsbBYLAgJCcGuXbskv+evf/0rnnzySbz66qsYOHBgk64a7w8Px/rjx/FOaSn+EBBgW9X/0yUMT2i1sAKY2CEYKh8fXPnNsWlWbS1WZV+Cm0wGTzc3/N+ICADAX3pEYMWlSxh2+jS85G7o2KYNXrkvHEX19b98P6GhWJKVhb/lXMZA/wBbvqUgHycrKiAHEBwaigdVKmzbtg3/+Mc/4O7ujq5du2LixIlITU393e+tKf/MbkWr1WL06NEoKyvDjh07EBERgePHjzfp9xIREbWU6upq/M///A8qfzppi4+PxwsvvIC4uDhMmjQJDQ0NcHNzw/gxY9D4q/E0yD8AP9TWYkrmGVgAtFUo8P4DkRgSEABDdTUmnMmAQibDpA7BSOrcGVM6dsRTOi26K5XY8KDqln3ifH2RbayFTCZDL18//PXyZcT8dGI3pWNHXKmrw4SMdFgAtP/pZ/MaFAp4eHpKPo89P+7KrL99Smsrqa2thVKphEwmwzvvvIOioiLJqd/N7Nu3D+d378aI4ydaqWXT/b9+fdHzoYcwbNgw0VWIiIgcCh/fm4+ws8VTp05h4cKFMJvNCA0Nxaeffvq7vyc4OBhpSiUa5XK429E1ZaNcjmqlEsG/+VkAIiIi+n18fG8+wobdkCFDcObMmTv6PcHBwZApFKjw9kbQPfwAZXOr8PaGTKFo8X/xu3fvxpIlSyRZ//798cEHH7To1yUiImpJ9vr4vru2Fus++gif/uc/UPx0VWzvj7uO89OAAAICAuDp7Y2rAQF29S/+auCPvQICAn7/F9+Dhx56CA899FCLfg0iIqLWZq+P710f6Iml06bi+T/+8abv/GSP7O8lk29DLpcjKi4OuWFdYLaTV3s2u7khp0sXRMfHO8y/dCIiInvCx/fmYx//9O6ARqNBo5cXrgQFia4CAMgLCoLJywvR0dGiqxARETksPr43D4cbdv7+/ugeEYEfuoZJXvNGBItMhotdw9C9Rw/4+/sL7UJEROTI+PjePBxu2AFA/4EDUR0UhAudOwvtkdW5M6qDgtB/wAChPYiIiJwBH9/vnUMOu5CQECT0749zERGoFPR2WxVeXjjfIwK9BwxAyK/e346IiIjuDh/f751DDjvgx6cb+4d2RlpkJEyt/IOWJjc3pD0YiYDOnZGYmNiqX5uIiMiZ8fH93jjssFMoFBgzfjxqO3XCSdWDrXYfb5HJcFL1IIwhnTB6/Hjb69oQERHRvePj+71x2GEHAB07dsTEqVNQGhaG42pViy97k5sbjqtVKA0Lw8SpU9CxY8cW
/XpERESuiI/vd0/Ye8U2p5ycHGzf9r/wKihA/Nmz8K2tbfavUeHlhbQHI2EM6YSJU6ega9euzf41iIiI6Bd8fL9zTjHsAKCwsBA7U1JQdiUfD1y4gIj8fLg1w7dmkcmQ1bkzzveIQEDnzhg9frxDL3kiIiJHwsf3O+M0ww4ATCYTjh49itSjR+FTUoLwnFx0KSmB3GK5489ldnNDXlAQLnYNQ3VQEHoPGIDExESHvXMnIiJyVHx8bzqnGnY/KygowLGjR5GdlQVFbS265uUh5Hop/Gpq4G423/L3NcrlqPD2xtXAAOR06QKTlxe69+iB/g76lGciIiJnwsf33+eUw+5nZWVl0Gq10Kaloa6mBlaTCT5GI3xLy+BhMsHNaoFF5oYGhQKVAf6oViohUyjg6e2N6Ph4REdHO9wrThMRETk7Pr7fmlMPu5+ZzWaUlpaiqKgIRUVFKC4sRENdHcwmE+QKBTw8PdG+Y0cEBwcjODgYAQEBDvWGv0RERK6Ij+83colhR0REROQKHPp17IiIiIjoFxx2RERERE6Cw46IiIjISXDYERERETkJDjsiIiIiJ8FhR0REROQkOOyIiIiInASHHREREZGT4LAjIiIichIcdkREREROgsOOiIiIyElw2BERERE5CQ47IiIiIifBYUdERETkJDjsiIiIiJwEhx0RERGRk+CwIyIiInISHHZEREREToLDjoiIiMhJcNgREREROQkOOyIiIiInwWFHRERE5CQ47IiIiIicBIcdERERkZPgsCMiIiJyEhx2RERERE6Cw46IiIjISfx/ZNSdbtJKiCMAAAAASUVORK5CYII=", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAnYAAAHWCAYAAAD6oMSKAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/H5lhTAAAACXBIWXMAAA9hAAAPYQGoP6dpAABxbklEQVR4nO3dd1hUV/4/8PcU2oAgiAKCoEuw0UHUiNgFAmiMu4nGJJaYYtZd45pkXX8pZpOvm03buOlFE3U3bZNsEgKixhIVsNCkqqAiUmSULp0pvz9M7nptGXWGOzO8X8/j88SPc2Y+qPG+Oeeec2V6vV4PIiIiIrJ4cqkbICIiIiLjYLAjIiIishIMdkRERERWgsGOiIiIyEow2BERERFZCQY7IiIiIivBYEdERERkJRjsiIiIiKwEgx0RERGRlWCwIyIiIrISDHZEREREVoLBjoiIiMhKMNgRERERWQkGOyIiIiIrwWBHREREZCUY7IiIiIisBIMdERERkZVgsCMiIiKyEgx2RERERFaCwY6IiIjISjDYEREREVkJBjsiIiIiK8FgR0RERGQlGOyIiIiIrASDHREREZGVYLAjIiIishIMdkRERERWgsGOiIiIyEoopW6AiMiYtFotGhoaoFaroVarcb62Fl0dHdBptZArFLBzcMBAT094eHjAw8MDbm5uUCgUUrdNRGQUMr1er5e6CSKiW9XY2Ij8/HwU5uais60Neo0GTh0dcGlogI1GA7leD51Mhh6lEs1ubmh1cIBMqYS9oyOCIyIQGhoKV1dXqb8MIqJbwmBHRBatpqYGmenpKC8rg017O3zPVMKroQEubW2w0WqvOa5HoUCzoyPOurnhjO8Q9KhUGBYQgOiYGHh5efXiV0BEZDwMdkRkkTQaDTIyMpCVkQGnujrcVnEGPnV1UOh0N/xeWrkcVe7uOOHni1Z3d0RFRyM6OhpKJe9WISLLwmBHRBantrYWqcnJaKyqxsiyMgRUV0NuhH/KdDIZyry9cSwgAG4+3kiYPRuenp5G6JiIqHcw2BGRRamoqMC3X34JVc1ZRB49Cuf2dqN/RotKhZxRo9A+eDDumncP/Pz8jP4ZRESmwGBHRBajoqIC33z+OQZUnMHYkhIob2LZ1VAauRyHAkejwdcXv733XoY7IrIIPMeOiCxCbW0tvv3yS7hVnMH44mKThjoAUOp0uL2oGG5nzuDbL/+D2tpak34eEZExMNgRkdnTaDRITU6GquYsxpWUGOV+OkPI9XqMKy6Bw9kabE1Ohkaj6ZXPJSK6WQx2RGT2MjIy0FhVjcijR00+U3c5pU6HyJKjaKiuRmZmZq9+NhHRjWKwIyKzVlNTg6yMDIwsKzPJRglDuLS3Y0RpGQ6np+Ps2bOS9EBEZAgGOyIya5np6XCqq0NAdbWkfQyvroZTXR0y0tMl7YOI6HoY7IjIbDU2NqK8rAy3VZzptfvqrkWu18O/4gzKS0vR2NgoaS9ERNfCYEdEZis/Px827e3wqauTuhUAwJC6Oijb21FQUCB1K0REV8VgR0RmSavVojA3F75nKm/qMWGmoNDp4FdZiYKcHGiv8xxaIiKpMNgRkVlqaGhAZ1sbvBoapG5FxKv+Yl8NZtYXERHAYEdEt+CFF15AYGAggoODMWbMGJSXl1/zte7u7jf03mq1GnqNBsnHjqH7khm7qVmHMSs3B7Nyc7CkqBDnu7tvuv+b4dLWhj1790KtVgO4uGv3vvvuAwBs2rQJTz755A2/55NPPokRI0YgODgYDz74IM/LI6KbxmBHRDclMzMTe/bswZEjR1BYWIjvvvsO/fv3N9r7q9VqOHV04F/VVei5bOPEF6Fh+CEiEkFO/fB+ZaVB76c10uYLG60W+zIzhWA3ePBgfPrpp7f0nnFxcSguLkZBQQG6urqwZcsWY7RKRH0Qgx0R3ZTa2lq4u7vDxsYGAODj4wNXV1ds374dt99+O8LDw3H//fej+yozai+//DKioqIQEhKC1157TaivW7cOwcHBCAkJweZPPsHh9HSc6+7G/PwjWFZSfMX7RLk4o6KzA1q9Hi+dOoW5R/IwKzcXyefOAQD+q1Zj+dES3F9QgBXHjuJ8dzeWlRRjVm4u7szLxemODgDAh1WVP4/NwcaqKgDAoaYmLC4qxGMlJYjNzsbfTp0CAPzj9Gl0dHZi6YMPYtmyZTh9+jTGjBlzRW/nz5/H3LlzMWbMGNx+++3Iy8u75u/lzJkzoVQqIZPJMGbMGFRLfLQLEVkuBjsiuikzZ87EsWPHMHr0aDz++OPIzs5GXV0dXn31VezevRt5eXn4zW9+g48++kg0bseOHaiqqsLhw4eRl5eHrVu3oqioCFu3bsXu3buRnZ2NgoICjIuKQsLw4Rhka4svQsPw/ujAK3rY3dCAESpHfKWuxSBbW/w3LBxfhYbio6oqNPb0AACOtbXh/dGj8c6o0fi/Uycx1c0NP0RE4KvQMAyytUV6YyNqu7rwTWgYvguPwN7GBpS2tQEASlpb8eJttyElIgJ7GupR09mJVUOHwtHWFi889xzef//9a/7+rFy5EmvWrEF2dja2bNmCZcuW/ervqUajwWeffYbY2Ngb+aMgIhIopW6AiCxTv379kJeXhz179mDXrl2YOXMmtmzZgoKCAtx+++0AgK6uLiQmJorG7dixA6mpqdi/fz8A4MKFCygtLUV6ejqWLFkCOzs7
[base64-encoded PNG figure output from the notebook cell elided]", "text/plain": [ "
" ] diff --git a/tpot2/graphsklearn.py b/tpot2/graphsklearn.py index e0d500ae..7c519af5 100644 --- a/tpot2/graphsklearn.py +++ b/tpot2/graphsklearn.py @@ -231,7 +231,7 @@ def __init__( graph, cross_val_predict_cv=0, #signature function(estimator, X, y=none) method='auto', - memory=None, #TODO memory caching like sklearn.pipeline + memory=None, use_label_encoder=False, **kwargs, ): @@ -252,7 +252,7 @@ def __init__( The prediction method to use for the inner classifiers or regressors. If 'auto', it will try to use predict_proba, decision_function, or predict in that order. memory: str or object with the joblib.Memory interface, optional - Used to cache the fitted transformers of the pipeline. By default, no caching is performed. If a string is given, it is the path to the caching directory. + Used to cache the input and outputs of nodes to prevent refitting or computationally heavy transformations. By default, no caching is performed. If a string is given, it is the path to the caching directory. use_label_encoder: bool, optional If True, the label encoder is used to encode the labels to be 0 to N. If False, the label encoder is not used. diff --git a/tpot2/search_spaces/base.py b/tpot2/search_spaces/base.py index 88955ba7..09fc61e5 100644 --- a/tpot2/search_spaces/base.py +++ b/tpot2/search_spaces/base.py @@ -6,7 +6,10 @@ from typing import Generator, List, Tuple, Union import random from sklearn.base import BaseEstimator - +import sklearn +import networkx as nx +from . import graph_utils +from typing import final class SklearnIndividual(tpot2.BaseIndividual): @@ -25,10 +28,107 @@ def export_pipeline(self) -> BaseEstimator: def unique_id(self): return self + @final + def export_flattened_graphpipeline(self) -> tpot2.GraphPipeline: + return flatten_to_graphpipeline(self.export_pipeline()) class SklearnIndividualGenerator(): def __init__(self,): pass def generate(self, rng=None) -> SklearnIndividual: - pass \ No newline at end of file + pass + + + + + + +def flatten_graphpipeline(est): + flattened_full_graph = est.graph.copy() + + #put ests into the node label from the attributes + + flattened_full_graph = nx.relabel_nodes(flattened_full_graph, {n: flattened_full_graph.nodes[n]['instance'] for n in flattened_full_graph.nodes}) + + + remove_list = [] + for node in flattened_full_graph.nodes: + if isinstance(node, nx.DiGraph): + flattened = flatten_any(node) + + roots = graph_utils.get_roots(flattened) + leaves = graph_utils.get_leaves(flattened) + + n1_s = flattened_full_graph.successors(node) + n1_p = flattened_full_graph.predecessors(node) + + remove_list.append(node) + + flattened_full_graph = nx.compose(flattened_full_graph, flattened) + + + flattened_full_graph.add_edges_from([ (n2, n) for n in n1_s for n2 in leaves]) + flattened_full_graph.add_edges_from([ (n, n2) for n in n1_p for n2 in roots]) + + for node in remove_list: + flattened_full_graph.remove_node(node) + + return flattened_full_graph + +def flatten_pipeline(est): + graph = nx.DiGraph() + steps = [flatten_any(s[1]) for s in est.steps] + + #add steps to graph and connect them + for s in steps: + graph = nx.compose(graph, s) + + #connect leaves of each step to the roots of the next step + for i in range(len(steps)-1): + roots = graph_utils.get_roots(steps[i]) + leaves = graph_utils.get_leaves(steps[i+1]) + graph.add_edges_from([ (l,r) for l in leaves for r in roots]) + + + return graph + + + +def flatten_estimator(est): + graph = nx.DiGraph() + graph.add_node(est) + return graph + +def flatten_any(est): + if isinstance(est, 
tpot2.GraphPipeline): + return flatten_graphpipeline(est) + elif isinstance(est, sklearn.pipeline.Pipeline): + return flatten_pipeline(est) + else: + return flatten_estimator(est) + + +def flatten_to_graphpipeline(est): + #rename nodes to string representation of the instance and put the instance in the node attributes + flattened_full_graph = flatten_any(est) + + instance_to_label = {} + label_to_instance = {} + for node in flattened_full_graph.nodes: + found_unique_label = False + i=1 + while not found_unique_label: + new_label = f"{node.__class__.__name__}_{i}" + if new_label not in label_to_instance: + found_unique_label = True + i+=1 + label_to_instance[new_label] = node + instance_to_label[node] = new_label + + flattened_full_graph = nx.relabel_nodes(flattened_full_graph, instance_to_label) + + for label, instance in label_to_instance.items(): + flattened_full_graph.nodes[label]["instance"] = instance + + return tpot2.GraphPipeline(flattened_full_graph) \ No newline at end of file diff --git a/tpot2/search_spaces/pipelines/__init__.py b/tpot2/search_spaces/pipelines/__init__.py index ec90eb0e..b0c2c74d 100644 --- a/tpot2/search_spaces/pipelines/__init__.py +++ b/tpot2/search_spaces/pipelines/__init__.py @@ -3,4 +3,6 @@ from .sequential import * from .graph import * from .tree import * -from .wrapper import * \ No newline at end of file +from .wrapper import * + +from . import graph_utils \ No newline at end of file diff --git a/tpot2/search_spaces/pipelines/graph.py b/tpot2/search_spaces/pipelines/graph.py index 0ebe7092..9f70577f 100644 --- a/tpot2/search_spaces/pipelines/graph.py +++ b/tpot2/search_spaces/pipelines/graph.py @@ -1,10 +1,6 @@ import tpot2 import numpy as np -import pandas as pd -import sklearn -from tpot2 import config from typing import Generator, List, Tuple, Union -import random from ..base import SklearnIndividual, SklearnIndividualGenerator import networkx as nx import copy @@ -12,20 +8,67 @@ import itertools from .graph_utils import * from ..nodes.estimator_node import EstimatorNodeIndividual - +from typing import Union, Callable +import sklearn class GraphPipelineIndividual(SklearnIndividual): - def __init__(self, - root_search_space : SklearnIndividualGenerator, - leaf_search_space : SklearnIndividualGenerator = None, - inner_search_space : SklearnIndividualGenerator =None, - max_size: int = 10, - crossover_same_depth=False, - rng=None) -> None: - """ - Generates a tree shaped pipeline individual. Can be used to export a sklearn Pipeline that uses feature unions to merge branches of the pipeline. + """ + Defines a search space of pipelines in the shape of a Directed Acyclic Graphs. The search spaces for root, leaf, and inner nodes can be defined separately if desired. + Each graph will have a single root serving as the final estimator which is drawn from the `root_search_space`. If the `leaf_search_space` is defined, all leaves + in the pipeline will be drawn from that search space. If the `leaf_search_space` is not defined, all leaves will be drawn from the `inner_search_space`. + Nodes that are not leaves or roots will be drawn from the `inner_search_space`. If the `inner_search_space` is not defined, there will be no inner nodes. + + `cross_val_predict_cv`, `method`, `memory`, and `use_label_encoder` are passed to the GraphPipeline object when the pipeline is exported and not directly used in the search space. + + Exports to a GraphPipeline object. 
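A minimal usage sketch of this search space, assuming two user-provided EstimatorNode-style search spaces (the names classifier_space and transformer_space below are placeholders, not defined in this patch); the GraphPipeline generator, generate, export_pipeline, and export_flattened_graphpipeline calls are the ones shown in the diffs above:

    import numpy as np
    from tpot2.search_spaces.pipelines.graph import GraphPipeline

    # classifier_space / transformer_space are assumed, user-built
    # SklearnIndividualGenerator instances (e.g. EstimatorNode search spaces).
    graph_space = GraphPipeline(
        root_search_space=classifier_space,    # root = final estimator
        inner_search_space=transformer_space,  # inner (and here also leaf) nodes
        max_size=10,
        cross_val_predict_cv=5,                # forwarded to the exported GraphPipeline, not used during search
    )

    ind = graph_space.generate(rng=np.random.default_rng(1))  # sample one DAG-shaped individual
    est = ind.export_pipeline()                               # tpot2.GraphPipeline estimator
    flat = ind.export_flattened_graphpipeline()               # nested (Graph)Pipelines flattened into one graph
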
+ + Parameters + ---------- + + root_search_space: SklearnIndividualGenerator + The search space for the root node of the graph. This node will be the final estimator in the pipeline. + inner_search_space: SklearnIndividualGenerator, optional + The search space for the inner nodes of the graph. If not defined, there will be no inner nodes. + + leaf_search_space: SklearnIndividualGenerator, optional + The search space for the leaf nodes of the graph. If not defined, the leaf nodes will be drawn from the inner_search_space. + + crossover_same_depth: bool, optional + If True, crossover will only occur between nodes at the same depth in the graph. If False, crossover will occur between nodes at any depth. + + cross_val_predict_cv: int, cross-validation generator or an iterable, optional + Determines the cross-validation splitting strategy used in inner classifiers or regressors + + method: str, optional + The prediction method to use for the inner classifiers or regressors. If 'auto', it will try to use predict_proba, decision_function, or predict in that order. + + memory: str or object with the joblib.Memory interface, optional + Used to cache the input and outputs of nodes to prevent refitting or computationally heavy transformations. By default, no caching is performed. If a string is given, it is the path to the caching directory. + + use_label_encoder: bool, optional + If True, the label encoder is used to encode the labels to be 0 to N. If False, the label encoder is not used. + Mainly useful for classifiers (XGBoost) that require labels to be ints from 0 to N. + Can also be a sklearn.preprocessing.LabelEncoder object. If so, that label encoder is used. + + rng: int, RandomState instance or None, optional + Seed for sampling the first graph instance. 
+ """ + + def __init__( + self, + root_search_space: SklearnIndividualGenerator, + leaf_search_space: SklearnIndividualGenerator = None, + inner_search_space: SklearnIndividualGenerator = None, + max_size: int = np.inf, + crossover_same_depth: bool = False, + cross_val_predict_cv: Union[int, Callable] = 0, #signature function(estimator, X, y=none) + method: str = 'auto', + memory=None, + use_label_encoder: bool = False, + rng=None): + super().__init__() self.__debug = False @@ -38,6 +81,11 @@ def __init__(self, self.max_size = max_size self.crossover_same_depth = crossover_same_depth + self.cross_val_predict_cv = cross_val_predict_cv + self.method = method + self.memory = memory + self.use_label_encoder = use_label_encoder + self.root = self.root_search_space.generate(rng) self.graph = nx.DiGraph() self.graph.add_node(self.root) @@ -535,7 +583,7 @@ def _merge_duplicated_nodes(self): return graph_changed - def export_pipeline(self, **graph_pipeline_args): + def export_pipeline(self): estimator_graph = self.graph.copy() #mapping = {node:node.method_class(**node.hyperparameters) for node in estimator_graph} @@ -561,7 +609,7 @@ def export_pipeline(self, **graph_pipeline_args): for label, instance in label_to_instance.items(): estimator_graph.nodes[label]["instance"] = instance - return tpot2.GraphPipeline(graph=estimator_graph, **graph_pipeline_args) + return tpot2.GraphPipeline(graph=estimator_graph, memory=self.memory, use_label_encoder=self.use_label_encoder, method=self.method, cross_val_predict_cv=self.cross_val_predict_cv) def plot(self): @@ -621,28 +669,74 @@ def unique_id(self): class GraphPipeline(SklearnIndividualGenerator): - def __init__(self, root_search_space : SklearnIndividualGenerator, - leaf_search_space : SklearnIndividualGenerator = None, - inner_search_space : SklearnIndividualGenerator =None, - max_size: int = np.inf, - crossover_same_depth=False) -> None: + def __init__(self, + root_search_space: SklearnIndividualGenerator, + leaf_search_space: SklearnIndividualGenerator = None, + inner_search_space: SklearnIndividualGenerator = None, + max_size: int = np.inf, + crossover_same_depth: bool = False, + cross_val_predict_cv: Union[int, Callable] = 0, #signature function(estimator, X, y=none) + method: str = 'auto', + memory=None, + use_label_encoder: bool = False,): """ - Generates a directed acyclic graph of variable size. Search spaces for root, leaf, and inner nodes can be defined separately if desired. + Defines a search space of pipelines in the shape of a Directed Acyclic Graphs. The search spaces for root, leaf, and inner nodes can be defined separately if desired. + Each graph will have a single root serving as the final estimator which is drawn from the `root_search_space`. If the `leaf_search_space` is defined, all leaves + in the pipeline will be drawn from that search space. If the `leaf_search_space` is not defined, all leaves will be drawn from the `inner_search_space`. + Nodes that are not leaves or roots will be drawn from the `inner_search_space`. If the `inner_search_space` is not defined, there will be no inner nodes. + + `cross_val_predict_cv`, `method`, `memory`, and `use_label_encoder` are passed to the GraphPipeline object when the pipeline is exported and not directly used in the search space. + Exports to a GraphPipeline object. + Parameters + ---------- + + root_search_space: SklearnIndividualGenerator + The search space for the root node of the graph. This node will be the final estimator in the pipeline. 
+ + inner_search_space: SklearnIndividualGenerator, optional + The search space for the inner nodes of the graph. If not defined, there will be no inner nodes. + + leaf_search_space: SklearnIndividualGenerator, optional + The search space for the leaf nodes of the graph. If not defined, the leaf nodes will be drawn from the inner_search_space. + + crossover_same_depth: bool, optional + If True, crossover will only occur between nodes at the same depth in the graph. If False, crossover will occur between nodes at any depth. + + cross_val_predict_cv: int, cross-validation generator or an iterable, optional + Determines the cross-validation splitting strategy used in inner classifiers or regressors + + method: str, optional + The prediction method to use for the inner classifiers or regressors. If 'auto', it will try to use predict_proba, decision_function, or predict in that order. + + memory: str or object with the joblib.Memory interface, optional + Used to cache the input and outputs of nodes to prevent refitting or computationally heavy transformations. By default, no caching is performed. If a string is given, it is the path to the caching directory. + + use_label_encoder: bool, optional + If True, the label encoder is used to encode the labels to be 0 to N. If False, the label encoder is not used. + Mainly useful for classifiers (XGBoost) that require labels to be ints from 0 to N. + Can also be a sklearn.preprocessing.LabelEncoder object. If so, that label encoder is used. + """ - self.search_space = root_search_space + self.root_search_space = root_search_space self.leaf_search_space = leaf_search_space self.inner_search_space = inner_search_space self.max_size = max_size self.crossover_same_depth = crossover_same_depth + self.cross_val_predict_cv = cross_val_predict_cv + self.method = method + self.memory = memory + self.use_label_encoder = use_label_encoder + def generate(self, rng=None): rng = np.random.default_rng(rng) - ind = GraphPipelineIndividual(self.search_space, self.leaf_search_space, self.inner_search_space, self.max_size, self.crossover_same_depth, rng=rng) + ind = GraphPipelineIndividual(self.root_search_space, self.leaf_search_space, self.inner_search_space, self.max_size, self.crossover_same_depth, + self.cross_val_predict_cv, self.method, self.memory, self.use_label_encoder, rng=rng) # if user specified limit, grab a random number between that limit n_nodes = min(rng.integers(1, self.max_size), 5) From 59dad61b6854c160220e4986bf19d2e8fcf3229b Mon Sep 17 00:00:00 2001 From: perib Date: Wed, 10 Apr 2024 11:42:04 -0700 Subject: [PATCH 3/6] flatten to graphpipeline, steadystate --- tpot2/config/get_configspace.py | 13 ++- tpot2/evolvers/__init__.py | 2 +- tpot2/evolvers/steady_state_evolver.py | 6 - tpot2/search_spaces/base.py | 8 +- .../{pipelines => }/graph_utils.py | 0 .../nodes/genetic_feature_selection.py | 6 +- tpot2/search_spaces/pipelines/__init__.py | 4 +- tpot2/search_spaces/pipelines/graph.py | 5 +- tpot2/search_spaces/pipelines/tree.py | 2 +- tpot2/tpot_estimator/estimator.py | 44 +++++--- tpot2/tpot_estimator/estimator_utils.py | 104 ++++-------------- .../tpot_estimator/steady_state_estimator.py | 79 ++++++------- 12 files changed, 111 insertions(+), 162 deletions(-) rename tpot2/search_spaces/{pipelines => }/graph_utils.py (100%) diff --git a/tpot2/config/get_configspace.py b/tpot2/config/get_configspace.py index 2c4485bf..44892278 100644 --- a/tpot2/config/get_configspace.py +++ b/tpot2/config/get_configspace.py @@ -18,7 +18,8 @@ from . 
import classifiers_sklearnex from . import regressors_sklearnex - +from ConfigSpace import ConfigurationSpace +from ConfigSpace import ConfigurationSpace, Integer, Float, Categorical, Normal #autoqtl_builtins from tpot2.builtin_modules import genetic_encoders @@ -163,7 +164,7 @@ "classifiers" : ["LogisticRegression", "DecisionTreeClassifier", "KNeighborsClassifier", "GradientBoostingClassifier", "ExtraTreesClassifier", "RandomForestClassifier", "SGDClassifier", "GaussianNB", "BernoulliNB", "MultinomialNB", "XGBClassifier", "SVC", "MLPClassifier"], "regressors" : ["ElasticNetCV", "ExtraTreesRegressor", "GradientBoostingRegressor", "AdaBoostRegressor", "DecisionTreeRegressor", "KNeighborsRegressor", "LassoLarsCV", "SVR", "RandomForestRegressor", "RidgeCV", "XGBRegressor", "SGDRegressor" ], "transformers": ["Binarizer", "Normalizer", "PCA", "ZeroCount", "OneHotEncoder", "FastICA", "FeatureAgglomeration", "Nystroem", "RBFSampler"], - "arithmatic": ["AddTransformer", "mul_neg_1_Transformer", "MulTransformer", "SafeReciprocalTransformer", "EQTransformer", "NETransformer", "GETransformer", "GTTransformer", "LETransformer", "LTTransformer", "MinTransformer", "MaxTransformer", "ZeroTransformer", "OneTransformer", "NTransformer"], + "arithmatic": ["AddTransformer", "mul_neg_1_Transformer", "MulTransformer", "SafeReciprocalTransformer", "EQTransformer", "NETransformer", "GETransformer", "GTTransformer", "LETransformer", "LTTransformer", "MinTransformer", "MaxTransformer"], "imputers": [], "skrebate": ["ReliefF", "SURF", "SURFstar", "MultiSURF"], "genetic_encoders": ["DominantEncoder", "RecessiveEncoder", "HeterosisEncoder", "UnderDominanceEncoder", "OverDominanceEncoder"], @@ -286,7 +287,13 @@ def get_configspace(name, n_classes=3, n_samples=100, n_features=100, random_sta case "OneTransformer": return {} case "NTransformer": - return {} + return ConfigurationSpace( + + space = { + + 'n': Float("n", bounds=(-1e3, 1e3), log=True), + } + ) #imputers.py diff --git a/tpot2/evolvers/__init__.py b/tpot2/evolvers/__init__.py index 1d6af1a9..cf130f80 100644 --- a/tpot2/evolvers/__init__.py +++ b/tpot2/evolvers/__init__.py @@ -1,2 +1,2 @@ from .base_evolver import * -#from .steady_state_evolver import * \ No newline at end of file +from .steady_state_evolver import * \ No newline at end of file diff --git a/tpot2/evolvers/steady_state_evolver.py b/tpot2/evolvers/steady_state_evolver.py index 5db3e502..1aa457c8 100644 --- a/tpot2/evolvers/steady_state_evolver.py +++ b/tpot2/evolvers/steady_state_evolver.py @@ -1,17 +1,11 @@ #All abstract methods in the Evolutionary_Optimization module - -from abc import abstractmethod import tpot2 import typing import tqdm -from tpot2.individual_representations import BaseIndividual import time import numpy as np -import copy -import scipy import os import pickle -import statistics from tqdm.dask import TqdmCallback import distributed from dask.distributed import Client diff --git a/tpot2/search_spaces/base.py b/tpot2/search_spaces/base.py index 09fc61e5..80388708 100644 --- a/tpot2/search_spaces/base.py +++ b/tpot2/search_spaces/base.py @@ -29,8 +29,8 @@ def unique_id(self): return self @final - def export_flattened_graphpipeline(self) -> tpot2.GraphPipeline: - return flatten_to_graphpipeline(self.export_pipeline()) + def export_flattened_graphpipeline(self, **graphpipeline_kwargs) -> tpot2.GraphPipeline: + return flatten_to_graphpipeline(self.export_pipeline(), **graphpipeline_kwargs) class SklearnIndividualGenerator(): def __init__(self,): @@ -109,7 +109,7 @@ def 
flatten_any(est): return flatten_estimator(est) -def flatten_to_graphpipeline(est): +def flatten_to_graphpipeline(est, **graphpipeline_kwargs): #rename nodes to string representation of the instance and put the instance in the node attributes flattened_full_graph = flatten_any(est) @@ -131,4 +131,4 @@ def flatten_to_graphpipeline(est): for label, instance in label_to_instance.items(): flattened_full_graph.nodes[label]["instance"] = instance - return tpot2.GraphPipeline(flattened_full_graph) \ No newline at end of file + return tpot2.GraphPipeline(flattened_full_graph, **graphpipeline_kwargs) \ No newline at end of file diff --git a/tpot2/search_spaces/pipelines/graph_utils.py b/tpot2/search_spaces/graph_utils.py similarity index 100% rename from tpot2/search_spaces/pipelines/graph_utils.py rename to tpot2/search_spaces/graph_utils.py diff --git a/tpot2/search_spaces/nodes/genetic_feature_selection.py b/tpot2/search_spaces/nodes/genetic_feature_selection.py index e51ff8ba..1894026a 100644 --- a/tpot2/search_spaces/nodes/genetic_feature_selection.py +++ b/tpot2/search_spaces/nodes/genetic_feature_selection.py @@ -157,7 +157,7 @@ def __init__(self, crossover_rate = 0.5, mutation_rate_rate = 0, crossover_rate_rate = 0, - rng=None,): + ): self.n_features = n_features self.start_p = start_p @@ -165,7 +165,7 @@ def __init__(self, self.crossover_rate = crossover_rate self.mutation_rate_rate = mutation_rate_rate self.crossover_rate_rate = crossover_rate_rate - self.rng = rng + def generate(self, rng=None) -> SklearnIndividual: return GeneticFeatureSelectorIndividual( mask=self.n_features, @@ -174,5 +174,5 @@ def generate(self, rng=None) -> SklearnIndividual: crossover_rate=self.crossover_rate, mutation_rate_rate=self.mutation_rate_rate, crossover_rate_rate=self.crossover_rate_rate, - rng=self.rng + rng=rng ) \ No newline at end of file diff --git a/tpot2/search_spaces/pipelines/__init__.py b/tpot2/search_spaces/pipelines/__init__.py index b0c2c74d..ec90eb0e 100644 --- a/tpot2/search_spaces/pipelines/__init__.py +++ b/tpot2/search_spaces/pipelines/__init__.py @@ -3,6 +3,4 @@ from .sequential import * from .graph import * from .tree import * -from .wrapper import * - -from . 
import graph_utils \ No newline at end of file +from .wrapper import * \ No newline at end of file diff --git a/tpot2/search_spaces/pipelines/graph.py b/tpot2/search_spaces/pipelines/graph.py index 9f70577f..5c6668b9 100644 --- a/tpot2/search_spaces/pipelines/graph.py +++ b/tpot2/search_spaces/pipelines/graph.py @@ -6,7 +6,7 @@ import copy import matplotlib.pyplot as plt import itertools -from .graph_utils import * +from ..graph_utils import * from ..nodes.estimator_node import EstimatorNodeIndividual from typing import Union, Callable import sklearn @@ -360,7 +360,8 @@ def _crossover_swap_branch(self, G2, rng=None): node1_is_leaf = len(list(self.graph.successors(node1))) == 0 node2_is_leaf = len(list(G2.graph.successors(node2))) == 0 #if not ((node1_is_leaf and node1_is_leaf) or (not node1_is_leaf and not node2_is_leaf)): #if node1 is a leaf - if (node1_is_leaf and (not node2_is_leaf)) or ( (not node1_is_leaf) and node2_is_leaf): + #if (node1_is_leaf and (not node2_is_leaf)) or ( (not node1_is_leaf) and node2_is_leaf): + if not node1_is_leaf: #only continue if node1 and node2 are both leaves or both not leaves continue diff --git a/tpot2/search_spaces/pipelines/tree.py b/tpot2/search_spaces/pipelines/tree.py index de4c2aef..813a59e1 100644 --- a/tpot2/search_spaces/pipelines/tree.py +++ b/tpot2/search_spaces/pipelines/tree.py @@ -13,7 +13,7 @@ from .graph import GraphPipelineIndividual, GraphPipeline -from .graph_utils import * +from ..graph_utils import * class TreePipelineIndividual(GraphPipelineIndividual): def __init__(self, diff --git a/tpot2/tpot_estimator/estimator.py b/tpot2/tpot_estimator/estimator.py index 7465564c..999dbffe 100644 --- a/tpot2/tpot_estimator/estimator.py +++ b/tpot2/tpot_estimator/estimator.py @@ -29,7 +29,9 @@ def set_dask_settings(): #TODO inherit from _BaseComposition? class TPOTEstimator(BaseEstimator): - def __init__(self, scorers, + def __init__(self, + search_space, + scorers, scorers_weights, classification, cv = 5, @@ -38,13 +40,12 @@ def __init__(self, scorers, objective_function_names = None, bigger_is_better = True, - search_space = None, - - + export_graphpipeline = False, cross_val_predict_cv = 0, + memory = None, + categorical_features = None, subsets = None, - memory = None, preprocessing = False, population_size = 50, initial_population_size = None, @@ -87,7 +88,7 @@ def __init__(self, scorers, #dask parameters n_jobs=1, - memory_limit = "4GB", + memory_limit = None, client = None, processes = True, @@ -369,10 +370,17 @@ def __init__(self, scorers, self.search_space = search_space + self.export_graphpipeline = export_graphpipeline self.cross_val_predict_cv = cross_val_predict_cv + self.memory = memory + + if self.cross_val_predict_cv !=0 or self.memory is not None: + if not self.export_graphpipeline: + raise ValueError("cross_val_predict_cv and memory parameters are parameters for GraphPipeline. To enable these options export_graphpipeline to be True. 
Otherwise these can be passed into the relevant Search spaces as parameters.") + self.categorical_features = categorical_features self.subsets = subsets - self.memory = memory + self.preprocessing = preprocessing self.validation_strategy = validation_strategy self.validation_fraction = validation_fraction @@ -600,6 +608,7 @@ def objective_function(pipeline_individual, scorers= self._scorers, cv=self.cv_gen, other_objective_functions=self.other_objective_functions, + export_graphpipeline=self.export_graphpipeline, memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv, **kwargs): @@ -611,6 +620,7 @@ def objective_function(pipeline_individual, scorers= scorers, cv=cv, other_objective_functions=other_objective_functions, + export_graphpipeline=export_graphpipeline, memory=memory, cross_val_predict_cv=cross_val_predict_cv, **kwargs, @@ -713,6 +723,7 @@ def ind_generator(rng): scorers= self._scorers, cv=self.cv_gen, other_objective_functions=self.other_objective_functions, + export_graphpipeline=self.export_graphpipeline, memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv, @@ -724,6 +735,7 @@ def ind_generator(rng): scorers= scorers, cv=cv, other_objective_functions=other_objective_functions, + export_graphpipeline=export_graphpipeline, memory=memory, cross_val_predict_cv=cross_val_predict_cv, **kwargs, @@ -738,7 +750,8 @@ def ind_generator(rng): self.objective_names_for_selection = val_objective_names self.evaluated_individuals.loc[best_pareto_front_idx,val_objective_names] = val_scores - self.evaluated_individuals["Validation_Pareto_Front"] = tpot2.utils.get_pareto_front(self.evaluated_individuals, val_objective_names, self.objective_function_weights, invalid_values=["TIMEOUT","INVALID"]) + self.evaluated_individuals["Validation_Pareto_Front"] = tpot2.utils.get_pareto_frontier(self.evaluated_individuals, column_names=val_objective_names, weights=self.objective_function_weights, invalid_values=["TIMEOUT","INVALID"]) + elif validation_strategy == 'split': @@ -765,6 +778,7 @@ def ind_generator(rng): y_val, scorers= self._scorers, other_objective_functions=self.other_objective_functions, + export_graphpipeline=self.export_graphpipeline, memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv, **kwargs: val_objective_function_generator( @@ -775,6 +789,7 @@ def ind_generator(rng): y_val, scorers= scorers, other_objective_functions=other_objective_functions, + export_graphpipeline=export_graphpipeline, memory=memory, cross_val_predict_cv=cross_val_predict_cv, **kwargs, @@ -787,11 +802,11 @@ def ind_generator(rng): val_objective_names = ['validation_'+name for name in self.objective_names] self.objective_names_for_selection = val_objective_names self.evaluated_individuals.loc[best_pareto_front_idx,val_objective_names] = val_scores - self.evaluated_individuals["Validation_Pareto_Front"] = tpot2.utils.get_pareto_front(self.evaluated_individuals, val_objective_names, self.objective_function_weights, invalid_values=["TIMEOUT","INVALID"]) + self.evaluated_individuals["Validation_Pareto_Front"] = tpot2.utils.get_pareto_frontier(self.evaluated_individuals, column_names=val_objective_names, weights=self.objective_function_weights, invalid_values=["TIMEOUT","INVALID"]) else: self.objective_names_for_selection = self.objective_names - - val_scores = self.evaluated_individuals[~self.evaluated_individuals[self.objective_names_for_selection].isin(["TIMEOUT","INVALID"]).any(axis=1)][self.objective_names_for_selection].astype(float) + + val_scores = 
self.evaluated_individuals[~self.evaluated_individuals[self.objective_names_for_selection].isna().all(1)][self.objective_names_for_selection] weighted_scores = val_scores*self.objective_function_weights if self.bigger_is_better: @@ -805,7 +820,10 @@ def ind_generator(rng): #TODO #best_individual_pipeline = best_individual.export_pipeline(memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv) - best_individual_pipeline = best_individual.export_pipeline() + if self.export_graphpipeline: + best_individual_pipeline = best_individual.export_flattened_graphpipeline(memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv) + else: + best_individual_pipeline = best_individual.export_pipeline() if self.preprocessing: self.fitted_pipeline_ = sklearn.pipeline.make_pipeline(sklearn.base.clone(self._preprocessing_pipeline), best_individual_pipeline ) @@ -888,7 +906,7 @@ def make_evaluated_individuals(self): self.evaluated_individuals = self.evaluated_individuals.set_index(self.evaluated_individuals.index.map(object_to_int)) self.evaluated_individuals['Parents'] = self.evaluated_individuals['Parents'].apply(lambda row: convert_parents_tuples_to_integers(row, object_to_int)) - self.evaluated_individuals["Instance"] = self.evaluated_individuals["Individual"].apply(lambda ind: apply_make_pipeline(ind, preprocessing_pipeline=self._preprocessing_pipeline)) + self.evaluated_individuals["Instance"] = self.evaluated_individuals["Individual"].apply(lambda ind: apply_make_pipeline(ind, preprocessing_pipeline=self._preprocessing_pipeline, export_graphpipeline=self.export_graphpipeline, memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv)) return self.evaluated_individuals diff --git a/tpot2/tpot_estimator/estimator_utils.py b/tpot2/tpot_estimator/estimator_utils.py index c0b79739..7be96e26 100644 --- a/tpot2/tpot_estimator/estimator_utils.py +++ b/tpot2/tpot_estimator/estimator_utils.py @@ -13,97 +13,33 @@ def convert_parents_tuples_to_integers(row, object_to_int): return np.nan #TODO add kwargs -def apply_make_pipeline(graphindividual, preprocessing_pipeline=None): +def apply_make_pipeline(graphindividual, preprocessing_pipeline=None, export_graphpipeline=False, **pipeline_kwargs): try: - if preprocessing_pipeline is None: - return graphindividual.export_pipeline() - else: - return sklearn.pipeline.make_pipeline(sklearn.base.clone(preprocessing_pipeline), graphindividual.export_pipeline()) - except: - return None - -def get_configuration_dictionary(options, n_samples, n_features, classification, random_state=None, cv=None, subsets=None, feature_names=None, n_classes=None): - if options is None: - return options - - if isinstance(options, dict): - return recursive_with_defaults(options, n_samples, n_features, classification, random_state=None, cv=None, subsets=subsets, feature_names=feature_names, n_classes=n_classes) - - if not isinstance(options, list): - options = [options] - - config_dict = {} - - for option in options: - - if option == "selectors": - config_dict.update(tpot2.config.make_selector_config_dictionary(random_state=random_state, classifier=classification)) - - elif option == "classifiers": - config_dict.update(tpot2.config.make_classifier_config_dictionary(random_state=random_state, n_samples=n_samples, n_classes=n_classes)) - - elif option == "classifiers_sklearnex": - config_dict.update(tpot2.config.make_sklearnex_classifier_config_dictionary(random_state=random_state, n_samples=n_samples, n_classes=n_classes)) - - elif option == "regressors": - 
config_dict.update(tpot2.config.make_regressor_config_dictionary(random_state=random_state, cv=cv, n_samples=n_samples)) - - elif option == "regressors_sklearnex": - config_dict.update(tpot2.config.make_sklearnex_regressor_config_dictionary(random_state=random_state, n_samples=n_samples)) - - elif option == "transformers": - config_dict.update(tpot2.config.make_transformer_config_dictionary(random_state=random_state, n_features=n_features)) - - elif option == "arithmetic_transformer": - config_dict.update(tpot2.config.make_arithmetic_transformer_config_dictionary()) - - elif option == "feature_set_selector": - config_dict.update(tpot2.config.make_FSS_config_dictionary(subsets, n_features, feature_names=feature_names)) - - elif option == "skrebate": - config_dict.update(tpot2.config.make_skrebate_config_dictionary(n_features=n_features)) - - elif option == "MDR": - config_dict.update(tpot2.config.make_MDR_config_dictionary()) - - elif option == "continuousMDR": - config_dict.update(tpot2.config.make_ContinuousMDR_config_dictionary()) - - elif option == "FeatureEncodingFrequencySelector": - config_dict.update(tpot2.config.make_FeatureEncodingFrequencySelector_config_dictionary()) - - elif option == "genetic encoders": - config_dict.update(tpot2.config.make_genetic_encoders_config_dictionary()) - - elif option == "passthrough": - config_dict.update(tpot2.config.make_passthrough_config_dictionary()) - + if export_graphpipeline: + est = graphindividual.export_flattened_graphpipeline(**pipeline_kwargs) else: - config_dict.update(recursive_with_defaults(option, n_samples, n_features, classification, random_state, cv, subsets=subsets, feature_names=feature_names, n_classes=n_classes)) - - if len(config_dict) == 0: - raise ValueError("No valid configuration options were provided. 
Please check the options you provided and try again.") + est = graphindividual.export_pipeline() - return config_dict -def recursive_with_defaults(config_dict, n_samples, n_features, classification, random_state=None, cv=None, subsets=None, feature_names=None, n_classes=None): + if preprocessing_pipeline is None: + return est + else: + return sklearn.pipeline.make_pipeline(sklearn.base.clone(preprocessing_pipeline), est) + except: + return None - for key in 'leaf_config_dict', 'root_config_dict', 'inner_config_dict', 'Recursive': - if key in config_dict: - value = config_dict[key] - if key=="Resursive": - config_dict[key] = recursive_with_defaults(value, n_samples, n_features, classification, random_state, cv, subsets=None, feature_names=None, n_classes=None) - else: - config_dict[key] = get_configuration_dictionary(value, n_samples, n_features, classification, random_state, cv, subsets, feature_names, n_classes) - return config_dict -def objective_function_generator(pipeline, x,y, scorers, cv, other_objective_functions, step=None, budget=None, generation=1, is_classification=True, **pipeline_kwargs): +def objective_function_generator(pipeline, x,y, scorers, cv, other_objective_functions, step=None, budget=None, generation=1, is_classification=True, export_graphpipeline=False, **pipeline_kwargs): #pipeline = pipeline.export_pipeline(**pipeline_kwargs) - pipeline = pipeline.export_pipeline() + if export_graphpipeline: + pipeline = pipeline.export_flattened_graphpipeline(**pipeline_kwargs) + else: + pipeline = pipeline.export_pipeline() + if budget is not None and budget < 1: if is_classification: x,y = sklearn.utils.resample(x,y, stratify=y, n_samples=int(budget*len(x)), replace=False, random_state=1) @@ -129,9 +65,13 @@ def objective_function_generator(pipeline, x,y, scorers, cv, other_objective_fun return np.concatenate([cv_obj_scores,other_scores]) -def val_objective_function_generator(pipeline, X_train, y_train, X_test, y_test, scorers, other_objective_functions, **pipeline_kwargs): +def val_objective_function_generator(pipeline, X_train, y_train, X_test, y_test, scorers, other_objective_functions, export_graphpipeline=False, **pipeline_kwargs): #subsample the data - pipeline = pipeline.export_pipeline(**pipeline_kwargs) + if export_graphpipeline: + pipeline = pipeline.export_flattened_graphpipeline(**pipeline_kwargs) + else: + pipeline = pipeline.export_pipeline() + fitted_pipeline = sklearn.base.clone(pipeline) fitted_pipeline.fit(X_train, y_train) diff --git a/tpot2/tpot_estimator/steady_state_estimator.py b/tpot2/tpot_estimator/steady_state_estimator.py index 777c8cad..c73584b6 100644 --- a/tpot2/tpot_estimator/steady_state_estimator.py +++ b/tpot2/tpot_estimator/steady_state_estimator.py @@ -27,7 +27,9 @@ def set_dask_settings(): #TODO inherit from _BaseComposition? 
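A minimal usage sketch of the reworked constructor contract above (not part of the patch): search_space is now the first positional argument, and cross_val_predict_cv / memory require export_graphpipeline=True, otherwise __init__ raises the new ValueError. The top-level tpot2.TPOTEstimator import, the scorer name, and the cache path are assumptions for illustration only.

import tpot2
from sklearn.neighbors import KNeighborsClassifier

# any tpot2 search space object works here; a fixed-hyperparameter EstimatorNode keeps the sketch small
my_search_space = tpot2.search_spaces.nodes.EstimatorNode(
    method=KNeighborsClassifier,
    space={"n_neighbors": 10},
)

est = tpot2.TPOTEstimator(
    my_search_space,              # search_space is now the first positional argument
    scorers=["roc_auc"],          # illustrative scorer
    scorers_weights=[1],
    classification=True,
    export_graphpipeline=True,    # required for the two options below,
    cross_val_predict_cv=5,       # otherwise __init__ raises the new ValueError
    memory="tpot_cache",          # assumed cache directory path
)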
class TPOTEstimatorSteadyState(BaseEstimator): - def __init__(self, scorers= [], + def __init__(self, + search_space, + scorers= [], scorers_weights = [], classification = False, cv = 5, @@ -35,15 +37,14 @@ def __init__(self, scorers= [], other_objective_functions_weights = [], objective_function_names = None, bigger_is_better = True, - max_size = np.inf, - linear_pipeline = False, - root_config_dict= 'Auto', - inner_config_dict=["selectors", "transformers"], - leaf_config_dict= None, + + + export_graphpipeline = False, cross_val_predict_cv = 0, + memory = None, + categorical_features = None, subsets = None, - memory = None, preprocessing = False, validation_strategy = "none", validation_fraction = .2, @@ -77,7 +78,6 @@ def __init__(self, scorers= [], stepwise_steps = 5, warm_start = False, - subset_column = None, verbose = 0, periodic_checkpoint_folder = None, @@ -364,8 +364,6 @@ def __init__(self, scorers= [], warm_start : bool, default=False If True, will use the continue the evolutionary algorithm from the last generation of the previous run. - subset_column : str or int, default=None - EXPERIMENTAL The column to use for the subset selection. Must also pass in unique_subset_values to GraphIndividual to function. verbose : int, default=1 How much information to print during the optimization process. Higher values include the information from lower values. @@ -422,6 +420,7 @@ def __init__(self, scorers= [], # sklearn BaseEstimator must have a corresponding attribute for each parameter. # These should not be modified once set. + self.search_space = search_space self.scorers = scorers self.scorers_weights = scorers_weights self.classification = classification @@ -430,15 +429,18 @@ def __init__(self, scorers= [], self.other_objective_functions_weights = other_objective_functions_weights self.objective_function_names = objective_function_names self.bigger_is_better = bigger_is_better - self.max_size = max_size - self.linear_pipeline = linear_pipeline - self.root_config_dict= root_config_dict - self.inner_config_dict= inner_config_dict - self.leaf_config_dict= leaf_config_dict + + self.export_graphpipeline = export_graphpipeline self.cross_val_predict_cv = cross_val_predict_cv + self.memory = memory + + if self.cross_val_predict_cv !=0 or self.memory is not None: + if not self.export_graphpipeline: + raise ValueError("cross_val_predict_cv and memory parameters are parameters for GraphPipeline. To enable these options export_graphpipeline to be True. 
Otherwise these can be passed into the relevant Search spaces as parameters.") + + self.categorical_features = categorical_features self.subsets = subsets - self.memory = memory self.preprocessing = preprocessing self.validation_strategy = validation_strategy self.validation_fraction = validation_fraction @@ -468,7 +470,6 @@ def __init__(self, scorers= [], self.stepwise_steps = stepwise_steps self.warm_start = warm_start - self.subset_column = subset_column self.verbose = verbose self.periodic_checkpoint_folder = periodic_checkpoint_folder @@ -660,17 +661,6 @@ def fit(self, X, y): else: self.feature_names = None - if self.root_config_dict == 'Auto': - if self.classification: - n_classes = len(np.unique(y)) - root_config_dict = get_configuration_dictionary("classifiers", n_samples, n_features, self.classification, self.random_state, self.cv_gen, subsets=self.subsets, feature_names=self.feature_names, n_classes=n_classes) - else: - root_config_dict = get_configuration_dictionary("regressors", n_samples, n_features, self.classification, self.random_state, self.cv_gen, subsets=self.subsets, feature_names=self.feature_names) - else: - root_config_dict = get_configuration_dictionary(self.root_config_dict, n_samples, n_features, self.classification, self.random_state, self.cv_gen, subsets=self.subsets,feature_names=self.feature_names) - - inner_config_dict = get_configuration_dictionary(self.inner_config_dict, n_samples, n_features, self.classification, self.random_state, self.cv_gen, subsets=self.subsets, feature_names=self.feature_names) - leaf_config_dict = get_configuration_dictionary(self.leaf_config_dict, n_samples, n_features, self.classification, self.random_state, self.cv_gen, subsets=self.subsets, feature_names=self.feature_names) @@ -681,9 +671,9 @@ def objective_function(pipeline_individual, scorers= self._scorers, cv=self.cv_gen, other_objective_functions=self.other_objective_functions, + export_graphpipeline=self.export_graphpipeline, memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv, - subset_column=self.subset_column, **kwargs): return objective_function_generator( pipeline_individual, @@ -693,19 +683,16 @@ def objective_function(pipeline_individual, scorers= scorers, cv=cv, other_objective_functions=other_objective_functions, + export_graphpipeline=export_graphpipeline, memory=memory, cross_val_predict_cv=cross_val_predict_cv, - subset_column=subset_column, **kwargs, ) - self.individual_generator_instance = tpot2.individual_representations.graph_pipeline_individual.estimator_graph_individual_generator( - inner_config_dict=inner_config_dict, - root_config_dict=root_config_dict, - leaf_config_dict=leaf_config_dict, - max_size = self.max_size, - linear_pipeline=self.linear_pipeline, - ) + def ind_generator(rng): + rng = np.random.default_rng(rng) + while True: + yield self.search_space.generate(rng) @@ -718,7 +705,7 @@ def objective_function(pipeline_individual, #If warm start and we have an evolver instance, use the existing one if not(self.warm_start and self._evolver_instance is not None): - self._evolver_instance = self._evolver( individual_generator=self.individual_generator_instance, + self._evolver_instance = self._evolver( individual_generator=ind_generator(self.rng), objective_functions= [objective_function], objective_function_weights = self.objective_function_weights, objective_names=self.objective_names, @@ -805,9 +792,10 @@ def objective_function(pipeline_individual, scorers= self._scorers, cv=self.cv_gen, 
other_objective_functions=self.other_objective_functions, + export_graphpipeline=self.export_graphpipeline, memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv, - subset_column=self.subset_column, + **kwargs: objective_function_generator( ind, X, @@ -816,9 +804,9 @@ def objective_function(pipeline_individual, scorers= scorers, cv=cv, other_objective_functions=other_objective_functions, + export_graphpipeline=export_graphpipeline, memory=memory, cross_val_predict_cv=cross_val_predict_cv, - subset_column=subset_column, **kwargs, )] @@ -858,9 +846,9 @@ def objective_function(pipeline_individual, y_val, scorers= self._scorers, other_objective_functions=self.other_objective_functions, + export_graphpipeline=self.export_graphpipeline, memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv, - subset_column=self.subset_column, **kwargs: val_objective_function_generator( ind, X, @@ -869,9 +857,9 @@ def objective_function(pipeline_individual, y_val, scorers= scorers, other_objective_functions=other_objective_functions, + export_graphpipeline=export_graphpipeline, memory=memory, cross_val_predict_cv=cross_val_predict_cv, - subset_column=subset_column, **kwargs, )] @@ -898,7 +886,10 @@ def objective_function(pipeline_individual, self.selected_best_score = self.evaluated_individuals.loc[best_idx] - best_individual_pipeline = best_individual.export_pipeline(memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv, subset_column=self.subset_column) + if self.export_graphpipeline: + best_individual_pipeline = best_individual.export_flattened_graphpipeline(memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv) + else: + best_individual_pipeline = best_individual.export_pipeline() if self.preprocessing: self.fitted_pipeline_ = sklearn.pipeline.make_pipeline(sklearn.base.clone(self._preprocessing_pipeline), best_individual_pipeline ) @@ -979,7 +970,7 @@ def make_evaluated_individuals(self): self.evaluated_individuals = self.evaluated_individuals.set_index(self.evaluated_individuals.index.map(object_to_int)) self.evaluated_individuals['Parents'] = self.evaluated_individuals['Parents'].apply(lambda row: convert_parents_tuples_to_integers(row, object_to_int)) - self.evaluated_individuals["Instance"] = self.evaluated_individuals["Individual"].apply(lambda ind: apply_make_pipeline(ind, preprocessing_pipeline=self._preprocessing_pipeline)) + self.evaluated_individuals["Instance"] = self.evaluated_individuals["Individual"].apply(lambda ind: apply_make_pipeline(ind, preprocessing_pipeline=self._preprocessing_pipeline, export_graphpipeline=self.export_graphpipeline, memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv)) return self.evaluated_individuals From d2dab4eadec600e5b4addaa634ade88936936750 Mon Sep 17 00:00:00 2001 From: perib Date: Wed, 17 Apr 2024 21:23:44 -0700 Subject: [PATCH 4/6] lots of edits to configuration spaces --- README.md | 10 - Tutorial/2_Search_Spaces.ipynb | 455 +++++++++++++++++- .../builtin_modules/column_one_hot_encoder.py | 1 + tpot2/config/classifiers.py | 370 ++++++++++---- tpot2/config/classifiers_sklearnex.py | 10 +- tpot2/config/get_configspace.py | 199 ++++++-- tpot2/config/mdr_configs.py | 8 +- tpot2/config/regressors.py | 387 ++++++++++----- tpot2/config/regressors_sklearnex.py | 14 +- tpot2/config/special_configs.py | 51 -- tpot2/config/tests/__init__.py | 0 tpot2/config/tests/test_get_configspace.py | 26 + tpot2/config/transformers.py | 35 ++ tpot2/search_spaces/nodes/estimator_node.py | 61 ++- 14 files changed, 
1271 insertions(+), 356 deletions(-) create mode 100644 tpot2/config/tests/__init__.py create mode 100644 tpot2/config/tests/test_get_configspace.py diff --git a/README.md b/README.md index 6f30b08d..f7551551 100644 --- a/README.md +++ b/README.md @@ -159,16 +159,6 @@ Setting `verbose` to 5 can be helpful during debugging as it will print out the We welcome you to check the existing issues for bugs or enhancements to work on. If you have an idea for an extension to TPOT2, please file a new issue so we can discuss it. -### Known issues -* TPOT2 uses the func_timeout package to terminate long running pipelines. The early termination signal may fail on particular estimators and cause TPOT2 to run for longer than intended. If you are using your own custom configuration dictionaries, and are noticing that TPOT2 is running for longer than intended, this may be the issue. We are currently looking into it. Sometimes restarting TPOT2 resolves the issue. -* Periodic checkpoint folder may not correctly resume if using budget and/or initial_population size. -* Population class is slow to add new individuals. The Population class needs to be updated to use a dictionary for storage rather than a pandas dataframe. -* Crossover may sometimes go over the size restrictions. -* Memory caching with GraphPipeline may miss some nodes where the ordering on inputs happens to be different between two nodes. - - - - ### Support for TPOT2 TPOT2 was developed in the [Artificial Intelligence Innovation (A2I) Lab](http://epistasis.org/) at Cedars-Sinai with funding from the [NIH](http://www.nih.gov/) under grants U01 AG066833 and R01 LM010098. We are incredibly grateful for the support of the NIH and the Cedars-Sinai during the development of this project. diff --git a/Tutorial/2_Search_Spaces.ipynb b/Tutorial/2_Search_Spaces.ipynb index 8e0af2b9..51d2aff7 100644 --- a/Tutorial/2_Search_Spaces.ipynb +++ b/Tutorial/2_Search_Spaces.ipynb @@ -141,7 +141,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "You can sample generate an individual with the generate() function. This individual samples from the search space as well as provides mutation and crossover functions to modify the current sample." + "You can sample generate an individual with the generate() function. This individual samples from the search space as well as provides mutation and crossover functions to modify the current sample.\n", + "\n", + "Note that ConfigurationSpace does not support None as a parameter. Instead, use the special string \"\\\". TPOT will automatically replace instances of this string with the Python None." ] }, { @@ -652,6 +654,455 @@ "knn_individual1.export_pipeline()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If a dictionary of parameters is passed instead of of a ConfigSpace, then the hyperparameters will be fixed and not learned." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
KNeighborsClassifier(n_neighbors=10)
" + ], + "text/plain": [ + "KNeighborsClassifier(n_neighbors=10)" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import tpot2\n", + "from ConfigSpace import ConfigurationSpace\n", + "from ConfigSpace import ConfigurationSpace, Integer, Float, Categorical, Normal\n", + "from sklearn.neighbors import KNeighborsClassifier\n", + "\n", + "space = {\n", + "\n", + " 'n_neighbors':10,\n", + "}\n", + "\n", + "knn_node = tpot2.search_spaces.nodes.EstimatorNode(\n", + " method = KNeighborsClassifier,\n", + " space = space,\n", + ")\n", + "\n", + "knn_node.generate().export_pipeline()" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1658,7 +2109,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "TPOT2 also comes with predefined search spaces. the helper function `tpot2.config.get_search_space` takes in a string or a list of strings, and returns either a EstimatorNode or a ChoicePipeline,respectively. \n", + "TPOT2 also comes with predefined search spaces. The current search spaces were adapted from a combination of the original TPOT package as well as the search spaces used in [AutoSklearn](https://github.com/automl/auto-sklearn/tree/development/autosklearn/pipeline/components). The helper function `tpot2.config.get_search_space` takes in a string or a list of strings, and returns either a EstimatorNode or a ChoicePipeline,respectively. \n", "\n", "strings can correspond to individual methods. Tehre are also special strings that return predefined lists of methods. \n", "\n", diff --git a/tpot2/builtin_modules/column_one_hot_encoder.py b/tpot2/builtin_modules/column_one_hot_encoder.py index 4f3843bf..34c3320e 100644 --- a/tpot2/builtin_modules/column_one_hot_encoder.py +++ b/tpot2/builtin_modules/column_one_hot_encoder.py @@ -44,6 +44,7 @@ def __init__(self, columns='auto', drop=None, handle_unknown='error', sparse_out ---------- columns : str, list, default='auto' + Determines which columns to onehot encode with sklearn.preprocessing.OneHotEncoder. - 'auto' : Automatically select categorical features based on columns with less than 10 unique values - 'categorical' : Automatically select categorical features - 'numeric' : Automatically select numeric features diff --git a/tpot2/config/classifiers.py b/tpot2/config/classifiers.py index 14649f61..6423f328 100644 --- a/tpot2/config/classifiers.py +++ b/tpot2/config/classifiers.py @@ -1,31 +1,42 @@ from ConfigSpace import ConfigurationSpace from ConfigSpace import ConfigurationSpace, Integer, Float, Categorical, Normal +from ConfigSpace import EqualsCondition, OrConjunction, NotEqualsCondition, InCondition +from ..search_spaces.nodes.estimator_node import NONE_SPECIAL_STRING, TRUE_SPECIAL_STRING, FALSE_SPECIAL_STRING +import numpy as np + #TODO Conditional search space to prevent invalid combinations of hyperparameters -def get_LogisticRegression_ConfigurationSpace(random_state=None): - - space = { - 'solver': Categorical('solver', ['saga','liblinear']), - 'penalty': Categorical("penalty", ['elasticnet','l1', 'l2']), #TODO workaround to support None option? 
- 'dual': Categorical("dual", [True, False]), - 'C': Float("C", bounds=(1e-4, 1e4), log=True), - - #TODO workaround for including None as a value for class_weight - 'class_weight': Categorical("class_weight", ['balanced']), - 'n_jobs': 1, - 'max_iter': 1000, - } +def get_LogisticRegression_ConfigurationSpace(n_samples, n_features, random_state): + + dual = n_samples<=n_features + + dual = TRUE_SPECIAL_STRING if dual else FALSE_SPECIAL_STRING + + space = {"solver":"saga", + "max_iter":1000, + "n_jobs":1, + "dual":dual, + } + + penalty = Categorical('penalty', ['l1', 'l2',"elasticnet"], default='l2') + C = Float('C', (0.01, 1e5), log=True) + l1_ratio = Float('l1_ratio', (0.0, 1.0)) + + l1_ratio_condition = EqualsCondition(l1_ratio, penalty, 'elasticnet') if random_state is not None: #This is required because configspace doesn't allow None as a value space['random_state'] = random_state - return ConfigurationSpace( - space = space - ) + + cs = ConfigurationSpace(space) + cs.add_hyperparameters([penalty, C, l1_ratio]) + cs.add_conditions([l1_ratio_condition]) + + return cs -def get_KNeighborsClassifier_ConfigurationSpace(n_samples=10): +def get_KNeighborsClassifier_ConfigurationSpace(n_samples): return ConfigurationSpace( space = { @@ -39,14 +50,14 @@ def get_KNeighborsClassifier_ConfigurationSpace(n_samples=10): ) -def get_DecisionTreeClassifier_ConfigurationSpace(random_state=None, n_featues=20): +def get_DecisionTreeClassifier_ConfigurationSpace(n_featues, random_state): space = { 'criterion': Categorical("criterion", ['gini', 'entropy']), - 'max_depth': Integer("max_depth", bounds=(1, 2*n_featues)), - 'min_samples_split': Integer("min_samples_split", bounds=(2, 20)), + 'max_depth': Integer("max_depth", bounds=(1, 2*n_featues)), #max of 20? log scale? + 'min_samples_split': Integer("min_samples_split", bounds=(1, 20)), 'min_samples_leaf': Integer("min_samples_leaf", bounds=(1, 20)), - 'max_features': Categorical("max_features", [1.0, 'sqrt', 'log2']), + 'max_features': Categorical("max_features", [NONE_SPECIAL_STRING, 'sqrt', 'log2']), 'min_weight_fraction_leaf': 0.0, } @@ -58,54 +69,66 @@ def get_DecisionTreeClassifier_ConfigurationSpace(random_state=None, n_featues=2 space = space ) +#TODO Conditional search spaces +def get_LinearSVC_ConfigurationSpace(random_state): + space = {"dual":"auto"} + + penalty = Categorical('penalty', ['l1', 'l2']) + C = Float('C', (0.01, 1e5), log=True) + loss = Categorical('loss', ['hinge', 'squared_hinge']) -def get_SVC_ConfigurationSpace(random_state=None): + loss_condition = EqualsCondition(loss, penalty, 'l2') - space = { - 'kernel': Categorical("kernel", ['poly', 'rbf', 'linear', 'sigmoid']), - 'C': Float("C", bounds=(1e-4, 25), log=True), - 'degree': Integer("degree", bounds=(1, 4)), - - #'class_weight': Categorical("class_weight", [None, 'balanced']), #TODO add class_weight. configspace doesn't allow None as a value. - 'max_iter': 3000, - 'tol': 0.001, - 'probability': Categorical("probability", [True]), # configspace doesn't allow bools as a default value? 
but does allow them as a value inside a Categorical - } if random_state is not None: #This is required because configspace doesn't allow None as a value space['random_state'] = random_state - - return ConfigurationSpace( - space = space - ) -#TODO Conditional search spaces -def get_LinearSVC_ConfigurationSpace(random_state=None,): + + cs = ConfigurationSpace(space) + cs.add_hyperparameters([penalty, C, loss]) + cs.add_conditions([loss_condition]) + + return cs + + +def get_SVC_ConfigurationSpace(random_state): + space = { - 'penalty': Categorical("penalty", ['l1', 'l2']), - 'loss': Categorical("loss", ['hinge', 'squared_hinge']), - 'dual': Categorical("dual", [True, False]), - 'C': Float("C", bounds=(1e-4, 25), log=True), - } - + 'max_iter': 3000, + 'probability':TRUE_SPECIAL_STRING} + + kernel = Categorical("kernel", ['poly', 'rbf', 'sigmoid']) + C = Float('C', (0.01, 1e5), log=True) + degree = Integer("degree", bounds=(1, 5)) + gamma = Float("gamma", bounds=(1e-5, 8), log=True) + shrinking = Categorical("shrinking", [True, False]) + coef0 = Float("coef0", bounds=(-1, 1)) + + degree_condition = EqualsCondition(degree, kernel, 'poly') + gamma_condition = InCondition(gamma, kernel, ['rbf', 'poly']) + coef0_condition = InCondition(coef0, kernel, ['poly', 'sigmoid']) + if random_state is not None: #This is required because configspace doesn't allow None as a value space['random_state'] = random_state - - return ConfigurationSpace( - space = space - ) + cs = ConfigurationSpace(space) + cs.add_hyperparameters([kernel, C, coef0, degree, gamma, shrinking]) + cs.add_conditions([degree_condition, gamma_condition, coef0_condition]) + return cs -def get_RandomForestClassifier_ConfigurationSpace(random_state=None): + +def get_RandomForestClassifier_ConfigurationSpace(n_features, random_state): space = { - 'n_estimators': 100, + 'n_estimators': 128, #as recommended by Oshiro et al. (2012 + 'max_features': Integer("max_features", bounds=(1, max(1, n_features))), #log scale like autosklearn? 'criterion': Categorical("criterion", ['gini', 'entropy']), 'min_samples_split': Integer("min_samples_split", bounds=(2, 20)), 'min_samples_leaf': Integer("min_samples_leaf", bounds=(1, 20)), 'bootstrap': Categorical("bootstrap", [True, False]), + 'class_weight': Categorical("class_weight", [NONE_SPECIAL_STRING, 'balanced']), } if random_state is not None: #This is required because configspace doesn't allow None as a value @@ -115,46 +138,21 @@ def get_RandomForestClassifier_ConfigurationSpace(random_state=None): space = space ) -def get_GradientBoostingClassifier_ConfigurationSpace(random_state=None, n_classes=None): - - if n_classes is not None and n_classes > 2: - loss = 'log_loss' - else: - loss = Categorical("loss", ['log_loss', 'exponential']) - - space = { - 'n_estimators': 100, - 'loss': loss, - 'learning_rate': Float("learning_rate", bounds=(1e-3, 1), log=True), - 'min_samples_leaf': Integer("min_samples_leaf", bounds=(1, 200)), - 'min_samples_split': Integer("min_samples_split", bounds=(2, 20)), - 'subsample': Float("subsample", bounds=(0.1, 1.0)), - 'max_features': Float("max_features", bounds=(0.1, 1.0)), - 'max_depth': Integer("max_depth", bounds=(1, 10)), - - #TODO include max leaf nodes? - #TODO validation fraction + n_iter_no_change? 
maybe as conditional - - 'tol': 1e-4, - } - - if random_state is not None: #This is required because configspace doesn't allow None as a value - space['random_state'] = random_state - - return ConfigurationSpace( - space = space - ) - -def get_XGBClassifier_ConfigurationSpace(random_state=None,): +def get_XGBClassifier_ConfigurationSpace(random_state,): space = { 'n_estimators': 100, 'learning_rate': Float("learning_rate", bounds=(1e-3, 1), log=True), - 'subsample': Float("subsample", bounds=(0.1, 1.0)), + 'subsample': Float("subsample", bounds=(0.5, 1.0)), 'min_child_weight': Integer("min_child_weight", bounds=(1, 21)), - 'max_depth': Integer("max_depth", bounds=(1, 11)), + 'gamma': Float("gamma", bounds=(1e-4, 20), log=True), + 'max_depth': Integer("max_depth", bounds=(3, 18)), + 'reg_alpha': Float("reg_alpha", bounds=(1e-4, 100), log=True), + 'reg_lambda': Float("reg_lambda", bounds=(1e-4, 1), log=True), 'n_jobs': 1, + 'nthread': 1, + 'verbosity': 0, } if random_state is not None: #This is required because configspace doesn't allow None as a value @@ -164,7 +162,7 @@ def get_XGBClassifier_ConfigurationSpace(random_state=None,): space = space ) -def get_LGBMClassifier_ConfigurationSpace(random_state=None,): +def get_LGBMClassifier_ConfigurationSpace(random_state,): space = { 'objective': 'binary', @@ -184,7 +182,7 @@ def get_LGBMClassifier_ConfigurationSpace(random_state=None,): ) -def get_ExtraTreesClassifier_ConfigurationSpace(random_state=None): +def get_ExtraTreesClassifier_ConfigurationSpace(random_state): space = { 'n_estimators': 100, 'criterion': Categorical("criterion", ["gini", "entropy"]), @@ -204,41 +202,36 @@ def get_ExtraTreesClassifier_ConfigurationSpace(random_state=None): -def get_SGDClassifier_ConfigurationSpace(random_state=None): +def get_SGDClassifier_ConfigurationSpace(random_state): space = { - 'loss': Categorical("loss", ['log_loss', 'modified_huber']), + 'loss': Categorical("loss", ['squared_hinge', 'modified_huber']), #don't include hinge because we have LinearSVC, don't include log because we have LogisticRegression 'penalty': 'elasticnet', 'alpha': Float("alpha", bounds=(1e-5, 0.01), log=True), - 'learning_rate': Categorical("learning_rate", ['invscaling', 'constant']), 'l1_ratio': Float("l1_ratio", bounds=(0.0, 1.0)), 'eta0': Float("eta0", bounds=(0.01, 1.0)), - 'power_t': Float("power_t", bounds=(1e-5, 100.0), log=True), 'n_jobs': 1, 'fit_intercept': Categorical("fit_intercept", [True]), + 'class_weight': Categorical("class_weight", [NONE_SPECIAL_STRING, 'balanced']), } if random_state is not None: #This is required because configspace doesn't allow None as a value space['random_state'] = random_state - return ConfigurationSpace( + power_t = Float("power_t", bounds=(1e-5, 100.0), log=True) + learning_rate = Categorical("learning_rate", ['invscaling', 'constant', "optimal"]) + powertcond = EqualsCondition(power_t, learning_rate, 'invscaling') + + + cs = ConfigurationSpace( space = space ) + cs.add_hyperparameters([power_t, learning_rate]) + cs.add_conditions([powertcond]) + return cs -def get_MLPClassifier_ConfigurationSpace(random_state=None): - space = { - 'alpha': Float("alpha", bounds=(1e-4, 1e-1), log=True), - 'learning_rate_init': Float("learning_rate_init", bounds=(1e-3, 1.), log=True), - } - - if random_state is not None: #This is required because configspace doesn't allow None as a value - space['random_state'] = random_state - - return ConfigurationSpace( - space = space - ) GaussianNB_ConfigurationSpace = {} @@ -261,12 +254,11 @@ def 
get_MultinomialNB_ConfigurationSpace(): -def get_AdaBoostClassifier_ConfigurationSpace(random_state=None): +def get_AdaBoostClassifier_ConfigurationSpace(random_state): space = { 'n_estimators': Integer("n_estimators", bounds=(50, 500)), 'learning_rate': Float("learning_rate", bounds=(0.01, 2), log=True), 'algorithm': Categorical("algorithm", ['SAMME', 'SAMME.R']), - 'max_depth': Integer("max_depth", bounds=(1, 10)), } if random_state is not None: #This is required because configspace doesn't allow None as a value @@ -274,4 +266,172 @@ def get_AdaBoostClassifier_ConfigurationSpace(random_state=None): return ConfigurationSpace( space = space - ) \ No newline at end of file + ) + + +def get_QuadraticDiscriminantAnalysis_ConfigurationSpace(): + return ConfigurationSpace( + space = { + 'reg_param': Float("reg_param", bounds=(0, 1)), + } + ) + +def get_PassiveAggressiveClassifier_ConfigurationSpace(random_state): + space = { + 'C': Float("C", bounds=(1e-5, 10), log=True), + 'loss': Categorical("loss", ['hinge', 'squared_hinge']), + 'average': Categorical("average", [True, False]), + } + + if random_state is not None: #This is required because configspace doesn't allow None as a value + space['random_state'] = random_state + + return ConfigurationSpace( + space = space + ) +#TODO support auto shrinkage when solver is svd. may require custom node +def get_LinearDiscriminantAnalysis_ConfigurationSpace(): + + solver = Categorical("solver", ['svd', 'lsqr', 'eigen']), + shrinkage = Float("shrinkage", bounds=(0, 1)), + + shrinkcond = NotEqualsCondition(shrinkage, solver, 'svd') + + cs = ConfigurationSpace() + cs.add_hyperparameters([solver, shrinkage]) + cs.add_conditions([shrinkcond]) + + return + + + +#### Gradient Boosting Classifiers + +def get_GradientBoostingClassifier_ConfigurationSpace(n_features, random_state): + early_stop = Categorical("early_stop", ["off", "valid", "train"]) + n_iter_no_change = Integer("n_iter_no_change",bounds=(1,20)) + validation_fraction = Float("validation_fraction", bounds=(0.01, 0.4)) + + n_iter_no_change_cond = InCondition(n_iter_no_change, early_stop, ["valid", "train"] ) + validation_fraction_cond = EqualsCondition(validation_fraction, early_stop, "valid") + + space = { + 'loss': Categorical("loss", ['log_loss', 'exponential']), + 'learning_rate': Float("learning_rate", bounds=(1e-3, 1), log=True), + 'min_samples_leaf': Integer("min_samples_leaf", bounds=(1, 200)), + 'min_samples_split': Integer("min_samples_split", bounds=(2, 20)), + 'subsample': Float("subsample", bounds=(0.1, 1.0)), + 'max_features': Integer("max_features", bounds=(1, max(1, n_features))), + 'max_leaf_nodes': Integer("max_leaf_nodes", bounds=(3, 2047)), + 'max_depth': Integer("max_depth", bounds=(1, 2*n_features)), + 'tol': 1e-4, + } + + if random_state is not None: #This is required because configspace doesn't allow None as a value + space['random_state'] = random_state + + cs = ConfigurationSpace( + space = space + ) + cs.add_hyperparameters([n_iter_no_change, validation_fraction, early_stop ]) + cs.add_conditions([validation_fraction_cond, n_iter_no_change_cond]) + return cs + + + + +#only difference is l2_regularization +def get_HistGradientBoostingClassifier_ConfigurationSpace(n_features, random_state): + early_stopping = Categorical("early_stopping", ["off", "valid", "train"]) + n_iter_no_change = Integer("n_iter_no_change",bounds=(1,20)) + validation_fraction = Float("validation_fraction", bounds=(0.01, 0.4)) + + n_iter_no_change_cond = InCondition(n_iter_no_change, early_stopping, 
["valid", "train"] ) + validation_fraction_cond = EqualsCondition(validation_fraction, early_stopping, "valid") + + space = { + 'loss': Categorical("loss", ['log_loss', 'exponential']), + 'learning_rate': Float("learning_rate", bounds=(1e-3, 1), log=True), + 'min_samples_leaf': Integer("min_samples_leaf", bounds=(1, 200)), + 'max_features': Float("max_features", bounds=(0.1,1.0)), + 'max_leaf_nodes': Integer("max_leaf_nodes", bounds=(3, 2047)), + 'max_depth': Integer("max_depth", bounds=(1, 2*n_features)), + 'l2_regularization': Float("l2_regularization", bounds=(1e-10, 1), log=True), + 'tol': 1e-4, + } + + if random_state is not None: #This is required because configspace doesn't allow None as a value + space['random_state'] = random_state + + cs = ConfigurationSpace( + space = space + ) + cs.add_hyperparameters([n_iter_no_change, validation_fraction, early_stopping ]) + cs.add_conditions([validation_fraction_cond, n_iter_no_change_cond]) + + return cs + +def GradientBoostingClassifier_hyperparameter_parser(params): + + final_params = { + 'loss': params['loss'], + 'learning_rate': params['learning_rate'], + 'min_samples_leaf': params['min_samples_leaf'], + 'min_samples_split': params['min_samples_split'], + 'subsample': params['subsample'], + 'max_features': params['max_features'], + 'max_leaf_nodes': params['max_leaf_nodes'], + 'max_depth': params['max_depth'], + 'tol': params['tol'], + } + + if "l2_regularization" in params: + final_params['l2_regularization'] = params['l2_regularization'] + + if params['early_stop'] == 'off': + final_params['n_iter_no_change'] = None + final_params['validation_fraction'] = None + elif params['early_stop'] == 'valid': + final_params['n_iter_no_change'] = params['n_iter_no_change'] + final_params['validation_fraction'] = params['validation_fraction'] + elif params['early_stop'] == 'train': + final_params['n_iter_no_change'] = params['n_iter_no_change'] + final_params['validation_fraction'] = None + + + return final_params + + +### + +def get_MLPClassifier_ConfigurationSpace(random_state): + space = {"n_iter_no_change":32} + + if random_state is not None: #This is required because configspace doesn't allow None as a value + space['random_state'] = random_state + + cs = ConfigurationSpace( + space = space + ) + + n_hidden_layers = Integer("n_hidden_layers", bounds=(1, 3)) + n_nodes_per_layer = Integer("n_nodes_per_layer", bounds=(16, 512)) + activation = Categorical("activation", ['tanh', 'relu']) + alpha = Float("alpha", bounds=(1e-7, 1e-1), log=True) + learning_rate = Float("learning_rate", bounds=(1e-4, 1e-1), log=True) + early_stopping = Categorical("early_stopping", [True,False]) + + cs.add_hyperparameters([n_hidden_layers, n_nodes_per_layer, activation, alpha, learning_rate, early_stopping]) + + return cs + +def MLPClassifier_hyperparameter_parser(params): + hyperparameters = { + 'n_iter_no_change': params['n_iter_no_change'], + 'hidden_layer_sizes' : [params['n_nodes_per_layer']]*params['n_hidden_layers'], + 'activation': params['activation'], + 'alpha': params['alpha'], + 'learning_rate': params['learning_rate'], + 'early_stopping': params['early_stopping'], + } + return hyperparameters \ No newline at end of file diff --git a/tpot2/config/classifiers_sklearnex.py b/tpot2/config/classifiers_sklearnex.py index a158a9a6..ad581898 100644 --- a/tpot2/config/classifiers_sklearnex.py +++ b/tpot2/config/classifiers_sklearnex.py @@ -2,7 +2,7 @@ from ConfigSpace import ConfigurationSpace, Integer, Float, Categorical, Normal -def 
get_RandomForestClassifier_ConfigurationSpace(random_state=None): +def get_RandomForestClassifier_ConfigurationSpace(random_state): space = { 'n_estimators': 100, #TODO make this a higher number? learned? 'bootstrap': Categorical("bootstrap", [True, False]), @@ -19,7 +19,7 @@ def get_RandomForestClassifier_ConfigurationSpace(random_state=None): space = space ) -def get_KNeighborsClassifier_ConfigurationSpace(n_samples=10): +def get_KNeighborsClassifier_ConfigurationSpace(n_samples): return ConfigurationSpace( space = { 'n_neighbors': Integer("n_neighbors", bounds=(1, max(n_samples, 100)), log=True), @@ -29,7 +29,7 @@ def get_KNeighborsClassifier_ConfigurationSpace(n_samples=10): #TODO add conditionals -def get_LogisticRegression_ConfigurationSpace(random_state=None): +def get_LogisticRegression_ConfigurationSpace(random_state): space = { 'solver': Categorical("solver", ['liblinear', 'sag', 'saga']), 'penalty': Categorical("penalty", ['l1', 'l2']), @@ -45,7 +45,7 @@ def get_LogisticRegression_ConfigurationSpace(random_state=None): space = space ) -def get_SVC_ConfigurationSpace(random_state=None): +def get_SVC_ConfigurationSpace(random_state): space = { 'kernel': Categorical("kernel", ['poly', 'rbf', 'linear', 'sigmoid']), 'C': Float("C", bounds=(1e-4, 25), log=True), @@ -62,7 +62,7 @@ def get_SVC_ConfigurationSpace(random_state=None): space = space ) -def get_NuSVC_ConfigurationSpace(random_state=None): +def get_NuSVC_ConfigurationSpace(random_state): space = { 'nu': Float("nu", bounds=(0.05, 1.0)), 'kernel': Categorical("kernel", ['poly', 'rbf', 'linear', 'sigmoid']), diff --git a/tpot2/config/get_configspace.py b/tpot2/config/get_configspace.py index 44892278..cf75cd47 100644 --- a/tpot2/config/get_configspace.py +++ b/tpot2/config/get_configspace.py @@ -52,6 +52,7 @@ from sklearn.kernel_approximation import RBFSampler from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import StandardScaler +from sklearn.preprocessing import PowerTransformer, QuantileTransformer from sklearn.feature_selection import SelectFwe @@ -62,13 +63,12 @@ import sklearn.feature_selection - +#TODO create a selectomixin using these? 
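The classifier spaces above now express invalid hyperparameter combinations through ConfigSpace conditions instead of flat dictionaries. A standalone reduction of the LogisticRegression space, using only ConfigSpace calls that already appear in this patch, shows how a conditional hyperparameter behaves when sampled.

from ConfigSpace import ConfigurationSpace, Categorical, Float, EqualsCondition

cs = ConfigurationSpace(space={"solver": "saga", "max_iter": 1000})

penalty = Categorical("penalty", ["l1", "l2", "elasticnet"], default="l2")
C = Float("C", (0.01, 1e5), log=True)
l1_ratio = Float("l1_ratio", (0.0, 1.0))

cs.add_hyperparameters([penalty, C, l1_ratio])
# l1_ratio is only active, and only passed to the estimator, when penalty == "elasticnet"
cs.add_conditions([EqualsCondition(l1_ratio, penalty, "elasticnet")])

for conf in cs.sample_configuration(5):
    print(conf)  # l1_ratio is absent from the sample unless penalty == "elasticnet"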
from sklearn.feature_selection import f_classif from sklearn.feature_selection import f_regression from sklearn.linear_model import SGDRegressor -from sklearn.linear_model import LinearRegression from sklearn.linear_model import Ridge from sklearn.linear_model import Lasso from sklearn.linear_model import ElasticNet @@ -76,21 +76,23 @@ from sklearn.linear_model import LassoLars, LassoLarsCV from sklearn.linear_model import RidgeCV +from sklearn.svm import SVR, SVC +from sklearn.svm import LinearSVR, LinearSVC -from sklearn.svm import SVR -from sklearn.svm import LinearSVR - -from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor,RandomForestRegressor +from sklearn.ensemble import AdaBoostRegressor, AdaBoostClassifier, GradientBoostingRegressor,RandomForestRegressor from sklearn.ensemble import BaggingRegressor from sklearn.ensemble import ExtraTreesRegressor +from sklearn.ensemble import HistGradientBoostingClassifier, HistGradientBoostingRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import ElasticNetCV -from xgboost import XGBRegressor +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis -from tpot2.builtin_modules import RFE_ExtraTreesClassifier, SelectFromModel_ExtraTreesClassifier, RFE_ExtraTreesRegressor, SelectFromModel_ExtraTreesRegressor +from sklearn.gaussian_process import GaussianProcessRegressor + +from xgboost import XGBRegressor from tpot2.builtin_modules import AddTransformer, mul_neg_1_Transformer, MulTransformer, SafeReciprocalTransformer, EQTransformer, NETransformer, GETransformer, GTTransformer, LETransformer, LTTransformer, MinTransformer, MaxTransformer, ZeroTransformer, OneTransformer, NTransformer @@ -99,8 +101,11 @@ #MDR -all_methods = [SGDClassifier, RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier, MLPClassifier, DecisionTreeClassifier, XGBClassifier, KNeighborsClassifier, SVC, LogisticRegression, LGBMClassifier, LinearSVC, GaussianNB, BernoulliNB, MultinomialNB, ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor, BaggingRegressor, DecisionTreeRegressor, KNeighborsRegressor, XGBRegressor, RFE_ExtraTreesClassifier, SelectFromModel_ExtraTreesClassifier, RFE_ExtraTreesRegressor, SelectFromModel_ExtraTreesRegressor, ZeroCount, OneHotEncoder, ColumnOneHotEncoder, Binarizer, FastICA, FeatureAgglomeration, MaxAbsScaler, MinMaxScaler, Normalizer, Nystroem, PCA, PolynomialFeatures, RBFSampler, RobustScaler, StandardScaler, SelectFwe, SelectPercentile, VarianceThreshold, RFE, SelectFromModel, f_classif, f_regression, SGDRegressor, LinearRegression, Ridge, Lasso, ElasticNet, Lars, LassoLars, LassoLarsCV, RidgeCV, SVR, LinearSVR, AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor, BaggingRegressor, ExtraTreesRegressor, DecisionTreeRegressor, KNeighborsRegressor, ElasticNetCV, +all_methods = [SGDClassifier, RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier, MLPClassifier, DecisionTreeClassifier, XGBClassifier, KNeighborsClassifier, SVC, LogisticRegression, LGBMClassifier, LinearSVC, GaussianNB, BernoulliNB, MultinomialNB, ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor, BaggingRegressor, DecisionTreeRegressor, KNeighborsRegressor, XGBRegressor, ZeroCount, OneHotEncoder, ColumnOneHotEncoder, Binarizer, FastICA, FeatureAgglomeration, MaxAbsScaler, MinMaxScaler, Normalizer, Nystroem, PCA, PolynomialFeatures, RBFSampler, RobustScaler, StandardScaler, 
SelectFwe, SelectPercentile, VarianceThreshold, SGDRegressor, Ridge, Lasso, ElasticNet, Lars, LassoLars, LassoLarsCV, RidgeCV, SVR, LinearSVR, AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor, BaggingRegressor, ExtraTreesRegressor, DecisionTreeRegressor, KNeighborsRegressor, ElasticNetCV, + AdaBoostClassifier, + GaussianProcessRegressor, HistGradientBoostingClassifier, HistGradientBoostingRegressor, AddTransformer, mul_neg_1_Transformer, MulTransformer, SafeReciprocalTransformer, EQTransformer, NETransformer, GETransformer, GTTransformer, LETransformer, LTTransformer, MinTransformer, MaxTransformer, ZeroTransformer, OneTransformer, NTransformer, + PowerTransformer, QuantileTransformer, ] @@ -118,36 +123,21 @@ all_methods.append(MultiSURF) if 'sklearnex' in sys.modules: - from sklearnex.linear_model import LinearRegression - from sklearnex.linear_model import Ridge - from sklearnex.linear_model import Lasso - from sklearnex.linear_model import ElasticNet - from sklearnex.svm import SVR - from sklearnex.svm import NuSVR - from sklearnex.ensemble import RandomForestRegressor - from sklearnex.neighbors import KNeighborsRegressor - - from sklearnex.ensemble import RandomForestClassifier - from sklearnex.neighbors import KNeighborsClassifier - from sklearnex.svm import SVC - from sklearnex.svm import NuSVC - from sklearnex.linear_model import LogisticRegression - - - all_methods.append(LinearRegression) - all_methods.append(Ridge) - all_methods.append(Lasso) - all_methods.append(ElasticNet) - all_methods.append(SVR) - all_methods.append(NuSVR) - all_methods.append(RandomForestRegressor) - all_methods.append(KNeighborsRegressor) - - all_methods.append(RandomForestClassifier) - all_methods.append(KNeighborsClassifier) - all_methods.append(SVC) - all_methods.append(NuSVC) - all_methods.append(LogisticRegression) + import sklearnex + + all_methods.append(sklearnex.linear_model.LinearRegression) + all_methods.append(sklearnex.linear_model.Ridge) + all_methods.append(sklearnex.linear_model.Lasso) + all_methods.append(sklearnex.linear_model.ElasticNet) + all_methods.append(sklearnex.svm.SVR) + all_methods.append(sklearnex.svm.NuSVR) + all_methods.append(sklearnex.ensemble.RandomForestRegressor) + all_methods.append(sklearnex.neighbors.KNeighborsRegressor) + all_methods.append(sklearnex.ensemble.RandomForestClassifier) + all_methods.append(sklearnex.neighbors.KNeighborsClassifier) + all_methods.append(sklearnex.svm.SVC) + all_methods.append(sklearnex.svm.NuSVC) + all_methods.append(sklearnex.linear_model.LogisticRegression) STRING_TO_CLASS = { @@ -155,15 +145,18 @@ } - +from sklearn.linear_model import PassiveAggressiveClassifier +from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis +from sklearn.linear_model import ARDRegression +from sklearn.gaussian_process import GaussianProcessRegressor GROUPNAMES = { "selectors": ["SelectFwe", "SelectPercentile", "VarianceThreshold",], "selectors_classification": ["SelectFwe", "SelectPercentile", "VarianceThreshold", "RFE_classification", "SelectFromModel_classification"], "selectors_regression": ["SelectFwe", "SelectPercentile", "VarianceThreshold", "RFE_regression", "SelectFromModel_regression"], - "classifiers" : ["LogisticRegression", "DecisionTreeClassifier", "KNeighborsClassifier", "GradientBoostingClassifier", "ExtraTreesClassifier", "RandomForestClassifier", "SGDClassifier", "GaussianNB", "BernoulliNB", "MultinomialNB", "XGBClassifier", "SVC", "MLPClassifier"], - "regressors" : ["ElasticNetCV", "ExtraTreesRegressor", 
"GradientBoostingRegressor", "AdaBoostRegressor", "DecisionTreeRegressor", "KNeighborsRegressor", "LassoLarsCV", "SVR", "RandomForestRegressor", "RidgeCV", "XGBRegressor", "SGDRegressor" ], - "transformers": ["Binarizer", "Normalizer", "PCA", "ZeroCount", "OneHotEncoder", "FastICA", "FeatureAgglomeration", "Nystroem", "RBFSampler"], + "classifiers" : ['AdaBoostClassifier', 'BernoulliNB', 'DecisionTreeClassifier', 'ExtraTreesClassifier', 'GaussianNB', 'HistGradientBoostingClassifier', 'KNeighborsClassifier', 'LogisticRegression', "LinearSVC", "SVC", 'MLPClassifier', 'MultinomialNB', "PassiveAggressiveClassifier", "QuadraticDiscriminantAnalysis", 'RandomForestClassifier', 'SGDClassifier', 'XGBClassifier'], + "regressors" : ['AdaBoostRegressor', "ARDRegression", 'DecisionTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor', 'HistGradientBoostingRegressor', 'KNeighborsRegressor', 'LinearDiscriminantAnalysis', 'LinearSVR', "MLPRegressor", 'RandomForestRegressor', 'SGDRegressor', 'SVR', 'XGBRegressor'], + "transformers": ["Binarizer", "Normalizer", "PCA", "ZeroCount", "OneHotEncoder", "FastICA", "FeatureAgglomeration", "Nystroem", "RBFSampler", "QuantileTransformer", "PowerTransformer"], "arithmatic": ["AddTransformer", "mul_neg_1_Transformer", "MulTransformer", "SafeReciprocalTransformer", "EQTransformer", "NETransformer", "GETransformer", "GTTransformer", "LETransformer", "LTTransformer", "MinTransformer", "MaxTransformer"], "imputers": [], "skrebate": ["ReliefF", "SURF", "SURFstar", "MultiSURF"], @@ -194,20 +187,24 @@ def get_configspace(name, n_classes=3, n_samples=100, n_features=100, random_sta #classifiers.py + case "AdaBoostClassifier": + return classifiers.get_AdaBoostClassifier_ConfigurationSpace(random_state=random_state) case "LogisticRegression": - return classifiers.get_LogisticRegression_ConfigurationSpace(random_state=random_state) + return classifiers.get_LogisticRegression_ConfigurationSpace(n_samples=n_samples, n_features=n_features, random_state=random_state) case "KNeighborsClassifier": return classifiers.get_KNeighborsClassifier_ConfigurationSpace(n_samples=n_samples) case "DecisionTreeClassifier": - return classifiers.get_DecisionTreeClassifier_ConfigurationSpace(random_state=random_state) + return classifiers.get_DecisionTreeClassifier_ConfigurationSpace(n_featues=n_features, random_state=random_state) case "SVC": return classifiers.get_SVC_ConfigurationSpace(random_state=random_state) case "LinearSVC": return classifiers.get_LinearSVC_ConfigurationSpace(random_state=random_state) case "RandomForestClassifier": - return classifiers.get_RandomForestClassifier_ConfigurationSpace(random_state=random_state) + return classifiers.get_RandomForestClassifier_ConfigurationSpace(n_features=n_features, random_state=random_state) case "GradientBoostingClassifier": - return classifiers.get_GradientBoostingClassifier_ConfigurationSpace(n_classes=n_classes) + return classifiers.get_GradientBoostingClassifier_ConfigurationSpace(n_features=n_features, random_state=random_state) + case "HistGradientBoostingClassifier": + return classifiers.get_HistGradientBoostingClassifier_ConfigurationSpace(n_features=n_features, random_state=random_state) case "XGBClassifier": return classifiers.get_XGBClassifier_ConfigurationSpace(random_state=random_state) case "LGBMClassifier": @@ -224,7 +221,63 @@ def get_configspace(name, n_classes=3, n_samples=100, n_features=100, random_sta return classifiers.get_MultinomialNB_ConfigurationSpace() case "GaussianNB": return {} - + case "LassoLarsCV": 
+ return {} + case "ElasticNetCV": + return regressors.ElasticNetCV_configspace + case "RidgeCV": + return {} + + #regressors.py + case "RandomForestRegressor": + return regressors.get_RandomForestRegressor_ConfigurationSpace(random_state=random_state) + case "SGDRegressor": + return regressors.get_SGDRegressor_ConfigurationSpace(random_state=random_state) + case "Ridge": + return regressors.get_Ridge_ConfigurationSpace(random_state=random_state) + case "Lasso": + return regressors.get_Lasso_ConfigurationSpace(random_state=random_state) + case "ElasticNet": + return regressors.get_ElasticNet_ConfigurationSpace(random_state=random_state) + case "Lars": + return regressors.get_Lars_ConfigurationSpace(random_state=random_state) + case "OthogonalMatchingPursuit": + return regressors.get_OthogonalMatchingPursuit_ConfigurationSpace() + case "BayesianRidge": + return regressors.get_BayesianRidge_ConfigurationSpace() + case "LassoLars": + return regressors.get_LassoLars_ConfigurationSpace(random_state=random_state) + case "BaggingRegressor": + return regressors.get_BaggingRegressor_ConfigurationSpace(random_state=random_state) + case "ARDRegression": + return regressors.get_ARDRegression_ConfigurationSpace() + case "TheilSenRegressor": + return regressors.get_TheilSenRegressor_ConfigurationSpace(random_state=random_state) + case "Perceptron": + return regressors.get_Perceptron_ConfigurationSpace(random_state=random_state) + case "DecisionTreeRegressor": + return regressors.get_DecisionTreeRegressor_ConfigurationSpace(n_features=n_features, random_state=random_state) + case "LinearSVR": + return regressors.get_LinearSVR_ConfigurationSpace(random_state=random_state) + case "SVR": + return regressors.get_SVR_ConfigurationSpace() + case "XGBRegressor": + return regressors.get_XGBRegressor_ConfigurationSpace(random_state=random_state) + case "AdaBoostRegressor": + return regressors.get_AdaBoostRegressor_ConfigurationSpace(random_state=random_state) + case "ExtraTreesRegressor": + return regressors.get_ExtraTreesRegressor_ConfigurationSpace(random_state=random_state) + case "GradientBoostingRegressor": + return regressors.get_GradientBoostingRegressor_ConfigurationSpace(n_features=n_features, random_state=random_state) + case "HistGradientBoostingRegressor": + return regressors.get_HistGradientBoostingRegressor_ConfigurationSpace(n_features=n_features, random_state=random_state) + case "MLPRegressor": + return regressors.get_MLPRegressor_ConfigurationSpace(random_state=random_state) + case "KNeighborsRegressor": + return regressors.get_KNeighborsRegressor_ConfigurationSpace(n_samples=n_samples) + case "GaussianProcessRegressor": + return regressors.get_GaussianProcessRegressor_ConfigurationSpace(n_features=n_features, random_state=random_state) + #transformers.py case "Binarizer": return transformers.Binarizer_configspace @@ -244,7 +297,23 @@ def get_configspace(name, n_classes=3, n_samples=100, n_features=100, random_sta return transformers.get_Nystroem_configspace(n_features=n_features, random_state=random_state) case "RBFSampler": return transformers.get_RBFSampler_configspace(n_features=n_features, random_state=random_state) - + case "MinMaxScaler": + return {} + case "PowerTransformer": + return {} + case "QuantileTransformer": + return transformers.get_QuantileTransformer_configspace(random_state=random_state) + case "RobustScaler": + return transformers.RobustScaler_configspace + case "ColumnOneHotEncoder": + return {} + case "MaxAbsScaler": + return {} + case "PolynomialFeatures": + return 
transformers.PolynomialFeatures_configspace + case "StandardScaler": + return {} + #selectors.py case "SelectFwe": return selectors.SelectFwe_configspace @@ -256,6 +325,7 @@ def get_configspace(name, n_classes=3, n_samples=100, n_features=100, random_sta return selectors.RFE_configspace_part case "SelectFromModel": return selectors.SelectFromModel_configspace_part + #special_configs.py case "AddTransformer": @@ -291,7 +361,7 @@ def get_configspace(name, n_classes=3, n_samples=100, n_features=100, random_sta space = { - 'n': Float("n", bounds=(-1e3, 1e3), log=True), + 'n': Float("n", bounds=(-1e3, 1e3)), } ) @@ -341,7 +411,8 @@ def get_configspace(name, n_classes=3, n_samples=100, n_features=100, random_sta case "KNeighborsRegressor_sklearnex": return regressors_sklearnex.get_KNeighborsRegressor_ConfigurationSpace(n_samples=n_samples) - return {} + #raise error + raise ValueError(f"Could not find configspace for {name}") def get_search_space(name, n_classes=3, n_samples=100, n_features=100, random_state=None): @@ -359,9 +430,12 @@ def get_search_space(name, n_classes=3, n_samples=100, n_features=100, random_st return get_search_space(name_list, n_classes=n_classes, n_samples=n_samples, n_features=n_features, random_state=random_state) if name is None: + warnings.warn(f"name is None") return None if name not in STRING_TO_CLASS: + print("FOOO ", name) + warnings.warn(f"Could not find class for {name}") return None return get_node(name, n_classes=n_classes, n_samples=n_samples, n_features=n_features, random_state=random_state) @@ -370,6 +444,8 @@ def get_search_space(name, n_classes=3, n_samples=100, n_features=100, random_st def get_node(name, n_classes=3, n_samples=100, n_features=100, random_state=None): #these are wrappers that take in another estimator as a parameter + # TODO Add AdaBoostRegressor, AdaBoostClassifier as wrappers? wrap a decision tree with different params? + # TODO add other meta-estimators? 
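    # --- Editor's sketch (not part of this patch): one possible shape for the TODO above. ---
    # The branch name "AdaBoostRegressor_wrapped" is hypothetical. This assumes that
    # WrapperPipeline forwards the generated sub-estimator to the wrapping class the same
    # way it does for the RFE / SelectFromModel branches below, and that the installed
    # scikit-learn AdaBoostRegressor accepts that sub-estimator as its inner estimator argument.
    if name == "AdaBoostRegressor_wrapped":
        ada_sp = get_configspace("AdaBoostRegressor", n_classes=n_classes, n_samples=n_samples, random_state=random_state)
        tree = get_node("DecisionTreeRegressor", n_classes=n_classes, n_samples=n_samples, n_features=n_features, random_state=random_state)
        return WrapperPipeline(nodegen=tree, method=AdaBoostRegressor, configspace=ada_sp)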
if name == "RFE_classification": rfe_sp = get_configspace(name, n_classes=n_classes, n_samples=n_samples, random_state=random_state) ext = get_node("ExtraTreesClassifier", n_classes=n_classes, n_samples=n_samples, random_state=random_state) @@ -386,7 +462,26 @@ def get_node(name, n_classes=3, n_samples=100, n_features=100, random_state=None sfm_sp = get_configspace(name, n_classes=n_classes, n_samples=n_samples, random_state=random_state) ext = get_node("ExtraTreesRegressor", n_classes=n_classes, n_samples=n_samples, random_state=random_state) return WrapperPipeline(nodegen=ext, method=SelectFromModel, configspace=sfm_sp) - + + #these are nodes that have special search spaces which require custom parsing of the hyperparameters + if name == "RobustScaler": + configspace = get_configspace(name, n_classes=n_classes, n_samples=n_samples, random_state=random_state) + return EstimatorNode(STRING_TO_CLASS[name], configspace, hyperparameter_parser=transformers.robust_scaler_hyperparameter_parser) + if name == "GradientBoostingClassifier" or name == "HistGradientBoosting": + configspace = get_configspace(name, n_classes=n_classes, n_samples=n_samples, random_state=random_state) + return EstimatorNode(STRING_TO_CLASS[name], configspace, hyperparameter_parser=classifiers.GradientBoostingClassifier_hyperparameter_parser) + if name == "GradientBoostingRegressor" or name == "HistGradientBoostingRegressor": + configspace = get_configspace(name, n_classes=n_classes, n_samples=n_samples, random_state=random_state) + return EstimatorNode(STRING_TO_CLASS[name], configspace, hyperparameter_parser=regressors.GradientBoostingRegressor_hyperparameter_parser) + if name == "MLPClassifier": + configspace = get_configspace(name, n_classes=n_classes, n_samples=n_samples, random_state=random_state) + return EstimatorNode(STRING_TO_CLASS[name], configspace, hyperparameter_parser=classifiers.MLPClassifier_hyperparameter_parser) + if name == "MLPRegressor": + configspace = get_configspace(name, n_classes=n_classes, n_samples=n_samples, random_state=random_state) + return EstimatorNode(STRING_TO_CLASS[name], configspace, hyperparameter_parser=regressors.MLPRegressor_hyperparameter_parser) + if name == "GaussianProcessRegressor": + configspace = get_configspace(name, n_classes=n_classes, n_samples=n_samples, random_state=random_state) + return EstimatorNode(STRING_TO_CLASS[name], configspace, hyperparameter_parser=regressors.GaussianProcessRegressor_hyperparameter_parser) configspace = get_configspace(name, n_classes=n_classes, n_samples=n_samples, n_features=n_features, random_state=random_state) if configspace is None: diff --git a/tpot2/config/mdr_configs.py b/tpot2/config/mdr_configs.py index b99ec81e..df92cd17 100644 --- a/tpot2/config/mdr_configs.py +++ b/tpot2/config/mdr_configs.py @@ -14,7 +14,7 @@ -def get_skrebate_ReliefF_config_space(n_features=10): +def get_skrebate_ReliefF_config_space(n_features): return ConfigurationSpace( space = { 'n_features_to_select': Integer('n_features_to_select', bounds=(1, n_features), log=True), @@ -23,7 +23,7 @@ def get_skrebate_ReliefF_config_space(n_features=10): ) -def get_skrebate_SURF_config_space(n_features=10): +def get_skrebate_SURF_config_space(n_features): return ConfigurationSpace( space = { 'n_features_to_select': Integer('n_features_to_select', bounds=(1, n_features), log=True), @@ -31,13 +31,13 @@ def get_skrebate_SURF_config_space(n_features=10): ) -def get_skrebate_SURFstar_config_space(n_features=10): +def get_skrebate_SURFstar_config_space(n_features): return 
ConfigurationSpace( space = { 'n_features_to_select': Integer('n_features_to_select', bounds=(1, n_features), log=True), } ) -def get_skrebate_MultiSURF_config_space(n_features=10): +def get_skrebate_MultiSURF_config_space(n_features): return ConfigurationSpace( space = { 'n_features_to_select': Integer('n_features_to_select', bounds=(1, n_features), log=True), diff --git a/tpot2/config/regressors.py b/tpot2/config/regressors.py index 845f9ff1..e87e9eda 100644 --- a/tpot2/config/regressors.py +++ b/tpot2/config/regressors.py @@ -1,37 +1,21 @@ -from sklearn.linear_model import SGDRegressor -from sklearn.linear_model import LinearRegression -from sklearn.linear_model import Ridge -from sklearn.linear_model import Lasso -from sklearn.linear_model import ElasticNet -from sklearn.linear_model import Lars -from sklearn.linear_model import LassoLars, LassoLarsCV -from sklearn.linear_model import RidgeCV - - -from sklearn.svm import SVR -from sklearn.svm import LinearSVR - -from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor,RandomForestRegressor -from sklearn.ensemble import BaggingRegressor -from sklearn.ensemble import ExtraTreesRegressor -from sklearn.tree import DecisionTreeRegressor -from sklearn.neighbors import KNeighborsRegressor -from sklearn.linear_model import ElasticNetCV - -from xgboost import XGBRegressor -from functools import partial - - +import sklearn from ConfigSpace import ConfigurationSpace from ConfigSpace import ConfigurationSpace, Integer, Float, Categorical, Normal - - +from ConfigSpace import EqualsCondition, OrConjunction, NotEqualsCondition, InCondition +from ..search_spaces.nodes.estimator_node import NONE_SPECIAL_STRING, TRUE_SPECIAL_STRING, FALSE_SPECIAL_STRING +import numpy as np #TODO: fill in remaining #TODO check for places were we could use log scaling -def get_RandomForestRegressor_ConfigurationSpace(random_state=None): + +ElasticNetCV_configspace = { + "l1_ratio" : np.arange(0.0, 1.01, 0.05), +} + +def get_RandomForestRegressor_ConfigurationSpace(random_state): space = { 'n_estimators': 100, + 'criterion': Categorical("criterion", ['mse', 'mae', "friedman_mse"]), 'max_features': Float("max_features", bounds=(0.05, 1.0)), 'bootstrap': Categorical("bootstrap", [True, False]), 'min_samples_split': Integer("min_samples_split", bounds=(2, 21)), @@ -46,27 +30,49 @@ def get_RandomForestRegressor_ConfigurationSpace(random_state=None): ) -def get_SGDRegressor_ConfigurationSpace(random_state=None): +def get_SGDRegressor_ConfigurationSpace(random_state): space = { - 'loss': Categorical("loss", ['squared_error', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive']), - 'penalty': 'elasticnet', - 'alpha': Float("alpha", bounds=(1e-5, 0.01), log=True), - 'learning_rate': Categorical("learning_rate", ['invscaling', 'constant']), - 'l1_ratio': Float("l1_ratio", bounds=(0.0, 1.0)), - 'eta0': Float("eta0", bounds=(0.01, 1.0)), - 'power_t': Float("power_t", bounds=(1e-5, 100.0), log=True), + 'alpha': Float("alpha", bounds=(1e-7, 1e-1), log=True), + 'average': Categorical("average", [True, False]), 'fit_intercept': Categorical("fit_intercept", [True]), } if random_state is not None: #This is required because configspace doesn't allow None as a value space['random_state'] = random_state - return ConfigurationSpace( + cs = ConfigurationSpace( space = space ) + l1_ratio = Float("l1_ratio", bounds=(1e-7, 1.0), log=True) + penalty = Categorical("penalty", ["l1", "l2", "elasticnet"]) + epsilon = Float("epsilon", bounds=(1e-5, 1e-1), log=True) + loss = 
Categorical("loss", ["squared_loss", "huber", "epsilon_insensitive", "squared_epsilon_insensitive",]) + eta0 = Float("eta0", bounds=(1e-7, 1e-1), log=True) + learning_rate = Categorical("learning_rate", ['optimal', 'invscaling', 'constant']) + power_t = Float("power_t", bounds=(1e-5, 1.0), log=True) + + elasticnet = EqualsCondition(l1_ratio, penalty, "elasticnet") + epsilon_condition = InCondition( + epsilon, + loss, + ["huber", "epsilon_insensitive", "squared_epsilon_insensitive"], + ) + + eta0_in_inv_con = InCondition(eta0, learning_rate, ["invscaling", "constant"]) + power_t_condition = EqualsCondition(power_t, learning_rate, "invscaling") + + cs.add_hyperparameters( + [l1_ratio, penalty, epsilon, loss, eta0, learning_rate, power_t] + ) + cs.add_conditions( + [elasticnet, epsilon_condition, power_t_condition, eta0_in_inv_con] + ) + + return cs -def get_Ridge_ConfigurationSpace(random_state=None): + +def get_Ridge_ConfigurationSpace(random_state): space = { 'alpha': Float("alpha", bounds=(0.0, 1.0)), 'fit_intercept': Categorical("fit_intercept", [True]), @@ -81,7 +87,7 @@ def get_Ridge_ConfigurationSpace(random_state=None): space = space ) -def get_Lasso_ConfigurationSpace(random_state=None): +def get_Lasso_ConfigurationSpace(random_state): space = { 'alpha': Float("alpha", bounds=(0.0, 1.0)), 'fit_intercept': Categorical("fit_intercept", [True]), @@ -95,7 +101,7 @@ def get_Lasso_ConfigurationSpace(random_state=None): space = space ) -def get_ElasticNet_ConfigurationSpace(random_state=None): +def get_ElasticNet_ConfigurationSpace(random_state): space = { 'alpha': Float("alpha", bounds=(0.0, 1.0)), 'l1_ratio': Float("l1_ratio", bounds=(0.0, 1.0)), @@ -109,7 +115,7 @@ def get_ElasticNet_ConfigurationSpace(random_state=None): ) -def get_Lars_ConfigurationSpace(random_state=None): +def get_Lars_ConfigurationSpace(random_state): space = { } @@ -138,7 +144,7 @@ def get_BayesianRidge_ConfigurationSpace(): ) -def get_LassoLars_ConfigurationSpace(random_state=None): +def get_LassoLars_ConfigurationSpace(random_state): space = { 'alpha': Float("alpha", bounds=(0.0, 1.0)), 'eps': Float("eps", bounds=(1e-5, 1e-1), log=True), @@ -151,15 +157,8 @@ def get_LassoLars_ConfigurationSpace(random_state=None): space = space ) -def get_LassoLarsCV_ConfigurationSpace(cv): - return ConfigurationSpace( - space = { - 'cv': cv, - } - ) - -def get_BaggingRegressor_ConfigurationSpace(random_state=None): +def get_BaggingRegressor_ConfigurationSpace(random_state): space = { 'max_samples': Float("max_samples", bounds=(0.05, 1.00)), 'max_features': Float("max_features", bounds=(0.05, 1.00)), @@ -178,19 +177,19 @@ def get_ARDRegression_ConfigurationSpace(): return ConfigurationSpace( space = { - 'alpha_1': Float("alpha_1", bounds=(1e-6, 1e-1), log=True), - 'alpha_2': Float("alpha_2", bounds=(1e-6, 1e-1), log=True), - 'lambda_1': Float("lambda_1", bounds=(1e-6, 1e-1), log=True), - 'lambda_2': Float("lambda_2", bounds=(1e-6, 1e-1), log=True), - 'threshold_lambda': Integer("threshold_lambda", bounds=(100, 1000)), + 'alpha_1': Float("alpha_1", bounds=(1e-10, 1e-3), log=True), + 'alpha_2': Float("alpha_2", bounds=(1e-10, 1e-3), log=True), + 'lambda_1': Float("lambda_1", bounds=(1e-10, 1e-3), log=True), + 'lambda_2': Float("lambda_2", bounds=(1e-10, 1e-3), log=True), + 'threshold_lambda': Integer("threshold_lambda", bounds=(1e3, 1e5)), } ) -def get_TheilSenRegressor_ConfigurationSpace(random_state=None): +def get_TheilSenRegressor_ConfigurationSpace(random_state): space = { - 'n_subsamples': Integer("n_subsamples", bounds=(10, 
100)), - 'max_subpopulation': Integer("max_subpopulation", bounds=(100, 1000)), + 'n_subsamples': Integer("n_subsamples", bounds=(10, 10000)), + 'max_subpopulation': Integer("max_subpopulation", bounds=(10, 1000)), } if random_state is not None: #This is required because configspace doesn't allow None as a value @@ -201,21 +200,10 @@ def get_TheilSenRegressor_ConfigurationSpace(random_state=None): ) -def get_SVR_ConfigurationSpace(): - return ConfigurationSpace( - space = { - 'kernel': Categorical("kernel", ['poly', 'rbf', 'linear', 'sigmoid']), - 'C': Float("C", bounds=(1e-4, 25), log=True), - 'degree': Integer("degree", bounds=(1, 4)), - 'max_iter': 3000, - 'tol': 0.005, - } - ) - -def get_Perceptron_ConfigurationSpace(random_state=None): +def get_Perceptron_ConfigurationSpace(random_state): space = { - 'penalty': Categorical("penalty", [None, 'l2', 'l1', 'elasticnet']), + 'penalty': Categorical("penalty", [NONE_SPECIAL_STRING, 'l2', 'l1', 'elasticnet']), 'alpha': Float("alpha", bounds=(1e-5, 1e-1), log=True), 'l1_ratio': Float("l1_ratio", bounds=(0.0, 1.0)), 'learning_rate': Categorical("learning_rate", ['constant', 'optimal', 'invscaling']), @@ -229,36 +217,12 @@ def get_Perceptron_ConfigurationSpace(random_state=None): space = space ) -def get_MLPRegressor_ConfigurationSpace(random_state=None): - space = { - 'alpha': Float("alpha", bounds=(1e-4, 1e-1), log=True), - 'learning_rate_init': Float("learning_rate_init", bounds=(1e-3, 1.), log=True), - } - - if random_state is not None: #This is required because configspace doesn't allow None as a value - space['random_state'] = random_state - - return ConfigurationSpace( - space = space - ) - - -def get_GradientBoostingRegressor_ConfigurationSpace(random_state=None): - space = { - 'n_estimators': 100, - 'loss': Categorical("loss", ['ls', 'lad', 'huber', 'quantile']), - 'learning_rate': Float("learning_rate", bounds=(1e-4, 1), log=True), - 'max_depth': Integer("max_depth", bounds=(1, 11)), - 'min_samples_split': Integer("min_samples_split", bounds=(2, 21)), - 'min_samples_leaf': Integer("min_samples_leaf", bounds=(1, 21)), - 'subsample': Float("subsample", bounds=(0.05, 1.00)), - 'max_features': Float("max_features", bounds=(0.05, 1.00)), - } -def get_DecisionTreeRegressor_ConfigurationSpace(random_state=None): +def get_DecisionTreeRegressor_ConfigurationSpace(n_features, random_state): space = { - 'max_depth': Integer("max_depth", bounds=(1, 11)), + 'criterion': Categorical("criterion", ['squared_error', 'friedman_mse', 'mae']), + 'max_depth': Integer("max_depth", bounds=(1, n_features*2)), 'min_samples_split': Integer("min_samples_split", bounds=(2, 21)), 'min_samples_leaf': Integer("min_samples_leaf", bounds=(1, 21)), } @@ -268,21 +232,22 @@ def get_DecisionTreeRegressor_ConfigurationSpace(random_state=None): ) -def get_KNeighborsRegressor_ConfigurationSpace(n_samples=100): +def get_KNeighborsRegressor_ConfigurationSpace(n_samples): return ConfigurationSpace( space = { - 'n_neighbors': Integer("n_neighbors", bounds=(1, n_samples)), + 'n_neighbors': Integer("n_neighbors", bounds=(1, min(100,n_samples))), 'weights': Categorical("weights", ['uniform', 'distance']), 'p': Integer("p", bounds=(1, 3)), 'metric': Categorical("metric", ['minkowski', 'euclidean', 'manhattan']), } ) -def get_LinearSVR_ConfigurationSpace(random_state=None): + +def get_LinearSVR_ConfigurationSpace(random_state): space = { 'epsilon': Float("epsilon", bounds=(1e-4, 1.0), log=True), - 'C': Float("C", bounds=(1e-4, 25.0), log=True), - 'dual': Categorical("dual", [True, 
False]), + 'C': Float('C', (0.01, 1e5), log=True), + 'dual': "auto", 'loss': Categorical("loss", ['epsilon_insensitive', 'squared_epsilon_insensitive']), } @@ -293,14 +258,49 @@ def get_LinearSVR_ConfigurationSpace(random_state=None): space = space ) +#add coef0? +def get_SVR_ConfigurationSpace(): + space = { + 'epislon': Float("epsilon", bounds=(1e-4, 1.0), log=True), + 'shrinking': Categorical("shrinking", [True, False]), + 'C': Float('C', (0.01, 1e5), log=True), + 'max_iter': 3000, + 'tol': 0.005, + } + + cs = ConfigurationSpace( + space = space + ) + + kernel = Categorical("kernel", ['poly', 'rbf', 'linear', 'sigmoid']) + degree = Integer("degree", bounds=(1, 5)) + gamma = Float("gamma", bounds=(1e-5, 10.0), log=True) + coef0 = Float("coef0", bounds=(-1, 1)) + + + degree_condition = EqualsCondition(degree, kernel, 'poly') + gamma_condition = InCondition(gamma, kernel, ['poly', 'rbf',]) + coef0_condition = InCondition(coef0, kernel, ['poly', 'sigmoid']) + + cs.add_hyperparameters([kernel, degree, gamma, coef0]) + cs.add_conditions([degree_condition,gamma_condition]) + + return cs + + -def get_XGBRegressor_ConfigurationSpace(random_state=None): + +def get_XGBRegressor_ConfigurationSpace(random_state): space = { + 'n_estimators': 100, 'learning_rate': Float("learning_rate", bounds=(1e-3, 1), log=True), - 'subsample': Float("subsample", bounds=(0.05, 1.0)), + 'subsample': Float("subsample", bounds=(0.5, 1.0)), 'min_child_weight': Integer("min_child_weight", bounds=(1, 21)), - 'n_estimators': 100, - 'max_depth': Integer("max_depth", bounds=(1, 11)), + 'gamma': Float("gamma", bounds=(1e-4, 20), log=True), + 'max_depth': Integer("max_depth", bounds=(3, 18)), + 'reg_alpha': Float("reg_alpha", bounds=(1e-4, 100), log=True), + 'reg_lambda': Float("reg_lambda", bounds=(1e-4, 1), log=True), + 'n_jobs': 1, 'nthread': 1, 'verbosity': 0, 'objective': 'reg:squarederror', @@ -314,11 +314,11 @@ def get_XGBRegressor_ConfigurationSpace(random_state=None): ) -def get_AdaBoostRegressor_ConfigurationSpace(random_state=None): +def get_AdaBoostRegressor_ConfigurationSpace(random_state): space = { - 'n_estimators': Integer("n_estimators", bounds=(50, 100)), - 'learning_rate': Float("learning_rate", bounds=(1e-3, 1.0), log=True), + 'n_estimators': Integer("n_estimators", bounds=(50, 500)), + 'learning_rate': Float("learning_rate", bounds=(1e-3, 2.0), log=True), 'loss': Categorical("loss", ['linear', 'square', 'exponential']), } @@ -330,9 +330,10 @@ def get_AdaBoostRegressor_ConfigurationSpace(random_state=None): space = space ) -def get_ExtraTreesRegressor_ConfigurationSpace(random_state=None): +def get_ExtraTreesRegressor_ConfigurationSpace(random_state): space = { 'n_estimators': 100, + 'criterion': Categorical("criterion", ["squared_error", "friedman_mse", "mae"]), 'max_features': Float("max_features", bounds=(0.05, 1.0)), 'min_samples_split': Integer("min_samples_split", bounds=(2, 21)), 'min_samples_leaf': Integer("min_samples_leaf", bounds=(1, 21)), @@ -344,4 +345,168 @@ def get_ExtraTreesRegressor_ConfigurationSpace(random_state=None): return ConfigurationSpace( space = space - ) \ No newline at end of file + ) +### + +def get_GaussianProcessRegressor_ConfigurationSpace(n_features, random_state): + space = { + 'n_features': n_features, + 'alpha': Float("alpha", bounds=(1e-14, 1.0), log=True), + 'thetaL': Float("thetaL", bounds=(1e-10, 1e-3), log=True), + 'thetaU': Float("thetaU", bounds=(1.0, 100000), log=True), + } + + if random_state is not None: #This is required because configspace doesn't allow None 
as a value + space['random_state'] = random_state + + return ConfigurationSpace( + space = space + ) + +def GaussianProcessRegressor_hyperparameter_parser(params): + kernel = sklearn.gaussian_process.kernels.RBF( + length_scale = [1.0]*params['n_features'], + length_scale_bounds=[(params['thetaL'], params['thetaU'])] * params['n_features'], + ) + final_params = {"kernel": kernel, + "alpha": params['alpha'], + "n_restarts_optimizer": 10, + "optimizer": "fmin_l_bfgs_b", + "normalize_y": True, + "copy_X_train": True, + } + + if "random_state" in params: + final_params['random_state'] = params['random_state'] + + return final_params + +### +def get_GradientBoostingRegressor_ConfigurationSpace(n_features, random_state): + early_stop = Categorical("early_stop", ["off", "valid", "train"]) + n_iter_no_change = Integer("n_iter_no_change",bounds=(1,20)) + validation_fraction = Float("validation_fraction", bounds=(0.01, 0.4)) + + n_iter_no_change_cond = InCondition(n_iter_no_change, early_stop, ["valid", "train"] ) + validation_fraction_cond = EqualsCondition(validation_fraction, early_stop, "valid") + + space = { + 'loss': Categorical("loss", ['log_loss', 'exponential']), + 'learning_rate': Float("learning_rate", bounds=(1e-3, 1), log=True), + 'min_samples_leaf': Integer("min_samples_leaf", bounds=(1, 200)), + 'min_samples_split': Integer("min_samples_split", bounds=(2, 20)), + 'subsample': Float("subsample", bounds=(0.1, 1.0)), + 'max_features': Integer("max_features", bounds=(1, max(1, n_features))), + 'max_leaf_nodes': Integer("max_leaf_nodes", bounds=(3, 2047)), + 'max_depth': Integer("max_depth", bounds=(1, 2*n_features)), + 'tol': 1e-4, + } + + if random_state is not None: #This is required because configspace doesn't allow None as a value + space['random_state'] = random_state + + cs = ConfigurationSpace( + space = space + ) + cs.add_hyperparameters([n_iter_no_change, validation_fraction, early_stop ]) + cs.add_conditions([validation_fraction_cond, n_iter_no_change_cond]) + return cs + +#only difference is l2_regularization +def get_HistGradientBoostingRegressor_ConfigurationSpace(n_features, random_state): + early_stop = Categorical("early_stop", ["off", "valid", "train"]) + n_iter_no_change = Integer("n_iter_no_change",bounds=(1,20)) + validation_fraction = Float("validation_fraction", bounds=(0.01, 0.4)) + + n_iter_no_change_cond = InCondition(n_iter_no_change, early_stop, ["valid", "train"] ) + validation_fraction_cond = EqualsCondition(validation_fraction, early_stop, "valid") + + space = { + 'loss': Categorical("loss", ['log_loss', 'exponential']), + 'learning_rate': Float("learning_rate", bounds=(1e-3, 1), log=True), + 'min_samples_leaf': Integer("min_samples_leaf", bounds=(1, 200)), + 'max_features': Float("max_features", bounds=(0.1,1.0)), + 'max_leaf_nodes': Integer("max_leaf_nodes", bounds=(3, 2047)), + 'max_depth': Integer("max_depth", bounds=(1, 2*n_features)), + 'l2_regularization': Float("l2_regularization", bounds=(1e-10, 1), log=True), + 'tol': 1e-4, + } + + if random_state is not None: #This is required because configspace doesn't allow None as a value + space['random_state'] = random_state + + cs = ConfigurationSpace( + space = space + ) + cs.add_hyperparameters([n_iter_no_change, validation_fraction, early_stop ]) + cs.add_conditions([validation_fraction_cond, n_iter_no_change_cond]) + + return cs + +def GradientBoostingRegressor_hyperparameter_parser(params): + + final_params = { + 'loss': params['loss'], + 'learning_rate': params['learning_rate'], + 'min_samples_leaf': 
params['min_samples_leaf'], + 'max_features': params['max_features'], + 'max_leaf_nodes': params['max_leaf_nodes'], + 'max_depth': params['max_depth'], + 'tol': params['tol'], + } + + if "l2_regularization" in params: + final_params['l2_regularization'] = params['l2_regularization'] + + if params['early_stop'] == 'off': + final_params['n_iter_no_change'] = None + final_params['validation_fraction'] = None + elif params['early_stop'] == 'valid': + final_params['n_iter_no_change'] = params['n_iter_no_change'] + final_params['validation_fraction'] = params['validation_fraction'] + elif params['early_stop'] == 'train': + final_params['n_iter_no_change'] = params['n_iter_no_change'] + final_params['validation_fraction'] = None + + + return final_params + + + +### + +def get_MLPRegressor_ConfigurationSpace(random_state): + space = {"n_iter_no_change":32} + + if random_state is not None: #This is required because configspace doesn't allow None as a value + space['random_state'] = random_state + + cs = ConfigurationSpace( + space = space + ) + + n_hidden_layers = Integer("n_hidden_layers", bounds=(1, 3)) + n_nodes_per_layer = Integer("n_nodes_per_layer", bounds=(16, 512)) + activation = Categorical("activation", ['tanh', 'relu']) + alpha = Float("alpha", bounds=(1e-7, 1e-1), log=True) + learning_rate = Float("learning_rate", bounds=(1e-4, 1e-1), log=True) + early_stopping = Categorical("early_stopping", [True,False]) + + cs.add_hyperparameters([n_hidden_layers, n_nodes_per_layer, activation, alpha, learning_rate, early_stopping]) + + return cs + +def MLPRegressor_hyperparameter_parser(params): + hyperparameters = { + 'n_iter_no_change': params['n_iter_no_change'], + 'hidden_layer_sizes' : [params['n_nodes_per_layer']]*params['n_hidden_layers'], + 'activation': params['activation'], + 'alpha': params['alpha'], + 'learning_rate': params['learning_rate'], + 'early_stopping': params['early_stopping'], + } + return hyperparameters + + + + \ No newline at end of file diff --git a/tpot2/config/regressors_sklearnex.py b/tpot2/config/regressors_sklearnex.py index 3473de56..7346a7c3 100644 --- a/tpot2/config/regressors_sklearnex.py +++ b/tpot2/config/regressors_sklearnex.py @@ -3,7 +3,7 @@ -def get_RandomForestRegressor_ConfigurationSpace(random_state=None): +def get_RandomForestRegressor_ConfigurationSpace(random_state): space = { 'n_estimators': 100, 'max_features': Float("max_features", bounds=(0.05, 1.0)), @@ -20,7 +20,7 @@ def get_RandomForestRegressor_ConfigurationSpace(random_state=None): ) -def get_KNeighborsRegressor_ConfigurationSpace(n_samples=100): +def get_KNeighborsRegressor_ConfigurationSpace(n_samples): return ConfigurationSpace( space = { 'n_neighbors': Integer("n_neighbors", bounds=(1, max(n_samples, 100))), @@ -29,7 +29,7 @@ def get_KNeighborsRegressor_ConfigurationSpace(n_samples=100): ) -def get_Ridge_ConfigurationSpace(random_state=None): +def get_Ridge_ConfigurationSpace(random_state): space = { 'alpha': Float("alpha", bounds=(0.0, 1.0)), 'fit_intercept': Categorical("fit_intercept", [True]), @@ -43,7 +43,7 @@ def get_Ridge_ConfigurationSpace(random_state=None): space = space ) -def get_Lasso_ConfigurationSpace(random_state=None): +def get_Lasso_ConfigurationSpace(random_state): space = { 'alpha': Float("alpha", bounds=(0.0, 1.0)), 'fit_intercept': Categorical("fit_intercept", [True]), @@ -60,7 +60,7 @@ def get_Lasso_ConfigurationSpace(random_state=None): space = space ) -def get_ElasticNet_ConfigurationSpace(random_state=None): +def get_ElasticNet_ConfigurationSpace(random_state): 
space = { 'alpha': Float("alpha", bounds=(0.0, 1.0)), 'l1_ratio': Float("l1_ratio", bounds=(0.0, 1.0)), @@ -74,7 +74,7 @@ def get_ElasticNet_ConfigurationSpace(random_state=None): ) -def get_SVR_ConfigurationSpace(random_state=None): +def get_SVR_ConfigurationSpace(random_state): space = { 'kernel': Categorical("kernel", ['poly', 'rbf', 'linear', 'sigmoid']), 'C': Float("C", bounds=(1e-4, 25), log=True), @@ -90,7 +90,7 @@ def get_SVR_ConfigurationSpace(random_state=None): space = space ) -def get_NuSVR_ConfigurationSpace(random_state=None): +def get_NuSVR_ConfigurationSpace(random_state): space = { 'nu': Float("nu", bounds=(0.05, 1.0)), 'kernel': Categorical("kernel", ['poly', 'rbf', 'linear', 'sigmoid']), diff --git a/tpot2/config/special_configs.py b/tpot2/config/special_configs.py index 38545f6c..5d22dfad 100644 --- a/tpot2/config/special_configs.py +++ b/tpot2/config/special_configs.py @@ -30,54 +30,3 @@ def get_ArithmeticTransformer_ConfigurationSpace(): # MinTransformer: {} # MaxTransformer: {} - - -def get_FeatureSetSelector_ConfigurationSpace(names_list = None, subset_dict=None): - return ConfigurationSpace( - space = { - 'name': Categorical("name", names_list), - } - ) - - -def make_FSS_config_dictionary(subsets=None, n_features=None, feature_names=None): - """Create the search space of parameters for FeatureSetSelector. - - Parameters - ---------- - subsets: Sets the subsets to select from. - - str : If a string, it is assumed to be a path to a csv file with the subsets. - The first column is assumed to be the name of the subset and the remaining columns are the features in the subset. - - list or np.ndarray : If a list or np.ndarray, it is assumed to be a list of subsets. - - n_features: int the number of features in the dataset. - If subsets is None, each column will be treated as a subset. One column will be selected per subset. 
- """ - - #require at least of of the parameters - if subsets is None and n_features is None: - raise ValueError('At least one of the parameters must be provided') - - if isinstance(subsets, str): - df = pd.read_csv(subsets,header=None,index_col=0) - df['features'] = df.apply(lambda x: list([x[c] for c in df.columns]),axis=1) - subset_dict = {} - for row in df.index: - subset_dict[row] = df.loc[row]['features'] - elif isinstance(subsets, dict): - subset_dict = subsets - elif isinstance(subsets, list) or isinstance(subsets, np.ndarray): - subset_dict = {str(i):subsets[i] for i in range(len(subsets))} - else: - if feature_names is None: - subset_dict = {str(i):i for i in range(n_features)} - else: - subset_dict = {str(i):feature_names[i] for i in range(len(feature_names))} - - names_list = list(subset_dict.keys()) - - return ConfigurationSpace({ - 'name': Categorical("name", names_list), - 'subset_dict': Categorical("subset", subset_dict), - }) - diff --git a/tpot2/config/tests/__init__.py b/tpot2/config/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tpot2/config/tests/test_get_configspace.py b/tpot2/config/tests/test_get_configspace.py new file mode 100644 index 00000000..a2ebcb59 --- /dev/null +++ b/tpot2/config/tests/test_get_configspace.py @@ -0,0 +1,26 @@ +import pytest +import tpot2 +from sklearn.datasets import load_iris +import random +import sklearn + +import tpot2.config + +from ..get_configspace import STRING_TO_CLASS + +def test_loop_through_all_hyperparameters(): + + n_classes=3 + n_samples=100 + n_features=100 + random_state=None + + for class_name, _ in STRING_TO_CLASS.items(): + estnode_gen = tpot2.config.get_search_space(class_name, n_classes=n_classes, n_samples=n_samples, n_features=n_features, random_state=random_state) + + #generate 10 random hyperparameters and make sure they are all valid + for i in range(10): + estnode = estnode_gen.generate() + est = estnode.export_pipeline() + + \ No newline at end of file diff --git a/tpot2/config/transformers.py b/tpot2/config/transformers.py index f74d5e18..04180ac4 100644 --- a/tpot2/config/transformers.py +++ b/tpot2/config/transformers.py @@ -18,6 +18,13 @@ ZeroCount_configspace = {} +PolynomialFeatures_configspace = ConfigurationSpace( + space = { + 'degree': Integer('degree', bounds=(2, 3)), + 'interaction_only': Categorical('interaction_only', [True, False]), + } +) + OneHotEncoder_configspace = {} #TODO include the parameter for max unique values def get_FastICA_configspace(n_features=100, random_state=None): @@ -76,3 +83,31 @@ def get_RBFSampler_configspace(n_features=100, random_state=None): space = space ) + + +def get_QuantileTransformer_configspace(random_state=None): + + space = { + 'n_quantiles': Integer('n_quantiles', bounds=(10, 2000)), + 'output_distribution': Categorical('output_distribution', ['uniform', 'normal']), + } + + if random_state is not None: #This is required because configspace doesn't allow None as a value + space['random_state'] = random_state + + return ConfigurationSpace( + space = space + + ) + + + +### ROBUST SCALER + +RobustScaler_configspace = ConfigurationSpace({ + "q_min": Float("q_min", bounds=(0.001, 0.3)), + "q_max": Float("q_max", bounds=(0.7, 0.999)), + }) + +def robust_scaler_hyperparameter_parser(params): + return {"quantile_range": (params["q_min"], params["q_max"])} \ No newline at end of file diff --git a/tpot2/search_spaces/nodes/estimator_node.py b/tpot2/search_spaces/nodes/estimator_node.py index 6e084b59..0ec71e98 100644 --- 
a/tpot2/search_spaces/nodes/estimator_node.py +++ b/tpot2/search_spaces/nodes/estimator_node.py @@ -1,22 +1,45 @@ # try https://automl.github.io/ConfigSpace/main/api/hyperparameters.html -import tpot2 + import numpy as np -import pandas as pd -import sklearn -from tpot2 import config -from typing import Generator, List, Tuple, Union -import random from ..base import SklearnIndividual, SklearnIndividualGenerator from ConfigSpace import ConfigurationSpace +from typing import final + +NONE_SPECIAL_STRING = "" +TRUE_SPECIAL_STRING = "" +FALSE_SPECIAL_STRING = "" + + +def default_hyperparameter_parser(params:dict) -> dict: + return params + class EstimatorNodeIndividual(SklearnIndividual): + """ + Note that ConfigurationSpace does not support None as a parameter. Instead, use the special string "". TPOT will automatically replace instances of this string with the Python None. + + Parameters + ---------- + method : type + The class of the estimator to be used + + space : ConfigurationSpace|dict + The hyperparameter space to be used. If a dict is passed, hyperparameters are fixed and not learned. + + """ def __init__(self, method: type, space: ConfigurationSpace|dict, #TODO If a dict is passed, hyperparameters are fixed and not learned. Is this confusing? Should we make a second node type? + hyperparameter_parser: callable = None, rng=None) -> None: super().__init__() self.method = method self.space = space + if hyperparameter_parser is None: + self.hyperparameter_parser = default_hyperparameter_parser + else: + self.hyperparameter_parser = hyperparameter_parser + if isinstance(space, dict): self.hyperparameters = space else: @@ -24,6 +47,8 @@ def __init__(self, method: type, self.space.seed(rng.integers(0, 2**32)) self.hyperparameters = self.space.sample_configuration().get_dictionary() + self.check_hyperparameters_for_None() + def mutate(self, rng=None): if isinstance(self.space, dict): return False @@ -32,6 +57,7 @@ def mutate(self, rng=None): self.space.seed(rng.integers(0, 2**32)) self.hyperparameters = self.space.sample_configuration().get_dictionary() + self.check_hyperparameters_for_None() return True def crossover(self, other, rng=None): @@ -48,17 +74,34 @@ def crossover(self, other, rng=None): if hyperparameter in other.hyperparameters: self.hyperparameters[hyperparameter] = other.hyperparameters[hyperparameter] + self.check_hyperparameters_for_None() + + return True + + def check_hyperparameters_for_None(self): + for key, value in self.hyperparameters.items(): + #if string + if isinstance(value, str): + if value == NONE_SPECIAL_STRING: + self.hyperparameters[key] = None + elif value == TRUE_SPECIAL_STRING: + self.hyperparameters[key] = True + elif value == FALSE_SPECIAL_STRING: + self.hyperparameters[key] = False + + @final #this method should not be overridden, instead override hyperparameter_parser def export_pipeline(self, **kwargs): - return self.method(**self.hyperparameters) + return self.method(**self.hyperparameter_parser(self.hyperparameters)) def unique_id(self): #return a dictionary of the method and the hyperparameters return (self.method, self.hyperparameters) class EstimatorNode(SklearnIndividualGenerator): - def __init__(self, method, space): + def __init__(self, method, space, hyperparameter_parser=default_hyperparameter_parser): self.method = method self.space = space + self.hyperparameter_parser = hyperparameter_parser def generate(self, rng=None): - return EstimatorNodeIndividual(self.method, self.space) \ No newline at end of file + return 
EstimatorNodeIndividual(self.method, self.space, hyperparameter_parser=self.hyperparameter_parser, rng=rng) \ No newline at end of file From a66ff10bf5f36ce7ebc64e44c72bb44b39bf6103 Mon Sep 17 00:00:00 2001 From: perib Date: Wed, 17 Apr 2024 21:47:53 -0700 Subject: [PATCH 5/6] edits --- tpot2/config/get_configspace.py | 7 +- tpot2/config/tests/test.ipynb | 264 +++++++++++++++++++++ tpot2/config/tests/test_get_configspace.py | 3 +- tpot2/tpot_estimator/tests/__init__.py | 0 4 files changed, 271 insertions(+), 3 deletions(-) create mode 100644 tpot2/config/tests/test.ipynb create mode 100644 tpot2/tpot_estimator/tests/__init__.py diff --git a/tpot2/config/get_configspace.py b/tpot2/config/get_configspace.py index cf75cd47..5706c4f6 100644 --- a/tpot2/config/get_configspace.py +++ b/tpot2/config/get_configspace.py @@ -156,7 +156,12 @@ "selectors_regression": ["SelectFwe", "SelectPercentile", "VarianceThreshold", "RFE_regression", "SelectFromModel_regression"], "classifiers" : ['AdaBoostClassifier', 'BernoulliNB', 'DecisionTreeClassifier', 'ExtraTreesClassifier', 'GaussianNB', 'HistGradientBoostingClassifier', 'KNeighborsClassifier', 'LogisticRegression', "LinearSVC", "SVC", 'MLPClassifier', 'MultinomialNB', "PassiveAggressiveClassifier", "QuadraticDiscriminantAnalysis", 'RandomForestClassifier', 'SGDClassifier', 'XGBClassifier'], "regressors" : ['AdaBoostRegressor', "ARDRegression", 'DecisionTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor', 'HistGradientBoostingRegressor', 'KNeighborsRegressor', 'LinearDiscriminantAnalysis', 'LinearSVR', "MLPRegressor", 'RandomForestRegressor', 'SGDRegressor', 'SVR', 'XGBRegressor'], - "transformers": ["Binarizer", "Normalizer", "PCA", "ZeroCount", "OneHotEncoder", "FastICA", "FeatureAgglomeration", "Nystroem", "RBFSampler", "QuantileTransformer", "PowerTransformer"], + + + "transformers": ["Binarizer", "PCA", "ZeroCount", "ColumnOneHotEncoder", "FastICA", "FeatureAgglomeration", "Nystroem", "RBFSampler", "QuantileTransformer", "PowerTransformer"], + "scalers": ["MinMaxScaler", "RobustScaler", "StandardScaler", "MaxAbsScaler", "Normalizer", ], + "all_transformers" : ["transformers", "scalers"], + "arithmatic": ["AddTransformer", "mul_neg_1_Transformer", "MulTransformer", "SafeReciprocalTransformer", "EQTransformer", "NETransformer", "GETransformer", "GTTransformer", "LETransformer", "LTTransformer", "MinTransformer", "MaxTransformer"], "imputers": [], "skrebate": ["ReliefF", "SURF", "SURFstar", "MultiSURF"], diff --git a/tpot2/config/tests/test.ipynb b/tpot2/config/tests/test.ipynb new file mode 100644 index 00000000..97580f08 --- /dev/null +++ b/tpot2/config/tests/test.ipynb @@ -0,0 +1,264 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import importlib.util\n", + "import sys\n", + "import numpy as np\n", + "import warnings\n", + "\n", + "\n", + "\n", + "from ConfigSpace import ConfigurationSpace\n", + "from ConfigSpace import ConfigurationSpace, Integer, Float, Categorical, Normal\n", + "\n", + "#autoqtl_builtins\n", + "from tpot2.builtin_modules import genetic_encoders\n", + "from tpot2.builtin_modules import feature_encoding_frequency_selector\n", + "\n", + "from sklearn.linear_model import SGDClassifier\n", + "from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier\n", + "from sklearn.neural_network import MLPClassifier\n", + "from sklearn.tree import DecisionTreeClassifier\n", + "from xgboost import XGBClassifier\n", + 
"from sklearn.neighbors import KNeighborsClassifier\n", + "from sklearn.svm import SVC\n", + "from sklearn.linear_model import LogisticRegression\n", + "from lightgbm import LGBMClassifier\n", + "from sklearn.svm import LinearSVC\n", + "from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB\n", + "from sklearn.ensemble import ExtraTreesRegressor, ExtraTreesClassifier\n", + "\n", + "\n", + "from tpot2.builtin_modules import ZeroCount, OneHotEncoder, ColumnOneHotEncoder\n", + "from sklearn.preprocessing import Binarizer\n", + "from sklearn.decomposition import FastICA\n", + "from sklearn.cluster import FeatureAgglomeration\n", + "from sklearn.preprocessing import MaxAbsScaler\n", + "from sklearn.preprocessing import MinMaxScaler\n", + "from sklearn.preprocessing import Normalizer\n", + "from sklearn.kernel_approximation import Nystroem\n", + "from sklearn.decomposition import PCA\n", + "from sklearn.preprocessing import PolynomialFeatures\n", + "from sklearn.kernel_approximation import RBFSampler\n", + "from sklearn.preprocessing import RobustScaler\n", + "from sklearn.preprocessing import StandardScaler\n", + "from sklearn.preprocessing import PowerTransformer, QuantileTransformer\n", + "\n", + "\n", + "from sklearn.feature_selection import SelectFwe\n", + "from sklearn.feature_selection import SelectPercentile\n", + "from sklearn.feature_selection import VarianceThreshold\n", + "from sklearn.feature_selection import RFE\n", + "from sklearn.feature_selection import SelectFromModel\n", + "\n", + "import sklearn.feature_selection\n", + "\n", + "#TODO create a selectomixin using these?\n", + "from sklearn.feature_selection import f_classif\n", + "from sklearn.feature_selection import f_regression\n", + "\n", + "\n", + "from sklearn.linear_model import SGDRegressor\n", + "from sklearn.linear_model import LinearRegression\n", + "from sklearn.linear_model import Ridge\n", + "from sklearn.linear_model import Lasso\n", + "from sklearn.linear_model import ElasticNet\n", + "from sklearn.linear_model import Lars\n", + "from sklearn.linear_model import LassoLars, LassoLarsCV\n", + "from sklearn.linear_model import RidgeCV\n", + "\n", + "from sklearn.svm import SVR, SVC\n", + "from sklearn.svm import LinearSVR, LinearSVC\n", + "\n", + "from sklearn.ensemble import AdaBoostRegressor, AdaBoostClassifier, GradientBoostingRegressor,RandomForestRegressor\n", + "from sklearn.ensemble import BaggingRegressor\n", + "from sklearn.ensemble import ExtraTreesRegressor\n", + "from sklearn.ensemble import HistGradientBoostingClassifier, HistGradientBoostingRegressor\n", + "from sklearn.tree import DecisionTreeRegressor\n", + "from sklearn.neighbors import KNeighborsRegressor\n", + "from sklearn.linear_model import ElasticNetCV\n", + "\n", + "from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n", + "\n", + "\n", + "from sklearn.gaussian_process import GaussianProcessRegressor\n", + "\n", + "from xgboost import XGBRegressor\n", + "\n", + "\n", + "from tpot2.builtin_modules import AddTransformer, mul_neg_1_Transformer, MulTransformer, SafeReciprocalTransformer, EQTransformer, NETransformer, GETransformer, GTTransformer, LETransformer, LTTransformer, MinTransformer, MaxTransformer, ZeroTransformer, OneTransformer, NTransformer\n", + "\n", + "\n", + "#MDR\n", + "\n", + "\n", + "all_methods = [SGDClassifier, RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier, MLPClassifier, DecisionTreeClassifier, XGBClassifier, KNeighborsClassifier, SVC, LogisticRegression, 
LGBMClassifier, LinearSVC, GaussianNB, BernoulliNB, MultinomialNB, ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor, BaggingRegressor, DecisionTreeRegressor, KNeighborsRegressor, XGBRegressor, ZeroCount, OneHotEncoder, ColumnOneHotEncoder, Binarizer, FastICA, FeatureAgglomeration, MaxAbsScaler, MinMaxScaler, Normalizer, Nystroem, PCA, PolynomialFeatures, RBFSampler, RobustScaler, StandardScaler, SelectFwe, SelectPercentile, VarianceThreshold, SGDRegressor, LinearRegression, Ridge, Lasso, ElasticNet, Lars, LassoLars, LassoLarsCV, RidgeCV, SVR, LinearSVR, AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor, BaggingRegressor, ExtraTreesRegressor, DecisionTreeRegressor, KNeighborsRegressor, ElasticNetCV,\n", + " AdaBoostClassifier,\n", + " GaussianProcessRegressor, HistGradientBoostingClassifier, HistGradientBoostingRegressor,\n", + " AddTransformer, mul_neg_1_Transformer, MulTransformer, SafeReciprocalTransformer, EQTransformer, NETransformer, GETransformer, GTTransformer, LETransformer, LTTransformer, MinTransformer, MaxTransformer, ZeroTransformer, OneTransformer, NTransformer,\n", + " PowerTransformer, QuantileTransformer,\n", + " ]\n", + "\n", + "\n", + "#if mdr is installed\n", + "if 'mdr' in sys.modules:\n", + " from mdr import MDR, ContinuousMDR\n", + " all_methods.append(MDR)\n", + " all_methods.append(ContinuousMDR)\n", + "\n", + "if 'skrebate' in sys.modules:\n", + " from skrebate import ReliefF, SURF, SURFstar, MultiSURF\n", + " all_methods.append(ReliefF)\n", + " all_methods.append(SURF)\n", + " all_methods.append(SURFstar)\n", + " all_methods.append(MultiSURF)\n", + "\n", + "if 'sklearnex' in sys.modules:\n", + " from sklearnex.linear_model import LinearRegression\n", + " from sklearnex.linear_model import Ridge\n", + " from sklearnex.linear_model import Lasso\n", + " from sklearnex.linear_model import ElasticNet\n", + " from sklearnex.svm import SVR\n", + " from sklearnex.svm import NuSVR\n", + " from sklearnex.ensemble import RandomForestRegressor\n", + " from sklearnex.neighbors import KNeighborsRegressor\n", + "\n", + " from sklearnex.ensemble import RandomForestClassifier\n", + " from sklearnex.neighbors import KNeighborsClassifier\n", + " from sklearnex.svm import SVC\n", + " from sklearnex.svm import NuSVC\n", + " from sklearnex.linear_model import LogisticRegression\n", + "\n", + "\n", + " all_methods.append(LinearRegression)\n", + " all_methods.append(Ridge)\n", + " all_methods.append(Lasso)\n", + " all_methods.append(ElasticNet)\n", + " all_methods.append(SVR)\n", + " all_methods.append(NuSVR)\n", + " all_methods.append(RandomForestRegressor)\n", + " all_methods.append(KNeighborsRegressor)\n", + " KNeighborsClassifier\n", + " all_methods.append(RandomForestClassifier)\n", + " all_methods.append(KNeighborsClassifier)\n", + " all_methods.append(SVC)\n", + " all_methods.append(NuSVC)\n", + " all_methods.append(LogisticRegression)\n", + "\n", + "\n", + "STRING_TO_CLASS = {\n", + " t.__name__: t for t in all_methods\n", + "}\n", + "\n", + "\n", + "from sklearn.linear_model import PassiveAggressiveClassifier\n", + "from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\n", + "from sklearn.linear_model import ARDRegression\n", + "from sklearn.gaussian_process import GaussianProcessRegressor\n", + "\n", + "GROUPNAMES = {\n", + " \"selectors\": [\"SelectFwe\", \"SelectPercentile\", \"VarianceThreshold\",],\n", + " \"selectors_classification\": [\"SelectFwe\", \"SelectPercentile\", \"VarianceThreshold\", 
\"RFE_classification\", \"SelectFromModel_classification\"],\n", + " \"selectors_regression\": [\"SelectFwe\", \"SelectPercentile\", \"VarianceThreshold\", \"RFE_regression\", \"SelectFromModel_regression\"],\n", + " \"classifiers\" : ['AdaBoostClassifier', 'BernoulliNB', 'DecisionTreeClassifier', 'ExtraTreesClassifier', 'GaussianNB', 'HistGradientBoostingClassifier', 'KNeighborsClassifier', 'LogisticRegression', \"LinearSVC\", \"SVC\", 'MLPClassifier', 'MultinomialNB', \"PassiveAggressiveClassifier\", \"QuadraticDiscriminantAnalysis\", 'RandomForestClassifier', 'SGDClassifier', 'XGBClassifier'],\n", + " \"regressors\" : ['AdaBoostRegressor', \"ARDRegression\", 'DecisionTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor', 'HistGradientBoostingRegressor', 'KNeighborsRegressor', 'LinearDiscriminantAnalysis', 'LinearSVR', \"MLPRegressor\", 'RandomForestRegressor', 'SGDRegressor', 'SVR', 'XGBRegressor'],\n", + " \"transformers\": [\"Binarizer\", \"Normalizer\", \"PCA\", \"ZeroCount\", \"OneHotEncoder\", \"FastICA\", \"FeatureAgglomeration\", \"Nystroem\", \"RBFSampler\", \"QuantileTransformer\", \"PowerTransformer\"],\n", + " \"arithmatic\": [\"AddTransformer\", \"mul_neg_1_Transformer\", \"MulTransformer\", \"SafeReciprocalTransformer\", \"EQTransformer\", \"NETransformer\", \"GETransformer\", \"GTTransformer\", \"LETransformer\", \"LTTransformer\", \"MinTransformer\", \"MaxTransformer\"],\n", + " \"imputers\": [],\n", + " \"skrebate\": [\"ReliefF\", \"SURF\", \"SURFstar\", \"MultiSURF\"],\n", + " \"genetic_encoders\": [\"DominantEncoder\", \"RecessiveEncoder\", \"HeterosisEncoder\", \"UnderDominanceEncoder\", \"OverDominanceEncoder\"],\n", + "\n", + " \"classifiers_sklearnex\" : [\"RandomForestClassifier_sklearnex\", \"LogisticRegression_sklearnex\", \"KNeighborsClassifier_sklearnex\", \"SVC_sklearnex\",\"NuSVC_sklearnex\"],\n", + " \"regressors_sklearnex\" : [\"LinearRegression_sklearnex\", \"Ridge_sklearnex\", \"Lasso_sklearnex\", \"ElasticNet_sklearnex\", \"SVR_sklearnex\", \"NuSVR_sklearnex\", \"RandomForestRegressor_sklearnex\", \"KNeighborsRegressor_sklearnex\"],\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "ename": "TypeError", + "evalue": "RFE.__init__() missing 1 required positional argument: 'estimator'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[2], line 30\u001b[0m\n\u001b[1;32m 26\u001b[0m estnode \u001b[38;5;241m=\u001b[39m estnode_gen\u001b[38;5;241m.\u001b[39mgenerate()\n\u001b[1;32m 27\u001b[0m est \u001b[38;5;241m=\u001b[39m estnode\u001b[38;5;241m.\u001b[39mexport_pipeline()\n\u001b[0;32m---> 30\u001b[0m \u001b[43mtest_loop_through_all_hyperparameters\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", + "Cell \u001b[0;32mIn[2], line 27\u001b[0m, in \u001b[0;36mtest_loop_through_all_hyperparameters\u001b[0;34m()\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;241m1\u001b[39m):\n\u001b[1;32m 26\u001b[0m estnode \u001b[38;5;241m=\u001b[39m estnode_gen\u001b[38;5;241m.\u001b[39mgenerate()\n\u001b[0;32m---> 27\u001b[0m est \u001b[38;5;241m=\u001b[39m \u001b[43mestnode\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mexport_pipeline\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n", + "File 
\u001b[0;32m~/common/Projects/TPOT_Dev/tpot2/tpot2/search_spaces/nodes/estimator_node.py:92\u001b[0m, in \u001b[0;36mEstimatorNodeIndividual.export_pipeline\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m 90\u001b[0m \u001b[38;5;129m@final\u001b[39m \u001b[38;5;66;03m#this method should not be overridden, instead override hyperparameter_parser\u001b[39;00m\n\u001b[1;32m 91\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mexport_pipeline\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[0;32m---> 92\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmethod\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhyperparameter_parser\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mhyperparameters\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n", + "\u001b[0;31mTypeError\u001b[0m: RFE.__init__() missing 1 required positional argument: 'estimator'" + ] + } + ], + "source": [ + "import pytest\n", + "import tpot2\n", + "from sklearn.datasets import load_iris\n", + "import random\n", + "import sklearn\n", + "\n", + "import tpot2.config\n", + "\n", + "import importlib.util\n", + "import sys\n", + "import numpy as np\n", + "import warnings\n", + "\n", + "def test_loop_through_all_hyperparameters():\n", + "\n", + " n_classes=3\n", + " n_samples=100\n", + " n_features=100\n", + " random_state=None\n", + "\n", + " for class_name, _ in STRING_TO_CLASS.items():\n", + " estnode_gen = tpot2.config.get_search_space(class_name, n_classes=n_classes, n_samples=n_samples, n_features=n_features, random_state=random_state)\n", + "\n", + " #generate 10 random hyperparameters and make sure they are all valid\n", + " for i in range(1):\n", + " estnode = estnode_gen.generate()\n", + " est = estnode.export_pipeline()\n", + " \n", + "\n", + "test_loop_through_all_hyperparameters()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tpot2.config.get_search_space(\"SGDClassifier\", n_classes=3, n_samples=100, n_features=5, random_state=5)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "tpot2env", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tpot2/config/tests/test_get_configspace.py b/tpot2/config/tests/test_get_configspace.py index a2ebcb59..bccb349f 100644 --- a/tpot2/config/tests/test_get_configspace.py +++ b/tpot2/config/tests/test_get_configspace.py @@ -19,8 +19,7 @@ def test_loop_through_all_hyperparameters(): estnode_gen = tpot2.config.get_search_space(class_name, n_classes=n_classes, n_samples=n_samples, n_features=n_features, random_state=random_state) #generate 10 random hyperparameters and make sure they are all valid - for i in range(10): + for i in range(1): estnode = estnode_gen.generate() est = estnode.export_pipeline() - \ No newline at end of file diff --git a/tpot2/tpot_estimator/tests/__init__.py b/tpot2/tpot_estimator/tests/__init__.py new file mode 100644 index 
00000000..e69de29b From ca42398b0952c5812ebe6679aeaa63f7fbbb5ca9 Mon Sep 17 00:00:00 2001 From: perib Date: Thu, 18 Apr 2024 10:54:26 -0700 Subject: [PATCH 6/6] edits --- tpot2/config/get_configspace.py | 16 ++++++++-------- tpot2/tests/test_estimators.py | 7 ++++--- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/tpot2/config/get_configspace.py b/tpot2/config/get_configspace.py index 5706c4f6..473233ea 100644 --- a/tpot2/config/get_configspace.py +++ b/tpot2/config/get_configspace.py @@ -88,7 +88,10 @@ from sklearn.linear_model import ElasticNetCV from sklearn.discriminant_analysis import LinearDiscriminantAnalysis - +from sklearn.linear_model import PassiveAggressiveClassifier +from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis +from sklearn.linear_model import ARDRegression +from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process import GaussianProcessRegressor @@ -105,7 +108,7 @@ AdaBoostClassifier, GaussianProcessRegressor, HistGradientBoostingClassifier, HistGradientBoostingRegressor, AddTransformer, mul_neg_1_Transformer, MulTransformer, SafeReciprocalTransformer, EQTransformer, NETransformer, GETransformer, GTTransformer, LETransformer, LTTransformer, MinTransformer, MaxTransformer, ZeroTransformer, OneTransformer, NTransformer, - PowerTransformer, QuantileTransformer, + PowerTransformer, QuantileTransformer,ARDRegression, QuadraticDiscriminantAnalysis, PassiveAggressiveClassifier, LinearDiscriminantAnalysis, ] @@ -145,17 +148,14 @@ } -from sklearn.linear_model import PassiveAggressiveClassifier -from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis -from sklearn.linear_model import ARDRegression -from sklearn.gaussian_process import GaussianProcessRegressor + GROUPNAMES = { "selectors": ["SelectFwe", "SelectPercentile", "VarianceThreshold",], "selectors_classification": ["SelectFwe", "SelectPercentile", "VarianceThreshold", "RFE_classification", "SelectFromModel_classification"], "selectors_regression": ["SelectFwe", "SelectPercentile", "VarianceThreshold", "RFE_regression", "SelectFromModel_regression"], - "classifiers" : ['AdaBoostClassifier', 'BernoulliNB', 'DecisionTreeClassifier', 'ExtraTreesClassifier', 'GaussianNB', 'HistGradientBoostingClassifier', 'KNeighborsClassifier', 'LogisticRegression', "LinearSVC", "SVC", 'MLPClassifier', 'MultinomialNB', "PassiveAggressiveClassifier", "QuadraticDiscriminantAnalysis", 'RandomForestClassifier', 'SGDClassifier', 'XGBClassifier'], - "regressors" : ['AdaBoostRegressor', "ARDRegression", 'DecisionTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor', 'HistGradientBoostingRegressor', 'KNeighborsRegressor', 'LinearDiscriminantAnalysis', 'LinearSVR', "MLPRegressor", 'RandomForestRegressor', 'SGDRegressor', 'SVR', 'XGBRegressor'], + "classifiers" : ['AdaBoostClassifier', 'BernoulliNB', 'DecisionTreeClassifier', 'ExtraTreesClassifier', 'GaussianNB', 'HistGradientBoostingClassifier', 'KNeighborsClassifier','LinearDiscriminantAnalysis', 'LogisticRegression', "LinearSVC", "SVC", 'MLPClassifier', 'MultinomialNB', "PassiveAggressiveClassifier", "QuadraticDiscriminantAnalysis", 'RandomForestClassifier', 'SGDClassifier', 'XGBClassifier'], + "regressors" : ['AdaBoostRegressor', "ARDRegression", 'DecisionTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor', 'HistGradientBoostingRegressor', 'KNeighborsRegressor', 'LinearSVR', "MLPRegressor", 'RandomForestRegressor', 'SGDRegressor', 'SVR', 'XGBRegressor'], "transformers": ["Binarizer", 
"PCA", "ZeroCount", "ColumnOneHotEncoder", "FastICA", "FeatureAgglomeration", "Nystroem", "RBFSampler", "QuantileTransformer", "PowerTransformer"], diff --git a/tpot2/tests/test_estimators.py b/tpot2/tests/test_estimators.py index 5c6f47ba..98b607e0 100644 --- a/tpot2/tests/test_estimators.py +++ b/tpot2/tests/test_estimators.py @@ -7,7 +7,8 @@ #standard test @pytest.fixture def tpot_estimator(): - return tpot2.TPOTEstimator( population_size=50, + return tpot2.TPOTEstimator( population_size=10, + generations=5, scorers=['roc_auc_ovr'], scorers_weights=[1], classification=True, @@ -81,11 +82,11 @@ def test_tpot_estimator_config_dict_type(): @pytest.fixture def tpot_classifier(): - return tpot2.tpot_estimator.templates.TPOTClassifier(max_time_seconds=300,verbose=3) + return tpot2.tpot_estimator.templates.TPOTClassifier(max_time_seconds=10,verbose=3) @pytest.fixture def tpot_regressor(): - return tpot2.tpot_estimator.templates.TPOTRegressor(max_time_seconds=300,verbose=3) + return tpot2.tpot_estimator.templates.TPOTRegressor(max_time_seconds=10,verbose=3) def test_tpot_classifier_fit(tpot_classifier,sample_dataset): #load iris dataset