What's the difference between `adanet.Estimator` and `adanet.AutoEnsembleEstimator`? #134

Hi adanet team,

I'm confused about the difference between the `adanet.Estimator` and `adanet.AutoEnsembleEstimator` APIs. I noticed that `adanet.AutoEnsembleEstimator` was released in version 0.4, but the tutorials provided here all use `adanet.Estimator`. Is there any guidance on how to choose between these two APIs?
Comments
In addition to that question, here is my code and the result when I use `adanet.AutoEnsembleEstimator`.

```python
# Lint as: python3
import numpy as np
import tensorflow as tf
from time import time
from datetime import datetime
from absl import app
import adanet


def main(args):
  # Load the Boston Housing regression dataset.
  (x_train, y_train), (x_test, y_test) = (
      tf.keras.datasets.boston_housing.load_data())

  def input_fn(partition):
    """Returns an input_fn that yields the whole partition as a single batch."""
    def _input_fn():
      feat_tensor_dict = {}
      if partition == 'train':
        x = x_train.copy()
        y = y_train.copy()
      else:
        x = x_test.copy()
        y = y_test.copy()
      # One feature tensor per input dimension, named feat0, feat1, ...
      for i in range(np.size(x, 1)):
        feat_nam = 'feat' + str(i)
        feat_tensor_dict[feat_nam] = tf.convert_to_tensor(
            x[:, i], dtype=tf.float32)
      label_tensor = tf.convert_to_tensor(y, dtype=tf.float32)
      return feat_tensor_dict, label_tensor
    return _input_fn

  feat_nam_lst = ['feat' + str(i) for i in range(np.size(x_train, 1))]
  feature_columns = []
  for item in feat_nam_lst:
    feature_columns.append(tf.feature_column.numeric_column(item))

  head = tf.estimator.RegressionHead(1)

  # Candidate estimators with different architectures.
  lr_estimator = tf.estimator.LinearEstimator(
      head=head, feature_columns=feature_columns)
  dnn_estimator_1 = tf.estimator.DNNRegressor(
      feature_columns=feature_columns, hidden_units=[5])
  dnn_estimator_2 = tf.estimator.DNNRegressor(
      feature_columns=feature_columns, hidden_units=[5, 5])
  dnn_estimator_3 = tf.estimator.DNNRegressor(
      feature_columns=feature_columns, hidden_units=[100, 100])
  dnn_estimator_4 = tf.estimator.DNNRegressor(
      feature_columns=feature_columns, hidden_units=[50, 1500])

  folder_dir = "/Users/zhangjue/Desktop/autoensemble/"
  logdir_adanet = folder_dir + "adanet/" + datetime.now().strftime("%Y%m%d-%H%M%S")
  config = tf.estimator.RunConfig(model_dir=logdir_adanet)

  estimator = adanet.AutoEnsembleEstimator(
      head=head,
      candidate_pool=lambda config: {
          'dnn1': dnn_estimator_1,
          'dnn2': dnn_estimator_2,
          'dnn3': dnn_estimator_3,
          'dnn4': dnn_estimator_4,
      },
      max_iteration_steps=5000,
      config=config)

  train_spec = tf.estimator.TrainSpec(
      input_fn=input_fn(partition='train'), max_steps=5000)
  eval_spec = tf.estimator.EvalSpec(input_fn=input_fn(partition='test'))
  result, _ = tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
  print(result)


if __name__ == "__main__":
  app.run(main)
```
Here is the result.
@jeffltc If you want to do multiple boosting rounds, make sure that your `TrainSpec`'s `max_steps` is greater than `max_iteration_steps`; with both set to 5000 as in your code, training stops after a single AdaNet iteration. You should also create your estimators within the `candidate_pool` lambda, so that each candidate is constructed with the `config` that AdaNet passes in. For example:

```python
# Lint as: python3
import numpy as np
import tensorflow as tf
from time import time
from datetime import datetime
from absl import app
import adanet


def main(args):
  (x_train, y_train), (x_test, y_test) = (
      tf.keras.datasets.boston_housing.load_data())

  def input_fn(partition):
    def _input_fn():
      feat_tensor_dict = {}
      if partition == 'train':
        x = x_train.copy()
        y = y_train.copy()
      else:
        x = x_test.copy()
        y = y_test.copy()
      for i in range(np.size(x, 1)):
        feat_nam = 'feat' + str(i)
        feat_tensor_dict[feat_nam] = tf.convert_to_tensor(
            x[:, i], dtype=tf.float32)
      label_tensor = tf.convert_to_tensor(y, dtype=tf.float32)
      return feat_tensor_dict, label_tensor
    return _input_fn

  feat_nam_lst = ['feat' + str(i) for i in range(np.size(x_train, 1))]
  feature_columns = []
  for item in feat_nam_lst:
    feature_columns.append(tf.feature_column.numeric_column(item))

  head = tf.estimator.RegressionHead(1)

  folder_dir = "/Users/zhangjue/Desktop/autoensemble/"
  logdir_adanet = folder_dir + "adanet/" + datetime.now().strftime("%Y%m%d-%H%M%S")
  config = tf.estimator.RunConfig(model_dir=logdir_adanet)

  estimator = adanet.AutoEnsembleEstimator(
      head=head,
      ensemble_strategies=[
          adanet.ensemble.GrowStrategy(),
          adanet.ensemble.AllStrategy(),
      ],
      # Construct the candidates inside the lambda so each one receives the
      # config that AdaNet passes in.
      candidate_pool=lambda config: {
          "lr": tf.estimator.LinearEstimator(
              head=head, feature_columns=feature_columns, config=config),
          "dnn1": tf.estimator.DNNRegressor(
              feature_columns=feature_columns, hidden_units=[5], config=config),
          "dnn2": tf.estimator.DNNRegressor(
              feature_columns=feature_columns, hidden_units=[5, 5], config=config),
          "dnn3": tf.estimator.DNNRegressor(
              feature_columns=feature_columns, hidden_units=[100, 100], config=config),
          "dnn4": tf.estimator.DNNRegressor(
              feature_columns=feature_columns, hidden_units=[50, 1500], config=config),
      },
      max_iteration_steps=5000,
      evaluator=adanet.Evaluator(input_fn=input_fn(partition='test')),
      config=config)

  train_spec = tf.estimator.TrainSpec(
      input_fn=input_fn(partition='train'),
      # 3 * max_iteration_steps allows roughly three AdaNet iterations.
      max_steps=5000 * 3)
  eval_spec = tf.estimator.EvalSpec(input_fn=input_fn(partition='test'))
  result, _ = tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
  print(result)


if __name__ == "__main__":
  app.run(main)
```
@cweill Thank you so much! Crystal clear! Could you please also explain the difference between `adanet.Estimator` and `adanet.AutoEnsembleEstimator`?
@jeffltc I'm glad I could help! @zhiqwang:
If you already have a set of `tf.estimator.Estimator`s (canned or custom) that you want to ensemble, use `adanet.AutoEnsembleEstimator`: it treats each estimator in the `candidate_pool` as a candidate subnetwork. If you want full control over how each candidate subnetwork is built and trained at every iteration, use `adanet.Estimator` with your own `adanet.subnetwork.Builder` and `adanet.subnetwork.Generator`.
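For anyone comparing the two APIs side by side, here is a rough, untested sketch of what the `adanet.Estimator` route would look like for the same regression problem, modeled on the subnetwork `Builder`/`Generator` pattern from the adanet tutorials. The class names `_SimpleDNNBuilder` and `_SimpleDNNGenerator` are illustrative only, and `head`, `feature_columns`, `config`, and `input_fn` are assumed to be defined as in the snippets above.

```python
import adanet
import tensorflow as tf


class _SimpleDNNBuilder(adanet.subnetwork.Builder):
  """Illustrative builder for one fixed-architecture DNN candidate."""

  def __init__(self, feature_columns, hidden_units, learning_rate=0.001):
    self._feature_columns = feature_columns
    self._hidden_units = hidden_units
    self._learning_rate = learning_rate

  def build_subnetwork(self, features, logits_dimension, training,
                       iteration_step, summary, previous_ensemble=None):
    # With adanet.Estimator you write the forward pass yourself.
    net = tf.compat.v1.feature_column.input_layer(features, self._feature_columns)
    for units in self._hidden_units:
      net = tf.compat.v1.layers.dense(net, units=units, activation=tf.nn.relu)
    logits = tf.compat.v1.layers.dense(net, units=logits_dimension)
    return adanet.Subnetwork(
        last_layer=net,
        logits=logits,
        # A crude complexity measure used by the AdaNet objective.
        complexity=tf.constant(float(len(self._hidden_units))))

  def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
                                iteration_step, summary, previous_ensemble):
    # You also define how each candidate subnetwork is trained.
    return tf.compat.v1.train.AdamOptimizer(self._learning_rate).minimize(
        loss=loss, var_list=var_list)

  @property
  def name(self):
    return 'dnn_' + '_'.join(str(u) for u in self._hidden_units)


class _SimpleDNNGenerator(adanet.subnetwork.Generator):
  """Illustrative generator proposing two candidates at every iteration."""

  def __init__(self, feature_columns):
    self._feature_columns = feature_columns

  def generate_candidates(self, previous_ensemble, iteration_number,
                          previous_ensemble_reports, all_reports):
    return [
        _SimpleDNNBuilder(self._feature_columns, hidden_units=[5]),
        _SimpleDNNBuilder(self._feature_columns, hidden_units=[5, 5]),
    ]


# head, feature_columns, config, and input_fn as defined in the code above.
estimator = adanet.Estimator(
    head=head,
    subnetwork_generator=_SimpleDNNGenerator(feature_columns),
    max_iteration_steps=5000,
    evaluator=adanet.Evaluator(input_fn=input_fn(partition='test')),
    config=config)
```

The practical difference shows up here: with `adanet.Estimator` you define the forward pass and train op for every candidate yourself, whereas `AutoEnsembleEstimator` reuses existing canned estimators as the candidates.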