diff --git a/examples/model_selection_psql/msmlp/model.py b/examples/model_selection_psql/msmlp/model.py
index 81ccd5b05..be898b291 100644
--- a/examples/model_selection_psql/msmlp/model.py
+++ b/examples/model_selection_psql/msmlp/model.py
@@ -32,9 +32,7 @@
 singa_dtype = {"float16": tensor.float16, "float32": tensor.float32}
 
-#### self-defined loss begin
-
-### reference from autograd.py
+### refer to autograd.py
 
 class SumError(Operator):
 
     def __init__(self):
@@ -52,7 +50,7 @@ def forward(self, x):
         #     self.n *= s
         # loss /= self.n
         return loss
-    
+
     def backward(self, dy=1.0):
         # dx = self.err
         dev = device.get_default_device()
@@ -62,12 +60,21 @@ def backward(self, dy=1.0):
         dx *= dy
         return dx
 
-### called in the MSMLP class for sum error loss gradients
 def se_loss(x):
-    # assert x.shape == t.shape, "input and target shape different: %s, %s" % (
-    #     x.shape, t.shape)
     return SumError()(x)[0]
 
+### refer to layer.py
+class SumErrorLayer(Layer):
+    """
+    Generate a SumError Layer
+    """
+
+    def __init__(self):
+        super(SumErrorLayer, self).__init__()
+
+    def forward(self, x):
+        return se_loss(x)
+
 class MSMLP(model.Model):
 
     def __init__(self, data_size=10, perceptron_size=100, num_classes=10):
@@ -79,8 +86,8 @@ def __init__(self, data_size=10, perceptron_size=100, num_classes=10):
         self.linear1 = layer.Linear(perceptron_size)
         self.linear2 = layer.Linear(num_classes)
         self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
-        self.sum_error = SumErrorLayer()
-
+        self.sum_error = SumErrorLayer() # for synflow backward
+
     def forward(self, inputs):
         y = self.linear1(inputs)
         y = self.relu(y)
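
Usage note (editor's addition, not part of the patch): the new `SumErrorLayer` supports SynFlow-style scoring, where the network is fed an all-ones input, the outputs are summed into a single scalar by `se_loss`, and that scalar is backpropagated so every parameter receives a gradient for the saliency score |theta * dL/dtheta|. The sketch below follows the classic `autograd.backward` pattern from the SINGA 3.x examples; the names `net`, `x_ones`, and `scores` are illustrative only, and the exact training-mode setup may differ across SINGA versions.

```python
# Sketch: SynFlow-style scoring with the SumErrorLayer added in this patch.
# Assumes SINGA 3.x-style autograd; names here are illustrative, not from the patch.
from singa import autograd, device, tensor

dev = device.get_default_device()

net = MSMLP(data_size=10, perceptron_size=100, num_classes=10)
net.train()  # training mode, so the autograd graph is recorded

# SynFlow scores a network on an all-ones input instead of real data.
x_ones = tensor.Tensor((1, 10), dev)
x_ones.set_value(1.0)

out = net.forward(x_ones)   # linear1 -> relu -> linear2 logits
loss = net.sum_error(out)   # scalar sum of all outputs (SumError op)

# Each parameter's SynFlow saliency is |theta * dL/dtheta|.
scores = [tensor.to_numpy(tensor.abs(p * g)).sum()
          for p, g in autograd.backward(loss)]
```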