diff --git a/configs/downstream/all.yaml b/configs/downstream/all.yaml index 6b19631..e4f556d 100644 --- a/configs/downstream/all.yaml +++ b/configs/downstream/all.yaml @@ -25,20 +25,20 @@ datasets: task: multilabel pre-transforms: model: - - name: rf - n_estimators: 500 - n_jobs: -1 - random_state: 42 - - name: svm - random_state: 42 - - name: xgb - random_state: 42 + # - name: rf + # n_estimators: 500 + # n_jobs: -1 + # random_state: 42 + # - name: svm + # random_state: 42 + # - name: xgb + # random_state: 42 - name: mlp feat_dim: 1024 hidden_dim: 1024 batch_size: 256 num_layers: 3 - epochs: 1 + epochs: 100 patience: 30 learning_rate: 0 optimizer: Adam diff --git a/gifflar/model/baselines/rgcn.py b/gifflar/model/baselines/rgcn.py index a37e761..7aff5bd 100644 --- a/gifflar/model/baselines/rgcn.py +++ b/gifflar/model/baselines/rgcn.py @@ -63,7 +63,7 @@ def __init__( **kwargs) self.convs = torch.nn.ModuleList() - dims = [kwargs["feat_dim"], hidden_dim // 2] + [hidden_dim] * (num_layers - 1) + dims = [feat_dim, hidden_dim // 2] + [hidden_dim] * (num_layers - 1) for i in range(num_layers): convs = { # Set the inner layers to be a single weight without using the nodes embedding (therefore, e=-1)