
Commit

bumped tsai version
oguiza committed Jan 31, 2024
1 parent bbc0d93 commit a4be67e
Showing 5 changed files with 70 additions and 70 deletions.
76 changes: 38 additions & 38 deletions nbs/030_models.utils.ipynb
@@ -66,7 +66,7 @@
"#|export\n",
"def SeqTokenizer(c_in, embed_dim, token_size=60, norm=False):\n",
" \"Generates non-overlapping tokens from sub-sequences within a sequence by applying a sliding window\"\n",
" return ConvBlock(c_in, embed_dim, token_size, stride=token_size, padding=0, act=None, \n",
" return ConvBlock(c_in, embed_dim, token_size, stride=token_size, padding=0, act=None,\n",
" norm='Batch' if norm else None, bias=norm is None)\n",
"\n",
"SeqEmbed = SeqTokenizer"
@@ -149,18 +149,18 @@
"#|export\n",
"def check_bias(m, cond=noop, verbose=False):\n",
" mean, std = [], []\n",
" for i,l in enumerate(get_layers(m, cond=cond)): \n",
" if hasattr(l, 'bias') and l.bias is not None: \n",
" for i,l in enumerate(get_layers(m, cond=cond)):\n",
" if hasattr(l, 'bias') and l.bias is not None:\n",
" b = l.bias.data\n",
" mean.append(b.mean())\n",
" std.append(b.std())\n",
" pv(f'{i:3} {l.__class__.__name__:15} shape: {str(list(b.shape)):15} mean: {b.mean():+6.3f} std: {b.std():+6.3f}', verbose)\n",
" return np.array(mean), np.array(std)\n",
" \n",
"\n",
"def check_weight(m, cond=noop, verbose=False):\n",
" mean, std = [], []\n",
" for i,l in enumerate(get_layers(m, cond=cond)): \n",
" if hasattr(l, 'weight') and l.weight is not None: \n",
" for i,l in enumerate(get_layers(m, cond=cond)):\n",
" if hasattr(l, 'weight') and l.weight is not None:\n",
" w = l.weight.data\n",
" mean.append(w.mean())\n",
" std.append(w.std())\n",
@@ -234,10 +234,10 @@
"outputs": [],
"source": [
"#|export\n",
"def build_ts_model(arch, c_in=None, c_out=None, seq_len=None, d=None, dls=None, device=None, verbose=False, \n",
" s_cat_idxs=None, s_cat_embeddings=None, s_cat_embedding_dims=None, s_cont_idxs=None, \n",
"def build_ts_model(arch, c_in=None, c_out=None, seq_len=None, d=None, dls=None, device=None, verbose=False,\n",
" s_cat_idxs=None, s_cat_embeddings=None, s_cat_embedding_dims=None, s_cont_idxs=None,\n",
" o_cat_idxs=None, o_cat_embeddings=None, o_cat_embedding_dims=None, o_cont_idxs=None,\n",
" patch_len=None, patch_stride=None, fusion_layers=128, fusion_act='relu', fusion_dropout=0., fusion_use_bn=True, \n",
" patch_len=None, patch_stride=None, fusion_layers=128, fusion_act='relu', fusion_dropout=0., fusion_use_bn=True,\n",
" pretrained=False, weights_path=None, exclude_head=True, cut=-1, init=None, arch_config={}, **kwargs):\n",
"\n",
" device = ifnone(device, default_device())\n",
@@ -246,20 +246,20 @@
" c_out = ifnone(c_out, dls.c)\n",
" seq_len = ifnone(seq_len, dls.len)\n",
" d = ifnone(d, dls.d)\n",
" \n",
"\n",
" if s_cat_idxs or s_cat_embeddings or s_cat_embedding_dims or s_cont_idxs or o_cat_idxs or o_cat_embeddings or o_cat_embedding_dims or o_cont_idxs:\n",
" from tsai.models.multimodal import MultInputWrapper\n",
" model = MultInputWrapper(arch, c_in=c_in, c_out=c_out, seq_len=seq_len, d=d,\n",
" s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims, s_cont_idxs=s_cont_idxs, \n",
" o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims, o_cont_idxs=o_cont_idxs, \n",
" patch_len=patch_len, patch_stride=patch_stride, \n",
" fusion_layers=fusion_layers, fusion_act=fusion_act, fusion_dropout=fusion_dropout, fusion_use_bn=fusion_use_bn,\n",
" s_cat_idxs=s_cat_idxs, s_cat_embeddings=s_cat_embeddings, s_cat_embedding_dims=s_cat_embedding_dims, s_cont_idxs=s_cont_idxs,\n",
" o_cat_idxs=o_cat_idxs, o_cat_embeddings=o_cat_embeddings, o_cat_embedding_dims=o_cat_embedding_dims, o_cont_idxs=o_cont_idxs,\n",
" patch_len=patch_len, patch_stride=patch_stride,\n",
" fusion_layers=fusion_layers, fusion_act=fusion_act, fusion_dropout=fusion_dropout, fusion_use_bn=fusion_use_bn,**arch_config,\n",
" **kwargs)\n",
" else:\n",
" if d and arch.__name__ not in [\"PatchTST\", \"PatchTSTPlus\", 'TransformerRNNPlus', 'TransformerLSTMPlus', 'TransformerGRUPlus', \n",
" \"RNN_FCNPlus\", \"LSTM_FCNPlus\", \"GRU_FCNPlus\", \"MRNN_FCNPlus\", \"MLSTM_FCNPlus\", \"MGRU_FCNPlus\", \n",
" if d and arch.__name__ not in [\"PatchTST\", \"PatchTSTPlus\", 'TransformerRNNPlus', 'TransformerLSTMPlus', 'TransformerGRUPlus',\n",
" \"RNN_FCNPlus\", \"LSTM_FCNPlus\", \"GRU_FCNPlus\", \"MRNN_FCNPlus\", \"MLSTM_FCNPlus\", \"MGRU_FCNPlus\",\n",
" \"RNNAttentionPlus\", \"LSTMAttentionPlus\", \"GRUAttentionPlus\", \"ConvTran\", \"ConvTranPlus\"]:\n",
" if 'custom_head' not in kwargs.keys(): \n",
" if 'custom_head' not in kwargs.keys():\n",
" if \"rocket\" in arch.__name__.lower() or 'hydra' in arch.__name__.lower():\n",
" kwargs['custom_head'] = partial(rocket_nd_head, d=d)\n",
" elif \"xresnet1d\" in arch.__name__.lower():\n",
@@ -271,13 +271,13 @@
" if 'ltsf_' in arch.__name__.lower() or 'patchtst' in arch.__name__.lower():\n",
" pv(f'arch: {arch.__name__}(c_in={c_in} c_out={c_out} seq_len={seq_len} pred_dim={d} arch_config={arch_config}, kwargs={kwargs})', verbose)\n",
" model = (arch(c_in=c_in, c_out=c_out, seq_len=seq_len, pred_dim=d, **arch_config, **kwargs)).to(device=device)\n",
" elif arch.__name__ in ['TransformerRNNPlus', 'TransformerLSTMPlus', 'TransformerGRUPlus', \"RNN_FCNPlus\", \"LSTM_FCNPlus\", \"GRU_FCNPlus\", \"MRNN_FCNPlus\", \n",
" elif arch.__name__ in ['TransformerRNNPlus', 'TransformerLSTMPlus', 'TransformerGRUPlus', \"RNN_FCNPlus\", \"LSTM_FCNPlus\", \"GRU_FCNPlus\", \"MRNN_FCNPlus\",\n",
" \"MLSTM_FCNPlus\", \"MGRU_FCNPlus\", \"RNNAttentionPlus\", \"LSTMAttentionPlus\", \"GRUAttentionPlus\", \"ConvTran\", \"ConvTranPlus\", 'mWDNPlus']:\n",
" pv(f'arch: {arch.__name__}(c_in={c_in} c_out={c_out} seq_len={seq_len} d={d} arch_config={arch_config}, kwargs={kwargs})', verbose)\n",
" model = (arch(c_in=c_in, c_out=c_out, seq_len=seq_len, d=d, **arch_config, **kwargs)).to(device=device)\n",
" elif sum([1 for v in ['RNN_FCN', 'LSTM_FCN', 'RNNPlus', 'LSTMPlus', 'GRUPlus', 'InceptionTime', 'TSiT', 'Sequencer', 'XceptionTimePlus',\n",
" 'GRU_FCN', 'OmniScaleCNN', 'mWDN', 'TST', 'XCM', 'MLP', 'MiniRocket', 'InceptionRocket', 'ResNetPlus', \n",
" 'RNNAttention', 'LSTMAttention', 'GRUAttention', 'MultiRocket', 'MultiRocketPlus', 'Hydra', 'HydraPlus', \n",
" 'GRU_FCN', 'OmniScaleCNN', 'mWDN', 'TST', 'XCM', 'MLP', 'MiniRocket', 'InceptionRocket', 'ResNetPlus',\n",
" 'RNNAttention', 'LSTMAttention', 'GRUAttention', 'MultiRocket', 'MultiRocketPlus', 'Hydra', 'HydraPlus',\n",
" 'HydraMultiRocket', 'HydraMultiRocketPlus']\n",
" if v in arch.__name__]):\n",
" pv(f'arch: {arch.__name__}(c_in={c_in} c_out={c_out} seq_len={seq_len} arch_config={arch_config} kwargs={kwargs})', verbose)\n",
@@ -324,7 +324,7 @@
" setattr(model, \"__name__\", arch.__name__)\n",
"\n",
" return model\n",
" \n",
"\n",
"build_model = build_ts_model\n",
"create_model = build_ts_model"
]
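
A minimal sketch of calling build_ts_model with explicit shapes rather than inferring them from a DataLoaders object (the architecture chosen here is only illustrative):

    from tsai.models.InceptionTimePlus import InceptionTimePlus
    from tsai.models.utils import build_ts_model  # assumed export path

    model = build_ts_model(InceptionTimePlus, c_in=3, c_out=2, seq_len=100)
    print(model.__name__)  # 'InceptionTimePlus', set via the setattr call above
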
@@ -354,10 +354,10 @@
" if dls is not None:\n",
" c_in = ifnone(c_in, dls.vars)\n",
" c_out = ifnone(c_out, dls.c)\n",
" \n",
"\n",
" model = arch(pretrained=pretrained, c_in=c_in, n_out=c_out, **arch_config, **kwargs).to(device=device)\n",
" setattr(model, \"__name__\", arch.__name__)\n",
" if init is not None: \n",
" if init is not None:\n",
" apply_init(model[1] if pretrained else model, init)\n",
" return model"
]
@@ -378,7 +378,7 @@
" assert n_out, \"`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`\"\n",
" if y_range is None and 'y_range' in kwargs: y_range = kwargs.pop('y_range')\n",
" model = arch(emb_szs, len(dls.cont_names), n_out, layers, y_range=y_range, **arch_config, **kwargs).to(device=device)\n",
" \n",
"\n",
" if hasattr(model, \"head_nf\"): head_nf = model.head_nf\n",
" else: head_nf = get_nf(model)\n",
" setattr(model, \"__name__\", arch.__name__)\n",
@@ -472,10 +472,10 @@
" if seq_len is None:\n",
" seq_len = 50\n",
" return_q_len = False\n",
" try: \n",
" try:\n",
" params_0 = list(mod.parameters())[0]\n",
" xb = torch.rand(1, c_in, seq_len, device=params_0.device, dtype=params_0.dtype)\n",
" except: \n",
" except:\n",
" xb = torch.rand(1, c_in, seq_len)\n",
" training = mod.training\n",
" mod.eval()\n",
@@ -487,7 +487,7 @@
" mod.training = training\n",
" if return_q_len:\n",
" return c_out, q_len\n",
" else: \n",
" else:\n",
" return c_out, None"
]
},
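
The cell above infers a model's output size by probing it: push a random (1, c_in, seq_len) batch through the module in eval mode, read the output shape, then restore the training flag. A standalone sketch of that pattern (probe_output_shape is a hypothetical helper, not part of tsai):

    import torch

    def probe_output_shape(mod, c_in, seq_len=50):
        "Run a dummy batch through `mod` to infer its output shape."
        was_training = mod.training
        mod.eval()
        with torch.no_grad():
            out = mod(torch.rand(1, c_in, seq_len))
        mod.train(was_training)
        return tuple(out.shape)
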
@@ -542,7 +542,7 @@
"\n",
"def true_forecaster(o, split, horizon=1):\n",
" o_true = o[split]\n",
" if is_listy(horizon): \n",
" if is_listy(horizon):\n",
" o_true = o_true[np.newaxis].repeat(len(horizon), 0)\n",
" return o_true"
]
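
A hedged usage sketch of true_forecaster on a toy series, producing the kind of arrays shown in the output below:

    import numpy as np

    o = np.cumsum(np.random.rand(20))     # toy monotonically increasing series
    split = np.arange(10, 20)             # indices of the evaluation segment
    true_forecaster(o, split, horizon=1)  # returns o[split], the ground-truth values
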
@@ -555,14 +555,14 @@
{
"data": {
"text/plain": [
"(array([0.35514714, 0.79371588, 0.89324833, 1.78069172, 2.26929887,\n",
" 2.8927336 , 2.93275611, 3.693081 , 4.34535574, 5.22502166,\n",
" 5.42378301, 5.57022079, 5.61515208, 5.66301663, 5.87813728,\n",
" 6.26954548, 7.12932702, 7.40652216, 7.58915979, 8.19573115]),\n",
" array([5.22502166, 5.42378301, 5.57022079, 5.61515208, 5.66301663,\n",
" 5.87813728, 6.26954548, 7.12932702, 7.40652216, 7.58915979]),\n",
" array([5.42378301, 5.57022079, 5.61515208, 5.66301663, 5.87813728,\n",
" 6.26954548, 7.12932702, 7.40652216, 7.58915979, 8.19573115]))"
"(array([ 0.74775537, 1.41245663, 2.12445924, 2.8943163 , 3.56384351,\n",
" 4.23789602, 4.83134182, 5.18560431, 5.30551186, 6.29076506,\n",
" 6.58873471, 7.03661275, 7.0884361 , 7.57927022, 8.21911791,\n",
" 8.59726773, 9.37382718, 10.17298849, 10.40118308, 10.82265631]),\n",
" array([ 6.29076506, 6.58873471, 7.03661275, 7.0884361 , 7.57927022,\n",
" 8.21911791, 8.59726773, 9.37382718, 10.17298849, 10.40118308]),\n",
" array([ 6.58873471, 7.03661275, 7.0884361 , 7.57927022, 8.21911791,\n",
" 8.59726773, 9.37382718, 10.17298849, 10.40118308, 10.82265631]))"
]
},
"execution_count": null,
@@ -595,9 +595,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
"/Users/nacho/notebooks/tsai/nbs/030_models.utils.ipynb saved at 2023-07-04 17:34:15\n",
"/Users/nacho/notebooks/tsai/nbs/030_models.utils.ipynb saved at 2024-01-31 13:03:06\n",
"Correct notebook to script conversion! 😃\n",
"Tuesday 04/07/23 17:34:17 CEST\n"
"Wednesday 31/01/24 13:03:08 CET\n"
]
},
{
Binary file modified nbs/models/test.pth
Binary file not shown.
6 changes: 3 additions & 3 deletions settings.ini
@@ -4,7 +4,7 @@ lib_name = tsai
repo = tsai
user = timeseriesAI
branch = main
-version = 0.3.8
+version = 0.3.9
description = Practical Deep Learning for Time Series / Sequential Data library based on fastai & Pytorch
keywords = fastai time-series time-series-classification time-series-regression deep-learning Pytorch
author = Ignacio Oguiza and contributors
@@ -35,8 +35,8 @@ recursive = True
clean_ids = True
black_formatting = False
readme_nb = index.ipynb
-allowed_metadata_keys =
-allowed_cell_metadata_keys =
+allowed_metadata_keys =
+allowed_cell_metadata_keys =
jupyter_hooks = True
clear_all = False
put_version_in_init = True
2 changes: 1 addition & 1 deletion tsai/__init__.py
@@ -1 +1 @@
__version__ = "0.3.8"
__version__ = "0.3.9"