{
  "architectures": [
    "FIMMJP"
  ],
  "base_model": "cvejoski/fim-mjp-test",
  "initial_distribution_decoder": {
    "dropout": 0.1,
    "hidden_act": {
      "name": "torch.nn.SELU"
    },
    "hidden_layers": [
      64,
      64
    ],
    "name": "fim.models.blocks.base.MLP"
  },
  "intensity_matrix_decoder": {
    "dropout": 0.1,
    "hidden_act": {
      "name": "torch.nn.SELU"
    },
    "hidden_layers": [
      64,
      64
    ],
    "name": "fim.models.blocks.base.MLP"
  },
  "model_type": "fimmjp",
  "n_states": 6,
  "path_attention": {
    "batch_first": true,
    "embed_dim": 64,
    "name": "torch.nn.MultiheadAttention",
    "num_heads": 4
  },
  "pos_encodings": {
    "name": "fim.models.blocks.positional_encodings.SineTimeEncoding",
    "out_features": 64
  },
  "torch_dtype": "float32",
  "transformers_version": "4.46.0",
  "ts_encoder": {
    "encoder_layer": {
      "batch_first": true,
      "d_model": 64,
      "dim_feedforward": 128,
      "dropout": 0.1,
      "name": "torch.nn.TransformerEncoderLayer",
      "nhead": 4
    },
    "name": "torch.nn.TransformerEncoder",
    "num_layers": 2
  },
  "use_adjacency_matrix": false,
  "use_num_of_paths": true
}