Upload 11 files
- .DS_Store +0 -0
- .gitattributes +36 -35
- .gitignore +0 -0
- README.md +195 -0
- config.yaml +33 -0
- dataset.tsv +15 -0
- global_cmvn +1 -0
- model.safetensors +3 -0
- paper.pdf +3 -0
- pytorch_model.bin +3 -0
- vocab.txt +0 -0
.DS_Store
ADDED
Binary file (6.15 kB).
.gitattributes
CHANGED
@@ -1,35 +1,36 @@
 *.7z filter=lfs diff=lfs merge=lfs -text
 *.arrow filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
 *.bz2 filter=lfs diff=lfs merge=lfs -text
 *.ckpt filter=lfs diff=lfs merge=lfs -text
 *.ftz filter=lfs diff=lfs merge=lfs -text
 *.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
 *.npy filter=lfs diff=lfs merge=lfs -text
 *.npz filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.parquet filter=lfs diff=lfs merge=lfs -text
 *.pb filter=lfs diff=lfs merge=lfs -text
 *.pickle filter=lfs diff=lfs merge=lfs -text
 *.pkl filter=lfs diff=lfs merge=lfs -text
 *.pt filter=lfs diff=lfs merge=lfs -text
 *.pth filter=lfs diff=lfs merge=lfs -text
 *.rar filter=lfs diff=lfs merge=lfs -text
 *.safetensors filter=lfs diff=lfs merge=lfs -text
 saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.* filter=lfs diff=lfs merge=lfs -text
 *.tar filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
 *.tgz filter=lfs diff=lfs merge=lfs -text
 *.wasm filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+paper.pdf filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
File without changes
README.md
ADDED
@@ -0,0 +1,195 @@
---
language: vie
datasets:
- legacy-datasets/common_voice
- vlsp2020_vinai_100h
- AILAB-VNUHCM/vivos
- doof-ferb/vlsp2020_vinai_100h
- doof-ferb/fpt_fosd
- doof-ferb/infore1_25hours
- linhtran92/viet_bud500
- doof-ferb/LSVSC
- doof-ferb/vais1000
- doof-ferb/VietMed_labeled
- NhutP/VSV-1100
- doof-ferb/Speech-MASSIVE_vie
- doof-ferb/BibleMMS_vie
- capleaf/viVoice
metrics:
- wer
pipeline_tag: automatic-speech-recognition
tags:
- transcription
- audio
- speech
- chunkformer
- asr
- automatic-speech-recognition
license: cc-by-nc-4.0
model-index:
- name: ChunkFormer Large Vietnamese
  results:
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: common-voice-vietnamese
      type: common_voice
      args: vi
    metrics:
    - name: Test WER
      type: wer
      value: 6.66
    source:
      name: Common Voice Vi Leaderboard
      url: https://paperswithcode.com/sota/speech-recognition-on-common-voice-vi
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: VIVOS
      type: vivos
      args: vi
    metrics:
    - name: Test WER
      type: wer
      value: 4.18
    source:
      name: Vivos Leaderboard
      url: https://paperswithcode.com/sota/speech-recognition-on-vivos
  - task:
      name: Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: VLSP - Task 1
      type: vlsp
      args: vi
    metrics:
    - name: Test WER
      type: wer
      value: 14.09
---

# **ChunkFormer-Large-Vie: Large-Scale Pretrained ChunkFormer for Vietnamese Automatic Speech Recognition**
<style>
img {
    display: inline;
}
</style>
[](https://paperswithcode.com/sota/speech-recognition-on-common-voice-vi)
[](https://paperswithcode.com/sota/speech-recognition-on-vivos)

[](https://creativecommons.org/licenses/by-nc/4.0/)
[](https://github.com/khanld/chunkformer)
[](https://arxiv.org/abs/2502.14673)
[](#description)

---
## Table of contents
1. [Model Description](#description)
2. [Documentation and Implementation](#implementation)
3. [Benchmark Results](#benchmark)
4. [Usage](#usage)
5. [Citation](#citation)
6. [Contact](#contact)

---
<a name = "description" ></a>
## Model Description
**ChunkFormer-Large-Vie** is a large-scale Vietnamese Automatic Speech Recognition (ASR) model based on the **ChunkFormer** architecture, introduced at **ICASSP 2025**. The model has been fine-tuned on approximately **3000 hours** of public Vietnamese speech data sourced from diverse datasets. A list of datasets can be found [**HERE**](dataset.tsv).

**!!! Please note that only the \[train-subset\] of each dataset was used for fine-tuning the model.**

---
<a name = "implementation" ></a>
## Documentation and Implementation
The [Documentation]() and [Implementation](https://github.com/khanld/chunkformer) of ChunkFormer are publicly available.

---
<a name = "benchmark" ></a>
## Benchmark Results
We evaluate the models using **Word Error Rate (WER)**. To ensure a consistent and fair comparison, we manually apply **Text Normalization** before scoring, including the handling of numbers, uppercase letters, and punctuation (a minimal scoring sketch follows the tables below).

1. **Public Models**:
| No. | Model | #Params | Vivos | Common Voice | VLSP - Task 1 | Avg. |
|-----|------------------------------------------------------------------------|---------|-------|--------------|---------------|------|
| 1 | **ChunkFormer** | 110M | 4.18 | 6.66 | 14.09 | **8.31** |
| 2 | [vinai/PhoWhisper-large](https://huggingface.co/vinai/PhoWhisper-large) | 1.55B | 4.67 | 8.14 | 13.75 | 8.85 |
| 3 | [nguyenvulebinh/wav2vec2-base-vietnamese-250h](https://huggingface.co/nguyenvulebinh/wav2vec2-base-vietnamese-250h) | 95M | 10.77 | 18.34 | 13.33 | 14.15 |
| 4 | [openai/whisper-large-v3](https://huggingface.co/openai/whisper-large-v3) | 1.55B | 8.81 | 15.45 | 20.41 | 14.89 |
| 5 | [khanhld/wav2vec2-base-vietnamese-160h](https://huggingface.co/khanhld/wav2vec2-base-vietnamese-160h) | 95M | 15.05 | 10.78 | 31.62 | 19.16 |
| 6 | [homebrewltd/Ichigo-whisper-v0.1](https://huggingface.co/homebrewltd/Ichigo-whisper-v0.1) | 22M | 13.46 | 23.52 | 21.64 | 19.54 |

2. **Private Models (API)**:
| No. | Model | VLSP - Task 1 |
|-----|--------|---------------|
| 1 | **ChunkFormer** | **14.1** |
| 2 | Viettel | 14.5 |
| 3 | Google | 19.5 |
| 4 | FPT | 28.8 |

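The snippet below is a minimal, illustrative scoring sketch (simple lowercasing and punctuation stripping, then WER via the `jiwer` package). It is not the exact normalization pipeline used for the numbers above; the example strings are made up.

```python
# Minimal, illustrative WER scoring sketch (not the exact pipeline used above).
# Assumes `pip install jiwer`; normalization here is a simple stand-in.
import re
import jiwer

def normalize(text: str) -> str:
    text = text.lower()                       # drop case
    text = re.sub(r"[^\w\s]", " ", text)      # strip punctuation
    return re.sub(r"\s+", " ", text).strip()  # collapse whitespace

references = ["Xin chào, tôi là ChunkFormer!"]
hypotheses = ["xin chào tôi là chunkformer"]

wer = jiwer.wer(
    [normalize(r) for r in references],
    [normalize(h) for h in hypotheses],
)
print(f"WER: {wer:.2%}")
```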
---
<a name = "usage" ></a>
## Quick Usage
To use the ChunkFormer model for Vietnamese Automatic Speech Recognition, follow these steps:

1. **Download the ChunkFormer Repository**
```bash
git clone https://github.com/khanld/chunkformer.git
cd chunkformer
pip install -r requirements.txt
```
2. **Download the Model Checkpoint from Hugging Face**
```bash
pip install huggingface_hub
huggingface-cli download khanhld/chunkformer-large-vie --local-dir "./chunkformer-large-vie"
```
or
```bash
git lfs install
git clone https://huggingface.co/khanhld/chunkformer-large-vie
```
This will download the model checkpoint into the chunkformer-large-vie folder inside your chunkformer directory. A Python alternative using `huggingface_hub` is sketched below.
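If you prefer to do the download from Python, the following sketch uses `huggingface_hub.snapshot_download`, which is equivalent to the CLI call above; the target directory is just an example.

```python
# Python alternative to the huggingface-cli call above.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="khanhld/chunkformer-large-vie",
    local_dir="./chunkformer-large-vie",  # same target as the CLI example
)
print(f"Checkpoint downloaded to: {local_dir}")
```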

3. **Run the model**
```bash
# total_batch_duration is in seconds; the default is 1800
python decode.py \
    --model_checkpoint path/to/local/chunkformer-large-vie \
    --long_form_audio path/to/audio.wav \
    --total_batch_duration 14400 \
    --chunk_size 64 \
    --left_context_size 128 \
    --right_context_size 128
```
Example Output:
```
[00:00:01.200] - [00:00:02.400]: this is a transcription example
[00:00:02.500] - [00:00:03.700]: testing the long-form audio
```
**Advanced Usage** can be found [HERE](https://github.com/khanld/chunkformer/tree/main?tab=readme-ov-file#usage)

---
<a name = "citation" ></a>
## Citation
If you use this work in your research, please cite:

```bibtex
@INPROCEEDINGS{10888640,
  author={Le, Khanh and Ho, Tuan Vu and Tran, Dung and Chau, Duc Thanh},
  booktitle={ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  title={ChunkFormer: Masked Chunking Conformer For Long-Form Speech Transcription},
  year={2025},
  volume={},
  number={},
  pages={1-5},
  keywords={Scalability;Memory management;Graphics processing units;Signal processing;Performance gain;Hardware;Resource management;Speech processing;Standards;Context modeling;chunkformer;masked batch;long-form transcription},
  doi={10.1109/ICASSP49660.2025.10888640}
}
```

---
<a name = "contact"></a>
## Contact

- [](https://github.com/khanld)
- [](https://www.linkedin.com/in/khanhld257/)
config.yaml
ADDED
@@ -0,0 +1,33 @@
encoder: chunkformer
is_json_cmvn: true
cmvn_file: chunkformer-large-vie/global_cmvn
input_dim: 80
output_dim: 6992

encoder_conf:
    output_size: 512    # dimension of attention
    attention_heads: 8
    linear_units: 2048  # the number of units of position-wise feed forward
    num_blocks: 17      # the number of encoder blocks
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.1
    input_layer: 'depthwise'    # encoder input type, you can choose conv2d, conv2d6 and conv2d8
    normalize_before: true
    cnn_module_kernel: 15
    use_cnn_module: true
    activation_type: 'swish'
    pos_enc_layer_type: 'stream_rel_pos'
    selfattention_layer_type: 'stream_rel_selfattn'
    causal: false
    use_dynamic_chunk: false
    use_limited_chunk: false
    use_context_hint_chunk: false
    right_context_probs: [0.75]
    right_context_sizes: [128, 128, 128]
    limited_decoding_chunk_sizes: [64, 128, 256]
    limited_left_chunk_sizes: [128, 256, 128]
    cnn_module_norm: 'layer_norm'    # using nn.LayerNorm makes model converge faster
    use_dynamic_left_chunk: false
    use_dynamic_conv: true
    freeze_subsampling_layer: false
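As a quick sanity check, the config can be read with any YAML parser; the sketch below uses the standard `pyyaml` package (nothing model-specific) and simply prints the encoder settings.

```python
# Minimal sketch: read config.yaml and inspect the encoder settings.
# Assumes `pip install pyyaml` and that config.yaml is in the working directory.
import yaml

with open("config.yaml", "r", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)

print(cfg["encoder"])                       # -> chunkformer
print(cfg["input_dim"], cfg["output_dim"])  # -> 80 6992
for key, value in cfg["encoder_conf"].items():
    print(f"{key}: {value}")
```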
dataset.tsv
ADDED
@@ -0,0 +1,15 @@

data Estimated hour Link
AILAB-VNUHCM/vivos 15 https://huggingface.co/datasets/AILAB-VNUHCM/vivos
doof-ferb/vlsp2020_vinai_100h 100 https://huggingface.co/datasets/doof-ferb/vlsp2020_vinai_100h
doof-ferb/fpt_fosd 100 https://huggingface.co/datasets/doof-ferb/fpt_fosd
doof-ferb/infore1_25hours 25 https://huggingface.co/datasets/doof-ferb/infore1_25hours
linhtran92/viet_bud500 500 https://huggingface.co/datasets/linhtran92/viet_bud500
doof-ferb/LSVSC 100 https://huggingface.co/datasets/doof-ferb/LSVSC
doof-ferb/vais1000 2 https://huggingface.co/datasets/doof-ferb/vais1000
doof-ferb/VietMed_labeled 3 https://huggingface.co/datasets/doof-ferb/VietMed_labeled
NhutP/VSV-1100 1100 https://huggingface.co/datasets/NhutP/VSV-1100
doof-ferb/Speech-MASSIVE_vie 1 https://huggingface.co/datasets/doof-ferb/Speech-MASSIVE_vie
doof-ferb/BibleMMS_vie 1 https://huggingface.co/datasets/doof-ferb/BibleMMS_vie
capleaf/viVoice 1000 https://huggingface.co/datasets/capleaf/viVoice
linhtran92/viet_youtube_asr_corpus_v2 100 https://huggingface.co/datasets/linhtran92/viet_youtube_asr_corpus_v2
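A small sketch for sanity-checking this list, under the assumption that the file is genuinely tab-separated with the columns shown above (the blank first line is skipped by default):

```python
# Illustrative only: sum the estimated hours listed in dataset.tsv.
# Assumes tab-separated columns named: data, Estimated hour, Link.
import pandas as pd

df = pd.read_csv("dataset.tsv", sep="\t", skip_blank_lines=True)
total_hours = df["Estimated hour"].sum()
print(df[["data", "Estimated hour"]])
print(f"Total estimated hours: {total_hours}")
```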
global_cmvn
ADDED
@@ -0,0 +1 @@
{"mean_stat": [11719716864.0, 12109591552.0, 13372244992.0, 14431860736.0, 15379044352.0, 16048251904.0, 16420151296.0, 16462806016.0, 16435909632.0, 16277902336.0, 16328012800.0, 16365081600.0, 16567385088.0, 16710700032.0, 16709065728.0, 16702018560.0, 16609516544.0, 16454155264.0, 16481000448.0, 16182266880.0, 15951698944.0, 16123002880.0, 15874306048.0, 15979340800.0, 15849950208.0, 15899969536.0, 15757883392.0, 15813523456.0, 15752173568.0, 15734062080.0, 15719752704.0, 15666200576.0, 15647021056.0, 15673847808.0, 15692921856.0, 15761361920.0, 15846681600.0, 15889658880.0, 15914716160.0, 15815398400.0, 15872250880.0, 15785680896.0, 15849245696.0, 15839742976.0, 15926661120.0, 16051468288.0, 16199239680.0, 16324781056.0, 16408131584.0, 16480964608.0, 16454390784.0, 16394480640.0, 16379509760.0, 16433426432.0, 16476341248.0, 16498354176.0, 16476703744.0, 16432917504.0, 16377098240.0, 16309888000.0, 16293634048.0, 16223672320.0, 16094388224.0, 15930726400.0, 15795167232.0, 15654544384.0, 15517620224.0, 15460015104.0, 15418070016.0, 15386569728.0, 15329652736.0, 15294281728.0, 15287447552.0, 15333227520.0, 15422593024.0, 15514677248.0, 15527650304.0, 15340513280.0, 14994009088.0, 14165908480.0], "var_stat": [150319136768.0, 161704574976.0, 194916073472.0, 223425839104.0, 250741407744.0, 271204204544.0, 283238137856.0, 285300719616.0, 283947597824.0, 278054535168.0, 279273865216.0, 280822546432.0, 287651364864.0, 292578492416.0, 292665622528.0, 292221845504.0, 288983515136.0, 283743879168.0, 284274917376.0, 274934988800.0, 267933974528.0, 272869457920.0, 265080864768.0, 267923013632.0, 263733788672.0, 265018474496.0, 260564975616.0, 262147883008.0, 260146626560.0, 259420569600.0, 258994372608.0, 257408565248.0, 256659259392.0, 257174503424.0, 257758822400.0, 259958472704.0, 262545031168.0, 263766671360.0, 264487256064.0, 261593858048.0, 263239712768.0, 260605640704.0, 262439223296.0, 262260129792.0, 264741273600.0, 268570198016.0, 273416044544.0, 277515993088.0, 280181997568.0, 282168197120.0, 280982159360.0, 279186898944.0, 278822191104.0, 280607490048.0, 282087915520.0, 282732265472.0, 281854050304.0, 280297209856.0, 278515056640.0, 276357185536.0, 275706642432.0, 273547116544.0, 269666041856.0, 264639021056.0, 260329226240.0, 256096075776.0, 252133769216.0, 250319650816.0, 249039454208.0, 247930339328.0, 246243737600.0, 245079801856.0, 244788805632.0, 246247636992.0, 248876138496.0, 251564457984.0, 251790016512.0, 246503702528.0, 236531630080.0, 212810301440.0], "frame_num": 1074281799}
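These are accumulated sufficient statistics over the training features. Under the usual global-CMVN convention (as in WeNet-style recipes), per-dimension mean and inverse standard deviation would be recovered roughly as below; this is a sketch under that assumption, not a description of this repo's exact loader.

```python
# Sketch (assumed convention): derive per-dimension mean/std from global_cmvn.
# mean = mean_stat / frame_num ; var = var_stat / frame_num - mean^2
import json
import numpy as np

with open("global_cmvn", "r", encoding="utf-8") as f:
    stats = json.load(f)

n = stats["frame_num"]
mean = np.array(stats["mean_stat"]) / n
var = np.array(stats["var_stat"]) / n - mean**2
istd = 1.0 / np.sqrt(np.maximum(var, 1e-20))  # guard against tiny variances

print(mean.shape, istd.shape)  # expected: (80,) (80,), matching input_dim
# A feature frame x (80-dim) would then be normalized as (x - mean) * istd.
```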
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:95394958efbb22d9bf22b027f1867d7b04aa803b09ec82d1e5bd67a40f7ea6c8
size 613743208
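This is a Git LFS pointer; the actual ~614 MB weights file is fetched when the repo is cloned or downloaded. Once present locally, the tensors can be inspected with the `safetensors` package; a minimal sketch, independent of the ChunkFormer code:

```python
# Minimal sketch: inspect the downloaded weights (requires `pip install safetensors torch`).
from safetensors.torch import load_file

state_dict = load_file("model.safetensors")  # maps tensor name -> torch.Tensor
total_params = sum(t.numel() for t in state_dict.values())
print(f"{len(state_dict)} tensors, {total_params / 1e6:.1f}M parameters")
```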
paper.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e3c63fd1e0799d7095603e6cfce0db0b17a75673249d1558033f70630cb948db
size 799967
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c9c9d6e0dab607655c8babb01718066f6e08ccc8e6419acc1a67df971ad7ee99
size 613951335
vocab.txt
ADDED
The diff for this file is too large to render.