jialucode committed
Commit 0fb79ec · verified · 1 parent: 1700b9f

Upload 3 files

data/data_loader.py ADDED
@@ -0,0 +1,295 @@
+ import json
+ import os
+ import pickle
+ import random
+ from pathlib import Path
+ import time
+ import wfdb
+ import numpy as np
+ import torch
+ from PIL import Image
+ from torch.utils.data import Dataset
+ from torchvision.transforms import ColorJitter
+ from scipy.io import loadmat
+ from load_class import get_temp_qa, change_ecg_to_qa, prepare_ecg_qa_data
+ from utils import set_device
+ import matplotlib.pyplot as plt
+ import argparse
+ from meta_trainer import MetaTrainer
+ import warnings
+ from transformers import AutoTokenizer
+
+ warnings.filterwarnings("ignore")
+
+ torch.manual_seed(222)
+ torch.cuda.manual_seed_all(222)
+ np.random.seed(222)
+
+ PROJECT_ROOT = str(Path.cwd().parent.parent)  # project path
+ LOG_PATH = PROJECT_ROOT + "/logs/"
+ MODELS_PATH = PROJECT_ROOT + "/models/"
+
+ class FSL_ECG_QA_DataLoader(Dataset):
+     """
+     DataLoader for episodic training on the FSL_ECG_QA dataset.
+     NOTICE: meta-learning differs from ordinary supervised learning, especially in the notions of batch and set.
+     batch: contains several sets / tasks
+     set: contains n_way * k_shot samples for the meta-train (support) set and n_way * k_query samples for the meta-test (query) set.
+     """
+
+     def __init__(self, mode, batchsz, n_way, k_shot, k_query, seq_len, seq_len_a, repeats, tokenizer,
+                  prefix_length, startidx=0, all_ids=None, in_templates=None, prompt=1, paraphrased_path="", test_dataset=""):
+         self.batchsz = batchsz
+         self.n_way = n_way
+         self.k_shot = k_shot
+         self.k_query = k_query
+         self.repeats = repeats
+         self.setsz = self.n_way * self.k_shot if self.repeats == 0 else self.n_way * self.k_shot * (self.repeats + 1)
+         self.querysz = self.n_way * self.k_query  # number of samples per set for evaluation
+         self.seq_len = seq_len  # sentence seq length
+         self.seq_len_a = seq_len_a
+         self.prefix_length = prefix_length
+         self.startidx = startidx  # index label not from 0, but from startidx
+         self.device = set_device()
+         print('shuffle DB: %s, b:%d, %d-way, %d-shot, %d-query, %d-repeats' % (mode, batchsz, n_way, k_shot,
+                                                                                k_query, repeats))
+         self.gpt_tokenizer = tokenizer
+         self.mode = mode
+         self.all_ids = all_ids
+         self.prompt = prompt
+         self.test_dataset = test_dataset
+
+         # Assumption: the episode templates are meant for change_ecg_to_qa's in_template parameter
+         # (the original call passed them positionally as question_type); "all" keeps every single-* type.
+         json_data_ecg = change_ecg_to_qa(all_ids, "all", paraphrased_path, in_template=in_templates, test_dataset=test_dataset)
+
+         self.data = []
+         self.img2caption = {}
+
+         for ecg_q_as in json_data_ecg.values():
+             self.data.append(ecg_q_as)
+
+         self.cls_num = len(self.data)
+         print("self.cls_num", self.mode, self.cls_num)
+
+         self.create_batch(self.batchsz)
+
+     def create_batch(self, batchsz):
+         """
+         Create batches for meta-learning.
+         *episode* here means batch, i.e. how many sets/tasks we want to retain.
+         :param batchsz: batch size (number of episodes to sample)
+         :return:
+         """
+         self.support_x_batch = []  # support set batch
+         self.query_x_batch = []  # query set batch
+         # Create tasks; batchsz is the number of iterations when sampling from the task distribution
+         for b in range(batchsz):  # for each batch
+             # 1. select n_way classes randomly
+             selected_cls = np.random.choice(self.cls_num, self.n_way, replace=False)  # no duplicates
+             support_x = []
+             query_x = []
+             for cls in selected_cls:
+                 selected_question = np.random.choice(len(self.data[cls]), 1)[0]
+                 selected_imgs_idx = np.random.choice(len(self.data[cls][selected_question]), self.k_shot + self.k_query)
+                 np.random.shuffle(selected_imgs_idx)
+                 indexDtrain = np.array(selected_imgs_idx[:self.k_shot])  # idx for Dtrain
+                 indexDtest = np.array(selected_imgs_idx[self.k_shot:])  # idx for Dtest
+                 support_x.append(
+                     np.array(self.data[cls][selected_question])[indexDtrain].tolist())  # all sample filenames for current Dtrain
+                 query_x.append(np.array(self.data[cls][selected_question])[indexDtest].tolist())
+                 if self.repeats > 0:
+                     for i in range(self.repeats):
+                         support_x.append(np.array(self.data[cls][selected_question])[indexDtrain].tolist())
+
+             # shuffle the correspondence between support set and query set
+             random.shuffle(support_x)
+             random.shuffle(query_x)
+
+             self.support_x_batch.append(support_x)  # append this task's support set
+             self.query_x_batch.append(query_x)  # append this task's query set
+
+     def get_ptbxl_data_path(self, ecg_id):
+         return os.path.join(
+             f"{int(ecg_id / 1000) * 1000:05d}",
+             f"{ecg_id:05d}_hr"
+         )
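+     # e.g., ecg_id 21837 maps to "21000/21837_hr", matching PTB-XL's folders-of-1000 record layout.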
+
+     def gen_prompt(self, q_str):
+         if self.prompt == 1:
+             token_p = "Question: " + q_str + "Answer: "
+         elif self.prompt == 2:
+             token_p = q_str
+         elif self.prompt == 3:
+             token_p = q_str + "the answer can be both, none or in question."
+         else:
+             raise ValueError(f"Unsupported prompt style: {self.prompt}")
+         return token_p
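+     # e.g., with prompt=1 a (hypothetical) question "is there sinus rhythm?" becomes
+     # "Question: is there sinus rhythm?Answer: " (note: the template adds no space before "Answer: ").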
+
+     def __getitem__(self, index):
+         """
+         index is the index of a set, 0 <= index <= batchsz - 1
+         :param index:
+         :return:
+         """
+         support_x = torch.FloatTensor(self.setsz, 12, 2500)
+         query_x = torch.FloatTensor(self.querysz, 12, 2500)
+
+         support_y_q = []
+         support_y_a = []
+         support_y_q_mask = []
+         support_y_a_mask = []
+         query_y_q = []
+         query_y_a = []
+         query_y_q_mask = []
+         query_y_a_mask = []
+
+         flatten_support_x = [f"/gpfs/home1/jtang1/multimodal_fsl_99/process_ptbxl2/{self.get_ptbxl_data_path(sample['ecg_id'][0])}"
+                              for sublist in self.support_x_batch[index] for sample in sublist]
+         flatten_query_x = [f"/gpfs/home1/jtang1/multimodal_fsl_99/process_ptbxl2/{self.get_ptbxl_data_path(sample['ecg_id'][0])}"
+                            for sublist in self.query_x_batch[index] for sample in sublist]
+
+         for sublist in self.support_x_batch[index]:
+             for sample in sublist:
+                 q_str = sample["question"].lower()
+                 # join multi-part answers into one lowercase, comma-separated string
+                 a_str = ", ".join(content.lower() for content in sample["answer"])
+
+                 q_str_tokenized = self.gpt_tokenizer(self.gen_prompt(q_str), return_tensors="pt")['input_ids']
+
+                 caption_padded_q, mask_0_q = pad_tokens(q_str_tokenized, self.seq_len, self.prefix_length,
+                                                         self.gpt_tokenizer.eos_token_id)
+                 support_y_q.append(caption_padded_q)
+                 support_y_q_mask.append(mask_0_q)
+
+                 a_str_tokenized = self.gpt_tokenizer(a_str, return_tensors="pt")['input_ids']
+                 caption_padded_a, mask_0_a = pad_tokens(a_str_tokenized, self.seq_len_a, self.prefix_length,
+                                                         self.gpt_tokenizer.eos_token_id)
+                 support_y_a.append(caption_padded_a)
+                 support_y_a_mask.append(mask_0_a)
+
+         support_y_q = torch.stack(support_y_q)
+         support_y_a = torch.stack(support_y_a)
+         support_y_q_mask = torch.stack(support_y_q_mask)
+         support_y_a_mask = torch.stack(support_y_a_mask)
+
+         for sublist in self.query_x_batch[index]:
+             for sample in sublist:
+                 q_str = sample["question"].lower()
+                 a_str = ", ".join(content.lower() for content in sample["answer"])
+
+                 q_str_tokenized = self.gpt_tokenizer(self.gen_prompt(q_str), return_tensors="pt")['input_ids']
+                 caption_padded_q, mask_0_q = pad_tokens(q_str_tokenized, self.seq_len, self.prefix_length,
+                                                         self.gpt_tokenizer.eos_token_id)
+                 query_y_q.append(caption_padded_q)
+                 query_y_q_mask.append(mask_0_q)
+
+                 a_str_tokenized = self.gpt_tokenizer(a_str, return_tensors="pt")['input_ids']
+                 caption_padded_a, mask_0_a = pad_tokens(a_str_tokenized, self.seq_len_a, self.prefix_length,
+                                                         self.gpt_tokenizer.eos_token_id)
+                 query_y_a.append(caption_padded_a)
+                 query_y_a_mask.append(mask_0_a)
+
+         query_y_q = torch.stack(query_y_q)
+         query_y_q_mask = torch.stack(query_y_q_mask)
+         query_y_a = torch.stack(query_y_a)
+         query_y_a_mask = torch.stack(query_y_a_mask)
+
+         # Read ECG signals from the preprocessed .mat files
+         for i, path in enumerate(flatten_support_x):
+             ecg = loadmat(path)['feats']
+             support_x[i] = torch.tensor(ecg)
+
+         for i, path in enumerate(flatten_query_x):
+             ecg = loadmat(path)['feats']
+             query_x[i] = torch.tensor(ecg)
+
+         return (support_x, support_y_q, support_y_a, support_y_q_mask, support_y_a_mask, flatten_support_x,
+                 query_x, query_y_q, query_y_a, query_y_q_mask, query_y_a_mask, flatten_query_x)
+
+     def __len__(self):
+         return self.batchsz
+
+
+ def pad_tokens(tokens, seq_len, prefix_length, eos_token_id):
+     tokens = tokens.squeeze(0)
+     padding = seq_len - tokens.shape[0]
+     if padding > 0:
+         tokens = torch.cat((tokens, torch.zeros(padding, dtype=torch.int64) - 1))
+     elif padding < 0:
+         tokens = tokens[:seq_len]
+     mask = tokens.ge(0)  # mask is zero where we are past the real sequence
+     tokens[~mask] = eos_token_id
+     mask = mask.float()
+     mask = torch.cat((torch.ones(prefix_length), mask), dim=0)  # prepend the prefix mask
+     return tokens, mask
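+ # Worked example: 7 real tokens, seq_len=10, prefix_length=4 -> the 3 pad slots are first set to -1,
+ # then overwritten with eos_token_id, and mask = [1]*4 (prefix) + [1]*7 (tokens) + [0]*3 (padding).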
+
+
+ if __name__ == '__main__':
+     argparser = argparse.ArgumentParser()
+
+     argparser.add_argument('--experiment_id', type=int, default=666)
+     argparser.add_argument('--batchsz_train', type=int, default=10000)
+     argparser.add_argument('--batchsz_test', type=int, default=1000)
+     argparser.add_argument('--model_name', type=str, help="path to the model downloaded from Hugging Face", default="path/to/model")
+     argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
+     argparser.add_argument('--update_step_test', type=int, help='update steps for fine-tuning', default=15)
+
+     argparser.add_argument('--paraphrased_path', type=str, default='path/to/paraphrased',
+                            help='path to ./paraphrased containing train/valid/test ECG-QA json files')
+     argparser.add_argument('--question_type', type=str, help='question type: single-verify, single-choose, single-query, or all', default='single-verify')
+     argparser.add_argument('--epoch', type=int, help='epoch number', default=10000)
+     argparser.add_argument('--n_way', type=int, help='n way', default=5)
+     argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=5)
+     argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=5)
+     argparser.add_argument('--prompt', type=int, help='prompt style: 1 = "Question: <q>Answer: ", 2 = raw question, 3 = question + "the answer can be both, none or in question."', default=1)
+     argparser.add_argument('--dif_exp', type=int, help='0 = same_exp, 1 = dif_exp', default=0)
+     argparser.add_argument('--frozen_gpt', type=int, help='0 = unfrozen GPT, 1 = frozen GPT', default=1)
+     argparser.add_argument('--frozen_features', type=int, help='0 = unfrozen features, 1 = frozen features', default=1)
+     argparser.add_argument('--repeats', type=int, help='repeats for support set', default=0)
+     argparser.add_argument('--seq_len', help='for padding batch', type=int, default=30)
+     argparser.add_argument('--seq_len_a', help='for padding batch', type=int, default=30)
+     argparser.add_argument('--prefix_length', type=int, default=4)
+     argparser.add_argument('--mapper_type', type=str, help='mapper type: ATT or MLP', default="MLP")
+     argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=1)
+     argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=5e-4)
+     argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.05)
+     argparser.add_argument('--test_dataset', type=str, default="ptb-xl", choices=["ptb-xl", "mimic"], help='dataset to use (ptb-xl or mimic)')
+     args = argparser.parse_args()
+
+     class_qa, train_temp, test_temp = prepare_ecg_qa_data(args)
+
+     device = set_device()
+     meta = MetaTrainer(args, args.experiment_id, is_pretrained=False).to(device)
+     params = list(filter(lambda p: p.requires_grad, meta.model.parameters()))
+     params_summed = sum(p.numel() for p in params)
+     print("Total num of params: {}".format(params_summed))
+     gpt_tokenizer = AutoTokenizer.from_pretrained(args.model_name)
+     data_loader_train = FSL_ECG_QA_DataLoader(mode='train', n_way=args.n_way, k_shot=args.k_spt, k_query=args.k_qry, batchsz=args.batchsz_train,
+                                               seq_len=args.seq_len, seq_len_a=args.seq_len_a, repeats=args.repeats, tokenizer=gpt_tokenizer,
+                                               prefix_length=args.prefix_length, all_ids=class_qa, in_templates=train_temp, prompt=args.prompt,
+                                               paraphrased_path=args.paraphrased_path, test_dataset=args.test_dataset)
+     # use batchsz_test here (the original reused batchsz_train for the test loader)
+     data_loader_test = FSL_ECG_QA_DataLoader(mode='test', n_way=args.n_way, k_shot=args.k_spt, k_query=args.k_qry, batchsz=args.batchsz_test,
+                                              seq_len=args.seq_len, seq_len_a=args.seq_len_a, repeats=args.repeats, tokenizer=gpt_tokenizer,
+                                              prefix_length=args.prefix_length, all_ids=class_qa, in_templates=test_temp, prompt=args.prompt,
+                                              paraphrased_path=args.paraphrased_path, test_dataset=args.test_dataset)
+     batch = next(iter(data_loader_train))
+
+     if isinstance(batch, dict):
+         for key, value in batch.items():
+             print(f"{key}: {value}")
+     elif isinstance(batch, (list, tuple)):
+         for i, item in enumerate(batch):
+             print(f"Item {i}: {item}")
+     else:
+         print(batch)
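+     # Example invocation (hypothetical paths):
+     #   python data_loader.py --model_name gpt2 --paraphrased_path /data/ecg_qa/paraphrased --n_way 5 --k_spt 5 --k_qry 5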
+
data/load_class.py ADDED
@@ -0,0 +1,440 @@
+ import os
+ import glob
+ import json
+ import argparse
+ from typing import List, Dict, Any, Union
+ from collections import Counter
+ import numpy as np
+
+ def get_temp_qa(question_type, paraphrased_path):
+     """
+     Load all question data and return a list of filtered template IDs based on question_type.
+
+     Args:
+         question_type (str): Type of question to filter by ('single-query', 'single-verify', etc., or 'all').
+         paraphrased_path (str): Base path for the dataset.
+
+     Returns:
+         List[int]: A list of unique template IDs matching the filter criteria.
+     """
+     data = []
+
+     for split in ["train", "valid", "test"]:
+         file_pattern = os.path.join(paraphrased_path, split, "*.json")
+         for fname in sorted(glob.glob(file_pattern)):
+             with open(fname, "r") as f:
+                 records = json.load(f)
+             data.extend(records)
+
+     print(f"Loaded {len(data)} samples!")
+
+     temp_list = []
+     if question_type != "all":
+         for item in data:
+             if item['question_type'] == question_type:
+                 temp_list.append(item['template_id'])
+     else:
+         for item in data:
+             if "single-" in item['question_type']:
+                 temp_list.append(item['template_id'])
+
+     temp_ids = list(set(temp_list))
+
+     # Filter out template IDs that would create conflicting classes
+     temp_ids = [i for i in temp_ids if i not in [17, 14, 26, 28, 4, 40, 31]]
+     return temp_ids
+
+ def prepare_ecg_qa_data(args):
+     if args.question_type == "all":
+         # Assumption: get_class_qa is not defined in this file; treating it as an older name
+         # for get_temp_qa(question_type, paraphrased_path) defined above.
+         class_qa = get_temp_qa("single-verify", args.paraphrased_path)
+         ecg_qa_list_1 = change_ecg_to_qa(
+             class_qa, "single-verify",
+             paraphrased_path=args.paraphrased_path,
+             dif_exp=args.dif_exp
+         )
+
+         class_qa += get_temp_qa("single-choose", args.paraphrased_path)
+         ecg_qa_list_2 = change_ecg_to_qa(
+             class_qa, "single-choose",
+             paraphrased_path=args.paraphrased_path,
+             dif_exp=args.dif_exp
+         )
+
+         class_qa += get_temp_qa("single-query", args.paraphrased_path)
+         ecg_qa_list_3 = change_ecg_to_qa(
+             class_qa, "single-query",
+             paraphrased_path=args.paraphrased_path,
+             dif_exp=args.dif_exp
+         )
+
+         ecg_qa_list = {**ecg_qa_list_1, **ecg_qa_list_2, **ecg_qa_list_3}
+     else:
+         class_qa = get_temp_qa(args.question_type, args.paraphrased_path)
+         ecg_qa_list = change_ecg_to_qa(
+             class_qa, args.question_type,
+             paraphrased_path=args.paraphrased_path,
+             dif_exp=args.dif_exp
+         )
+
+     all_ecg_qa_temp = list(ecg_qa_list.keys())
+     sample_size = int(len(all_ecg_qa_temp) * 0.8)
+     train_temp = np.random.choice(all_ecg_qa_temp, sample_size, replace=False).tolist()
+     test_temp = [i for i in all_ecg_qa_temp if i not in train_temp]
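+     # e.g., 50 class keys -> 40 sampled for train_temp and the remaining 10 left for test_temp (an 80/20 split).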
+
+     return class_qa, train_temp, test_temp
+
+ def change_ecg_to_qa(
+     sample_ids,
+     question_type,
+     paraphrased_path,
+     in_template=None,
+     dif_exp=1,
+     attr="",
+     test_dataset="ptb-xl"
+ ):
+     """
+     Process ECG data to create a dictionary of question-answer pairs categorized by template.
+
+     Args:
+         sample_ids (List[int]): List of template IDs to include.
+         question_type (str): Type of question to filter.
+         paraphrased_path (str): Base path for all data.
+         in_template (List, optional): List of templates to include.
+         dif_exp (int, optional): Controls return format. Default is 1.
+         attr (str, optional): Attribute type filter.
+         test_dataset (str, optional): Dataset to use ('ptb-xl' or 'mimic'). Default is 'ptb-xl'.
+
+     Returns:
+         Dict: Dictionary of ECG QA data categorized by template.
+     """
+     data = []
+
+     # Define subdirectories to process
+     subdirectories = ["train", "valid", "test"]
+
+     # Load data from all subdirectories
+     for subdir in subdirectories:
+         directory_path = os.path.join(paraphrased_path, subdir)
+         if not os.path.exists(directory_path):
+             print(f"Warning: Directory {directory_path} does not exist!")
+             continue
+
+         file_pattern = os.path.join(directory_path, "*.json")
+         files = sorted(glob.glob(file_pattern))
+
+         if not files:
+             print(f"Warning: No JSON files found in {directory_path}")
+             continue
+
+         for fname in files:
+             with open(fname, "r") as f:
+                 json_data = json.load(f)
+             data.extend(json_data)
+
+     # Filter by attribute type if specified
+     if attr != "":
+         sample_data = [item for item in data if item['attribute_type'] == attr]
+     else:
+         sample_data = data
+
+     # Filter samples by file existence if using the MIMIC dataset
+     if test_dataset == "mimic":
+         ecg_data_path = os.path.join(paraphrased_path, "mimic_iv_ecg/processed_test_30k/")
+         if not os.path.exists(ecg_data_path):
+             print(f"Warning: MIMIC ECG data path {ecg_data_path} does not exist!")
+         sample_data = [sample for sample in sample_data if os.path.isfile(os.path.join(ecg_data_path, f"{sample['ecg_id'][0]}.mat"))]
+
+     ecg_qa_dict = {}
+
+     if len(sample_data) == 0:
+         print(f"Cannot find template_id == {sample_ids} or no data available")
+     else:
+         for sample in sample_data:
+             template_id = sample['template_id']
+             if template_id in sample_ids:
+                 process_sample_by_type(sample, template_id, ecg_qa_dict, in_template)
+
+     filter_ecg_qa_dict_by_question_type(ecg_qa_dict, question_type)
+
+     if dif_exp == 1:
+         return {key: [value] for key, value in ecg_qa_dict.items()}
+
+     return filter_by_question_frequency(ecg_qa_dict)
+
+
+ def process_sample_by_type(sample, template_id, ecg_qa_dict, in_template):
+     """
+     Process a sample based on its question type.
+
+     Args:
+         sample (Dict): The sample data.
+         template_id (int): The template ID.
+         ecg_qa_dict (Dict): Dictionary to populate with processed data.
+         in_template (List, optional): List of templates to include.
+     """
+     question_type = sample['question_type']
+
+     if question_type == 'single-verify':
+         process_single_verify_sample(sample, template_id, ecg_qa_dict, in_template)
+     elif question_type == 'single-choose':
+         process_single_choose_sample(sample, template_id, ecg_qa_dict, in_template)
+     elif question_type == 'single-query':
+         process_single_query_sample(sample, template_id, ecg_qa_dict, in_template)
+
+
+ def process_single_verify_sample(sample, template_id, ecg_qa_dict, in_template):
+     """
+     Process a single-verify type sample.
+
+     Args:
+         sample (Dict): The sample data.
+         template_id (int): The template ID.
+         ecg_qa_dict (Dict): Dictionary to populate with processed data.
+         in_template (List, optional): List of templates to include.
+     """
+     if sample['answer'][0] in ["yes", "no"]:
+         answer = sample['answer'][0]
+         all_attributes = "_".join(sorted(sample['attribute']))
+         dict_key = f"{template_id}_{all_attributes}_{answer}"
+
+         if in_template is None or dict_key in in_template:
+             add_to_ecg_qa_dict(dict_key, sample, ecg_qa_dict)
+
+
+ def process_single_choose_sample(sample, template_id, ecg_qa_dict, in_template):
+     """
+     Process a single-choose type sample.
+
+     Args:
+         sample (Dict): The sample data.
+         template_id (int): The template ID.
+         ecg_qa_dict (Dict): Dictionary to populate with processed data.
+         in_template (List, optional): List of templates to include.
+     """
+     if len(sample['answer']) == 1:
+         answer = sample['answer'][0]
+     elif len(sample['answer']) == 2:
+         answer = "both"
+         sample['answer'] = ["both"]
+     else:
+         print("single-choose sample has more than 2 answers!")
+         return
+
+     all_attributes = "_".join(sorted(sample['attribute']))
+
+     if in_template is None:
+         handle_single_choose_without_template(sample, template_id, answer, all_attributes, ecg_qa_dict)
+     else:
+         handle_single_choose_with_template(sample, template_id, answer, all_attributes, in_template, ecg_qa_dict)
+
+
+ def handle_single_choose_without_template(sample, template_id, answer, all_attributes, ecg_qa_dict):
+     """
+     Handle a single-choose type sample when no template is provided.
+
+     Args:
+         sample (Dict): The sample data.
+         template_id (int): The template ID.
+         answer (str): The answer string.
+         all_attributes (str): The joined attributes string.
+         ecg_qa_dict (Dict): Dictionary to populate with processed data.
+     """
+     if answer == "both":
+         dict_key = f"{template_id}_{all_attributes}_{answer}"
+         add_to_ecg_qa_dict(dict_key, sample, ecg_qa_dict)
+     elif answer == "none":
+         for attr in sample['attribute']:
+             dict_key = f"{template_id}_{attr}_{answer}"
+             add_to_ecg_qa_dict(dict_key, sample, ecg_qa_dict)
+     else:
+         dict_key = f"{template_id}_{answer}_{answer}"
+         add_to_ecg_qa_dict(dict_key, sample, ecg_qa_dict)
+
+
+ def handle_single_choose_with_template(sample, template_id, answer, all_attributes, in_template, ecg_qa_dict):
+     """
+     Handle a single-choose type sample when a template is provided.
+
+     Args:
+         sample (Dict): The sample data.
+         template_id (int): The template ID.
+         answer (str): The answer string.
+         all_attributes (str): The joined attributes string.
+         in_template (List): List of templates to include.
+         ecg_qa_dict (Dict): Dictionary to populate with processed data.
+     """
+     full_key = f"{template_id}_{all_attributes}_{answer}"
+     short_key = f"{template_id}_{answer}"
+
+     if full_key in in_template or short_key in in_template:
+         if answer == "both":
+             dict_key = f"{template_id}_{all_attributes}_{answer}"
+             add_to_ecg_qa_dict(dict_key, sample, ecg_qa_dict)
+         elif answer == "none":
+             for attr in sample['attribute']:
+                 dict_key = f"{template_id}_{attr}_{answer}"
+                 add_to_ecg_qa_dict(dict_key, sample, ecg_qa_dict)
+         else:
+             dict_key = f"{template_id}_{answer}_{answer}"
+             add_to_ecg_qa_dict(dict_key, sample, ecg_qa_dict)
+
+
+ def process_single_query_sample(sample, template_id, ecg_qa_dict, in_template):
+     """
+     Process a single-query type sample.
+
+     Args:
+         sample (Dict): The sample data.
+         template_id (int): The template ID.
+         ecg_qa_dict (Dict): Dictionary to populate with processed data.
+         in_template (List, optional): List of templates to include.
+     """
+     if len(sample['answer']) == 1:
+         answer = sample['answer'][0]
+     elif len(sample['answer']) >= 2:
+         answer = ", ".join(sample['answer'])
+         sample['answer'] = [answer]
+     else:
+         return  # no answer recorded for this sample
+
+     all_attributes = "_".join(sorted(sample['attribute']))
+     dict_key = f"{template_id}_{all_attributes}_{answer}"
+
+     if in_template is None or dict_key in in_template:
+         add_to_ecg_qa_dict(dict_key, sample, ecg_qa_dict)
+
+
+ def add_to_ecg_qa_dict(key, sample, ecg_qa_dict):
+     """
+     Add a sample to the ECG QA dictionary.
+
+     Args:
+         key (str): The dictionary key.
+         sample (Dict): The sample data.
+         ecg_qa_dict (Dict): Dictionary to populate with processed data.
+     """
+     if key not in ecg_qa_dict:
+         ecg_qa_dict[key] = [sample]
+     else:
+         ecg_qa_dict[key].append(sample)
+
+
+ def filter_ecg_qa_dict_by_question_type(ecg_qa_dict, question_type):
+     """
+     Filter the ECG QA dictionary by question type and minimum sample counts.
+
+     Args:
+         ecg_qa_dict (Dict): Dictionary to filter.
+         question_type (str): Type of question to filter by.
+     """
+     for key in list(ecg_qa_dict.keys()):
+         if question_type in ["single-verify", "single-choose", "single-query"]:
+             qt = question_type
+         elif question_type == "all":
+             if len(ecg_qa_dict[key]) == 0:
+                 del ecg_qa_dict[key]
+                 continue
+             qt = ecg_qa_dict[key][0]['question_type']
+         else:
+             continue
+
+         if qt == "single-verify" and len(ecg_qa_dict[key]) < 140:
+             del ecg_qa_dict[key]
+         elif qt == "single-choose" and len(ecg_qa_dict[key]) < 14:
+             del ecg_qa_dict[key]
+         elif qt == "single-query" and len(ecg_qa_dict[key]) < 50:
+             del ecg_qa_dict[key]
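+     # The per-type thresholds (140 / 14 / 50) presumably keep only classes with enough
+     # samples to build k-shot support and query sets across many episodes.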
+
+
+ def filter_by_question_frequency(ecg_qa_dict):
+     """
+     Filter the ECG QA dictionary by question frequency.
+
+     Args:
+         ecg_qa_dict (Dict): Dictionary to filter.
+
+     Returns:
+         Dict: Filtered dictionary.
+     """
+     for key in list(ecg_qa_dict.keys()):
+         question_id_counter = Counter(sample['question_id'] for sample in ecg_qa_dict[key])
+         frequent_question_ids = [q_id for q_id, count in question_id_counter.items() if count >= 10]
+
+         # Group samples by question_id, keeping only questions that occur at least 10 times
+         all_samples_by_question = []
+         for question_id in frequent_question_ids:
+             question_samples = [sample for sample in ecg_qa_dict[key] if sample['question_id'] == question_id]
+             all_samples_by_question.append(question_samples)
+
+         if len(all_samples_by_question) != 0:
+             ecg_qa_dict[key] = all_samples_by_question
+         else:
+             del ecg_qa_dict[key]
+
+     return ecg_qa_dict
+
+
+ def validate_path(path):
+     """
+     Validate if a path exists.
+
+     Args:
+         path (str): Path to validate.
+
+     Returns:
+         bool: True if the path exists, False otherwise.
+     """
+     return os.path.exists(path)
+
+
+
+ def main():
+     """
+     Main function to run the ECG data processing.
+     """
+     parser = argparse.ArgumentParser(description='ECG Data Processor')
+     parser.add_argument('--paraphrased_path', type=str,
+                         default='path/to/paraphrased',
+                         help='path to ./paraphrased containing train/valid/test ECG-QA json files')
+     parser.add_argument('--test_dataset', type=str, default="ptb-xl", choices=["ptb-xl", "mimic"],
+                         help='Dataset to use (ptb-xl or mimic)')
+
+     args = parser.parse_args()
+     paraphrased_path = args.paraphrased_path
+
+     # Process each question type
+     for q_type in ['single-verify', 'single-choose', 'single-query', 'all']:
+         print(f"\nProcessing {q_type} question type...")
+
+         # Get template IDs for the question type
+         temp_ids = get_temp_qa(q_type, paraphrased_path)
+         print(f"{q_type} temp_ids: {temp_ids} (total: {len(temp_ids)})")
+
+         # Get the QA dictionary
+         ecg_qa_dict = change_ecg_to_qa(
+             temp_ids,
+             q_type,
+             paraphrased_path=paraphrased_path,
+             test_dataset=args.test_dataset
+         )
+         print(f"ECG QA dictionary length: {len(ecg_qa_dict)}")
+
+         # Calculate total QA count
+         total_qa_count = 0
+         for category_key in ecg_qa_dict.keys():
+             category_values = ecg_qa_dict.get(category_key)
+             category_total = sum(len(item) for item in category_values)
+             total_qa_count += category_total
+
+         print(f"Total QA count for '{q_type}': {total_qa_count}")
+         print("-" * 40)
+
+
+ if __name__ == "__main__":
+     main()
data/processed_test_30k.json ADDED
The diff for this file is too large to render. See raw diff