Added LOF scores, computed separately with the dev-train and add-train data.
Browse files- data/scores.csv +0 -0
- dcase23-task2-enriched.py +10 -0
data/scores.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
dcase23-task2-enriched.py
CHANGED
|
@@ -367,6 +367,10 @@ class DCASE2023Task2Dataset(datasets.GeneratorBasedBuilder):
|
|
| 367 |
"d2v": datasets.Value("string"),
|
| 368 |
"d3p": datasets.Value("string"),
|
| 369 |
"d3v": datasets.Value("string"),
|
|
|
|
|
|
|
|
|
|
|
|
|
| 370 |
}
|
| 371 |
if self.config.embeddings_urls is not None:
|
| 372 |
features.update({
|
|
@@ -419,6 +423,7 @@ class DCASE2023Task2Dataset(datasets.GeneratorBasedBuilder):
|
|
| 419 |
"audio_files": dl_manager.iter_archive(audio_path[split]),
|
| 420 |
"embeddings": embeddings[split],
|
| 421 |
"metadata_file": dl_manager.download_and_extract(self.config.data_urls["metadata"]) if self.config.data_urls["metadata"] is not None else None,
|
|
|
|
| 422 |
"is_streaming": dl_manager.is_streaming,
|
| 423 |
},
|
| 424 |
) for split in split_type if split in self.config.splits
|
|
@@ -431,11 +436,14 @@ class DCASE2023Task2Dataset(datasets.GeneratorBasedBuilder):
|
|
| 431 |
audio_files: Optional[Iterable],
|
| 432 |
embeddings: Optional[Dict],
|
| 433 |
metadata_file: Optional[str],
|
|
|
|
| 434 |
is_streaming: Optional[bool],
|
| 435 |
):
|
| 436 |
"""Yields examples."""
|
| 437 |
if metadata_file is not None:
|
| 438 |
metadata = pd.read_csv(metadata_file)
|
|
|
|
|
|
|
| 439 |
data_fields = list(self._info().features.keys())
|
| 440 |
|
| 441 |
id_ = 0
|
|
@@ -450,6 +458,8 @@ class DCASE2023Task2Dataset(datasets.GeneratorBasedBuilder):
|
|
| 450 |
result = {field: None for field in data_fields}
|
| 451 |
if metadata_file is not None:
|
| 452 |
result.update(metadata[metadata["path"] == lookup].T.squeeze().to_dict())
|
|
|
|
|
|
|
| 453 |
for emb_key in embeddings.keys():
|
| 454 |
result[emb_key] = np.asarray(embeddings[emb_key][lookup]).squeeze().tolist()
|
| 455 |
result["path"] = path
|
|
|
|
| 367 |
"d2v": datasets.Value("string"),
|
| 368 |
"d3p": datasets.Value("string"),
|
| 369 |
"d3v": datasets.Value("string"),
|
| 370 |
+
"dev_train_lof_anomaly": datasets.Value("int64"),
|
| 371 |
+
"dev_train_lof_anomaly_score": datasets.Value("float32"),
|
| 372 |
+
"add_train_lof_anomaly": datasets.Value("int64"),
|
| 373 |
+
"add_train_lof_anomaly_score": datasets.Value("float32"),
|
| 374 |
}
|
| 375 |
if self.config.embeddings_urls is not None:
|
| 376 |
features.update({
|
|
|
|
| 423 |
"audio_files": dl_manager.iter_archive(audio_path[split]),
|
| 424 |
"embeddings": embeddings[split],
|
| 425 |
"metadata_file": dl_manager.download_and_extract(self.config.data_urls["metadata"]) if self.config.data_urls["metadata"] is not None else None,
|
| 426 |
+
"scores_file": dl_manager.download_and_extract("data/scores.csv"),
|
| 427 |
"is_streaming": dl_manager.is_streaming,
|
| 428 |
},
|
| 429 |
) for split in split_type if split in self.config.splits
|
|
|
|
| 436 |
audio_files: Optional[Iterable],
|
| 437 |
embeddings: Optional[Dict],
|
| 438 |
metadata_file: Optional[str],
|
| 439 |
+
scores_file: Optional[str],
|
| 440 |
is_streaming: Optional[bool],
|
| 441 |
):
|
| 442 |
"""Yields examples."""
|
| 443 |
if metadata_file is not None:
|
| 444 |
metadata = pd.read_csv(metadata_file)
|
| 445 |
+
if scores_file is not None:
|
| 446 |
+
scores = pd.read_csv(scores_file)
|
| 447 |
data_fields = list(self._info().features.keys())
|
| 448 |
|
| 449 |
id_ = 0
|
|
|
|
| 458 |
result = {field: None for field in data_fields}
|
| 459 |
if metadata_file is not None:
|
| 460 |
result.update(metadata[metadata["path"] == lookup].T.squeeze().to_dict())
|
| 461 |
+
if scores is not None:
|
| 462 |
+
result.update(scores[scores["path"] == lookup].T.squeeze().to_dict())
|
| 463 |
for emb_key in embeddings.keys():
|
| 464 |
result[emb_key] = np.asarray(embeddings[emb_key][lookup]).squeeze().tolist()
|
| 465 |
result["path"] = path
|