Update vlm-projects-multi-lang-final.py

#2
Files changed (1)
  1. vlm-projects-multi-lang-final.py +26 -37
vlm-projects-multi-lang-final.py CHANGED
@@ -1,4 +1,4 @@
- # dataset.py
+ # vlm-projects-multi-lang-final.py
  import os
  import pandas as pd
  import datasets
@@ -8,20 +8,16 @@ _HOMEPAGE = "https://huggingface.co/datasets/tungvu3196/vlm-projects-multi-lang-
  _LICENSE = "apache-2.0"
  _CITATION = ""
  
- LANGUAGES = [
-     "English","Vietnamese","French","German","Spanish","Russian","Korean",
-     "Mandarin","Japanese","Thai","Indonesian","Malay","Arabic","Hindi",
-     "Turkish","Portuguese"
- ]
+ LANGUAGES = ["English","Vietnamese","French","German","Spanish","Russian","Korean",
+              "Mandarin","Japanese","Thai","Indonesian","Malay","Arabic","Hindi",
+              "Turkish","Portuguese"]
  
  class VlmProjectsMultiLangFinal(datasets.GeneratorBasedBuilder):
      BUILDER_CONFIGS = [
          datasets.BuilderConfig(
-             name=lang_name,
-             version=datasets.Version("1.0.0"),
-             description=f"Dataset in {lang_name}",
-         )
-         for lang_name in LANGUAGES
+             name=lang_name, version=datasets.Version("1.0.0"),
+             description=f"Dataset in {lang_name}"
+         ) for lang_name in LANGUAGES
      ]
  
      def _info(self):
@@ -53,45 +49,38 @@ class VlmProjectsMultiLangFinal(datasets.GeneratorBasedBuilder):
                  "Start date": datasets.Value("float64"),
                  "Status": datasets.Value("string"),
                  "__index_level_0__": datasets.Value("int64"),
-                 # These two will render in the Viewer if the underlying files exist in the repo:
-                 "image": datasets.Image(),  # path or dict -> file in repo
-                 "image_with_bboxes": datasets.Image(),
-                 # keep as string/URL if it's not a local file:
-                 "rotated_link": datasets.Value("string"),
+                 "image": datasets.Image(),  # file in repo or https URL
+                 "image_with_bboxes": datasets.Image(),  # file in repo or https URL
+                 "rotated_link": datasets.Value("string")
              }),
          )
  
      def _split_generators(self, dl_manager):
-         # Map config name ("English") to folder ("english")
-         lang_dir = self.config.name.lower()
+         lang_dir = self.config.name.lower()  # "English" -> "english"
          base = os.path.join(self.config.data_dir or "data", lang_dir)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepath": os.path.join(base, "train.parquet"),
-                             "base_dir": base},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"filepath": os.path.join(base, "test.parquet"),
-                             "base_dir": base},
-             ),
-         ]
+         gens = []
+ 
+         # Add only the splits that actually exist to avoid build errors
+         for split_name in ("train", "test"):
+             fp = os.path.join(base, f"{split_name}.parquet")
+             if os.path.exists(fp):
+                 gens.append(
+                     datasets.SplitGenerator(
+                         name=getattr(datasets.Split, split_name.upper()),
+                         gen_kwargs={"filepath": fp, "base_dir": base},
+                     )
+                 )
+         return gens
  
      def _generate_examples(self, filepath, base_dir):
-         # Read parquet produced by your pipeline
          df = pd.read_parquet(filepath)
- 
         for i, row in df.iterrows():
             ex = row.to_dict()
 
-             # If parquet stored relative paths like "images/xyz.png", keep them relative to repo:
+             # Make image paths resolvable by the viewer
             for col in ("image", "image_with_bboxes"):
                 p = ex.get(col)
-                 if isinstance(p, str) and len(p):
-                     # If the path isn't an URL, make it relative to the dataset files
+                 if isinstance(p, str) and p:
                     if not (p.startswith("http://") or p.startswith("https://")):
                         ex[col] = os.path.join(base_dir, p).replace("\\", "/")
-                     # if it *is* a URL, leave as-is (Image will try to download)
- 
             yield i, ex
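
For reference, a minimal usage sketch of the updated script. The repo id below is assumed from the script and homepage name, and the data layout data/<language>/{train,test}.parquet is taken from _split_generators; adjust both if they differ in the actual repo:

from datasets import load_dataset

# Sketch only: repo id is an assumption based on the script/homepage name.
# Each entry in LANGUAGES becomes a config, so the language is selected by config name,
# and _split_generators looks for data/<language>/train.parquet and test.parquet.
ds = load_dataset(
    "tungvu3196/vlm-projects-multi-lang-final",
    name="English",
    trust_remote_code=True,  # needed because the dataset ships a loading script
)
print(ds)                    # DatasetDict with whichever splits were found on disk
print(ds["train"].features)  # includes the Image() columns declared in _info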