Xenova (HF Staff) committed
Commit 4439ab3 · verified · 1 Parent(s): c06b59f

Update index.html

Files changed (1): index.html (+11 -9)
index.html CHANGED

@@ -99,8 +99,9 @@
   static model = null;
   static processor = null;
   static model_id = null;
-  static async getInstance(modelId, dtypeSettings, device) {
+  static async getInstance(modelId, dtypeSettings, device, revision) {
     if (this.model_id !== modelId) {
+      await this.model?.dispose();
       this.model = null;
       this.processor = null;
       this.model_id = modelId;
@@ -116,6 +117,7 @@
         decoder_model_merged: dtypeSettings.decoder,
       },
       device: device,
+      revision,
     });
   }
   return [this.processor, this.model];
@@ -128,11 +130,11 @@
   const resultsDiv = document.getElementById("results");
   resultsDiv.innerHTML = "";

-  const modelIds = [
-    "HuggingFaceTB/SmolVLM-256M-Instruct",
-    "HuggingFaceTB/SmolVLM-500M-Instruct",
-    "HuggingFaceTB/SmolVLM-Instruct"
-  ];
+  const modelIds = {
+    "HuggingFaceTB/SmolVLM-256M-Instruct": "refs/pr/11",
+    "HuggingFaceTB/SmolVLM-500M-Instruct": "refs/pr/9",
+    "HuggingFaceTB/SmolVLM-Instruct": "main"
+  };

   const decoder_dtype = document.getElementById("decoder-dtype").value || "q4";
   const embed_dtype = document.getElementById("embed-dtype").value || "q4";
@@ -146,7 +148,7 @@
   const dtypeSettings = { decoder: decoder_dtype, embed: embed_dtype, vision: vision_dtype };
   const image = await load_image(imageUrl);

-  for (const modelId of modelIds) {
+  for (const [modelId, revision] of Object.entries(modelIds)) {
     const modelShortName = modelId.split("/").pop();
     const modelSection = document.createElement("div");
     modelSection.className = "model-results";
@@ -158,7 +160,7 @@

     try {
       status.innerText = "Loading processor and model...";
-      const [processor, model] = await SmolVLM.getInstance(modelId, dtypeSettings, device);
+      const [processor, model] = await SmolVLM.getInstance(modelId, dtypeSettings, device, revision);

       status.innerText = "Warming up...";
       const messages = [{
@@ -180,7 +182,7 @@
       status.innerText = `Running benchmark... (${i + 1}/${numRuns})`;
       bar.innerText = createProgressBar(i + 1, numRuns);
       const start = performance.now();
-      const [processor, model] = await SmolVLM.getInstance(modelId, dtypeSettings, device);
+      // const [processor, model] = await SmolVLM.getInstance(modelId, dtypeSettings, device, revision);
       const text = processor.apply_chat_template(messages, { add_generation_prompt: true });
       const inputs = await processor(text, [image], { do_image_splitting: doImageSplitting });

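For context, the commit does three things: each SmolVLM checkpoint is now pinned to a specific Hub revision (two unmerged PRs plus main), the previous model's ONNX sessions are disposed before the next one loads so back-to-back benchmarks don't accumulate memory, and the getInstance call inside the timed loop is commented out so the measured time covers inference rather than a cache-hit lookup. A minimal sketch of the resulting loader pattern follows, assuming the page uses Transformers.js (@huggingface/transformers); the AutoProcessor/AutoModelForVision2Seq classes and the embed_tokens/vision_encoder dtype keys are assumptions based on the dtypeSettings object, since the diff only shows decoder_model_merged:

import { AutoProcessor, AutoModelForVision2Seq } from "@huggingface/transformers";

class SmolVLM {
  static model = null;
  static processor = null;
  static model_id = null;

  static async getInstance(modelId, dtypeSettings, device, revision) {
    if (this.model_id !== modelId) {
      // Release the previous model's ONNX sessions before switching
      // checkpoints, so successive benchmark runs don't leak memory.
      await this.model?.dispose();
      this.model = null;
      this.processor = null;
      this.model_id = modelId;
    }
    this.processor ??= await AutoProcessor.from_pretrained(modelId, { revision });
    this.model ??= await AutoModelForVision2Seq.from_pretrained(modelId, {
      dtype: {
        // Per-file quantization settings; key names other than
        // decoder_model_merged are assumptions.
        embed_tokens: dtypeSettings.embed,
        vision_encoder: dtypeSettings.vision,
        decoder_model_merged: dtypeSettings.decoder,
      },
      device: device,
      revision, // e.g. "refs/pr/11" to benchmark an unmerged Hub PR, or "main"
    });
    return [this.processor, this.model];
  }
}

Passing revision through to from_pretrained is what lets the benchmark compare ONNX weights from open Hub PRs against the main branch without merging them first.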