# DOoM-lb/src/display/utils.py
import json
import logging
from dataclasses import dataclass, make_dataclass
from datetime import datetime
from enum import Enum

import pandas as pd

# Configure logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")


def parse_datetime(datetime_str):
    """Parse a timestamp string, trying each known separator variant in turn."""
    formats = [
        "%Y-%m-%dT%H-%M-%S.%f",  # format with dashes
        "%Y-%m-%dT%H:%M:%S.%f",  # standard format with colons
        "%Y-%m-%dT%H %M %S.%f",  # spaces as separators
    ]
    for fmt in formats:
        try:
            return datetime.strptime(datetime_str, fmt)
        except ValueError:
            continue
    # In rare cases (legacy files with malformed timestamps), fall back to the Unix epoch.
    logging.error(f"No valid date format found for: {datetime_str}")
    return datetime(1970, 1, 1)
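
# A quick illustrative check (not part of the original module): all three
# separator variants parse to the same value, and unparseable strings fall
# back to the epoch.
# >>> parse_datetime("2024-01-05T12-30-00.000000")
# datetime.datetime(2024, 1, 5, 12, 30)
# >>> parse_datetime("not-a-date")
# datetime.datetime(1970, 1, 1, 0, 0)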


def load_json_data(file_path):
    """Safely load JSON data from a file, returning None if it cannot be parsed."""
    try:
        with open(file_path, "r") as file:
            return json.load(file)
    except json.JSONDecodeError:
        logging.error(f"Error reading JSON from {file_path}")
        return None  # or raise an exception
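
# Illustrative usage (the path below is hypothetical): a malformed file yields
# None rather than an exception, so callers can skip bad result files.
# data = load_json_data("eval-results/some-model.json")
# if data is None:
#     ...  # skip this file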


def fields(raw_class):
    """Return the non-dunder class attributes of raw_class, in definition order."""
    return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


class Tasks(Enum):
    math = Task("RussianMath", "score", "math_score")
    physics = Task("RussianPhysics", "score", "physics_score")
    combined = Task("Combined", "score", "score")


# These classes hold the user-facing column definitions, so that a rename only
# needs to happen here rather than throughout the code.
@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False
    dummy: bool = False


auto_eval_column_dict = []
# Init
auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("model", "markdown", True, never_hidden=True)])
# Scores
auto_eval_column_dict.append(["score", ColumnContent, ColumnContent("score", "number", True)])
for task in Tasks:
    if task != Tasks.combined:  # the combined score was already added above
        auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
# Model information
auto_eval_column_dict.append(["total_tokens", ColumnContent, ColumnContent("total_tokens", "number", False)])
auto_eval_column_dict.append(["evaluation_time", ColumnContent, ColumnContent("evaluation_time", "number", False)])
auto_eval_column_dict.append(["system_prompt", ColumnContent, ColumnContent("system_prompt", "str", False)])

# We use make_dataclass to dynamically fill in the score columns from Tasks.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
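
# Illustrative check (assumed behavior, not part of the original module): each
# attribute of AutoEvalColumn is a ColumnContent whose .name is the
# user-facing column name, e.g.
# >>> AutoEvalColumn.model.name
# 'model'
# >>> [c.name for c in fields(AutoEvalColumn)]
# ['model', 'score', 'math_score', 'physics_score', 'total_tokens', 'evaluation_time', 'system_prompt']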


@dataclass(frozen=True)
class EvalQueueColumn:  # Queue column
    model = ColumnContent("model", "markdown", True)


baseline_row = {
    AutoEvalColumn.model.name: "<p>Baseline</p>",
    AutoEvalColumn.score.name: 0.1,
    AutoEvalColumn.math.name: 0.1,
    AutoEvalColumn.physics.name: 0.1,
    AutoEvalColumn.total_tokens.name: 0,
    AutoEvalColumn.evaluation_time.name: 0,
    # "You are a helpful assistant for mathematics and physics. Answer in Russian."
    AutoEvalColumn.system_prompt.name: "Вы - полезный помощник по математике и физике. Ответьте на русском языке.",
}

# Define the human baselines
human_baseline_row = {
    AutoEvalColumn.model.name: "<p>Human performance</p>",
    AutoEvalColumn.score.name: 0.9,
    AutoEvalColumn.math.name: 0.9,
    AutoEvalColumn.physics.name: 0.9,
    AutoEvalColumn.total_tokens.name: 0,
    AutoEvalColumn.evaluation_time.name: 0,
    # Same Russian system prompt as baseline_row above.
    AutoEvalColumn.system_prompt.name: "Вы - полезный помощник по математике и физике. Ответьте на русском языке.",
}
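
# Minimal sketch (assumed usage): since both rows are keyed by the user-facing
# column names, they can go straight into the leaderboard table, e.g.
# pd.DataFrame([baseline_row, human_baseline_row]).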


@dataclass
class ModelDetails:
    name: str
    symbol: str = ""  # emoji, only used for the model type


class ModelType(Enum):
    PT = ModelDetails(name="pretrained", symbol="🟢")
    CPT = ModelDetails(name="continuously pretrained", symbol="🟩")
    FT = ModelDetails(name="fine-tuned on domain-specific datasets", symbol="🔶")
    chat = ModelDetails(name="chat models (RLHF, DPO, IFT, ...)", symbol="💬")
    merges = ModelDetails(name="base merges and moerges", symbol="🤝")
    Unknown = ModelDetails(name="", symbol="?")

    def to_str(self, separator=" "):
        return f"{self.value.symbol}{separator}{self.value.name}"

    @staticmethod
    def from_str(type):
        if "fine-tuned" in type or "🔶" in type:
            return ModelType.FT
        if "continuously pretrained" in type or "🟩" in type:
            return ModelType.CPT
        if "pretrained" in type or "🟢" in type:
            return ModelType.PT
        if any(k in type for k in ["instruction-tuned", "RL-tuned", "chat", "🟦", "⭕", "💬"]):
            return ModelType.chat
        if "merge" in type or "🤝" in type:
            return ModelType.merges
        return ModelType.Unknown
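
# A quick illustrative round-trip (not in the original file):
# >>> ModelType.from_str("fine-tuned on domain-specific datasets") is ModelType.FT
# True
# >>> ModelType.FT.to_str()
# '🔶 fine-tuned on domain-specific datasets'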


class WeightType(Enum):
    Adapter = ModelDetails("Adapter")
    Original = ModelDetails("Original")
    Delta = ModelDetails("Delta")


class Precision(Enum):
    float16 = ModelDetails("float16")
    bfloat16 = ModelDetails("bfloat16")
    qt_8bit = ModelDetails("8bit")
    qt_4bit = ModelDetails("4bit")
    qt_GPTQ = ModelDetails("GPTQ")
    Unknown = ModelDetails("?")

    @staticmethod
    def from_str(precision):
        if precision in ["torch.float16", "float16"]:
            return Precision.float16
        if precision in ["torch.bfloat16", "bfloat16"]:
            return Precision.bfloat16
        if precision in ["8bit"]:
            return Precision.qt_8bit
        if precision in ["4bit"]:
            return Precision.qt_4bit
        if precision in ["GPTQ", "None"]:
            return Precision.qt_GPTQ
        return Precision.Unknown
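
# Illustrative check (assumed usage): torch-style dtype strings map onto the enum.
# >>> Precision.from_str("torch.bfloat16") is Precision.bfloat16
# True
# >>> Precision.from_str("int8") is Precision.Unknown
# True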


# Column selection
COLS = [c.name for c in fields(AutoEvalColumn)]
TYPES = [c.type for c in fields(AutoEvalColumn)]
EVAL_COLS = [c.name for c in fields(EvalQueueColumn)]
EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)]

NUMERIC_INTERVALS = {
    "?": pd.Interval(-1, 0, closed="right"),
    "~0.1": pd.Interval(0, 0.2, closed="right"),
    "~0.3": pd.Interval(0.2, 0.4, closed="right"),
    "~0.5": pd.Interval(0.4, 0.6, closed="right"),
    "~0.7": pd.Interval(0.6, 0.8, closed="right"),
    "0.8+": pd.Interval(0.8, 1.0, closed="right"),
}
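
# Minimal sketch (the helper name is hypothetical, not part of the original
# module): NUMERIC_INTERVALS is meant for bucketing scores into display
# labels, and pd.Interval supports membership tests directly.
def _score_bucket(score: float) -> str:
    for label, interval in NUMERIC_INTERVALS.items():
        if score in interval:
            return label
    return "?"  # scores outside (-1, 1] fall back to the unknown bucket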