diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..5a0b16aea9121f43dd7a476309b1af15dbc75170 --- /dev/null +++ b/.gitignore @@ -0,0 +1,185 @@ +.ruff_cache +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/env.sh +venv/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# Files created by experiments +output/ +snapshot/ +*.m4a +notebooks/scratch.ipynb +notebooks/inspect.ipynb +notebooks/effects.ipynb +notebooks/*.ipynb +notebooks/*.gif +notebooks/*.wav +notebooks/*.mp4 +*runs/ +boards/ +samples/ +*.ipynb + +results.json +metrics.csv +mprofile_* +mem.png + +results/ +mprofile* +*.png +# do not ignore the test wav file +!tests/audio/short_test_audio.wav +!tests/audio/output.wav +*/.DS_Store +.DS_Store +env.sh +_codebraid/ +**/*.html +**/*.exec.md +flagged/ +log.txt +ckpt/ +.syncthing* +tests/assets/ +archived/ + +scratch/ + +runs-archive +lyrebird-audiotools +lyrebird-audio-codec +samples-*/** + +gradio-outputs/ +samples*/ +models-all/ +models.zip +audiotools/ +descript-audio-codec/ +# *.pth +.git-old diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..b5685772804c8af4235a8504dc6752bfc9ae5d1d --- /dev/null +++ b/Makefile @@ -0,0 +1,13 @@ +.PHONY: style quality + + +style: + python -m black --line-length 119 . + python -m isort . + ruff check --fix . + + +quality: + python -m black --check --line-length 119 . + python -m isort --check-only . + ruff check .
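For reference, the two Makefile targets above are meant to be run from the repository root, assuming `black`, `isort`, and `ruff` are installed (they are pinned in `pyproject.toml` / `requirements.txt` below):

```bash
make style    # reformat the codebase in place (black, isort, ruff --fix)
make quality  # check-only variant of the same tools, suitable for CI
```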
diff --git a/README.md b/README.md index c34257ce5f1a6feddb96d4714ee15209e49b838e..bb76754aee150bf2b8e7792197bb33db88f463db 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,53 @@ ---- -title: DOoM Lb -emoji: πŸ“ˆ -colorFrom: pink -colorTo: gray -sdk: gradio -sdk_version: 5.25.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +# DeathMath Leaderboard + +DeathMath - это Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊ для ΠΎΡ†Π΅Π½ΠΊΠΈ способности ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ Ρ€Π΅ΡˆΠ°Ρ‚ΡŒ слоТныС матСматичСскиС ΠΈ физичСскиС Π·Π°Π΄Π°Ρ‡ΠΈ Π½Π° русском языкС. + +## Π’Π΅ΠΊΡƒΡ‰ΠΈΠΉ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄ + +ПослСднСС ΠΎΠ±Π½ΠΎΠ²Π»Π΅Π½ΠΈΠ΅: 2025-04-20 16:33:11 + +| МодСль | ΠžΠ±Ρ‰ΠΈΠΉ Π±Π°Π»Π» | ΠœΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ° | Π€ΠΈΠ·ΠΈΠΊΠ° | Π’ΠΎΠΊΠ΅Π½Ρ‹ | ВрСмя ΠΎΡ†Π΅Π½ΠΊΠΈ | +|--------|------------|------------|---------|---------|--------------| +| o3-mini-high | 0.601 | 0.847 | 0.355 | 2,455,126 | 4015.4s | +| o4-mini-high | 0.591 | 0.863 | 0.318 | 1,898,964 | 4623.6s | +| Gemini 2.5 Pro Preview | 0.586 | 0.800 | 0.373 | 1,394,299 | 4533.2s | +| Gemini 2.0 Flash | 0.422 | 0.553 | 0.291 | 731,337 | 857.6s | +| gpt-4.1 | 0.386 | 0.563 | 0.209 | 405,803 | 1918.8s | +| Claude 3.7 Sonnet | 0.368 | 0.526 | 0.209 | 398,016 | 1095.8s | +| Claude 3.5 Sonnet | 0.339 | 0.432 | 0.245 | 222,241 | 670.5s | +| Gemma 3 27B | 0.321 | 0.468 | 0.173 | 357,617 | 2030.3s | +| Gemma 3 12B | 0.298 | 0.442 | 0.155 | 441,055 | 3916.3s | +| Qwen2.5 72B Instruct | 0.278 | 0.384 | 0.173 | 366,729 | 2460.1s | +| gpt-4o | 0.262 | 0.405 | 0.118 | 468,809 | 1078.4s | +| GigaChat-2-Max | 0.250 | 0.326 | 0.173 | 220,487 | 1006.2s | +| GigaChat-2-Pro | 0.209 | 0.326 | 0.091 | 212,196 | 1002.6s | +| GigaChat-Max | 0.139 | 0.179 | 0.100 | 201,090 | 978.8s | +| DeepSeek V3 0324 | 0.132 | 0.174 | 0.091 | 359,162 | 4257.7s | +| Gemma 3 4B | 0.124 | 0.221 | 0.027 | 572,095 | 1682.7s | +| GigaChat-2 | 0.094 | 0.142 | 0.045 | 299,747 | 834.7s | + +## Как ΠΏΡ€ΠΈΠ½ΡΡ‚ΡŒ участиС Π² Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊΠ΅ + +Для участия Π² Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊΠ΅ DeathMath: + +1. ΠšΠ»ΠΎΠ½ΠΈΡ€ΡƒΠΉΡ‚Π΅ Ρ€Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΉ ΠΈ запуститС тСсты вашСй ΠΌΠΎΠ΄Π΅Π»ΠΈ +2. Π—Π°Π³Ρ€ΡƒΠ·ΠΈΡ‚Π΅ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ Ρ‡Π΅Ρ€Π΅Π· [HuggingFace Space](https://huggingface.co/spaces/Vikhrmodels/DeathMath-leaderboard) +3. Π”ΠΎΠΆΠ΄ΠΈΡ‚Π΅ΡΡŒ ΠΏΡ€ΠΎΠ²Π΅Ρ€ΠΊΠΈ ΠΈ добавлСния Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚ΠΎΠ² Π² Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄ + +## Π€ΠΎΡ€ΠΌΠ°Ρ‚ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚ΠΎΠ² + +Π Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ Π΄ΠΎΠ»ΠΆΠ½Ρ‹ Π±Ρ‹Ρ‚ΡŒ Π² Ρ„ΠΎΡ€ΠΌΠ°Ρ‚Π΅ JSON со ΡΠ»Π΅Π΄ΡƒΡŽΡ‰Π΅ΠΉ структурой: +```json +{ + "score": 0.586, + "math_score": 0.8, + "physics_score": 0.373, + "total_tokens": 1394299, + "evaluation_time": 4533.2, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." +} +``` + +## ЛицСнзия + +Π‘Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊ распространяСтся ΠΏΠΎΠ΄ Π»ΠΈΡ†Π΅Π½Π·ΠΈΠ΅ΠΉ Apache 2.0 diff --git a/apache2.0 b/apache2.0 new file mode 100644 index 0000000000000000000000000000000000000000..e3c46b6ebec8f7a09123d5dcb1d4ecf29b7037e7 --- /dev/null +++ b/apache2.0 @@ -0,0 +1,190 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + + + Copyright [2024] [Vikhr models] + + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/app.py b/app.py new file mode 100644 index 0000000000000000000000000000000000000000..7b5ebc37f197f89302c0cbee1b7bcc0e36732e41 --- /dev/null +++ b/app.py @@ -0,0 +1,293 @@ +import logging +import os +os.makedirs("tmp", exist_ok=True) +os.environ['TMP_DIR'] = "tmp" +import subprocess +import shutil +import glob +import gradio as gr +import numpy as np +from src.radial.radial import create_plot +from apscheduler.schedulers.background import BackgroundScheduler +from gradio_leaderboard import Leaderboard, SelectColumns +from gradio_space_ci import enable_space_ci +import json +from io import BytesIO + +def handle_file_upload(file): + file_path = file.name.split("/")[-1] if "/" in file.name else file.name + logging.info("File uploaded: %s", file_path) + with open(file.name, "r") as f: + v = json.load(f) + return v, file_path +def submit_file(v, file_path, mn, profile: gr.OAuthProfile | None): + if profile is None: + return "Hub Login Required" + new_file = v['results'] + new_file['model'] = profile.username + "/" + mn + new_file['moviesmc'] = new_file['moviemc']["acc,none"] + new_file['musicmc'] = new_file['musicmc']["acc,none"] + new_file['booksmc'] = new_file['bookmc']["acc,none"] + new_file['mmluproru'] = new_file['mmluproru']["acc,none"] + new_file['lawmc'] = new_file['lawmc']["acc,none"] + new_file['model_dtype'] = v['config']["model_dtype"] + new_file['ppl'] = 0 + new_file.pop('moviemc') + new_file.pop('bookmc') + + buf = BytesIO() + buf.write(json.dumps(new_file).encode('utf-8')) + buf.seek(0)  # rewind so upload_file reads the full payload, not an empty buffer + API.upload_file( + path_or_fileobj=buf, + path_in_repo="model_data/external/" + profile.username+mn + ".json", + repo_id="Vikhrmodels/s-openbench-eval", + repo_type="dataset", + ) + os.environ[RESET_JUDGEMENT_ENV] = "1" + return "Success!"
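For context, `submit_file` above assumes an lm-eval-harness-style results dump. A minimal sketch of the JSON shape it would accept — the field names mirror the `v['results']` / `v['config']` accesses in the code, while the concrete numbers are invented placeholders:

```python
# Hypothetical upload accepted by submit_file(); keys are taken from the
# accesses in the function above, values are made up for illustration.
example_upload = {
    "results": {
        "moviemc": {"acc,none": 0.35},    # copied to 'moviesmc', then popped
        "musicmc": {"acc,none": 0.30},    # flattened in place
        "bookmc": {"acc,none": 0.31},     # copied to 'booksmc', then popped
        "mmluproru": {"acc,none": 0.19},  # flattened in place
        "lawmc": {"acc,none": 0.53},      # flattened in place
    },
    "config": {"model_dtype": "torch.float16"},
}
```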
+ +from src.display.about import ( + INTRODUCTION_TEXT, + TITLE, +LLM_BENCHMARKS_TEXT +) +from src.display.css_html_js import custom_css +from src.display.utils import ( + AutoEvalColumn, + fields, +) +from src.envs import API, H4_TOKEN, HF_HOME, REPO_ID, RESET_JUDGEMENT_ENV +from src.leaderboard.build_leaderboard import build_leadearboard_df, download_openbench, download_dataset +import huggingface_hub +# huggingface_hub.login(token=H4_TOKEN) + +os.environ["GRADIO_ANALYTICS_ENABLED"] = "false" + +# Configure logging +logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") + +# Start ephemeral Spaces on PRs (see config in README.md) +enable_space_ci() + +# download_openbench() + +def restart_space(): + API.restart_space(repo_id=REPO_ID) + download_openbench() + +def update_plot(selected_models): + return create_plot(selected_models) + +def build_demo(): + download_openbench() + demo = gr.Blocks(title="Small Shlepa", css=custom_css) + leaderboard_df = build_leadearboard_df() + with demo: + gr.HTML(TITLE) + gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text") + + with gr.Tabs(elem_classes="tab-buttons"): + with gr.TabItem("πŸ… LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0): + Leaderboard( + value=leaderboard_df, + datatype=[c.type for c in fields(AutoEvalColumn)], + select_columns=SelectColumns( + default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default], + cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden or c.dummy], + label="Select Columns to Display:", + ), + search_columns=[ + AutoEvalColumn.model.name, + # AutoEvalColumn.fullname.name, + # AutoEvalColumn.license.name + ], + ) + + # with gr.TabItem("πŸ“ About", elem_id="llm-benchmark-tab-table", id=1): + # gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text") + # with gr.TabItem("❗FAQ", elem_id="llm-benchmark-tab-table", id=2): + # gr.Markdown(FAQ_TEXT, elem_classes="markdown-text") + + with gr.TabItem("πŸš€ Submit ", elem_id="llm-benchmark-tab-table", id=3): + with gr.Row(): + gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text") + with gr.Row(): + gr.Markdown("# ✨ Submit your model here!", elem_classes="markdown-text") + + with gr.Column(): + + # def upload_file(file,su,mn): + # file_path = file.name.split("/")[-1] if "/" in file.name else file.name + # logging.info("New submition: file saved to %s", file_path) + # with open(file.name, "r") as f: + # v=json.load(f) + # new_file = v['results'] + # new_file['model'] = mn+"/"+su + # new_file['moviesmc']=new_file['moviemc']["acc,none"] + # new_file['musicmc']=new_file['musicmc']["acc,none"] + # new_file['booksmc']=new_file['bookmc']["acc,none"] + # new_file['lawmc']=new_file['lawmc']["acc,none"] + # # name = v['config']["model_args"].split('=')[1].split(',')[0] + # new_file['model_dtype'] = v['config']["model_dtype"] + # new_file['ppl'] = 0 + # new_file.pop('moviemc') + # new_file.pop('bookmc') + # buf = BytesIO() + # buf.write(json.dumps(new_file).encode('utf-8')) + # API.upload_file( + # path_or_fileobj=buf, + # path_in_repo="model_data/external/" + su+mn + ".json", + # repo_id="Vikhrmodels/s-openbench-eval", + # repo_type="dataset", + # ) + # os.environ[RESET_JUDGEMENT_ENV] = "1" + # return file.name + # gr.LoginButton() + model_name_textbox = gr.Textbox(label="Model name") + # submitter_username = gr.Textbox(label="Username") + + # def toggle_upload_button(model_name, username): + # return bool(model_name) and bool(username) + file_output = gr.File(label="Drag and 
drop JSON file judgment here", type="filepath") + # upload_button = gr.Button("Click to Upload & Submit Answers", elem_id="upload_button",variant='primary') + uploaded_file = gr.State() + file_path = gr.State() + with gr.Row(): + with gr.Column(): + out = gr.Textbox("Бтатус ΠΎΡ‚ΠΏΡ€Π°Π²ΠΊΠΈ") + with gr.Column(): + login_button = gr.LoginButton(elem_id="oauth-button") + + submit_button = gr.Button("Submit File", elem_id="submit_button", variant='primary') + + file_output.upload( + handle_file_upload, + file_output, + [uploaded_file, file_path] + ) + + submit_button.click( + submit_file, + [uploaded_file, file_path, model_name_textbox], + [out] + ) + + with gr.TabItem("πŸ“Š Analytics", elem_id="llm-benchmark-tab-table", id=4): + with gr.Column(): + model_dropdown = gr.Dropdown( + choices=leaderboard_df["model"].tolist(), + label="Models", + value=leaderboard_df["model"].tolist(), + multiselect=True, + info="Select models" + ) + with gr.Column(): + plot = gr.Plot(update_plot(model_dropdown.value)) + # plot = gr.Plot() + model_dropdown.change( + fn=update_plot, + inputs=[model_dropdown], + outputs=[plot] + ) + return demo + + +# print(os.system('cd src/gen && ../../.venv/bin/python gen_judgment.py')) +# print(os.system('cd src/gen/ && python show_result.py --output')) + + +def update_board(): + need_reset = os.environ.get(RESET_JUDGEMENT_ENV) + logging.info("Updating the judgement: %s", need_reset) + if need_reset != "1": + # return + pass + os.environ[RESET_JUDGEMENT_ENV] = "0" + + # `shutil.rmtree("./m_data")` is a Python command that removes a directory and all its contents + # recursively. In this specific context, it is used to delete the directory named "m_data" along + # with all its files and subdirectories. This command helps in cleaning up the existing data in + # the "m_data" directory before downloading new dataset files into it. + # shutil.rmtree("./m_data") + # shutil.rmtree("./data") + download_dataset("Vikhrmodels/s-openbench-eval", "m_data") + data_list = [{"musicmc": 0.3021276595744681, "lawmc": 0.2800829875518672, "model": "apsys/saiga_3_8b", "moviesmc": 0.3472222222222222, "booksmc": 0.2800829875518672, "model_dtype": "torch.float16", "ppl": 0, 'mmluproru':0}] + for file in glob.glob("./m_data/model_data/external/*.json"): + with open(file) as f: + try: + data = json.load(f) + data_list.append(data) + except Exception as e: + pass # data was badly formatted, should not fail + print("DATALIST,", data_list) + + if len(data_list)>1: + data_list.pop(0) + + if len(data_list)>4: + with open("genned.json", "w") as f: + json.dump(data_list, f) + + + API.upload_file( + path_or_fileobj="genned.json", + path_in_repo="leaderboard.json", + repo_id="Vikhrmodels/s-shlepa-metainfo", + repo_type="dataset", + ) + restart_space() + + + # gen_judgement_file = os.path.join(HF_HOME, "src/gen/gen_judgement.py") + # subprocess.run(["python3", gen_judgement_file], check=True) + +def update_board_(): + need_reset = os.environ.get(RESET_JUDGEMENT_ENV) + logging.info("Updating the judgement: %s", need_reset) + if need_reset != "1": + # return + pass + os.environ[RESET_JUDGEMENT_ENV] = "0" + + # `shutil.rmtree("./m_data")` is a Python command that removes a directory and all its contents + # recursively. In this specific context, it is used to delete the directory named "m_data" along + # with all its files and subdirectories. This command helps in cleaning up the existing data in + # the "m_data" directory before downloading new dataset files into it. 
+ # shutil.rmtree("./m_data") + # shutil.rmtree("./data") + download_dataset("Vikhrmodels/s-openbench-eval", "m_data") + data_list = [{"musicmc": 0.3021276595744681, "lawmc": 0.2800829875518672, "model": "apsys/saiga_3_8b", "moviesmc": 0.3472222222222222, "booksmc": 0.2800829875518672, "model_dtype": "torch.float16", "ppl": 0, 'mmluproru':0}] + for file in glob.glob("./m_data/model_data/external/*.json"): + with open(file) as f: + try: + data = json.load(f) + data_list.append(data) + except Exception as e: + pass # data was badly formatted, should not fail + print("DATALIST,", data_list) + + if len(data_list)>1: + data_list.pop(0) + + if len(data_list)>4: + with open("genned.json", "w") as f: + json.dump(data_list, f) + + + API.upload_file( + path_or_fileobj="genned.json", + path_in_repo="leaderboard.json", + repo_id="Vikhrmodels/s-shlepa-metainfo", + repo_type="dataset", + ) + +if __name__ == "__main__": + os.environ[RESET_JUDGEMENT_ENV] = "1" + + scheduler = BackgroundScheduler() + update_board_() + scheduler.add_job(update_board, "interval", minutes=10) + scheduler.start() + + demo_app = build_demo() + demo_app.launch(debug=True,share=True) diff --git a/data/leaderboard.json b/data/leaderboard.json new file mode 100644 index 0000000000000000000000000000000000000000..4c0aa353d3dfc4fd9de6e2662c1fe0bbbf30aee9 --- /dev/null +++ b/data/leaderboard.json @@ -0,0 +1 @@ +[{"musicmc": 0.2936170212765957, "lawmc": 0.48094747682801237, "model": "apsys/saiga_3_8b", "moviesmc": 0.3402777777777778, "booksmc": 0.3112033195020747, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.2723404255319149, "lawmc": 0.4850669412976313, "model": "Nexusflow/Starling-LM-7B-beta", "moviesmc": 0.38657407407407407, "booksmc": 0.3070539419087137, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.09361702127659574, "mmluproru": 0.10207253886010363, "lawmc": 0.11431513903192585, "model": "NousResearch/Llama-2-7b-hf", "moviesmc": 0.07175925925925926, "booksmc": 0.1078838174273859, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.20851063829787234, "lawmc": 0.47167868177136973, "model": "Salesforce/LLaMA-3-8B-SFR-Iterative-DPO-R", "moviesmc": 0.3055555555555556, "booksmc": 0.26141078838174275, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.2680851063829787, "mmluproru": 0.20103626943005182, "lawmc": 0.5386199794026777, "model": "Vikhrmodels/it-5.2-fp16-cp", "moviesmc": 0.4537037037037037, "booksmc": 0.3070539419087137, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.3021276595744681, "lawmc": 0.544799176107106, "model": "alexwortega/saiga_submit", "moviesmc": 0.3958333333333333, "booksmc": 0.3381742738589212, "model_dtype": "torch.bfloat16", "ppl": 0}, {"musicmc": 0.28085106382978725, "mmluproru": 0.17979274611398963, "lawmc": 0.5324407826982492, "model": "apsys/T-lite-instruct-0.1", "moviesmc": 0.4699074074074074, "booksmc": 0.3360995850622407, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.28085106382978725, "mmluproru": 0.17979274611398963, "lawmc": 0.5324407826982492, "model": "apsys/tlite-it-0.1", "moviesmc": 0.4699074074074074, "booksmc": 0.3360995850622407, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.2872340425531915, "lawmc": 0.5066941297631308, "model": "vikhr-52-7b-chat-hf/apsys", "moviesmc": 0.4837962962962963, "booksmc": 0.3070539419087137, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.28085106382978725, "mmluproru": 0.18808290155440416, "lawmc": 0.6426364572605562, "model": "apsys/vikhr-it-5.4-fp16-orpo-v2 ", "moviesmc": 
0.4699074074074074, "booksmc": 0.33402489626556015, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.20851063829787234, "lawmc": 0.42636457260556127, "model": "cohere/aya-8b", "moviesmc": 0.3287037037037037, "booksmc": 0.24273858921161826, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.2553191489361702, "mmluproru": 0.2621761658031088, "lawmc": 0.5818743563336766, "model": "google/gemma-2-9b", "moviesmc": 0.5046296296296297, "booksmc": 0.3360995850622407, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.25957446808510637, "mmluproru": 0.19378238341968912, "lawmc": 0.518022657054583, "model": "lightblue/suzume-llama-3-8B-multilingual", "moviesmc": 0.3287037037037037, "booksmc": 0.2966804979253112, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.2936170212765957, "lawmc": 0.5345005149330587, "model": "RefalMachine/llama3 ushanka", "moviesmc": 0.35185185185185186, "booksmc": 0.3257261410788382, "model_dtype": "torch.bfloat16", "ppl": 0}, {"musicmc": 0.28297872340425534, "lawmc": 0.5406797116374872, "model": "microsoft/Phi-3-medium-4k-instruct", "moviesmc": 0.42824074074074076, "booksmc": 0.3817427385892116, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.3021276595744681, "lawmc": 0.544799176107106, "model": "IlyaGusev/saiga_llama3_8b", "moviesmc": 0.3958333333333333, "booksmc": 0.3381742738589212, "model_dtype": "torch.bfloat16", "ppl": 0}, {"musicmc": 0.251063829787234, "lawmc": 0.48712667353244077, "model": "apsys/vikhr-52-7b", "moviesmc": 0.4212962962962963, "booksmc": 0.3112033195020747, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.24468085106382978, "lawmc": 0.4788877445932029, "model": "apsys/vikhr-53-7b-32k", "moviesmc": 0.4050925925925926, "booksmc": 0.3049792531120332, "model_dtype": "torch.float16", "ppl": 0}] \ No newline at end of file diff --git a/generate_initial_leaderboard.py b/generate_initial_leaderboard.py new file mode 100644 index 0000000000000000000000000000000000000000..739db6528c976c5e79faeba6a1be1050b2985026 --- /dev/null +++ b/generate_initial_leaderboard.py @@ -0,0 +1,329 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Π‘ΠΊΡ€ΠΈΠΏΡ‚ для Π³Π΅Π½Π΅Ρ€Π°Ρ†ΠΈΠΈ ΠΏΠ΅Ρ€Π²ΠΎΠ½Π°Ρ‡Π°Π»ΡŒΠ½ΠΎΠ³ΠΎ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° DeathMath ΠΈ Π·Π°Π³Ρ€ΡƒΠ·ΠΊΠΈ Π΄Π°Π½Π½Ρ‹Ρ… Π² HuggingFace. +Π˜ΡΠΏΠΎΠ»ΡŒΠ·ΡƒΠ΅Ρ‚ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ ΠΈΠ· Π΄ΠΈΡ€Π΅ΠΊΡ‚ΠΎΡ€ΠΈΠΈ results ΠΈ Π·Π°Π³Ρ€ΡƒΠΆΠ°Π΅Ρ‚ ΠΈΡ… Π² Ρ€Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΉ Vikhrmodels/DeathMath-leaderboard-data. +""" + +import os +import json +import logging +import pandas as pd +import argparse +from pathlib import Path +from huggingface_hub import HfApi, create_repo +from datetime import datetime + +# Настройка логирования +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(levelname)s - %(message)s", + handlers=[ + logging.FileHandler("leaderboard_generation.log"), + logging.StreamHandler() + ] +) +logger = logging.getLogger(__name__) + +# ΠšΠΎΠ½ΡΡ‚Π°Π½Ρ‚Ρ‹ +REPO_ID = "Vikhrmodels/DeathMath-leaderboard-data" +METAINFO_REPO_ID = "Vikhrmodels/DeathMath-leaderboard-metainfo" + +def setup_repositories(token): + """ + Π‘ΠΎΠ·Π΄Π°Π΅Ρ‚ Π½Π΅ΠΎΠ±Ρ…ΠΎΠ΄ΠΈΠΌΡ‹Π΅ Ρ€Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΈ Π½Π° HuggingFace Hub, Ссли ΠΎΠ½ΠΈ Π΅Ρ‰Π΅ Π½Π΅ ΡΡƒΡ‰Π΅ΡΡ‚Π²ΡƒΡŽΡ‚. 
+ + Args: + token (str): Π’ΠΎΠΊΠ΅Π½ для доступа ΠΊ HuggingFace Hub + """ + api = HfApi(token=token) + + try: + # ΠŸΡ€ΠΎΠ²Π΅Ρ€ΠΊΠ° ΠΈ созданиС рСпозитория для Π΄Π°Π½Π½Ρ‹Ρ… Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° + try: + api.repo_info(repo_id=REPO_ID, repo_type="dataset") + logger.info(f"Π Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΉ {REPO_ID} ΡƒΠΆΠ΅ сущСствуСт") + except Exception: + logger.info(f"Π‘ΠΎΠ·Π΄Π°Π½ΠΈΠ΅ рСпозитория для Π΄Π°Π½Π½Ρ‹Ρ… Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π°: {REPO_ID}") + create_repo(repo_id=REPO_ID, repo_type="dataset", token=token) + + # ΠŸΡ€ΠΎΠ²Π΅Ρ€ΠΊΠ° ΠΈ созданиС рСпозитория для ΠΌΠ΅Ρ‚Π°Π΄Π°Π½Π½Ρ‹Ρ… Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° + try: + api.repo_info(repo_id=METAINFO_REPO_ID, repo_type="dataset") + logger.info(f"Π Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΉ {METAINFO_REPO_ID} ΡƒΠΆΠ΅ сущСствуСт") + except Exception: + logger.info(f"Π‘ΠΎΠ·Π΄Π°Π½ΠΈΠ΅ рСпозитория для ΠΌΠ΅Ρ‚Π°Π΄Π°Π½Π½Ρ‹Ρ… Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π°: {METAINFO_REPO_ID}") + create_repo(repo_id=METAINFO_REPO_ID, repo_type="dataset", token=token) + + return api + except Exception as e: + logger.error(f"Ошибка ΠΏΡ€ΠΈ создании Ρ€Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠ΅Π²: {e}") + raise + +def load_results(results_file): + """ + Π—Π°Π³Ρ€ΡƒΠΆΠ°Π΅Ρ‚ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ ΠΈΠ· JSON Ρ„Π°ΠΉΠ»Π° ΠΈ удаляСт Π΄ΡƒΠ±Π»ΠΈΠΊΠ°Ρ‚Ρ‹. + + Args: + results_file (str): ΠŸΡƒΡ‚ΡŒ ΠΊ Ρ„Π°ΠΉΠ»Ρƒ с Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Π°ΠΌΠΈ + + Returns: + list: Бписок записСй для Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° Π±Π΅Π· Π΄ΡƒΠ±Π»ΠΈΠΊΠ°Ρ‚ΠΎΠ² + """ + try: + with open(results_file, "r", encoding="utf-8") as f: + data = json.load(f) + + leaderboard_entries = [] + seen_models = set() # ΠœΠ½ΠΎΠΆΠ΅ΡΡ‚Π²ΠΎ для отслСТивания ΡƒΠΆΠ΅ ΠΎΠ±Ρ€Π°Π±ΠΎΡ‚Π°Π½Π½Ρ‹Ρ… ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ + + for key, value in data.items(): + if "_Combined_" in key: # Π±Π΅Ρ€Π΅ΠΌ Ρ‚ΠΎΠ»ΡŒΠΊΠΎ ΠΊΠΎΠΌΠ±ΠΈΠ½ΠΈΡ€ΠΎΠ²Π°Π½Π½Ρ‹Π΅ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ + model_name = value["model_name"] + + # ΠŸΡ€ΠΎΠΏΡƒΡΠΊΠ°Π΅ΠΌ модСль, Ссли ΠΎΠ½Π° ΡƒΠΆΠ΅ Π±Ρ‹Π»Π° Π΄ΠΎΠ±Π°Π²Π»Π΅Π½Π° + if model_name in seen_models: + logger.info(f"ΠŸΡ€ΠΎΠΏΡƒΡΠΊΠ°Π΅ΠΌ Π΄ΡƒΠ±Π»ΠΈΡ€ΡƒΡŽΡ‰ΡƒΡŽΡΡ модСль: {model_name}") + continue + + # ДобавляСм модСль Π²ΠΎ мноТСство ΠΎΠ±Ρ€Π°Π±ΠΎΡ‚Π°Π½Π½Ρ‹Ρ… + seen_models.add(model_name) + + leaderboard_entry = { + "model_name": model_name, + "score": value["score"], + "math_score": value["math_score"], + "physics_score": value["physics_score"], + "total_tokens": value["total_tokens"], + "evaluation_time": value["evaluation_time"], + "system_prompt": value.get("system_prompt", + "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС.") + } + leaderboard_entries.append(leaderboard_entry) + + # Π‘ΠΎΡ€Ρ‚ΠΈΡ€ΠΎΠ²ΠΊΠ° ΠΏΠΎ ΠΎΠ±Ρ‰Π΅ΠΌΡƒ Π±Π°Π»Π»Ρƒ + leaderboard_entries.sort(key=lambda x: x["score"], reverse=True) + logger.info(f"Π—Π°Π³Ρ€ΡƒΠΆΠ΅Π½ΠΎ {len(leaderboard_entries)} ΡƒΠ½ΠΈΠΊΠ°Π»ΡŒΠ½Ρ‹Ρ… ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ послС удалСния Π΄ΡƒΠ±Π»ΠΈΠΊΠ°Ρ‚ΠΎΠ²") + return leaderboard_entries + + except Exception as e: + logger.error(f"Ошибка ΠΏΡ€ΠΈ Π·Π°Π³Ρ€ΡƒΠ·ΠΊΠ΅ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚ΠΎΠ²: {e}") + raise + +def prepare_directory_structure(): + """ + Π‘ΠΎΠ·Π΄Π°Π΅Ρ‚ Π½Π΅ΠΎΠ±Ρ…ΠΎΠ΄ΠΈΠΌΡƒΡŽ структуру Π΄ΠΈΡ€Π΅ΠΊΡ‚ΠΎΡ€ΠΈΠΉ для Π²Π½Π΅ΡˆΠ½ΠΈΡ… ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ. 
+ + Returns: + str: ΠŸΡƒΡ‚ΡŒ ΠΊ Π²Ρ€Π΅ΠΌΠ΅Π½Π½ΠΎΠΉ Π΄ΠΈΡ€Π΅ΠΊΡ‚ΠΎΡ€ΠΈΠΈ с ΠΏΠΎΠ΄Π³ΠΎΡ‚ΠΎΠ²Π»Π΅Π½Π½ΠΎΠΉ структурой + """ + temp_dir = Path("./temp_leaderboard") + model_data_dir = temp_dir / "model_data" / "external" + + # ΠžΡ‡ΠΈΡΡ‚ΠΊΠ° ΠΈ созданиС Π΄ΠΈΡ€Π΅ΠΊΡ‚ΠΎΡ€ΠΈΠΉ + if temp_dir.exists(): + import shutil + shutil.rmtree(temp_dir) + + model_data_dir.mkdir(parents=True, exist_ok=True) + + return str(temp_dir) + +def upload_model_files(api, leaderboard_entries, temp_dir): + """ + Π—Π°Π³Ρ€ΡƒΠΆΠ°Π΅Ρ‚ Ρ„Π°ΠΉΠ»Ρ‹ ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ Π² Ρ€Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΉ Π΄Π°Π½Π½Ρ‹Ρ… Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π°. + + Args: + api (HfApi): ЭкзСмпляр API для взаимодСйствия с HuggingFace + leaderboard_entries (list): Бписок записСй для Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° + temp_dir (str): ΠŸΡƒΡ‚ΡŒ ΠΊ Π²Ρ€Π΅ΠΌΠ΅Π½Π½ΠΎΠΉ Π΄ΠΈΡ€Π΅ΠΊΡ‚ΠΎΡ€ΠΈΠΈ + """ + model_data_dir = os.path.join(temp_dir, "model_data", "external") + + for entry in leaderboard_entries: + model_name = entry["model_name"] + safe_filename = model_name.replace("/", "_").replace(" ", "_") + file_path = os.path.join(model_data_dir, f"{safe_filename}.json") + + with open(file_path, "w", encoding="utf-8") as f: + json.dump(entry, f, ensure_ascii=False, indent=2) + + # Π—Π°Π³Ρ€ΡƒΠ·ΠΊΠ° Ρ„Π°ΠΉΠ»Π° ΠΌΠΎΠ΄Π΅Π»ΠΈ Π² Ρ€Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΉ + api.upload_file( + path_or_fileobj=file_path, + path_in_repo=f"model_data/external/{safe_filename}.json", + repo_id=REPO_ID, + repo_type="dataset" + ) + logger.info(f"Π—Π°Π³Ρ€ΡƒΠΆΠ΅Π½ Ρ„Π°ΠΉΠ» ΠΌΠΎΠ΄Π΅Π»ΠΈ: {safe_filename}.json") + +def generate_leaderboard_json(leaderboard_entries): + """ + Π‘ΠΎΠ·Π΄Π°Π΅Ρ‚ JSON Ρ„Π°ΠΉΠ» с Π΄Π°Π½Π½Ρ‹ΠΌΠΈ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π°. + + Args: + leaderboard_entries (list): Бписок записСй для Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° + + Returns: + str: ΠŸΡƒΡ‚ΡŒ ΠΊ созданному JSON Ρ„Π°ΠΉΠ»Ρƒ + """ + leaderboard_file = "leaderboard.json" + + with open(leaderboard_file, "w", encoding="utf-8") as f: + json.dump(leaderboard_entries, f, ensure_ascii=False, indent=2) + + return leaderboard_file + +def generate_readme(leaderboard_entries): + """ + Π“Π΅Π½Π΅Ρ€ΠΈΡ€ΡƒΠ΅Ρ‚ README.md с ΠΈΠ½Ρ„ΠΎΡ€ΠΌΠ°Ρ†ΠΈΠ΅ΠΉ ΠΎ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π΅. + + Args: + leaderboard_entries (list): Бписок записСй для Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° + + Returns: + str: ΠŸΡƒΡ‚ΡŒ ΠΊ созданному README Ρ„Π°ΠΉΠ»Ρƒ + """ + readme_file = "README.md" + + # Π‘ΠΎΠ·Π΄Π°Π΅ΠΌ DataFrame для ΡƒΠ΄ΠΎΠ±Π½ΠΎΠ³ΠΎ форматирования Ρ‚Π°Π±Π»ΠΈΡ†Ρ‹ + df = pd.DataFrame(leaderboard_entries) + + # Π€ΠΎΡ€ΠΌΠ°Ρ‚ΠΈΡ€ΡƒΠ΅ΠΌ числовыС ΠΊΠΎΠ»ΠΎΠ½ΠΊΠΈ + for col in ["score", "math_score", "physics_score"]: + if col in df.columns: + df[col] = df[col].apply(lambda x: f"{x:.3f}") + + if "total_tokens" in df.columns: + df["total_tokens"] = df["total_tokens"].apply(lambda x: f"{int(x):,}") + + if "evaluation_time" in df.columns: + df["evaluation_time"] = df["evaluation_time"].apply(lambda x: f"{x:.1f}s") + + # Π‘ΠΎΠ·Π΄Π°Π΅ΠΌ содСрТимоС README + current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + readme_content = f"""# DeathMath Leaderboard + +DeathMath - это Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊ для ΠΎΡ†Π΅Π½ΠΊΠΈ способности ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ Ρ€Π΅ΡˆΠ°Ρ‚ΡŒ слоТныС матСматичСскиС ΠΈ физичСскиС Π·Π°Π΄Π°Ρ‡ΠΈ Π½Π° русском языкС. 
+ +## Π’Π΅ΠΊΡƒΡ‰ΠΈΠΉ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄ + +ПослСднСС ΠΎΠ±Π½ΠΎΠ²Π»Π΅Π½ΠΈΠ΅: {current_date} + +| МодСль | ΠžΠ±Ρ‰ΠΈΠΉ Π±Π°Π»Π» | ΠœΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ° | Π€ΠΈΠ·ΠΈΠΊΠ° | Π’ΠΎΠΊΠ΅Π½Ρ‹ | ВрСмя ΠΎΡ†Π΅Π½ΠΊΠΈ | +|--------|------------|------------|---------|---------|--------------| +""" + + # ДобавляСм строки Ρ‚Π°Π±Π»ΠΈΡ†Ρ‹ + for _, row in df.iterrows(): + readme_content += f"| {row['model_name']} | {row['score']} | {row['math_score']} | {row['physics_score']} | {row.get('total_tokens', 'N/A')} | {row.get('evaluation_time', 'N/A')} |\n" + + readme_content += """ +## Как ΠΏΡ€ΠΈΠ½ΡΡ‚ΡŒ участиС Π² Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊΠ΅ + +Для участия Π² Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊΠ΅ DeathMath: + +1. ΠšΠ»ΠΎΠ½ΠΈΡ€ΡƒΠΉΡ‚Π΅ Ρ€Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΉ ΠΈ запуститС тСсты вашСй ΠΌΠΎΠ΄Π΅Π»ΠΈ +2. Π—Π°Π³Ρ€ΡƒΠ·ΠΈΡ‚Π΅ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ Ρ‡Π΅Ρ€Π΅Π· [HuggingFace Space](https://huggingface.co/spaces/Vikhrmodels/DeathMath-leaderboard) +3. Π”ΠΎΠΆΠ΄ΠΈΡ‚Π΅ΡΡŒ ΠΏΡ€ΠΎΠ²Π΅Ρ€ΠΊΠΈ ΠΈ добавлСния Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚ΠΎΠ² Π² Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄ + +## Π€ΠΎΡ€ΠΌΠ°Ρ‚ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚ΠΎΠ² + +Π Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ Π΄ΠΎΠ»ΠΆΠ½Ρ‹ Π±Ρ‹Ρ‚ΡŒ Π² Ρ„ΠΎΡ€ΠΌΠ°Ρ‚Π΅ JSON со ΡΠ»Π΅Π΄ΡƒΡŽΡ‰Π΅ΠΉ структурой: +```json +{ + "score": 0.586, + "math_score": 0.8, + "physics_score": 0.373, + "total_tokens": 1394299, + "evaluation_time": 4533.2, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." +} +``` + +## ЛицСнзия + +Π‘Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊ распространяСтся ΠΏΠΎΠ΄ Π»ΠΈΡ†Π΅Π½Π·ΠΈΠ΅ΠΉ Apache 2.0 +""" + + with open(readme_file, "w", encoding="utf-8") as f: + f.write(readme_content) + + return readme_file + +def upload_leaderboard_files(api, leaderboard_file, readme_file): + """ + Π—Π°Π³Ρ€ΡƒΠΆΠ°Π΅Ρ‚ Ρ„Π°ΠΉΠ»Ρ‹ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° Π² Ρ€Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΉ ΠΌΠ΅Ρ‚Π°Π΄Π°Π½Π½Ρ‹Ρ…. 
+ + Args: + api (HfApi): ЭкзСмпляр API для взаимодСйствия с HuggingFace + leaderboard_file (str): ΠŸΡƒΡ‚ΡŒ ΠΊ JSON Ρ„Π°ΠΉΠ»Ρƒ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° + readme_file (str): ΠŸΡƒΡ‚ΡŒ ΠΊ README Ρ„Π°ΠΉΠ»Ρƒ + """ + # Π—Π°Π³Ρ€ΡƒΠ·ΠΊΠ° JSON Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° + api.upload_file( + path_or_fileobj=leaderboard_file, + path_in_repo="leaderboard.json", + repo_id=METAINFO_REPO_ID, + repo_type="dataset" + ) + logger.info(f"Π—Π°Π³Ρ€ΡƒΠΆΠ΅Π½ Ρ„Π°ΠΉΠ» Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π°: leaderboard.json Π² {METAINFO_REPO_ID}") + + # Π—Π°Π³Ρ€ΡƒΠ·ΠΊΠ° README + api.upload_file( + path_or_fileobj=readme_file, + path_in_repo="README.md", + repo_id=METAINFO_REPO_ID, + repo_type="dataset" + ) + logger.info(f"Π—Π°Π³Ρ€ΡƒΠΆΠ΅Π½ README: README.md Π² {METAINFO_REPO_ID}") + +def main(): + # ΠŸΠ°Ρ€ΡΠΈΠ½Π³ Π°Ρ€Π³ΡƒΠΌΠ΅Π½Ρ‚ΠΎΠ² ΠΊΠΎΠΌΠ°Π½Π΄Π½ΠΎΠΉ строки + parser = argparse.ArgumentParser(description="ГСнСрация ΠΏΠ΅Ρ€Π²ΠΎΠ½Π°Ρ‡Π°Π»ΡŒΠ½ΠΎΠ³ΠΎ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° DeathMath") + parser.add_argument("--results", default="../results/leaderboard_results.json", + help="ΠŸΡƒΡ‚ΡŒ ΠΊ Ρ„Π°ΠΉΠ»Ρƒ с Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Π°ΠΌΠΈ (ΠΏΠΎ ΡƒΠΌΠΎΠ»Ρ‡Π°Π½ΠΈΡŽ: ../results/leaderboard_results.json)") + parser.add_argument("--token", required=True, help="Π’ΠΎΠΊΠ΅Π½ для доступа ΠΊ HuggingFace Hub") + + args = parser.parse_args() + + try: + logger.info("НачинаСм Π³Π΅Π½Π΅Ρ€Π°Ρ†ΠΈΡŽ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° DeathMath") + + # НастраиваСм Ρ€Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΈ + api = setup_repositories(args.token) + logger.info("Π Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΈ ΡƒΡΠΏΠ΅ΡˆΠ½ΠΎ настроСны") + + # Π—Π°Π³Ρ€ΡƒΠΆΠ°Π΅ΠΌ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ + leaderboard_entries = load_results(args.results) + logger.info(f"Π—Π°Π³Ρ€ΡƒΠΆΠ΅Π½ΠΎ {len(leaderboard_entries)} записСй для Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π°") + + # ΠŸΠΎΠ΄Π³ΠΎΡ‚Π°Π²Π»ΠΈΠ²Π°Π΅ΠΌ структуру Π΄ΠΈΡ€Π΅ΠΊΡ‚ΠΎΡ€ΠΈΠΉ + temp_dir = prepare_directory_structure() + logger.info(f"Π‘ΠΎΠ·Π΄Π°Π½Π° врСмСнная дирСктория: {temp_dir}") + + # Π—Π°Π³Ρ€ΡƒΠΆΠ°Π΅ΠΌ Ρ„Π°ΠΉΠ»Ρ‹ ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ + upload_model_files(api, leaderboard_entries, temp_dir) + logger.info("Π€Π°ΠΉΠ»Ρ‹ ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ ΡƒΡΠΏΠ΅ΡˆΠ½ΠΎ Π·Π°Π³Ρ€ΡƒΠΆΠ΅Π½Ρ‹") + + # Π“Π΅Π½Π΅Ρ€ΠΈΡ€ΡƒΠ΅ΠΌ JSON Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° + leaderboard_file = generate_leaderboard_json(leaderboard_entries) + logger.info(f"Π‘ΠΎΠ·Π΄Π°Π½ Ρ„Π°ΠΉΠ» Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π°: {leaderboard_file}") + + # Π“Π΅Π½Π΅Ρ€ΠΈΡ€ΡƒΠ΅ΠΌ README + readme_file = generate_readme(leaderboard_entries) + logger.info(f"Π‘ΠΎΠ·Π΄Π°Π½ README: {readme_file}") + + # Π—Π°Π³Ρ€ΡƒΠΆΠ°Π΅ΠΌ Ρ„Π°ΠΉΠ»Ρ‹ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° + upload_leaderboard_files(api, leaderboard_file, readme_file) + logger.info("Π€Π°ΠΉΠ»Ρ‹ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° ΡƒΡΠΏΠ΅ΡˆΠ½ΠΎ Π·Π°Π³Ρ€ΡƒΠΆΠ΅Π½Ρ‹") + + logger.info("ГСнСрация Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° ΡƒΡΠΏΠ΅ΡˆΠ½ΠΎ Π·Π°Π²Π΅Ρ€ΡˆΠ΅Π½Π°!") + + except Exception as e: + logger.error(f"Ошибка ΠΏΡ€ΠΈ Π³Π΅Π½Π΅Ρ€Π°Ρ†ΠΈΠΈ Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π°: {e}") + raise + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/genned.json b/genned.json new file mode 100644 index 0000000000000000000000000000000000000000..246164b9e39beb2e21986a7ba325dcf2bc21dbaf --- /dev/null +++ b/genned.json @@ -0,0 +1 @@ +[{"musicmc": 0.2936170212765957, "lawmc": 0.5345005149330587, "model": "RefalMachine/llama3 ushanka", "moviesmc": 0.35185185185185186, "booksmc": 0.3257261410788382, "model_dtype": "torch.bfloat16", "ppl": 0}, {"musicmc": 0.251063829787234, "lawmc": 0.48712667353244077, "model": "apsys/vikhr-52-7b", "moviesmc": 0.4212962962962963, "booksmc": 0.3112033195020747, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.09361702127659574, "mmluproru": 0.10207253886010363, "lawmc": 
0.11431513903192585, "model": "NousResearch/Llama-2-7b-hf", "moviesmc": 0.07175925925925926, "booksmc": 0.1078838174273859, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.2553191489361702, "mmluproru": 0.2621761658031088, "lawmc": 0.5818743563336766, "model": "google/gemma-2-9b", "moviesmc": 0.5046296296296297, "booksmc": 0.3360995850622407, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.20851063829787234, "lawmc": 0.42636457260556127, "model": "cohere/aya-8b", "moviesmc": 0.3287037037037037, "booksmc": 0.24273858921161826, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.2936170212765957, "lawmc": 0.48094747682801237, "model": "apsys/saiga_3_8b", "moviesmc": 0.3402777777777778, "booksmc": 0.3112033195020747, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.3021276595744681, "lawmc": 0.544799176107106, "model": "alexwortega/saiga_submit", "moviesmc": 0.3958333333333333, "booksmc": 0.3381742738589212, "model_dtype": "torch.bfloat16", "ppl": 0}, {"musicmc": 0.28297872340425534, "lawmc": 0.5406797116374872, "model": "microsoft/Phi-3-medium-4k-instruct", "moviesmc": 0.42824074074074076, "booksmc": 0.3817427385892116, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.28085106382978725, "mmluproru": 0.17979274611398963, "lawmc": 0.5324407826982492, "model": "apsys/tlite-it-0.1", "moviesmc": 0.4699074074074074, "booksmc": 0.3360995850622407, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.2680851063829787, "mmluproru": 0.20103626943005182, "lawmc": 0.5386199794026777, "model": "Vikhrmodels/it-5.2-fp16-cp", "moviesmc": 0.4537037037037037, "booksmc": 0.3070539419087137, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.2723404255319149, "lawmc": 0.4850669412976313, "model": "Nexusflow/Starling-LM-7B-beta", "moviesmc": 0.38657407407407407, "booksmc": 0.3070539419087137, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.20851063829787234, "lawmc": 0.47167868177136973, "model": "Salesforce/LLaMA-3-8B-SFR-Iterative-DPO-R", "moviesmc": 0.3055555555555556, "booksmc": 0.26141078838174275, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.25957446808510637, "mmluproru": 0.19378238341968912, "lawmc": 0.518022657054583, "model": "lightblue/suzume-llama-3-8B-multilingual", "moviesmc": 0.3287037037037037, "booksmc": 0.2966804979253112, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.28085106382978725, "mmluproru": 0.18808290155440416, "lawmc": 0.6426364572605562, "model": "apsys/vikhr-it-5.4-fp16-orpo-v2 ", "moviesmc": 0.4699074074074074, "booksmc": 0.33402489626556015, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.2872340425531915, "lawmc": 0.5066941297631308, "model": "vikhr-52-7b-chat-hf/apsys", "moviesmc": 0.4837962962962963, "booksmc": 0.3070539419087137, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.3021276595744681, "lawmc": 0.544799176107106, "model": "IlyaGusev/saiga_llama3_8b", "moviesmc": 0.3958333333333333, "booksmc": 0.3381742738589212, "model_dtype": "torch.bfloat16", "ppl": 0}, {"musicmc": 0.24468085106382978, "lawmc": 0.4788877445932029, "model": "apsys/vikhr-53-7b-32k", "moviesmc": 0.4050925925925926, "booksmc": 0.3049792531120332, "model_dtype": "torch.float16", "ppl": 0}, {"musicmc": 0.28085106382978725, "mmluproru": 0.17979274611398963, "lawmc": 0.5324407826982492, "model": "apsys/T-lite-instruct-0.1", "moviesmc": 0.4699074074074074, "booksmc": 0.3360995850622407, "model_dtype": "torch.float16", "ppl": 0}] \ No newline at end of file diff --git a/leaderboard.json b/leaderboard.json new 
file mode 100644 index 0000000000000000000000000000000000000000..33badea377a317c99157db4bef09e6eb49f96c66 --- /dev/null +++ b/leaderboard.json @@ -0,0 +1,155 @@ +[ + { + "model_name": "o3-mini-high", + "score": 0.600956937799043, + "math_score": 0.8473684210526315, + "physics_score": 0.35454545454545455, + "total_tokens": 2455126, + "evaluation_time": 4015.4359402656555, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "o4-mini-high", + "score": 0.5906698564593301, + "math_score": 0.8631578947368421, + "physics_score": 0.3181818181818182, + "total_tokens": 1898964, + "evaluation_time": 4623.6044108867645, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "Gemini 2.5 Pro Preview", + "score": 0.5863636363636364, + "math_score": 0.8, + "physics_score": 0.37272727272727274, + "total_tokens": 1394299, + "evaluation_time": 4533.155055761337, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "Gemini 2.0 Flash", + "score": 0.4217703349282297, + "math_score": 0.5526315789473685, + "physics_score": 0.2909090909090909, + "total_tokens": 731337, + "evaluation_time": 857.6413371562958, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "gpt-4.1", + "score": 0.3861244019138756, + "math_score": 0.5631578947368421, + "physics_score": 0.20909090909090908, + "total_tokens": 405803, + "evaluation_time": 1918.7988040447235, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "Claude 3.7 Sonnet", + "score": 0.36770334928229664, + "math_score": 0.5263157894736842, + "physics_score": 0.20909090909090908, + "total_tokens": 398016, + "evaluation_time": 1095.7695870399475, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "Claude 3.5 Sonnet", + "score": 0.33851674641148327, + "math_score": 0.43157894736842106, + "physics_score": 0.24545454545454545, + "total_tokens": 222241, + "evaluation_time": 670.5163931846619, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "Gemma 3 27B", + "score": 0.32057416267942584, + "math_score": 0.46842105263157896, + "physics_score": 0.17272727272727273, + "total_tokens": 357617, + "evaluation_time": 2030.33176279068, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "Gemma 3 12B", + "score": 0.29832535885167466, + "math_score": 0.4421052631578947, + "physics_score": 0.15454545454545454, + "total_tokens": 441055, + "evaluation_time": 3916.2552330493927, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." 
+ }, + { + "model_name": "Qwen2.5 72B Instruct", + "score": 0.2784688995215311, + "math_score": 0.38421052631578945, + "physics_score": 0.17272727272727273, + "total_tokens": 366729, + "evaluation_time": 2460.056980371475, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "gpt-4o", + "score": 0.2617224880382775, + "math_score": 0.4052631578947368, + "physics_score": 0.11818181818181818, + "total_tokens": 468809, + "evaluation_time": 1078.4077816009521, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "GigaChat-2-Max", + "score": 0.24952153110047848, + "math_score": 0.3263157894736842, + "physics_score": 0.17272727272727273, + "total_tokens": 220487, + "evaluation_time": 1006.1656014919281, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "GigaChat-2-Pro", + "score": 0.20861244019138758, + "math_score": 0.3263157894736842, + "physics_score": 0.09090909090909091, + "total_tokens": 212196, + "evaluation_time": 1002.5515208244324, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "GigaChat-Max", + "score": 0.1394736842105263, + "math_score": 0.17894736842105263, + "physics_score": 0.1, + "total_tokens": 201090, + "evaluation_time": 978.7567253112793, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "DeepSeek V3 0324", + "score": 0.13229665071770336, + "math_score": 0.1736842105263158, + "physics_score": 0.09090909090909091, + "total_tokens": 359162, + "evaluation_time": 4257.714092254639, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "Gemma 3 4B", + "score": 0.12416267942583732, + "math_score": 0.22105263157894736, + "physics_score": 0.02727272727272727, + "total_tokens": 572095, + "evaluation_time": 1682.6655840873718, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." + }, + { + "model_name": "GigaChat-2", + "score": 0.0937799043062201, + "math_score": 0.14210526315789473, + "physics_score": 0.045454545454545456, + "total_tokens": 299747, + "evaluation_time": 834.6775443553925, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." 
+ } +] \ No newline at end of file diff --git a/m_data/.gitattributes b/m_data/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..28df5f900b358436f0267334b3e3e9af33f917ba --- /dev/null +++ b/m_data/.gitattributes @@ -0,0 +1,55 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.lz4 filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +# Audio files - uncompressed +*.pcm filter=lfs diff=lfs merge=lfs -text +*.sam filter=lfs diff=lfs merge=lfs -text +*.raw filter=lfs diff=lfs merge=lfs -text +# Audio files - compressed +*.aac filter=lfs diff=lfs merge=lfs -text +*.flac filter=lfs diff=lfs merge=lfs -text +*.mp3 filter=lfs diff=lfs merge=lfs -text +*.ogg filter=lfs diff=lfs merge=lfs -text +*.wav filter=lfs diff=lfs merge=lfs -text +# Image files - uncompressed +*.bmp filter=lfs diff=lfs merge=lfs -text +*.gif filter=lfs diff=lfs merge=lfs -text +*.png filter=lfs diff=lfs merge=lfs -text +*.tiff filter=lfs diff=lfs merge=lfs -text +# Image files - compressed +*.jpg filter=lfs diff=lfs merge=lfs -text +*.jpeg filter=lfs diff=lfs merge=lfs -text +*.webp filter=lfs diff=lfs merge=lfs -text diff --git a/m_data/README.md b/m_data/README.md new file mode 100644 index 0000000000000000000000000000000000000000..154df8298fab5ecf322016157858e08cd1bccbe1 --- /dev/null +++ b/m_data/README.md @@ -0,0 +1,3 @@ +--- +license: apache-2.0 +--- diff --git a/m_data/leaderboard.json b/m_data/leaderboard.json new file mode 100644 index 0000000000000000000000000000000000000000..7234d42c7f4fea1080bc49be1c03fd4bd85061e0 --- /dev/null +++ b/m_data/leaderboard.json @@ -0,0 +1,11 @@ +[ + { + "musicmc": 0, + "lawmc": 0.2800829875518672, + "moviesmc": 0.3472222222222222, + "booksmc": 0.2800829875518672, + "model_dtype": "torch.float16", + "model": "apsys/apsys1", + "ppl": 0 + } +] \ No newline at end of file diff --git a/m_data/model_data/external/saiga_3_8bapsys.json b/m_data/model_data/external/saiga_3_8bapsys.json new file mode 100644 index 0000000000000000000000000000000000000000..915d78d04f71aec2072fb3eec90e4d0e9660bbd4 --- 
/dev/null +++ b/m_data/model_data/external/saiga_3_8bapsys.json @@ -0,0 +1 @@ +{"musicmc": 0.2936170212765957, "lawmc": 0.48094747682801237, "model": "apsys/saiga_3_8b", "moviesmc": 0.3402777777777778, "booksmc": 0.3112033195020747, "model_dtype": "torch.float16", "ppl": 0} \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..e391c02b74b30e25cc6780df7daa9b8054adb760 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,54 @@ +[tool.ruff] +line-length = 120 +target-version = "py312" +include = ["*.py", "*.pyi", "**/pyproject.toml", "*.ipynb"] +ignore=["I","EM","FBT","TRY003","S101","D101","D102","D103","D104","D105","G004","D107","FA102"] +fixable=["ALL"] +select=["ALL"] + +[tool.ruff.lint] +select = ["E", "F"] +fixable = ["ALL"] +ignore = ["E501"] # line too long (black is taking care of this) + +[tool.isort] +profile = "black" +line_length = 119 + +[tool.black] +line-length = 119 + +[tool.poetry] +package-mode = false +name = "open-llm-leaderboard" +version = "0.1.0" +description = "" +authors = [] +readme = "README.md" + +[tool.poetry.dependencies] +python = "3.12.1" +apscheduler = "3.10.1" +black = "23.11.0" +click = "8.1.3" +datasets = "2.14.5" +huggingface-hub = ">=0.18.0" +matplotlib = "3.8.4" +numpy = "1.26.0" +pandas = "2.2.2" +plotly = "5.14.1" +python-dateutil = "2.8.2" +requests = "2.28.2" +sentencepiece = "^0.2.0" +tqdm = "4.65.0" +transformers = "4.40.0" +tokenizers = ">=0.15.0" +gradio-space-ci = {git = "https://huggingface.co/spaces/Wauplin/gradio-space-ci", rev = "0.2.3"} +gradio = "4.20.0" +isort = "^5.13.2" +ruff = "^0.3.5" +gradio-leaderboard = "0.0.8" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..7d56d89ba1dcbcf623b156af2c9d7deeed5bd1d0 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,23 @@ +APScheduler==3.10.1 +black==23.11.0 +click==8.1.3 +datasets==2.14.5 +huggingface-hub>=0.18.0 +matplotlib==3.8.4 +numpy==1.26.0 +pandas==2.2.2 +plotly==5.14.1 +python-dateutil==2.8.2 +requests==2.28.2 +sentencepiece +tqdm==4.65.0 +transformers==4.40.0 +tokenizers>=0.15.0 +gradio-space-ci @ git+https://huggingface.co/spaces/Wauplin/gradio-space-ci@0.2.3 # CI !!! +gradio==4.20.0 +gradio_leaderboard==0.0.8 +tiktoken +openai +shortuuid +httpx==0.25.2 +scikit-learn diff --git a/src/display/about.py b/src/display/about.py new file mode 100644 index 0000000000000000000000000000000000000000..d37df4ce1f757e67053eebaab5632535146d32b8 --- /dev/null +++ b/src/display/about.py @@ -0,0 +1,128 @@ +from src.display.utils import ModelType + +TITLE = """
<h1 align="center">DeathMath Leaderboard</h1>
<p align="center">ΠžΡ†Π΅Π½ΠΊΠ° ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ Π½Π° слоТных матСматичСских ΠΈ физичСских Π·Π°Π΄Π°Ρ‡Π°Ρ…</p>
""" + +INTRODUCTION_TEXT = """ +# DeathMath Benchmark + +DeathMath - это Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊ для ΠΎΡ†Π΅Π½ΠΊΠΈ способности ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ Ρ€Π΅ΡˆΠ°Ρ‚ΡŒ слоТныС матСматичСскиС ΠΈ физичСскиС Π·Π°Π΄Π°Ρ‡ΠΈ Π½Π° русском языкС. + +## Π§Ρ‚ΠΎ ΠΎΡ†Π΅Π½ΠΈΠ²Π°Π΅Ρ‚ Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊ? + +- **RussianMath Score**: ΠžΡ†Π΅Π½ΠΊΠ° способности Ρ€Π΅ΡˆΠ°Ρ‚ΡŒ матСматичСскиС Π·Π°Π΄Π°Ρ‡ΠΈ Π½Π° русском языкС +- **RussianPhysics Score**: ΠžΡ†Π΅Π½ΠΊΠ° способности Ρ€Π΅ΡˆΠ°Ρ‚ΡŒ Π·Π°Π΄Π°Ρ‡ΠΈ ΠΏΠΎ Ρ„ΠΈΠ·ΠΈΠΊΠ΅ Π½Π° русском языкС +- **Combined Score**: ΠžΠ±Ρ‰Π°Ρ ΠΎΡ†Π΅Π½ΠΊΠ° (срСднСС ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠΈ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠΈ) +""" + +LLM_BENCHMARKS_TEXT = """ +## Как Π·Π°ΠΏΡƒΡΡ‚ΠΈΡ‚ΡŒ Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊ DeathMath + +Для ΠΎΡ†Π΅Π½ΠΊΠΈ вашСй ΠΌΠΎΠ΄Π΅Π»ΠΈ Π½Π° Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊΠ΅ DeathMath Π²Π°ΠΌ Π½ΡƒΠΆΠ½ΠΎ: + +### Установка +ΠšΠ»ΠΎΠ½ΠΈΡ€ΡƒΠΉΡ‚Π΅ Ρ€Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΉ DeathMath ΠΈ установитС Π½Π΅ΠΎΠ±Ρ…ΠΎΠ΄ΠΈΠΌΡ‹Π΅ зависимости: +```bash +git clone https://github.com/DeathMath/benchmark.git +cd DeathMath +pip install -r requirements.txt +``` + +### Запуск +Для запуска ΠΎΡ†Π΅Π½ΠΊΠΈ ΠΈΡΠΏΠΎΠ»ΡŒΠ·ΡƒΠΉΡ‚Π΅ скрипт runner.py: +```bash +python runner.py --config configs/run.yaml --model your_model_name_or_path +``` + +### Π€ΠΎΡ€ΠΌΠ°Ρ‚ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚ΠΎΠ² +ПослС выполнСния ΠΎΡ†Π΅Π½ΠΊΠΈ, Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ Π±ΡƒΠ΄ΡƒΡ‚ сохранСны Π² Π΄ΠΈΡ€Π΅ΠΊΡ‚ΠΎΡ€ΠΈΠΈ `results/`. Π’Π°ΠΌ Π½ΡƒΠΆΠ½ΠΎ Π±ΡƒΠ΄Π΅Ρ‚ ΠΏΠΎΠ΄Π³ΠΎΡ‚ΠΎΠ²ΠΈΡ‚ΡŒ JSON Ρ„Π°ΠΉΠ» с Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Π°ΠΌΠΈ Π² ΡΠ»Π΅Π΄ΡƒΡŽΡ‰Π΅ΠΌ Ρ„ΠΎΡ€ΠΌΠ°Ρ‚Π΅: + +```json +{ + "score": 0.586, + "math_score": 0.8, + "physics_score": 0.373, + "total_tokens": 1394299, + "evaluation_time": 4533.2, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." +} +``` + +### Π—Π°Π³Ρ€ΡƒΠ·ΠΊΠ° Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚ΠΎΠ² +Π—Π°Π³Ρ€ΡƒΠ·ΠΈΡ‚Π΅ ΠΏΠΎΠ»ΡƒΡ‡Π΅Π½Π½Ρ‹ΠΉ JSON Ρ„Π°ΠΉΠ» Ρ‡Π΅Ρ€Π΅Π· Π²ΠΊΠ»Π°Π΄ΠΊΡƒ "Submit Model" Π½Π° этом Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π΅. + +### ΠŸΠΎΠ»ΠΈΡ‚ΠΈΠΊΠ° ΠΏΡ€ΠΎΡ‚ΠΈΠ² читСрства +ΠŸΡ€ΠΈ ΠΎΠ±Π½Π°Ρ€ΡƒΠΆΠ΅Π½ΠΈΠΈ ΠΏΠΎΠΏΡ‹Ρ‚ΠΎΠΊ манипуляции Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Π°ΠΌΠΈ ΠΈΠ»ΠΈ ΠΌΠΎΠ΄ΠΈΡ„ΠΈΠΊΠ°Ρ†ΠΈΠΈ Π²Ρ‹Ρ…ΠΎΠ΄Π½ΠΎΠ³ΠΎ Ρ„Π°ΠΉΠ»Π°, ΠΌΡ‹ оставляСм Π·Π° собой ΠΏΡ€Π°Π²ΠΎ ΡƒΠ΄Π°Π»ΠΈΡ‚ΡŒ ваш Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚ ΠΈΠ· Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π°. +""" + +FAQ_TEXT = """ +## Часто Π·Π°Π΄Π°Π²Π°Π΅ΠΌΡ‹Π΅ вопросы + +### ΠžΠ±Ρ‰ΠΈΠ΅ вопросы +**Q: КакиС Ρ‚ΠΈΠΏΡ‹ ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ ΠΏΠΎΠ΄Π΄Π΅Ρ€ΠΆΠΈΠ²Π°ΡŽΡ‚ΡΡ?** +A: ΠœΡ‹ ΠΏΠΎΠ΄Π΄Π΅Ρ€ΠΆΠΈΠ²Π°Π΅ΠΌ Π»ΡŽΠ±Ρ‹Π΅ языковыС ΠΌΠΎΠ΄Π΅Π»ΠΈ, ΠΊΠΎΡ‚ΠΎΡ€Ρ‹Π΅ ΠΌΠΎΠΆΠ½ΠΎ Π·Π°ΠΏΡƒΡΡ‚ΠΈΡ‚ΡŒ локально ΠΈΠ»ΠΈ Ρ‡Π΅Ρ€Π΅Π· API, ΠΈ ΠΊΠΎΡ‚ΠΎΡ€Ρ‹Π΅ ΠΌΠΎΠ³ΡƒΡ‚ Ρ€Π΅ΡˆΠ°Ρ‚ΡŒ Π·Π°Π΄Π°Ρ‡ΠΈ Π½Π° русском языкС. + +**Q: Как ΠΎΡ†Π΅Π½ΠΈΠ²Π°ΡŽΡ‚ΡΡ ΠΌΠΎΠ΄Π΅Π»ΠΈ Π² Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊΠ΅?** +A: МодСли ΠΎΡ†Π΅Π½ΠΈΠ²Π°ΡŽΡ‚ΡΡ ΠΏΠΎ способности Ρ€Π΅ΡˆΠ°Ρ‚ΡŒ матСматичСскиС ΠΈ физичСскиС Π·Π°Π΄Π°Ρ‡ΠΈ Π½Π° русском языкС. ΠžΡ†Π΅Π½ΠΊΠΈ Π²Ρ‹ΡΡ‚Π°Π²Π»ΡΡŽΡ‚ΡΡ Π½Π° основС ΠΏΡ€Π°Π²ΠΈΠ»ΡŒΠ½ΠΎΡΡ‚ΠΈ Ρ€Π΅ΡˆΠ΅Π½ΠΈΠΉ. + +### ΠžΡ‚ΠΏΡ€Π°Π²ΠΊΠ° Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚ΠΎΠ² +**Q: Как ΠΎΡ‚ΠΏΡ€Π°Π²ΠΈΡ‚ΡŒ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ ΠΌΠΎΠ΅ΠΉ ΠΌΠΎΠ΄Π΅Π»ΠΈ?** +A: ЗапуститС ΠΎΡ†Π΅Π½ΠΊΡƒ, ΠΏΠΎΠ΄Π³ΠΎΡ‚ΠΎΠ²ΡŒΡ‚Π΅ JSON Ρ„Π°ΠΉΠ» с Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Π°ΠΌΠΈ ΠΈ Π·Π°Π³Ρ€ΡƒΠ·ΠΈΡ‚Π΅ Π΅Π³ΠΎ Ρ‡Π΅Ρ€Π΅Π· Π²ΠΊΠ»Π°Π΄ΠΊΡƒ "Submit Model". + +**Q: ΠœΠΎΠ³Ρƒ Π»ΠΈ я ΠΎΠ±Π½ΠΎΠ²ΠΈΡ‚ΡŒ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ ΠΌΠΎΠ΅ΠΉ ΠΌΠΎΠ΄Π΅Π»ΠΈ?** +A: Π”Π°, Π²Ρ‹ ΠΌΠΎΠΆΠ΅Ρ‚Π΅ ΠΎΡ‚ΠΏΡ€Π°Π²ΠΈΡ‚ΡŒ Π½ΠΎΠ²Ρ‹Π΅ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ Ρ‚ΠΎΠΉ ΠΆΠ΅ ΠΌΠΎΠ΄Π΅Π»ΠΈ, Ссли, Π½Π°ΠΏΡ€ΠΈΠΌΠ΅Ρ€, Π²Ρ‹ ΡƒΠ»ΡƒΡ‡ΡˆΠΈΠ»ΠΈ Π΅Π΅ Ρ€Π°Π±ΠΎΡ‚Ρƒ. 
+ +### ВСхничСскиС вопросы +**Q: Π§Ρ‚ΠΎ Π΄Π΅Π»Π°Ρ‚ΡŒ, Ссли Π²ΠΎΠ·Π½ΠΈΠΊΠ»ΠΈ ΠΏΡ€ΠΎΠ±Π»Π΅ΠΌΡ‹ с запуском ΠΎΡ†Π΅Π½ΠΊΠΈ?** +A: ΠŸΡ€ΠΎΠ²Π΅Ρ€ΡŒΡ‚Π΅ ΠΏΡ€Π°Π²ΠΈΠ»ΡŒΠ½ΠΎΡΡ‚ΡŒ установки всСх зависимостСй ΠΈ ΠΊΠΎΠ½Ρ„ΠΈΠ³ΡƒΡ€Π°Ρ†ΠΈΠΈ. Если ΠΏΡ€ΠΎΠ±Π»Π΅ΠΌΠ° Π½Π΅ Ρ€Π΅ΡˆΠ°Π΅Ρ‚ΡΡ, создайтС issue Π² Ρ€Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΈ ΠΏΡ€ΠΎΠ΅ΠΊΡ‚Π°. + +**Q: Как ΠΏΡ€ΠΎΠ²Π΅Ρ€ΡΡŽΡ‚ΡΡ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ Π½Π° Π΄ΠΎΡΡ‚ΠΎΠ²Π΅Ρ€Π½ΠΎΡΡ‚ΡŒ?** +A: ΠœΡ‹ Π°Π½Π°Π»ΠΈΠ·ΠΈΡ€ΡƒΠ΅ΠΌ распрСдСлСниС Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚ΠΎΠ² ΠΈ ΠΏΠΎΠ΄ΠΎΠ·Ρ€ΠΈΡ‚Π΅Π»ΡŒΠ½Ρ‹Π΅ Ρ€Π΅Π·ΡƒΠ»ΡŒΡ‚Π°Ρ‚Ρ‹ ΠΌΠΎΠ³ΡƒΡ‚ Π±Ρ‹Ρ‚ΡŒ ΠΏΡ€ΠΎΠ²Π΅Ρ€Π΅Π½Ρ‹ Π΄ΠΎΠΏΠΎΠ»Π½ΠΈΡ‚Π΅Π»ΡŒΠ½ΠΎ. +""" + +EVALUATION_QUEUE_TEXT = f""" +# Evaluation Queue for the πŸ€— Open LLM Leaderboard + +Models added here will be automatically evaluated on the πŸ€— cluster. + +## Don't forget to read the FAQ and the About tabs for more information! + +## First steps before submitting a model + +### 1) Make sure you can load your model and tokenizer using AutoClasses: +```python +from transformers import AutoConfig, AutoModel, AutoTokenizer +config = AutoConfig.from_pretrained("your model name", revision=revision) +model = AutoModel.from_pretrained("your model name", revision=revision) +tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision) +``` +If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded. + +Note: make sure your model is public! +Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted! + +### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index) +It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`! + +### 3) Make sure your model has an open license! +This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model πŸ€— + +### 4) Fill up your model card +When we add extra information about models to the leaderboard, it will be automatically taken from the model card + +### 5) Select the correct precision +Not all models are converted properly from `float16` to `bfloat16`, and selecting the wrong precision can sometimes cause evaluation error (as loading a `bf16` model in `fp16` can sometimes generate NaNs, depending on the weight range). + +Note: Please be advised that when submitting, git branches and tags will be strictly tied to the specific commit present at the time of submission. This ensures revision consistency. 
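+As a quick sanity check before submitting, here is a minimal sketch (assuming a public model and the pinned `transformers`/`torch` versions; "your model name" is a placeholder) for loading your weights in the precision you plan to select:
+
+```python
+import torch
+from transformers import AutoModelForCausalLM
+
+# Load in the dtype you will submit with; swap bfloat16 for float16 as needed
+model = AutoModelForCausalLM.from_pretrained("your model name", torch_dtype=torch.bfloat16)
+print(next(model.parameters()).dtype)  # confirms the weights actually loaded in the requested dtype
+```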
+## Model types +{icons} +""" + +CITATION_BUTTON_LABEL = "Π¦ΠΈΡ‚ΠΈΡ€ΠΎΠ²Π°Π½ΠΈΠ΅ Π±Π΅Π½Ρ‡ΠΌΠ°Ρ€ΠΊΠ° DeathMath" +CITATION_BUTTON_TEXT = r""" +@misc{deathmathbenchmark, + title = {DeathMath: A Benchmark for Mathematical and Physics Problem Solving in Russian}, + year = {2025}, + publisher = {DeathMath Team}, + howpublished = {\url{https://huggingface.co/spaces/DeathMath/leaderboard}} +} +""" diff --git a/src/display/css_html_js.py b/src/display/css_html_js.py new file mode 100644 index 0000000000000000000000000000000000000000..8aaa9f43b676f037690fdb8e3aa0b801acb4cfbf --- /dev/null +++ b/src/display/css_html_js.py @@ -0,0 +1,98 @@ +custom_css = """ +/* Limit the width of the first AutoEvalColumn so that names don't expand too much */ +table td:first-child, +table th:first-child { + max-width: 400px; + overflow: auto; + white-space: nowrap; +} + +/* Full width space */ +.gradio-container { + max-width: 95%!important; +} + +/* Text style and margins */ +.markdown-text { + font-size: 16px !important; +} + +#models-to-add-text { + font-size: 18px !important; +} + +#citation-button span { + font-size: 16px !important; +} + +#citation-button textarea { + font-size: 16px !important; +} + +#citation-button > label > button { + margin: 6px; + transform: scale(1.3); +} + +#search-bar-table-box > div:first-child { + background: none; + border: none; +} + +#search-bar { + padding: 0px; +} + +.tab-buttons button { + font-size: 20px; +} + +/* Filters style */ +#filter_type{ + border: 0; + padding-left: 0; + padding-top: 0; +} +#filter_type label { + display: flex; +} +#filter_type label > span{ + margin-top: var(--spacing-lg); + margin-right: 0.5em; +} +#filter_type label > .wrap{ + width: 103px; +} +#filter_type label > .wrap .wrap-inner{ + padding: 2px; +} +#filter_type label > .wrap .wrap-inner input{ + width: 1px +} +#filter-columns-type{ + border:0; + padding:0.5; +} +#filter-columns-size{ + border:0; + padding:0.5; +} +#box-filter > .form{ + border: 0 +} +#oauth-button { + height: 100%; + min-width: 100%; + white-space: nowrap; + padding: 10px 20px; + border-radius: 4px; +} +""" + +get_window_url_params = """ + function(url_params) { + const params = new URLSearchParams(window.location.search); + url_params = Object.fromEntries(params); + return url_params; + } + """ diff --git a/src/display/formatting.py b/src/display/formatting.py new file mode 100644 index 0000000000000000000000000000000000000000..28684d5ad33655b827504320dedced7ef97ea157 --- /dev/null +++ b/src/display/formatting.py @@ -0,0 +1,36 @@ +from huggingface_hub import HfApi + +API = HfApi() + + +def model_hyperlink(link, model_name): + return f'{model_name}' + + +def make_clickable_model(model_name): + link = f"https://huggingface.co/{model_name}" + + details_model_name = model_name.replace("/", "__") + details_link = f"https://huggingface.co/datasets/open-llm-leaderboard/details_{details_model_name}" + + return model_hyperlink(link, model_name) + " " + model_hyperlink(details_link, "πŸ“‘") + + +def styled_error(error): + return f"
<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>
" + + +def styled_warning(warn): + return f"
<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>
" + + +def styled_message(message): + return f"
<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>
" + + +def has_no_nan_values(df, columns): + return df[columns].notna().all(axis=1) + + +def has_nan_values(df, columns): + return df[columns].isna().any(axis=1) diff --git a/src/display/utils.py b/src/display/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..33def0aaeb3fcdb228500e753e8dacfc39288f94 --- /dev/null +++ b/src/display/utils.py @@ -0,0 +1,189 @@ +from dataclasses import dataclass, make_dataclass +from enum import Enum +import json +import logging +from datetime import datetime +import pandas as pd + + +# Configure logging +logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") + + +def parse_datetime(datetime_str): + formats = [ + "%Y-%m-%dT%H-%M-%S.%f", # Format with dashes + "%Y-%m-%dT%H:%M:%S.%f", # Standard format with colons + "%Y-%m-%dT%H %M %S.%f", # Spaces as separator + ] + + for fmt in formats: + try: + return datetime.strptime(datetime_str, fmt) + except ValueError: + continue + # in rare cases set unix start time for files with incorrect time (legacy files) + logging.error(f"No valid date format found for: {datetime_str}") + return datetime(1970, 1, 1) + + +def load_json_data(file_path): + """Safely load JSON data from a file.""" + try: + with open(file_path, "r") as file: + return json.load(file) + except json.JSONDecodeError: + print(f"Error reading JSON from {file_path}") + return None # Or raise an exception + + +def fields(raw_class): + return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"] + + +@dataclass +class Task: + benchmark: str + metric: str + col_name: str + + +class Tasks(Enum): + math = Task("RussianMath", "score", "math_score") + physics = Task("RussianPhysics", "score", "physics_score") + combined = Task("Combined", "score", "score") + + +# These classes are for user facing column names, +# to avoid having to change them all around the code +# when a modif is needed +@dataclass(frozen=True) +class ColumnContent: + name: str + type: str + displayed_by_default: bool + hidden: bool = False + never_hidden: bool = False + dummy: bool = False + + +auto_eval_column_dict = [] +# Init +auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("model", "markdown", True, never_hidden=True)]) +# Scores +auto_eval_column_dict.append(["score", ColumnContent, ColumnContent("score", "number", True)]) +for task in Tasks: + if task != Tasks.combined: # Combined score ΡƒΠΆΠ΅ Π΄ΠΎΠ±Π°Π²Π»Π΅Π½ Π²Ρ‹ΡˆΠ΅ + auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)]) + +# Model information +auto_eval_column_dict.append(["total_tokens", ColumnContent, ColumnContent("total_tokens", "number", False)]) +auto_eval_column_dict.append(["evaluation_time", ColumnContent, ColumnContent("evaluation_time", "number", False)]) +auto_eval_column_dict.append(["system_prompt", ColumnContent, ColumnContent("system_prompt", "str", False)]) + +# We use make dataclass to dynamically fill the scores from Tasks +AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True) + + +@dataclass(frozen=True) +class EvalQueueColumn: # Queue column + model = ColumnContent("model", "markdown", True) + + +baseline_row = { + AutoEvalColumn.model.name: "
<p>Baseline</p>
", + AutoEvalColumn.score.name: 0.1, + AutoEvalColumn.math.name: 0.1, + AutoEvalColumn.physics.name: 0.1, + AutoEvalColumn.total_tokens.name: 0, + AutoEvalColumn.evaluation_time.name: 0, + AutoEvalColumn.system_prompt.name: "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС.", +} + +# Define the human baselines +human_baseline_row = { + AutoEvalColumn.model.name: "
<p>Human performance</p>
", + AutoEvalColumn.score.name: 0.9, + AutoEvalColumn.math.name: 0.9, + AutoEvalColumn.physics.name: 0.9, + AutoEvalColumn.total_tokens.name: 0, + AutoEvalColumn.evaluation_time.name: 0, + AutoEvalColumn.system_prompt.name: "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС.", +} + + +@dataclass +class ModelDetails: + name: str + symbol: str = "" # emoji, only for the model type + + +class ModelType(Enum): + PT = ModelDetails(name="pretrained", symbol="🟒") + CPT = ModelDetails(name="continuously pretrained", symbol="🟩") + FT = ModelDetails(name="fine-tuned on domain-specific datasets", symbol="πŸ”Ά") + chat = ModelDetails(name="chat models (RLHF, DPO, IFT, ...)", symbol="πŸ’¬") + merges = ModelDetails(name="base merges and moerges", symbol="🀝") + Unknown = ModelDetails(name="", symbol="?") + + def to_str(self, separator=" "): + return f"{self.value.symbol}{separator}{self.value.name}" + + @staticmethod + def from_str(type): + if "fine-tuned" in type or "πŸ”Ά" in type: + return ModelType.FT + if "continously pretrained" in type or "🟩" in type: + return ModelType.CPT + if "pretrained" in type or "🟒" in type: + return ModelType.PT + if any([k in type for k in ["instruction-tuned", "RL-tuned", "chat", "🟦", "β­•", "πŸ’¬"]]): + return ModelType.chat + if "merge" in type or "🀝" in type: + return ModelType.merges + return ModelType.Unknown + + +class WeightType(Enum): + Adapter = ModelDetails("Adapter") + Original = ModelDetails("Original") + Delta = ModelDetails("Delta") + + +class Precision(Enum): + float16 = ModelDetails("float16") + bfloat16 = ModelDetails("bfloat16") + qt_8bit = ModelDetails("8bit") + qt_4bit = ModelDetails("4bit") + qt_GPTQ = ModelDetails("GPTQ") + Unknown = ModelDetails("?") + + def from_str(precision): + if precision in ["torch.float16", "float16"]: + return Precision.float16 + if precision in ["torch.bfloat16", "bfloat16"]: + return Precision.bfloat16 + if precision in ["8bit"]: + return Precision.qt_8bit + if precision in ["4bit"]: + return Precision.qt_4bit + if precision in ["GPTQ", "None"]: + return Precision.qt_GPTQ + return Precision.Unknown + + +# Column selection +COLS = [c.name for c in fields(AutoEvalColumn)] +TYPES = [c.type for c in fields(AutoEvalColumn)] + +EVAL_COLS = [c.name for c in fields(EvalQueueColumn)] +EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)] + +NUMERIC_INTERVALS = { + "?": pd.Interval(-1, 0, closed="right"), + "~0.1": pd.Interval(0, 0.2, closed="right"), + "~0.3": pd.Interval(0.2, 0.4, closed="right"), + "~0.5": pd.Interval(0.4, 0.6, closed="right"), + "~0.7": pd.Interval(0.6, 0.8, closed="right"), + "0.8+": pd.Interval(0.8, 1.0, closed="right"), +} diff --git a/src/envs.py b/src/envs.py new file mode 100644 index 0000000000000000000000000000000000000000..50216b3ec7511bd533a9fdcb8f9f6a98342f0cc1 --- /dev/null +++ b/src/envs.py @@ -0,0 +1,31 @@ +import os + +from huggingface_hub import HfApi + +# Π’ΠΎΠΊΠ΅Π½ для доступа ΠΊ HuggingFace Hub +H4_TOKEN = os.environ.get("H4_TOKEN", None) + +# Π Π΅ΠΏΠΎΠ·ΠΈΡ‚ΠΎΡ€ΠΈΠΈ для DeathMath +REPO_ID = "Vikhrmodels/DeathMath-leaderboard" +RESULTS_REPO = "Vikhrmodels/DeathMath-leaderboard-data" +METAINFO_REPO = "Vikhrmodels/DeathMath-leaderboard-metainfo" + +# ΠŸΡƒΡ‚ΡŒ ΠΊ Π΄Π°Π½Π½Ρ‹ΠΌ локально +HF_HOME = os.getenv("HF_HOME", ".") +print(f"Initial HF_HOME set to: {HF_HOME}") + +# ΠŸΡ€ΠΎΠ²Π΅Ρ€ΠΊΠ° ΠΏΡ€Π°Π² доступа ΠΊ Π΄ΠΈΡ€Π΅ΠΊΡ‚ΠΎΡ€ΠΈΠΈ +if not os.access(HF_HOME, os.W_OK): + print(f"No write access to HF_HOME: {HF_HOME}. 
Resetting to current directory.") + HF_HOME = "." + os.environ["HF_HOME"] = HF_HOME +else: + print("Write access confirmed for HF_HOME") + +DATA_PATH = os.path.join(HF_HOME, "data") + +# ΠŸΠ΅Ρ€Π΅ΠΌΠ΅Π½Π½Π°Ρ для обновлСния Π»ΠΈΠ΄Π΅Ρ€Π±ΠΎΡ€Π΄Π° +RESET_JUDGEMENT_ENV = "RESET_JUDGEMENT" + +# API HuggingFace +API = HfApi(token=H4_TOKEN) diff --git a/src/gen/config/api_config.yaml b/src/gen/config/api_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c85f3104d36f3ba93b540679ae52471fde8323d7 --- /dev/null +++ b/src/gen/config/api_config.yaml @@ -0,0 +1,203 @@ +# name: str +# model_name: str +# endpoints: default to null +# - api_base: str +# api_key: str optional (required if no api_key_ENV) +# api_key_ENV: str optional (ENV name to store the token secret) +# api_version: str optional (only for azure) +# api_type: str +# tokenizer: str optional (to optimize token limits) +# parallel: int + +gpt-4-1106-preview: + model_name: gpt-4-1106-preview + endpoints: + - api_base: https://cgiaura-openai-trainning.openai.azure.com + api_key_ENV: GPT_4_TOKEN + api_version: 2024-02-15-preview + api_type: azure + parallel: 5 + +gpt-3.5-turbo-0125: + model_name: gpt-3.5-turbo-0125 + endpoints: + - api_base: https://api.openai.com/v1/ + api_key_ENV: GPT_3_TOKEN + api_type: openai + parallel: 6 + +gpt-3.5-turbo-0125-ru-sys: + model_name: gpt-3.5-turbo-0125 + endpoints: + - api_base: https://api.openai.com/v1/ + api_key_ENV: GPT_3_TOKEN + system_prompt: You are a helpful assistant. Answer on Russian. + api_type: openai + parallel: 6 + +yandex_gpt_pro: + model_name: yandexgpt + endpoints: + - catalog_id: b1gk1i41eeb97a5s68c7 + iam_token_ENV: YANDEX_GPT_TOKEN + api_type: yandex + parallel: 2 + +gigachat_lite: + model_name: GigaChat + endpoints: + auth_token_ENV: GIGACHAT_GPT_TOKEN + api_type: gigachat + parallel: 1 + +gigachat_pro: + model_name: GigaChat-Pro + endpoints: + auth_token_ENV: GIGACHAT_GPT_TOKEN + api_type: gigachat + parallel: 1 + +meta-llama-3-70b-instruct-gptq: + model_name: MaziyarPanahi/Meta-Llama-3-70B-Instruct-GPTQ + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +snorkel-mistral-pairrm-dpo: + model_name: snorkelai/Snorkel-Mistral-PairRM-DPO + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +sfr-iterative-dpo-llama-3-8b-r: + model_name: Salesforce/SFR-Iterative-DPO-LLaMA-3-8B-R + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +openchat-3.5-0106: + model_name: openchat/openchat-3.5-0106 + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +mixtral-8x7b-instruct-v0.1: + model_name: LoneStriker/Mixtral-8x7B-Instruct-v0.1-HF + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 4 + +neural-chat-7b-v3-3: + model_name: Intel/neural-chat-7b-v3-3 + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +meta-llama-3-8b-instruct: + model_name: meta-llama/Meta-Llama-3-8B-Instruct + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +saiga_llama3_8b: + model_name: IlyaGusev/saiga_llama3_8b + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +hermes-2-pro-llama-3-8b: + model_name: NousResearch/Hermes-2-Pro-Llama-3-8B + 
endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +dpopenhermes-7b: + model_name: openaccess-ai-collective/DPOpenHermes-7B + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +llama3-chatqa-1.5-8b: + model_name: nvidia/Llama3-ChatQA-1.5-8B + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +hermes-2-pro-mistral-7b: + model_name: NousResearch/Hermes-2-Pro-Mistral-7B + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +suzume-llama-3-8b-multilingual: + model_name: lightblue/suzume-llama-3-8B-multilingual + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +vikhr-7b-instruct_0.4: + model_name: Vikhrmodels/Vikhr-7B-instruct_0.4 + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +vikhr-it-5.2-fp16-cp: + model_name: Vikhrmodels/it-5.2-fp16-cp + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + system_prompt: Π’Ρ‹ β€” Π’ΠΈΡ…Ρ€ΡŒ, русскоязычный ассистСнт. + parallel: 6 + +starling-lm-7b-beta: + model_name: Nexusflow/Starling-LM-7B-beta + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +c4ai-command-r-v01: + model_name: CohereForAI/c4ai-command-r-v01 + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 6 + +starcoder2-15b-instruct-v0.1: + model_name: bigcode/starcoder2-15b-instruct-v0.1 + endpoints: + - api_base: http://localhost:8000/v1 + api_key: token-abc123 + api_type: openai + parallel: 3 diff --git a/src/gen/config/judge_config-ru.yaml b/src/gen/config/judge_config-ru.yaml new file mode 100644 index 0000000000000000000000000000000000000000..30ff21efb0cbe3d8dfce9af1cdf7a2dd29e9861c --- /dev/null +++ b/src/gen/config/judge_config-ru.yaml @@ -0,0 +1,35 @@ +name: judgment config file for Arena Hard + +bench_name: arena-hard-v0.1 + +# Arena Hard default +judge_model: gpt-4-1106-preview +reference: False # Optional +ref_model: null + +baseline: True +baseline_model: gpt-3.5-turbo-0125 + +pairwise: True +temperature: 0 +max_tokens: 4096 + +regex_pattern: \[\[([AB<>=]+)\]\] + +system_prompt: "ΠŸΠΎΠΆΠ°Π»ΡƒΠΉΡΡ‚Π°, Π²Π΅Π΄ΠΈ сСбя ΠΊΠ°ΠΊ бСспристрастный ΡΡƒΠ΄ΡŒΡ ΠΈ ΠΎΡ†Π΅Π½ΠΈ качСство ΠΎΡ‚Π²Π΅Ρ‚ΠΎΠ², прСдоставлСнных двумя AI ассистСнтами Π½Π° ΠΏΠΎΠ»ΡŒΠ·ΠΎΠ²Π°Ρ‚Π΅Π»ΡŒΡΠΊΠΈΠΉ запрос, прСдставлСнный Π½ΠΈΠΆΠ΅. Π’Π΅Π±Π΅ Π±ΡƒΠ΄ΡƒΡ‚ Π΄Π°Π½Ρ‹ ΠΎΡ‚Π²Π΅Ρ‚Ρ‹ ассистСнта А ΠΈ ассистСнта Π’. Ввоя Π·Π°Π΄Π°Ρ‡Π° β€” ΠΎΡ†Π΅Π½ΠΈΡ‚ΡŒ, Ρ‡Π΅ΠΉ ΠΎΡ‚Π²Π΅Ρ‚ Π»ΡƒΡ‡ΡˆΠ΅.\n\nНачни свою ΠΎΡ†Π΅Π½ΠΊΡƒ, сгСнСрировав собствСнный ΠΎΡ‚Π²Π΅Ρ‚ Π½Π° запрос. Π’Ρ‹ Π΄ΠΎΠ»ΠΆΠ΅Π½ ΠΏΡ€Π΅Π΄ΠΎΡΡ‚Π°Π²ΠΈΡ‚ΡŒ свои ΠΎΡ‚Π²Π΅Ρ‚Ρ‹, ΠΏΡ€Π΅ΠΆΠ΄Π΅ Ρ‡Π΅ΠΌ ΡΡƒΠ΄ΠΈΡ‚ΡŒ ΠΎΠ± ΠΎΡ‚Π²Π΅Ρ‚Π°Ρ… Π΄Ρ€ΡƒΠ³ΠΈΡ… AI.\n\nΠŸΡ€ΠΈ ΠΎΡ†Π΅Π½ΠΊΠ΅ ΠΎΡ‚Π²Π΅Ρ‚ΠΎΠ² ассистСнтов сравни ΠΎΡ‚Π²Π΅Ρ‚Ρ‹ ΠΎΠ±ΠΎΠΈΡ… ассистСнтов со своим ΠΎΡ‚Π²Π΅Ρ‚ΠΎΠΌ. Π’Ρ‹ Π΄ΠΎΠ»ΠΆΠ΅Π½ ΠΈΠ΄Π΅Π½Ρ‚ΠΈΡ„ΠΈΡ†ΠΈΡ€ΠΎΠ²Π°Ρ‚ΡŒ ΠΈ ΠΈΡΠΏΡ€Π°Π²ΠΈΡ‚ΡŒ Π»ΡŽΠ±Ρ‹Π΅ ошибки ΠΈΠ»ΠΈ нСточности.\n\nΠ—Π°Ρ‚Π΅ΠΌ рассмотри, ΡΠ²Π»ΡΡŽΡ‚ΡΡ Π»ΠΈ ΠΎΡ‚Π²Π΅Ρ‚Ρ‹ ассистСнтов Π³Ρ€Π°ΠΌΠΎΡ‚Π½Ρ‹ΠΌΠΈ, ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΌΠΈ, Ρ€Π΅Π»Π΅Π²Π°Π½Ρ‚Π½Ρ‹ΠΌΠΈ ΠΈ ΠΊΡ€Π°Ρ‚ΠΊΠΈΠΌΠΈ. Π“Ρ€Π°ΠΌΠΎΡ‚Π½ΠΎΡΡ‚ΡŒ ΠΎΠ·Π½Π°Ρ‡Π°Π΅Ρ‚, Ρ‡Ρ‚ΠΎ ΠΎΡ‚Π²Π΅Ρ‚ ΠΈΡΠΏΠΎΠ»ΡŒΠ·ΡƒΠ΅Ρ‚ прСимущСствСнно русский язык ΠΈ Π² Π½Π΅ΠΌ ΠΎΡ‚ΡΡƒΡ‚ΡΡ‚Π²ΡƒΡŽΡ‚ языковыС ошибки. 
ΠŸΠΎΠ»Π΅Π·Π½ΠΎΡΡ‚ΡŒ ΠΎΠ·Π½Π°Ρ‡Π°Π΅Ρ‚, Ρ‡Ρ‚ΠΎ ΠΎΡ‚Π²Π΅Ρ‚ ΠΏΡ€Π°Π²ΠΈΠ»ΡŒΠ½ΠΎ Ρ€Π΅Π°Π³ΠΈΡ€ΡƒΠ΅Ρ‚ Π½Π° запрос ΠΈΠ»ΠΈ слСдуСт инструкциям. ΠžΠ±Ρ€Π°Ρ‚ΠΈ Π²Π½ΠΈΠΌΠ°Π½ΠΈΠ΅, ΠΊΠΎΠ³Π΄Π° Π² запросС ΠΏΠΎΠ»ΡŒΠ·ΠΎΠ²Π°Ρ‚Π΅Π»Ρ Π΅ΡΡ‚ΡŒ какая-Π»ΠΈΠ±ΠΎ Π½Π΅ΠΎΠ΄Π½ΠΎΠ·Π½Π°Ρ‡Π½ΠΎΡΡ‚ΡŒ ΠΈΠ»ΠΈ Π±ΠΎΠ»Π΅Π΅ ΠΎΠ΄Π½ΠΎΠΉ ΠΈΠ½Ρ‚Π΅Ρ€ΠΏΡ€Π΅Ρ‚Π°Ρ†ΠΈΠΈ, ΠΏΠΎΠ»Π΅Π·Π½Π΅Π΅ ΠΈ умСстнСС Π·Π°ΠΏΡ€Π°ΡˆΠΈΠ²Π°Ρ‚ΡŒ уточнСния ΠΈΠ»ΠΈ Π΄ΠΎΠΏΠΎΠ»Π½ΠΈΡ‚Π΅Π»ΡŒΠ½ΡƒΡŽ ΠΈΠ½Ρ„ΠΎΡ€ΠΌΠ°Ρ†ΠΈΡŽ Ρƒ ΠΏΠΎΠ»ΡŒΠ·ΠΎΠ²Π°Ρ‚Π΅Π»Ρ, Ρ‡Π΅ΠΌ ΠΏΡ€Π΅Π΄ΠΎΡΡ‚Π°Π²Π»ΡΡ‚ΡŒ ΠΎΡ‚Π²Π΅Ρ‚ Π½Π° основС ΠΏΡ€Π΅Π΄ΠΏΠΎΠ»ΠΎΠΆΠ΅Π½ΠΈΠΉ. Π Π΅Π»Π΅Π²Π°Π½Ρ‚Π½ΠΎΡΡ‚ΡŒ ΠΎΠ·Π½Π°Ρ‡Π°Π΅Ρ‚, Ρ‡Ρ‚ΠΎ всС части ΠΎΡ‚Π²Π΅Ρ‚Π° тСсно связаны ΠΈΠ»ΠΈ ΡΠΎΠΎΡ‚Π²Π΅ΡΡ‚Π²ΡƒΡŽΡ‚ Ρ‚ΠΎΠΌΡƒ, Ρ‡Ρ‚ΠΎ ΡΠΏΡ€Π°ΡˆΠΈΠ²Π°Π΅Ρ‚ΡΡ. ΠšΡ€Π°Ρ‚ΠΊΠΎΡΡ‚ΡŒ ΠΎΠ·Π½Π°Ρ‡Π°Π΅Ρ‚, Ρ‡Ρ‚ΠΎ ΠΎΡ‚Π²Π΅Ρ‚ ясСн ΠΈ Π½Π΅ многословСн ΠΈΠ»ΠΈ ΠΈΠ·Π±Ρ‹Ρ‚ΠΎΡ‡Π΅Π½.\n\nΠ—Π°Ρ‚Π΅ΠΌ рассмотри ΠΊΡ€Π΅Π°Ρ‚ΠΈΠ²Π½ΠΎΡΡ‚ΡŒ ΠΈ Π½ΠΎΠ²ΠΈΠ·Π½Ρƒ ΠΎΡ‚Π²Π΅Ρ‚ΠΎΠ² ассистСнтов, ΠΊΠΎΠ³Π΄Π° это Π½Π΅ΠΎΠ±Ρ…ΠΎΠ΄ΠΈΠΌΠΎ. НаконСц, ΠΎΠΏΡ€Π΅Π΄Π΅Π»ΠΈ Π»ΡŽΠ±ΡƒΡŽ ΠΎΡ‚ΡΡƒΡ‚ΡΡ‚Π²ΡƒΡŽΡ‰ΡƒΡŽ Π²Π°ΠΆΠ½ΡƒΡŽ ΠΈΠ½Ρ„ΠΎΡ€ΠΌΠ°Ρ†ΠΈΡŽ Π² ΠΎΡ‚Π²Π΅Ρ‚Π°Ρ… ассистСнтов, ΠΊΠΎΡ‚ΠΎΡ€ΡƒΡŽ Π±Ρ‹Π»ΠΎ Π±Ρ‹ ΠΏΠΎΠ»Π΅Π·Π½ΠΎ Π²ΠΊΠ»ΡŽΡ‡ΠΈΡ‚ΡŒ ΠΏΡ€ΠΈ ΠΎΡ‚Π²Π΅Ρ‚Π΅ Π½Π° ΠΏΠΎΠ»ΡŒΠ·ΠΎΠ²Π°Ρ‚Π΅Π»ΡŒΡΠΊΠΈΠΉ запрос.\n\nПослС прСдоставлСния Ρ‚Π²ΠΎΠ΅Π³ΠΎ объяснСния, Ρ‚Ρ‹ Π΄ΠΎΠ»ΠΆΠ΅Π½ Π²Ρ‹Π΄Π°Ρ‚ΡŒ Ρ‚ΠΎΠ»ΡŒΠΊΠΎ ΠΎΠ΄ΠΈΠ½ ΠΈΠ· ΡΠ»Π΅Π΄ΡƒΡŽΡ‰ΠΈΡ… Π²Π°Ρ€ΠΈΠ°Π½Ρ‚ΠΎΠ² ΠΊΠ°ΠΊ Ρ‚Π²ΠΎΠ΅ ΠΎΠΊΠΎΠ½Ρ‡Π°Ρ‚Π΅Π»ΡŒΠ½ΠΎΠ΅ Ρ€Π΅ΡˆΠ΅Π½ΠΈΠ΅ с ΠΌΠ΅Ρ‚ΠΊΠΎΠΉ:\n\n1. АссистСнт A Π·Π½Π°Ρ‡ΠΈΡ‚Π΅Π»ΡŒΠ½ΠΎ Π»ΡƒΡ‡ΡˆΠ΅: [[A>>B]]\n2. АссистСнт A Π½Π΅ΠΌΠ½ΠΎΠ³ΠΎ Π»ΡƒΡ‡ΡˆΠ΅: [[A>B]]\n3. ΠΠΈΡ‡ΡŒΡ, ΠΏΡ€ΠΈΠΌΠ΅Ρ€Π½ΠΎ ΠΎΠ΄ΠΈΠ½Π°ΠΊΠΎΠ²ΠΎ: [[A=B]]\n4. АссистСнт B Π½Π΅ΠΌΠ½ΠΎΠ³ΠΎ Π»ΡƒΡ‡ΡˆΠ΅: [[B>A]]\n5. АссистСнт B Π·Π½Π°Ρ‡ΠΈΡ‚Π΅Π»ΡŒΠ½ΠΎ Π»ΡƒΡ‡ΡˆΠ΅: [[B>>A]]\n\nΠŸΡ€ΠΈΠΌΠ΅Ρ€ Π²Ρ‹Π²ΠΎΠ΄Π°: \"Мой ΠΎΠΊΠΎΠ½Ρ‡Π°Ρ‚Π΅Π»ΡŒΠ½Ρ‹ΠΉ Π²Π΅Ρ€Π΄ΠΈΠΊΡ‚ β€” Π½ΠΈΡ‡ΡŒΡ: [[A=B]]\"." + +prompt_template: ["<|Запрос ΠΏΠΎΠ»ΡŒΠ·ΠΎΠ²Π°Ρ‚Π΅Π»Ρ|>\n{question_1}\n\n<|Начало ΠΎΡ‚Π²Π΅Ρ‚Π° ассистСнта A|>\n{answer_1}\n<|ΠšΠΎΠ½Π΅Ρ† ΠΎΡ‚Π²Π΅Ρ‚Π° ассистСнта A|>\n\n<|Начало ΠΎΡ‚Π²Π΅Ρ‚Π° ассистСнта B|>\n{answer_2}\n<|ΠšΠΎΠ½Π΅Ρ† ΠΎΡ‚Π²Π΅Ρ‚Π° ассистСнта B|>"] + +# Add your model below for evaluation +model_list: + - meta-llama-3-8b-instruct + - meta-llama-3-8b-instruct-ru-guided-2 + - saiga_llama3_8b + - suzume-llama-3-8B-multilingual + - c4ai-command-r-v01 + - starling-lm-7b-beta + - openchat-3.5-0106 + - hermes-2-pro-llama-3-8b + - hermes-2-pro-mistral-7b + - starcoder2-15b-instruct-v0.1 + - gpt-4-1106-preview \ No newline at end of file diff --git a/src/gen/config/judge_config.yaml b/src/gen/config/judge_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..30473b16ae443b3bda0c6968bc2cd596e1f913f5 --- /dev/null +++ b/src/gen/config/judge_config.yaml @@ -0,0 +1,40 @@ +name: judgment config file for Arena Hard + +bench_name: arena-hard-v0.1 + +# Arena Hard default +judge_model: gpt-4-1106-preview +reference: False # Optional +ref_model: null + +baseline: True +baseline_model: gpt-3.5-turbo-0125 + +pairwise: True +temperature: 0 +max_tokens: 4096 + +regex_pattern: \[\[([AB<>=]+)\]\] + +system_prompt: "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by describing the details that need to be taken into account when responding to this prompt. 
You must provide your ideas before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your ideas. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, concise and linguistically acceptable. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive. Linguistically acceptable means that the response is given mainly in Russian language and there are no grammatical errors in it.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"." + +prompt_template: ["<|User Prompt|>\n{question_1}\n\n<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>"] + +# Add your model below for evaluation +model_list: + - meta-llama-3-8b-instruct + - saiga_llama3_8b + - suzume-llama-3-8b-multilingual + - yandex_gpt_pro + - c4ai-command-r-v01 + - starling-lm-7b-beta + - openchat-3.5-0106 + - snorkel-mistral-pairrm-dpo + - neural-chat-7b-v3-3 + - gigachat_lite + - gigachat_pro + - vikhr-7b-instruct_0.4 + - hermes-2-pro-llama-3-8b + - gpt-4-1106-preview + - llama3-chatqa-1.5-8b + - vikhr-it-5.1 \ No newline at end of file diff --git a/src/gen/gen_answer.py b/src/gen/gen_answer.py new file mode 100644 index 0000000000000000000000000000000000000000..bdbbbf064033dec7d92e20a36f48c88e8d820c28 --- /dev/null +++ b/src/gen/gen_answer.py @@ -0,0 +1,202 @@ +"""Generate answers using api endpoints. 
+ +Usage: +python gen_api_answer --parallel 32 +""" +import argparse +import concurrent.futures +import json +import os +import time + +import shortuuid +import tiktoken +import tqdm +from utils import ( + OPENAI_MODEL_LIST, + chat_completion_anthropic, + chat_completion_cohere, + chat_completion_gemini, + chat_completion_gigachat, + chat_completion_mistral, + chat_completion_openai, + chat_completion_openai_azure, + chat_completion_yandex, + get_endpoint, + load_model_answers, + load_questions, + make_config, + reorg_answer_file, + temperature_config, +) + + +def get_answer( + question: dict, + model: str, + endpoint_info: dict, + num_choices: int, + max_tokens: int, + temperature: float, + answer_file: str, + api_dict: dict, +): + if question["category"] in temperature_config: + temperature = temperature_config[question["category"]] + + api_type = endpoint_info["api_type"] + + conv = [] + + if "system_prompt" in endpoint_info.keys(): + conv.append({"role": "system", "content": endpoint_info["system_prompt"]}) + elif model in OPENAI_MODEL_LIST: + conv.append({"role": "system", "content": "You are a helpful assistant."}) + + encoding = tiktoken.encoding_for_model("gpt-3.5-turbo") + choices = [] + for i in range(num_choices): + turns = [] + for j in range(len(question["turns"])): + conv.append({"role": "user", "content": question["turns"][j]["content"]}) + if api_type == "anthropic": + output = chat_completion_anthropic( + model=endpoint_info["model_name"], messages=conv, temperature=temperature, max_tokens=max_tokens + ) + elif api_type == "mistral": + output = chat_completion_mistral( + model=endpoint_info["model_name"], messages=conv, temperature=temperature, max_tokens=max_tokens + ) + elif api_type == "yandex": + output = chat_completion_yandex( + model=endpoint_info["model_name"], + messages=conv, + temperature=temperature, + max_tokens=max_tokens, + api_dict=api_dict, + ) + elif api_type == "gigachat": + output = chat_completion_gigachat( + model=endpoint_info["model_name"], + messages=conv, + temperature=temperature, + max_tokens=max_tokens, + api_dict=api_dict, + ) + elif api_type == "gemini": + output = chat_completion_gemini( + model=endpoint_info["model_name"], + messages=question["turns"][j]["content"], + temperature=temperature, + max_tokens=max_tokens, + ) + elif api_type == "azure": + output = chat_completion_openai_azure( + model=endpoint_info["model_name"], + messages=conv, + temperature=temperature, + max_tokens=max_tokens, + api_dict=api_dict, + ) + elif api_type == "cohere": + output = chat_completion_cohere( + model=endpoint_info["model_name"], messages=conv, temperature=temperature, max_tokens=max_tokens + ) + else: + output = chat_completion_openai( + model=endpoint_info["model_name"], + messages=conv, + temperature=temperature, + max_tokens=max_tokens, + api_dict=api_dict, + ) + conv.append({"role": "assistant", "content": output}) + + turns.append({"content": output, "token_len": len(encoding.encode(output))}) + choices.append({"index": i, "turns": turns}) + + # Dump answers + ans = { + "question_id": question["question_id"], + "answer_id": shortuuid.uuid(), + "model_id": model, + "choices": choices, + "tstamp": time.time(), + } + + os.makedirs(os.path.dirname(answer_file), exist_ok=True) + with open(answer_file, "a") as fout: + fout.write(json.dumps(ans) + "\n") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--setting-file", type=str, default="config/gen_answer_config.yaml") + parser.add_argument("--endpoint-file", 
type=str, default="config/api_config.yaml") + args = parser.parse_args() + + settings = make_config(args.setting_file) + endpoint_list = make_config(args.endpoint_file) + + existing_answer = load_model_answers(os.path.join("data", settings["bench_name"], "model_answers", "internal")) + + print(settings) + + for model in settings["model_list"]: + assert model in endpoint_list + endpoint_info = endpoint_list[model] + + question_file = os.path.join("data", settings["bench_name"], "question.jsonl") + questions = load_questions(question_file) + + answer_file = os.path.join("data", settings["bench_name"], "model_answers", f"{model}.jsonl") + print(f"Output to {answer_file}") + + if "parallel" in endpoint_info: + parallel = endpoint_info["parallel"] + else: + parallel = 1 + + # We want to maximizes the number of tokens generate per answer: max_tokens = specified token # - input tokens # + if "tokenizer" in endpoint_info: + question_list = [question["turns"][0]["content"] for question in questions] + if model in OPENAI_MODEL_LIST: + tokenizer = tiktoken.encoding_for_model(endpoint_info["model_name"]) + tokens = [tokenizer.encode(prompt) for prompt in question_list] + max_tokens = [(settings["max_tokens"] - len(token) - 100) for token in tokens] + else: + from transformers import AutoTokenizer + + os.environ["TOKENIZERS_PARALLELISM"] = "false" + tokenizer = AutoTokenizer.from_pretrained(endpoint_info["tokenizer"]) + + tokens = tokenizer(question_list) + max_tokens = [(settings["max_tokens"] - len(prompt) - 300) for prompt in tokens["input_ids"]] + else: + max_tokens = [settings["max_tokens"]] * len(questions) + + with concurrent.futures.ThreadPoolExecutor(max_workers=parallel) as executor: + futures = [] + count = 0 + for index, question in enumerate(questions): + if model in existing_answer and question["question_id"] in existing_answer[model]: + count += 1 + continue + future = executor.submit( + get_answer, + question, + model, + endpoint_info, + settings["num_choices"], + max_tokens[index], + settings["temperature"], + answer_file, + get_endpoint(endpoint_info["endpoints"]), + ) + futures.append(future) + if count > 0: + print(f"{count} number of existing answers") + for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)): + future.result() + + reorg_answer_file(answer_file) diff --git a/src/gen/gen_judgment.py b/src/gen/gen_judgment.py new file mode 100644 index 0000000000000000000000000000000000000000..8d15e72822ea01719d84242bd33d73072d97d344 --- /dev/null +++ b/src/gen/gen_judgment.py @@ -0,0 +1,221 @@ +import argparse +import concurrent.futures +import glob +import json +import os +import re + +import huggingface_hub +from tqdm import tqdm +from utils import ( + chat_completion_anthropic, + chat_completion_openai, + chat_completion_openai_azure, + get_endpoint, + load_model_answers, + load_questions, + make_config, +) + + +def get_score(judgment, pattern, pairwise=True): + matches = pattern.findall(judgment) + matches = [m for m in matches if m != ""] + if len(set(matches)) == 0: + return None, True + elif len(set(matches)) == 1: + if pairwise: + return matches[0].strip("\n"), False + return int(matches[0]) + else: + return None, False + + +# get answer from model +def get_answer(model, conv, temperature, max_tokens, endpoint_dict=None): + api_dict = get_endpoint(endpoint_dict["endpoints"]) + + if endpoint_dict["api_type"] == "anthropic": + output = chat_completion_anthropic(model, conv, temperature, max_tokens) + elif endpoint_dict["api_type"] == "azure": + 
output = chat_completion_openai_azure(model, conv, temperature, max_tokens, api_dict) + else: + output = chat_completion_openai(model, conv, temperature, max_tokens, api_dict) + return output + + +def judgment(**args): + question = args["question"] + answer = args["answer"] + reference = args["reference"] + baseline = args["baseline_answer"] + configs = args["configs"] + output_file = args["output_file"] + model = configs["judge_model"] + + num_games = 2 if configs["pairwise"] else 1 + + output = {"question_id": question["question_id"], "model": answer["model_id"], "judge": model, "games": []} + + for game in range(num_games): + conv = [{"role": "system", "content": configs["system_prompt"]}] + + for template in configs["prompt_template"]: + prompt_args = {} + + for i, turn in enumerate(question["turns"]): + prompt_args[f"question_{i+1}"] = turn["content"] + base = 1 + + if baseline: + if game % 2 == 1: # swap position + temp = baseline + baseline = answer + answer = temp + + for i, turn in enumerate(baseline["choices"][0]["turns"]): + prompt_args[f"answer_{i+1}"] = turn["content"] + base += 1 + if answer: + for i, turn in enumerate(answer["choices"][0]["turns"]): + prompt_args[f"answer_{i+base}"] = turn["content"] + + if reference: + for j, ref_answer in enumerate(reference): + for i, turn in enumerate(ref_answer["choices"][0]["turns"]): + prompt_args[f"ref_answer_{i+j+1}"] = turn["content"] + + user_prompt = template.format(**prompt_args) + conv.append({"role": "user", "content": user_prompt}) + + judgment = "" + for _ in range(2): + new_judgment = get_answer( + model, + conv, + configs["temperature"], + configs["max_tokens"], + args["endpoint_dict"], + ) + + judgment += "\n" + new_judgment + + score, try_again = get_score(judgment, args["regex_pattern"]) + + conv.append({"role": "assistant", "content": new_judgment}) + + if not try_again: + break + + conv.append( + {"role": "user", "content": "continue your judgment and finish by outputting a final verdict label"} + ) + + result = {"user_prompt": conv[1]["content"], "judgment": judgment, "score": score} + output["games"].append(result) + + with open(output_file, "a") as f: + f.write(json.dumps(output, ensure_ascii=False) + "\n") + huggingface_hub.HfApi().upload_file( + output_file, + path_in_repo=f'model_judgment/{configs['judge_model']}/{output_file.split('/')[-1]}', + repo_id="Vikhrmodels/openbench-eval", + repo_type="dataset", + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--setting-file", type=str, default="./config/judge_config.yaml") + parser.add_argument("--endpoint-file", type=str, default="./config/api_config.yaml") + args = parser.parse_args() + print(args) + + configs = make_config(args.setting_file) + endpoint_list = make_config(args.endpoint_file) + + print( + f'judge model: {configs["judge_model"]}, baseline: {configs["baseline"]}, baseline model: {configs["baseline_model"]}, reference: {configs["reference"]}, ' + + f'reference models: {configs["ref_model"]}, temperature: {configs["temperature"]}, max tokens: {configs["max_tokens"]}, pairwise: {configs["pairwise"]}' + ) + + if configs["regex_pattern"]: + pattern = re.compile(configs["regex_pattern"]) + + question_file = os.path.join("./data", configs["bench_name"], "question.jsonl") + external_dir = os.path.join("./data", configs["bench_name"], "model_answers/external") + internal_dir = os.path.join("./data", configs["bench_name"], "model_answers/internal") + ref_answer_dir = os.path.join("data", configs["bench_name"], 
"reference_answer") + + questions = load_questions(question_file) + model_answers_external = load_model_answers(external_dir) + model_answers_internal = load_model_answers(internal_dir) + + # internal has priority + model_answers = {**model_answers_external, **model_answers_internal} + + # if user choose a set of models, only judge those models + models = [ + model.split("/")[-1].split(".")[0] + for model in glob.glob("./data/arena-hard-v0.1/model_answers/external/*.jsonl") + ] + + ref_answers = None + if configs["reference"]: + ref_answers = load_model_answers(ref_answer_dir) + ref_answers = [ref_answers[model] for model in configs["ref_model"]] + + output_files = {} + output_dir = f"data/{configs['bench_name']}/model_judgment/{configs['judge_model']}" + for model in models: + output_files[model] = os.path.join( + output_dir, + f"{model}.jsonl", + ) + + for output_file in output_files.values(): + os.makedirs(os.path.dirname(output_file), exist_ok=True) + + existing_judgments = load_model_answers(output_dir) + + endpoint_info = endpoint_list[configs["judge_model"]] + + with concurrent.futures.ThreadPoolExecutor(max_workers=endpoint_info["parallel"]) as executor: + futures = [] + for model in models: + count = 0 + for question in questions[:2]: + question_id = question["question_id"] + + kwargs = {} + kwargs["question"] = question + if model in model_answers and question_id not in model_answers[model]: + print(f"Warning: {model} answer to {question['question_id']} cannot be found.") + continue + + if model in existing_judgments and question_id in existing_judgments[model]: + count += 1 + continue + + kwargs["answer"] = model_answers[model][question_id] + if ref_answers: + kwargs["reference"] = [ref_answer[question_id] for ref_answer in ref_answers] + assert len(kwargs["reference"]) == len(configs["ref_model"]) + else: + kwargs["reference"] = None + if configs["baseline"]: + kwargs["baseline_answer"] = model_answers[configs["baseline_model"]][question_id] + else: + kwargs["baseline_answer"] = None + kwargs["configs"] = configs + kwargs["endpoint_dict"] = endpoint_info + kwargs["output_file"] = output_files[model] + kwargs["regex_pattern"] = pattern + future = executor.submit(judgment, **kwargs) + futures.append(future) + + if count > 0: + print(f"{count} number of existing judgments") + + for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures)): + future.result() diff --git a/src/gen/show_result.py b/src/gen/show_result.py new file mode 100644 index 0000000000000000000000000000000000000000..329d328b1c570c003ddd3c09575de114ee466fb7 --- /dev/null +++ b/src/gen/show_result.py @@ -0,0 +1,279 @@ +import argparse +import datetime +import math +import os +from collections import defaultdict +from glob import glob + +import numpy as np +import pandas as pd +import plotly.express as px +from sklearn.linear_model import LogisticRegression +from tqdm import tqdm +from utils import load_model_answers + +from src.envs import HF_TOKEN_PRIVATE + + +def compute_mle_elo(df, SCALE=400, BASE=10, INIT_RATING=1000): + models = pd.concat([df["model_a"], df["model_b"]]).unique() + models = pd.Series(np.arange(len(models)), index=models) + + # duplicate battles + df = pd.concat([df, df], ignore_index=True) + p = len(models.index) + n = df.shape[0] + + X = np.zeros([n, p]) + X[np.arange(n), models[df["model_a"]]] = +math.log(BASE) + X[np.arange(n), models[df["model_b"]]] = -math.log(BASE) + + # one A win => two A win + Y = np.zeros(n) + Y[df["winner"] == "model_a"] = 1.0 + + # one tie => one 
A win + one B win + # find tie + tie (both bad) index + tie_idx = (df["winner"] == "tie") | (df["winner"] == "tie (bothbad)") + tie_idx[len(tie_idx) // 2 :] = False + Y[tie_idx] = 1.0 + + lr = LogisticRegression(fit_intercept=False, penalty=None, tol=1e-8) + lr.fit(X, Y) + + elo_scores = SCALE * lr.coef_[0] + INIT_RATING + + # set anchor as gpt-3.5-turbo-0125 = 1000 + if "gpt-3.5-turbo-0125" in models.index: + elo_scores += 1000 - elo_scores[models["gpt-3.5-turbo-0125"]] + return pd.Series(elo_scores, index=models.index).sort_values(ascending=False) + + +def get_bootstrap_result(battles, func_compute_elo, num_round): + rows = [] + for i in tqdm(range(num_round), desc="bootstrap"): + rows.append(func_compute_elo(battles.sample(frac=1.0, replace=True))) + df = pd.DataFrame(rows) + return df[df.median().sort_values(ascending=False).index] + + +def preety_print_two_ratings(ratings_1, ratings_2, column_names): + df = ( + pd.DataFrame( + [[n, ratings_1[n], ratings_2[n]] for n in ratings_1.keys()], + columns=["Model", column_names[0], column_names[1]], + ) + .sort_values(column_names[0], ascending=False) + .reset_index(drop=True) + ) + df[column_names[0]] = (df[column_names[0]] + 0.5).astype(int) + df[column_names[1]] = (df[column_names[1]] + 0.5).astype(int) + df.index = df.index + 1 + return df + + +def visualize_bootstrap_scores(df, title): + bars = ( + pd.DataFrame(dict(lower=df.quantile(0.025), rating=df.quantile(0.5), upper=df.quantile(0.975))) + .reset_index(names="model") + .sort_values("rating", ascending=False) + ) + bars["error_y"] = bars["upper"] - bars["rating"] + bars["error_y_minus"] = bars["rating"] - bars["lower"] + bars["rating_rounded"] = np.round(bars["rating"], 2) + fig = px.scatter( + bars, + x="model", + y="rating", + error_y="error_y", + error_y_minus="error_y_minus", + text="rating_rounded", + title=title, + ) + fig.update_layout(xaxis_title="Model", yaxis_title="Rating", height=600) + return fig + + +def predict_win_rate(elo_ratings, SCALE=400, BASE=10, INIT_RATING=1000): + names = sorted(list(elo_ratings.keys())) + wins = defaultdict(lambda: defaultdict(lambda: 0)) + for a in names: + for b in names: + ea = 1 / (1 + BASE ** ((elo_ratings[b] - elo_ratings[a]) / SCALE)) + wins[a][b] = ea + wins[b][a] = 1 - ea + + data = {a: [wins[a][b] if a != b else np.NAN for b in names] for a in names} + + df = pd.DataFrame(data, index=names) + df.index.name = "model_a" + df.columns.name = "model_b" + return df.T + + +def get_win_rate_column(df, column, baseline="gpt-3.5-turbo-0125"): + to_dict = df[["model", column]].set_index("model").to_dict()[column] + win_rate_table = predict_win_rate(to_dict) + return win_rate_table[baseline].fillna(0.5).apply(lambda x: round(x * 100, 2)) + + +def get_battles_from_judgment(judge_name, first_game_only=False, WEIGHT=3): + arena_hard_battles = pd.DataFrame() + + print("Turning judgment results into battles...") + + directory = f"data/arena-hard-v0.1/model_judgement/{judge_name}" + assert os.path.exists(directory) + for file in tqdm(glob(f"{directory}/*jsonl")): + df = pd.read_json(file, lines=True) + + for _, row in df.iterrows(): + # game 1 + output = {"question_id": row["question_id"], "model_a": "gpt-3.5-turbo-0125", "model_b": row["model"]} + + game = row["games"][0] + + weight = 1 + if game["score"] == "A=B": + output["winner"] = "tie" + elif game["score"] == "A>B": + output["winner"] = "model_a" + elif game["score"] == "A>>B": + output["winner"] = "model_a" + weight = WEIGHT + elif game["score"] == "B>A": + output["winner"] = "model_b" + elif 
game["score"] == "B>>A": + output["winner"] = "model_b" + weight = WEIGHT + else: + weight = 0 + + if weight: + arena_hard_battles = pd.concat([arena_hard_battles, pd.DataFrame([output] * weight)]) + + if not first_game_only: + # game 2 + output = {"question_id": row["question_id"], "model_a": "gpt-3.5-turbo-0125", "model_b": row["model"]} + + game = row["games"][1] + + weight = 1 + if game["score"] == "A=B": + output["winner"] = "tie" + elif game["score"] == "A>B": + output["winner"] = "model_b" + elif game["score"] == "A>>B": + output["winner"] = "model_b" + weight = WEIGHT + elif game["score"] == "B>A": + output["winner"] = "model_a" + elif game["score"] == "B>>A": + output["winner"] = "model_a" + weight = WEIGHT + else: + weight = 0 + + if weight: + arena_hard_battles = pd.concat([arena_hard_battles, pd.DataFrame([output] * weight)]) + arena_hard_battles.to_json("data/arena_hard_battles.jsonl", lines=True, orient="records") + return arena_hard_battles + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--bench-name", type=str, default="arena-hard-v0.1") + parser.add_argument("--judge-name", type=str, default="gpt-4-1106-preview") + parser.add_argument("--baseline", type=str, default="gpt-3.5-turbo-0125") + parser.add_argument("--load-battles", action="store_true") + parser.add_argument("--load-bootstrap", action="store_true") + parser.add_argument("--show-elo", action="store_true") + parser.add_argument("--weight", type=int, default=3) + parser.add_argument("--num-rounds", type=int, default=100) + parser.add_argument("--output", action="store_true") + parser.add_argument("--first-game-only", action="store_true") + args = parser.parse_args() + print(args) + assert not args.load_bootstrap or ( + args.load_battles and args.load_bootstrap + ), "If loading prexisting bootstrapping data, you must also load preexisting battles." 
+ + answer_dir = os.path.join("data", args.bench_name, "model_answers/external") + model_answers = load_model_answers(answer_dir) + + if args.load_battles: + assert os.path.exists("data/arena_hard_battles.jsonl") + battles = pd.read_json("data/arena_hard_battles.jsonl", lines=True) + else: + battles = get_battles_from_judgment(args.judge_name, args.first_game_only, args.weight) + + bootstrap_online_elo = compute_mle_elo(battles) + + if args.load_bootstrap: + bootstrap_elo_lu = pd.read_json("data/bootstrapping_results.jsonl", lines=True) + else: + np.random.seed(42) + bootstrap_elo_lu = get_bootstrap_result(battles, compute_mle_elo, args.num_rounds) + bootstrap_elo_lu.to_json("data/bootstrapping_results.jsonl", lines=True, orient="records") + + stats = pd.DataFrame() + stats["results"] = None + stats["results"] = stats["results"].astype("object") + + for i, model in enumerate(bootstrap_online_elo.index): + assert model in bootstrap_elo_lu.columns + + stats.at[i, "model"] = model + stats.at[i, "score"] = bootstrap_online_elo[model] + stats.at[i, "lower"] = np.percentile(bootstrap_elo_lu[model], 2.5) + stats.at[i, "upper"] = np.percentile(bootstrap_elo_lu[model], 97.5) + + length = 0 + if model in model_answers: + for _, row in model_answers[model].items(): + turn = row["choices"][0]["turns"][0] + length += turn["token_len"] + length /= len(model_answers[model]) + + stats.at[i, "avg_tokens"] = int(length) + stats.at[i, "results"] = bootstrap_elo_lu[model].tolist() + + if not args.show_elo: + stats.sort_values(by="model", inplace=True) + stats["score"] = get_win_rate_column(stats, "score", args.baseline).tolist() + stats["lower"] = get_win_rate_column(stats, "lower", args.baseline).tolist() + stats["upper"] = get_win_rate_column(stats, "upper", args.baseline).tolist() + decimal = 1 + else: + decimal = 0 + stats = stats.astype({"score": int, "lower": int, "upper": int}) + + stats.sort_values(by="score", ascending=False, inplace=True) + for _, row in stats.iterrows(): + interval = str((round(row["lower"] - row["score"], decimal), round(row["upper"] - row["score"], decimal))) + print( + f"{row['model'] : <30} | score: {round(row['score'], decimal) : ^5} | 95% CI: {interval : ^12} | average #tokens: {int(row['avg_tokens'])}" + ) + + if args.output: + cur_date = datetime.datetime.now() + date_str = cur_date.strftime("%Y%m%d") + json_file_name = f"arena_hard_leaderboard_{date_str}.json" + stats.to_json(json_file_name, orient="records", indent=4) + import huggingface_hub + + huggingface_hub.HfApi().upload_file( + path_or_fileobj=json_file_name, + path_in_repo="leaderboard.json", + repo_id="Vikhrmodels/arena-leaderboard-metainfo", + repo_type="dataset", + token=HF_TOKEN_PRIVATE, + ) + + huggingface_hub.HfApi().upload_file( + path_or_fileobj=json_file_name, + path_in_repo=f"leaderboard_logs/{json_file_name}", + repo_id="Vikhrmodels/arena-leaderboard-metainfo", + repo_type="dataset", + token=HF_TOKEN_PRIVATE, + ) diff --git a/src/gen/utils.py b/src/gen/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..02924022a06cc57908de09a64317820bd725182f --- /dev/null +++ b/src/gen/utils.py @@ -0,0 +1,375 @@ +import json +import os +import random +import time +from glob import glob + +import yaml + +# API setting constants +API_MAX_RETRY = 16 +API_RETRY_SLEEP = 10 +API_ERROR_OUTPUT = "$ERROR$" + + +OPENAI_MODEL_LIST = ( + "gpt-3.5-turbo", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-0613-verbose", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-4", + 
"gpt-4-0314", + "gpt-4-0613", + "gpt-4-turbo", + "gpt-4-1106-preview", + "gpt-4-0125-preview", +) + + +temperature_config = { + "writing": 0.7, + "roleplay": 0.7, + "extraction": 0.0, + "math": 0.0, + "coding": 0.0, + "reasoning": 0.0, + "stem": 0.1, + "humanities": 0.1, +} + + +def load_questions(question_file: str): + """Load questions from a file.""" + questions = [] + with open(question_file, "r") as ques_file: + for line in ques_file: + if line: + questions.append(json.loads(line)) + return questions + + +def load_model_answers(answer_dir: str): + """Load model answers. + + The return value is a python dict of type: + Dict[model_name: str -> Dict[question_id: int -> answer: dict]] + """ + filenames = glob(os.path.join(answer_dir, "*.jsonl")) + filenames.sort() + model_answers = {} + + for filename in filenames: + model_name = os.path.basename(filename)[:-6] + answer = {} + with open(filename) as fin: + for line in fin: + line = json.loads(line) + answer[line["question_id"]] = line + model_answers[model_name] = answer + + return model_answers + + +def get_endpoint(endpoint_list): + if endpoint_list is None: + return None + assert endpoint_list is not None + # randomly pick one + api_dict = random.choices(endpoint_list)[0] + return api_dict + + +# load config args from config yaml files +def make_config(config_file: str) -> dict: + config_kwargs = {} + with open(config_file, "r") as f: + config_kwargs = yaml.load(f, Loader=yaml.SafeLoader) + + return config_kwargs + + +def chat_completion_gigachat(model, messages, temperature, max_tokens, api_dict=None): + from gigachat import GigaChat + from gigachat.models import Chat, Messages + + assert api_dict is not None, "no api settings provided!" + auth_token = api_dict.get("auth_token", os.environ.get(api_dict["auth_token"], "")) + client = GigaChat(credentials=auth_token, model=model, verify_ssl_certs=False) + temperature = max(temperature, 0.001) + + messages = [Messages.parse_obj(m) for m in messages] + chat = Chat(messages=messages, max_tokens=max_tokens, temperature=temperature) + + output = API_ERROR_OUTPUT + for _ in range(API_MAX_RETRY): + try: + output = client.chat(chat) + output = output.choices[0].message.content + break + # Don't know other errors + except Exception as e: + print(type(e), e) + time.sleep(API_RETRY_SLEEP) + + return output + + +def chat_completion_yandex(model, messages, temperature, max_tokens, api_dict=None): + from yandex_gpt import YandexGPT, YandexGPTConfigManagerForIAMToken + + assert api_dict is not None, "no api settings provided!" 
+ iam_token = api_dict.get("iam_token", os.environ.get(api_dict["iam_token_ENV"], "")) + config = YandexGPTConfigManagerForIAMToken(model_type=model, catalog_id=api_dict["catalog_id"], iam_token=iam_token) + client = YandexGPT(config_manager=config) + + messages = [{"role": m["role"], "text": m["content"]} for m in messages] + + output = API_ERROR_OUTPUT + for _ in range(API_MAX_RETRY): + try: + output = client.get_sync_completion( + messages=messages, + temperature=temperature, + max_tokens=max_tokens, + ) + break + # Don't know other errors + except Exception as e: + print(type(e), e) + time.sleep(API_RETRY_SLEEP) + + return output + + +def chat_completion_openai(model, messages, temperature, max_tokens, api_dict=None): + import openai + + api_key = api_dict.get("api_key", os.environ.get(api_dict["api_key_ENV"], "")) + if api_dict: + client = openai.OpenAI( + base_url=api_dict["api_base"], + api_key=api_key, + ) + else: + client = openai.OpenAI() + + output = API_ERROR_OUTPUT + for _ in range(API_MAX_RETRY): + try: + # print(messages) + completion = client.chat.completions.create( + model=model, + messages=messages, + temperature=temperature, + max_tokens=max_tokens, + stop=["", "", "<|eot_id|>"], + ) + output = completion.choices[0].message.content + break + except openai.RateLimitError as e: + print(type(e), e) + time.sleep(API_RETRY_SLEEP) + except openai.BadRequestError as e: + print(messages) + print(type(e), e) + except KeyError as e: + print(type(e), e) + break + + return output + + +def chat_completion_openai_azure(model, messages, temperature, max_tokens, api_dict=None): + import openai + from openai import AzureOpenAI + + api_base = api_dict["api_base"] + api_key = api_dict.get("api_key", os.environ.get(api_dict["api_key_ENV"], "")) + client = AzureOpenAI( + azure_endpoint=api_base, api_key=api_key, api_version=api_dict["api_version"], timeout=240, max_retries=2 + ) + + output = API_ERROR_OUTPUT + for _ in range(API_MAX_RETRY): + try: + response = client.chat.completions.create( + model=model, + messages=messages, + n=1, + temperature=temperature, + max_tokens=max_tokens, + seed=42, + ) + output = response.choices[0].message.content + break + except openai.RateLimitError as e: + print(type(e), e) + time.sleep(API_RETRY_SLEEP) + except openai.BadRequestError as e: + print(type(e), e) + break + except KeyError as e: + print(type(e), e) + break + + return output + + +def chat_completion_anthropic(model, messages, temperature, max_tokens, api_dict=None): + import anthropic + + if api_dict: + api_key = api_dict.get("api_key", os.environ.get(api_dict["api_key_ENV"], "")) + else: + api_key = os.environ["ANTHROPIC_API_KEY"] + + sys_msg = "" + if messages[0]["role"] == "system": + sys_msg = messages[0]["content"] + messages = messages[1:] + + output = API_ERROR_OUTPUT + for _ in range(API_MAX_RETRY): + try: + # print(sys_msg) + c = anthropic.Anthropic(api_key=api_key) + response = c.messages.create( + model=model, + messages=messages, + stop_sequences=[anthropic.HUMAN_PROMPT], + max_tokens=max_tokens, + temperature=temperature, + system=sys_msg, + ) + output = response.content[0].text + break + except anthropic.APIError as e: + print(type(e), e) + time.sleep(API_RETRY_SLEEP) + return output + + +def chat_completion_mistral(model, messages, temperature, max_tokens): + from mistralai.client import MistralClient + from mistralai.exceptions import MistralException + from mistralai.models.chat_completion import ChatMessage + + api_key = os.environ["MISTRAL_API_KEY"] + client = 
MistralClient(api_key=api_key) + + prompts = [ChatMessage(role=message["role"], content=message["content"]) for message in messages] + + output = API_ERROR_OUTPUT + for _ in range(API_MAX_RETRY): + try: + chat_response = client.chat( + model=model, + messages=prompts, + temperature=temperature, + max_tokens=max_tokens, + ) + output = chat_response.choices[0].message.content + break + except MistralException as e: + print(type(e), e) + break + + return output + + +def chat_completion_gemini(model, messages, temperature, max_tokens): + import google.generativeai as genai + + genai.configure(api_key=os.environ["GEMINI_API_KEY"]) + + safety_settings = [ + {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"}, + {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"}, + {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"}, + {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"}, + ] + + # Set up the model + generation_config = { + "temperature": temperature, + "top_p": 1, + "top_k": 1, + "max_output_tokens": max_tokens, + } + + output = API_ERROR_OUTPUT + for _ in range(API_MAX_RETRY): + try: + gemini = genai.GenerativeModel( + model_name=model, generation_config=generation_config, safety_settings=safety_settings + ) + + convo = gemini.start_chat(history=[]) + + convo.send_message(messages) + output = convo.last.text + break + except genai.types.generation_types.StopCandidateException as e: + print(type(e), e) + break + except Exception as e: + print(type(e), e) + time.sleep(API_RETRY_SLEEP) + + return output + + +def chat_completion_cohere(model, messages, temperature, max_tokens): + import cohere + + co = cohere.Client(os.environ["COHERE_API_KEY"]) + assert len(messages) > 0 + + template_map = {"system": "SYSTEM", "assistant": "CHATBOT", "user": "USER"} + + assert messages[-1]["role"] == "user" + prompt = messages[-1]["content"] + + if len(messages) > 1: + history = [] + for message in messages[:-1]: + history.append({"role": template_map[message["role"]], "message": message["content"]}) + else: + history = None + + output = API_ERROR_OUTPUT + for _ in range(API_MAX_RETRY): + try: + response = co.chat( + message=prompt, + model=model, + temperature=temperature, + max_tokens=max_tokens, + chat_history=history, + ) + output = response.text + break + except cohere.core.api_error.ApiError as e: + print(type(e), e) + raise + except Exception as e: + print(type(e), e) + break + + return output + + +def reorg_answer_file(answer_file): + """Sort by question id and de-duplication""" + answers = {} + with open(answer_file, "r") as fin: + for line in fin: + qid = json.loads(line)["question_id"] + answers[qid] = line + + qids = sorted(list(answers.keys())) + with open(answer_file, "w") as fout: + for qid in qids: + fout.write(answers[qid]) diff --git a/src/leaderboard/build_leaderboard.py b/src/leaderboard/build_leaderboard.py new file mode 100644 index 0000000000000000000000000000000000000000..13c4e9a731752e260b0e46df9e4c9bc171db23a4 --- /dev/null +++ b/src/leaderboard/build_leaderboard.py @@ -0,0 +1,159 @@ +import json +import logging +import os +import time + +import pandas as pd +from huggingface_hub import snapshot_download + +from src.envs import DATA_PATH, H4_TOKEN, RESULTS_REPO, METAINFO_REPO + +# Configure logging +logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") + + +def time_diff_wrapper(func): + def wrapper(*args, **kwargs): + start_time = time.time() + result = func(*args, 
**kwargs)
+        end_time = time.time()
+        diff = end_time - start_time
+        logging.info("Time taken for %s: %s seconds", func.__name__, diff)
+        return result
+
+    return wrapper
+
+
+def chmod_recursive(path, mode):
+    os.chmod(path, mode)
+    for root, dirs, files in os.walk(path):
+        for dir in dirs:
+            os.chmod(os.path.join(root, dir), mode)
+        for file in files:
+            os.chmod(os.path.join(root, file), mode)
+
+
+@time_diff_wrapper
+def download_dataset(repo_id, local_dir, repo_type="dataset", max_attempts=3, backoff_factor=1.5):
+    """Download dataset with exponential backoff retries."""
+    os.makedirs(local_dir, exist_ok=True)
+    os.makedirs("./tmp", exist_ok=True)
+    attempt = 0
+    while attempt < max_attempts:
+        try:
+            logging.info("Downloading %s to %s", repo_id, local_dir)
+            snapshot_download(
+                repo_id=repo_id,
+                local_dir=local_dir,
+                cache_dir="./tmp",
+                repo_type=repo_type,
+                tqdm_class=None,
+                token=H4_TOKEN,
+                etag_timeout=30,
+                max_workers=8,
+                force_download=True,
+                local_dir_use_symlinks=False,
+            )
+            logging.info("Download successful")
+            return
+        except Exception as e:
+            wait_time = backoff_factor**attempt
+            logging.error("Error downloading %s: %s, retrying in %ss", repo_id, e, wait_time)
+            time.sleep(wait_time)
+            attempt += 1
+    logging.error("Failed to download %s after %s attempts", repo_id, max_attempts)
+
+
+def download_openbench():
+    # Download previous autogenerated leaderboard files
+    try:
+        download_dataset(METAINFO_REPO, DATA_PATH)
+        logging.info("Successfully downloaded leaderboard metainfo data")
+    except Exception as e:
+        logging.error(f"Failed to download leaderboard metainfo: {e}")
+
+    # Download model evaluation results
+    try:
+        download_dataset(RESULTS_REPO, "m_data")
+        logging.info("Successfully downloaded model evaluation results")
+    except Exception as e:
+        logging.error(f"Failed to download model evaluation results: {e}")
+
+
+def build_leadearboard_df():
+    results = []
+
+    # Load the baseline models from the local results file
+    try:
+        with open("results/leaderboard_results.json", "r", encoding="utf-8") as f:
+            data = json.load(f)
+
+        # Keep only the combined (math + physics) results
+        for key, value in data.items():
+            if "_Combined_" in key:
+                result = {
+                    "model": value["model_name"],
+                    "score": value["score"],
+                    "math_score": value["math_score"],
+                    "physics_score": value["physics_score"],
+                    "total_tokens": value["total_tokens"],
+                    "evaluation_time": value["evaluation_time"],
+                    "system_prompt": value["system_prompt"],
+                }
+                results.append(result)
+        logging.info(f"Loaded {len(results)} models from local results file")
+    except Exception as e:
+        logging.error(f"Failed to load local model results: {e}")
+
+    # Try to load previously saved leaderboard data
+    try:
+        leaderboard_path = f"{os.path.abspath(DATA_PATH)}/leaderboard.json"
+        if os.path.exists(leaderboard_path):
+            with open(leaderboard_path, "r", encoding="utf-8") as eval_file:
+                saved_data = json.load(eval_file)
+                logging.info(f"Loaded {len(saved_data)} models from saved leaderboard data")
+
+                # Add models that are not present in the results yet
+                existing_models = [r["model"] for r in results]
+                for item in saved_data:
+                    if item["model"] not in existing_models:
+                        results.append(item)
+    except Exception as e:
+        logging.error(f"Failed to load saved leaderboard data: {e}")
+
+    # Load models from the external models directory
+    try:
+        for file in os.listdir("./m_data/model_data/external/"):
+            if file.endswith(".json"):
+                with open(os.path.join("./m_data/model_data/external/", file), "r") as f:
+                    try:
+                        data = json.load(f)
+                        # Skip models that are already present in the results
+                        if data["model_name"] not in [r["model"] for r in results]:
+                            result = {
+                                "model": data["model_name"],
+                                "score": data["score"],
+                                "math_score": data["math_score"],
+                                "physics_score": data["physics_score"],
+                                "total_tokens": data["total_tokens"],
+                                "evaluation_time": data["evaluation_time"],
+                                "system_prompt": data.get("system_prompt", "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС."),
+                            }
+                            results.append(result)
+                    except Exception as e:
+                        logging.error(f"Failed to parse {file}: {e}")
+    except Exception as e:
+        logging.error(f"Failed to process external model data: {e}")
+
+    # Build a DataFrame and sort by the overall score
+    if results:
+        df = pd.DataFrame(results)
+        df.sort_values(by="score", ascending=False, inplace=True)
+
+        # Round numeric columns for cleaner display
+        numeric_cols = df.select_dtypes(include=["number"]).columns
+        df[numeric_cols] = df[numeric_cols].round(3)
+
+        return df
+    else:
+        # If there are no results, return an empty DataFrame with the expected columns
+        return pd.DataFrame(
+            columns=["model", "score", "math_score", "physics_score", "total_tokens", "evaluation_time", "system_prompt"]
+        )
diff --git a/src/leaderboard/filter_models.py b/src/leaderboard/filter_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..a88a963b643085ab42365a2808ed4e1e6478cdb5
--- /dev/null
+++ b/src/leaderboard/filter_models.py
@@ -0,0 +1,173 @@
+from src.display.formatting import model_hyperlink
+from src.display.utils import AutoEvalColumn
+
+
+# Models which have been flagged by users as being problematic for a reason or another
+# (Model name to forum discussion link)
+FLAGGED_MODELS = {
+    "merged": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510",
+    "Voicelab/trurl-2-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/202",
+    "deepnight-research/llama-2-70B-inst": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/207",
+    "Aspik101/trurl-2-13b-pl-instruct_unload": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/213",
+    "Fredithefish/ReasonixPajama-3B-HF": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/236",
+    "TigerResearch/tigerbot-7b-sft-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/237",
+    "gaodrew/gaodrew-gorgonzola-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/215",
+    "AIDC-ai-business/Marcoroni-70B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
+    "AIDC-ai-business/Marcoroni-13B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
+    "AIDC-ai-business/Marcoroni-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
+    "fblgit/una-xaberius-34b-v1beta": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/444",
+    "jan-hq/trinity-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474",
+    "rwitz2/go-bruins-v2.1.1": 
"https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "rwitz2/go-bruins-v2.1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "GreenNode/GreenNodeLM-v3olet-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "GreenNode/GreenNodeLM-7B-v4leo": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "GreenNode/LeoScorpius-GreenNode-7B-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "viethq188/LeoScorpius-7B-Chat-DPO": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "GreenNode/GreenNodeLM-7B-v2leo": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "janai-hq/trinity-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "ignos/LeoScorpius-GreenNode-Alpaca-7B-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "fblgit/una-cybertron-7b-v3-OMA": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "mncai/mistral-7b-dpo-merge-v1.1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "mncai/mistral-7b-dpo-v6": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "Toten5/LeoScorpius-GreenNode-7B-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "GreenNode/GreenNodeLM-7B-v1olet": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "quantumaikr/quantum-dpo-v0.1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "quantumaikr/quantum-v0.01": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "quantumaikr/quantum-trinity-v0.1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "mncai/mistral-7b-dpo-v5": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "cookinai/BruinHermes": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "jan-ai/Pandora-10.7B-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "v1olet/v1olet_marcoroni-go-bruins-merge-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "v1olet/v1olet_merged_dpo_7B_v3": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "rwitz2/pee": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "zyh3826 / GML-Mistral-merged-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/503", + "dillfrescott/trinity-medium": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/474", + "udkai/Garrulus": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/526", + "dfurman/GarrulusMarcoro-7B-v0.1": "https://huggingface.co/dfurman/GarrulusMarcoro-7B-v0.1/discussions/1", + "eren23/slerp-test-turdus-beagle": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548", + "abideen/NexoNimbus-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548", + "alnrg2arg/test2_3": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548", + "nfaheem/Marcoroni-7b-DPO-Merge": 
"https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548", + "CultriX/MergeTrix-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548", + "liminerity/Blur-7b-v1.21": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/548", + # Merges not indicated + "gagan3012/MetaModelv2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "gagan3012/MetaModelv3": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "kyujinpy/Sakura-SOLRCA-Math-Instruct-DPO-v2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "kyujinpy/Sakura-SOLAR-Instruct-DPO-v2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "kyujinpy/Sakura-SOLRCA-Math-Instruct-DPO-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "kyujinpy/Sakura-SOLRCA-Instruct-DPO": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "fblgit/LUNA-SOLARkrautLM-Instruct": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "perlthoughts/Marcoroni-8x7B-v3-MoE": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "rwitz/go-bruins-v2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "rwitz/go-bruins": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "Walmart-the-bag/Solar-10.7B-Cato": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "aqweteddy/mistral_tv-neural-marconroni": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "NExtNewChattingAI/shark_tank_ai_7_b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "Q-bert/MetaMath-Cybertron": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "OpenPipe/mistral-ft-optimized-1227": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "perlthoughts/Falkor-7b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "v1olet/v1olet_merged_dpo_7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "Ba2han/BruinsV2-OpHermesNeu-11B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "DopeorNope/You_can_cry_Snowman-13B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "PistachioAlt/Synatra-MCS-7B-v0.3-RP-Slerp": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "Weyaxi/MetaMath-una-cybertron-v2-bf16-Ties": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "Weyaxi/OpenHermes-2.5-neural-chat-7b-v3-2-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "perlthoughts/Falkor-8x7B-MoE": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "elinas/chronos007-70b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "Weyaxi/MetaMath-NeuralHermes-2.5-Mistral-7B-Linear": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "Weyaxi/MetaMath-neural-chat-7b-v3-2-Ties": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + 
"diffnamehard/Mistral-CatMacaroni-slerp-uncensored-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "Weyaxi/neural-chat-7b-v3-1-OpenHermes-2.5-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "Weyaxi/MetaMath-NeuralHermes-2.5-Mistral-7B-Ties": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "Walmart-the-bag/Misted-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "garage-bAInd/Camel-Platypus2-70B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "Weyaxi/OpenOrca-Zephyr-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "uukuguy/speechless-mistral-7b-dare-0.85": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/510", + "DopeorNope/SOLARC-M-10.7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/511", + "cloudyu/Mixtral_11Bx2_MoE_19B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/511", + "DopeorNope/SOLARC-MOE-10.7Bx6 ": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/511", + "DopeorNope/SOLARC-MOE-10.7Bx4": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/511", + "gagan3012/MetaModelv2 ": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/511", + "udkai/Turdus": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "kodonho/Solar-OrcaDPO-Solar-Instruct-SLERP": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "kodonho/SolarM-SakuraSolar-SLERP": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "Yhyu13/LMCocktail-10.7B-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "mlabonne/NeuralMarcoro14-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "Neuronovo/neuronovo-7B-v0.2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "ryandt/MusingCaterpillar": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "Neuronovo/neuronovo-7B-v0.3": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "SanjiWatsuki/Lelantos-DPO-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "bardsai/jaskier-7b-dpo": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "cookinai/OpenCM-14": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "bardsai/jaskier-7b-dpo-v2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "jan-hq/supermario-v2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + # MoErges + "cloudyu/Yi-34Bx2-MoE-60B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "cloudyu/Mixtral_34Bx2_MoE_60B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "gagan3012/MetaModel_moe": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "macadeliccc/SOLAR-math-2x10.7b-v0.2": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "cloudyu/Mixtral_7Bx2_MoE": 
"https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "macadeliccc/SOLAR-math-2x10.7b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "macadeliccc/Orca-SOLAR-4x10.7b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "macadeliccc/piccolo-8x7b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "cloudyu/Mixtral_7Bx4_MOE_24B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "macadeliccc/laser-dolphin-mixtral-2x7b-dpo": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + "macadeliccc/polyglot-math-4x7b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/540", + # Other - contamination mostly + "DopeorNope/COKAL-v1-70B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/566", + "CultriX/MistralTrix-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/556", + "Contamination/contaminated_proof_7b_v1.0": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/664", + "Contamination/contaminated_proof_7b_v1.0_safetensor": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/664", +} + +# Models which have been requested by orgs to not be submitted on the leaderboard +DO_NOT_SUBMIT_MODELS = [ + "Voicelab/trurl-2-13b", # trained on MMLU + "TigerResearch/tigerbot-70b-chat", # per authors request + "TigerResearch/tigerbot-70b-chat-v2", # per authors request + "TigerResearch/tigerbot-70b-chat-v4-4k", # per authors request +] + + +def flag_models(leaderboard_data: list[dict]): + """Flags models based on external criteria or flagged status.""" + for model_data in leaderboard_data: + # If a model is not flagged, use its "fullname" as a key + if model_data[AutoEvalColumn.not_flagged.name]: + flag_key = model_data[AutoEvalColumn.fullname.name] + else: + # Merges and moes are flagged + flag_key = "merged" + + # Reverse the logic: Check for non-flagged models instead + if flag_key in FLAGGED_MODELS: + issue_num = FLAGGED_MODELS[flag_key].split("/")[-1] + issue_link = model_hyperlink( + FLAGGED_MODELS[flag_key], + f"See discussion #{issue_num}", + ) + model_data[ + AutoEvalColumn.model.name + ] = f"{model_data[AutoEvalColumn.model.name]} has been flagged! 
{issue_link}" + model_data[AutoEvalColumn.not_flagged.name] = False + else: + model_data[AutoEvalColumn.not_flagged.name] = True + + +def remove_forbidden_models(leaderboard_data: list[dict]): + """Removes models from the leaderboard based on the DO_NOT_SUBMIT list.""" + indices_to_remove = [] + for ix, model in enumerate(leaderboard_data): + if model[AutoEvalColumn.fullname.name] in DO_NOT_SUBMIT_MODELS: + indices_to_remove.append(ix) + + # Remove the models from the list + for ix in reversed(indices_to_remove): + leaderboard_data.pop(ix) + return leaderboard_data + + +def filter_models_flags(leaderboard_data: list[dict]): + leaderboard_data = remove_forbidden_models(leaderboard_data) + flag_models(leaderboard_data) diff --git a/src/leaderboard/read_evals.py b/src/leaderboard/read_evals.py new file mode 100644 index 0000000000000000000000000000000000000000..b4c29de6dce95c7ec252f504d3e9d3e0ec7faf9a --- /dev/null +++ b/src/leaderboard/read_evals.py @@ -0,0 +1,261 @@ +import json +from pathlib import Path +from json import JSONDecodeError +import logging +import math + +from dataclasses import dataclass, field +from typing import Optional, Dict, List + +from tqdm import tqdm +from tqdm.contrib.logging import logging_redirect_tqdm + +import numpy as np + +from src.display.formatting import make_clickable_model +from src.display.utils import AutoEvalColumn, ModelType, Precision, Tasks, WeightType, parse_datetime + +# Configure logging +logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") + + +@dataclass +class EvalResult: + # Also see src.display.utils.AutoEvalColumn for what will be displayed. + eval_name: str # org_model_precision (uid) + full_model: str # org/model (path on hub) + org: Optional[str] + model: str + revision: str # commit hash, "" if main + results: Dict[str, float] + precision: Precision = Precision.Unknown + model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ... + weight_type: WeightType = WeightType.Original + architecture: str = "Unknown" # From config file + license: str = "?" + likes: int = 0 + num_params: int = 0 + date: str = "" # submission date of request file + still_on_hub: bool = True + is_merge: bool = False + not_flagged: bool = False + status: str = "FINISHED" + # List of tags, initialized to a new empty list for each instance to avoid the pitfalls of mutable default arguments. + tags: List[str] = field(default_factory=list) + + @classmethod + def init_from_json_file(cls, json_filepath: str) -> "EvalResult": + with open(json_filepath, "r") as fp: + data = json.load(fp) + + config = data.get("config_general", {}) + precision = Precision.from_str(config.get("model_dtype", "unknown")) + org_and_model = config.get("model_name", "").split("/", 1) + org = org_and_model[0] if len(org_and_model) > 1 else None + model = org_and_model[-1] + if len(org_and_model) == 1: + org = None + model = org_and_model[0] + result_key = f"{model}_{precision.value.name}" + else: + org = org_and_model[0] + model = org_and_model[1] + result_key = f"{org}_{model}_{precision.value.name}" + full_model = "/".join(org_and_model) + + results = cls.extract_results(data) # Properly call the method to extract results + + return cls( + eval_name=result_key, + full_model=full_model, + org=org, + model=model, + results=results, + precision=precision, + revision=config.get("model_sha", ""), + ) + + @staticmethod + def extract_results(data: Dict) -> Dict[str, float]: + """ + Extract and process benchmark results from a given dict. 
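+
+        For reference, a minimal sketch of the assumed input shape (illustrative
+        only; real result files carry more keys, and the exact benchmark/metric
+        names come from src.display.utils.Tasks):
+
+            {
+                "versions": {"harness|truthfulqa:mc|0": 1},
+                "results": {"harness|truthfulqa:mc|0": {"mc2": 0.51}},
+            }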
+
+        Parameters:
+        - data (Dict): A dictionary containing benchmark data. This dictionary must
+          include 'versions' and 'results' keys with respective sub-data.
+
+        Returns:
+        - Dict[str, float]: A dictionary where keys are benchmark names and values
+          are the processed average scores as percentages.
+
+        Notes:
+        - The method specifically checks for certain benchmark names to skip outdated entries.
+        - Handles NaN values by setting the corresponding benchmark result to 0.0.
+        - Averages scores across metrics for benchmarks found in the data, in a percentage format.
+        """
+        results = {}
+        for task in Tasks:
+            task = task.value
+            # We skip old mmlu entries; the flag makes the skip apply to the
+            # outer task loop instead of silently doing nothing.
+            if task.benchmark == "hendrycksTest":
+                wrong_mmlu_version = False
+                for mmlu_k in ["harness|hendrycksTest-abstract_algebra|5", "hendrycksTest-abstract_algebra"]:
+                    if mmlu_k in data["versions"] and data["versions"][mmlu_k] == 0:
+                        wrong_mmlu_version = True
+                if wrong_mmlu_version:
+                    continue
+
+            # Some benchmark values are NaNs (mostly truthfulQA); record them as 0.0
+            # and move on to the next task so the NaN cannot poison the average below.
+            # This whole-dict iteration could be avoided if the benchmark name matched
+            # the key in results (e.g. truthfulqa:mc instead of harness|truthfulqa:mc|0).
+            has_nan = False
+            for k, v in data["results"].items():
+                if task.benchmark in k and task.metric in v and math.isnan(float(v[task.metric])):
+                    results[task.benchmark] = 0.0
+                    has_nan = True
+                    break
+            if has_nan:
+                continue
+
+            # We average all scores of a given metric (mostly for mmlu)
+            accs = np.array([v.get(task.metric, None) for k, v in data["results"].items() if task.benchmark in k])
+            if accs.size == 0 or any([acc is None for acc in accs]):
+                continue
+
+            mean_acc = np.mean(accs) * 100.0
+            results[task.benchmark] = mean_acc
+
+        return results
+
+    def update_with_request_file(self, requests_path):
+        """Finds the relevant request file for the current model and updates info with it."""
+        try:
+            request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name)
+            if request_file is None:
+                logging.warning(f"No request file for {self.org}/{self.model}")
+                self.status = "FAILED"
+                return
+
+            with open(request_file, "r") as f:
+                request = json.load(f)
+
+            self.model_type = ModelType.from_str(request.get("model_type", "Unknown"))
+            self.weight_type = WeightType[request.get("weight_type", "Original")]
+            self.num_params = int(request.get("params", 0))  # Ensuring type safety
+            self.date = request.get("submitted_time", "")
+            self.architecture = request.get("architectures", "Unknown")
+            self.status = request.get("status", "FAILED")
+
+        except FileNotFoundError:
+            self.status = "FAILED"
+            logging.error(f"Request file: {request_file} not found for {self.org}/{self.model}")
+        except JSONDecodeError:
+            self.status = "FAILED"
+            logging.error(f"Error decoding JSON from the request file for {self.org}/{self.model}")
+        except KeyError as e:
+            self.status = "FAILED"
+            logging.error(f"Key error {e} in processing request file for {self.org}/{self.model}")
+        except Exception as e:  # Catch-all for any other unexpected exceptions
+            self.status = "FAILED"
+            logging.error(f"Unexpected error {e} for {self.org}/{self.model}")
+
+    def update_with_dynamic_file_dict(self, file_dict):
+        """Update object attributes based on the provided dictionary, with error handling for missing keys and type validation."""
+        # Default values set for optional or potentially missing keys.
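+        # The dynamic-info entry is expected to (optionally) carry "license",
+        # "likes", "still_on_hub" and "tags"; every access below is defaulted,
+        # so a sparse entry cannot raise.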
+ self.license = file_dict.get("license", "?") + self.likes = int(file_dict.get("likes", 0)) # Ensure likes is treated as an integer + self.still_on_hub = file_dict.get("still_on_hub", False) # Default to False if key is missing + self.tags = file_dict.get("tags", []) + + # Calculate `flagged` only if 'tags' is not empty and avoid calculating each time + self.not_flagged = not (any("flagged" in tag for tag in self.tags)) + + def to_dict(self): + """Converts the Eval Result to a dict compatible with our dataframe display""" + average = sum([v for v in self.results.values() if v is not None]) / len(Tasks) + data_dict = { + "eval_name": self.eval_name, # not a column, just a save name, + AutoEvalColumn.precision.name: self.precision.value.name, + AutoEvalColumn.model_type.name: self.model_type.value.name, + AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol, + AutoEvalColumn.weight_type.name: self.weight_type.value.name, + AutoEvalColumn.architecture.name: self.architecture, + AutoEvalColumn.model.name: make_clickable_model(self.full_model), + AutoEvalColumn.fullname.name: self.full_model, + AutoEvalColumn.revision.name: self.revision, + AutoEvalColumn.average.name: average, + AutoEvalColumn.license.name: self.license, + AutoEvalColumn.likes.name: self.likes, + AutoEvalColumn.params.name: self.num_params, + AutoEvalColumn.still_on_hub.name: self.still_on_hub, + AutoEvalColumn.merged.name: not ("merge" in self.tags if self.tags else False), + AutoEvalColumn.moe.name: not ( + ("moe" in self.tags if self.tags else False) or "moe" in self.full_model.lower() + ), + AutoEvalColumn.not_flagged.name: self.not_flagged, + } + + for task in Tasks: + data_dict[task.value.col_name] = self.results[task.value.benchmark] + + return data_dict + + +def get_request_file_for_model(requests_path, model_name, precision): + """Selects the correct request file for a given model. 
Only keeps runs tagged as FINISHED."""
+    requests_path = Path(requests_path)
+    pattern = f"{model_name}_eval_request_*.json"
+
+    # Using pathlib to find files matching the pattern
+    request_files = list(requests_path.glob(pattern))
+
+    # Sort the files by name in descending order to mimic 'reverse=True'
+    request_files.sort(reverse=True)
+
+    # Select the correct request file based on 'status' and 'precision'; keep
+    # the selection in its own variable so a non-matching last candidate is not
+    # returned by accident.
+    selected_request_file = None
+    for candidate in request_files:
+        with candidate.open("r") as f:
+            req_content = json.load(f)
+            if req_content["status"] == "FINISHED" and req_content["precision"] == precision.split(".")[-1]:
+                selected_request_file = str(candidate)
+
+    # Returns None if no file matches the criteria
+    return selected_request_file
+
+
+def get_raw_eval_results(results_path: str, requests_path: str, dynamic_path: str) -> list[EvalResult]:
+    """From the path of the results folder root, extract all needed info for results"""
+    with open(dynamic_path) as f:
+        dynamic_data = json.load(f)
+
+    results_path = Path(results_path)
+    model_files = list(results_path.rglob("results_*.json"))
+    model_files.sort(key=lambda file: parse_datetime(file.stem.removeprefix("results_")))
+
+    eval_results = {}
+    # Wrap model_files iteration with tqdm for progress display
+    for model_result_filepath in tqdm(model_files, desc="Processing model files"):
+        # Creation of result
+        eval_result = EvalResult.init_from_json_file(model_result_filepath)
+        with logging_redirect_tqdm():
+            eval_result.update_with_request_file(requests_path)
+
+        if eval_result.full_model in dynamic_data:
+            eval_result.update_with_dynamic_file_dict(dynamic_data[eval_result.full_model])
+            # Hardcoding because of gating problem
+            if any([org in eval_result.full_model for org in ["meta-llama/", "google/", "tiiuae/"]]):
+                eval_result.still_on_hub = True
+
+        # Store results of same eval together
+        eval_name = eval_result.eval_name
+        if eval_name in eval_results.keys():
+            eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None})
+        else:
+            eval_results[eval_name] = eval_result
+
+    results = []
+    for k, v in eval_results.items():
+        try:
+            if v.status == "FINISHED":
+                v.to_dict()  # we test if the dict version is complete
+                results.append(v)
+        except KeyError as e:
+            logging.error(f"Error while checking model {k} {v.date} json, no key: {e}")  # not all eval values present
+            continue
+
+    return results
diff --git a/src/populate.py b/src/populate.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6011e77ab81109d2c513f38ae2df694a9c32aa0
--- /dev/null
+++ b/src/populate.py
@@ -0,0 +1,52 @@
+import pathlib
+import pandas as pd
+from src.display.formatting import has_no_nan_values, make_clickable_model
+from src.display.utils import AutoEvalColumn, EvalQueueColumn, baseline_row, load_json_data
+from src.leaderboard.filter_models import filter_models_flags
+from src.leaderboard.read_evals import get_raw_eval_results
+
+
+def _process_model_data(entry, model_name_key="model", revision_key="revision"):
+    """Enrich model data with clickable links and revisions."""
+    entry[EvalQueueColumn.model.name] = make_clickable_model(entry.get(model_name_key, ""))
+    entry[EvalQueueColumn.revision.name] = entry.get(revision_key, "main")
+    return entry
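+
+
+# Illustrative sketch (not part of the original patch): the queue-entry dicts
+# consumed by the helpers in this module. The field values are placeholders;
+# only the model name, the revision and "status" are actually read here.
+#
+#   entry = {"model": "org/model", "revision": "main", "status": "PENDING"}
+#   entry = _process_model_data(entry)  # adds a clickable model link and the revision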
+
+def get_evaluation_queue_df(save_path, cols):
+    """Generate dataframes for pending, running, and finished evaluation entries."""
+    save_path = pathlib.Path(save_path)
+    all_evals = []
+
+    for path in save_path.rglob("*.json"):
+        data = load_json_data(path)
+        if data:
+            all_evals.append(_process_model_data(data))
+
+    # Organizing data by status
+    status_map = {
+        "PENDING": ["PENDING", "RERUN"],
+        "RUNNING": ["RUNNING"],
+        "FINISHED": ["FINISHED", "PENDING_NEW_EVAL"],
+    }
+    status_dfs = {status: [] for status in status_map}
+    for eval_data in all_evals:
+        for status, extra_statuses in status_map.items():
+            if eval_data["status"] in extra_statuses:
+                status_dfs[status].append(eval_data)
+
+    return tuple(pd.DataFrame(status_dfs[status], columns=cols) for status in ["FINISHED", "RUNNING", "PENDING"])
+
+
+def get_leaderboard_df(results_path, requests_path, dynamic_path, cols, benchmark_cols):
+    """Retrieve and process leaderboard data."""
+    raw_data = get_raw_eval_results(results_path, requests_path, dynamic_path)
+    all_data_json = [model.to_dict() for model in raw_data] + [baseline_row]
+    filter_models_flags(all_data_json)
+
+    df = pd.DataFrame.from_records(all_data_json)
+    df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
+    df = df[cols].round(decimals=2)
+    df = df[has_no_nan_values(df, benchmark_cols)]
+    return raw_data, df
diff --git a/src/radial/radial.py b/src/radial/radial.py
new file mode 100644
index 0000000000000000000000000000000000000000..28f47e146f28d39e03fc2e4c5714ff5ae3721df5
--- /dev/null
+++ b/src/radial/radial.py
@@ -0,0 +1,161 @@
+import plotly.graph_objects as go
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import random
+import itertools as it
+
+from src.leaderboard.build_leaderboard import build_leadearboard_df
+
+
+def create_plot(selected_models):
+    """
+    Builds a bar-chart comparison of the selected models on the DeathMath metrics.
+
+    Args:
+        selected_models: List of model names to show on the plot
+
+    Returns:
+        matplotlib.figure.Figure: Figure to render in the interface
+    """
+    # Fetch model data from the leaderboard
+    models_df = build_leadearboard_df()
+
+    # If no models are selected or the data failed to load, return an empty plot
+    if not selected_models or models_df.empty:
+        fig, ax = plt.subplots(figsize=(10, 6))
+        ax.text(0.5, 0.5, "НСт Π΄Π°Π½Π½Ρ‹Ρ… для отобраТСния",
+                horizontalalignment='center', verticalalignment='center',
+                transform=ax.transAxes, fontsize=14)
+        ax.set_axis_off()
+        return fig
+
+    # Filter the DataFrame down to the selected models only
+    models_to_show = models_df[models_df['model'].isin(selected_models)]
+
+    if models_to_show.empty:
+        fig, ax = plt.subplots(figsize=(10, 6))
+        ax.text(0.5, 0.5, "Π’Ρ‹Π±Ρ€Π°Π½Π½Ρ‹Π΅ ΠΌΠΎΠ΄Π΅Π»ΠΈ Π½Π΅ Π½Π°ΠΉΠ΄Π΅Π½Ρ‹ Π² Π΄Π°Π½Π½Ρ‹Ρ…",
+                horizontalalignment='center', verticalalignment='center',
+                transform=ax.transAxes, fontsize=14)
+        ax.set_axis_off()
+        return fig
+
+    # Set up the bar chart comparing the models
+    fig, ax = plt.subplots(figsize=(12, 8))
+
+    # Bar width
+    bar_width = 0.25
+
+    # Positions along the x axis
+    models_count = len(models_to_show)
+    indices = np.arange(models_count)
+
+    # Colour palette
+    colors = ['#1f77b4', '#ff7f0e', '#2ca02c']
+
+    # Draw one group of bars per metric
+    ax.bar(indices - bar_width, models_to_show['math_score'], bar_width,
+           label='RussianMath Score', color=colors[0])
+    ax.bar(indices, models_to_show['physics_score'], bar_width,
+           label='RussianPhysics Score', color=colors[1])
+    ax.bar(indices + bar_width, models_to_show['score'], bar_width,
+           label='Combined Score', color=colors[2])
+
+    # Axes and labels
+    ax.set_xlabel('МодСли')
+    ax.set_ylabel('Π‘Π°Π»Π»Ρ‹')
+    ax.set_title('Π‘Ρ€Π°Π²Π½Π΅Π½ΠΈΠ΅ ΠΏΡ€ΠΎΠΈΠ·Π²ΠΎΠ΄ΠΈΡ‚Π΅Π»ΡŒΠ½ΠΎΡΡ‚ΠΈ ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ Π½Π° DeathMath benchmark')
+    ax.set_xticks(indices)
+    ax.set_xticklabels(models_to_show['model'], rotation=45, ha='right')
+    ax.legend()
+
+    # Scores live in [0, 1], so clamp the y axis accordingly
+    ax.set_ylim(0, 1.0)
+
+    # Add a grid for readability
+    ax.grid(axis='y', linestyle='--', alpha=0.7)
+
+    # Make sure all labels fit
+    plt.tight_layout()
+
+    return fig
+
+
+def create_radar_plot(selected_models):
+    """
+    Builds an interactive radar chart comparing the selected models.
+
+    Args:
+        selected_models: List of model names to show on the plot
+
+    Returns:
+        plotly.graph_objects.Figure: Interactive radar plot
+    """
+    models = build_leadearboard_df()
+    metrics = ["math_score", "physics_score", "score"]
+    metric_labels = ["RussianMath", "RussianPhysics", "Combined"]
+
+    MIN_COLOUR_DISTANCE_BETWEEN_MODELS = 100
+    seed = 42
+
+    def generate_colours(min_distance, seed):
+        colour_mapping = {}
+        all_models = selected_models
+
+        for i in it.count():
+            min_colour_distance = min_distance - i
+            retries_left = 10 * len(all_models)
+
+            for model_id in all_models:
+                random.seed(hash(model_id) + i + seed)
+                r, g, b = 0, 0, 0
+                too_bright, similar_to_other_model = True, True
+
+                while (too_bright or similar_to_other_model) and retries_left > 0:
+                    r, g, b = tuple(random.randint(0, 255) for _ in range(3))
+                    too_bright = np.min([r, g, b]) > 200
+                    similar_to_other_model = any(
+                        np.abs(np.array(colour) - np.array([r, g, b])).sum() < min_colour_distance
+                        for colour in colour_mapping.values()
+                    )
+                    retries_left -= 1
+
+                colour_mapping[model_id] = (r, g, b)
+                if len(colour_mapping) == len(all_models):
+                    break
+
+            return colour_mapping
+
+    colour_mapping = generate_colours(MIN_COLOUR_DISTANCE_BETWEEN_MODELS, seed)
+    fig = go.Figure()
+
+    for _, model_data in models.iterrows():
+        model_name = model_data["model"]
+        if model_name not in selected_models:
+            continue
+
+        values = [model_data[metric] for metric in metrics]
+        color = f'rgb{colour_mapping[model_name]}'
+
+        fig.add_trace(go.Scatterpolar(
+            r=values,
+            theta=metric_labels,
+            name=model_name,
+            fill='toself',
+            fillcolor=f'rgba{colour_mapping[model_name] + (0.6,)}',
+            line=dict(color=color)
+        ))
+
+    fig.update_layout(
+        polar=dict(
+            radialaxis=dict(
+                visible=True,
+                range=[0, 1]
+            )
+        ),
+        showlegend=True,
+        title='Π‘Ρ€Π°Π²Π½Π΅Π½ΠΈΠ΅ ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ Π½Π° DeathMath',
+        template="plotly_dark",
+    )
+
+    return fig
+
diff --git a/src/scripts/create_request_file.py b/src/scripts/create_request_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4fd139e093fb29a44cd557f228469e7fa1d3916
--- /dev/null
+++ b/src/scripts/create_request_file.py
@@ -0,0 +1,92 @@
+import json
+import os
+import pprint
+from datetime import datetime, timezone
+
+import click
+from colorama import Fore
+from huggingface_hub import HfApi, snapshot_download
+
+from src.display.utils import ModelType, WeightType
+from src.submission.check_validity import get_model_size
+
+EVAL_REQUESTS_PATH = "eval-queue"
+QUEUE_REPO = "open-llm-leaderboard/requests"
+
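+# Choices offered by the interactive prompts in main() below; the chosen
+# precision string is written verbatim into the generated request file.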
+precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ") +model_types = [e.name for e in ModelType] +weight_types = [e.name for e in WeightType] + + +def main(): + api = HfApi() + current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH, repo_type="dataset") + + model_name = click.prompt("Enter model name") + revision = click.prompt("Enter revision", default="main") + precision = click.prompt("Enter precision", default="float16", type=click.Choice(precisions)) + model_type = click.prompt("Enter model type", type=click.Choice(model_types)) + weight_type = click.prompt("Enter weight type", default="Original", type=click.Choice(weight_types)) + base_model = click.prompt("Enter base model", default="") + status = click.prompt("Enter status", default="FINISHED") + + try: + model_info = api.model_info(repo_id=model_name, revision=revision) + except Exception as e: + print(f"{Fore.RED}Could not find model info for {model_name} on the Hub\n{e}{Fore.RESET}") + return 1 + + model_size = get_model_size(model_info=model_info, precision=precision) + + try: + license = model_info.cardData["license"] + except Exception: + license = "?" + + eval_entry = { + "model": model_name, + "base_model": base_model, + "revision": model_info.sha, # force to use the exact model commit + "private": False, + "precision": precision, + "weight_type": weight_type, + "status": status, + "submitted_time": current_time, + "model_type": model_type, + "likes": model_info.likes, + "params": model_size, + "license": license, + } + + user_name = "" + model_path = model_name + if "/" in model_name: + user_name = model_name.split("/")[0] + model_path = model_name.split("/")[1] + + pprint.pprint(eval_entry) + + if click.confirm("Do you want to continue? 
This request file will be pushed to the hub"): + click.echo("continuing...") + + out_dir = f"{EVAL_REQUESTS_PATH}/{user_name}" + os.makedirs(out_dir, exist_ok=True) + out_path = f"{out_dir}/{model_path}_eval_request_{False}_{precision}_{weight_type}.json" + + with open(out_path, "w") as f: + f.write(json.dumps(eval_entry)) + + api.upload_file( + path_or_fileobj=out_path, + path_in_repo=out_path.split(f"{EVAL_REQUESTS_PATH}/")[1], + repo_id=QUEUE_REPO, + repo_type="dataset", + commit_message=f"Add {model_name} to eval queue", + ) + else: + click.echo("aborting...") + + +if __name__ == "__main__": + main() diff --git a/src/scripts/update_all_request_files.py b/src/scripts/update_all_request_files.py new file mode 100644 index 0000000000000000000000000000000000000000..e95f7f6ead3efd5b4d20c8097e0d4482dfa32584 --- /dev/null +++ b/src/scripts/update_all_request_files.py @@ -0,0 +1,96 @@ +import json +import os +import subprocess + +from src.envs import EVAL_REQUESTS_PATH, H4_TOKEN +from src.submission.check_validity import check_model_card, get_model_tags, is_model_on_hub + + +def update_one_model(model_id, data, models_on_the_hub): + # Model no longer on the hub at all + if model_id not in models_on_the_hub: + data["still_on_hub"] = False + data["likes"] = 0 + data["downloads"] = 0 + data["created_at"] = "" + data["tags"] = [] + return data + + # Grabbing model parameters + model_cfg = models_on_the_hub[model_id] + data["likes"] = model_cfg.likes + data["downloads"] = model_cfg.downloads + data["created_at"] = str(model_cfg.created_at) + data["license"] = model_cfg.card_data.license if model_cfg.card_data is not None else "" + + # Grabbing model details + model_name = model_id + if model_cfg.card_data is not None and model_cfg.card_data.base_model is not None: + if isinstance(model_cfg.card_data.base_model, str): + model_name = model_cfg.card_data.base_model # for adapters, we look at the parent model + still_on_hub, _, _ = is_model_on_hub( + model_name=model_name, + revision=data.get("revision"), + trust_remote_code=True, + test_tokenizer=False, + token=H4_TOKEN, + ) + # If the model doesn't have a model card or a license, we consider it's deleted + if still_on_hub: + try: + status, _, model_card = check_model_card(model_id) + if status is False: + still_on_hub = False + except Exception: + model_card = None + still_on_hub = False + data["still_on_hub"] = still_on_hub + + tags = get_model_tags(model_card, model_id) if still_on_hub else [] + + data["tags"] = tags + return data + + +def update_models(file_path, models_on_the_hub): + """ + Search through all JSON files in the specified root folder and its subfolders, + and update the likes key in JSON dict from value of input dict + """ + seen_models = [] + with open(file_path, "r") as f: + model_infos = json.load(f) + for model_id in model_infos.keys(): + seen_models.append(model_id) + model_infos[model_id] = update_one_model( + model_id=model_id, data=model_infos[model_id], models_on_the_hub=models_on_the_hub + ) + + # If new requests files have been created since we started all this + # we grab them + all_models = [] + try: + for ix, (root, _, files) in enumerate(os.walk(EVAL_REQUESTS_PATH)): + if ix == 0: + continue + for file in files: + if "eval_request" in file: + path = root.split("/")[-1] + "/" + file.split("_eval_request")[0] + all_models.append(path) + except Exception as e: + print(e) + pass + + for model_id in all_models: + if model_id not in seen_models: + model_infos[model_id] = update_one_model(model_id=model_id, data={}, 
models_on_the_hub=models_on_the_hub)
+
+    with open(file_path, "w") as f:
+        json.dump(model_infos, f, indent=2)
+
+
+def update_dynamic_files():
+    # from gen import gen_answer, gen_judgment
+    # Pass argv as a list so Popen does not treat the whole string as the
+    # executable name; both scripts run as fire-and-forget subprocesses.
+    subprocess.Popen(["python3", "../gen/gen_judgement.py"])
+
+    subprocess.Popen(["python3", "../gen/show_result.py", "--output"])
diff --git a/src/submission/check_validity.py b/src/submission/check_validity.py
new file mode 100644
index 0000000000000000000000000000000000000000..b31d0679109ab227d5c88474f408eac7d226b67b
--- /dev/null
+++ b/src/submission/check_validity.py
@@ -0,0 +1,178 @@
+import json
+import os
+import re
+from collections import defaultdict
+from datetime import datetime, timedelta, timezone
+
+import huggingface_hub
+from huggingface_hub import ModelCard
+from huggingface_hub.hf_api import ModelInfo, get_safetensors_metadata
+from transformers import AutoConfig, AutoTokenizer
+
+from src.envs import HAS_HIGHER_RATE_LIMIT
+
+
+# ht to @Wauplin, thank you for the snippet!
+# See https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/317
+def check_model_card(repo_id: str) -> tuple[bool, str, ModelCard | None]:
+    # Returns operation status, an error message, and the card when it loads
+    try:
+        card = ModelCard.load(repo_id)
+    except huggingface_hub.utils.EntryNotFoundError:
+        return False, "Please add a model card to your model to explain how you trained/fine-tuned it.", None
+
+    # Enforce license metadata
+    if card.data.license is None:
+        if not ("license_name" in card.data and "license_link" in card.data):
+            return (
+                False,
+                (
+                    "License not found. Please add a license to your model card using the `license` metadata or a"
+                    " `license_name`/`license_link` pair."
+                ),
+                None,
+            )
+
+    # Enforce card content
+    if len(card.text) < 200:
+        return False, "Please add a description to your model card, it is too short.", None
+
+    return True, "", card
+
+
+def is_model_on_hub(
+    model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False
+) -> tuple[bool, str, AutoConfig]:
+    try:
+        config = AutoConfig.from_pretrained(
+            model_name, revision=revision, trust_remote_code=trust_remote_code, token=token
+        )  # , force_download=True)
+        if test_tokenizer:
+            try:
+                AutoTokenizer.from_pretrained(
+                    model_name, revision=revision, trust_remote_code=trust_remote_code, token=token
+                )
+            except ValueError as e:
+                return (False, f"uses a tokenizer which is not in a transformers release: {e}", None)
+            except Exception:
+                return (
+                    False,
+                    "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?",
+                    None,
+                )
+        return True, None, config
+
+    except ValueError:
+        return (
+            False,
+            "needs to be launched with `trust_remote_code=True`. For safety reason, we do not allow these models to be automatically submitted to the leaderboard.",
+            None,
+        )
+
+    except Exception as e:
+        if "You are trying to access a gated repo." in str(e):
+            return True, "uses a gated model.", None
+        return False, f"was not found or misconfigured on the hub! Error raised was {e.args[0]}", None
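+
+
+# Illustrative sketch (not part of the original patch) of how the two checks
+# above are typically combined when validating a submission; "org/model" is a
+# placeholder:
+#
+#   on_hub, error, config = is_model_on_hub("org/model", revision="main", test_tokenizer=True)
+#   if not on_hub:
+#       raise ValueError(f'Model "org/model" {error}')
+#   card_ok, msg, card = check_model_card("org/model")
+#   if not card_ok:
+#       raise ValueError(msg)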
+
+
+def get_model_size(model_info: ModelInfo, precision: str):
+    size_pattern = re.compile(r"(\d+\.)?\d+(b|m)")
+    safetensors = None
+    try:
+        safetensors = get_safetensors_metadata(model_info.id)
+    except Exception as e:
+        print(e)
+
+    if safetensors is not None:
+        model_size = round(sum(safetensors.parameter_count.values()) / 1e9, 3)
+    else:
+        try:
+            size_match = re.search(size_pattern, model_info.id.lower())
+            model_size = size_match.group(0)
+            model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
+        except AttributeError:
+            return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
+
+    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.id.lower()) else 1
+    model_size = size_factor * model_size
+    return model_size
+
+
+def get_model_arch(model_info: ModelInfo):
+    return model_info.config.get("architectures", "Unknown")
+
+
+def user_submission_permission(org_or_user, users_to_submission_dates, rate_limit_period, rate_limit_quota):
+    if org_or_user not in users_to_submission_dates:
+        return True, ""
+    submission_dates = sorted(users_to_submission_dates[org_or_user])
+
+    time_limit = (datetime.now(timezone.utc) - timedelta(days=rate_limit_period)).strftime("%Y-%m-%dT%H:%M:%SZ")
+    submissions_after_timelimit = [d for d in submission_dates if d > time_limit]
+
+    num_models_submitted_in_period = len(submissions_after_timelimit)
+    if org_or_user in HAS_HIGHER_RATE_LIMIT:
+        rate_limit_quota = 2 * rate_limit_quota
+
+    if num_models_submitted_in_period > rate_limit_quota:
+        error_msg = f"Organisation or user `{org_or_user}` "
+        error_msg += f"already has {num_models_submitted_in_period} model requests submitted to the leaderboard "
+        error_msg += f"in the last {rate_limit_period} days.\n"
+        error_msg += (
+            "Please wait a couple of days before resubmitting, so that everybody can enjoy using the leaderboard πŸ€—"
+        )
+        return False, error_msg
+    return True, ""
+
+
+def already_submitted_models(requested_models_dir: str) -> tuple[set[str], dict[str, list[str]]]:
+    depth = 1
+    file_names = []
+    users_to_submission_dates = defaultdict(list)
+
+    for root, _, files in os.walk(requested_models_dir):
+        current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
+        if current_depth == depth:
+            for file in files:
+                if not file.endswith(".json"):
+                    continue
+                with open(os.path.join(root, file), "r") as f:
+                    info = json.load(f)
+                    file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
+
+                    # Select organisation
+                    if info["model"].count("/") == 0 or "submitted_time" not in info:
+                        continue
+                    organisation, _ = info["model"].split("/")
+                    users_to_submission_dates[organisation].append(info["submitted_time"])
+
+    return set(file_names), users_to_submission_dates
+
+
+def get_model_tags(model_card, model: str):
+    is_merge_from_metadata = False
+    is_moe_from_metadata = False
+
+    tags = []
+    if model_card is None:
+        return tags
+    if model_card.data.tags:
+        is_merge_from_metadata = any(
+            [tag in model_card.data.tags for tag in ["merge", "moerge", "mergekit", "lazymergekit"]]
+        )
+        is_moe_from_metadata = any([tag in model_card.data.tags for tag in ["moe", "moerge"]])
+
+    is_merge_from_model_card = any(
+        keyword in model_card.text.lower() for keyword in ["merged model", "merge model", "moerge"]
+    )
+    if is_merge_from_model_card or is_merge_from_metadata:
+        tags.append("merge")
+    is_moe_from_model_card = any(keyword in model_card.text.lower() for keyword in ["moe", "mixtral"])
+    # Hardcoding 
because of gating problem + if "Qwen/Qwen1.5-32B" in model: + is_moe_from_model_card = False + is_moe_from_name = "moe" in model.lower().replace("/", "-").replace("_", "-").split("-") + if is_moe_from_model_card or is_moe_from_name or is_moe_from_metadata: + tags.append("moe") + + return tags diff --git a/src/submission/submit.py b/src/submission/submit.py new file mode 100644 index 0000000000000000000000000000000000000000..1de4b40256fd35a5354bb09b2a0caa0a26f2223b --- /dev/null +++ b/src/submission/submit.py @@ -0,0 +1,171 @@ +from src.display.formatting import styled_message +# from src.leaderboard.filter_models import DO_NOT_SUBMIT_MODELS +# from src.submission.check_validity import ( +# already_submitted_models, +# check_model_card, +# get_model_size, +# get_model_tags, +# is_model_on_hub, +# user_submission_permission, +# ) + +REQUESTED_MODELS = None +USERS_TO_SUBMISSION_DATES = None + + +def add_new_eval( + model: str, +): + # global REQUESTED_MODELS + # global USERS_TO_SUBMISSION_DATES + # if not REQUESTED_MODELS: + # REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH) + + # user_name = "" + # model_path = model + # if "/" in model: + # user_name = model.split("/")[0] + # model_path = model.split("/")[1] + + # # precision = precision.split(" ")[0] + # current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + + # if model_type is None or model_type == "": + # return styled_error("Please select a model type.") + + # # Is the user rate limited? + # if user_name != "": + # user_can_submit, error_msg = user_submission_permission( + # user_name, USERS_TO_SUBMISSION_DATES, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA + # ) + # if not user_can_submit: + # return styled_error(error_msg) + + # Did the model authors forbid its submission to the leaderboard? + # if model in DO_NOT_SUBMIT_MODELS or base_model in DO_NOT_SUBMIT_MODELS: + # return styled_warning("Model authors have requested that their model be not submitted on the leaderboard.") + + # if model == "CohereForAI/c4ai-command-r-plus": + # return styled_warning( + # "This model cannot be submitted manually on the leaderboard before the transformers release." + # ) + + # # Does the model actually exist? + # if revision == "": + # revision = "main" + + # # Is the model on the hub? + # if weight_type in ["Delta", "Adapter"]: + # base_model_on_hub, error, _ = is_model_on_hub( + # model_name=base_model, revision=revision, token=H4_TOKEN, test_tokenizer=True + # ) + # if not base_model_on_hub: + # return styled_error(f'Base model "{base_model}" {error}') + + # architecture = "?" + # downloads = 0 + # created_at = "" + # if not weight_type == "Adapter": + # model_on_hub, error, model_config = is_model_on_hub(model_name=model, revision=revision, test_tokenizer=True) + # if not model_on_hub or model_config is None: + # return styled_error(f'Model "{model}" {error}') + # if model_config is not None: + # architectures = getattr(model_config, "architectures", None) + # if architectures: + # architecture = ";".join(architectures) + # downloads = getattr(model_config, "downloads", 0) + # created_at = getattr(model_config, "created_at", "") + + # Is the model info correctly filled? + # try: + # model_info = API.model_info(repo_id=model, revision=revision) + # except Exception: + # return styled_error("Could not get your model information. Please fill it up properly.") + + # model_size = get_model_size(model_info=model_info, precision=precision) + + # Were the model card and license filled? 
+
+
+def add_new_eval(
+    model: str,
+):
+    # global REQUESTED_MODELS
+    # global USERS_TO_SUBMISSION_DATES
+    # if not REQUESTED_MODELS:
+    #     REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
+
+    # user_name = ""
+    # model_path = model
+    # if "/" in model:
+    #     user_name = model.split("/")[0]
+    #     model_path = model.split("/")[1]
+
+    # # precision = precision.split(" ")[0]
+    # current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+    # if model_type is None or model_type == "":
+    #     return styled_error("Please select a model type.")
+
+    # # Is the user rate limited?
+    # if user_name != "":
+    #     user_can_submit, error_msg = user_submission_permission(
+    #         user_name, USERS_TO_SUBMISSION_DATES, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA
+    #     )
+    #     if not user_can_submit:
+    #         return styled_error(error_msg)
+
+    # Did the model authors forbid its submission to the leaderboard?
+    # if model in DO_NOT_SUBMIT_MODELS or base_model in DO_NOT_SUBMIT_MODELS:
+    #     return styled_warning("Model authors have requested that their model be not submitted on the leaderboard.")
+
+    # if model == "CohereForAI/c4ai-command-r-plus":
+    #     return styled_warning(
+    #         "This model cannot be submitted manually on the leaderboard before the transformers release."
+    #     )
+
+    # # Does the model actually exist?
+    # if revision == "":
+    #     revision = "main"
+
+    # # Is the model on the hub?
+    # if weight_type in ["Delta", "Adapter"]:
+    #     base_model_on_hub, error, _ = is_model_on_hub(
+    #         model_name=base_model, revision=revision, token=H4_TOKEN, test_tokenizer=True
+    #     )
+    #     if not base_model_on_hub:
+    #         return styled_error(f'Base model "{base_model}" {error}')
+
+    # architecture = "?"
+    # downloads = 0
+    # created_at = ""
+    # if not weight_type == "Adapter":
+    #     model_on_hub, error, model_config = is_model_on_hub(model_name=model, revision=revision, test_tokenizer=True)
+    #     if not model_on_hub or model_config is None:
+    #         return styled_error(f'Model "{model}" {error}')
+    #     if model_config is not None:
+    #         architectures = getattr(model_config, "architectures", None)
+    #         if architectures:
+    #             architecture = ";".join(architectures)
+    #         downloads = getattr(model_config, "downloads", 0)
+    #         created_at = getattr(model_config, "created_at", "")
+
+    # Is the model info correctly filled?
+    # try:
+    #     model_info = API.model_info(repo_id=model, revision=revision)
+    # except Exception:
+    #     return styled_error("Could not get your model information. Please fill it up properly.")
+
+    # model_size = get_model_size(model_info=model_info, precision=precision)
+
+    # Were the model card and license filled?
+    # try:
+    #     license = model_info.cardData["license"]
+    # except Exception:
+    #     return styled_error("Please select a license for your model")
+
+    # modelcard_OK, error_msg, model_card = check_model_card(model)
+    # if not modelcard_OK:
+    #     return styled_error(error_msg)
+
+    # tags = get_model_tags(model_card, model)
+
+    # # Seems good, creating the eval
+    # print("Adding new eval")
+
+    # eval_entry = {
+    #     "model": model,
+    #     # "base_model": base_model,
+    #     # "revision": model_info.sha,  # force to use the exact model commit
+    #     # "private": private,
+    #     # "precision": precision,
+    #     # "params": model_size,
+    #     # "architectures": architecture,
+    #     # "weight_type": weight_type,
+    #     "status": "PENDING",
+    #     # "submitted_time": current_time,
+    #     # "model_type": model_type,
+    #     "job_id": -1,
+    #     "job_start_time": None,
+    # }
+
+    # supplementary_info = {
+    #     "likes": model_info.likes,
+    #     "license": license,
+    #     "still_on_hub": True,
+    #     "tags": tags,
+    #     "downloads": downloads,
+    #     "created_at": created_at,
+    # }
+
+    # # Check for duplicate submission
+    # if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
+    #     return styled_warning("This model has been already submitted.")
+
+    # print("Creating eval file")
+    # OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+    # os.makedirs(OUT_DIR, exist_ok=True)
+    # out_path = f"{OUT_DIR}/{model_path}_eval_request_{private}_{precision}_{weight_type}.json"
+
+    # with open(out_path, "w") as f:
+    #     f.write(json.dumps(eval_entry))
+
+    # print("Uploading eval file")
+    # API.upload_file(
+    #     path_or_fileobj=out_path,
+    #     path_in_repo=out_path.split("eval-queue/")[1],
+    #     repo_id=QUEUE_REPO,
+    #     repo_type="dataset",
+    #     commit_message=f"Add {model} to eval queue",
+    # )
+
+    # We want to grab the latest version of the submission file to not accidentally overwrite it
+    # snapshot_download(
+    #     repo_id=DYNAMIC_INFO_REPO, local_dir=DYNAMIC_INFO_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
+    # )
+
+    # with open(DYNAMIC_INFO_FILE_PATH) as f:
+    #     all_supplementary_info = json.load(f)
+
+    # # all_supplementary_info[model] = supplementary_info
+    # with open(DYNAMIC_INFO_FILE_PATH, "w") as f:
+    #     json.dump(all_supplementary_info, f, indent=2)
+
+    # API.upload_file(
+    #     path_or_fileobj=DYNAMIC_INFO_FILE_PATH,
+    #     path_in_repo=DYNAMIC_INFO_FILE_PATH.split("/")[-1],
+    #     repo_id=DYNAMIC_INFO_REPO,
+    #     repo_type="dataset",
+    #     commit_message=f"Add {model} to dynamic info queue",
+    # )
+
+    # # Remove the local file
+    # os.remove(out_path)
+
+    return styled_message("Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour.")
diff --git a/src/tools/collections.py b/src/tools/collections.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fe6a6f853f8d42077dfcd63f13cbe3ed750a704
--- /dev/null
+++ b/src/tools/collections.py
@@ -0,0 +1,76 @@
+import pandas as pd
+from huggingface_hub import add_collection_item, delete_collection_item, get_collection, update_collection_item
+from huggingface_hub.utils import HfHubHTTPError
+from pandas import DataFrame
+
+from src.display.utils import AutoEvalColumn, ModelType
+from src.envs import H4_TOKEN, PATH_TO_COLLECTION
+
+# Specific intervals for the collections
+intervals = {
+    "1B": pd.Interval(0, 1.5, closed="right"),
+    "3B": pd.Interval(2.5, 3.5, closed="neither"),
+    "7B": pd.Interval(6, 8, closed="neither"),
+    "13B": pd.Interval(10, 14, closed="neither"),
+    "30B": pd.Interval(25, 35, closed="neither"),
+    "65B": pd.Interval(60, 70, closed="neither"),
+}
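+
+# Illustrative only (not part of the original file): pd.Interval supports
+# membership tests, which the size filter below relies on, e.g.:
+#
+# >>> 6.9 in intervals["7B"]
+# True
+# >>> 1.5 in intervals["1B"]  # closed="right" includes the upper bound
+# True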
+
+
+def _filter_by_type_and_size(df, model_type, size_interval):
+    """Filter DataFrame by model type and parameter size interval."""
+    type_emoji = model_type.value.symbol[0]
+    filtered_df = df[df[AutoEvalColumn.model_type_symbol.name] == type_emoji]
+    # Compute the size mask on the already-filtered frame so the boolean index aligns
+    params_column = pd.to_numeric(filtered_df[AutoEvalColumn.params.name], errors="coerce")
+    mask = params_column.apply(lambda x: x in size_interval)
+    return filtered_df.loc[mask]
+
+
+def _add_models_to_collection(collection, models, model_type, size):
+    """Add best models to the collection and update positions."""
+    cur_len_collection = len(collection.items)
+    for ix, model in enumerate(models, start=1):
+        try:
+            collection = add_collection_item(
+                PATH_TO_COLLECTION,
+                item_id=model,
+                item_type="model",
+                exists_ok=True,
+                note=f"Best {model_type.to_str(' ')} model of around {size} on the leaderboard today!",
+                token=H4_TOKEN,
+            )
+            # Ensure position is correct if the item was added
+            if len(collection.items) > cur_len_collection:
+                item_object_id = collection.items[-1].item_object_id
+                update_collection_item(collection_slug=PATH_TO_COLLECTION, item_object_id=item_object_id, position=ix)
+                cur_len_collection = len(collection.items)
+            break  # assuming we only add the top model
+        except HfHubHTTPError:
+            continue
+
+
+def update_collections(df: DataFrame):
+    """Update collections by filtering and adding the best models."""
+    collection = get_collection(collection_slug=PATH_TO_COLLECTION, token=H4_TOKEN)
+    cur_best_models = []
+
+    for model_type in ModelType:
+        if not model_type.value.name:
+            continue
+        for size, interval in intervals.items():
+            filtered_df = _filter_by_type_and_size(df, model_type, interval)
+            best_models = list(
+                filtered_df.sort_values(AutoEvalColumn.average.name, ascending=False)[AutoEvalColumn.fullname.name][:10]
+            )
+            print(model_type.value.symbol, size, best_models)
+            _add_models_to_collection(collection, best_models, model_type, size)
+            cur_best_models.extend(best_models)
+
+    # Cleanup: drop collection items that are no longer among the best models
+    existing_models = {item.item_id for item in collection.items}
+    to_remove = existing_models - set(cur_best_models)
+    for item_id in to_remove:
+        try:
+            # delete_collection_item expects the collection item's object id, not the repo id
+            item_object_id = next(item.item_object_id for item in collection.items if item.item_id == item_id)
+            delete_collection_item(collection_slug=PATH_TO_COLLECTION, item_object_id=item_object_id, token=H4_TOKEN)
+        except (StopIteration, HfHubHTTPError):
+            continue
diff --git a/src/tools/model_backlinks.py b/src/tools/model_backlinks.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1601174d8eae6052c65575d3b4c268f09a80208
--- /dev/null
+++ b/src/tools/model_backlinks.py
@@ -0,0 +1,1309 @@
+models = [
+    "uni-tianyan/Uni-TianYan",
+    "fangloveskari/ORCA_LLaMA_70B_QLoRA",
+    "garage-bAInd/Platypus2-70B-instruct",
+    "upstage/Llama-2-70b-instruct-v2",
+    "fangloveskari/Platypus_QLoRA_LLaMA_70b",
+    "yeontaek/llama-2-70B-ensemble-v5",
+    "TheBloke/Genz-70b-GPTQ",
+    "TheBloke/Platypus2-70B-Instruct-GPTQ",
+    "psmathur/model_007",
+    "yeontaek/llama-2-70B-ensemble-v4",
+    "psmathur/orca_mini_v3_70b",
+    "ehartford/Samantha-1.11-70b",
+    "MayaPH/GodziLLa2-70B",
+    "psmathur/model_007_v2",
+    "chargoddard/MelangeA-70b",
+    "ehartford/Samantha-1.1-70b",
+    "psmathur/model_009",
+    "upstage/Llama-2-70b-instruct",
+    "yeontaek/llama-2-70B-ensemble-v7",
+    "yeontaek/llama-2-70B-ensemble-v6",
+    "chargoddard/MelangeB-70b",
+    "yeontaek/llama-2-70B-ensemble-v3",
+    "chargoddard/MelangeC-70b",
+    "garage-bAInd/Camel-Platypus2-70B",
+    "yeontaek/llama-2-70B-ensemble-v2",
+    "garage-bAInd/Camel-Platypus2-70B",
+    "migtissera/Synthia-70B-v1.2",
+    "v2ray/LLaMA-2-Wizard-70B-QLoRA",
+    "quantumaikr/llama-2-70b-fb16-orca-chat-10k",
"v2ray/LLaMA-2-Wizard-70B-QLoRA", + "stabilityai/StableBeluga2", + "quantumaikr/llama-2-70b-fb16-guanaco-1k", + "garage-bAInd/Camel-Platypus2-70B", + "migtissera/Synthia-70B-v1.1", + "migtissera/Synthia-70B", + "psmathur/model_101", + "augtoma/qCammel70", + "augtoma/qCammel-70", + "augtoma/qCammel-70v1", + "augtoma/qCammel-70x", + "augtoma/qCammel-70-x", + "jondurbin/airoboros-l2-70b-gpt4-1.4.1", + "dfurman/llama-2-70b-dolphin-peft", + "jondurbin/airoboros-l2-70b-2.1", + "TheBloke/llama-2-70b-Guanaco-QLoRA-fp16", + "quantumaikr/QuantumLM-llama2-70B-Korean-LoRA", + "quantumaikr/quantumairk-llama-2-70B-instruct", + "psmathur/model_420", + "psmathur/model_51", + "garage-bAInd/Camel-Platypus2-70B", + "TheBloke/Airoboros-L2-70B-2.1-GPTQ", + "OpenAssistant/llama2-70b-oasst-sft-v10", + "garage-bAInd/Platypus2-70B", + "liuxiang886/llama2-70B-qlora-gpt4", + "upstage/llama-65b-instruct", + "quantumaikr/llama-2-70b-fb16-korean", + "NousResearch/Nous-Hermes-Llama2-70b", + "v2ray/LLaMA-2-Jannie-70B-QLoRA", + "jondurbin/airoboros-l2-70b-gpt4-m2.0", + "jondurbin/airoboros-l2-70b-gpt4-m2.0", + "OpenAssistant/llama2-70b-oasst-sft-v10", + "yeontaek/llama-2-70B-ensemble-v8", + "jondurbin/airoboros-l2-70b-gpt4-2.0", + "jarradh/llama2_70b_chat_uncensored", + "WizardLM/WizardMath-70B-V1.0", + "jordiclive/Llama-2-70b-oasst-1-200", + "WizardLM/WizardMath-70B-V1.0", + "jondurbin/airoboros-l2-70b-gpt4-2.0", + "OpenLemur/lemur-70b-chat-v1", + "tiiuae/falcon-180B", + "tiiuae/falcon-180B", + "stabilityai/StableBeluga1-Delta", + "psmathur/model_42_70b", + "psmathur/test_42_70b", + "TheBloke/fiction.live-Kimiko-V2-70B-fp16", + "tiiuae/falcon-180B", + "WizardLM/WizardMath-70B-V1.0", + "tiiuae/falcon-180B-chat", + "jondurbin/airoboros-l2-70b-gpt4-2.0", + "ehartford/samantha-1.1-llama-33b", + "ajibawa-2023/scarlett-33b", + "ddobokki/Llama-2-70b-orca-200k", + "TheBloke/gpt4-alpaca-lora_mlp-65B-HF", + "tiiuae/falcon-180B-chat", + "tiiuae/falcon-180B-chat", + "tiiuae/falcon-180B", + "TheBloke/Lemur-70B-Chat-v1-GPTQ", + "NousResearch/Nous-Puffin-70B", + "WizardLM/WizardLM-70B-V1.0", + "WizardLM/WizardMath-70B-V1.0", + "meta-llama/Llama-2-70b-hf", + "TheBloke/Llama-2-70B-fp16", + "Weyaxi/llama-2-alpacagpt4-1000step", + "WizardLM/WizardLM-70B-V1.0", + "simsim314/WizardLM-70B-V1.0-HF", + "simsim314/WizardLM-70B-V1.0-HF", + "WizardLM/WizardLM-70B-V1.0", + "openbmb/UltraLM-65b", + "psmathur/model_420_preview", + "WizardLM/WizardLM-70B-V1.0", + "simsim314/WizardLM-70B-V1.0-HF", + "OpenBuddy/openbuddy-llama2-70b-v10.1-bf16", + "upstage/llama-30b-instruct-2048", + "jondurbin/airoboros-65b-gpt4-1.2", + "TheBloke/guanaco-65B-HF", + "jondurbin/airoboros-65b-gpt4-1.3", + "meta-llama/Llama-2-70b-chat-hf", + "ValiantLabs/ShiningValiant", + "Faradaylab/Aria-70B", + "lilloukas/GPlatty-30B", + "TheBloke/VicUnlocked-alpaca-65B-QLoRA-fp16", + "jondurbin/airoboros-65b-gpt4-1.4-peft", + "jondurbin/airoboros-65b-gpt4-1.4", + "jondurbin/airoboros-65b-gpt4-2.0", + "TheBloke/WizardLM-70B-V1.0-GPTQ", + "TheBloke/WizardLM-70B-V1.0-GPTQ", + "ariellee/SuperPlatty-30B", + "jondurbin/airoboros-65b-gpt4-1.4", + "jondurbin/airoboros-65b-gpt4-2.0", + "yeontaek/llama-2-70b-IA3-guanaco", + "CalderaAI/30B-Lazarus", + "Aspik101/trurl-2-13b-pl-instruct_unload", + "ehartford/WizardLM-33B-V1.0-Uncensored", + "ehartford/WizardLM-33B-V1.0-Uncensored", + "OpenBuddy/openbuddy-llama-65b-v8-bf16", + "Aspik101/llama-30b-instruct-2048-PL-lora", + "h2oai/h2ogpt-research-oasst1-llama-65b", + "Aspik101/llama-30b-instruct-2048-PL-lora", + "CalderaAI/30B-Epsilon", + 
"Aspik101/llama-30b-2048-instruct-PL-lora_unload", + "jondurbin/airoboros-65b-gpt4-m2.0", + "jondurbin/airoboros-65b-gpt4-m2.0", + "Aeala/Alpaca-elina-65b", + "TheBloke/robin-65b-v2-fp16", + "TheBloke/gpt4-alpaca-lora-30b-HF", + "TheBloke/Llama-2-70B-chat-GPTQ", + "upstage/llama-30b-instruct", + "OpenLemur/lemur-70b-v1", + "lmsys/vicuna-33b-v1.3", + "ausboss/llama-30b-supercot", + "ai-business/Luban-13B", + "Henk717/airochronos-33B", + "lmsys/vicuna-33b-v1.3", + "Henk717/airochronos-33B", + "bavest/fin-llama-33b-merged", + "jondurbin/airoboros-33b-gpt4-1.4", + "YeungNLP/firefly-llama-30b", + "Aspik101/30B-Lazarus-instruct-PL-lora_unload", + "uukuguy/speechless-llama2-luban-orca-platypus-13b", + "xxyyy123/test_merge_p_ov1_w0.66_w0.5_n1", + "jondurbin/airoboros-33b-gpt4-1.2", + "TheBloke/alpaca-lora-65B-HF", + "bofenghuang/vigogne-33b-instruct", + "yeontaek/llama-2-13B-ensemble-v5", + "garage-bAInd/Platypus-30B", + "Open-Orca/OpenOrca-Platypus2-13B", + "kajdun/viwaai-30b_v4", + "lilloukas/Platypus-30B", + "Open-Orca/OpenOrca-Platypus2-13B", + "Henk717/chronoboros-33B", + "jondurbin/airoboros-33b-2.1", + "HiTZ/alpaca-lora-65b-en-pt-es-ca", + "quantumaikr/QuantumLM-70B-hf", + "uukuguy/speechless-llama2-13b", + "uukuguy/speechless-llama2-hermes-orca-platypus-13b", + "openaccess-ai-collective/manticore-30b-chat-pyg-alpha", + "LLMs/WizardLM-30B-V1.0", + "TheBloke/WizardLM-30B-fp16", + "openaccess-ai-collective/hippogriff-30b-chat", + "concedo/Vicuzard-30B-Uncensored", + "TFLai/OpenOrca-Platypus2-13B-QLoRA-0.80-epoch", + "huggingface/llama-65b", + "huggyllama/llama-65b", + "gaodrew/gaodrew-llama-30b-instruct-2048-Open-Platypus-100steps", + "uukuguy/speechless-llama2-hermes-orca-platypus-wizardlm-13b", + "Sao10K/Mythical-Destroyer-V2-L2-13B", + "camel-ai/CAMEL-33B-Combined-Data", + "dsvv-cair/alpaca-cleaned-llama-30b-bf16", + "MetaIX/GPT4-X-Alpasta-30b", + "garage-bAInd/Stable-Platypus2-13B", + "TFLai/Luban-Platypus2-13B-QLora-0.80-epoch", + "TheBloke/OpenOrca-Platypus2-13B-GPTQ", + "IkariDev/Athena-tmp", + "OpenBuddyEA/openbuddy-llama-30b-v7.1-bf16", + "OpenBuddyEA/openbuddy-llama-30b-v7.1-bf16", + "Open-Orca/OpenOrcaxOpenChat-Preview2-13B", + "psmathur/model_007_13b_v2", + "Aspik101/Vicuzard-30B-Uncensored-instruct-PL-lora_unload", + "jondurbin/airoboros-33b-gpt4-m2.0", + "Sao10K/Mythical-Destroyer-L2-13B", + "TheBloke/Wizard-Vicuna-30B-Uncensored-fp16", + "ehartford/Wizard-Vicuna-30B-Uncensored", + "TFLai/Nova-13B", + "TheBloke/robin-33B-v2-fp16", + "totally-not-an-llm/PuddleJumper-13b", + "Aeala/VicUnlocked-alpaca-30b", + "Yhyu13/oasst-rlhf-2-llama-30b-7k-steps-hf", + "jondurbin/airoboros-33b-gpt4", + "jondurbin/airoboros-33b-gpt4-m2.0", + "tiiuae/falcon-40b-instruct", + "psmathur/orca_mini_v3_13b", + "Aeala/GPT4-x-AlpacaDente-30b", + "MayaPH/GodziLLa-30B", + "jondurbin/airoboros-33b-gpt4-m2.0", + "TFLai/SpeechlessV1-Nova-13B", + "yeontaek/llama-2-13B-ensemble-v4", + "ajibawa-2023/carl-33b", + "jondurbin/airoboros-33b-gpt4-2.0", + "TFLai/Stable-Platypus2-13B-QLoRA-0.80-epoch", + "jondurbin/airoboros-33b-gpt4-1.3", + "TehVenom/oasst-sft-6-llama-33b-xor-MERGED-16bit", + "TFLai/OrcaMini-Platypus2-13B-QLoRA-0.80-epoch", + "jondurbin/airoboros-33b-gpt4-2.0", + "chargoddard/Chronorctypus-Limarobormes-13b", + "jondurbin/airoboros-33b-gpt4-1.3", + "Open-Orca/OpenOrca-Platypus2-13B", + "FelixChao/vicuna-33b-coder", + "FelixChao/vicuna-33b-coder", + "Gryphe/MythoMix-L2-13b", + "Aeala/Enterredaas-33b", + "yeontaek/llama-2-13B-ensemble-v1", + "TFLai/OpenOrcaPlatypus2-Platypus2-13B-QLora-0.80-epoch", + 
"TFLai/Ensemble5-Platypus2-13B-QLora-0.80-epoch", + "yeontaek/llama-2-13B-ensemble-v3", + "TFLai/MythoMix-Platypus2-13B-QLoRA-0.80-epoch", + "yihan6324/llama2-13b-instructmining-40k-sharegpt", + "timdettmers/guanaco-33b-merged", + "TFLai/EnsembleV5-Nova-13B", + "circulus/Llama-2-13b-orca-v1", + "Undi95/ReMM-SLERP-L2-13B", + "Gryphe/MythoMax-L2-13b", + "stabilityai/StableBeluga-13B", + "circulus/Llama-2-13b-orca-v1", + "ehartford/WizardLM-30B-Uncensored", + "The-Face-Of-Goonery/huginnv1.2", + "TheBloke/OpenOrcaxOpenChat-Preview2-13B-GPTQ", + "Sao10K/Stheno-L2-13B", + "bofenghuang/vigogne-2-13b-instruct", + "The-Face-Of-Goonery/Huginn-13b-FP16", + "grimpep/L2-MythoMax22b-instruct-Falseblock", + "TFLai/Nous-Hermes-Platypus2-13B-QLoRA-0.80-epoch", + "yeontaek/Platypus2xOpenOrca-13B-IA3-v4", + "yeontaek/Platypus2xOpenOrca-13B-IA3", + "yeontaek/Platypus2xOpenOrca-13B-IA3-ensemble", + "Open-Orca/LlongOrca-13B-16k", + "Sao10K/Stheno-Inverted-L2-13B", + "garage-bAInd/Camel-Platypus2-13B", + "digitous/Alpacino30b", + "NousResearch/Nous-Hermes-Llama2-13b", + "yeontaek/Platypus2xOpenOrca-13B-IA3-v3", + "TFLai/MythicalDestroyerV2-Platypus2-13B-QLora-0.80-epoch", + "TheBloke/VicUnlocked-30B-LoRA-HF", + "Undi95/Nous-Hermes-13B-Code", + "The-Face-Of-Goonery/Chronos-Beluga-v2-13bfp16", + "NousResearch/Nous-Hermes-Llama2-13b", + "Monero/WizardLM-Uncensored-SuperCOT-StoryTelling-30b", + "TheBloke/Wizard-Vicuna-30B-Uncensored-GPTQ", + "Open-Orca/OpenOrcaxOpenChat-Preview2-13B", + "Austism/chronos-hermes-13b-v2", + "yeontaek/Platypus2xOpenOrca-13B-IA3-v2.1", + "yeontaek/Platypus2xOpenOrca-13B-IA3-v2", + "Gryphe/MythoLogic-L2-13b", + "augtoma/qCammel-13", + "YeungNLP/firefly-llama2-13b-v1.2", + "Aspik101/StableBeluga-13B-instruct-PL-lora_unload", + "andreaskoepf/llama2-13b-megacode2_min100", + "rombodawg/LosslessMegaCoder-llama2-13b-mini", + "yulan-team/YuLan-Chat-2-13b-fp16", + "elinas/chronos-33b", + "YeungNLP/firefly-llama2-13b", + "Sao10K/Medusa-13b", + "OptimalScale/robin-65b-v2-delta", + "minlik/chinese-alpaca-33b-merged", + "OpenAssistant/llama2-13b-megacode2-oasst", + "TheBloke/OpenAssistant-SFT-7-Llama-30B-HF", + "Undi95/UndiMix-v1-13b", + "ehartford/Samantha-1.11-13b", + "beaugogh/Llama2-13b-sharegpt4", + "Aeala/GPT4-x-AlpacaDente2-30b", + "luffycodes/nash-vicuna-13b-v1dot5-ep2-w-rag-w-simple", + "WizardLM/WizardLM-13B-V1.1", + "uukuguy/speechless-orca-platypus-coig-lite-2k-0.6e-13b", + "huggyllama/llama-30b", + "Undi95/ReMM-L2-13B-PIPPA", + "Undi95/ReMM-L2-13B", + "gaodrew/gaodrew-gorgonzola-13b", + "lmsys/vicuna-13b-v1.5", + "yeontaek/Platypus2xOpenOrca-13B-LoRa", + "Yhyu13/llama-30B-hf-openassitant", + "huggingface/llama-30b", + "lmsys/vicuna-13b-v1.5", + "TFLai/Athena-Platypus2-13B-QLora-0.80-epoch", + "TheBloke/dromedary-65b-lora-HF", + "yeontaek/llama-2-13b-Beluga-QLoRA", + "The-Face-Of-Goonery/Huginn-13b-V4", + "The-Face-Of-Goonery/Huginn-13b-v4.5", + "The-Face-Of-Goonery/Huginn-v3-13b", + "tiiuae/falcon-40b", + "WhoTookMyAmogusNickname/NewHope_HF_not_official", + "gaodrew/OpenOrca-Platypus2-13B-thera-1250", + "SLAM-group/NewHope", + "garage-bAInd/Platypus2-13B", + "migtissera/Synthia-13B", + "elinas/chronos-13b-v2", + "mosaicml/mpt-30b-chat", + "CHIH-HUNG/llama-2-13b-OpenOrca_5w", + "uukuguy/speechless-hermes-coig-lite-13b", + "TheBloke/tulu-30B-fp16", + "uukuguy/speechless-hermes-coig-lite-13b", + "xDAN-AI/xDAN_13b_l2_lora", + "lmsys/vicuna-13b-v1.5-16k", + "openchat/openchat_v3.1", + "CHIH-HUNG/llama-2-13b-dolphin_5w", + "Aspik101/vicuna-13b-v1.5-PL-lora_unload", + "Undi95/MLewd-L2-13B", + 
"ehartford/minotaur-llama2-13b-qlora", + "kajdun/iubaris-13b-v3", + "TFLai/Limarp-Platypus2-13B-QLoRA-0.80-epoch", + "openchat/openchat_v3.1", + "uukuguy/speechless-orca-platypus-coig-lite-4k-0.6e-13b", + "ziqingyang/chinese-alpaca-2-13b", + "TFLai/Airboros2.1-Platypus2-13B-QLora-0.80-epoch", + "yeontaek/llama-2-13b-Guanaco-QLoRA", + "lmsys/vicuna-13b-v1.5-16k", + "ehartford/based-30b", + "kingbri/airolima-chronos-grad-l2-13B", + "openchat/openchat_v3.2", + "uukuguy/speechless-orca-platypus-coig-lite-4k-0.5e-13b", + "yeontaek/Platypus2-13B-LoRa", + "kingbri/chronolima-airo-grad-l2-13B", + "openchat/openchat_v3.2", + "TFLai/PuddleJumper-Platypus2-13B-QLoRA-0.80-epoch", + "shareAI/llama2-13b-Chinese-chat", + "ehartford/WizardLM-1.0-Uncensored-Llama2-13b", + "Aspik101/Redmond-Puffin-13B-instruct-PL-lora_unload", + "yeontaek/llama-2-13B-ensemble-v6", + "WizardLM/WizardLM-13B-V1.2", + "TheBloke/WizardLM-13B-V1.1-GPTQ", + "bhenrym14/airophin-13b-pntk-16k-fp16", + "ehartford/WizardLM-1.0-Uncensored-Llama2-13b", + "Mikael110/llama-2-13b-guanaco-fp16", + "yeontaek/airoboros-2.1-llama-2-13B-QLoRa", + "CalderaAI/13B-Legerdemain-L2", + "grimpep/llama2-22b-wizard_vicuna", + "grimpep/llama2-22B-GPLATTY", + "bhenrym14/airophin-13b-pntk-16k-fp16", + "yeontaek/llama-2-13b-QLoRA", + "OpenAssistant/llama2-13b-orca-8k-3319", + "TheBloke/WizardLM-13B-V1-1-SuperHOT-8K-fp16", + "duliadotio/dulia-13b-8k-alpha", + "Undi95/LewdEngine", + "OpenBuddy/openbuddy-llama2-13b-v8.1-fp16", + "CHIH-HUNG/llama-2-13b-open_orca_20w", + "bhenrym14/airoboros-33b-gpt4-1.4.1-lxctx-PI-16384-fp16", + "FlagAlpha/Llama2-Chinese-13b-Chat", + "LLMs/WizardLM-13B-V1.0", + "chansung/gpt4-alpaca-lora-13b-decapoda-1024", + "TheBloke/wizardLM-13B-1.0-fp16", + "digitous/13B-Chimera", + "yeontaek/Platypus2xOpenOrcaxGuanaco-13B-LoRa", + "jondurbin/airoboros-l2-13b-2.1", + "Monero/WizardLM-30B-Uncensored-Guanaco-SuperCOT-30b", + "TheBloke/UltraLM-13B-fp16", + "openaccess-ai-collective/minotaur-13b-fixed", + "NousResearch/Redmond-Puffin-13B", + "KoboldAI/LLaMA2-13B-Holomax", + "Lajonbot/WizardLM-13B-V1.2-PL-lora_unload", + "yeontaek/Platypus2-13B-LoRa-v2", + "TheBloke/airoboros-13B-HF", + "jondurbin/airoboros-13b", + "jjaaaww/posi_13b", + "CoolWP/llama-2-13b-guanaco-fp16", + "yeontaek/Platypus2-13B-QLoRa", + "h2oai/h2ogpt-research-oig-oasst1-512-30b", + "dfurman/llama-2-13b-guanaco-peft", + "NousResearch/Redmond-Puffin-13B", + "pe-nlp/llama-2-13b-platypus-vicuna-wizard", + "CHIH-HUNG/llama-2-13b-dolphin_20w", + "NousResearch/Nous-Hermes-13b", + "NobodyExistsOnTheInternet/GiftedConvo13bLoraNoEconsE4", + "ehartford/Wizard-Vicuna-13B-Uncensored", + "TheBloke/Wizard-Vicuna-13B-Uncensored-HF", + "openchat/openchat_v3.2_super", + "bhenrym14/airophin-v2-13b-PI-8k-fp16", + "openaccess-ai-collective/manticore-13b", + "The-Face-Of-Goonery/Huginn-22b-Prototype", + "jphme/Llama-2-13b-chat-german", + "grimpep/llama2-28B-Airo03", + "TheBloke/Kimiko-v2-13B-fp16", + "FPHam/Free_Sydney_13b_HF", + "lmsys/vicuna-13b-v1.3", + "FelixChao/llama2-13b-math1.1", + "CalderaAI/13B-BlueMethod", + "meta-llama/Llama-2-13b-chat-hf", + "deepse/CodeUp-Llama-2-13b-chat-hf", + "WizardLM/WizardMath-13B-V1.0", + "WizardLM/WizardMath-13B-V1.0", + "HyperbeeAI/Tulpar-7b-v0", + "xxyyy123/test_qkvo_adptor", + "xxyyy123/mc_data_30k_from_platpus_orca_7b_10k_v1_lora_qkvo_rank14_v2", + "openchat/openchat_v2_w", + "FelixChao/llama2-13b-math1.1", + "psmathur/orca_mini_v3_7b", + "TehVenom/Metharme-13b-Merged", + "xxyyy123/10k_v1_lora_qkvo_rank14_v3", + "OpenAssistant/llama2-13b-orca-v2-8k-3166", 
+ "openaccess-ai-collective/wizard-mega-13b", + "jondurbin/airoboros-13b-gpt4-1.4", + "jondurbin/airoboros-13b-gpt4-1.4-fp16", + "Monero/Manticore-13b-Chat-Pyg-Guanaco", + "FelixChao/llama2-13b-math1.2", + "chargoddard/platypus-2-22b-relora", + "FelixChao/llama2-13b-math1.2", + "Gryphe/MythoBoros-13b", + "CalderaAI/13B-Ouroboros", + "OpenAssistant/llama2-13b-orca-v2-8k-3166", + "heegyu/LIMA2-13b-hf", + "digitous/13B-HyperMantis", + "Gryphe/MythoLogic-13b", + "TheBloke/Airoboros-L2-13B-2.1-GPTQ", + "chargoddard/platypus2-22b-relora", + "openchat/openchat_v2", + "yeontaek/Platypus2-13B-IA3", + "stabilityai/StableBeluga-7B", + "circulus/Llama-2-7b-orca-v1", + "budecosystem/genz-13b-v2", + "TheBloke/gpt4-x-vicuna-13B-HF", + "NobodyExistsOnTheInternet/GiftedConvo13bLoraNoEcons", + "zarakiquemparte/zarafusionex-1.1-l2-7b", + "Lajonbot/tableBeluga-7B-instruct-pl-lora_unload", + "jondurbin/airoboros-13b-gpt4", + "gaodrew/gaodrew-gorgonzola-13b", + "jondurbin/airoboros-13b-gpt4-1.1", + "TheBloke/gpt4-alpaca-lora-13B-HF", + "zarakiquemparte/zarablendex-vq-l2-7b", + "openaccess-ai-collective/manticore-13b-chat-pyg", + "Lajonbot/Llama-2-13b-hf-instruct-pl-lora_unload", + "NobodyExistsOnTheInternet/PuffedLIMA13bQLORA", + "xxyyy123/10k_v1_lora_qkvo_rank28_v2", + "jondurbin/airoboros-l2-13b-gpt4-1.4.1", + "dhmeltzer/Llama-2-13b-hf-eli5-wiki-1024_r_64_alpha_16", + "NobodyExistsOnTheInternet/PuffedConvo13bLoraE4", + "yihan6324/llama2-7b-instructmining-40k-sharegpt", + "CHIH-HUNG/llama-2-13b-Open_Platypus_and_ccp_2.6w", + "Aeala/GPT4-x-Alpasta-13b", + "psmathur/orca_mini_v2_13b", + "YeungNLP/firefly-llama-13b", + "psmathur/orca_mini_v2_13b", + "zarakiquemparte/zarafusionix-l2-7b", + "yihan6324/llama2-7b-instructmining-60k-sharegpt", + "yihan6324/llama-2-7b-instructmining-60k-sharegpt", + "layoric/llama-2-13b-code-alpaca", + "bofenghuang/vigogne-13b-instruct", + "Lajonbot/vicuna-13b-v1.3-PL-lora_unload", + "lvkaokao/llama2-7b-hf-chat-lora-v3", + "ehartford/dolphin-llama-13b", + "YeungNLP/firefly-llama-13b-v1.2", + "TheBloke/Kimiko-13B-fp16", + "kevinpro/Vicuna-13B-CoT", + "eachadea/vicuna-13b-1.1", + "pillowtalks-ai/delta13b", + "TheBloke/vicuna-13B-1.1-HF", + "TheBloke/Vicuna-13B-CoT-fp16", + "lmsys/vicuna-13b-delta-v1.1", + "lmsys/vicuna-13b-v1.1", + "xxyyy123/20k_v1_lora_qkvo_rank14_v2", + "TheBloke/guanaco-13B-HF", + "TheBloke/vicuna-13b-v1.3.0-GPTQ", + "edor/Stable-Platypus2-mini-7B", + "totally-not-an-llm/EverythingLM-13b-V2-16k", + "zarakiquemparte/zaraxe-l2-7b", + "beaugogh/Llama2-7b-openorca-mc-v2", + "TheBloke/Nous-Hermes-13B-SuperHOT-8K-fp16", + "quantumaikr/QuantumLM", + "jondurbin/airoboros-13b-gpt4-1.2", + "TheBloke/robin-13B-v2-fp16", + "TFLai/llama-2-13b-4bit-alpaca-gpt4", + "yihan6324/llama2-7b-instructmining-orca-40k", + "dvruette/oasst-llama-13b-2-epochs", + "Open-Orca/LlongOrca-7B-16k", + "Aspik101/Nous-Hermes-13b-pl-lora_unload", + "ehartford/Samantha-1.11-CodeLlama-34b", + "nkpz/llama2-22b-chat-wizard-uncensored", + "bofenghuang/vigogne-13b-chat", + "beaugogh/Llama2-7b-openorca-mc-v1", + "OptimalScale/robin-13b-v2-delta", + "pe-nlp/llama-2-13b-vicuna-wizard", + "chargoddard/llama2-22b", + "gywy/llama2-13b-chinese-v1", + "frank098/Wizard-Vicuna-13B-juniper", + "IGeniusDev/llama13B-quant8-testv1-openorca-customdataset", + "CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w-gate_up_down_proj", + "eachadea/vicuna-13b", + "yihan6324/llama2-7b-instructmining-orca-90k", + "chargoddard/llama2-22b-blocktriangular", + "luffycodes/mcq-vicuna-13b-v1.5", + "Yhyu13/chimera-inst-chat-13b-hf", + 
"luffycodes/mcq-vicuna-13b-v1.5", + "chargoddard/ypotryll-22b-epoch2-qlora", + "totally-not-an-llm/EverythingLM-13b-16k", + "luffycodes/mcq-hal-vicuna-13b-v1.5", + "openaccess-ai-collective/minotaur-13b", + "IGeniusDev/llama13B-quant8-testv1-openorca-customdataset", + "chargoddard/llama2-22b-blocktriangular", + "TFLai/Platypus2-13B-QLoRA-0.80-epoch", + "meta-llama/Llama-2-13b-hf", + "CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w-gate_up_down_proj", + "luffycodes/mcq-hal-vicuna-13b-v1.5", + "TheBloke/Llama-2-13B-fp16", + "TaylorAI/Flash-Llama-13B", + "shareAI/bimoGPT-llama2-13b", + "wahaha1987/llama_13b_sharegpt94k_fastchat", + "openchat/openchat_8192", + "CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w-q_k_v_o_proj", + "dvruette/llama-13b-pretrained-sft-do2", + "CHIH-HUNG/llama-2-13b-alpaca-test", + "OpenBuddy/openbuddy-llama2-13b-v11.1-bf16", + "CHIH-HUNG/llama-2-13b-FINETUNE2_TEST_2.2w", + "project-baize/baize-v2-13b", + "jondurbin/airoboros-l2-13b-gpt4-m2.0", + "yeontaek/Platypus2xOpenOrca-13B-LoRa-v2", + "CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w", + "xzuyn/Alpacino-SuperCOT-13B", + "jondurbin/airoboros-l2-13b-gpt4-2.0", + "aiplanet/effi-13b", + "clibrain/Llama-2-13b-ft-instruct-es", + "CHIH-HUNG/llama-2-13b-huangyt_Fintune_1_17w", + "bofenghuang/vigogne-2-7b-instruct", + "CHIH-HUNG/llama-2-13b-huangyt_FINETUNE2_3w-q_k_v_o_proj", + "bofenghuang/vigogne-2-7b-chat", + "aiplanet/effi-13b", + "haonan-li/bactrian-x-llama-13b-merged", + "beaugogh/Llama2-7b-sharegpt4", + "HWERI/Llama2-7b-sharegpt4", + "jondurbin/airoboros-13b-gpt4-1.3", + "jondurbin/airoboros-c34b-2.1", + "junelee/wizard-vicuna-13b", + "TheBloke/wizard-vicuna-13B-HF", + "Open-Orca/OpenOrca-Preview1-13B", + "TheBloke/h2ogpt-oasst1-512-30B-HF", + "TheBloke/Llama-2-13B-GPTQ", + "camel-ai/CAMEL-13B-Combined-Data", + "lmsys/vicuna-7b-v1.5", + "lmsys/vicuna-7b-v1.5-16k", + "lmsys/vicuna-7b-v1.5", + "ausboss/llama-13b-supercot", + "TheBloke/tulu-13B-fp16", + "NousResearch/Nous-Hermes-llama-2-7b", + "jlevin/guanaco-13b-llama-2", + "lmsys/vicuna-7b-v1.5-16k", + "dvruette/llama-13b-pretrained", + "nkpz/llama2-22b-daydreamer-v3", + "dvruette/llama-13b-pretrained-dropout", + "jondurbin/airoboros-l2-13b-2.1", + "LLMs/Stable-Vicuna-13B", + "64bits/LexPodLM-13B", + "lizhuang144/llama_mirror_13b_v1.0", + "TheBloke/stable-vicuna-13B-HF", + "zarakiquemparte/zaraxls-l2-7b", + "TheBloke/Llama-2-13B-GPTQ", + "Kiddyz/testlm-3", + "migtissera/Synthia-7B", + "zarakiquemparte/zarablend-l2-7b", + "mosaicml/mpt-30b-instruct", + "PocketDoc/Dans-PileOfSets-Mk1-llama-13b-merged", + "vonjack/Qwen-LLaMAfied-HFTok-7B-Chat", + "l3utterfly/llama2-7b-layla", + "Lajonbot/vicuna-7b-v1.5-PL-lora_unload", + "heegyu/LIMA-13b-hf", + "frank098/WizardLM_13B_juniper", + "ashercn97/manatee-7b", + "chavinlo/gpt4-x-alpaca", + "PocketDoc/Dans-PersonalityEngine-13b", + "ehartford/WizardLM-1.0-Uncensored-CodeLlama-34b", + "digitous/Alpacino13b", + "edor/Hermes-Platypus2-mini-7B", + "lvkaokao/llama2-7b-hf-chat-lora-v2", + "Kiddyz/testlm-1-1", + "Kiddyz/testlm", + "Kiddyz/testlm-1", + "Kiddyz/testlm2", + "radm/Philosophy-Platypus2-13b", + "aiplanet/effi-13b", + "Harshvir/Llama-2-7B-physics", + "YeungNLP/firefly-ziya-13b", + "LinkSoul/Chinese-Llama-2-7b", + "PeanutJar/LLaMa-2-PeanutButter_v10-7B", + "OpenBuddy/openbuddy-llama2-13b-v11-bf16", + "StudentLLM/Alpagasus-2-13B-QLoRA-pipeline", + "meta-llama/Llama-2-13b-hf", + "WizardLM/WizardCoder-Python-34B-V1.0", + "dvruette/llama-13b-pretrained-sft-epoch-1", + "camel-ai/CAMEL-13B-Role-Playing-Data", + "ziqingyang/chinese-llama-2-13b", + 
"rombodawg/LosslessMegaCoder-llama2-7b-mini", + "TheBloke/koala-13B-HF", + "lmsys/vicuna-7b-delta-v1.1", + "eachadea/vicuna-7b-1.1", + "Ejafa/vicuna_7B_vanilla_1.1", + "lvkaokao/llama2-7b-hf-chat-lora", + "OpenBuddy/openbuddy-atom-13b-v9-bf16", + "Norquinal/llama-2-7b-claude-chat-rp", + "Danielbrdz/Barcenas-7b", + "heegyu/WizardVicuna2-13b-hf", + "meta-llama/Llama-2-7b-chat-hf", + "PeanutJar/LLaMa-2-PeanutButter_v14-7B", + "PeanutJar/LLaMa-2-PeanutButter_v4-7B", + "davzoku/cria-llama2-7b-v1.3", + "OpenBuddy/openbuddy-atom-13b-v9-bf16", + "lvkaokao/llama2-7b-hf-instruction-lora", + "Tap-M/Luna-AI-Llama2-Uncensored", + "ehartford/Samantha-1.11-7b", + "WizardLM/WizardCoder-Python-34B-V1.0", + "TheBloke/Manticore-13B-Chat-Pyg-Guanaco-SuperHOT-8K-GPTQ", + "Mikael110/llama-2-7b-guanaco-fp16", + "garage-bAInd/Platypus2-7B", + "PeanutJar/LLaMa-2-PeanutButter_v18_B-7B", + "mosaicml/mpt-30b", + "garage-bAInd/Platypus2-7B", + "huggingface/llama-13b", + "dvruette/oasst-llama-13b-1000-steps", + "jordiclive/gpt4all-alpaca-oa-codealpaca-lora-13b", + "huggyllama/llama-13b", + "Voicelab/trurl-2-7b", + "TFLai/llama-13b-4bit-alpaca", + "gywy/llama2-13b-chinese-v2", + "lmsys/longchat-13b-16k", + "Aspik101/trurl-2-7b-pl-instruct_unload", + "WizardLM/WizardMath-7B-V1.0", + "Norquinal/llama-2-7b-claude-chat", + "TheTravellingEngineer/llama2-7b-chat-hf-dpo", + "HuggingFaceH4/starchat-beta", + "joehuangx/spatial-vicuna-7b-v1.5-LoRA", + "conceptofmind/LLongMA-2-13b-16k", + "tianyil1/denas-llama2", + "lmsys/vicuna-7b-v1.3", + "conceptofmind/LLongMA-2-13b-16k", + "openchat/opencoderplus", + "ajibawa-2023/scarlett-7b", + "dhmeltzer/llama-7b-SFT_eli5_wiki65k_1024_r_64_alpha_16_merged", + "psyche/kollama2-7b-v2", + "heegyu/LIMA2-7b-hf", + "dhmeltzer/llama-7b-SFT-qlora-eli5-wiki_DPO_ds_RM_top_2_1024_r_64_alpha_16", + "abhishek/llama2guanacotest", + "jondurbin/airoboros-l2-7b-2.1", + "llama-anon/instruct-13b", + "FelixChao/vicuna-7B-physics", + "Aspik101/Llama-2-7b-hf-instruct-pl-lora_unload", + "shibing624/chinese-alpaca-plus-13b-hf", + "davzoku/cria-llama2-7b-v1.3_peft", + "quantumaikr/llama-2-7b-hf-guanaco-1k", + "togethercomputer/Llama-2-7B-32K-Instruct", + "sia-ai/llama-2-7b-1-percent-open-orca-1000-steps-v0", + "TheTravellingEngineer/llama2-7b-hf-guanaco", + "Lajonbot/Llama-2-7b-chat-hf-instruct-pl-lora_unload", + "jondurbin/airoboros-l2-7b-gpt4-1.4.1", + "wahaha1987/llama_7b_sharegpt94k_fastchat", + "FelixChao/vicuna-7B-chemical", + "TinyPixel/llama2-7b-oa", + "chaoyi-wu/MedLLaMA_13B", + "edor/Platypus2-mini-7B", + "RoversX/llama-2-7b-hf-small-shards-Samantha-V1-SFT", + "venkycs/llama-v2-7b-32kC-Security", + "psyche/kollama2-7b", + "Fredithefish/Guanaco-7B-Uncensored", + "TheTravellingEngineer/llama2-7b-chat-hf-guanaco", + "ehartford/WizardLM-13B-Uncensored", + "PocketDoc/Dans-CreepingSenseOfDoom", + "wenge-research/yayi-7b-llama2", + "georgesung/llama2_7b_chat_uncensored", + "TinyPixel/llama2-7b-instruct", + "quantumaikr/QuantumLM-7B", + "xzuyn/MedicWizard-7B", + "wenge-research/yayi-7b-llama2", + "TinyPixel/lima-test", + "elyza/ELYZA-japanese-Llama-2-7b-instruct", + "lgaalves/llama-2-7b-hf_open-platypus", + "ziqingyang/chinese-alpaca-2-7b", + "TehVenom/Pygmalion-Vicuna-1.1-7b", + "meta-llama/Llama-2-7b-hf", + "bongchoi/test-llama2-7b", + "TaylorAI/Flash-Llama-7B", + "TheTravellingEngineer/llama2-7b-chat-hf-v2", + "TheTravellingEngineer/llama2-7b-chat-hf-v4", + "kashif/stack-llama-2", + "PeanutJar/LLaMa-2-PeanutButter_v18_A-7B", + "ToolBench/ToolLLaMA-7b-LoRA", + "Monero/WizardLM-13b-OpenAssistant-Uncensored", + 
"TheTravellingEngineer/llama2-7b-chat-hf-v2", + "TheTravellingEngineer/llama2-7b-chat-hf-v4", + "mrm8488/llama-2-coder-7b", + "elyza/ELYZA-japanese-Llama-2-7b-fast-instruct", + "clibrain/Llama-2-7b-ft-instruct-es", + "medalpaca/medalpaca-7b", + "TheBloke/tulu-7B-fp16", + "OpenBuddy/openbuddy-openllama-13b-v7-fp16", + "TaylorAI/FLAN-Llama-7B-2_Llama2-7B-Flash_868_full_model", + "Aspik101/vicuna-7b-v1.3-instruct-pl-lora_unload", + "jondurbin/airoboros-l2-7b-gpt4-2.0", + "dhmeltzer/llama-7b-SFT_ds_eli5_1024_r_64_alpha_16_merged", + "GOAT-AI/GOAT-7B-Community", + "AtomEchoAI/AtomGPT_56k", + "julianweng/Llama-2-7b-chat-orcah", + "TehVenom/Pygmalion-13b-Merged", + "jondurbin/airoboros-7b-gpt4-1.1", + "dhmeltzer/llama-7b-SFT_ds_wiki65k_1024_r_64_alpha_16_merged", + "bofenghuang/vigogne-7b-chat", + "lmsys/longchat-7b-v1.5-32k", + "jondurbin/airoboros-l2-7b-gpt4-m2.0", + "synapsoft/Llama-2-7b-chat-hf-flan2022-1.2M", + "jondurbin/airoboros-7b-gpt4-1.4", + "Charlie911/vicuna-7b-v1.5-lora-mctaco", + "yihan6324/instructmining-platypus-15k", + "meta-llama/Llama-2-7b-hf", + "TheTravellingEngineer/llama2-7b-chat-hf-v3", + "quantumaikr/KoreanLM-hf", + "openthaigpt/openthaigpt-1.0.0-alpha-7b-chat-ckpt-hf", + "TheBloke/Llama-2-7B-GPTQ", + "TheBloke/Llama-2-7B-GPTQ", + "LLMs/AlpacaGPT4-7B-elina", + "ehartford/Wizard-Vicuna-7B-Uncensored", + "TheBloke/Wizard-Vicuna-7B-Uncensored-HF", + "TheTravellingEngineer/llama2-7b-chat-hf-v3", + "golaxy/gowizardlm", + "ehartford/dolphin-llama2-7b", + "CHIH-HUNG/llama-2-7b-dolphin_10w-test", + "mncai/chatdoctor", + "psyche/kollama2-7b-v3", + "jondurbin/airoboros-7b-gpt4", + "jondurbin/airoboros-7b", + "TheBloke/airoboros-7b-gpt4-fp16", + "mosaicml/mpt-7b-8k-chat", + "elyza/ELYZA-japanese-Llama-2-7b", + "bofenghuang/vigogne-7b-instruct", + "jxhong/CAlign-alpaca-7b", + "golaxy/goims", + "jondurbin/airoboros-7b-gpt4-1.2", + "jphme/orca_mini_v2_ger_7b", + "psmathur/orca_mini_v2_7b", + "notstoic/PygmalionCoT-7b", + "golaxy/gogpt2-13b", + "golaxy/gogpt2-13b-chat", + "togethercomputer/LLaMA-2-7B-32K", + "TheBloke/wizardLM-7B-HF", + "keyfan/vicuna-chinese-replication-v1.1", + "golaxy/gogpt2-7b", + "aiplanet/effi-7b", + "arver/llama7b-qlora", + "titan087/OpenLlama13B-Guanaco", + "chavinlo/alpaca-native", + "project-baize/baize-healthcare-lora-7B", + "AlpinDale/pygmalion-instruct", + "openlm-research/open_llama_13b", + "jondurbin/airoboros-7b-gpt4-1.3", + "elyza/ELYZA-japanese-Llama-2-7b-fast", + "jondurbin/airoboros-gpt-3.5-turbo-100k-7b", + "uukuguy/speechless-codellama-orca-13b", + "bigcode/starcoderplus", + "TheBloke/guanaco-7B-HF", + "Neko-Institute-of-Science/metharme-7b", + "TigerResearch/tigerbot-7b-base", + "golaxy/gogpt-7b", + "togethercomputer/LLaMA-2-7B-32K", + "yhyhy3/open_llama_7b_v2_med_instruct", + "ajibawa-2023/carl-7b", + "stabilityai/stablelm-base-alpha-7b-v2", + "conceptofmind/LLongMA-2-7b-16k", + "TehVenom/Pygmalion_AlpacaLora-7b", + "jondurbin/airoboros-7b-gpt4-1.4.1-qlora", + "wannaphong/openthaigpt-0.1.0-beta-full-model_for_open_llm_leaderboard", + "ausboss/llama7b-wizardlm-unfiltered", + "project-baize/baize-v2-7b", + "LMFlow/Robin-v2", + "HanningZhang/Robin-v2", + "LMFlow/Robin-7b-v2", + "OptimalScale/robin-7b-v2-delta", + "uukuguy/speechless-codellama-platypus-13b", + "jerryjalapeno/nart-100k-7b", + "wenge-research/yayi-13b-llama2", + "fireballoon/baichuan-vicuna-chinese-7b", + "jlevin/guanaco-unchained-llama-2-7b", + "csitfun/llama-7b-logicot", + "DevaMalla/llama7b_alpaca_1gpu_bf16", + "WeOpenML/PandaLM-Alpaca-7B-v1", + "illuin/test-custom-llama", + 
"yeontaek/WizardCoder-Python-13B-LoRa", + "ashercn97/giraffe-7b", + "mosaicml/mpt-7b-chat", + "abhishek/autotrain-llama-alpaca-peft-52508123785", + "Neko-Institute-of-Science/pygmalion-7b", + "TFLai/llama-7b-4bit-alpaca", + "huggingface/llama-7b", + "TheBloke/Planner-7B-fp16", + "shibing624/chinese-llama-plus-13b-hf", + "AGI-inc/lora_moe_7b_baseline", + "DevaMalla/llama-base-7b", + "AGI-inc/lora_moe_7b", + "togethercomputer/GPT-JT-6B-v0", + "ehartford/WizardLM-7B-Uncensored", + "shibing624/chinese-alpaca-plus-7b-hf", + "beomi/llama-2-ko-7b", + "mosaicml/mpt-7b-8k-instruct", + "Enno-Ai/ennodata-7b", + "mosaicml/mpt-7b-instruct", + "facebook/opt-iml-max-30b", + "WeOpenML/Alpaca-7B-v1", + "TheBloke/Project-Baize-v2-7B-GPTQ", + "codellama/CodeLlama-13b-Instruct-hf", + "TheBloke/CodeLlama-13B-Instruct-fp16", + "facebook/galactica-30b", + "FreedomIntelligence/phoenix-inst-chat-7b", + "openlm-research/open_llama_7b_v2", + "GeorgiaTechResearchInstitute/galpaca-30b", + "THUDM/chatglm2-6b", + "togethercomputer/GPT-JT-6B-v1", + "TheBloke/koala-7B-HF", + "nathan0/mpt_delta_tuned_model_v3", + "nathan0/mpt_delta_tuned_model_v2", + "GeorgiaTechResearchInstitute/galpaca-30b", + "JosephusCheung/Guanaco", + "shareAI/CodeLLaMA-chat-13b-Chinese", + "TigerResearch/tigerbot-7b-sft", + "Writer/InstructPalmyra-20b", + "OpenAssistant/codellama-13b-oasst-sft-v10", + "bigscience/bloomz-7b1-mt", + "nathan0/mpt_delta_tuned_model_v3", + "VMware/open-llama-7b-open-instruct", + "baichuan-inc/Baichuan-7B", + "anas-awadalla/mpt-7b", + "mosaicml/mpt-7b", + "bigscience/bloomz-7b1", + "ziqingyang/chinese-llama-2-7b", + "OpenAssistant/codellama-13b-oasst-sft-v10", + "wenge-research/yayi-7b", + "tiiuae/falcon-7b", + "togethercomputer/RedPajama-INCITE-Instruct-7B-v0.1", + "togethercomputer/RedPajama-INCITE-7B-Instruct", + "TheBloke/landmark-attention-llama7b-fp16", + "togethercomputer/GPT-JT-Moderation-6B", + "h2oai/h2ogpt-gm-oasst1-en-1024-20b", + "dvruette/gpt-neox-20b-full-precision", + "TehVenom/Moderator-Chan_GPT-JT-6b", + "dvruette/oasst-gpt-neox-20b-1000-steps", + "AlekseyKorshuk/pygmalion-6b-vicuna-chatml", + "facebook/opt-66b", + "Salesforce/codegen-16B-nl", + "Vmware/open-llama-7b-v2-open-instruct", + "mosaicml/mpt-7b-storywriter", + "acrastt/Marx-3B-V2", + "openlm-research/open_llama_7b", + "Fredithefish/ReasonixPajama-3B-HF", + "togethercomputer/GPT-NeoXT-Chat-Base-20B", + "psmathur/orca_mini_13b", + "RWKV/rwkv-raven-14b", + "h2oai/h2ogpt-oasst1-512-20b", + "acrastt/Marx-3B", + "klosax/open_llama_13b_600bt_preview", + "synapsoft/Llama-2-7b-hf-flan2022-1.2M", + "OpenAssistant/oasst-sft-1-pythia-12b", + "golaxy/gogpt-7b-bloom", + "Writer/palmyra-large", + "psmathur/orca_mini_7b", + "dvruette/oasst-pythia-12b-6000-steps", + "NousResearch/CodeLlama-13b-hf", + "codellama/CodeLlama-13b-hf", + "h2oai/h2ogpt-gm-oasst1-multilang-1024-20b", + "VMware/open-llama-0.7T-7B-open-instruct-v1.1", + "dvruette/oasst-pythia-12b-flash-attn-5000-steps", + "dvruette/oasst-gpt-neox-20b-3000-steps", + "RobbeD/OpenLlama-Platypus-3B", + "facebook/opt-30b", + "acrastt/Puma-3B", + "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", + "dvruette/oasst-pythia-12b-pretrained-sft", + "digitous/GPT-R", + "acrastt/Griffin-3B", + "togethercomputer/RedPajama-INCITE-Base-7B-v0.1", + "togethercomputer/RedPajama-INCITE-7B-Base", + "CobraMamba/mamba-gpt-3b-v3", + "Danielbrdz/CodeBarcenas-7b", + "l3utterfly/open-llama-3b-v2-layla", + "CobraMamba/mamba-gpt-3b-v2", + "OpenAssistant/pythia-12b-sft-v8-7k-steps", + "KoboldAI/GPT-NeoX-20B-Erebus", + 
"RobbeD/Orca-Platypus-3B", + "h2oai/h2ogpt-gm-oasst1-en-1024-12b", + "OpenAssistant/pythia-12b-sft-v8-2.5k-steps", + "AlekseyKorshuk/chatml-pyg-v1", + "togethercomputer/RedPajama-INCITE-Chat-7B-v0.1", + "togethercomputer/RedPajama-INCITE-7B-Chat", + "digitous/Javelin-R", + "dvruette/oasst-pythia-12b-reference", + "EleutherAI/gpt-neox-20b", + "KoboldAI/fairseq-dense-13B", + "OpenAssistant/pythia-12b-sft-v8-rlhf-2k-steps", + "codellama/CodeLlama-7b-Instruct-hf", + "digitous/Javelin-GPTJ", + "KoboldAI/GPT-NeoX-20B-Skein", + "digitous/Javalion-R", + "h2oai/h2ogpt-oasst1-512-12b", + "acrastt/Bean-3B", + "KoboldAI/GPT-J-6B-Skein", + "nomic-ai/gpt4all-j", + "databricks/dolly-v2-12b", + "TehVenom/Dolly_Shygmalion-6b-Dev_V8P2", + "databricks/dolly-v2-7b", + "Aspik101/WizardVicuna-Uncensored-3B-instruct-PL-lora_unload", + "digitous/Adventien-GPTJ", + "openlm-research/open_llama_3b_v2", + "RWKV/rwkv-4-14b-pile", + "Lazycuber/Janemalion-6B", + "OpenAssistant/pythia-12b-pre-v8-12.5k-steps", + "digitous/Janin-R", + "kfkas/Llama-2-ko-7b-Chat", + "heegyu/WizardVicuna-Uncensored-3B-0719", + "h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt", + "TaylorAI/Flash-Llama-3B", + "kfkas/Llama-2-ko-7b-Chat", + "digitous/Skegma-GPTJ", + "digitous/Javalion-GPTJ", + "Pirr/pythia-13b-deduped-green_devil", + "TehVenom/PPO_Shygmalion-V8p4_Dev-6b", + "dvruette/oasst-pythia-6.9b-4000-steps", + "heegyu/WizardVicuna-3B-0719", + "psmathur/orca_mini_3b", + "OpenAssistant/galactica-6.7b-finetuned", + "frank098/orca_mini_3b_juniper", + "PygmalionAI/pygmalion-6b", + "TehVenom/PPO_Pygway-V8p4_Dev-6b", + "TFLai/gpt-neox-20b-4bit-alpaca", + "Corianas/gpt-j-6B-Dolly", + "TehVenom/Dolly_Shygmalion-6b", + "digitous/Janin-GPTJ", + "TehVenom/GPT-J-Pyg_PPO-6B-Dev-V8p4", + "EleutherAI/gpt-j-6b", + "KoboldAI/GPT-J-6B-Shinen", + "TehVenom/Dolly_Malion-6b", + "TehVenom/ChanMalion", + "Salesforce/codegen-6B-nl", + "Fredithefish/RedPajama-INCITE-Chat-3B-Instruction-Tuning-with-GPT-4", + "KoboldAI/GPT-J-6B-Janeway", + "togethercomputer/RedPajama-INCITE-Chat-3B-v1", + "togethercomputer/Pythia-Chat-Base-7B", + "heegyu/RedTulu-Uncensored-3B-0719", + "KoboldAI/PPO_Pygway-6b-Mix", + "KoboldAI/OPT-13B-Erebus", + "KoboldAI/fairseq-dense-6.7B", + "EleutherAI/pythia-12b-deduped", + "pszemraj/pythia-6.9b-HC3", + "Fredithefish/Guanaco-3B-Uncensored-v2", + "facebook/opt-13b", + "TehVenom/GPT-J-Pyg_PPO-6B", + "EleutherAI/pythia-6.9b-deduped", + "Devio/test-1400", + "Fredithefish/Guanaco-3B-Uncensored", + "codellama/CodeLlama-7b-hf", + "acrastt/RedPajama-INCITE-Chat-Instruct-3B-V1", + "Fredithefish/ScarletPajama-3B-HF", + "KoboldAI/OPT-13B-Nerybus-Mix", + "YeungNLP/firefly-bloom-7b1", + "DanielSc4/RedPajama-INCITE-Chat-3B-v1-RL-LoRA-8bit-test1", + "klosax/open_llama_7b_400bt_preview", + "KoboldAI/OPT-13B-Nerys-v2", + "TehVenom/PPO_Shygmalion-6b", + "amazon/LightGPT", + "KnutJaegersberg/black_goo_recipe_c", + "NousResearch/CodeLlama-7b-hf", + "togethercomputer/RedPajama-INCITE-Instruct-3B-v1", + "heegyu/WizardVicuna-open-llama-3b-v2", + "bigscience/bloom-7b1", + "Devio/test-22B", + "RWKV/rwkv-raven-7b", + "hakurei/instruct-12b", + "CobraMamba/mamba-gpt-3b", + "KnutJaegersberg/black_goo_recipe_a", + "acrastt/OmegLLaMA-3B", + "codellama/CodeLlama-7b-Instruct-hf", + "h2oai/h2ogpt-oig-oasst1-512-6_9b", + "KoboldAI/OPT-6.7B-Erebus", + "facebook/opt-6.7b", + "KnutJaegersberg/black_goo_recipe_d", + "KnutJaegersberg/LLongMA-3b-LIMA", + "KnutJaegersberg/black_goo_recipe_b", + "KoboldAI/OPT-6.7B-Nerybus-Mix", + "health360/Healix-3B", + "EleutherAI/pythia-12b", 
+ "Fredithefish/RedPajama-INCITE-Chat-3B-ShareGPT-11K", + "GeorgiaTechResearchInstitute/galactica-6.7b-evol-instruct-70k", + "h2oai/h2ogpt-oig-oasst1-256-6_9b", + "ikala/bloom-zh-3b-chat", + "Taekyoon/llama2-ko-7b-test", + "anhnv125/pygmalion-6b-roleplay", + "TehVenom/DiffMerge_Pygmalion_Main-onto-V8P4", + "KoboldAI/OPT-6B-nerys-v2", + "Lazycuber/pyg-instruct-wizardlm", + "Devio/testC", + "KoboldAI/OPT-30B-Erebus", + "Fredithefish/CrimsonPajama", + "togethercomputer/RedPajama-INCITE-Base-3B-v1", + "bigscience/bloomz-3b", + "conceptofmind/Open-LLongMA-3b", + "RWKV/rwkv-4-7b-pile", + "openlm-research/open_llama_3b", + "ewof/koishi-instruct-3b", + "DanielSc4/RedPajama-INCITE-Chat-3B-v1-FT-LoRA-8bit-test1", + "cerebras/Cerebras-GPT-13B", + "EleutherAI/pythia-6.7b", + "aisquared/chopt-2_7b", + "Azure99/blossom-v1-3b", + "PSanni/Deer-3b", + "bertin-project/bertin-gpt-j-6B-alpaca", + "OpenBuddy/openbuddy-openllama-3b-v10-bf16", + "KoboldAI/fairseq-dense-2.7B", + "ehartford/CodeLlama-34b-Instruct-hf", + "codellama/CodeLlama-34b-Instruct-hf", + "TheBloke/CodeLlama-34B-Instruct-fp16", + "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt-v2", + "openlm-research/open_llama_7b_700bt_preview", + "NbAiLab/nb-gpt-j-6B-alpaca", + "KoboldAI/OPT-2.7B-Erebus", + "Writer/camel-5b-hf", + "EleutherAI/pythia-2.7b", + "facebook/xglm-7.5B", + "EleutherAI/pythia-2.8b-deduped", + "klosax/open_llama_3b_350bt_preview", + "klosax/openllama-3b-350bt", + "KoboldAI/OPT-2.7B-Nerybus-Mix", + "KoboldAI/GPT-J-6B-Adventure", + "cerebras/Cerebras-GPT-6.7B", + "TFLai/pythia-2.8b-4bit-alpaca", + "facebook/opt-2.7b", + "KoboldAI/OPT-2.7B-Nerys-v2", + "bigscience/bloom-3b", + "Devio/test100", + "RWKV/rwkv-raven-3b", + "Azure99/blossom-v2-3b", + "codellama/CodeLlama-34b-Python-hf", + "bhenrym14/airoboros-33b-gpt4-1.4.1-PI-8192-fp16", + "EleutherAI/gpt-neo-2.7B", + "danielhanchen/open_llama_3b_600bt_preview", + "HuggingFaceH4/starchat-alpha", + "pythainlp/wangchanglm-7.5B-sft-en-sharded", + "beaugogh/pythia-1.4b-deduped-sharegpt", + "HWERI/pythia-1.4b-deduped-sharegpt", + "OpenAssistant/stablelm-7b-sft-v7-epoch-3", + "codellama/CodeLlama-7b-Python-hf", + "aisquared/chopt-1_3b", + "PygmalionAI/metharme-1.3b", + "Linly-AI/Chinese-LLaMA-2-13B-hf", + "chargoddard/llama-2-34b-uncode", + "RWKV/rwkv-4-3b-pile", + "pythainlp/wangchanglm-7.5B-sft-enth", + "MBZUAI/LaMini-GPT-1.5B", + "Writer/palmyra-base", + "KoboldAI/fairseq-dense-1.3B", + "EleutherAI/pythia-1.4b-deduped", + "MBZUAI/lamini-neo-1.3b", + "h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-7b-preview-300bt", + "sartmis1/starcoder-finetune-openapi", + "MayaPH/opt-flan-iml-6.7b", + "facebook/xglm-4.5B", + "WizardLM/WizardCoder-15B-V1.0", + "facebook/opt-iml-max-1.3b", + "stabilityai/stablelm-tuned-alpha-7b", + "aisquared/dlite-v2-1_5b", + "stabilityai/stablelm-base-alpha-7b", + "sartmis1/starcoder-finetune-selfinstruct", + "lizhuang144/starcoder_mirror", + "bigcode/starcoder", + "TheBloke/CodeLlama-34B-Python-fp16", + "open-llm-leaderboard/bloomz-1b7-4bit-alpaca-auto-eval-adapter-applied", + "ehartford/CodeLlama-34b-Python-hf", + "codellama/CodeLlama-7b-Python-hf", + "GeorgiaTechResearchInstitute/starcoder-gpteacher-code-instruct", + "LoupGarou/WizardCoder-Guanaco-15B-V1.0", + "golaxy/gogpt-3b-bloom", + "EleutherAI/pythia-1.3b", + "codellama/CodeLlama-13b-Python-hf", + "hakurei/lotus-12B", + "NYTK/PULI-GPTrio", + "facebook/opt-1.3b", + "TheBloke/CodeLlama-13B-Python-fp16", + "codellama/CodeLlama-13b-Python-hf", + "RWKV/rwkv-raven-1b5", + "PygmalionAI/pygmalion-2.7b", + 
"bigscience/bloom-1b7", + "gpt2-xl", + "LoupGarou/WizardCoder-Guanaco-15B-V1.1", + "RWKV/rwkv-4-1b5-pile", + "codellama/CodeLlama-34b-hf", + "NousResearch/CodeLlama-34b-hf", + "rinna/bilingual-gpt-neox-4b-8k", + "lxe/Cerebras-GPT-2.7B-Alpaca-SP", + "cerebras/Cerebras-GPT-2.7B", + "jzjiao/opt-1.3b-rlhf", + "EleutherAI/gpt-neo-1.3B", + "aisquared/dlite-v1-1_5b", + "Corianas/Quokka_2.7b", + "MrNJK/gpt2-xl-sft", + "facebook/galactica-1.3b", + "aisquared/dlite-v2-774m", + "EleutherAI/pythia-1b-deduped", + "Kunhao/pile-7b-250b-tokens", + "w601sxs/b1ade-1b", + "rinna/bilingual-gpt-neox-4b", + "shaohang/SparseOPT-1.3B", + "shaohang/Sparse0.5_OPT-1.3", + "EleutherAI/polyglot-ko-12.8b", + "Salesforce/codegen-6B-multi", + "bigscience/bloom-1b1", + "TFLai/gpt-neo-1.3B-4bit-alpaca", + "FabbriSimo01/Bloom_1b_Quantized", + "MBZUAI/LaMini-GPT-774M", + "Locutusque/gpt2-large-conversational", + "Devio/test-3b", + "stabilityai/stablelm-tuned-alpha-3b", + "PygmalionAI/pygmalion-1.3b", + "KoboldAI/fairseq-dense-355M", + "Rachneet/gpt2-xl-alpaca", + "gpt2-large", + "Mikivis/gpt2-large-lora-sft", + "stabilityai/stablelm-base-alpha-3b", + "gpt2-medium", + "Kunhao/pile-7b", + "aisquared/dlite-v1-774m", + "aisquared/dlite-v2-355m", + "YeungNLP/firefly-bloom-2b6-v2", + "KnutJaegersberg/gpt-2-xl-EvolInstruct", + "KnutJaegersberg/galactica-orca-wizardlm-1.3b", + "cerebras/Cerebras-GPT-1.3B", + "FabbriSimo01/Cerebras_1.3b_Quantized", + "facebook/xglm-1.7B", + "EleutherAI/pythia-410m-deduped", + "TheBloke/GPlatty-30B-SuperHOT-8K-fp16", + "DataLinguistic/DataLinguistic-34B-V1.0", + "Corianas/Quokka_1.3b", + "TheTravellingEngineer/bloom-560m-RLHF-v2", + "Corianas/1.3b", + "RWKV/rwkv-4-430m-pile", + "porkorbeef/Llama-2-13b-sf", + "xhyi/PT_GPTNEO350_ATG", + "TheBloke/Wizard-Vicuna-13B-Uncensored-GPTQ", + "bigscience/bloomz-560m", + "TheBloke/medalpaca-13B-GPTQ-4bit", + "TheBloke/Vicuna-33B-1-3-SuperHOT-8K-fp16", + "aisquared/dlite-v1-355m", + "uukuguy/speechless-codellama-orca-airoboros-13b-0.10e", + "yhyhy3/med-orca-instruct-33b", + "TheBloke/Wizard-Vicuna-30B-Superhot-8K-fp16", + "TheTravellingEngineer/bloom-1b1-RLHF", + "MBZUAI/lamini-cerebras-1.3b", + "IDEA-CCNL/Ziya-LLaMA-13B-Pretrain-v1", + "TheBloke/WizardLM-7B-uncensored-GPTQ", + "TheBloke/EverythingLM-13B-16K-GPTQ", + "quantumaikr/open_llama_7b_hf", + "TheBloke/chronos-wizardlm-uc-scot-st-13B-GPTQ", + "TheBloke/WizardLM-30B-Uncensored-GPTQ", + "IDEA-CCNL/Ziya-LLaMA-13B-v1", + "Phind/Phind-CodeLlama-34B-v1", + "robowaifudev/megatron-gpt2-345m", + "MayaPH/GodziLLa-30B-instruct", + "TheBloke/CAMEL-33B-Combined-Data-SuperHOT-8K-fp16", + "uukuguy/speechless-codellama-orca-platypus-13b-0.10e", + "doas/test2", + "BreadAi/PM_modelV2", + "bigcode/santacoder", + "TheBloke/wizard-vicuna-13B-GPTQ", + "porkorbeef/Llama-2-13b", + "TehVenom/DiffMerge-DollyGPT-Pygmalion", + "PygmalionAI/pygmalion-350m", + "TheBloke/orca_mini_v3_7B-GPTQ", + "TheBloke/WizardLM-Uncensored-SuperCOT-StoryTelling-30B-GPTQ", + "TheBloke/WizardLM-30B-GPTQ", + "bigscience/bloom-560m", + "TFLai/gpt2-turkish-uncased", + "TheBloke/guanaco-33B-GPTQ", + "TheBloke/openchat_v2_openorca_preview-GPTQ", + "porkorbeef/Llama-2-13b-public", + "TheBloke/LongChat-13B-GPTQ", + "yhyhy3/med-orca-instruct-33b", + "TheBloke/airoboros-33B-gpt4-1-4-SuperHOT-8K-fp16", + "TheBloke/Chinese-Alpaca-33B-SuperHOT-8K-fp16", + "MayaPH/FinOPT-Franklin", + "TheBloke/WizardLM-33B-V1.0-Uncensored-GPTQ", + "TheBloke/Project-Baize-v2-13B-GPTQ", + "malhajar/Platypus2-70B-instruct-4bit-gptq", + "KoboldAI/OPT-350M-Erebus", + 
"rishiraj/bloom-560m-guanaco", + "Panchovix/WizardLM-33B-V1.0-Uncensored-SuperHOT-8k", + "doas/test5", + "vicgalle/alpaca-7b", + "beomi/KoAlpaca-Polyglot-5.8B", + "Phind/Phind-CodeLlama-34B-Python-v1", + "timdettmers/guanaco-65b-merged", + "TheBloke/wizard-mega-13B-GPTQ", + "MayaPH/GodziLLa-30B-plus", + "TheBloke/Platypus-30B-SuperHOT-8K-fp16", + "facebook/opt-350m", + "KoboldAI/OPT-350M-Nerys-v2", + "TheBloke/robin-33B-v2-GPTQ", + "jaspercatapang/Echidna-30B", + "TheBloke/llama-30b-supercot-SuperHOT-8K-fp16", + "marcchew/test1", + "Harshvir/LaMini-Neo-1.3B-Mental-Health_lora", + "golaxy/gogpt-560m", + "TheBloke/orca_mini_13B-GPTQ", + "Panchovix/airoboros-33b-gpt4-1.2-SuperHOT-8k", + "Aspik101/tulu-7b-instruct-pl-lora_unload", + "Phind/Phind-CodeLlama-34B-v2", + "BreadAi/MusePy-1-2", + "cerebras/Cerebras-GPT-590M", + "microsoft/CodeGPT-small-py", + "victor123/WizardLM-13B-1.0", + "OptimalScale/robin-65b-v2-delta", + "voidful/changpt-bart", + "FabbriSimo01/GPT_Large_Quantized", + "MayaPH/FinOPT-Lincoln", + "KoboldAI/fairseq-dense-125M", + "SebastianSchramm/Cerebras-GPT-111M-instruction", + "TheTravellingEngineer/bloom-560m-RLHF", + "breadlicker45/dough-instruct-base-001", + "WizardLM/WizardLM-30B-V1.0", + "WizardLM/WizardLM-30B-V1.0", + "WizardLM/WizardLM-30B-V1.0", + "TaylorAI/Flash-Llama-30M-20001", + "porkorbeef/Llama-2-13b-12_153950", + "huggingtweets/bladeecity-jerma985", + "KnutJaegersberg/megatron-GPT-2-345m-EvolInstruct", + "bhenrym14/airoboros-33b-gpt4-1.4.1-lxctx-PI-16384-fp16", + "microsoft/DialoGPT-small", + "Corianas/590m", + "facebook/xglm-564M", + "EleutherAI/gpt-neo-125m", + "EleutherAI/pythia-160m-deduped", + "klosax/pythia-160m-deduped-step92k-193bt", + "MBZUAI/lamini-neo-125m", + "bigcode/tiny_starcoder_py", + "concedo/OPT-19M-ChatSalad", + "anton-l/gpt-j-tiny-random", + "grantprice/Cerebras-GPT-590M-finetuned-DND", + "deepnight-research/zsc-text", + "WangZeJun/bloom-820m-chat", + "cerebras/Cerebras-GPT-256M", + "ai-forever/rugpt3large_based_on_gpt2", + "alibidaran/medical_transcription_generator", + "Deci/DeciCoder-1b", + "microsoft/DialoGPT-medium", + "ogimgio/gpt-neo-125m-neurallinguisticpioneers", + "open-llm-leaderboard/bloom-560m-4bit-alpaca-auto-eval-adapter-applied", + "BreadAi/gpt-YA-1-1_160M", + "microsoft/DialoGPT-large", + "facebook/opt-125m", + "huggingtweets/jerma985", + "Locutusque/gpt2-conversational-or-qa", + "concedo/Pythia-70M-ChatSalad", + "roneneldan/TinyStories-1M", + "BreadAi/DiscordPy", + "bigcode/gpt_bigcode-santacoder", + "Tincando/fiction_story_generator", + "klosax/pythia-70m-deduped-step44k-92bt", + "Quake24/easyTermsSummerizer", + "BreadAi/gpt-YA-1-1_70M", + "EleutherAI/pythia-160m", + "euclaise/gpt-neox-122m-minipile-digits", + "MBZUAI/lamini-cerebras-590m", + "nicholasKluge/Aira-124M", + "MayaPH/FinOPT-Washington", + "cyberagent/open-calm-large", + "BreadAi/StoryPy", + "EleutherAI/pythia-70m", + "BreadAi/gpt-Youtube", + "roneneldan/TinyStories-33M", + "EleutherAI/pythia-70m-deduped", + "lgaalves/gpt2_guanaco-dolly-platypus", + "Corianas/Quokka_590m", + "lgaalves/gpt2_platypus-dolly-guanaco", + "cyberagent/open-calm-7b", + "RWKV/rwkv-4-169m-pile", + "gpt2", + "roneneldan/TinyStories-28M", + "lgaalves/gpt2_open-platypus", + "gpt2", + "SaylorTwift/gpt2_test", + "roneneldan/TinyStories-3M", + "nthngdy/pythia-owt2-70m-50k", + "Corianas/256_5epoch", + "roneneldan/TinyStories-8M", + "lgaalves/gpt2-dolly", + "nthngdy/pythia-owt2-70m-100k", + "aisquared/dlite-v2-124m", + "mncai/SGPT-1.3B-insurance-epoch10", + "huggingtweets/gladosystem", + 
"abhiramtirumala/DialoGPT-sarcastic-medium", + "MBZUAI/lamini-cerebras-256m", + "cerebras/Cerebras-GPT-111M", + "uberkie/metharme-1.3b-finetuned", + "MBZUAI/lamini-cerebras-111m", + "psyche/kogpt", + "Corianas/Quokka_256m", + "vicgalle/gpt2-alpaca-gpt4", + "aisquared/dlite-v1-124m", + "Mikivis/xuanxuan", + "MBZUAI/LaMini-GPT-124M", + "vicgalle/gpt2-alpaca", + "huashiyiqike/testmodel", + "Corianas/111m", + "baseline", +] diff --git a/src/tools/plots.py b/src/tools/plots.py new file mode 100644 index 0000000000000000000000000000000000000000..02873ab1046b862aaf87ce42d3f780afadffe00c --- /dev/null +++ b/src/tools/plots.py @@ -0,0 +1,158 @@ +import numpy as np +import pandas as pd +import plotly.express as px +from plotly.graph_objs import Figure + +from src.display.utils import AutoEvalColumn, Task, Tasks +from src.display.utils import human_baseline_row as HUMAN_BASELINE +from src.leaderboard.filter_models import FLAGGED_MODELS +from src.leaderboard.read_evals import EvalResult + + +def create_scores_df(raw_data: list[EvalResult]) -> pd.DataFrame: + """ + Generates a DataFrame containing the maximum scores until each date. + + :param results_df: A DataFrame containing result information including metric scores and dates. + :return: A new DataFrame containing the maximum scores until each date for every metric. + """ + # Step 1: Ensure 'date' is in datetime format and sort the DataFrame by it + results_df = pd.DataFrame(raw_data) + # results_df["date"] = pd.to_datetime(results_df["date"], format="mixed", utc=True) + results_df.sort_values(by="date", inplace=True) + + # Step 2: Initialize the scores dictionary + scores = {k: [] for k in BENCHMARK_COLS + [AutoEvalColumn.average.name]} + + # Step 3: Iterate over the rows of the DataFrame and update the scores dictionary + for task in [t.value for t in Tasks] + [Task("Average", "avg", AutoEvalColumn.average.name)]: + current_max = 0 + last_date = "" + column = task.col_name + for _, row in results_df.iterrows(): + current_model = row["full_model"] + # We ignore models that are flagged/no longer on the hub/not finished + to_ignore = ( + not row["still_on_hub"] + or not row["not_flagged"] + or current_model in FLAGGED_MODELS + or row["status"] != "FINISHED" + ) + if to_ignore: + continue + + current_date = row["date"] + if task.benchmark == "Average": + current_score = np.mean(list(row["results"].values())) + else: + current_score = row["results"][task.benchmark] + + if current_score > current_max: + if current_date == last_date and len(scores[column]) > 0: + scores[column][-1] = {"model": current_model, "date": current_date, "score": current_score} + else: + scores[column].append({"model": current_model, "date": current_date, "score": current_score}) + current_max = current_score + last_date = current_date + + # Step 4: Return all dictionaries as DataFrames + return {k: pd.DataFrame(v) for k, v in scores.items()} + + +def create_plot_df(scores_df: dict[str : pd.DataFrame]) -> pd.DataFrame: + """ + Transforms the scores DataFrame into a new format suitable for plotting. + + :param scores_df: A DataFrame containing metric scores and dates. + :return: A new DataFrame reshaped for plotting purposes. 
+ """ + # Initialize the list to store DataFrames + dfs = [] + # Iterate over the cols and create a new DataFrame for each column + for col in BENCHMARK_COLS + [AutoEvalColumn.average.name]: + d = scores_df[col].reset_index(drop=True) + d["task"] = col + dfs.append(d) + + # Concatenate all the created DataFrames + concat_df = pd.concat(dfs, ignore_index=True) + + # Sort values by 'date' + concat_df.sort_values(by="date", inplace=True) + concat_df.reset_index(drop=True, inplace=True) + return concat_df + + +def create_metric_plot_obj(df: pd.DataFrame, metrics: list[str], title: str) -> Figure: + """ + Create a Plotly figure object with lines representing different metrics + and horizontal dotted lines representing human baselines. + + :param df: The DataFrame containing the metric values, names, and dates. + :param metrics: A list of strings representing the names of the metrics + to be included in the plot. + :param title: A string representing the title of the plot. + :return: A Plotly figure object with lines representing metrics and + horizontal dotted lines representing human baselines. + """ + + # Filter the DataFrame based on the specified metrics + df = df[df["task"].isin(metrics)] + + # Filter the human baselines based on the specified metrics + filtered_human_baselines = {k: v for k, v in HUMAN_BASELINE.items() if k in metrics} + + # Create a line figure using plotly express with specified markers and custom data + fig = px.line( + df, + x="date", + y="score", + color="task", + markers=True, + custom_data=["task", "score", "model"], + title=title, + ) + + # Update hovertemplate for better hover interaction experience + fig.update_traces( + hovertemplate="
".join( + [ + "Model Name: %{customdata[2]}", + "Metric Name: %{customdata[0]}", + "Date: %{x}", + "Metric Value: %{y}", + ] + ) + ) + + # Update the range of the y-axis + fig.update_layout(yaxis_range=[0, 100]) + + # Create a dictionary to hold the color mapping for each metric + metric_color_mapping = {} + + # Map each metric name to its color in the figure + for trace in fig.data: + metric_color_mapping[trace.name] = trace.line.color + + # Iterate over filtered human baselines and add horizontal lines to the figure + for metric, value in filtered_human_baselines.items(): + color = metric_color_mapping.get(metric, "blue") # Retrieve color from mapping; default to blue if not found + location = "top left" if metric == "HellaSwag" else "bottom left" # Set annotation position + # Add horizontal line with matched color and positioned annotation + fig.add_hline( + y=value, + line_dash="dot", + annotation_text=f"{metric} human baseline", + annotation_position=location, + annotation_font_size=10, + annotation_font_color=color, + line_color=color, + ) + + return fig + + +# Example Usage: +# human_baselines dictionary is defined. +# chart = create_metric_plot_obj(scores_df, ["ARC", "HellaSwag", "MMLU", "TruthfulQA"], human_baselines, "Graph Title") diff --git a/style.css b/style.css new file mode 100644 index 0000000000000000000000000000000000000000..114adf441e9032febb46bc056b2a8bb651075f0d --- /dev/null +++ b/style.css @@ -0,0 +1,28 @@ +body { + padding: 2rem; + font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; +} + +h1 { + font-size: 16px; + margin-top: 0; +} + +p { + color: rgb(107, 114, 128); + font-size: 15px; + margin-bottom: 10px; + margin-top: 5px; +} + +.card { + max-width: 620px; + margin: 0 auto; + padding: 16px; + border: 1px solid lightgray; + border-radius: 16px; +} + +.card p:last-child { + margin-bottom: 0; +} diff --git a/temp_leaderboard/model_data/external/Claude_3.5_Sonnet.json b/temp_leaderboard/model_data/external/Claude_3.5_Sonnet.json new file mode 100644 index 0000000000000000000000000000000000000000..8935b56a67be2479e04e80d7951cd49be2872015 --- /dev/null +++ b/temp_leaderboard/model_data/external/Claude_3.5_Sonnet.json @@ -0,0 +1,9 @@ +{ + "model_name": "Claude 3.5 Sonnet", + "score": 0.33851674641148327, + "math_score": 0.43157894736842106, + "physics_score": 0.24545454545454545, + "total_tokens": 222241, + "evaluation_time": 670.5163931846619, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." +} \ No newline at end of file diff --git a/temp_leaderboard/model_data/external/Claude_3.7_Sonnet.json b/temp_leaderboard/model_data/external/Claude_3.7_Sonnet.json new file mode 100644 index 0000000000000000000000000000000000000000..788b3fb20ddfdd858d2151698fd691eed1e0c1fb --- /dev/null +++ b/temp_leaderboard/model_data/external/Claude_3.7_Sonnet.json @@ -0,0 +1,9 @@ +{ + "model_name": "Claude 3.7 Sonnet", + "score": 0.36770334928229664, + "math_score": 0.5263157894736842, + "physics_score": 0.20909090909090908, + "total_tokens": 398016, + "evaluation_time": 1095.7695870399475, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." 
+} \ No newline at end of file diff --git a/temp_leaderboard/model_data/external/DeepSeek_V3_0324.json b/temp_leaderboard/model_data/external/DeepSeek_V3_0324.json new file mode 100644 index 0000000000000000000000000000000000000000..2007c6f7ad025dffa2731d1a87f682ae947eaef2 --- /dev/null +++ b/temp_leaderboard/model_data/external/DeepSeek_V3_0324.json @@ -0,0 +1,9 @@ +{ + "model_name": "DeepSeek V3 0324", + "score": 0.13229665071770336, + "math_score": 0.1736842105263158, + "physics_score": 0.09090909090909091, + "total_tokens": 359162, + "evaluation_time": 4257.714092254639, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." +} \ No newline at end of file diff --git a/temp_leaderboard/model_data/external/Gemini_2.0_Flash.json b/temp_leaderboard/model_data/external/Gemini_2.0_Flash.json new file mode 100644 index 0000000000000000000000000000000000000000..79ce6d23ef1782584d20fd36826c7614f62a8010 --- /dev/null +++ b/temp_leaderboard/model_data/external/Gemini_2.0_Flash.json @@ -0,0 +1,9 @@ +{ + "model_name": "Gemini 2.0 Flash", + "score": 0.4217703349282297, + "math_score": 0.5526315789473685, + "physics_score": 0.2909090909090909, + "total_tokens": 731337, + "evaluation_time": 857.6413371562958, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." +} \ No newline at end of file diff --git a/temp_leaderboard/model_data/external/Gemini_2.5_Pro_Preview.json b/temp_leaderboard/model_data/external/Gemini_2.5_Pro_Preview.json new file mode 100644 index 0000000000000000000000000000000000000000..0db8de807ac09dcd826eabca384f609f1172147d --- /dev/null +++ b/temp_leaderboard/model_data/external/Gemini_2.5_Pro_Preview.json @@ -0,0 +1,9 @@ +{ + "model_name": "Gemini 2.5 Pro Preview", + "score": 0.5863636363636364, + "math_score": 0.8, + "physics_score": 0.37272727272727274, + "total_tokens": 1394299, + "evaluation_time": 4533.155055761337, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." +} \ No newline at end of file diff --git a/temp_leaderboard/model_data/external/Gemma_3_12B.json b/temp_leaderboard/model_data/external/Gemma_3_12B.json new file mode 100644 index 0000000000000000000000000000000000000000..7278456a137e94e00828afd6beeb51da0d2e50b5 --- /dev/null +++ b/temp_leaderboard/model_data/external/Gemma_3_12B.json @@ -0,0 +1,9 @@ +{ + "model_name": "Gemma 3 12B", + "score": 0.29832535885167466, + "math_score": 0.4421052631578947, + "physics_score": 0.15454545454545454, + "total_tokens": 441055, + "evaluation_time": 3916.2552330493927, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС." +} \ No newline at end of file diff --git a/temp_leaderboard/model_data/external/Gemma_3_27B.json b/temp_leaderboard/model_data/external/Gemma_3_27B.json new file mode 100644 index 0000000000000000000000000000000000000000..c452651b65bb1396436955adcb5a13167ff2c684 --- /dev/null +++ b/temp_leaderboard/model_data/external/Gemma_3_27B.json @@ -0,0 +1,9 @@ +{ + "model_name": "Gemma 3 27B", + "score": 0.32057416267942584, + "math_score": 0.46842105263157896, + "physics_score": 0.17272727272727273, + "total_tokens": 357617, + "evaluation_time": 2030.33176279068, + "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. 
+}
\ No newline at end of file
diff --git a/temp_leaderboard/model_data/external/Gemma_3_4B.json b/temp_leaderboard/model_data/external/Gemma_3_4B.json
new file mode 100644
index 0000000000000000000000000000000000000000..57d1311f69076d38ec3b1111a80429ef1872cf27
--- /dev/null
+++ b/temp_leaderboard/model_data/external/Gemma_3_4B.json
@@ -0,0 +1,9 @@
+{
+    "model_name": "Gemma 3 4B",
+    "score": 0.12416267942583732,
+    "math_score": 0.22105263157894736,
+    "physics_score": 0.02727272727272727,
+    "total_tokens": 572095,
+    "evaluation_time": 1682.6655840873718,
+    "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС."
+}
\ No newline at end of file
diff --git a/temp_leaderboard/model_data/external/GigaChat-2-Max.json b/temp_leaderboard/model_data/external/GigaChat-2-Max.json
new file mode 100644
index 0000000000000000000000000000000000000000..2706b3873357b0fa1d041c7fd8fa025d8217783b
--- /dev/null
+++ b/temp_leaderboard/model_data/external/GigaChat-2-Max.json
@@ -0,0 +1,9 @@
+{
+    "model_name": "GigaChat-2-Max",
+    "score": 0.24952153110047848,
+    "math_score": 0.3263157894736842,
+    "physics_score": 0.17272727272727273,
+    "total_tokens": 220487,
+    "evaluation_time": 1006.1656014919281,
+    "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС."
+}
\ No newline at end of file
diff --git a/temp_leaderboard/model_data/external/GigaChat-2-Pro.json b/temp_leaderboard/model_data/external/GigaChat-2-Pro.json
new file mode 100644
index 0000000000000000000000000000000000000000..11240c4c4d1d5566866fa33fb1f3417170a37e70
--- /dev/null
+++ b/temp_leaderboard/model_data/external/GigaChat-2-Pro.json
@@ -0,0 +1,9 @@
+{
+    "model_name": "GigaChat-2-Pro",
+    "score": 0.20861244019138758,
+    "math_score": 0.3263157894736842,
+    "physics_score": 0.09090909090909091,
+    "total_tokens": 212196,
+    "evaluation_time": 1002.5515208244324,
+    "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС."
+}
\ No newline at end of file
diff --git a/temp_leaderboard/model_data/external/GigaChat-2.json b/temp_leaderboard/model_data/external/GigaChat-2.json
new file mode 100644
index 0000000000000000000000000000000000000000..ea61367facb58326e415c76537e112254f2047ea
--- /dev/null
+++ b/temp_leaderboard/model_data/external/GigaChat-2.json
@@ -0,0 +1,9 @@
+{
+    "model_name": "GigaChat-2",
+    "score": 0.0937799043062201,
+    "math_score": 0.14210526315789473,
+    "physics_score": 0.045454545454545456,
+    "total_tokens": 299747,
+    "evaluation_time": 834.6775443553925,
+    "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС."
+}
\ No newline at end of file
diff --git a/temp_leaderboard/model_data/external/GigaChat-Max.json b/temp_leaderboard/model_data/external/GigaChat-Max.json
new file mode 100644
index 0000000000000000000000000000000000000000..4d4ac7a901df91697580a711f9dec8a48dcd2132
--- /dev/null
+++ b/temp_leaderboard/model_data/external/GigaChat-Max.json
@@ -0,0 +1,9 @@
+{
+    "model_name": "GigaChat-Max",
+    "score": 0.1394736842105263,
+    "math_score": 0.17894736842105263,
+    "physics_score": 0.1,
+    "total_tokens": 201090,
+    "evaluation_time": 978.7567253112793,
+    "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС."
+}
\ No newline at end of file
diff --git a/temp_leaderboard/model_data/external/Qwen2.5_72B_Instruct.json b/temp_leaderboard/model_data/external/Qwen2.5_72B_Instruct.json
new file mode 100644
index 0000000000000000000000000000000000000000..d281bf6aa6a440b296775134d448c28daff0ced9
--- /dev/null
+++ b/temp_leaderboard/model_data/external/Qwen2.5_72B_Instruct.json
@@ -0,0 +1,9 @@
+{
+    "model_name": "Qwen2.5 72B Instruct",
+    "score": 0.2784688995215311,
+    "math_score": 0.38421052631578945,
+    "physics_score": 0.17272727272727273,
+    "total_tokens": 366729,
+    "evaluation_time": 2460.056980371475,
+    "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС."
+}
\ No newline at end of file
diff --git a/temp_leaderboard/model_data/external/gpt-4.1.json b/temp_leaderboard/model_data/external/gpt-4.1.json
new file mode 100644
index 0000000000000000000000000000000000000000..72273e6630062b8f6d310eef1529c34843441578
--- /dev/null
+++ b/temp_leaderboard/model_data/external/gpt-4.1.json
@@ -0,0 +1,9 @@
+{
+    "model_name": "gpt-4.1",
+    "score": 0.3861244019138756,
+    "math_score": 0.5631578947368421,
+    "physics_score": 0.20909090909090908,
+    "total_tokens": 405803,
+    "evaluation_time": 1918.7988040447235,
+    "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС."
+}
\ No newline at end of file
diff --git a/temp_leaderboard/model_data/external/gpt-4o.json b/temp_leaderboard/model_data/external/gpt-4o.json
new file mode 100644
index 0000000000000000000000000000000000000000..bdd67a6b8649f7a96df2f827ca5ad8427543ee7a
--- /dev/null
+++ b/temp_leaderboard/model_data/external/gpt-4o.json
@@ -0,0 +1,9 @@
+{
+    "model_name": "gpt-4o",
+    "score": 0.2617224880382775,
+    "math_score": 0.4052631578947368,
+    "physics_score": 0.11818181818181818,
+    "total_tokens": 468809,
+    "evaluation_time": 1078.4077816009521,
+    "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС."
+}
\ No newline at end of file
diff --git a/temp_leaderboard/model_data/external/o3-mini-high.json b/temp_leaderboard/model_data/external/o3-mini-high.json
new file mode 100644
index 0000000000000000000000000000000000000000..af3c1dcddc6b8358cfc0e5abd191281de5165883
--- /dev/null
+++ b/temp_leaderboard/model_data/external/o3-mini-high.json
@@ -0,0 +1,9 @@
+{
+    "model_name": "o3-mini-high",
+    "score": 0.600956937799043,
+    "math_score": 0.8473684210526315,
+    "physics_score": 0.35454545454545455,
+    "total_tokens": 2455126,
+    "evaluation_time": 4015.4359402656555,
+    "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС."
+}
\ No newline at end of file
diff --git a/temp_leaderboard/model_data/external/o4-mini-high.json b/temp_leaderboard/model_data/external/o4-mini-high.json
new file mode 100644
index 0000000000000000000000000000000000000000..311f30314e18f38eb2fc872cec45b7392c6455a9
--- /dev/null
+++ b/temp_leaderboard/model_data/external/o4-mini-high.json
@@ -0,0 +1,9 @@
+{
+    "model_name": "o4-mini-high",
+    "score": 0.5906698564593301,
+    "math_score": 0.8631578947368421,
+    "physics_score": 0.3181818181818182,
+    "total_tokens": 1898964,
+    "evaluation_time": 4623.6044108867645,
+    "system_prompt": "Π’Ρ‹ - ΠΏΠΎΠ»Π΅Π·Π½Ρ‹ΠΉ ΠΏΠΎΠΌΠΎΡ‰Π½ΠΈΠΊ ΠΏΠΎ ΠΌΠ°Ρ‚Π΅ΠΌΠ°Ρ‚ΠΈΠΊΠ΅ ΠΈ Ρ„ΠΈΠ·ΠΈΠΊΠ΅. ΠžΡ‚Π²Π΅Ρ‚ΡŒΡ‚Π΅ Π½Π° русском языкС."
+}
\ No newline at end of file
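
Reviewer note: a minimal sketch of how the three functions added in `src/tools/plots.py` are meant to chain together. It is not part of the diff; `raw_data` is assumed to be the list of `EvalResult` objects produced by `src.leaderboard.read_evals`, and `BENCHMARK_COLS` is assumed to hold the per-task column names.

```python
# Sketch only (not in the diff): end-to-end wiring for src/tools/plots.py.
# Assumes `raw_data` was loaded elsewhere via src.leaderboard.read_evals.
from src.display.utils import AutoEvalColumn, BENCHMARK_COLS
from src.tools.plots import create_metric_plot_obj, create_plot_df, create_scores_df

scores_per_metric = create_scores_df(raw_data)  # dict: metric column -> running-max DataFrame
plot_df = create_plot_df(scores_per_metric)     # long format: one row per (date, task, score, model)
fig = create_metric_plot_obj(
    plot_df,
    BENCHMARK_COLS + [AutoEvalColumn.average.name],  # metrics to draw, with human-baseline hlines
    title="Top scores over time",
)
fig.show()
```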
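A consistency property worth checking in review: every results file added under `temp_leaderboard/model_data/external/` satisfies `score == (math_score + physics_score) / 2`. Below is a small, hypothetical checker (not part of the diff) that submissions could be run through before upload, assuming the key set used by the files above:

```python
# Sketch only (not in the diff): sanity-check a DeathMath results JSON before upload.
import json
from pathlib import Path

REQUIRED_KEYS = {
    "model_name", "score", "math_score", "physics_score",
    "total_tokens", "evaluation_time", "system_prompt",
}

def check_submission(path: Path) -> None:
    data = json.loads(path.read_text(encoding="utf-8"))
    missing = REQUIRED_KEYS - data.keys()
    assert not missing, f"{path.name}: missing keys {sorted(missing)}"
    # All external files in this PR satisfy this equality to within float precision.
    expected = (data["math_score"] + data["physics_score"]) / 2
    assert abs(data["score"] - expected) < 1e-9, f"{path.name}: score != mean(math, physics)"

for p in Path("temp_leaderboard/model_data/external").glob("*.json"):
    check_submission(p)
```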