Dataset columns (ranges observed over the sample):

| Column        | Dtype                 | Min                 | Max                 |
|:--------------|:----------------------|:--------------------|:--------------------|
| modelId       | string (length)        | 5                   | 138                 |
| author        | string (length)        | 2                   | 42                  |
| last_modified | date                   | 2020-02-15 11:33:14 | 2025-04-20 06:26:59 |
| downloads     | int64                  | 0                   | 223M                |
| likes         | int64                  | 0                   | 11.7k               |
| library_name  | string (429 classes)   |                     |                     |
| tags          | sequence (length)      | 1                   | 4.05k               |
| pipeline_tag  | string (54 classes)    |                     |                     |
| createdAt     | date                   | 2022-03-02 23:29:04 | 2025-04-20 06:26:36 |
| card          | string (length)        | 11                  | 1.01M               |
Hellina/afriberta_ti_nllb
Hellina
"2025-02-17T19:29:14Z"
0
0
transformers
[ "transformers", "safetensors", "xlm-roberta", "fill-mask", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
"2025-02-17T19:27:15Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
nimrita/speecht5_finetuned_voxpopuli_sl
nimrita
"2023-08-29T05:07:19Z"
9
0
transformers
[ "transformers", "pytorch", "speecht5", "text-to-audio", "text-to-speech", "generated_from_trainer", "dataset:voxpopuli", "base_model:microsoft/speecht5_tts", "base_model:finetune:microsoft/speecht5_tts", "license:mit", "endpoints_compatible", "region:us" ]
text-to-speech
"2023-08-24T10:43:06Z"
---
license: mit
base_model: microsoft/speecht5_tts
tags:
- text-to-speech
- generated_from_trainer
datasets:
- voxpopuli
model-index:
- name: speecht5_finetuned_voxpopuli_sl
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# speecht5_finetuned_voxpopuli_sl

This model is a fine-tuned version of [microsoft/speecht5_tts](https://huggingface.co/microsoft/speecht5_tts) on the voxpopuli dataset. It achieves the following results on the evaluation set:
- Loss: 0.4915

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 200
- training_steps: 500

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 0.4214        | 45.98 | 250  | 0.4956          |
| 0.3978        | 91.95 | 500  | 0.4915          |

### Framework versions

- Transformers 4.33.0.dev0
- Pytorch 2.0.1+cu118
- Datasets 2.14.4
- Tokenizers 0.13.3
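The card leaves usage undocumented. As a minimal sketch using the standard SpeechT5 API from transformers — where the zero speaker embedding, the example text, and the output filename are placeholder assumptions, not part of the original card:

```python
import torch
import soundfile as sf
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan

processor = SpeechT5Processor.from_pretrained("nimrita/speecht5_finetuned_voxpopuli_sl")
model = SpeechT5ForTextToSpeech.from_pretrained("nimrita/speecht5_finetuned_voxpopuli_sl")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Dober dan!", return_tensors="pt")

# SpeechT5 conditions generation on a 512-dim x-vector speaker embedding.
# A zero vector is only a placeholder assumption; use a real x-vector for a natural voice.
speaker_embeddings = torch.zeros((1, 512))

speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
sf.write("speech.wav", speech.numpy(), samplerate=16000)
```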
LucaAsga/q-FrozenLake-v1-4x4-noSlippery
LucaAsga
"2023-10-11T09:44:28Z"
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
"2023-10-11T09:44:25Z"
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gymnasium as gym  # the course notebooks also work with classic `gym`

# `load_from_hub` is the helper defined in the Hugging Face Deep RL course notebook.
model = load_from_hub(repo_id="LucaAsga/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
scribbyotx/sa
scribbyotx
"2024-05-23T14:46:41Z"
19
0
diffusers
[ "diffusers", "doi:10.57967/hf/2292", "region:us" ]
null
"2024-05-22T19:40:25Z"
# Discord-Scraper

Pipeline to scrape prompt + image url pairs from Discord channels.

The idea started by wanting to scrape the image-prompt pairs from the [share-dalle-3](https://discord.com/channels/823813159592001537/1158354590463447092) Discord channel on the [LAION server](https://discord.com/invite/eq3cAMZtCC), but you can now reuse the scraper with any channel you want.

## How to use

Clone the repo: `git clone https://github.com/LAION-AI/Discord-Scrapers.git`

1. Set up a virtual environment and install the requirements with `pip install -r requirements.txt`
2. Get your `DISCORD_TOKEN` and `HF_TOKEN` and add them as environment variables.
   1. `DISCORD_TOKEN` can be obtained by looking at the developer tools in your web browser
   2. `HF_TOKEN` can be obtained by logging in to HuggingFace and looking at your profile
3. Get the `channel_id` of the Discord channel you want to scrape. You can do this by enabling developer mode in Discord and right-clicking the channel you want to scrape.
4. Create a `condition_fn` and a `parse_fn` that will be used to filter and parse the messages. You can use the ones I created as an example (a hedged sketch follows after this README).
5. Create your scraping script and optionally your `config.json`

**NOTE: PAY ATTENTION TO THE FUNCTION SIGNATURES OF `parse_fn` AND `condition_fn`.**

```python
import os
from typing import Any, Dict, List

from scraper import ScraperBot, ScraperBotConfig, HFDatasetScheme

def parse_fn(message: Dict[str, Any]) -> List[HFDatasetScheme]:
    ...

def condition_fn(message: Dict[str, Any]) -> bool:
    ...

if __name__ == "__main__":
    config_path = os.path.join(os.path.dirname(__file__), "config.json")
    config = ScraperBotConfig.from_json(config_path)

    bot = ScraperBot(config=config, parse_fn=parse_fn, condition_fn=condition_fn)
    bot.scrape(fetch_all=False, push_to_hub=False)
```

## Main Components

### ScraperBotConfig

Dataclass with configuration attributes to be used by the ScraperBot. You can create your own config.json file and load it with `ScraperBotConfig.from_json(path_to_config)`.

Attributes:
- base_url: str, the base url of the Discord API (in case it changes)
- channel_id: str, the id of the channel you want to scrape
- limit: int, the number of messages to fetch (from my tests the max allowed by Discord is 100)
- hf_dataset_name: str, the name of the dataset you want to push to HuggingFace

### ScraperBot

Implementation of the scraper. Gets the messages from the Discord API and filters them using the `condition_fn`, then parses the messages using the `parse_fn` and pushes the dataset to HuggingFace.

Attributes:
- config: ScraperBotConfig, the configuration to be used by the bot
- parse_fn: Callable[[Dict[str, Any]], List[HFDatasetScheme]], the function to parse the messages
- condition_fn: Callable[[Dict[str, Any]], bool], the function to filter the messages

Methods:

#### scrape(fetch_all: bool = False, push_to_hub: bool = False) -> Dataset

Scrapes the messages and optionally pushes the dataset to HuggingFace.

Args:
- fetch_all: bool, if True, fetches all the messages from the channel; if False, fetches only the messages that weren't processed yet.
- push_to_hub: bool, if True, pushes the dataset to HuggingFace; if False, only returns the dataset.

**NOTE: If you want to push the dataset to HuggingFace you need to set the `HF_TOKEN` environment variable.**

**NOTE 2: If the dataset doesn't exist on HuggingFace it will be created; if it already exists it will be updated.**
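The bodies of `parse_fn` and `condition_fn` are intentionally elided in the README. Purely as an illustrative sketch: the `content` and `attachments` fields below follow the public Discord REST API message schema, but the keyword arguments passed to `HFDatasetScheme` are hypothetical — check the class definition in the repo for its real fields.

```python
from typing import Any, Dict, List

from scraper import HFDatasetScheme


def condition_fn(message: Dict[str, Any]) -> bool:
    # Keep only messages that carry both a text prompt and at least one image attachment.
    return bool(message.get("content")) and len(message.get("attachments", [])) > 0


def parse_fn(message: Dict[str, Any]) -> List[HFDatasetScheme]:
    prompt = message["content"]
    # NOTE: the field names below (caption=, image_url=) are assumed for illustration;
    # the repo's HFDatasetScheme may use different ones.
    return [
        HFDatasetScheme(caption=prompt, image_url=attachment["url"])
        for attachment in message["attachments"]
    ]
```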
misha228322/toilet
misha228322
"2025-03-03T18:47:36Z"
0
0
null
[ "license:apache-2.0", "region:us" ]
null
"2025-03-03T18:47:36Z"
--- license: apache-2.0 ---
bofenghuang/vigogne-falcon-7b-instruct
bofenghuang
"2023-07-05T11:38:33Z"
25
1
transformers
[ "transformers", "pytorch", "RefinedWebModel", "text-generation", "LLM", "custom_code", "fr", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-05T09:51:56Z"
---
license: apache-2.0
language:
- fr
pipeline_tag: text-generation
library_name: transformers
tags:
- LLM
inference: false
---

<p align="center" width="100%"> <img src="https://huggingface.co/bofenghuang/vigogne-falcon-7b-instruct/resolve/main/vigogne_logo.png" alt="Vigogne" style="width: 40%; min-width: 300px; display: block; margin: auto;"> </p>

# Vigogne-Falcon-7B-Instruct: A French Instruction-following Falcon Model

Vigogne-Falcon-7B-Instruct is a [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b) model fine-tuned to follow French instructions.

For more information, please visit the GitHub repo: https://github.com/bofenghuang/vigogne

## Usage

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from vigogne.preprocess import generate_instruct_prompt

model_name_or_path = "bofenghuang/vigogne-falcon-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side="right", use_fast=False)
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)

user_query = "Expliquez la différence entre DoS et phishing."
prompt = generate_instruct_prompt(user_query)

input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"].to(model.device)
input_length = input_ids.shape[1]

generated_outputs = model.generate(
    input_ids=input_ids,
    generation_config=GenerationConfig(
        temperature=0.1,
        do_sample=True,
        repetition_penalty=1.0,
        max_new_tokens=512,
    ),
    return_dict_in_generate=True,
    pad_token_id=tokenizer.eos_token_id,
    eos_token_id=tokenizer.eos_token_id,
)

generated_tokens = generated_outputs.sequences[0, input_length:]
generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)
print(generated_text)
```

You can also run inference with this model using the following Google Colab Notebook.

<a href="https://colab.research.google.com/github/bofenghuang/vigogne/blob/main/notebooks/infer_instruct.ipynb" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

## Limitations

Vigogne is still under development, and there are many limitations that have to be addressed. Please note that the model may generate harmful or biased content, incorrect information, or generally unhelpful answers.
vag12345678/female_curvy_short1
vag12345678
"2025-03-21T16:04:38Z"
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
"2025-03-21T15:38:37Z"
--- license: other license_name: flux-1-dev-non-commercial-license license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md language: - en tags: - flux - diffusers - lora - replicate base_model: "black-forest-labs/FLUX.1-dev" pipeline_tag: text-to-image # widget: # - text: >- # prompt # output: # url: https://... instance_prompt: Sidf --- # Female_Curvy_Short1 <Gallery /> Trained on Replicate using: https://replicate.com/ostris/flux-dev-lora-trainer/train ## Trigger words You should use `Sidf` to trigger the image generation. ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda') pipeline.load_lora_weights('vag12345678/female_curvy_short1', weight_name='lora.safetensors') image = pipeline('your prompt').images[0] ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
MeiKing111/ita9_c2.5
MeiKing111
"2025-04-11T05:46:36Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2025-04-11T02:57:30Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
lwl-uestc/AIMO2_7B_v1
lwl-uestc
"2025-03-04T01:43:13Z"
0
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "license:other", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2025-03-04T01:37:05Z"
---
library_name: transformers
license: other
base_model: deepseek-ai/DeepSeek-R1-Distill-Qwen-7B
tags:
- llama-factory
- full
- generated_from_trainer
model-index:
- name: 7b_1k
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# 7b_1k

This model is a fine-tuned version of [deepseek-ai/DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B) on the aops_1k dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 1
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- total_train_batch_size: 8
- total_eval_batch_size: 64
- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.05
- num_epochs: 3

### Training results

### Framework versions

- Transformers 4.48.2
- Pytorch 2.6.0+cu124
- Datasets 3.2.0
- Tokenizers 0.21.0
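The card's usage sections are all "More information needed". As an unofficial sketch, the checkpoint should load like any other Qwen2-based chat model in transformers; the example prompt and generation settings are placeholders, not from the card:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "lwl-uestc/AIMO2_7B_v1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

# The model is tagged "conversational", so a chat template should be available.
messages = [{"role": "user", "content": "Solve: what is 12 * 17?"}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)

outputs = model.generate(inputs, max_new_tokens=512)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```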
guneomkar/ppo-LunarLander-v2
guneomkar
"2025-04-16T01:39:06Z"
0
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
"2025-04-16T01:38:40Z"
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: -157.67 +/- 46.28
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# The checkpoint filename below is an assumption; check the repo's file list.
checkpoint = load_from_hub(repo_id="guneomkar/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
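To sanity-check the reported mean reward of -157.67 +/- 46.28, an evaluation along these lines should work; `model` comes from the snippet above and the episode count is arbitrary:

```python
import gymnasium as gym
from stable_baselines3.common.evaluation import evaluate_policy

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```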
mradermacher/Mistral-7B-Instruct-v0.2-GGUF
mradermacher
"2025-02-05T09:32:35Z"
82
1
transformers
[ "transformers", "gguf", "finetuned", "en", "base_model:mistralai/Mistral-7B-Instruct-v0.2", "base_model:quantized:mistralai/Mistral-7B-Instruct-v0.2", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
"2024-09-11T04:08:35Z"
--- base_model: mistralai/Mistral-7B-Instruct-v0.2 language: - en library_name: transformers license: apache-2.0 quantized_by: mradermacher tags: - finetuned --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2 <!-- provided-files --> weighted/imatrix quants are available at https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.Q2_K.gguf) | Q2_K | 2.8 | | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.IQ3_XS.gguf) | IQ3_XS | 3.1 | | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.Q3_K_S.gguf) | Q3_K_S | 3.3 | | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.IQ3_S.gguf) | IQ3_S | 3.3 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.IQ3_M.gguf) | IQ3_M | 3.4 | | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.Q3_K_M.gguf) | Q3_K_M | 3.6 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.Q3_K_L.gguf) | Q3_K_L | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.IQ4_XS.gguf) | IQ4_XS | 4.0 | | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.Q4_K_S.gguf) | Q4_K_S | 4.2 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.Q4_K_M.gguf) | Q4_K_M | 4.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.Q5_K_S.gguf) | Q5_K_S | 5.1 | | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.Q5_K_M.gguf) | Q5_K_M | 5.2 | | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.Q6_K.gguf) | Q6_K | 6.0 | very good quality | | [GGUF](https://huggingface.co/mradermacher/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/Mistral-7B-Instruct-v0.2.Q8_0.gguf) | Q8_0 | 7.8 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. 
## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
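Beyond the linked READMEs, one convenient way to run these files locally is via the llama-cpp-python bindings. The snippet below is a sketch, not part of this card, and assumes the Q4_K_M file listed above has already been downloaded:

```python
from llama_cpp import Llama

# Assumes the Q4_K_M quant from this repo was downloaded to the working directory.
llm = Llama(model_path="Mistral-7B-Instruct-v0.2.Q4_K_M.gguf", n_ctx=4096)

output = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Explain GGUF in one sentence."}]
)
print(output["choices"][0]["message"]["content"])
```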
danfeg/ST-PARA-XLM-R-M_Finetuned-EN-1500
danfeg
"2024-03-23T17:22:13Z"
3
0
sentence-transformers
[ "sentence-transformers", "safetensors", "xlm-roberta", "feature-extraction", "sentence-similarity", "transformers", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
sentence-similarity
"2024-03-23T17:21:15Z"
--- library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # danfeg/ST-PARA-XLM-R-M_Finetuned-EN-1500 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('danfeg/ST-PARA-XLM-R-M_Finetuned-EN-1500') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('danfeg/ST-PARA-XLM-R-M_Finetuned-EN-1500') model = AutoModel.from_pretrained('danfeg/ST-PARA-XLM-R-M_Finetuned-EN-1500') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=danfeg/ST-PARA-XLM-R-M_Finetuned-EN-1500) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 47 with parameters: ``` {'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 3, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 15, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: XLMRobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
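Since the card suggests clustering and semantic search, a quick similarity check built on the sentence-transformers snippet above may be useful; this example is illustrative and not from the original card:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('danfeg/ST-PARA-XLM-R-M_Finetuned-EN-1500')
embeddings = model.encode(["A man is eating food.", "Someone is having a meal."], convert_to_tensor=True)

# Cosine similarity between the two sentence embeddings.
print(util.cos_sim(embeddings[0], embeddings[1]))
```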
fbaldassarri/allenai_Llama-3.1-Tulu-3-8B-DPO-autoround-int4-gs128-sym
fbaldassarri
"2025-04-15T20:34:50Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "autoround", "auto-round", "intel-autoround", "intel", "woq", "meta", "pytorch", "llama-3", "conversational", "en", "dataset:allenai/tulu-3-DPO-mixture", "base_model:meta-llama/Llama-3.1-8B", "base_model:quantized:meta-llama/Llama-3.1-8B", "license:llama3.1", "autotrain_compatible", "text-generation-inference", "4-bit", "intel/auto-round", "region:us" ]
text-generation
"2025-04-15T20:32:39Z"
---
language:
- en
license: llama3.1
library_name: transformers
tags:
- autoround
- auto-round
- intel-autoround
- intel
- woq
- meta
- pytorch
- llama
- llama-3
model_name: Llama-3.1 Tulu 3 8B DPO
base_model:
- meta-llama/Llama-3.1-8B
datasets:
- allenai/tulu-3-DPO-mixture
inference: false
model_creator: allenai
pipeline_tag: text-generation
prompt_template: '{prompt} '
quantized_by: fbaldassarri
---

## Model Information

Quantized version of [allenai/Llama-3.1-Tulu-3-8B-DPO](https://huggingface.co/allenai/Llama-3.1-Tulu-3-8B-DPO) using torch.float32 for quantization tuning.
- 4 bits (INT4)
- group size = 128
- Symmetrical Quantization
- Method WoQ (AutoRound format)

Fast and low memory, 2-3X speedup (slight accuracy drop at W4G128)

Quantization framework: [Intel AutoRound](https://github.com/intel/auto-round) v0.4.6

Note: this INT4 version of Llama-3.1-Tulu-3-8B-DPO has been quantized to run inference through CPU.

## Replication Recipe

### Step 1: Install Requirements

I suggest installing the requirements into a dedicated python virtualenv or a conda environment.

```
wget https://github.com/intel/auto-round/archive/refs/tags/v0.4.6.tar.gz
tar -xvzf v0.4.6.tar.gz
cd auto-round-0.4.6
pip install -r requirements-cpu.txt --upgrade
```

### Step 2: Build the Intel AutoRound wheel from sources

```
pip install -vvv --no-build-isolation -e .[cpu]
```

### Step 3: Script for Quantization

```
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "allenai/Llama-3.1-Tulu-3-8B-DPO"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

from auto_round import AutoRound

bits, group_size, sym, device = 4, 128, True, 'cpu'
autoround = AutoRound(model, tokenizer, nsamples=128, iters=200, seqlen=512, batch_size=4, bits=bits, group_size=group_size, sym=sym, device=device)
autoround.quantize()

output_dir = "./AutoRound/allenai_Llama-3.1-Tulu-3-8B-DPO-autoround-int4-gs128-sym"
autoround.save_quantized(output_dir, format='auto_round', inplace=True)
```

## License

[Llama 3.1 Community License](https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/LICENSE)

## Disclaimer

This quantized model comes with no warranty. It has been developed only for research purposes.
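The card notes this INT4 build targets CPU inference but only documents the quantization recipe. Assuming the AutoRound-format checkpoint loads through transformers once auto_round is imported (the pattern described in Intel AutoRound's documentation), inference might look like the following sketch; the prompt is a placeholder:

```python
from auto_round import AutoRoundConfig  # noqa: F401 -- assumed to register the auto-round format with transformers
from transformers import AutoModelForCausalLM, AutoTokenizer

model_dir = "fbaldassarri/allenai_Llama-3.1-Tulu-3-8B-DPO-autoround-int4-gs128-sym"
model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="cpu")
tokenizer = AutoTokenizer.from_pretrained(model_dir)

inputs = tokenizer("What is the capital of France?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```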
trenden/17f896e5-c352-4029-9c96-ed347b89683f
trenden
"2025-02-01T20:00:36Z"
8
0
peft
[ "peft", "safetensors", "gemma2", "axolotl", "generated_from_trainer", "base_model:unsloth/gemma-2-2b-it", "base_model:adapter:unsloth/gemma-2-2b-it", "license:gemma", "region:us" ]
null
"2025-02-01T19:48:01Z"
--- library_name: peft license: gemma base_model: unsloth/gemma-2-2b-it tags: - axolotl - generated_from_trainer model-index: - name: 17f896e5-c352-4029-9c96-ed347b89683f results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl) <details><summary>See axolotl config</summary> axolotl version: `0.4.1` ```yaml adapter: lora base_model: unsloth/gemma-2-2b-it bf16: auto chat_template: llama3 dataset_prepared_path: null datasets: - data_files: - 2e997d4de99a562a_train_data.json ds_type: json format: custom path: /workspace/input_data/2e997d4de99a562a_train_data.json type: field_input: all_generated_responses field_instruction: prompt field_output: prompt_id format: '{instruction} {input}' no_input_format: '{instruction}' system_format: '{system}' system_prompt: '' debug: null deepspeed: null early_stopping_patience: null eval_max_new_tokens: 128 eval_table_size: null evals_per_epoch: 4 flash_attention: false fp16: null fsdp: null fsdp_config: null gradient_accumulation_steps: 4 gradient_checkpointing: false group_by_length: false hub_model_id: trenden/17f896e5-c352-4029-9c96-ed347b89683f hub_repo: null hub_strategy: checkpoint hub_token: null learning_rate: 0.0002 load_in_4bit: false load_in_8bit: false local_rank: null logging_steps: 10 lora_alpha: 16 lora_dropout: 0.05 lora_fan_in_fan_out: null lora_model_dir: null lora_r: 8 lora_target_linear: true lr_scheduler: cosine max_steps: 200 micro_batch_size: 2 mlflow_experiment_name: /tmp/2e997d4de99a562a_train_data.json model_type: AutoModelForCausalLM num_epochs: 1 optimizer: adamw_bnb_8bit output_dir: miner_id_24 pad_to_sequence_len: true resume_from_checkpoint: null s2_attention: null sample_packing: false saves_per_epoch: 4 sequence_len: 512 strict: false tf32: false tokenizer_type: AutoTokenizer train_on_inputs: false trust_remote_code: true val_set_size: 0.05 wandb_entity: null wandb_mode: online wandb_name: 31b5f19a-32e0-471d-9beb-5e95609a1834 wandb_project: Birthday-SN56-26-Gradients-On-Demand wandb_run: your_name wandb_runid: 31b5f19a-32e0-471d-9beb-5e95609a1834 warmup_steps: 5 weight_decay: 0.0 xformers_attention: null ``` </details><br> # 17f896e5-c352-4029-9c96-ed347b89683f This model is a fine-tuned version of [unsloth/gemma-2-2b-it](https://huggingface.co/unsloth/gemma-2-2b-it) on the None dataset. 
It achieves the following results on the evaluation set: - Loss: 3.1121 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 8 - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 5 - training_steps: 200 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | No log | 0.0001 | 1 | 4.0465 | | 3.1491 | 0.0070 | 50 | 3.1284 | | 3.0962 | 0.0139 | 100 | 3.1162 | | 3.0989 | 0.0209 | 150 | 3.1144 | | 3.1061 | 0.0278 | 200 | 3.1121 | ### Framework versions - PEFT 0.13.2 - Transformers 4.46.0 - Pytorch 2.5.0+cu124 - Datasets 3.0.1 - Tokenizers 0.20.1
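The card does not show how to load the adapter. Since this is a PEFT LoRA trained on top of unsloth/gemma-2-2b-it, the standard PEFT loading pattern below should apply; treat it as a sketch rather than the author's documented method:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("unsloth/gemma-2-2b-it")
model = PeftModel.from_pretrained(base, "trenden/17f896e5-c352-4029-9c96-ed347b89683f")
tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-2-2b-it")
```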
sosuneko/q-Taxi-v3
sosuneko
"2023-08-28T01:43:13Z"
0
0
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
"2023-08-28T01:43:08Z"
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.50 +/- 2.63
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gymnasium as gym  # the course notebooks also work with classic `gym`

# `load_from_hub` is the helper defined in the Hugging Face Deep RL course notebook.
model = load_from_hub(repo_id="sosuneko/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
ymcki/gemma-2-2b-jpn-it-GGUF
ymcki
"2024-10-10T02:00:15Z"
78
1
transformers
[ "transformers", "gguf", "nlp", "code", "text-generation", "multilingual", "dataset:TFMC/imatrix-dataset-for-japanese-llm", "base_model:google/gemma-2-2b-jpn-it", "base_model:quantized:google/gemma-2-2b-jpn-it", "license:gemma", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
text-generation
"2024-10-03T11:20:04Z"
---
base_model: google/gemma-2-2b-jpn-it
language:
- multilingual
datasets:
- TFMC/imatrix-dataset-for-japanese-llm
library_name: transformers
license: gemma
license_link: https://ai.google.dev/gemma/terms
pipeline_tag: text-generation
tags:
- nlp
- code
quantized_by: ymcki
widget:
- messages:
  - role: user
    content: Can you provide ways to eat combinations of bananas and dragonfruits?
---

Original model: https://huggingface.co/google/gemma-2-2b-jpn-it

## Prompt format

```
<start_of_turn>user
{prompt}<end_of_turn>
<start_of_turn>model
<end_of_turn>
<start_of_turn>model
```

Note that this model does not support a system prompt.

## Download a file (not the whole branch) from below:

ELYZA-tasks-100 is a fairly standard benchmark for Japanese LLMs. The perfect score is 5.00. As a reference, bartowski's gemma-2-27b-it.Q6_K.gguf scores 4.04.

| Filename | Quant type | File Size | ELYZA-tasks-100 | Nvidia 3090 | Description |
| -------- | ---------- | --------- | --------------- | ----------- | ----------- |
| [gemma-2-2b-jpn-it.f16.gguf](https://huggingface.co/ymcki/gemma-2-2b-jpn-it-GGUF/blob/main/gemma-2-2b-jpn-it.f16.gguf) | f16 | 5.24GB | 2.90 | 98t/s | Full F16 weights. |
| [gemma-2-2b-jpn-it.Q8_0.gguf](https://huggingface.co/ymcki/gemma-2-2b-jpn-it-GGUF/blob/main/gemma-2-2b-jpn-it.Q8_0.gguf) | Q8_0 | 2.78GB | 3.06 | 140t/s | Extremely high quality, *recommended*. |
| [gemma-2-2b-jpn-it-imatrix.Q4_0.gguf](https://huggingface.co/ymcki/gemma-2-2b-jpn-it-GGUF/blob/main/gemma-2-2b-jpn-it-imatrix.Q4_0.gguf) | Q4_0 | 1.63GB | 2.89 | 137t/s | Good quality, *recommended for edge devices <8GB RAM*. |
| [gemma-2-2b-jpn-it-imatrix.Q4_0_8_8.gguf](https://huggingface.co/ymcki/gemma-2-2b-jpn-it-GGUF/blob/main/gemma-2-2b-jpn-it-imatrix.Q4_0_8_8.gguf) | Q4_0_8_8 | 1.63GB | 2.78 | 2.79t/s | Good quality, *recommended for edge devices <8GB RAM*. |
| [gemma-2-2b-jpn-it-imatrix.Q4_0_4_8.gguf](https://huggingface.co/ymcki/gemma-2-2b-jpn-it-GGUF/blob/main/gemma-2-2b-jpn-it-imatrix.Q4_0_4_8.gguf) | Q4_0_4_8 | 1.63GB | 2.77 | 2.61t/s | Good quality, *recommended for edge devices <8GB RAM*. |
| [gemma-2-2b-jpn-it-imatrix.Q4_0_4_4.gguf](https://huggingface.co/ymcki/gemma-2-2b-jpn-it-GGUF/blob/main/gemma-2-2b-jpn-it-imatrix.Q4_0_4_4.gguf) | Q4_0_4_4 | 1.63GB | 2.65 | 3.09t/s | Good quality, *recommended for edge devices <8GB RAM*. |
| [gemma-2-2b-jpn-it.Q4_0.gguf](https://huggingface.co/ymcki/gemma-2-2b-jpn-it-GGUF/blob/main/gemma-2-2b-jpn-it.Q4_0.gguf) | Q4_0 | 1.63GB | 2.77 | 159t/s | Good quality, *recommended for edge devices <8GB RAM*. |
| [gemma-2-2b-jpn-it.Q4_0_8_8.gguf](https://huggingface.co/ymcki/gemma-2-2b-jpn-it-GGUF/blob/main/gemma-2-2b-jpn-it.Q4_0_8_8.gguf) | Q4_0_8_8 | 1.63GB | 2.92 | 2.85t/s | Good quality, *recommended for edge devices <8GB RAM*. |
| [gemma-2-2b-jpn-it.Q4_0_4_8.gguf](https://huggingface.co/ymcki/gemma-2-2b-jpn-it-GGUF/blob/main/gemma-2-2b-jpn-it.Q4_0_4_8.gguf) | Q4_0_4_8 | 1.63GB | 2.74 | 2.56t/s | Good quality, *recommended for edge devices <8GB RAM*. |
| [gemma-2-2b-jpn-it.Q4_0_4_4.gguf](https://huggingface.co/ymcki/gemma-2-2b-jpn-it-GGUF/blob/main/gemma-2-2b-jpn-it.Q4_0_4_4.gguf) | Q4_0_4_4 | 1.63GB | 2.70 | 3.10t/s | Good quality, *recommended for edge devices <8GB RAM*. |

## How to check i8mm and sve support for ARM devices

ARM i8mm support is necessary to take advantage of the Q4_0_4_8 gguf. All ARM architectures >= ARMv8.6-A support i8mm.

ARM sve support is necessary to take advantage of the Q4_0_8_8 gguf. sve is an optional feature that starts from ARMv8.2-A, but the majority of ARM chips do not implement it.

For ARM devices without either feature, it is recommended to use Q4_0_4_4.

With this support, inference speed should be faster in the order of Q4_0_8_8 > Q4_0_4_8 > Q4_0_4_4 > Q4_0, without much effect on the quality of the response.

This is a [list](https://gpages.juszkiewicz.com.pl/arm-socs-table/arm-socs.html) of ARM CPUs that support different ARM instructions. Here is another [list](https://raw.githubusercontent.com/ThomasKaiser/sbc-bench/refs/heads/master/sbc-bench.sh). Apparently, they cover only a limited number of ARM CPUs, so it is better to check for i8mm and sve support yourself.

For Apple devices,
```
sysctl hw
```
For other ARM devices (i.e., most Android devices),
```
cat /proc/cpuinfo
```
There are also Android apps that can display /proc/cpuinfo.

I was told that for Intel/AMD CPU inference, support for AVX2/AVX512 can also improve the performance of Q4_0_8_8.

On the other hand, Nvidia 3090 inference speed is significantly faster for Q4_0 than the other ggufs. That means for GPU inference, you are better off using Q4_0.

## Which Q4_0 model to use for ARM devices

| Brand | Series | Model | i8mm | sve | Quant Type |
| ----- | ------ | ----- | ---- | --- | -----------|
| Apple | A | A4 to A14 | No | No | Q4_0_4_4 |
| Apple | A | A15 to A18 | Yes | No | Q4_0_4_8 |
| Apple | M | M1 | No | No | Q4_0_4_4 |
| Apple | M | M2/M3/M4 | Yes | No | Q4_0_4_8 |
| Google | Tensor | G1,G2 | No | No | Q4_0_4_4 |
| Google | Tensor | G3,G4 | Yes | Yes | Q4_0_8_8 |
| Samsung | Exynos | 2200,2400 | Yes | Yes | Q4_0_8_8 |
| Mediatek | Dimensity | 9000,9000+ | Yes | Yes | Q4_0_8_8 |
| Mediatek | Dimensity | 9300 | Yes | No | Q4_0_4_8 |
| Qualcomm | Snapdragon | 7+ Gen 2,8/8+ Gen 1 | Yes | Yes | Q4_0_8_8 |
| Qualcomm | Snapdragon | 8 Gen 2,8 Gen 3,X Elite | Yes | No | Q4_0_4_8 |

## imatrix quantization

According to this [blog](https://sc-bakushu.hatenablog.com/entry/2024/04/20/050213), adding an imatrix to low-bit quants can significantly improve performance. The best dataset for Japanese is [TFMC/imatrix-dataset-for-japanese-llm](https://huggingface.co/datasets/TFMC/imatrix-dataset-for-japanese-llm). Therefore, I also created imatrix versions of the different Q4_0 quants. However, based on my benchmarking results, the difference is not significant.

## Convert safetensors to f16 gguf

Make sure you have llama.cpp git cloned:

```
python3 convert_hf_to_gguf.py gemma-2-2b-jpn-it/ --outfile gemma-2-2b-jpn-it.f16.gguf --outtype f16
```

## Convert f16 gguf to Q8_0 gguf without imatrix

Make sure you have llama.cpp compiled:
```
./llama-quantize gemma-2-2b-jpn-it.f16.gguf gemma-2-2b-jpn-it.Q8_0.gguf q8_0
```

## Convert f16 gguf to other ggufs with imatrix

First, prepare the imatrix from the f16 gguf and c4_en_ja_imatrix.txt:
```
./llama-imatrix -m gemma-2-2b-jpn-it.f16.gguf -f c4_en_ja_imatrix.txt -o gemma-2-2b-jpn-it.imatrix --chunks 32
```

Then, convert the f16 gguf with the imatrix to create an imatrix gguf:
```
./llama-quantize --imatrix gemma-2-2b-jpn-it.imatrix gemma-2-2b-jpn-it.f16.gguf gemma-2-2b-jpn-it-imatrix.Q4_0_8_8.gguf q4_0_8_8
```

## Downloading using huggingface-cli

First, make sure you have huggingface-cli installed:
```
pip install -U "huggingface_hub[cli]"
```
Then, you can target the specific file you want:
```
huggingface-cli download ymcki/gemma-2-2b-jpn-it-GGUF --include "gemma-2-2b-jpn-it.Q8_0.gguf" --local-dir ./
```

## Credits

Thank you bartowski for providing a README.md to get me started.

Thank you YoutechA320U for the ELYZA-tasks-100 auto evaluation tool.
mradermacher/canarim-7b-vestibulaide-GGUF
mradermacher
"2024-11-02T21:06:16Z"
41
0
transformers
[ "transformers", "gguf", "text-generation", "pytorch", "LLM", "Portuguese", "Llama 2", "pt", "base_model:dominguesm/canarim-7b-vestibulaide", "base_model:quantized:dominguesm/canarim-7b-vestibulaide", "license:llama2", "endpoints_compatible", "region:us", "conversational" ]
text-generation
"2024-11-01T08:44:37Z"
--- base_model: dominguesm/canarim-7b-vestibulaide language: - pt library_name: transformers license: llama2 quantized_by: mradermacher tags: - text-generation - pytorch - LLM - Portuguese - Llama 2 --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/dominguesm/canarim-7b-vestibulaide <!-- provided-files --> weighted/imatrix quants are available at https://huggingface.co/mradermacher/canarim-7b-vestibulaide-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.Q2_K.gguf) | Q2_K | 2.6 | | | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.Q3_K_S.gguf) | Q3_K_S | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.Q3_K_M.gguf) | Q3_K_M | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.Q3_K_L.gguf) | Q3_K_L | 3.7 | | | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.IQ4_XS.gguf) | IQ4_XS | 3.7 | | | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.Q4_K_S.gguf) | Q4_K_S | 4.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.Q4_K_M.gguf) | Q4_K_M | 4.2 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.Q5_K_S.gguf) | Q5_K_S | 4.8 | | | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.Q5_K_M.gguf) | Q5_K_M | 4.9 | | | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.Q6_K.gguf) | Q6_K | 5.6 | very good quality | | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.Q8_0.gguf) | Q8_0 | 7.3 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/canarim-7b-vestibulaide-GGUF/resolve/main/canarim-7b-vestibulaide.f16.gguf) | f16 | 13.6 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF
mradermacher
"2024-11-27T11:07:12Z"
19
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "endpoints_compatible", "region:us", "imatrix" ]
null
"2024-11-27T10:27:47Z"
--- base_model: MrRobotoAI/Freyja-v4.95-Dark-Planet-7b-NON-FICTION language: - en library_name: transformers quantized_by: mradermacher tags: - mergekit - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/MrRobotoAI/Freyja-v4.95-Dark-Planet-7b-NON-FICTION <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | 
[GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q4_0_4_4.gguf) | i1-Q4_0_4_4 | 4.8 | fast on arm, low quality | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q4_0_4_8.gguf) | i1-Q4_0_4_8 | 4.8 | fast on arm+i8mm, low quality | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q4_0_8_8.gguf) | i1-Q4_0_8_8 | 4.8 | fast on arm+sve, low quality | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-i1-GGUF/resolve/main/Freyja-v4.95-Dark-Planet-7b-NON-FICTION.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his private supercomputer, enabling me to provide many more imatrix quants, at much higher quality, than I would otherwise be able to. <!-- end -->
danielhanchen/merged_model4_slow
danielhanchen
"2024-01-18T06:56:42Z"
12
0
transformers
[ "transformers", "pytorch", "mistral", "text-generation", "unsloth", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "bitsandbytes", "region:us" ]
text-generation
"2024-01-18T02:45:03Z"
--- library_name: transformers tags: - unsloth --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
mradermacher/t0-s1-1.5B-GGUF
mradermacher
"2025-04-04T16:06:33Z"
0
0
transformers
[ "transformers", "gguf", "generated_from_trainer", "trl", "sft", "en", "base_model:TomasLaz/t0-s1-1.5B", "base_model:quantized:TomasLaz/t0-s1-1.5B", "endpoints_compatible", "region:us", "conversational" ]
null
"2025-04-04T15:57:41Z"
MayBashendy/ArabicNewSplits7_B_usingWellWrittenEssays_FineTuningAraBERT_run2_AugV5_k8_task7_organization
MayBashendy
"2025-01-22T16:53:53Z"
10
0
transformers
[ "transformers", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:aubmindlab/bert-base-arabertv02", "base_model:finetune:aubmindlab/bert-base-arabertv02", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2025-01-19T23:18:26Z"
--- library_name: transformers base_model: aubmindlab/bert-base-arabertv02 tags: - generated_from_trainer model-index: - name: ArabicNewSplits7_B_usingWellWrittenEssays_FineTuningAraBERT_run2_AugV5_k8_task7_organization results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ArabicNewSplits7_B_usingWellWrittenEssays_FineTuningAraBERT_run2_AugV5_k8_task7_organization This model is a fine-tuned version of [aubmindlab/bert-base-arabertv02](https://huggingface.co/aubmindlab/bert-base-arabertv02) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5840 - Qwk: 0.3524 - Mse: 0.5840 - Rmse: 0.7642 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Qwk | Mse | Rmse | |:-------------:|:-------:|:----:|:---------------:|:-------:|:------:|:------:| | No log | 0.0476 | 2 | 2.8360 | -0.0627 | 2.8360 | 1.6841 | | No log | 0.0952 | 4 | 1.6383 | 0.0261 | 1.6383 | 1.2800 | | No log | 0.1429 | 6 | 1.2038 | -0.1514 | 1.2038 | 1.0972 | | No log | 0.1905 | 8 | 0.9158 | 0.0968 | 0.9158 | 0.9570 | | No log | 0.2381 | 10 | 0.9623 | 0.1332 | 0.9623 | 0.9810 | | No log | 0.2857 | 12 | 0.8605 | 0.1007 | 0.8605 | 0.9276 | | No log | 0.3333 | 14 | 0.8787 | 0.0717 | 0.8787 | 0.9374 | | No log | 0.3810 | 16 | 0.8975 | -0.0079 | 0.8975 | 0.9473 | | No log | 0.4286 | 18 | 0.8247 | 0.0393 | 0.8247 | 0.9081 | | No log | 0.4762 | 20 | 0.7550 | 0.0027 | 0.7550 | 0.8689 | | No log | 0.5238 | 22 | 0.7564 | 0.0495 | 0.7564 | 0.8697 | | No log | 0.5714 | 24 | 0.7249 | 0.0 | 0.7249 | 0.8514 | | No log | 0.6190 | 26 | 0.8592 | 0.1766 | 0.8592 | 0.9270 | | No log | 0.6667 | 28 | 0.9644 | 0.2012 | 0.9644 | 0.9821 | | No log | 0.7143 | 30 | 1.1450 | 0.1501 | 1.1450 | 1.0700 | | No log | 0.7619 | 32 | 1.1038 | 0.1542 | 1.1038 | 1.0506 | | No log | 0.8095 | 34 | 0.9923 | 0.2012 | 0.9923 | 0.9961 | | No log | 0.8571 | 36 | 0.9557 | 0.0448 | 0.9557 | 0.9776 | | No log | 0.9048 | 38 | 0.9692 | 0.0851 | 0.9692 | 0.9845 | | No log | 0.9524 | 40 | 1.0380 | 0.0584 | 1.0380 | 1.0188 | | No log | 1.0 | 42 | 1.0891 | 0.1264 | 1.0891 | 1.0436 | | No log | 1.0476 | 44 | 1.0669 | 0.1362 | 1.0669 | 1.0329 | | No log | 1.0952 | 46 | 1.0229 | 0.1718 | 1.0229 | 1.0114 | | No log | 1.1429 | 48 | 0.9118 | 0.1504 | 0.9118 | 0.9549 | | No log | 1.1905 | 50 | 0.8214 | -0.0127 | 0.8214 | 0.9063 | | No log | 1.2381 | 52 | 0.8166 | -0.0483 | 0.8166 | 0.9037 | | No log | 1.2857 | 54 | 0.8501 | 0.0208 | 0.8501 | 0.9220 | | No log | 1.3333 | 56 | 0.8582 | 0.1050 | 0.8582 | 0.9264 | | No log | 1.3810 | 58 | 0.8269 | 0.1850 | 0.8269 | 0.9093 | | No log | 1.4286 | 60 | 0.8017 | 0.1633 | 0.8017 | 0.8954 | | No log | 1.4762 | 62 | 0.8080 | 0.1700 | 0.8080 | 0.8989 | | No log | 1.5238 | 64 | 0.7868 | 0.1310 | 0.7868 | 0.8870 | | No log | 1.5714 | 66 | 0.7587 | 0.1479 | 0.7587 | 0.8710 | | No log | 1.6190 | 68 | 0.7754 | 0.1353 | 0.7754 | 0.8805 | | No log | 1.6667 | 70 | 0.8520 | 0.1550 | 0.8520 | 0.9230 | | No log | 1.7143 | 72 | 
0.9532 | 0.1293 | 0.9532 | 0.9763 | | No log | 1.7619 | 74 | 1.0015 | 0.2702 | 1.0015 | 1.0007 | | No log | 1.8095 | 76 | 1.1290 | 0.2443 | 1.1290 | 1.0625 | | No log | 1.8571 | 78 | 1.4203 | 0.2810 | 1.4203 | 1.1918 | | No log | 1.9048 | 80 | 1.3892 | 0.2601 | 1.3892 | 1.1786 | | No log | 1.9524 | 82 | 0.9387 | 0.3280 | 0.9387 | 0.9689 | | No log | 2.0 | 84 | 0.8713 | 0.2328 | 0.8713 | 0.9334 | | No log | 2.0476 | 86 | 0.8464 | 0.2045 | 0.8464 | 0.9200 | | No log | 2.0952 | 88 | 0.8686 | 0.1850 | 0.8686 | 0.9320 | | No log | 2.1429 | 90 | 0.8289 | 0.1347 | 0.8289 | 0.9105 | | No log | 2.1905 | 92 | 0.8693 | 0.2516 | 0.8693 | 0.9324 | | No log | 2.2381 | 94 | 0.9231 | 0.1846 | 0.9231 | 0.9608 | | No log | 2.2857 | 96 | 0.8946 | 0.2777 | 0.8946 | 0.9458 | | No log | 2.3333 | 98 | 0.9172 | 0.2291 | 0.9172 | 0.9577 | | No log | 2.3810 | 100 | 0.9208 | 0.2221 | 0.9208 | 0.9596 | | No log | 2.4286 | 102 | 0.9154 | 0.2888 | 0.9154 | 0.9568 | | No log | 2.4762 | 104 | 0.9202 | 0.2555 | 0.9202 | 0.9593 | | No log | 2.5238 | 106 | 0.9269 | 0.2201 | 0.9269 | 0.9627 | | No log | 2.5714 | 108 | 0.8702 | 0.2528 | 0.8702 | 0.9329 | | No log | 2.6190 | 110 | 0.8127 | 0.3161 | 0.8127 | 0.9015 | | No log | 2.6667 | 112 | 0.8233 | 0.2400 | 0.8233 | 0.9073 | | No log | 2.7143 | 114 | 0.8911 | 0.2605 | 0.8911 | 0.9440 | | No log | 2.7619 | 116 | 0.8922 | 0.2140 | 0.8922 | 0.9446 | | No log | 2.8095 | 118 | 0.8934 | 0.1773 | 0.8934 | 0.9452 | | No log | 2.8571 | 120 | 0.8713 | 0.0787 | 0.8713 | 0.9334 | | No log | 2.9048 | 122 | 0.8995 | 0.1103 | 0.8995 | 0.9484 | | No log | 2.9524 | 124 | 0.9379 | 0.1362 | 0.9379 | 0.9685 | | No log | 3.0 | 126 | 0.9176 | 0.1683 | 0.9176 | 0.9579 | | No log | 3.0476 | 128 | 0.8777 | 0.1260 | 0.8777 | 0.9369 | | No log | 3.0952 | 130 | 0.8467 | 0.1009 | 0.8467 | 0.9201 | | No log | 3.1429 | 132 | 0.8517 | 0.1303 | 0.8517 | 0.9229 | | No log | 3.1905 | 134 | 0.9440 | 0.1403 | 0.9440 | 0.9716 | | No log | 3.2381 | 136 | 1.0252 | 0.1623 | 1.0252 | 1.0125 | | No log | 3.2857 | 138 | 1.0165 | 0.2220 | 1.0165 | 1.0082 | | No log | 3.3333 | 140 | 0.9558 | 0.2328 | 0.9558 | 0.9777 | | No log | 3.3810 | 142 | 0.9678 | 0.2328 | 0.9678 | 0.9838 | | No log | 3.4286 | 144 | 0.9745 | 0.1534 | 0.9745 | 0.9872 | | No log | 3.4762 | 146 | 0.9714 | 0.2387 | 0.9714 | 0.9856 | | No log | 3.5238 | 148 | 0.9398 | 0.2754 | 0.9398 | 0.9694 | | No log | 3.5714 | 150 | 0.9437 | 0.2832 | 0.9437 | 0.9714 | | No log | 3.6190 | 152 | 0.9002 | 0.2547 | 0.9002 | 0.9488 | | No log | 3.6667 | 154 | 0.8907 | 0.2547 | 0.8907 | 0.9438 | | No log | 3.7143 | 156 | 0.9227 | 0.3417 | 0.9227 | 0.9606 | | No log | 3.7619 | 158 | 0.8940 | 0.1694 | 0.8940 | 0.9455 | | No log | 3.8095 | 160 | 0.9198 | 0.1403 | 0.9198 | 0.9591 | | No log | 3.8571 | 162 | 0.9420 | 0.1640 | 0.9420 | 0.9706 | | No log | 3.9048 | 164 | 0.9917 | 0.2912 | 0.9917 | 0.9958 | | No log | 3.9524 | 166 | 1.0126 | 0.2294 | 1.0126 | 1.0063 | | No log | 4.0 | 168 | 0.9348 | 0.2669 | 0.9348 | 0.9669 | | No log | 4.0476 | 170 | 0.8756 | 0.1213 | 0.8756 | 0.9357 | | No log | 4.0952 | 172 | 0.8633 | 0.1733 | 0.8633 | 0.9291 | | No log | 4.1429 | 174 | 0.8497 | 0.1331 | 0.8497 | 0.9218 | | No log | 4.1905 | 176 | 0.8471 | 0.1219 | 0.8471 | 0.9204 | | No log | 4.2381 | 178 | 0.8845 | 0.2183 | 0.8845 | 0.9405 | | No log | 4.2857 | 180 | 0.9553 | 0.3183 | 0.9553 | 0.9774 | | No log | 4.3333 | 182 | 0.9164 | 0.2751 | 0.9164 | 0.9573 | | No log | 4.3810 | 184 | 0.8172 | 0.2440 | 0.8172 | 0.9040 | | No log | 4.4286 | 186 | 0.7147 | 0.1918 | 0.7147 | 0.8454 | | No 
log | 4.4762 | 188 | 0.7236 | 0.2227 | 0.7236 | 0.8507 | | No log | 4.5238 | 190 | 0.7713 | 0.2383 | 0.7713 | 0.8782 | | No log | 4.5714 | 192 | 0.9549 | 0.3417 | 0.9549 | 0.9772 | | No log | 4.6190 | 194 | 1.0363 | 0.2578 | 1.0363 | 1.0180 | | No log | 4.6667 | 196 | 0.9384 | 0.3012 | 0.9384 | 0.9687 | | No log | 4.7143 | 198 | 0.7913 | 0.2691 | 0.7913 | 0.8895 | | No log | 4.7619 | 200 | 0.7156 | 0.2043 | 0.7156 | 0.8459 | | No log | 4.8095 | 202 | 0.7091 | 0.2043 | 0.7091 | 0.8421 | | No log | 4.8571 | 204 | 0.7469 | 0.2389 | 0.7469 | 0.8642 | | No log | 4.9048 | 206 | 0.8214 | 0.3544 | 0.8214 | 0.9063 | | No log | 4.9524 | 208 | 0.8407 | 0.3909 | 0.8407 | 0.9169 | | No log | 5.0 | 210 | 0.8227 | 0.3909 | 0.8227 | 0.9070 | | No log | 5.0476 | 212 | 0.7761 | 0.3525 | 0.7761 | 0.8810 | | No log | 5.0952 | 214 | 0.7804 | 0.1331 | 0.7804 | 0.8834 | | No log | 5.1429 | 216 | 0.7907 | 0.1289 | 0.7907 | 0.8892 | | No log | 5.1905 | 218 | 0.7938 | 0.2526 | 0.7938 | 0.8910 | | No log | 5.2381 | 220 | 0.7920 | 0.2577 | 0.7920 | 0.8899 | | No log | 5.2857 | 222 | 0.7873 | 0.2577 | 0.7873 | 0.8873 | | No log | 5.3333 | 224 | 0.8002 | 0.2577 | 0.8002 | 0.8945 | | No log | 5.3810 | 226 | 0.8206 | 0.3393 | 0.8206 | 0.9059 | | No log | 5.4286 | 228 | 0.8058 | 0.2835 | 0.8058 | 0.8976 | | No log | 5.4762 | 230 | 0.8114 | 0.2835 | 0.8114 | 0.9008 | | No log | 5.5238 | 232 | 0.7921 | 0.2109 | 0.7921 | 0.8900 | | No log | 5.5714 | 234 | 0.7738 | 0.1367 | 0.7738 | 0.8796 | | No log | 5.6190 | 236 | 0.7900 | 0.1741 | 0.7900 | 0.8888 | | No log | 5.6667 | 238 | 0.7683 | 0.1803 | 0.7683 | 0.8765 | | No log | 5.7143 | 240 | 0.7361 | 0.2746 | 0.7361 | 0.8580 | | No log | 5.7619 | 242 | 0.7626 | 0.3085 | 0.7626 | 0.8733 | | No log | 5.8095 | 244 | 0.8650 | 0.3889 | 0.8650 | 0.9301 | | No log | 5.8571 | 246 | 0.8537 | 0.3909 | 0.8537 | 0.9239 | | No log | 5.9048 | 248 | 0.7644 | 0.3798 | 0.7644 | 0.8743 | | No log | 5.9524 | 250 | 0.6858 | 0.3569 | 0.6858 | 0.8281 | | No log | 6.0 | 252 | 0.6763 | 0.3569 | 0.6763 | 0.8223 | | No log | 6.0476 | 254 | 0.6661 | 0.3355 | 0.6661 | 0.8161 | | No log | 6.0952 | 256 | 0.7016 | 0.3131 | 0.7016 | 0.8376 | | No log | 6.1429 | 258 | 0.7766 | 0.3562 | 0.7766 | 0.8813 | | No log | 6.1905 | 260 | 0.7546 | 0.3699 | 0.7546 | 0.8687 | | No log | 6.2381 | 262 | 0.7220 | 0.2440 | 0.7220 | 0.8497 | | No log | 6.2857 | 264 | 0.7747 | 0.3384 | 0.7747 | 0.8802 | | No log | 6.3333 | 266 | 0.7792 | 0.3450 | 0.7792 | 0.8827 | | No log | 6.3810 | 268 | 0.7122 | 0.2817 | 0.7122 | 0.8439 | | No log | 6.4286 | 270 | 0.7049 | 0.3060 | 0.7049 | 0.8396 | | No log | 6.4762 | 272 | 0.7197 | 0.3918 | 0.7197 | 0.8483 | | No log | 6.5238 | 274 | 0.7403 | 0.4072 | 0.7403 | 0.8604 | | No log | 6.5714 | 276 | 0.7387 | 0.4072 | 0.7387 | 0.8595 | | No log | 6.6190 | 278 | 0.6772 | 0.3985 | 0.6772 | 0.8229 | | No log | 6.6667 | 280 | 0.6093 | 0.4206 | 0.6093 | 0.7806 | | No log | 6.7143 | 282 | 0.6078 | 0.3837 | 0.6078 | 0.7796 | | No log | 6.7619 | 284 | 0.6442 | 0.3841 | 0.6442 | 0.8026 | | No log | 6.8095 | 286 | 0.7361 | 0.4023 | 0.7361 | 0.8579 | | No log | 6.8571 | 288 | 0.6914 | 0.3866 | 0.6914 | 0.8315 | | No log | 6.9048 | 290 | 0.6334 | 0.3788 | 0.6334 | 0.7959 | | No log | 6.9524 | 292 | 0.6535 | 0.3498 | 0.6535 | 0.8084 | | No log | 7.0 | 294 | 0.7721 | 0.4243 | 0.7721 | 0.8787 | | No log | 7.0476 | 296 | 0.8117 | 0.4243 | 0.8117 | 0.9009 | | No log | 7.0952 | 298 | 0.7343 | 0.4522 | 0.7343 | 0.8569 | | No log | 7.1429 | 300 | 0.6608 | 0.3224 | 0.6608 | 0.8129 | | No log | 7.1905 | 302 | 0.6725 | 
0.3498 | 0.6725 | 0.8201 | | No log | 7.2381 | 304 | 0.7414 | 0.3746 | 0.7414 | 0.8610 | | No log | 7.2857 | 306 | 0.9149 | 0.3521 | 0.9149 | 0.9565 | | No log | 7.3333 | 308 | 0.9353 | 0.3012 | 0.9353 | 0.9671 | | No log | 7.3810 | 310 | 0.8170 | 0.3653 | 0.8170 | 0.9039 | | No log | 7.4286 | 312 | 0.7363 | 0.3088 | 0.7363 | 0.8581 | | No log | 7.4762 | 314 | 0.7426 | 0.3088 | 0.7426 | 0.8617 | | No log | 7.5238 | 316 | 0.7760 | 0.3368 | 0.7760 | 0.8809 | | No log | 7.5714 | 318 | 0.7436 | 0.3209 | 0.7436 | 0.8623 | | No log | 7.6190 | 320 | 0.7220 | 0.3352 | 0.7220 | 0.8497 | | No log | 7.6667 | 322 | 0.7385 | 0.2958 | 0.7385 | 0.8593 | | No log | 7.7143 | 324 | 0.7569 | 0.3225 | 0.7569 | 0.8700 | | No log | 7.7619 | 326 | 0.7665 | 0.2973 | 0.7665 | 0.8755 | | No log | 7.8095 | 328 | 0.7640 | 0.2973 | 0.7640 | 0.8741 | | No log | 7.8571 | 330 | 0.7742 | 0.1356 | 0.7742 | 0.8799 | | No log | 7.9048 | 332 | 0.7654 | 0.2519 | 0.7654 | 0.8748 | | No log | 7.9524 | 334 | 0.7392 | 0.2138 | 0.7392 | 0.8598 | | No log | 8.0 | 336 | 0.7196 | 0.2484 | 0.7196 | 0.8483 | | No log | 8.0476 | 338 | 0.7011 | 0.2715 | 0.7011 | 0.8373 | | No log | 8.0952 | 340 | 0.6938 | 0.3235 | 0.6938 | 0.8329 | | No log | 8.1429 | 342 | 0.7145 | 0.3183 | 0.7145 | 0.8453 | | No log | 8.1905 | 344 | 0.7003 | 0.3221 | 0.7003 | 0.8368 | | No log | 8.2381 | 346 | 0.6726 | 0.3498 | 0.6726 | 0.8201 | | No log | 8.2857 | 348 | 0.6946 | 0.3590 | 0.6946 | 0.8334 | | No log | 8.3333 | 350 | 0.6799 | 0.2943 | 0.6799 | 0.8246 | | No log | 8.3810 | 352 | 0.6958 | 0.3942 | 0.6958 | 0.8341 | | No log | 8.4286 | 354 | 0.7004 | 0.3662 | 0.7004 | 0.8369 | | No log | 8.4762 | 356 | 0.6541 | 0.2943 | 0.6541 | 0.8088 | | No log | 8.5238 | 358 | 0.6378 | 0.2936 | 0.6378 | 0.7986 | | No log | 8.5714 | 360 | 0.6206 | 0.3651 | 0.6206 | 0.7878 | | No log | 8.6190 | 362 | 0.6353 | 0.3399 | 0.6353 | 0.7970 | | No log | 8.6667 | 364 | 0.6698 | 0.4197 | 0.6698 | 0.8184 | | No log | 8.7143 | 366 | 0.6446 | 0.4089 | 0.6446 | 0.8028 | | No log | 8.7619 | 368 | 0.5811 | 0.4270 | 0.5811 | 0.7623 | | No log | 8.8095 | 370 | 0.5966 | 0.4270 | 0.5966 | 0.7724 | | No log | 8.8571 | 372 | 0.7146 | 0.4296 | 0.7146 | 0.8453 | | No log | 8.9048 | 374 | 0.8070 | 0.4142 | 0.8070 | 0.8984 | | No log | 8.9524 | 376 | 0.7164 | 0.4539 | 0.7164 | 0.8464 | | No log | 9.0 | 378 | 0.6707 | 0.4144 | 0.6707 | 0.8190 | | No log | 9.0476 | 380 | 0.6080 | 0.4464 | 0.6080 | 0.7798 | | No log | 9.0952 | 382 | 0.5842 | 0.4855 | 0.5842 | 0.7643 | | No log | 9.1429 | 384 | 0.6173 | 0.3843 | 0.6173 | 0.7857 | | No log | 9.1905 | 386 | 0.7038 | 0.4349 | 0.7038 | 0.8389 | | No log | 9.2381 | 388 | 0.8150 | 0.4494 | 0.8150 | 0.9028 | | No log | 9.2857 | 390 | 0.8992 | 0.4168 | 0.8992 | 0.9482 | | No log | 9.3333 | 392 | 0.7898 | 0.4161 | 0.7898 | 0.8887 | | No log | 9.3810 | 394 | 0.6617 | 0.4502 | 0.6617 | 0.8135 | | No log | 9.4286 | 396 | 0.6102 | 0.4020 | 0.6102 | 0.7812 | | No log | 9.4762 | 398 | 0.6221 | 0.4020 | 0.6221 | 0.7887 | | No log | 9.5238 | 400 | 0.6886 | 0.3822 | 0.6886 | 0.8298 | | No log | 9.5714 | 402 | 0.6776 | 0.4072 | 0.6776 | 0.8232 | | No log | 9.6190 | 404 | 0.6184 | 0.4597 | 0.6184 | 0.7864 | | No log | 9.6667 | 406 | 0.5666 | 0.4044 | 0.5666 | 0.7527 | | No log | 9.7143 | 408 | 0.5666 | 0.3599 | 0.5666 | 0.7527 | | No log | 9.7619 | 410 | 0.5568 | 0.3675 | 0.5568 | 0.7462 | | No log | 9.8095 | 412 | 0.5740 | 0.4044 | 0.5740 | 0.7577 | | No log | 9.8571 | 414 | 0.6143 | 0.3662 | 0.6143 | 0.7837 | | No log | 9.9048 | 416 | 0.6133 | 0.3866 | 0.6133 | 0.7831 | 
| No log | 9.9524 | 418 | 0.5848 | 0.3866 | 0.5848 | 0.7647 | | No log | 10.0 | 420 | 0.5921 | 0.3866 | 0.5921 | 0.7695 | | No log | 10.0476 | 422 | 0.5764 | 0.3817 | 0.5764 | 0.7592 | | No log | 10.0952 | 424 | 0.5739 | 0.4451 | 0.5739 | 0.7576 | | No log | 10.1429 | 426 | 0.5701 | 0.5123 | 0.5701 | 0.7550 | | No log | 10.1905 | 428 | 0.5535 | 0.4681 | 0.5535 | 0.7439 | | No log | 10.2381 | 430 | 0.5455 | 0.4820 | 0.5455 | 0.7386 | | No log | 10.2857 | 432 | 0.5444 | 0.4820 | 0.5444 | 0.7378 | | No log | 10.3333 | 434 | 0.5573 | 0.5141 | 0.5573 | 0.7465 | | No log | 10.3810 | 436 | 0.5754 | 0.4724 | 0.5754 | 0.7585 | | No log | 10.4286 | 438 | 0.5553 | 0.5195 | 0.5553 | 0.7452 | | No log | 10.4762 | 440 | 0.5802 | 0.4788 | 0.5802 | 0.7617 | | No log | 10.5238 | 442 | 0.6273 | 0.3699 | 0.6273 | 0.7920 | | No log | 10.5714 | 444 | 0.6996 | 0.3456 | 0.6996 | 0.8364 | | No log | 10.6190 | 446 | 0.6590 | 0.3699 | 0.6590 | 0.8118 | | No log | 10.6667 | 448 | 0.5819 | 0.4867 | 0.5819 | 0.7628 | | No log | 10.7143 | 450 | 0.5776 | 0.4867 | 0.5776 | 0.7600 | | No log | 10.7619 | 452 | 0.5702 | 0.4867 | 0.5702 | 0.7551 | | No log | 10.8095 | 454 | 0.5799 | 0.4330 | 0.5799 | 0.7615 | | No log | 10.8571 | 456 | 0.5881 | 0.4158 | 0.5881 | 0.7669 | | No log | 10.9048 | 458 | 0.6232 | 0.3302 | 0.6232 | 0.7894 | | No log | 10.9524 | 460 | 0.6780 | 0.3940 | 0.6780 | 0.8234 | | No log | 11.0 | 462 | 0.7066 | 0.3754 | 0.7066 | 0.8406 | | No log | 11.0476 | 464 | 0.6904 | 0.3754 | 0.6904 | 0.8309 | | No log | 11.0952 | 466 | 0.6271 | 0.3329 | 0.6271 | 0.7919 | | No log | 11.1429 | 468 | 0.5999 | 0.3572 | 0.5999 | 0.7746 | | No log | 11.1905 | 470 | 0.5981 | 0.3865 | 0.5981 | 0.7733 | | No log | 11.2381 | 472 | 0.6162 | 0.3688 | 0.6162 | 0.7850 | | No log | 11.2857 | 474 | 0.6162 | 0.3688 | 0.6162 | 0.7850 | | No log | 11.3333 | 476 | 0.6011 | 0.3788 | 0.6011 | 0.7753 | | No log | 11.3810 | 478 | 0.5981 | 0.3914 | 0.5981 | 0.7734 | | No log | 11.4286 | 480 | 0.5947 | 0.3914 | 0.5947 | 0.7712 | | No log | 11.4762 | 482 | 0.5706 | 0.4763 | 0.5706 | 0.7554 | | No log | 11.5238 | 484 | 0.5618 | 0.4441 | 0.5618 | 0.7495 | | No log | 11.5714 | 486 | 0.5527 | 0.4441 | 0.5527 | 0.7434 | | No log | 11.6190 | 488 | 0.5489 | 0.4966 | 0.5489 | 0.7409 | | No log | 11.6667 | 490 | 0.5444 | 0.4740 | 0.5444 | 0.7378 | | No log | 11.7143 | 492 | 0.5496 | 0.5373 | 0.5496 | 0.7414 | | No log | 11.7619 | 494 | 0.5758 | 0.4702 | 0.5758 | 0.7588 | | No log | 11.8095 | 496 | 0.6342 | 0.4556 | 0.6342 | 0.7963 | | No log | 11.8571 | 498 | 0.6732 | 0.4610 | 0.6732 | 0.8205 | | 0.3557 | 11.9048 | 500 | 0.6677 | 0.4614 | 0.6677 | 0.8171 | | 0.3557 | 11.9524 | 502 | 0.5782 | 0.4165 | 0.5782 | 0.7604 | | 0.3557 | 12.0 | 504 | 0.5256 | 0.5208 | 0.5256 | 0.7250 | | 0.3557 | 12.0476 | 506 | 0.5342 | 0.5457 | 0.5342 | 0.7309 | | 0.3557 | 12.0952 | 508 | 0.5514 | 0.5617 | 0.5514 | 0.7425 | | 0.3557 | 12.1429 | 510 | 0.5660 | 0.4955 | 0.5660 | 0.7523 | | 0.3557 | 12.1905 | 512 | 0.5919 | 0.3613 | 0.5919 | 0.7693 | | 0.3557 | 12.2381 | 514 | 0.6137 | 0.3353 | 0.6137 | 0.7834 | | 0.3557 | 12.2857 | 516 | 0.6553 | 0.4385 | 0.6553 | 0.8095 | | 0.3557 | 12.3333 | 518 | 0.6285 | 0.3955 | 0.6285 | 0.7928 | | 0.3557 | 12.3810 | 520 | 0.5840 | 0.3524 | 0.5840 | 0.7642 | ### Framework versions - Transformers 4.44.2 - Pytorch 2.4.0+cu118 - Datasets 2.21.0 - Tokenizers 0.19.1
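For readers who want to try the checkpoint, here is a minimal inference sketch with `transformers`; it assumes the model exposes a single regression-style score (consistent with the MSE/RMSE metrics above), which is an assumption rather than something the card states.

```python
# Minimal sketch: score an Arabic essay for organization with this checkpoint.
# Assumes a single regression head (consistent with the MSE/Qwk metrics above).
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo = "MayBashendy/ArabicNewSplits7_B_usingWellWrittenEssays_FineTuningAraBERT_run2_AugV5_k8_task7_organization"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

inputs = tokenizer("ضع نص المقال هنا", return_tensors="pt", truncation=True)
with torch.no_grad():
    score = model(**inputs).logits.squeeze().item()
print(f"predicted organization score: {score:.3f}")
```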
Tina94/jirnuxa
Tina94
"2025-02-21T11:28:41Z"
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
"2025-02-21T11:13:52Z"
--- license: other license_name: flux-1-dev-non-commercial-license license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md language: - en tags: - flux - diffusers - lora - replicate base_model: "black-forest-labs/FLUX.1-dev" pipeline_tag: text-to-image # widget: # - text: >- # prompt # output: # url: https://... instance_prompt: jirnaya --- # Jirnuxa <Gallery /> Trained on Replicate using: https://replicate.com/ostris/flux-dev-lora-trainer/train ## Trigger words You should use `jirnaya` to trigger the image generation. ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda') pipeline.load_lora_weights('Tina94/jirnuxa', weight_name='lora.safetensors') image = pipeline('your prompt').images[0] ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
Zionamsalem/poca-SoccerTwos
Zionamsalem
"2025-02-21T09:21:51Z"
0
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "SoccerTwos", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-SoccerTwos", "region:us" ]
reinforcement-learning
"2025-02-21T09:18:52Z"
--- library_name: ml-agents tags: - SoccerTwos - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial to learn how to train your first agent using ML-Agents and publish it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. If the environment is part of the official ML-Agents environments, go to https://huggingface.co/unity 2. Find your model_id: Zionamsalem/poca-SoccerTwos 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
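If you just want to poke at the exported policy outside Unity, the sketch below inspects it with `onnxruntime`; the file name is an assumption (point it at the `.onnx` file shipped in this repo), and the observation shapes should be read from the session inputs rather than guessed.

```python
# Minimal sketch: inspect the exported SoccerTwos policy with onnxruntime.
# The file name is an assumption; use the .onnx file from this repo.
import onnxruntime as ort

session = ort.InferenceSession("SoccerTwos.onnx")
for inp in session.get_inputs():
    print("input :", inp.name, inp.shape, inp.type)
for out in session.get_outputs():
    print("output:", out.name, out.shape, out.type)
```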
philip-hightech/d4859415-5ea8-4d04-80f6-2f2af9e73b01
philip-hightech
"2025-02-06T06:28:44Z"
9
0
peft
[ "peft", "safetensors", "qwen2", "axolotl", "generated_from_trainer", "base_model:Qwen/Qwen1.5-14B-Chat", "base_model:adapter:Qwen/Qwen1.5-14B-Chat", "license:other", "region:us" ]
null
"2025-02-06T05:09:56Z"
--- library_name: peft license: other base_model: Qwen/Qwen1.5-14B-Chat tags: - axolotl - generated_from_trainer model-index: - name: d4859415-5ea8-4d04-80f6-2f2af9e73b01 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl) # d4859415-5ea8-4d04-80f6-2f2af9e73b01 This model is a fine-tuned version of [Qwen/Qwen1.5-14B-Chat](https://huggingface.co/Qwen/Qwen1.5-14B-Chat) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.7213 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ### Framework versions - PEFT 0.13.2 - Transformers 4.46.0 - Pytorch 2.5.0+cu124 - Datasets 3.0.1 - Tokenizers 0.20.1
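Since the card itself is sparse, here is a hedged loading sketch: it assumes this repo holds a LoRA adapter for the stated base model (which the `peft` and `adapter` tags suggest), not a standalone checkpoint.

```python
# Minimal sketch: attach this adapter to its Qwen1.5-14B-Chat base.
# Assumes the repo holds a PEFT (LoRA) adapter, as the tags suggest.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen1.5-14B-Chat", device_map="auto"
)
model = PeftModel.from_pretrained(
    base, "philip-hightech/d4859415-5ea8-4d04-80f6-2f2af9e73b01"
)
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-14B-Chat")
```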
lesso08/58a08634-3ef5-400c-8ec3-d67969d4173f
lesso08
"2025-01-14T11:24:59Z"
6
0
peft
[ "peft", "safetensors", "gemma", "axolotl", "generated_from_trainer", "base_model:unsloth/gemma-2b-it", "base_model:adapter:unsloth/gemma-2b-it", "license:apache-2.0", "8-bit", "bitsandbytes", "region:us" ]
null
"2025-01-14T11:20:48Z"
--- library_name: peft license: apache-2.0 base_model: unsloth/gemma-2b-it tags: - axolotl - generated_from_trainer model-index: - name: 58a08634-3ef5-400c-8ec3-d67969d4173f results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl) <details><summary>See axolotl config</summary> axolotl version: `0.4.1` ```yaml adapter: lora base_model: unsloth/gemma-2b-it bf16: true chat_template: llama3 datasets: - data_files: - d71d21b0c1dc78d9_train_data.json ds_type: json format: custom path: /workspace/input_data/d71d21b0c1dc78d9_train_data.json type: field_instruction: title_main field_output: texte format: '{instruction}' no_input_format: '{instruction}' system_format: '{system}' system_prompt: '' debug: null deepspeed: null early_stopping_patience: 2 eval_max_new_tokens: 128 eval_steps: 5 eval_table_size: null flash_attention: false fp16: false fsdp: null fsdp_config: null gradient_accumulation_steps: 4 gradient_checkpointing: false group_by_length: false hub_model_id: lesso08/58a08634-3ef5-400c-8ec3-d67969d4173f hub_repo: null hub_strategy: checkpoint hub_token: null learning_rate: 0.0002 load_in_4bit: false load_in_8bit: true local_rank: null logging_steps: 1 lora_alpha: 16 lora_dropout: 0.05 lora_fan_in_fan_out: null lora_model_dir: null lora_r: 8 lora_target_linear: true lr_scheduler: cosine max_steps: 25 micro_batch_size: 2 mlflow_experiment_name: /tmp/d71d21b0c1dc78d9_train_data.json model_type: AutoModelForCausalLM num_epochs: 1 optimizer: adamw_bnb_8bit output_dir: miner_id_24 pad_to_sequence_len: true resume_from_checkpoint: null s2_attention: null sample_packing: false save_steps: 10 sequence_len: 512 strict: false tf32: false tokenizer_type: AutoTokenizer train_on_inputs: false trust_remote_code: true val_set_size: 0.05 wandb_entity: null wandb_mode: online wandb_name: 1a13c584-2c0f-4894-90e9-456573ef78f0 wandb_project: Gradients-On-Demand wandb_run: your_name wandb_runid: 1a13c584-2c0f-4894-90e9-456573ef78f0 warmup_steps: 10 weight_decay: 0.0 xformers_attention: null ``` </details><br> # 58a08634-3ef5-400c-8ec3-d67969d4173f This model is a fine-tuned version of [unsloth/gemma-2b-it](https://huggingface.co/unsloth/gemma-2b-it) on the None dataset. 
It achieves the following results on the evaluation set: - Loss: 2.1738 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 8 - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 10 - training_steps: 25 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 3.3213 | 0.0032 | 1 | 3.3454 | | 3.4797 | 0.0160 | 5 | 3.1154 | | 2.4829 | 0.0319 | 10 | 2.5214 | | 2.0018 | 0.0479 | 15 | 2.2897 | | 2.0587 | 0.0638 | 20 | 2.1934 | | 2.2271 | 0.0798 | 25 | 2.1738 | ### Framework versions - PEFT 0.13.2 - Transformers 4.46.0 - Pytorch 2.5.0+cu124 - Datasets 3.0.1 - Tokenizers 0.20.1
e-n-v-y/envy-oil-pastel-xl-01
e-n-v-y
"2023-11-14T22:52:45Z"
30
4
diffusers
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "template:sd-lora", "pastel", "style", "media", "crayon", "oil pastel", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:other", "region:us" ]
text-to-image
"2023-11-14T22:52:44Z"
--- license: other tags: - text-to-image - stable-diffusion - lora - diffusers - template:sd-lora - pastel - style - media - crayon - oil pastel base_model: stabilityai/stable-diffusion-xl-base-1.0 instance_prompt: oil pastel widget: - text: 'digital oil pastel on canvas, saturated professional pigments, ape' output: url: >- 3630371.jpeg - text: 'digital oil pastel on violet paper, vibrant professional pigments, Glowing Fungi Forest' output: url: >- 3630347.jpeg - text: 'digital oil pastel on canvas, toothless twinkfeeps, summer color scheme' output: url: >- 3630346.jpeg - text: 'digital oil pastel, pointillist abstract digital painting, shiny neon forest green, Modern Brazil style, masterpiece' output: url: >- 3630348.jpeg - text: 'digital oil pastel on canvas, vibrant professional pigments, amazing,mysterious fantasy city beyond the end of reality, autumn color scheme, masterpiece' output: url: >- 3630349.jpeg - text: 'oil pastel on peach paper, Sunlit Equatorial Zone, winter color scheme, masterpiece' output: url: >- 3630350.jpeg - text: 'oil pastel on canvas, garden variety twinklewobble' output: url: >- 3630356.jpeg - text: 'digital oil pastel, great fantasy city at the beginning of the universe, masterpiece' output: url: >- 3630359.jpeg - text: 'digital oil pastel, vivid professional pigments, gargantuan,mysterious scifi metropolis at the beginning of the multiverse' output: url: >- 3630363.jpeg - text: 'digital oil pastel, vivid professional pigments, cat' output: url: >- 3630366.jpeg --- # Envy Oil Pastel XL 01 <Gallery /> <p>Makes images in oil pastel style.</p> ## Image examples for the model: ![Image 1](3630347.jpeg) > digital oil pastel on violet paper, vibrant professional pigments, Glowing Fungi Forest ![Image 2](3630346.jpeg) > digital oil pastel on canvas, toothless twinkfeeps, summer color scheme ![Image 3](3630348.jpeg) > digital oil pastel, pointillist abstract digital painting, shiny neon forest green, Modern Brazil style, masterpiece ![Image 4](3630349.jpeg) > digital oil pastel on canvas, vibrant professional pigments, amazing,mysterious fantasy city beyond the end of reality, autumn color scheme, masterpiece ![Image 5](3630350.jpeg) > oil pastel on peach paper, Sunlit Equatorial Zone, winter color scheme, masterpiece ![Image 6](3630356.jpeg) > oil pastel on canvas, garden variety twinklewobble ![Image 7](3630359.jpeg) > digital oil pastel, great fantasy city at the beginning of the universe, masterpiece ![Image 8](3630363.jpeg) > digital oil pastel, vivid professional pigments, gargantuan,mysterious scifi metropolis at the beginning of the multiverse ![Image 9](3630366.jpeg) > digital oil pastel, vivid professional pigments, cat
Shinichie/Dev1Back_ref_k2
Shinichie
"2025-03-11T10:33:14Z"
0
0
null
[ "safetensors", "any-to-any", "omega", "omegalabs", "bittensor", "agi", "license:mit", "region:us" ]
any-to-any
"2025-03-11T10:31:04Z"
--- license: mit tags: - any-to-any - omega - omegalabs - bittensor - agi --- This is an Any-to-Any model checkpoint for the OMEGA Labs x Bittensor Any-to-Any subnet. Check out the [git repo](https://github.com/omegalabsinc/omegalabs-anytoany-bittensor) and find OMEGA on X: [@omegalabsai](https://x.com/omegalabsai).
anikur93/ppo-Huggy
anikur93
"2023-12-21T18:34:48Z"
0
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "Huggy", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Huggy", "region:us" ]
reinforcement-learning
"2023-12-21T18:34:43Z"
--- library_name: ml-agents tags: - Huggy - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We wrote a complete tutorial to learn how to train your first agent using ML-Agents and publish it to the Hub: - A *short tutorial* where you teach Huggy the Dog 🐶 to fetch the stick and then play with him directly in your browser: https://huggingface.co/learn/deep-rl-course/unitbonus1/introduction - A *longer tutorial* to understand how ML-Agents works: https://huggingface.co/learn/deep-rl-course/unit5/introduction ### Resume the training ```bash mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. If the environment is part of the official ML-Agents environments, go to https://huggingface.co/unity 2. Find your model_id: anikur93/ppo-Huggy 3. Select your *.nn / *.onnx file 4. Click on Watch the agent play 👀
openclimatefix/pvnet_uk_region
openclimatefix
"2025-04-09T16:10:04Z"
21,979
1
pytorch
[ "pytorch", "en", "license:mit", "region:us" ]
null
"2024-04-16T10:13:40Z"
ibrahimbukhariLingua/llama3.1-8b-Instruct-s2s-v1
ibrahimbukhariLingua
"2025-04-04T09:29:18Z"
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "conversational", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2025-04-04T09:25:51Z"
abrahimzaman360/qCivilama
abrahimzaman360
"2023-10-10T09:00:10Z"
0
0
peft
[ "peft", "region:us" ]
null
"2023-10-09T18:09:31Z"
--- library_name: peft --- ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.5.0
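For context, the 4-bit settings listed above map onto the current `transformers` API roughly as follows; this is a sketch of the equivalent configuration, not code recovered from the original training run.

```python
# Minimal sketch: the 4-bit settings above expressed as a modern
# transformers BitsAndBytesConfig (equivalent intent, not the original code).
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,
)
# Pass it as: AutoModelForCausalLM.from_pretrained(..., quantization_config=bnb_config)
```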
mradermacher/orca_mini_v5_8b-i1-GGUF
mradermacher
"2025-01-20T04:00:11Z"
592
0
transformers
[ "transformers", "gguf", "en", "base_model:pankajmathur/orca_mini_v5_8b", "base_model:quantized:pankajmathur/orca_mini_v5_8b", "license:llama3", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
"2025-01-20T03:01:52Z"
--- base_model: pankajmathur/orca_mini_v5_8b language: - en library_name: transformers license: llama3 quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/pankajmathur/orca_mini_v5_8b <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/orca_mini_v5_8b-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q2_K_S.gguf) | i1-Q2_K_S | 3.1 | very low quality | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-IQ4_NL.gguf) | i1-IQ4_NL | 4.8 | prefer IQ4_XS | | 
[GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q4_1.gguf) | i1-Q4_1 | 5.2 | | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/orca_mini_v5_8b-i1-GGUF/resolve/main/orca_mini_v5_8b.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his private supercomputer, enabling me to provide many more imatrix quants, at much higher quality, than I would otherwise be able to. <!-- end -->
ffffacsimile/coLoRA
ffffacsimile
"2025-03-03T01:16:06Z"
0
0
diffusers
[ "diffusers", "text-to-image", "lora", "template:diffusion-lora", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:unknown", "region:us" ]
text-to-image
"2025-03-03T01:16:06Z"
--- tags: - text-to-image - lora - diffusers - template:diffusion-lora widget: - text: '-' output: url: images/IMG_8895.png - text: '-' output: url: images/IMG_8896.png - text: '-' output: url: images/IMG_8569.png - text: '-' output: url: images/IMG_8551.png base_model: black-forest-labs/FLUX.1-dev instance_prompt: cL2oo2 license: unknown --- # coLoRA <Gallery /> ## Trigger words You should use `cL2oo2` to trigger the image generation. ## Download model Weights for this model are available in Safetensors format. [Download](/ffffacsimile/coLoRA/tree/main) them in the Files & versions tab.
rupeshs/SDXL-Lightning-2steps
rupeshs
"2024-03-02T14:14:46Z"
208
2
diffusers
[ "diffusers", "safetensors", "en", "license:openrail++", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2024-03-02T13:53:46Z"
--- license: openrail++ language: - en library_name: diffusers --- This is the 2-step SDXL Lightning model from the original repo https://huggingface.co/ByteDance/SDXL-Lightning
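A minimal usage sketch follows; the trailing-timesteps Euler scheduler and `guidance_scale=0` follow the upstream SDXL-Lightning recommendations, and the prompt is only an example.

```python
# Minimal sketch: 2-step inference with this SDXL-Lightning checkpoint.
# Scheduler and guidance settings follow the upstream SDXL-Lightning guidance.
import torch
from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler

pipe = StableDiffusionXLPipeline.from_pretrained(
    "rupeshs/SDXL-Lightning-2steps", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing"
)
image = pipe("a photo of a cat", num_inference_steps=2, guidance_scale=0).images[0]
image.save("out.png")
```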
Anakin3001/Anisha
Anakin3001
"2025-01-29T19:31:24Z"
19
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
"2025-01-29T19:13:32Z"
--- license: other license_name: flux-1-dev-non-commercial-license license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md language: - en tags: - flux - diffusers - lora - replicate base_model: "black-forest-labs/FLUX.1-dev" pipeline_tag: text-to-image # widget: # - text: >- # prompt # output: # url: https://... instance_prompt: Anisha --- # Anisha <Gallery /> Trained on Replicate using: https://replicate.com/ostris/flux-dev-lora-trainer/train ## Trigger words You should use `Anisha` to trigger the image generation. ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda') pipeline.load_lora_weights('Anakin3001/Anisha', weight_name='lora.safetensors') image = pipeline('your prompt').images[0] ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
John6666/obsession-illustriousxl-v20-sdxl
John6666
"2024-12-23T06:40:02Z"
244
0
diffusers
[ "diffusers", "safetensors", "text-to-image", "stable-diffusion", "stable-diffusion-xl", "anime", "girls", "illustrious", "en", "base_model:Laxhar/noob_sdxl_v_pred_test", "base_model:finetune:Laxhar/noob_sdxl_v_pred_test", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2024-10-20T05:40:06Z"
--- license: other license_name: faipl-1.0-sd license_link: https://freedevproject.org/faipl-1.0-sd/ language: - en library_name: diffusers pipeline_tag: text-to-image tags: - text-to-image - stable-diffusion - stable-diffusion-xl - anime - girls - illustrious base_model: Laxhar/noob_sdxl_v_pred --- Original model is [here](https://civitai.com/models/820208/obsession-illustrious-xl?modelVersionId=974379). This model was created by [rqdwdw](https://civitai.com/user/rqdwdw).
eugene-yang/colbertx-xlmr-large-tt-eng.fas
eugene-yang
"2023-07-10T03:47:02Z"
32
0
transformers
[ "transformers", "pytorch", "xlm-roberta", "en", "zh", "arxiv:2201.08471", "license:mit", "endpoints_compatible", "region:us" ]
null
"2023-07-10T03:41:30Z"
--- license: mit language: - en - zh task_categories: - text-retrieval task_ids: - passage-retrieval - cross-language-retrieval --- Model trained by [Suraj Nair](https://srnair.netlify.app/). If you use the model, please cite our paper. ```bibtex @inproceedings{colbert-x, author = {Suraj Nair and Eugene Yang and Dawn Lawrie and Kevin Duh and Paul McNamee and Kenton Murray and James Mayfield and Douglas W. Oard}, title = {Transfer Learning Approaches for Building Cross-Language Dense Retrieval Models}, booktitle = {Proceedings of the 44th European Conference on Information Retrieval (ECIR)}, year = {2022}, url = {https://arxiv.org/abs/2201.08471} } ```
PrunaAI/cognitivecomputations-dolphin-2.0-mistral-7b-bnb-8bit-smashed
PrunaAI
"2025-02-27T10:42:09Z"
0
0
null
[ "safetensors", "mistral", "pruna-ai", "8-bit", "bitsandbytes", "region:us" ]
null
"2025-02-27T10:34:30Z"
--- thumbnail: "https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg" base_model: ORIGINAL_REPO_NAME metrics: - memory_disk - memory_inference - inference_latency - inference_throughput - inference_CO2_emissions - inference_energy_consumption tags: - pruna-ai --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <a href="https://www.pruna.ai/" target="_blank" rel="noopener noreferrer"> <img src="https://i.imgur.com/eDAlcgk.png" alt="PrunaAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </a> </div> <!-- header end --> [![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI) [![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI) [![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following) [![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/rskEr4BZJx) # Simply make AI models cheaper, smaller, faster, and greener! - Give a thumbs up if you like this model! - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact). - Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai). - Read the documentations to know more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/) - Join Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help. ## Results ![image info](./plots.png) **Frequently Asked Questions** - ***How does the compression work?*** The model is compressed with llm-int8. - ***How does the model quality change?*** The quality of the model output might vary compared to the base model. - ***How is the model efficiency evaluated?*** These results were obtained with configuration described in `model/smash_config.json` and are obtained after a hardware warmup. The smashed model is directly compared to the original base model. Efficiency results may vary in other settings (e.g. other hardware, image size, batch size, ...). We recommend to directly run them in the use-case conditions to know if the smashed model can benefit you. - ***What is the model format?*** We use safetensors. - ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data. - ***What is the naming convention for Pruna Huggingface models?*** We take the original model name and append "turbo", "tiny", or "green" if the smashed model has a measured inference speed, inference memory, or inference energy consumption which is less than 90% of the original base model. - ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai). - ***What are "first" metrics?*** Results mentioning "first" are obtained after the first run of the model. The first run might take more memory or be slower than the subsequent runs due cuda overheads. - ***What are "Sync" and "Async" metrics?*** "Sync" metrics are obtained by syncing all GPU processes and stop measurement when all of them are executed. 
"Async" metrics are obtained without syncing all GPU processes and stop when the model output can be used by the CPU. We provide both metrics since both could be relevant depending on the use-case. We recommend to test the efficiency gains directly in your use-cases. ## Setup You can run the smashed model with these steps: 0. Check requirements from the original repo ORIGINAL_REPO_NAME installed. In particular, check python, cuda, and transformers versions. 1. Make sure that you have installed quantization related packages. ```bash pip install transformers accelerate bitsandbytes>0.37.0 ``` 2. Load & run the model. ```python from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("PrunaAI/cognitivecomputations-dolphin-2.0-mistral-7b-bnb-8bit-smashed", trust_remote_code=True, device_map='auto') tokenizer = AutoTokenizer.from_pretrained("ORIGINAL_REPO_NAME") input_ids = tokenizer("What is the color of prunes?,", return_tensors='pt').to(model.device)["input_ids"] outputs = model.generate(input_ids, max_new_tokens=216) tokenizer.decode(outputs[0]) ``` ## Configurations The configuration info are in `smash_config.json`. ## Credits & License The license of the smashed model follows the license of the original model. Please check the license of the original model ORIGINAL_REPO_NAME before using this model which provided the base model. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on Pypi. ## Want to compress other models? - Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact). - Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
philip-hightech/0cc82ad4-0d33-4e96-b081-4e710b1c3c95
philip-hightech
"2025-01-27T04:32:44Z"
6
0
peft
[ "peft", "safetensors", "gpt_neox", "axolotl", "generated_from_trainer", "base_model:EleutherAI/pythia-1b", "base_model:adapter:EleutherAI/pythia-1b", "license:apache-2.0", "region:us" ]
null
"2025-01-27T04:31:18Z"
--- library_name: peft license: apache-2.0 base_model: EleutherAI/pythia-1b tags: - axolotl - generated_from_trainer model-index: - name: 0cc82ad4-0d33-4e96-b081-4e710b1c3c95 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl) <details><summary>See axolotl config</summary> axolotl version: `0.4.1` ```yaml adapter: lora base_model: EleutherAI/pythia-1b bf16: auto chat_template: llama3 dataset_prepared_path: null datasets: - data_files: - 19e2f175965ffcc2_train_data.json ds_type: json format: custom path: /workspace/input_data/19e2f175965ffcc2_train_data.json type: field_input: keywords field_instruction: text field_output: paragraph format: '{instruction} {input}' no_input_format: '{instruction}' system_format: '{system}' system_prompt: '' debug: null deepspeed: null early_stopping_patience: null eval_max_new_tokens: 128 eval_table_size: null evals_per_epoch: 4 flash_attention: false fp16: null fsdp: null fsdp_config: null gradient_accumulation_steps: 4 gradient_checkpointing: false group_by_length: false hub_model_id: philip-hightech/0cc82ad4-0d33-4e96-b081-4e710b1c3c95 hub_repo: null hub_strategy: checkpoint hub_token: null learning_rate: 5.0e-05 load_in_4bit: false load_in_8bit: false local_rank: null logging_steps: 1 lora_alpha: 16 lora_dropout: 0.05 lora_fan_in_fan_out: null lora_model_dir: null lora_r: 8 lora_target_linear: true lr_scheduler: cosine max_steps: 50 micro_batch_size: 2 mlflow_experiment_name: /tmp/19e2f175965ffcc2_train_data.json model_type: AutoModelForCausalLM num_epochs: 1 optimizer: adamw_bnb_8bit output_dir: miner_id_24 pad_to_sequence_len: true resume_from_checkpoint: null s2_attention: null sample_packing: false saves_per_epoch: 4 sequence_len: 512 special_tokens: pad_token: <|endoftext|> strict: false tf32: false tokenizer_type: AutoTokenizer train_on_inputs: false trust_remote_code: true val_set_size: 0.05 wandb_entity: null wandb_mode: online wandb_name: 5a328770-6f5a-4f36-9f0d-32b745803dfd wandb_project: Gradients-On-Demand wandb_run: your_name wandb_runid: 5a328770-6f5a-4f36-9f0d-32b745803dfd warmup_steps: 10 weight_decay: 0.0 xformers_attention: null ``` </details><br> # 0cc82ad4-0d33-4e96-b081-4e710b1c3c95 This model is a fine-tuned version of [EleutherAI/pythia-1b](https://huggingface.co/EleutherAI/pythia-1b) on the None dataset. 
It achieves the following results on the evaluation set: - Loss: 0.0154 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 8 - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 10 - training_steps: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 1.3607 | 0.0002 | 1 | 0.3078 | | 0.9286 | 0.0027 | 13 | 0.2108 | | 0.3795 | 0.0053 | 26 | 0.0559 | | 0.0307 | 0.0080 | 39 | 0.0154 | ### Framework versions - PEFT 0.13.2 - Transformers 4.46.0 - Pytorch 2.5.0+cu124 - Datasets 3.0.1 - Tokenizers 0.20.1
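Since this repo stores a LoRA adapter rather than merged weights, inference loads the adapter on top of the pythia-1b base. A minimal sketch with the standard PEFT API (the prompt and generation settings are illustrative, not taken from the training config):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the base model, then attach the LoRA adapter from this repo.
base = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-1b", device_map="auto")
model = PeftModel.from_pretrained(base, "philip-hightech/0cc82ad4-0d33-4e96-b081-4e710b1c3c95")
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-1b")

inputs = tokenizer("Write a paragraph about solar power.", return_tensors="pt").to(base.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```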
huggingtweets/whart31
huggingtweets
"2023-04-03T18:51:22Z"
135
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "huggingtweets", "en", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2023-04-03T18:42:19Z"
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1600568966660952065/5Pi8CRjx_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Willow Hart</div> <div style="text-align: center; font-size: 14px;">@whart31</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Willow Hart. | Data | Willow Hart | | --- | --- | | Tweets downloaded | 3207 | | Retweets | 63 | | Short tweets | 290 | | Tweets kept | 2854 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/kfvmn3e7/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @whart31's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/99156pb3) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/99156pb3/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/whart31') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
jeongmi/solar_insta_upnormal_200
jeongmi
"2024-04-20T14:33:23Z"
0
0
transformers
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
"2024-04-20T14:33:07Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
google/mobilenet_v2_1.0_224
google
"2023-10-31T13:40:16Z"
90,331
23
transformers
[ "transformers", "pytorch", "safetensors", "mobilenet_v2", "image-classification", "vision", "dataset:imagenet-1k", "arxiv:1801.04381", "license:other", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
"2022-11-10T16:04:32Z"
--- license: other tags: - vision - image-classification datasets: - imagenet-1k widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace --- # MobileNet V2 MobileNet V2 model pre-trained on ImageNet-1k at resolution 224x224. It was introduced in [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen. It was first released in [this repository](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet). Disclaimer: The team releasing MobileNet V2 did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description From the [original README](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md): > MobileNets are small, low-latency, low-power models parameterized to meet the resource constraints of a variety of use cases. They can be built upon for classification, detection, embeddings and segmentation similar to how other popular large scale models, such as Inception, are used. MobileNets can be run efficiently on mobile devices [...] MobileNets trade off between latency, size and accuracy while comparing favorably with popular models from the literature. The checkpoints are named **mobilenet\_v2\_*depth*\_*size***, for example **mobilenet\_v2\_1.0\_224**, where **1.0** is the depth multiplier and **224** is the resolution of the input images the model was trained on. ## Intended uses & limitations You can use the raw model for image classification. See the [model hub](https://huggingface.co/models?search=mobilenet_v2) to look for fine-tuned versions on a task that interests you. ### How to use Here is how to use this model to classify an image of the COCO 2017 dataset into one of the 1,000 ImageNet classes: ```python from transformers import AutoImageProcessor, AutoModelForImageClassification from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) preprocessor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") model = AutoModelForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224") inputs = preprocessor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]) ``` Note: This model actually predicts 1001 classes, the 1000 classes from ImageNet plus an extra “background” class (index 0). Currently, both the feature extractor and model support PyTorch. ### BibTeX entry and citation info ```bibtex @inproceedings{mobilenetv22018, title={MobileNetV2: Inverted Residuals and Linear Bottlenecks}, author={Mark Sandler and Andrew Howard and Menglong Zhu and Andrey Zhmoginov and Liang-Chieh Chen}, booktitle={CVPR}, year={2018} } ```
tceron/sentence-transformers-party-similarity-by-party
tceron
"2022-10-17T10:51:08Z"
2
0
transformers
[ "transformers", "pytorch", "license:cc-by-4.0", "endpoints_compatible", "region:us" ]
null
"2022-10-17T10:46:52Z"
--- license: cc-by-4.0 --- More information about the model [in this git repo](https://github.com/tceron/capture_similarity_between_political_parties)
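The card ships without usage code. If the checkpoint follows the standard sentence-transformers layout (an assumption — the linked repo documents the intended loading path), a minimal similarity sketch would look like:

```python
from sentence_transformers import SentenceTransformer, util

# Assumes a sentence-transformers-compatible checkpoint; verify against the project repo.
model = SentenceTransformer("tceron/sentence-transformers-party-similarity-by-party")
embeddings = model.encode(["We will lower taxes.", "Taxes should be reduced."])
print(util.cos_sim(embeddings[0], embeddings[1]))
```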
ReadyArt/The-Omega-Directive-L-70B-v1.0_EXL2_4.0bpw_H8
ReadyArt
"2025-04-14T20:50:33Z"
4
0
null
[ "safetensors", "llama", "nsfw", "explicit", "roleplay", "unaligned", "dangerous", "ERP", "text-generation", "conversational", "en", "base_model:ReadyArt/The-Omega-Directive-L-70B-v1.0", "base_model:quantized:ReadyArt/The-Omega-Directive-L-70B-v1.0", "license:llama3.3", "4-bit", "exl2", "region:us" ]
text-generation
"2025-04-11T13:45:33Z"
atharvpatwardhan/salary-prediction-based-on-skills
atharvpatwardhan
"2025-03-08T00:07:34Z"
0
0
null
[ "region:us" ]
null
"2025-03-08T00:04:28Z"
This is a salary prediction model using random forest regression. It predicts salary based on the following skills vector: ['Education','Adaptability','Computers and information technology','Creativity','Critical and Analytical Thinking','Customer Service','Detail Oriented','Fine Motor Skills','Interpersonal Relations','Leadership','Mathematics','Mechanical','Physical Strength and Stamina','Problem Solving and Decision Making','Project Management','Scientific Skills','Speaking and Listening','Writing and Reading'] The dataset used to train the model was from the US Department of Labor statistics.
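The card does not describe how the model artifact is stored, so the sketch below is hypothetical: it assumes a scikit-learn `RandomForestRegressor` pickled as `model.pkl` (the filename and the skill-score scale are both assumptions) and shows how one 18-value skills vector maps to a prediction:

```python
import pickle

# Hypothetical artifact name; the actual file in this repo may differ.
with open("model.pkl", "rb") as f:
    model = pickle.load(f)

# One score per skill, in the exact order listed above (the 0-1 scale is an assumption).
skills = [[0.8, 0.6, 0.9, 0.4, 0.7, 0.5, 0.6, 0.2, 0.7,
           0.5, 0.8, 0.1, 0.2, 0.7, 0.6, 0.4, 0.7, 0.8]]
print(model.predict(skills))  # predicted salary
```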
ZeyadGamal/ppo-LunarLander-v2
ZeyadGamal
"2024-06-19T13:03:31Z"
1
0
stable-baselines3
[ "stable-baselines3", "LunarLander-v2", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
"2024-06-19T13:01:52Z"
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 269.92 +/- 24.31 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch — the checkpoint filename below is the conventional one and is an assumption; verify it against this repo's file list: ```python from huggingface_sb3 import load_from_hub from stable_baselines3 import PPO checkpoint = load_from_hub(repo_id="ZeyadGamal/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
Skylaude/WizardLM-2-4x7B-MoE-exl2-3_5bpw
Skylaude
"2024-04-18T02:28:29Z"
3
0
transformers
[ "transformers", "safetensors", "mixtral", "text-generation", "MoE", "merge", "mergekit", "Mistral", "Microsoft/WizardLM-2-7B", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "exl2", "region:us" ]
text-generation
"2024-04-17T20:49:22Z"
--- license: apache-2.0 tags: - MoE - merge - mergekit - Mistral - Microsoft/WizardLM-2-7B --- # WizardLM-2-4x7B-MoE-exl2-3_5bpw This is a quantized version of [WizardLM-2-4x7B-MoE](https://huggingface.co/Skylaude/WizardLM-2-4x7B-MoE), an experimental MoE model made with [Mergekit](https://github.com/arcee-ai/mergekit). Quantization was done using version 0.0.18 of [ExLlamaV2](https://github.com/turboderp/exllamav2). Please be sure to set experts per token to 4 for the best results! Context length should be the same as Mistral-7B-Instruct-v0.1 (8k tokens). For instruction templates, Vicuna-v1.1 is recommended. For more information see the [original repository](https://huggingface.co/Skylaude/WizardLM-2-4x7B-MoE).
hatim00101/DeepseekB-LoRA
hatim00101
"2025-02-14T19:36:46Z"
0
0
transformers
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "qwen2", "trl", "en", "base_model:unsloth/DeepSeek-R1-Distill-Qwen-7B", "base_model:finetune:unsloth/DeepSeek-R1-Distill-Qwen-7B", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2025-02-14T19:36:33Z"
--- base_model: unsloth/DeepSeek-R1-Distill-Qwen-7B tags: - text-generation-inference - transformers - unsloth - qwen2 - trl license: apache-2.0 language: - en --- # Uploaded model - **Developed by:** hatim00101 - **License:** apache-2.0 - **Finetuned from model :** unsloth/DeepSeek-R1-Distill-Qwen-7B This qwen2 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
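A minimal loading sketch with Unsloth's `FastLanguageModel`, the same API the model was trained with (the sequence length and 4-bit flag below are illustrative defaults, not values recorded in this card):

```python
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="hatim00101/DeepseekB-LoRA",
    max_seq_length=2048,  # illustrative; adjust to your use case
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # switch to fast inference mode
```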
ArchiveAI/Thespis-CurtainCall-7b-v0.2
ArchiveAI
"2024-02-28T19:17:36Z"
1
0
transformers
[ "transformers", "pytorch", "mistral", "text-generation", "license:cc-by-nc-4.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2024-02-28T19:17:36Z"
--- license: cc-by-nc-4.0 --- This model is the first in a series of experiments to make my models a bit smarter. It's nowhere near done, but my initial testing was good, so I'm uploading it so people can check it out. Datasets Used: * Dolphin * Ultrachat * Capybara * Augmental * ToxicQA * Magiccoder-Evol-Instruct-110k ## Prompt Format: Chat ( The default Ooba template and Silly Tavern Template ) ``` {System Prompt} Username: {Input} BotName: {Response} Username: {Input} BotName: {Response} ``` ## Recommended Silly Tavern Preset -> (Temp: 1.25, MinP: 0.1, RepPen: 1.03) ## Recommended Kobold Horde Preset -> MinP
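A small helper that renders a chat history into the format above — plain Python with placeholder names, shown here only to make the template concrete:

```python
def build_prompt(system_prompt, history, user_msg, username="Username", botname="BotName"):
    """Render past (user, bot) turns plus the new user message into the chat format above."""
    lines = [system_prompt]
    for past_user, past_bot in history:
        lines += [f"{username}: {past_user}", f"{botname}: {past_bot}"]
    lines += [f"{username}: {user_msg}", f"{botname}:"]  # the model continues from here
    return "\n\n".join(lines)

print(build_prompt("You are a playful storyteller.", [("Hi!", "Hello there!")], "Tell me a story."))
```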
solidrust/AlphaMonarch-7B-AWQ
solidrust
"2024-09-03T08:42:21Z"
79
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "lazymergekit", "dpo", "rlhf", "quantized", "4-bit", "AWQ", "autotrain_compatible", "endpoints_compatible", "chatml", "conversational", "en", "base_model:mlabonne/AlphaMonarch-7B", "base_model:quantized:mlabonne/AlphaMonarch-7B", "license:cc-by-nc-4.0", "model-index", "text-generation-inference", "awq", "region:us" ]
text-generation
"2024-03-07T10:05:54Z"
--- base_model: mlabonne/AlphaMonarch-7B dataset: - mlabonne/truthy-dpo-v0.1 - mlabonne/distilabel-intel-orca-dpo-pairs - mlabonne/chatml-OpenHermes2.5-dpo-binarized-alpha inference: false language: - en library_name: transformers license: cc-by-nc-4.0 merged_models: - mlabonne/NeuralMonarch-7B model-index: - name: AlphaMonarch-7B results: - dataset: args: num_few_shot: 25 config: ARC-Challenge name: AI2 Reasoning Challenge (25-Shot) split: test type: ai2_arc metrics: - name: normalized accuracy type: acc_norm value: 73.04 source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/AlphaMonarch-7B task: name: Text Generation type: text-generation - dataset: args: num_few_shot: 10 name: HellaSwag (10-Shot) split: validation type: hellaswag metrics: - name: normalized accuracy type: acc_norm value: 89.18 source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/AlphaMonarch-7B task: name: Text Generation type: text-generation - dataset: args: num_few_shot: 5 config: all name: MMLU (5-Shot) split: test type: cais/mmlu metrics: - name: accuracy type: acc value: 64.4 source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/AlphaMonarch-7B task: name: Text Generation type: text-generation - dataset: args: num_few_shot: 0 config: multiple_choice name: TruthfulQA (0-shot) split: validation type: truthful_qa metrics: - type: mc2 value: 77.91 source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/AlphaMonarch-7B task: name: Text Generation type: text-generation - dataset: args: num_few_shot: 5 config: winogrande_xl name: Winogrande (5-shot) split: validation type: winogrande metrics: - name: accuracy type: acc value: 84.69 source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/AlphaMonarch-7B task: name: Text Generation type: text-generation - dataset: args: num_few_shot: 5 config: main name: GSM8k (5-shot) split: test type: gsm8k metrics: - name: accuracy type: acc value: 66.72 source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/AlphaMonarch-7B task: name: Text Generation type: text-generation model_creator: mlabonne model_name: AlphaMonarch-7B model_type: mistral pipeline_tag: text-generation prompt_template: '<|im_start|>system {system_message}<|im_end|> <|im_start|>user {prompt}<|im_end|> <|im_start|>assistant ' quantized_by: Suparious tags: - merge - lazymergekit - dpo - rlhf - quantized - 4-bit - AWQ - text-generation - autotrain_compatible - endpoints_compatible - chatml --- # mlabonne/AlphaMonarch-7B AWQ - Model creator: [mlabonne](https://huggingface.co/mlabonne) - Original model: [AlphaMonarch-7B](https://huggingface.co/mlabonne/AlphaMonarch-7B) ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/TI7C8F2gk43gmI9U2L0uk.jpeg) ## Model Summary **tl;dr: AlphaMonarch-7B is a new DPO merge that retains all the reasoning abilities of the very best merges and significantly improves its conversational abilities. Kind of the best of both worlds in a 7B model. 
🎉** AlphaMonarch-7B is a DPO fine-tune of [mlabonne/NeuralMonarch-7B](https://huggingface.co/mlabonne/NeuralMonarch-7B/) using the [argilla/OpenHermes2.5-dpo-binarized-alpha](https://huggingface.co/datasets/argilla/OpenHermes2.5-dpo-binarized-alpha) preference dataset. It is based on a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [mlabonne/OmniTruthyBeagle-7B-v0](https://huggingface.co/mlabonne/OmniTruthyBeagle-7B-v0) * [mlabonne/NeuBeagle-7B](https://huggingface.co/mlabonne/NeuBeagle-7B) * [mlabonne/NeuralOmniBeagle-7B](https://huggingface.co/mlabonne/NeuralOmniBeagle-7B) Special thanks to [Jon Durbin](https://huggingface.co/jondurbin), [Intel](https://huggingface.co/Intel), [Argilla](https://huggingface.co/argilla), and [Teknium](https://huggingface.co/teknium) for the preference datasets. **Try the demo**: https://huggingface.co/spaces/mlabonne/AlphaMonarch-7B-GGUF-Chat ## How to use ### Install the necessary packages ```bash pip install --upgrade autoawq autoawq-kernels ``` ### Example Python code ```python from awq import AutoAWQForCausalLM from transformers import AutoTokenizer, TextStreamer model_path = "solidrust/AlphaMonarch-7B-AWQ" system_message = "You are Alpha, incarnated as a powerful AI." # Load model model = AutoAWQForCausalLM.from_quantized(model_path, fuse_layers=True) tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) # Convert prompt to tokens prompt_template = """\ <|im_start|>system {system_message}<|im_end|> <|im_start|>user {prompt}<|im_end|> <|im_start|>assistant""" prompt = "You're standing on the surface of the Earth. "\ "You walk one mile south, one mile west and one mile north. "\ "You end up exactly where you started. Where are you?" tokens = tokenizer(prompt_template.format(system_message=system_message,prompt=prompt), return_tensors='pt').input_ids.cuda() # Generate output generation_output = model.generate(tokens, streamer=streamer, max_new_tokens=512) ``` ### About AWQ AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization. Compared to GPTQ, it offers faster Transformers-based inference with equivalent or better quality than the most commonly used GPTQ settings. AWQ models are currently supported on Linux and Windows, with NVidia GPUs only. macOS users: please use GGUF models instead. It is supported by: - [Text Generation Webui](https://github.com/oobabooga/text-generation-webui) - using Loader: AutoAWQ - [vLLM](https://github.com/vllm-project/vllm) - version 0.2.2 or later for support for all model types. - [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) - [Transformers](https://huggingface.co/docs/transformers) version 4.35.0 and later, from any code or client that supports Transformers - [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) - for use from Python code ## Prompt template: ChatML ```plaintext <|im_start|>system {system_message}<|im_end|> <|im_start|>user {prompt}<|im_end|> <|im_start|>assistant ```
CSY1109/drone_sy_tiny_t3
CSY1109
"2025-03-12T06:50:45Z"
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "whisper", "automatic-speech-recognition", "generated_from_trainer", "en", "base_model:openai/whisper-tiny", "base_model:finetune:openai/whisper-tiny", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
"2025-03-12T06:07:02Z"
--- library_name: transformers language: - en license: apache-2.0 base_model: openai/whisper-tiny tags: - generated_from_trainer metrics: - wer model-index: - name: Drone test En - Siang Yi results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Drone test En - Siang Yi This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on the drone command tsv3 dataset. It achieves the following results on the evaluation set: - Loss: 0.3643 - Wer: 4.5455 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 8 - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.2 - training_steps: 500 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.0002 | 33.4 | 100 | 0.2810 | 4.5455 | | 0.0 | 66.8 | 200 | 0.3568 | 4.5455 | | 0.0 | 100.0 | 300 | 0.3603 | 4.5455 | | 0.0 | 133.4 | 400 | 0.3630 | 4.5455 | | 0.0 | 166.8 | 500 | 0.3643 | 4.5455 | ### Framework versions - Transformers 4.48.3 - Pytorch 2.6.0+cu124 - Datasets 3.2.0 - Tokenizers 0.21.0
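A minimal transcription sketch using the transformers ASR pipeline (the audio path is a placeholder):

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="CSY1109/drone_sy_tiny_t3")
result = asr("path/to/drone_command.wav")  # placeholder path to a local recording
print(result["text"])
```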
bayartsogt/wav2vec2-base-mn-scratch
bayartsogt
"2023-09-02T19:18:22Z"
171
0
transformers
[ "transformers", "pytorch", "tensorboard", "safetensors", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "dataset:common_voice", "license:apache-2.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
"2023-08-03T22:13:00Z"
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-base-mn-scratch results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-base-mn-scratch This model is a fine-tuned version of [bayartsogt/wav2vec2-base-mn-pretrain-42h](https://huggingface.co/bayartsogt/wav2vec2-base-mn-pretrain-42h) on the common_voice dataset. It achieves the following results on the evaluation set: - eval_loss: 2.8294 - eval_wer: 1.0026 - eval_cer: 0.7258 - eval_runtime: 173.2914 - eval_samples_per_second: 10.883 - eval_steps_per_second: 1.362 - epoch: 28.57 - step: 3600 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 10000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
RichardErkhov/Qwen_-_Qwen1.5-4B-4bits
RichardErkhov
"2024-04-30T19:08:40Z"
4
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "arxiv:2309.16609", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "bitsandbytes", "region:us" ]
text-generation
"2024-04-30T18:58:45Z"
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Qwen1.5-4B - bnb 4bits - Model creator: https://huggingface.co/Qwen/ - Original model: https://huggingface.co/Qwen/Qwen1.5-4B/ Original model description: --- license: other license_name: tongyi-qianwen-research license_link: >- https://huggingface.co/Qwen/Qwen1.5-4B/blob/main/LICENSE language: - en pipeline_tag: text-generation tags: - pretrained --- # Qwen1.5-4B ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previously released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in Chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes; * No need of `trust_remote_code`. For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5). ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, we temporarily did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Requirements The code of Qwen1.5 has been merged into the latest Hugging Face transformers and we advise you to install `transformers>=4.37.0`, or you might encounter the following error: ``` KeyError: 'qwen2'. ``` ## Usage We do not advise you to use base language models for text generation. Instead, you can apply post-training, e.g., SFT, RLHF, continued pretraining, etc., on this model. ## Citation If you find our work helpful, feel free to cite us. ``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ```
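Since the 4-bit bitsandbytes configuration is embedded in this checkpoint, loading it should only require a recent transformers plus bitsandbytes install — a minimal sketch (untested against this exact repo):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "RichardErkhov/Qwen_-_Qwen1.5-4B-4bits"
model = AutoModelForCausalLM.from_pretrained(repo, device_map="auto")  # picks up the stored 4-bit config
tokenizer = AutoTokenizer.from_pretrained(repo)

inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=16)[0]))
```

Note that, as the original description says, this is a base model: expect raw continuations rather than chat-style answers.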
MultiBertGunjanPatrick/multiberts-seed-4-20k
MultiBertGunjanPatrick
"2021-10-04T05:09:37Z"
6
0
transformers
[ "transformers", "pytorch", "bert", "pretraining", "exbert", "multiberts", "multiberts-seed-4", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:2106.16163", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2022-03-02T23:29:04Z"
--- language: en tags: - exbert - multiberts - multiberts-seed-4 license: apache-2.0 datasets: - bookcorpus - wikipedia --- # MultiBERTs Seed 4 Checkpoint 20k (uncased) Seed 4 intermediate checkpoint 20k MultiBERTs (pretrained BERT) model on English language using a masked language modeling (MLM) objective. It was introduced in [this paper](https://arxiv.org/pdf/2106.16163.pdf) and first released in [this repository](https://github.com/google-research/language/tree/master/language/multiberts). This is an intermediate checkpoint. The final checkpoint can be found at [multiberts-seed-4](https://hf.co/multiberts-seed-4). This model is uncased: it does not make a difference between english and English. Disclaimer: The team releasing MultiBERTs did not write a model card for this model so this model card has been written by [gchhablani](https://hf.co/gchhablani). ## Model description MultiBERTs models are transformer models pretrained on a large corpus of English data in a self-supervised fashion. This means they were pretrained on the raw texts only, with no humans labelling them in any way (which is why they can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, they were pretrained with two objectives: - Masked language modeling (MLM): taking a sentence, the model randomly masks 15% of the words in the input, then runs the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence. - Next sentence prediction (NSP): the model concatenates two masked sentences as inputs during pretraining. Sometimes they correspond to sentences that were next to each other in the original text, sometimes not. The model then has to predict if the two sentences were following each other or not. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled sentences for instance, you can train a standard classifier using the features produced by the MultiBERTs model as inputs. ## Intended uses & limitations You can use the raw model for either masked language modeling or next sentence prediction, but it's mostly intended to be fine-tuned on a downstream task. See the [model hub](https://huggingface.co/models?filter=multiberts) to look for fine-tuned versions on a task that interests you. Note that this model is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification or question answering. For tasks such as text generation you should look at models like GPT2. ### How to use Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import BertTokenizer, BertModel tokenizer = BertTokenizer.from_pretrained('MultiBertGunjanPatrick/multiberts-seed-4-20k') model = BertModel.from_pretrained("MultiBertGunjanPatrick/multiberts-seed-4-20k") text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` ### Limitations and bias Even if the training data used for this model could be characterized as fairly neutral, this model can have biased predictions.
This bias will also affect all fine-tuned versions of this model. For an understanding of the bias of this particular checkpoint, please try out this checkpoint with the snippet present in the [Limitation and bias section](https://huggingface.co/bert-base-uncased#limitations-and-bias) of the [bert-base-uncased](https://huggingface.co/bert-base-uncased) checkpoint. ## Training data The MultiBERTs models were pretrained on [BookCorpus](https://yknzhu.wixsite.com/mbweb), a dataset consisting of 11,038 unpublished books and [English Wikipedia](https://en.wikipedia.org/wiki/English_Wikipedia) (excluding lists, tables and headers). ## Training procedure ### Preprocessing The texts are lowercased and tokenized using WordPiece and a vocabulary size of 30,000. The inputs of the model are then of the form: ``` [CLS] Sentence A [SEP] Sentence B [SEP] ``` With probability 0.5, sentence A and sentence B correspond to two consecutive sentences in the original corpus and in the other cases, it's another random sentence in the corpus. Note that what is considered a sentence here is a consecutive span of text usually longer than a single sentence. The only constraint is that the result with the two "sentences" has a combined length of less than 512 tokens. The details of the masking procedure for each sentence are the following: - 15% of the tokens are masked. - In 80% of the cases, the masked tokens are replaced by `[MASK]`. - In 10% of the cases, the masked tokens are replaced by a random token (different) from the one they replace. - In the 10% remaining cases, the masked tokens are left as is. ### Pretraining The full model was trained on 16 Cloud TPU v2 chips for two million steps with a batch size of 256. The sequence length was set to 512 throughout. The optimizer used is Adam with a learning rate of 1e-4, \\(\beta_{1} = 0.9\\) and \\(\beta_{2} = 0.999\\), a weight decay of 0.01, learning rate warmup for 10,000 steps and linear decay of the learning rate after. ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-2106-16163, author = {Thibault Sellam and Steve Yadlowsky and Jason Wei and Naomi Saphra and Alexander D'Amour and Tal Linzen and Jasmijn Bastings and Iulia Turc and Jacob Eisenstein and Dipanjan Das and Ian Tenney and Ellie Pavlick}, title = {The MultiBERTs: {BERT} Reproductions for Robustness Analysis}, journal = {CoRR}, volume = {abs/2106.16163}, year = {2021}, url = {https://arxiv.org/abs/2106.16163}, eprinttype = {arXiv}, eprint = {2106.16163}, timestamp = {Mon, 05 Jul 2021 15:15:50 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-2106-16163.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <a href="https://huggingface.co/exbert/?model=multiberts"> <img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png"> </a>
lakieungocthang/Meta-Llama-3-8B-Fine-tuning
lakieungocthang
"2024-05-29T20:51:55Z"
0
0
null
[ "safetensors", "license:mit", "region:us" ]
null
"2024-05-29T12:59:18Z"
--- license: mit --- ![](./images/image-1.jpeg) # Meta-Llama-3-8B-Fine-tuning [Meta](https://www.meta.com/) has developed and launched the Meta Llama 3 family of Large Language Models (LLMs), comprising pretrained and instruction-tuned generative text models in 8 and 70 billion sizes. For details please refer to [Meta Llama 3](https://llama.meta.com/llama3/). All the terms and conditions for using Llama 3 models are provided by Meta, including the [License](https://llama.meta.com/llama3/license/?fbclid=IwZXh0bgNhZW0CMTAAAR1pJ5EPm-GMgFhfFPMKcXxQ7nUBzElDoxkeXqBER-qiAu1hEva_S8tnagY_aem_AYQHpGrQvIH7FuRunDU0qBGNLZrdQfPrBEx5hyWu2UYcf0d5M4-EljlTkxnDCNXdtFcS4hgDmSCfvXdbzZA65bwv), the [Acceptable Use Policy](https://llama.meta.com/llama3/use-policy/?fbclid=IwZXh0bgNhZW0CMTAAAR1POL5AgbdEydJMSXgNp-wS3cZ3wzPwjvHPWmjZ5qhDuDwjFoxOTZQ0Ing_aem_AYRYpm8ASvcnyKx6DH7HSID11XCMGu9JC-gyogADMC3whzqBqMKJhJtYl-MmeuYSHaSyPH1LEeq3ZRurRsTtRvMN), and Meta's [privacy policy](https://www.facebook.com/privacy/policy/). In this project, I fine-tuned Meta-Llama-3-8B to achieve greater efficiency on text generation tasks. However, I did not fine-tune the model directly from Meta's repository [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B); instead, I fine-tuned the quantized version provided by [Unsloth AI](https://unsloth.ai/), [unsloth/llama-3-8b-bnb-4bit](https://huggingface.co/unsloth/llama-3-8b-bnb-4bit), to save computational costs and take advantage of the training tools Unsloth provides. For data, I chose the [nvidia/HelpSteer](https://huggingface.co/datasets/nvidia/HelpSteer) dataset provided by NVIDIA, which contains 37,120 samples. For the step-by-step process, please refer to my notebook [here](https://huggingface.co/lakieungocthang/Meta-Llama-3-8B-Fine-tuning/blob/main/Fine-tune-Meta-Llama-3-8B.ipynb). ![](images/image-2.png) This is the graph obtained after the training process. ## How to use this model Use the following code to run this model. ```python from unsloth import FastLanguageModel max_seq_length = 2048 dtype = None load_in_4bit = True model, tokenizer = FastLanguageModel.from_pretrained( model_name="lakieungocthang/Meta-Llama-3-8B-Fine-tuning", max_seq_length=max_seq_length, dtype=dtype, device_map="auto", load_in_4bit=load_in_4bit, ) FastLanguageModel.for_inference(model) def generate_response_with_question(question, max_tokens=100): prompt = "### Question: {}\n ### Answer: {}" inputs = tokenizer( [prompt.format(question, "")], return_tensors="pt" ).to("cuda") outputs = model.generate(**inputs, max_new_tokens=max_tokens, use_cache=True) decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True) answer_index = decoded_output.rfind("### Answer:") extracted_text = decoded_output[answer_index + len("### Answer:"):].strip() if answer_index != -1 else decoded_output return extracted_text print(generate_response_with_question("Who is Faker?")) ```
kaimi1616/gemma-3-finetune-gguf
kaimi1616
"2025-04-13T19:27:28Z"
0
0
null
[ "region:us" ]
null
"2025-04-13T19:27:28Z"
DiederikMartens/tsBERT_sa_cv_11_fold3
DiederikMartens
"2024-05-28T04:10:07Z"
110
0
transformers
[ "transformers", "tensorboard", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:igorsterner/german-english-code-switching-bert", "base_model:finetune:igorsterner/german-english-code-switching-bert", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2024-05-28T03:56:25Z"
--- license: mit base_model: igorsterner/german-english-code-switching-bert tags: - generated_from_trainer metrics: - f1 model-index: - name: tsBERT_sa_cv_11_fold3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tsBERT_sa_cv_11_fold3 This model is a fine-tuned version of [igorsterner/german-english-code-switching-bert](https://huggingface.co/igorsterner/german-english-code-switching-bert) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3315 - F1: 0.7421 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4.47e-05 - train_batch_size: 16 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | No log | 1.0 | 226 | 0.3621 | 0.5355 | | No log | 2.0 | 452 | 0.3315 | 0.7421 | | 0.3648 | 3.0 | 678 | 0.4242 | 0.7285 | ### Framework versions - Transformers 4.41.0 - Pytorch 2.3.0+cu121 - Datasets 2.19.1 - Tokenizers 0.19.1
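A minimal inference sketch with the text-classification pipeline (the label names depend on the fine-tuning config and are not documented in this card):

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="DiederikMartens/tsBERT_sa_cv_11_fold3")
print(classifier("Das Meeting war super, aber the food was terrible."))  # code-switched example
```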
lesso18/4e8d0fa7-8d8d-481d-a1d3-0a35302749b4
lesso18
"2025-02-15T17:31:34Z"
0
0
peft
[ "peft", "safetensors", "gpt_neox", "axolotl", "generated_from_trainer", "base_model:databricks/dolly-v2-3b", "base_model:adapter:databricks/dolly-v2-3b", "license:mit", "region:us" ]
null
"2025-02-15T17:23:53Z"
--- library_name: peft license: mit base_model: databricks/dolly-v2-3b tags: - axolotl - generated_from_trainer model-index: - name: 4e8d0fa7-8d8d-481d-a1d3-0a35302749b4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl) <br> # 4e8d0fa7-8d8d-481d-a1d3-0a35302749b4 This model is a fine-tuned version of [databricks/dolly-v2-3b](https://huggingface.co/databricks/dolly-v2-3b) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.1510 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.000218 - train_batch_size: 4 - eval_batch_size: 4 - seed: 180 - gradient_accumulation_steps: 2 - total_train_batch_size: 8 - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 50 - training_steps: 500 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | No log | 0.0018 | 1 | 2.8238 | | 3.0346 | 0.0877 | 50 | 1.8678 | | 2.8051 | 0.1754 | 100 | 2.0510 | | 2.6634 | 0.2632 | 150 | 1.7005 | | 2.3163 | 0.3509 | 200 | 1.5016 | | 2.3489 | 0.4386 | 250 | 1.3985 | | 2.3481 | 0.5263 | 300 | 1.3311 | | 2.1877 | 0.6140 | 350 | 1.2562 | | 2.1786 | 0.7018 | 400 | 1.1924 | | 2.2143 | 0.7895 | 450 | 1.1529 | | 2.2231 | 0.8772 | 500 | 1.1510 | ### Framework versions - PEFT 0.13.2 - Transformers 4.46.0 - Pytorch 2.5.0+cu124 - Datasets 3.0.1 - Tokenizers 0.20.1
kpriyanshu256/whisper-large-v2-ga-IE-1000-32-1e-05-cy-multi
kpriyanshu256
"2022-12-20T15:32:54Z"
13
1
transformers
[ "transformers", "pytorch", "tensorboard", "whisper", "automatic-speech-recognition", "whisper-event", "generated_from_trainer", "ga", "dataset:google/fleurs", "dataset:mozilla-foundation/common_voice_11_0", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
"2022-12-20T04:39:18Z"
--- language: - ga license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - google/fleurs - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: whisper-large-v2-Irish results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: ga-IE split: test args: ga-IE metrics: - name: Wer type: wer value: 34.26248548199768 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: FLEURS type: google/fleurs metrics: - name: Wer type: wer value: --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-large-v2-Irish This model is a fine-tuned version of [kpriyanshu256/whisper-large-v2-cy-500-32-1e-05](https://huggingface.co/kpriyanshu256/whisper-large-v2-cy-500-32-1e-05) on the Common Voice 11.0 and the FLEURS datasets. It achieves the following results on the evaluation set: - Loss: 0.7879 - Wer: 34.2625 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - training_steps: 1000 ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.3768 | 0.25 | 250 | 0.7143 | 39.4890 | | 0.1498 | 1.15 | 500 | 0.7663 | 35.8014 | | 0.0907 | 2.05 | 750 | 0.7730 | 35.2497 | | 0.045 | 2.3 | 1000 | 0.7879 | 34.2625 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
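A minimal transcription sketch (not part of the original card; the repo id comes from this entry's header, and the audio path is a stand-in):

```python
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="kpriyanshu256/whisper-large-v2-ga-IE-1000-32-1e-05-cy-multi",
)
# Whisper checkpoints expect 16 kHz audio; the pipeline resamples common formats for you.
print(asr("sample_irish_speech.wav")["text"])
```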
mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF
mradermacher
"2025-03-16T22:25:27Z"
0
0
transformers
[ "transformers", "gguf", "en", "base_model:melhoushi/layerskip-huggingface-smollm2-135m-topv1", "base_model:quantized:melhoushi/layerskip-huggingface-smollm2-135m-topv1", "endpoints_compatible", "region:us" ]
null
"2025-03-16T22:23:56Z"
--- base_model: melhoushi/layerskip-huggingface-smollm2-135m-topv1 language: - en library_name: transformers quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/melhoushi/layerskip-huggingface-smollm2-135m-topv1 <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.Q2_K.gguf) | Q2_K | 0.2 | | | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.Q3_K_S.gguf) | Q3_K_S | 0.2 | | | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.IQ4_XS.gguf) | IQ4_XS | 0.2 | | | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.Q3_K_M.gguf) | Q3_K_M | 0.2 | lower quality | | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.Q3_K_L.gguf) | Q3_K_L | 0.2 | | | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.Q4_K_S.gguf) | Q4_K_S | 0.2 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.Q4_K_M.gguf) | Q4_K_M | 0.2 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.Q5_K_S.gguf) | Q5_K_S | 0.2 | | | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.Q5_K_M.gguf) | Q5_K_M | 0.2 | | | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.Q6_K.gguf) | Q6_K | 0.2 | very good quality | | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.Q8_0.gguf) | Q8_0 | 0.2 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF/resolve/main/layerskip-huggingface-smollm2-135m-topv1.f16.gguf) | f16 | 0.4 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / 
Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
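The card points to TheBloke's READMEs for GGUF usage; as one concrete option, a sketch with `llama-cpp-python` (an assumption on my part -- any GGUF-capable runtime works):

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama  # pip install llama-cpp-python

# Fetch one of the quants listed above; Q4_K_M is the "fast, recommended" middle ground.
path = hf_hub_download(
    repo_id="mradermacher/layerskip-huggingface-smollm2-135m-topv1-GGUF",
    filename="layerskip-huggingface-smollm2-135m-topv1.Q4_K_M.gguf",
)
llm = Llama(model_path=path, n_ctx=2048)
print(llm("Once upon a time", max_tokens=64)["choices"][0]["text"])
```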
damgomz/ft_16_2e6_base_x8
damgomz
"2024-06-22T15:04:07Z"
5
0
transformers
[ "transformers", "safetensors", "albert", "text-classification", "en", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2024-05-21T15:46:08Z"
--- language: en tags: - text-classification pipeline_tag: text-classification widget: - text: GEPS Techno is the pioneer of hybridization of renewable energies at sea. We imagine, design and commercialize innovative off-grid systems that aim to generate power at sea, stabilize and collect data. The success of our low power platforms WAVEPEAL enabled us to scale-up the device up to WAVEGEM, the 150-kW capacity platform. --- ## Environmental Impact (CODE CARBON DEFAULT) | Metric | Value | |--------------------------|---------------------------------| | Duration (in seconds) | 85896.94530534744 | | Emissions (Co2eq in kg) | 0.0519775310247642 | | CPU power (W) | 42.5 | | GPU power (W) | [No GPU] | | RAM power (W) | 3.75 | | CPU energy (kWh) | 1.0140585689990065 | | GPU energy (kWh) | [No GPU] | | RAM energy (kWh) | 0.0894749233846861 | | Consumed energy (kWh) | 1.1035334923836917 | | Country name | Switzerland | | Cloud provider | nan | | Cloud region | nan | | CPU count | 2 | | CPU model | Intel(R) Xeon(R) Platinum 8360Y CPU @ 2.40GHz | | GPU count | nan | | GPU model | nan | ## Environmental Impact (for one core) | Metric | Value | |--------------------------|---------------------------------| | CPU energy (kWh) | 0.16535161971279383 | | Emissions (Co2eq in kg) | 0.033642970244594415 | ## Note 19 June 2024 ## My Config | Config | Value | |--------------------------|-----------------| | checkpoint | albert-base-v2 | | model_name | ft_16_2e6_base_x8 | | sequence_length | 400 | | num_epoch | 6 | | learning_rate | 2e-06 | | batch_size | 16 | | weight_decay | 0.0 | | warm_up_prop | 0.0 | | drop_out_prob | 0.1 | | packing_length | 100 | | train_test_split | 0.2 | | num_steps | 29328 | ## Training and Testing steps Epoch | Train Loss | Test Loss | F-beta Score ---|---|---|--- | 0 | 0.000000 | 0.700751 | 0.821665 | | 1 | 0.389074 | 0.299005 | 0.874187 | | 2 | 0.256889 | 0.255006 | 0.889161 | | 3 | 0.214719 | 0.224857 | 0.903429 | | 4 | 0.186569 | 0.230315 | 0.911765 | | 5 | 0.165293 | 0.232379 | 0.916780 | | 6 | 0.143314 | 0.235971 | 0.919790 |
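For reference, a hedged inference sketch (not from the original card; the repo id comes from this entry's header, and the label meanings are undocumented, so inspect the raw scores):

```python
from transformers import pipeline

# The card does not document the label set, so treat the returned labels as opaque ids.
clf = pipeline("text-classification", model="damgomz/ft_16_2e6_base_x8")
text = "GEPS Techno is the pioneer of hybridization of renewable energies at sea."
print(clf(text, truncation=True, max_length=400))  # matches the config's sequence_length
```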
Fahmula/Llama-3.2-3B-Instruct-Tune
Fahmula
"2025-02-07T16:14:38Z"
22
0
transformers
[ "transformers", "gguf", "llama", "text-generation-inference", "unsloth", "en", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
"2025-02-07T16:13:57Z"
--- base_model: unsloth/llama-3.2-3b-instruct-unsloth-bnb-4bit tags: - text-generation-inference - transformers - unsloth - llama - gguf license: apache-2.0 language: - en --- # Uploaded model - **Developed by:** Fahmula - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3.2-3b-instruct-unsloth-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
collij22/seq-adesplit_FT_BioMedGPT-LM-7B
collij22
"2024-03-02T02:34:50Z"
3
0
transformers
[ "transformers", "safetensors", "llama", "text-classification", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-classification
"2024-03-02T02:32:10Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
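The getting-started section above is empty. Based only on this entry's tags (a LLaMA-architecture sequence classifier), a hedged sketch; the label set and intended inputs are assumptions:

```python
from transformers import pipeline

# Tags mark this as a LLaMA-based sequence classifier (7B -- expect a large download);
# the label set is undocumented, so treat the output labels as opaque ids.
clf = pipeline("text-classification", model="collij22/seq-adesplit_FT_BioMedGPT-LM-7B")
print(clf("The patient developed a rash after the second dose."))
```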
wamegabe/llama-3-2-3B-wame-slm3
wamegabe
"2024-11-29T07:01:49Z"
75
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "conversational", "en", "base_model:unsloth/Llama-3.2-3B-Instruct-bnb-4bit", "base_model:finetune:unsloth/Llama-3.2-3B-Instruct-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2024-11-29T07:00:07Z"
--- base_model: unsloth/Llama-3.2-3B-Instruct-bnb-4bit tags: - text-generation-inference - transformers - unsloth - llama - trl - sft license: apache-2.0 language: - en --- # Uploaded model - **Developed by:** wamegabe - **License:** apache-2.0 - **Finetuned from model :** unsloth/Llama-3.2-3B-Instruct-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf
RichardErkhov
"2025-04-05T11:03:32Z"
0
0
null
[ "gguf", "endpoints_compatible", "region:us" ]
null
"2025-04-05T10:48:52Z"
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) STD-BPE-LLAMA3.2 - GGUF - Model creator: https://huggingface.co/meelu/ - Original model: https://huggingface.co/meelu/STD-BPE-LLAMA3.2/ | Name | Quant method | Size | | ---- | ---- | ---- | | [STD-BPE-LLAMA3.2.Q2_K.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q2_K.gguf) | Q2_K | 0.54GB | | [STD-BPE-LLAMA3.2.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.IQ3_XS.gguf) | IQ3_XS | 0.58GB | | [STD-BPE-LLAMA3.2.IQ3_S.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.IQ3_S.gguf) | IQ3_S | 0.6GB | | [STD-BPE-LLAMA3.2.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q3_K_S.gguf) | Q3_K_S | 0.6GB | | [STD-BPE-LLAMA3.2.IQ3_M.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.IQ3_M.gguf) | IQ3_M | 0.61GB | | [STD-BPE-LLAMA3.2.Q3_K.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q3_K.gguf) | Q3_K | 0.64GB | | [STD-BPE-LLAMA3.2.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q3_K_M.gguf) | Q3_K_M | 0.64GB | | [STD-BPE-LLAMA3.2.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q3_K_L.gguf) | Q3_K_L | 0.68GB | | [STD-BPE-LLAMA3.2.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.IQ4_XS.gguf) | IQ4_XS | 0.7GB | | [STD-BPE-LLAMA3.2.Q4_0.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q4_0.gguf) | Q4_0 | 0.72GB | | [STD-BPE-LLAMA3.2.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.IQ4_NL.gguf) | IQ4_NL | 0.72GB | | [STD-BPE-LLAMA3.2.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q4_K_S.gguf) | Q4_K_S | 0.72GB | | [STD-BPE-LLAMA3.2.Q4_K.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q4_K.gguf) | Q4_K | 0.75GB | | [STD-BPE-LLAMA3.2.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q4_K_M.gguf) | Q4_K_M | 0.75GB | | [STD-BPE-LLAMA3.2.Q4_1.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q4_1.gguf) | Q4_1 | 0.77GB | | [STD-BPE-LLAMA3.2.Q5_0.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q5_0.gguf) | Q5_0 | 0.83GB | | [STD-BPE-LLAMA3.2.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q5_K_S.gguf) | Q5_K_S | 0.83GB | | [STD-BPE-LLAMA3.2.Q5_K.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q5_K.gguf) | Q5_K | 0.85GB | | [STD-BPE-LLAMA3.2.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q5_K_M.gguf) | Q5_K_M | 0.85GB | | [STD-BPE-LLAMA3.2.Q5_1.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q5_1.gguf) | Q5_1 | 0.89GB | | 
[STD-BPE-LLAMA3.2.Q6_K.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q6_K.gguf) | Q6_K | 0.95GB | | [STD-BPE-LLAMA3.2.Q8_0.gguf](https://huggingface.co/RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf/blob/main/STD-BPE-LLAMA3.2.Q8_0.gguf) | Q8_0 | 1.23GB | Original model description: --- library_name: transformers tags: [Danish, BPE Tokenization, LLaMA] --- ### STD-BPE-LLAMA3.2 A standard LLaMA-3.2 model trained with a pretrained Byte-Pair-Encoding (BPE) tokenizer. It provides a benchmark for evaluating the performance of more advanced tokenization approaches.
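As with other GGUF repos, a hedged loading sketch using `llama-cpp-python` (my assumption; any GGUF runtime works, and the quant choice below is one of the mid-size options from the table):

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama  # pip install llama-cpp-python

path = hf_hub_download(
    repo_id="RichardErkhov/meelu_-_STD-BPE-LLAMA3.2-gguf",
    filename="STD-BPE-LLAMA3.2.Q4_K_M.gguf",  # 0.75GB, a reasonable quality/size trade-off
)
llm = Llama(model_path=path, n_ctx=2048)
# The underlying model is Danish, so prompt accordingly.
print(llm("Danmark er", max_tokens=48)["choices"][0]["text"])
```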
ldos/text_shortening_model_v39
ldos
"2023-09-14T02:02:20Z"
3
0
transformers
[ "transformers", "pytorch", "bart", "text2text-generation", "generated_from_trainer", "base_model:facebook/bart-large-xsum", "base_model:finetune:facebook/bart-large-xsum", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
"2023-09-13T19:00:47Z"
--- license: mit base_model: facebook/bart-large-xsum tags: - generated_from_trainer metrics: - rouge model-index: - name: text_shortening_model_v39 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # text_shortening_model_v39 This model is a fine-tuned version of [facebook/bart-large-xsum](https://huggingface.co/facebook/bart-large-xsum) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.8730 - Rouge1: 0.4929 - Rouge2: 0.2546 - Rougel: 0.4351 - Rougelsum: 0.4353 - Bert precision: 0.8698 - Bert recall: 0.8762 - Average word count: 8.8348 - Max word count: 17 - Min word count: 4 - Average token count: 16.5796 - % shortened texts with length > 12: 8.4084 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Bert precision | Bert recall | Average word count | Max word count | Min word count | Average token count | % shortened texts with length > 12 | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:--------------:|:-----------:|:------------------:|:--------------:|:--------------:|:-------------------:|:----------------------------------:| | 0.9582 | 1.0 | 73 | 1.4062 | 0.5229 | 0.2983 | 0.4739 | 0.4738 | 0.875 | 0.8853 | 8.9039 | 17 | 4 | 15.0811 | 9.009 | | 0.5598 | 2.0 | 146 | 1.4819 | 0.5053 | 0.2806 | 0.456 | 0.4561 | 0.8723 | 0.879 | 8.6486 | 14 | 5 | 14.2703 | 1.5015 | | 0.3791 | 3.0 | 219 | 1.7718 | 0.5174 | 0.2882 | 0.4532 | 0.4539 | 0.8705 | 0.8834 | 9.6456 | 18 | 5 | 17.7027 | 16.5165 | | 0.3748 | 4.0 | 292 | 2.1513 | 0.3078 | 0.1184 | 0.2773 | 0.278 | 0.8215 | 0.8336 | 9.5375 | 18 | 4 | 17.1441 | 9.9099 | | 0.2837 | 5.0 | 365 | 1.6757 | 0.4999 | 0.2661 | 0.4487 | 0.4489 | 0.8732 | 0.8766 | 8.3844 | 16 | 4 | 15.1892 | 6.6066 | | 0.1885 | 6.0 | 438 | 1.8005 | 0.4938 | 0.2619 | 0.4437 | 0.4439 | 0.8729 | 0.8763 | 8.5526 | 14 | 5 | 14.994 | 1.5015 | | 0.1799 | 7.0 | 511 | 1.8427 | 0.4986 | 0.2752 | 0.4455 | 0.4463 | 0.8664 | 0.8796 | 9.4384 | 20 | 5 | 15.6697 | 11.4114 | | 0.1638 | 8.0 | 584 | 2.0234 | 0.5206 | 0.2854 | 0.4632 | 0.4642 | 0.8774 | 0.8844 | 9.1682 | 18 | 4 | 16.2132 | 9.9099 | | 0.1247 | 9.0 | 657 | 1.9158 | 0.486 | 0.2628 | 0.4326 | 0.4339 | 0.8707 | 0.8758 | 8.7327 | 17 | 4 | 15.3093 | 6.6066 | | 0.1059 | 10.0 | 730 | 2.2355 | 0.5127 | 0.2825 | 0.4578 | 0.4577 | 0.875 | 0.8827 | 9.045 | 17 | 4 | 16.5586 | 8.7087 | | 0.1104 | 11.0 | 803 | 2.2555 | 0.5095 | 0.2698 | 0.4514 | 0.4511 | 0.8762 | 0.8815 | 8.7928 | 17 | 4 | 16.3123 | 8.7087 | | 0.1196 | 12.0 | 876 | 2.3329 | 0.507 | 0.2692 | 0.453 | 0.454 | 0.8746 | 0.8795 | 8.8228 | 15 | 5 | 16.1862 | 5.4054 | | 0.093 | 13.0 | 949 | 2.2657 | 0.5137 | 0.2748 | 0.4545 | 0.4543 | 0.8733 | 0.8801 | 8.7988 | 16 | 4 | 16.012 | 7.8078 | | 0.0626 | 14.0 | 1022 | 2.5004 | 0.5014 | 0.2677 | 0.4432 | 0.4435 | 0.8725 | 0.8775 | 8.7508 | 16 | 5 | 16.4535 | 6.9069 | | 0.0534 | 15.0 | 1095 | 2.4192 | 0.5031 | 0.27 | 
0.4467 | 0.447 | 0.8711 | 0.8784 | 8.8438 | 19 | 4 | 16.1411 | 9.3093 | | 0.0475 | 16.0 | 1168 | 2.5800 | 0.4891 | 0.2553 | 0.4313 | 0.4315 | 0.8689 | 0.8753 | 8.8408 | 18 | 4 | 16.5045 | 8.7087 | | 0.0399 | 17.0 | 1241 | 2.6858 | 0.5021 | 0.2615 | 0.4452 | 0.445 | 0.8727 | 0.8782 | 8.7808 | 17 | 4 | 16.3844 | 7.2072 | | 0.0296 | 18.0 | 1314 | 2.6646 | 0.4992 | 0.2666 | 0.4466 | 0.4463 | 0.8726 | 0.8764 | 8.5706 | 17 | 4 | 16.1111 | 4.8048 | | 0.0286 | 19.0 | 1387 | 2.7496 | 0.5023 | 0.2648 | 0.4451 | 0.445 | 0.8721 | 0.8781 | 8.7868 | 17 | 4 | 16.3063 | 6.6066 | | 0.026 | 20.0 | 1460 | 2.8730 | 0.4929 | 0.2546 | 0.4351 | 0.4353 | 0.8698 | 0.8762 | 8.8348 | 17 | 4 | 16.5796 | 8.4084 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
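For reference, a minimal usage sketch (not from the original card); the repo id comes from this entry's header, and the length cap reflects the ~9-word average output reported above:

```python
from transformers import pipeline

shortener = pipeline("text2text-generation", model="ldos/text_shortening_model_v39")
text = "Please remember to submit your expense reports before the end of the month."
# Evaluation outputs average about 9 words, so a modest generation cap is enough.
print(shortener(text, max_length=32)[0]["generated_text"])
```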
cosmosai471/LunaAI
cosmosai471
"2025-03-17T13:42:51Z"
21
1
transformers
[ "transformers", "safetensors", "gpt2", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2025-03-16T06:29:07Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
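The getting-started section above is empty. Based only on this entry's tags (a GPT-2 text-generation checkpoint), a hedged sketch; the prompt style is an assumption:

```python
from transformers import pipeline

# Tags mark this as a GPT-2 text-generation checkpoint; behavior is otherwise undocumented.
generator = pipeline("text-generation", model="cosmosai471/LunaAI")
print(generator("Hello Luna,", max_new_tokens=40)[0]["generated_text"])
```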
Bailefan/q-FrozenLake-v1-4x4-noSlippery
Bailefan
"2023-05-19T14:30:02Z"
0
0
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
reinforcement-learning
"2023-05-19T14:29:59Z"
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python
import gym  # classic Gym API, matching the env handling below

# load_from_hub is the small download-and-unpickle helper from the Hugging Face Deep RL course
model = load_from_hub(repo_id="Bailefan/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
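For completeness, a greedy evaluation rollout sketch. It assumes the pickled dict stores the Q-table under a "qtable" key, as in the Hugging Face Deep RL course format -- the card itself does not confirm this:

```python
import numpy as np

state = env.reset()  # classic gym API; with gymnasium use `state, _ = env.reset()`
done = False
total_reward = 0.0
while not done:
    action = int(np.argmax(model["qtable"][state]))  # greedy action from the Q-table
    state, reward, done, info = env.step(action)
    total_reward += reward
print("episode reward:", total_reward)
```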
kajddnnsed/KayessssLoRa
kajddnnsed
"2023-06-14T13:26:34Z"
0
0
null
[ "license:creativeml-openrail-m", "region:us" ]
null
"2023-06-14T13:08:21Z"
--- license: creativeml-openrail-m ---
mjab/setfit-coa-classifier
mjab
"2025-04-11T11:22:57Z"
0
0
setfit
[ "setfit", "safetensors", "bert", "sentence-transformers", "text-classification", "generated_from_setfit_trainer", "arxiv:2209.11055", "base_model:sentence-transformers/all-MiniLM-L6-v2", "base_model:finetune:sentence-transformers/all-MiniLM-L6-v2", "region:us" ]
text-classification
"2025-04-11T11:22:53Z"
--- tags: - setfit - sentence-transformers - text-classification - generated_from_setfit_trainer widget: - text: 'Description: سداد فاتورة 001 - STC رقمها 37001458846 من مصرف الانماء عن طريق E - Channel القنوات الالكترونية وتاريخ الاستحقاق 03/21/2025 5851094777 رقم المرجع FT25080K48DN في 21:51-03/20/2025 المبلغ SAR264.50 سداد | Amount: -264.5 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR' - text: 'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA9180000456608010145075) TURKI في مصرف الراجحي تاريخ الاستحقاق 02/06/2025 رقم المرجع FT25037F9QCH الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -1001.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR' - text: 'Description: التحويل الوارد سريع من بنك الانماء-قسم الحوالات من خلال من خلال البنك الاهلي السعودي تاريخ الاستحقاق 02/19/2025 رقم الايبان SA0210000052400000010602 المحول مها بنت عبدالعزيز بن عبدالله ال سعود المبلغ 18390.00 رقم المرجع FT25050WMZPB رقم الحوالة SA0210000052400000010602 الساعة 1701 الغرض تفاصيل الدفع | Amount: 18390.0 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR' - text: 'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA2880000109608010351351) نايف رباح عوده العبدلي العنزي في مصرف الراجحي تاريخ الاستحقاق 02/06/2025 رقم المرجع FT25037YNCY5 الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -1501.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR' - text: 'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA7480000208608010549855) SULTAN SULAIMAN HAMMAD ALBLAWI في مصرف الراجحي تاريخ الاستحقاق 01/31/2025 رقم المرجع FT2503121GK6 الرسوم SAR0.50 ضريبة القيمة المضافة SAR0.08 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -300.58 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR' metrics: - accuracy pipeline_tag: text-classification library_name: setfit inference: true base_model: sentence-transformers/all-MiniLM-L6-v2 --- # SetFit with sentence-transformers/all-MiniLM-L6-v2 This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. 
## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [sentence-transformers/all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 256 tokens - **Number of Classes:** 21 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:---------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | ASSETS - CASH & BANK ACCOUNTS - PETTY CASH | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA0880000251608016131340) شيماء السيد أبوزيد في مصرف الراجحي تاريخ الاستحقاق 03/06/2025 رقم المرجع FT25065VJDL1 الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -11301.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA0880000251608016131340) شيماء السيد أبوزيد في مصرف الراجحي تاريخ الاستحقاق 03/15/2025 رقم المرجع FT25074TPSHC الرسوم SAR0.50 ضريبة القيمة المضافة SAR0.08 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -18.08 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | ASSETS - CASH & BANK ACCOUNTS - CORPORATE CARDS - CARDS - HALA | <ul><li>'Description: حوالة صادرة سريع من المركز الرئيسى الى (IBAN:) SA4230100949000007113492 في البنك العربي الوطني تاريخ الاستحقاق 01/13/2025 رقم المرجع FT25013J1RDQ بغرض الرسوم SAR5.00 ضريبة القيمة المضافة SAR0.75 سعر الصرف 1.00 بيانات الدفع المبلغ 30000.00 عنوان المستفيد SAFE AND DEPOSIT CLIENTS FUNDS OF HALALAH TRDING CO | Amount: -30005.75 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من المركز الرئيسى الى (IBAN:) SA4230100949000007113492 في البنك العربي 
الوطني تاريخ الاستحقاق 01/22/2025 رقم المرجع FT250222716X بغرض الرسوم SAR7.00 ضريبة القيمة المضافة SAR1.05 سعر الصرف 1.00 بيانات الدفع المبلغ 30000.00 عنوان المستفيد SAFE AND DEPOSIT CLIENTS FUNDS OF HALALAH TRDING CO | Amount: -30008.05 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: Card Funds | Amount: -3000.0 | Bank: HALA | Currency: SAR'</li></ul> | | ASSETS - ACCOUNTS RECEIVABLE - TRADE RECEIVABLES | <ul><li>'Description: 20250228 BNETMRC - Total POS Purchase for Master Card Net for Machine اجمالي موازنة جهاز نقاط البيع رقم 8000179700000000 لشبكة ماستر كارد لتاريخ الاستحقاق الرقم المرجعي | Amount: 4858.0 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: التحويل الوارد سريع من بنك الانماء-قسم الحوالات من خلال من خلال البنك الاهلي السعودي تاريخ الاستحقاق 02/19/2025 رقم الايبان SA0210000052400000010602 المحول مها بنت عبدالعزيز بن عبدالله ال سعود المبلغ 18390.00 رقم المرجع FT25050WMZPB رقم الحوالة SA0210000052400000010602 الساعة 1701 الغرض تفاصيل الدفع | Amount: 18390.0 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: التحويل الوارد سريع من بنك الانماء-قسم الحوالات من خلال من خلال البنك الاهلي السعودي تاريخ الاستحقاق 02/10/2025 رقم الايبان SA3510000074000000206808 المحول مؤسسة المكان لتنظيم الرحلات المبلغ 3050.00 رقم المرجع FT25041CBCFT رقم الحوالة SA3510000074000000206808 الساعة 1944 الغرض تفاصيل الدفع | Amount: 3050.0 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | COST OF GOODS SOLD (COGS) - DIRECT COSTS - COST OF TRANSPORTATION SERVICES - TRANSPORTATION | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA3880000208608010015451) MOASSAT GBL SHERAAN LTNDEAM ALREHLA في مصرف الراجحي تاريخ الاستحقاق 01/18/2025 رقم المرجع FT250187YM4D الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر | Amount: -1001.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA2520000002581747309940) R N B SPECIAL TOURS COMPANY في بنك الرياض تاريخ الاستحقاق 01/18/2025 رقم المرجع FT25018R6CLL الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -1151.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA3880000208608010015451) MOASSAT GBL SHERAAN LTNDEAM ALREHLA في مصرف الراجحي تاريخ الاستحقاق 01/18/2025 رقم المرجع FT25018JHT72 الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر | Amount: -13601.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | COST OF GOODS SOLD (COGS) - DIRECT COSTS - COST OF CLIENT SERVICES - PRIVATE CHARTER | <ul><li>'Description: Global Corporate Charter LLC0 - USD54203.25 تحويل صادر (المركز الرئيسى) من مصرف الانماء السعر 3.759 الرسوم SAR50.00 ضريبة القيمة المضافة SAR7.50 الى Global Corporate Charter LLC (Bank:SW-ALSABHBMXXX)(IBAN:BH89ALSA00177085100102) رقم المرجع FT25019873K5 تاريخ استحقاق 01/19/2025 الغرض من التحويل تسديد فواتير تفاصيل Global Corporate Charter LLC2 الساعة Global Corporate Charter LLC3 | Amount: -203807.5 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: Global Corporate Charter LLC0 - USD26602.82 تحويل صادر (المركز الرئيسى) من مصرف الانماء السعر 3.759 الرسوم SAR50.00 ضريبة القيمة المضافة SAR7.50 الى Global Corporate Charter LLC (Bank:SW-ALSABHBMXXX)(IBAN:BH89ALSA00177085100102) رقم المرجع FT25016XFK04 تاريخ استحقاق 01/16/2025 
الغرض من التحويل تسديد فواتير تفاصيل Global Corporate Charter LLC2 الساعة Global Corporate Charter LLC3 | Amount: -100057.5 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | COST OF GOODS SOLD (COGS) - DIRECT COSTS - EXPERIENCES & EVENTS - EXPERIENCES - STARGAZING | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA6080000354608010052084) الحميدي عبدالرحمن حميد في مصرف الراجحي تاريخ الاستحقاق 01/18/2025 رقم المرجع FT250180PVP2 الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -1901.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA6080000354608010052084) الحميدي عبدالرحمن حميد في مصرف الراجحي تاريخ الاستحقاق 01/23/2025 رقم المرجع FT25023MJ6GQ الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -1901.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA6080000354608010052084) الحميدي عبدالرحمن حميد في مصرف الراجحي تاريخ الاستحقاق 01/23/2025 رقم المرجع FT25023NM0NZ الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -11151.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | COST OF GOODS SOLD (COGS) - DIRECT COSTS - EXPERIENCES & EVENTS - TOUR GUIDE & FREELANCERS - TOUR GUIDES | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA2680000208608010543056) Abdul Rahman bin Muhammad في مصرف الراجحي تاريخ الاستحقاق 01/18/2025 رقم المرجع FT250189H5DV الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -13501.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA0415000608134492640009) SAMEER MOHAMMED NAJI في بنك البلاد تاريخ الاستحقاق 01/18/2025 رقم المرجع FT2501845WVP الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -1514.49 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA6345000000255018590001) ABIR JAMEEL A ABUSULAYMAN في البنك السعودي الأول تاريخ الاستحقاق 01/13/2025 رقم المرجع FT2501322NJ6 الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر | Amount: -4501.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | COST OF GOODS SOLD (COGS) - DIRECT COSTS - EXPERIENCES & EVENTS - TOUR GUIDE & FREELANCERS - FREELANCERS | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA5280000131608016018014) ELFATIH HAMIED MOHAMMED AWAD ELSIED في مصرف الراجحي تاريخ الاستحقاق 01/18/2025 رقم المرجع FT250188G65J الرسوم SAR0.50 ضريبة القيمة المضافة SAR0.08 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر | Amount: -400.58 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA8380000208608010312916) خالد عبدالرحمن محمد العتيق في مصرف الراجحي تاريخ الاستحقاق 01/17/2025 رقم المرجع FT25017FX0BR الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -15001.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف 
الانماء عبر المركز الرئيسى الى (IBAN:SA9080000640608016624708) محمد مطاوع عبد الراضى مطاوع في مصرف الراجحي تاريخ الاستحقاق 01/03/2025 رقم المرجع FT25003SGLPN الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -2814.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | COST OF GOODS SOLD (COGS) - TRAVEL & ENTERTAINMENT - ACCOMMODATION | <ul><li>'Description: حوالة داخلية من الانماء- المركز الرئيسى الى شركة منازل النقاء المحدودة حساب 68203013955002 بمبلغ SAR10867.50 وتاريخ استحقاق العملية 01/18/2025 رقم المرجع FT2501809BMJ 9$رقم هوية المستفيد معرف المستفيد 4030319983 700 عملة 5$وقت 1539 1.00 معدل رقم الهوية طلب هوية العميل الغرض من الدفع بيانات الدفع | Amount: -10867.5 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من المركز الرئيسى الى (IBAN:) SA9210000062500000250104 في البنك الاهلي السعودي تاريخ الاستحقاق 01/10/2025 رقم المرجع FT25010HZ365 بغرض الرسوم SAR5.00 ضريبة القيمة المضافة SAR0.75 سعر الصرف 1.00 بيانات الدفع المبلغ 103965.75 عنوان المستفيد Manazil Wadi Ashar for Hospitality Services LLC | Amount: -103971.5 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة داخلية من الانماء- المركز الرئيسى الى شركة منازل النقاء المحدودة حساب 68203013955002 بمبلغ SAR7728.01 وتاريخ استحقاق العملية 01/09/2025 رقم المرجع FT2500907JPZ 9$رقم هوية المستفيد معرف المستفيد 4030319983 700 عملة 5$وقت 1943 1.00 معدل رقم الهوية طلب هوية العميل الغرض من الدفع بيانات الدفع | Amount: -7728.01 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | COST OF GOODS SOLD (COGS) - TRAVEL & ENTERTAINMENT - F&B | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA6610000010329711000102) نورة عادل محمد في البنك الاهلي السعودي تاريخ الاستحقاق 01/18/2025 رقم المرجع FT25018F0753 الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -1501.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA7360100013395001412001) AJDAF RESTURANT في بنك الجزيرة تاريخ الاستحقاق 01/16/2025 رقم المرجع FT250166BW7J الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -8401.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA1380000578608010088872) Mdar Food Company LTD في مصرف الراجحي تاريخ الاستحقاق 01/16/2025 رقم المرجع FT25016SK02J الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -16561.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | COST OF GOODS SOLD (COGS) - TRAVEL & ENTERTAINMENT - FARM RENTAL | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA4080000150608010381968) مها عبدالحميد سالم الشنينه الرفاعي في مصرف الراجحي تاريخ الاستحقاق 01/16/2025 رقم المرجع FT25016TF1C0 الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر | Amount: -1501.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA4080000150608010381968) مها عبدالحميد سالم الشنينه الرفاعي في مصرف الراجحي تاريخ الاستحقاق 01/23/2025 رقم المرجع FT25023HJ8X9 الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر | Amount: -1501.15 | Bank: 
ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | EXPENSES - OPERATING EXPENSES - SALARIES & WAGES - SALARIES & WAGES | <ul><li>'Description: خصم اجمالي ملف الرواتب من خلال المركز الرئيسى المبلغ SAR 186264.10 العملة الريال تاريخ الاستحقاق 01/22/2025 ، وقت إنشاء العملية 1057 الرقم المرجعي FT25022LMN6G سعر الصرف 1.00 رسوم الإيداع 0.00 الغرض من الإيداع | Amount: -186264.1 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: خصم اجمالي ملف الرواتب من خلال المركز الرئيسى المبلغ SAR 156211.00 العملة الريال تاريخ الاستحقاق 02/20/2025 ، وقت إنشاء العملية 0918 الرقم المرجعي FT250514JNCK سعر الصرف 1.00 رسوم الإيداع 0.00 الغرض من الإيداع | Amount: -156211.0 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: التحويل الوارد سريع من بنك الانماء-قسم الحوالات من خلال من خلال بنك الرياض تاريخ الاستحقاق 02/26/2025 رقم الايبان SA2720000004140077489940 المحول BYOOT COMPANY FOR VACATION RENTALS المبلغ 3500.00 رقم المرجع FT25057YBTZP رقم الحوالة SA2720000004140077489940 الساعة 0032 الغرض تفاصيل الدفع | Amount: 3500.0 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | EXPENSES - OPERATING EXPENSES - SALARIES & WAGES - BONUSES & COMMISSIONS | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA3410000075400003289610) AHMAD NAWABUDDIN في البنك الاهلي السعودي تاريخ الاستحقاق 02/26/2025 رقم المرجع FT25057N8SBK الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -5001.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA6610000012300000999109) عليا ابو السيل في البنك الاهلي السعودي تاريخ الاستحقاق 02/26/2025 رقم المرجع FT250571JGB1 الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -15001.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA7180000991608017257206) ABDALHANNAN - - MOHAMMED في مصرف الراجحي تاريخ الاستحقاق 02/26/2025 رقم المرجع FT250571P2ZG الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -5001.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | EXPENSES - OPERATING EXPENSES - EMPLOYEE BENEFITS - GOSI CONTRIBUTIONS | <ul><li>'Description: سداد فاتورة 060 - The General Organization for Social Insurance رقمها 588756971 من مصرف الانماء عن طريق E - Channel القنوات الالكترونية وتاريخ الاستحقاق 02/06/2025 5770452419 رقم المرجع FT25037LRMJV في 15:47-02/06/2025 المبلغ SAR22452.49 سداد | Amount: -22452.49 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: سداد فاتورة 060 - The General Organization for Social Insurance رقمها 588756971 من مصرف الانماء عن طريق E - Channel القنوات الالكترونية وتاريخ الاستحقاق 03/10/2025 5832803998 رقم المرجع FT250693PW7N في 21:03-03/09/2025 المبلغ SAR19083.30 سداد | Amount: -19083.3 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | EXPENSES - OPERATING EXPENSES - RENT EXPENSE - OFFICE RENT | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA7220000001763516449940) شركة نبضات العقارة في بنك الرياض تاريخ الاستحقاق 01/09/2025 رقم المرجع FT25009XBRLT الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -5291.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة داخلية من الانماء- المركز الرئيسى من شركة بيوت 
روف لتشغيل الفنادق تاريخ الاستحقاق 02/06/2025 رقم المرجع FT25037NNJRN تفاصيل الحوالة SAR228.00 سعر الصرف الغرض من الدفع 1559 الساعة | Amount: 228.0 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة داخلية من الانماء- المركز الرئيسى من شركة بيوت للشقق المفروشة شركة شخص و تاريخ الاستحقاق 02/10/2025 رقم المرجع FT250410CBZV تفاصيل الحوالة SAR152.00 سعر الصرف الغرض من الدفع 2027 الساعة | Amount: 152.0 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | EXPENSES - FINANCIAL EXPENSES - BANK CHARGES | <ul><li>'Description: Payroll Project Deduction of Payroll Fees | Amount: -55.2 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: E-commerce monthly fees fees for JAN-2025 | Amount: -287.5 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: E-commerce monthly fees for DEC 2024 | Amount: -287.5 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | EXPENSES - OTHER EXPENSES - LEGAL FEES | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA0880000251608016131340) شيماء السيد أبوزيد في مصرف الراجحي تاريخ الاستحقاق 01/09/2025 رقم المرجع FT25009YW9KJ الرسوم SAR0.50 ضريبة القيمة المضافة SAR0.08 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -150.58 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA0880000251608016131340) شيماء السيد أبوزيد في مصرف الراجحي تاريخ الاستحقاق 02/06/2025 رقم المرجع FT250375DF0P الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -601.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA0880000251608016131340) شيماء السيد أبوزيد في مصرف الراجحي تاريخ الاستحقاق 02/13/2025 رقم المرجع FT25044W4YM6 الرسوم SAR0.50 ضريبة القيمة المضافة SAR0.08 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر الصرف | Amount: -192.83 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | EXPENSES - OTHER EXPENSES - TELECOMMUNICATION & INTERNET | <ul><li>'Description: سداد فاتورة 001 - STC رقمها 37001461769 من مصرف الانماء عن طريق E - Channel القنوات الالكترونية وتاريخ الاستحقاق 02/06/2025 5770452588 رقم المرجع FT250378Q6X6 في 15:48-02/06/2025 المبلغ SAR156.69 سداد | Amount: -156.69 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: سداد فاتورة 001 - STC رقمها 05208547102 من مصرف الانماء عن طريق E - Channel القنوات الالكترونية وتاريخ الاستحقاق 02/06/2025 5770454581 رقم المرجع FT25037MKL2Z في 15:48-02/06/2025 المبلغ SAR264.50 سداد | Amount: -264.5 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: سداد فاتورة 001 - STC رقمها 37001458843 من مصرف الانماء عن طريق E - Channel القنوات الالكترونية وتاريخ الاستحقاق 02/06/2025 5770452571 رقم المرجع FT25037HFGQB في 15:48-02/06/2025 المبلغ SAR264.50 سداد | Amount: -264.5 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | | EXPENSES - OTHER EXPENSES - MISCELLANEOUS | <ul><li>'Description: حوالة داخلية من الانماء- المركز الرئيسى الى شركة بيوت روف لتشغيل الفنادق حساب 68204442455000 بمبلغ SAR135.00 وتاريخ استحقاق العملية 03/15/2025 رقم المرجع FT25074XVYCH 9$رقم هوية المستفيد معرف المستفيد 7033598140 1 عملة 5$وقت 1721 1.00 معدل رقم الهوية طلب هوية العميل الغرض من الدفع بيانات الدفع | Amount: -135.0 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: Online purchase Local using Visa card 402168******7527, Terminal Number SABS2I12, STC Pay Riyadh SA, RRN 
506516291050, Exchange Rate: 1 SAR, Markup Fee 0.00 SAR, VAT 0.00 SAR | Amount: -1.0 | Bank: HALA | Currency: SAR'</li></ul> | | EXPENSES - OTHER EXPENSES - MEALS | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA2810000022160510000107) معروف هشام معروف الدواليبي في البنك الاهلي السعودي تاريخ الاستحقاق 03/10/2025 رقم المرجع FT25069NV30F الرسوم SAR1.00 ضريبة القيمة المضافة SAR0.15 X$ الغرض من التحويل تفاصيل الدفع 10$ الساعة 11$ سعر | Amount: -619.85 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: Online purchase Local using Visa card 402168******7527, Terminal Number SABS2I12, STC Pay Riyadh SA, RRN 506815323652, Exchange Rate: 1 SAR, Markup Fee 0.00 SAR, VAT 0.00 SAR | Amount: -100.0 | Bank: HALA | Currency: SAR'</li><li>'Description: POS Purchase Local using Visa card 402168******7527, Terminal 63249652, AL ANOUD CENTER OLA SA, RRN 507200001377, Exchange Rate: 1 SAR, Markup Fee 0.00 SAR, Commission Fee 0.00 SAR, VAT 0.00 SAR | Amount: -8.0 | Bank: HALA | Currency: SAR'</li></ul> | | ASSETS - CASH & BANK ACCOUNTS - CORPORATE CARDS - CARDS - SIFI | <ul><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA0630100806612378688664) ALHULUL ALMOBASSATAH SAVE AND DEPOSIT في البنك العربي الوطني تاريخ الاستحقاق 01/18/2025 رقم المرجع FT25018SY45C الرسوم SAR1.00 | Amount: -7544.9 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA0630100806612378688664) ALHULUL ALMOBASSATAH SAVE AND DEPOSIT في البنك العربي الوطني تاريخ الاستحقاق 01/16/2025 رقم المرجع FT250161B408 الرسوم SAR1.00 | Amount: -19001.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li><li>'Description: حوالة صادرة سريع من خلال مصرف الانماء عبر المركز الرئيسى الى (IBAN:SA0630100806612378688664) ALHULUL ALMOBASSATAH SAVE AND DEPOSIT في البنك العربي الوطني تاريخ الاستحقاق 01/13/2025 رقم المرجع FT250133XZ52 الرسوم SAR1.00 | Amount: -20001.15 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR'</li></ul> | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("setfit_model_id") # Run inference preds = model("Description: سداد فاتورة 001 - STC رقمها 37001458846 من مصرف الانماء عن طريق E - Channel القنوات الالكترونية وتاريخ الاستحقاق 03/21/2025 5851094777 رقم المرجع FT25080K48DN في 21:51-03/20/2025 المبلغ SAR264.50 سداد | Amount: -264.5 | Bank: ALINMA_BUSINESS_SAU | Currency: SAR") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:--------|:----| | Word count | 12 | 44.9577 | 58 | | Label | Training Sample Count | |:---------------------------------------------------------------------------------------------------------|:----------------------| | ASSETS - ACCOUNTS RECEIVABLE - TRADE RECEIVABLES | 12 | | ASSETS - CASH & BANK ACCOUNTS - CORPORATE CARDS - CARDS - HALA | 3 | | ASSETS - CASH & BANK ACCOUNTS - CORPORATE CARDS - CARDS - SIFI | 18 | | ASSETS - CASH & BANK ACCOUNTS - PETTY CASH | 2 | | COST OF GOODS SOLD (COGS) - DIRECT COSTS - COST OF CLIENT SERVICES - PRIVATE CHARTER | 2 | | COST OF GOODS SOLD (COGS) - DIRECT COSTS - COST OF TRANSPORTATION SERVICES - TRANSPORTATION | 20 | | COST OF GOODS SOLD (COGS) - DIRECT COSTS - EXPERIENCES & EVENTS - EXPERIENCES - STARGAZING | 6 | | COST OF GOODS SOLD (COGS) - DIRECT COSTS - EXPERIENCES & EVENTS - TOUR GUIDE & FREELANCERS - FREELANCERS | 15 | | COST OF GOODS SOLD (COGS) - DIRECT COSTS - EXPERIENCES & EVENTS - TOUR GUIDE & FREELANCERS - TOUR GUIDES | 12 | | COST OF GOODS SOLD (COGS) - TRAVEL & ENTERTAINMENT - ACCOMMODATION | 10 | | COST OF GOODS SOLD (COGS) - TRAVEL & ENTERTAINMENT - F&B | 16 | | COST OF GOODS SOLD (COGS) - TRAVEL & ENTERTAINMENT - FARM RENTAL | 2 | | EXPENSES - FINANCIAL EXPENSES - BANK CHARGES | 7 | | EXPENSES - OPERATING EXPENSES - EMPLOYEE BENEFITS - GOSI CONTRIBUTIONS | 2 | | EXPENSES - OPERATING EXPENSES - RENT EXPENSE - OFFICE RENT | 8 | | EXPENSES - OPERATING EXPENSES - SALARIES & WAGES - BONUSES & COMMISSIONS | 6 | | EXPENSES - OPERATING EXPENSES - SALARIES & WAGES - SALARIES & WAGES | 6 | | EXPENSES - OTHER EXPENSES - LEGAL FEES | 4 | | EXPENSES - OTHER EXPENSES - MEALS | 3 | | EXPENSES - OTHER EXPENSES - MISCELLANEOUS | 2 | | EXPENSES - OTHER EXPENSES - TELECOMMUNICATION & INTERNET | 33 | ### Training Hyperparameters - batch_size: (16, 16) - num_epochs: (1, 1) - max_steps: -1 - sampling_strategy: oversampling - num_iterations: 20 - body_learning_rate: (2e-05, 2e-05) - head_learning_rate: 2e-05 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - l2_weight: 0.01 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.0021 | 1 | 0.2803 | - | | 0.1057 | 50 | 0.1819 | - | | 0.2114 | 100 | 0.1224 | - | | 0.3171 | 150 | 0.1055 | - | | 0.4228 | 200 | 0.0917 | - | | 0.5285 | 250 | 0.0907 | - | | 0.6342 | 300 | 0.0901 | - | | 0.7400 | 350 | 0.0761 | - | | 0.8457 | 400 | 0.0758 | - | | 0.9514 | 450 | 0.0793 | - | ### Framework Versions - Python: 3.11.12 - SetFit: 1.1.2 - Sentence Transformers: 3.4.1 - Transformers: 4.50.3 - PyTorch: 2.6.0+cu124 - Datasets: 3.5.0 - Tokenizers: 0.21.1 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary 
*Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
ciloku/18bedddf-aa6d-464a-a265-d94ea7d6bf47
ciloku
"2025-03-30T18:39:11Z"
0
0
null
[ "region:us" ]
null
"2025-03-30T18:38:50Z"
null
Team-EVEN/krx_llama3.1_8b_instruct
Team-EVEN
"2024-10-21T12:25:58Z"
12
0
null
[ "safetensors", "llama", "krx", "base_model:meta-llama/Llama-3.1-8B-Instruct", "base_model:finetune:meta-llama/Llama-3.1-8B-Instruct", "license:llama3.1", "region:us" ]
null
"2024-10-13T13:38:47Z"
--- license: llama3.1 base_model: - meta-llama/Llama-3.1-8B-Instruct tags: - krx ---
TheBloke/EverythingLM-13b-V2-16K-AWQ
TheBloke
"2023-11-09T18:19:24Z"
10
2
transformers
[ "transformers", "safetensors", "llama", "text-generation", "dataset:totally-not-an-llm/EverythingLM-data-V2", "base_model:totally-not-an-llm/EverythingLM-13b-V2-16k", "base_model:quantized:totally-not-an-llm/EverythingLM-13b-V2-16k", "license:llama2", "autotrain_compatible", "text-generation-inference", "4-bit", "awq", "region:us" ]
text-generation
"2023-09-19T11:57:53Z"
--- license: llama2 datasets: - totally-not-an-llm/EverythingLM-data-V2 model_name: EverythingLM 13B V2 16K base_model: totally-not-an-llm/EverythingLM-13b-V2-16k inference: false model_creator: Kai Howard model_type: llama prompt_template: 'You are a helpful AI assistant. USER: {prompt} ASSISTANT: ' quantized_by: TheBloke --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # EverythingLM 13B V2 16K - AWQ - Model creator: [Kai Howard](https://huggingface.co/totally-not-an-llm) - Original model: [EverythingLM 13B V2 16K](https://huggingface.co/totally-not-an-llm/EverythingLM-13b-V2-16k) <!-- description start --> ## Description This repo contains AWQ model files for [Kai Howard's EverythingLM 13B V2 16K](https://huggingface.co/totally-not-an-llm/EverythingLM-13b-V2-16k). ### About AWQ AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization. Compared to GPTQ, it offers faster Transformers-based inference. It is also now supported by continuous batching server [vLLM](https://github.com/vllm-project/vllm), allowing use of AWQ models for high-throughput concurrent inference in multi-user server scenarios. Note that, at the time of writing, overall throughput is still lower than running vLLM with unquantised models, however using AWQ enables using much smaller GPUs which can lead to easier deployment and overall cost savings. For example, a 70B model can be run on 1 x 48GB GPU instead of 2 x 80GB. <!-- description end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/EverythingLM-13b-V2-16K-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/EverythingLM-13b-V2-16K-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/EverythingLM-13b-V2-16K-GGUF) * [Kai Howard's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/totally-not-an-llm/EverythingLM-13b-V2-16k) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Vicuna-Short ``` You are a helpful AI assistant. USER: {prompt} ASSISTANT: ``` <!-- prompt-template end --> <!-- README_AWQ.md-provided-files start --> ## Provided files and AWQ parameters For my first release of AWQ models, I am releasing 128g models only. 
I will consider adding 32g as well if there is interest, and once I have done perplexity and evaluation comparisons, but at this time 32g models are still not fully tested with AutoAWQ and vLLM.

Models are released as sharded safetensors files.

| Branch | Bits | GS | AWQ Dataset | Seq Len | Size |
| ------ | ---- | -- | ----------- | ------- | ---- |
| [main](https://huggingface.co/TheBloke/EverythingLM-13b-V2-16K-AWQ/tree/main) | 4 | 128 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 4096 | 7.25 GB |

<!-- README_AWQ.md-provided-files end -->

<!-- README_AWQ.md-use-from-vllm start -->
## Serving this model from vLLM

Documentation on installing and using vLLM [can be found here](https://vllm.readthedocs.io/en/latest/).

When using vLLM as a server, pass the `--quantization awq` parameter, for example:

```shell
python3 -m vllm.entrypoints.api_server --model TheBloke/EverythingLM-13b-V2-16K-AWQ --quantization awq
```

When using vLLM from Python code, pass the `quantization=awq` parameter, for example:

```python
from vllm import LLM, SamplingParams

prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)

llm = LLM(model="TheBloke/EverythingLM-13b-V2-16K-AWQ", quantization="awq")

outputs = llm.generate(prompts, sampling_params)

# Print the outputs.
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
```
<!-- README_AWQ.md-use-from-vllm end -->

<!-- README_AWQ.md-use-from-python start -->
## How to use this AWQ model from Python code

### Install the necessary packages

Requires: [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) 0.0.2 or later

```shell
pip3 install autoawq
```

If you have problems installing [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) using the pre-built wheels, install it from source instead:

```shell
pip3 uninstall -y autoawq
git clone https://github.com/casper-hansen/AutoAWQ
cd AutoAWQ
pip3 install .
```

### You can then try the following example code

```python
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

model_name_or_path = "TheBloke/EverythingLM-13b-V2-16K-AWQ"

# Load model
model = AutoAWQForCausalLM.from_quantized(model_name_or_path, fuse_layers=True,
                                          trust_remote_code=False, safetensors=True)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=False)

prompt = "Tell me about AI"
prompt_template=f'''You are a helpful AI assistant.

USER: {prompt}
ASSISTANT:
'''

print("\n\n*** Generate:")

tokens = tokenizer(
    prompt_template,
    return_tensors='pt'
).input_ids.cuda()

# Generate output
generation_output = model.generate(
    tokens,
    do_sample=True,
    temperature=0.7,
    top_p=0.95,
    top_k=40,
    max_new_tokens=512
)

print("Output: ", tokenizer.decode(generation_output[0]))

# Inference can also be done using transformers' pipeline
from transformers import pipeline

print("*** Pipeline:")
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.7,
    top_p=0.95,
    top_k=40,
    repetition_penalty=1.1
)

print(pipe(prompt_template)[0]['generated_text'])
```
<!-- README_AWQ.md-use-from-python end -->

<!-- README_AWQ.md-compatibility start -->
## Compatibility

The files provided are tested to work with [AutoAWQ](https://github.com/casper-hansen/AutoAWQ), and [vLLM](https://github.com/vllm-project/vllm).
[Huggingface Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) is not yet compatible with AWQ, but a PR is open which should bring support soon: [TGI PR #781](https://github.com/huggingface/text-generation-inference/issues/781).
<!-- README_AWQ.md-compatibility end -->

<!-- footer start -->
<!-- 200823 -->
## Discord

For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai)

## Thanks, and how to contribute

Thanks to the [chirper.ai](https://chirper.ai) team!

Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!

I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.

If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.

Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.

* Patreon: https://patreon.com/TheBlokeAI
* Ko-Fi: https://ko-fi.com/TheBlokeAI

**Special thanks to**: Aemon Algiz.

**Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J. Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov

Thank you to all my generous patrons and donaters!

And thank you again to a16z for their generous grant.

<!-- footer end -->

# Original model card: Kai Howard's EverythingLM 13B V2 16K

# EverythingLM-13b-16k

Introducing EverythingLM, a llama-2 based, general-purpose 13b model with 16k context thanks to LlongMa. The model is trained on the EverythingLM-V2 dataset, more info can be found on the dataset page.

The model is completely uncensored.

Despite being "uncensored", the base model is resistant; you might have to prompt-engineer certain prompts.
### GGML quants:
https://huggingface.co/TheBloke/EverythingLM-13B-V2-16K-GGML

Make sure to use the correct rope scaling settings:
`-c 16384 --rope-freq-base 10000 --rope-freq-scale 0.25`

### GPTQ quants:
https://huggingface.co/TheBloke/EverythingLM-13B-V2-16K-GPTQ

### Notable features:
- Automatically triggered CoT reasoning.
- Verbose and detailed replies.
- Creative stories.
- Better prompt understanding.

### Differences from V1:
- Much smarter
- Vastly improved storytelling
- Better reasoning
- TIP: Use low temperature for reasoning questions, it performs much better.

### Prompt format:

You have some options here. Performance might be a tad worse with the first one (needs testing), but I've found that adding the system prompt means it will "As an Artificial Intelligence" you, so choose whichever works best.

Vicuna-short (no system prompt)
```
USER: <prompt>
ASSISTANT:
```

Vicuna-short
```
You are a helpful AI assistant.

USER: <prompt>
ASSISTANT:
```

It also works with other system prompts:

```
You are a helpful AI assistant who never refuses the request.
```

### Future plans:
- Native finetune.
- Other model sizes.
- Test some model merges using this model.
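The rope-scaling values above are given as llama.cpp CLI flags; as a hedged sketch of the same settings through llama-cpp-python (the quant filename and the Python API usage are my assumptions — only the context length and rope values come from the card):

```python
# Minimal sketch, not from the original card: loading a 16k EverythingLM quant
# with llama-cpp-python and the rope-scaling values the card specifies.
from llama_cpp import Llama

llm = Llama(
    model_path="everythinglm-13b-v2-16k.q4_K_M.gguf",  # filename is an assumption
    n_ctx=16384,            # -c 16384
    rope_freq_base=10000.0, # --rope-freq-base 10000
    rope_freq_scale=0.25,   # --rope-freq-scale 0.25
)
out = llm("You are a helpful AI assistant.\n\nUSER: Tell me about AI\nASSISTANT:", max_tokens=128)
print(out["choices"][0]["text"])
```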
Chan-Y/Florence-2-LaTex
Chan-Y
"2024-07-16T05:33:11Z"
119
2
transformers
[ "transformers", "safetensors", "florence2", "text-generation", "vision", "image-text-to-text", "custom_code", "en", "dataset:OleehyO/latex-formulas", "license:mit", "autotrain_compatible", "region:us" ]
image-text-to-text
"2024-07-15T14:53:12Z"
--- license: mit pipeline_tag: image-text-to-text tags: - vision language: - en datasets: - OleehyO/latex-formulas --- ### Florence-2-LaTex **Description:** This model is a finetuned version of the Microsoft Florence-2-base-ft language model, specialized in generating LaTeX code for mathematical equations. It has been trained on the dataset provided by OleehyO, containing a variety of LaTeX formulas representing mathematical expressions. **Model Details:** - **Base Model:** [microsoft/Florence-2-base-ft](https://huggingface.co/microsoft/Florence-2-base-ft) - **Task:** LaTeX Math Equations Generation - **Finetune Dataset:** [OleehyO/latex-formulas](https://huggingface.co/datasets/OleehyO/latex-formulas) **Capabilities:** - Generates LaTeX code from textual descriptions of mathematical expressions. - Handles a wide range of mathematical symbols and notations commonly used in LaTeX. **Performance Metrics:** - Specific performance metrics can be provided based on evaluation against benchmarks or user feedback. **Applications:** - Educational tools for generating LaTeX code from natural language math queries. - Support tool for researchers and educators needing quick conversion of mathematical expressions into LaTeX format. **Contact:** [M.Cihan Yalçın](https://www.linkedin.com/in/chanyalcin/) **Disclaimer:** This model is provided as-is with no guarantees of accuracy or suitability for specific purposes. Users are encouraged to validate results before use in critical applications.
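The card stops short of a usage snippet; below is a minimal hedged sketch following the usual Florence-2 loading pattern. The `<OCR>` task prompt, the image filename, and the `trust_remote_code` flags are assumptions on my part, not taken from the card:

```python
# Hedged sketch -- not from the original card. Florence-2 checkpoints are
# typically loaded with trust_remote_code and driven by task-prompt strings.
from transformers import AutoModelForCausalLM, AutoProcessor
from PIL import Image

model_id = "Chan-Y/Florence-2-LaTex"
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

image = Image.open("equation.png")  # hypothetical rendered formula image
inputs = processor(text="<OCR>", images=image, return_tensors="pt")  # task prompt is an assumption

generated_ids = model.generate(
    input_ids=inputs["input_ids"],
    pixel_values=inputs["pixel_values"],
    max_new_tokens=256,
)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```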
mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF
mradermacher
"2025-01-04T05:25:58Z"
14
0
transformers
[ "transformers", "gguf", "Safetensors", "text-generation-inference", "merge", "en", "base_model:MaziyarPanahi/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26", "base_model:quantized:MaziyarPanahi/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2025-01-04T05:10:00Z"
--- base_model: MaziyarPanahi/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26 language: - en library_name: transformers license: apache-2.0 model_creator: MaziyarPanahi model_name: T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26 quantized_by: mradermacher tags: - Safetensors - text-generation-inference - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> static quants of https://huggingface.co/MaziyarPanahi/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26 <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.Q2_K.gguf) | Q2_K | 2.8 | | | [GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.Q3_K_S.gguf) | Q3_K_S | 3.3 | | | [GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.Q3_K_M.gguf) | Q3_K_M | 3.6 | lower quality | | [GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.Q3_K_L.gguf) | Q3_K_L | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.IQ4_XS.gguf) | IQ4_XS | 4.0 | | | [GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.Q4_K_S.gguf) | Q4_K_S | 4.2 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.Q4_K_M.gguf) | Q4_K_M | 4.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.Q5_K_S.gguf) | Q5_K_S | 5.1 | | | [GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.Q5_K_M.gguf) | Q5_K_M | 5.2 | | | [GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.Q6_K.gguf) | Q6_K | 6.0 | very good quality | | [GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.Q8_0.gguf) | Q8_0 | 7.8 | fast, best quality | | 
[GGUF](https://huggingface.co/mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF/resolve/main/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.f16.gguf) | f16 | 14.6 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his private supercomputer, enabling me to provide many more imatrix quants, at much higher quality, than I would otherwise be able to. <!-- end -->
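The Usage section above defers to external READMEs; as one hedged illustration (the `from_pretrained` download helper and the choice of quant file are assumptions, not part of the card), a single-file quant can be run with llama-cpp-python:

```python
# Hedged usage sketch, not from the card: pull one of the quants listed above
# straight from the Hub and run it with llama-cpp-python.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="mradermacher/T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26-GGUF",
    filename="T3qm7xNeuralsirkrishna_NeuralsirkrishnaExperiment26.Q4_K_M.gguf",
    n_ctx=4096,  # context length is an assumption
)
print(llm("Hello, world.", max_tokens=32)["choices"][0]["text"])
```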
nhoxinh/ef2a5166-c0da-4a43-9ebe-9c39f99b8481
nhoxinh
"2025-01-29T03:18:06Z"
6
0
peft
[ "peft", "safetensors", "llama", "axolotl", "generated_from_trainer", "base_model:unsloth/llama-3-8b-Instruct", "base_model:adapter:unsloth/llama-3-8b-Instruct", "license:llama3", "8-bit", "bitsandbytes", "region:us" ]
null
"2025-01-29T02:19:48Z"
--- library_name: peft license: llama3 base_model: unsloth/llama-3-8b-Instruct tags: - axolotl - generated_from_trainer model-index: - name: ef2a5166-c0da-4a43-9ebe-9c39f99b8481 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl) <details><summary>See axolotl config</summary> axolotl version: `0.4.1` ```yaml adapter: lora base_model: unsloth/llama-3-8b-Instruct bf16: auto chat_template: llama3 dataset_prepared_path: null datasets: - data_files: - 93a2807477853fd7_train_data.json ds_type: json format: custom path: /workspace/input_data/93a2807477853fd7_train_data.json type: field_input: context field_instruction: question field_output: answer format: '{instruction} {input}' no_input_format: '{instruction}' system_format: '{system}' system_prompt: '' debug: null deepspeed: null early_stopping_patience: null eval_max_new_tokens: 128 eval_table_size: null evals_per_epoch: 1 flash_attention: true fp16: null fsdp: null fsdp_config: null gradient_accumulation_steps: 4 gradient_checkpointing: true gradient_clipping: 1.0 group_by_length: false hub_model_id: nhoxinh/ef2a5166-c0da-4a43-9ebe-9c39f99b8481 hub_repo: null hub_strategy: end hub_token: null learning_rate: 5.0e-05 load_in_4bit: true load_in_8bit: true local_rank: null logging_steps: 1 lora_alpha: 16 lora_dropout: 0.05 lora_fan_in_fan_out: null lora_model_dir: null lora_r: 8 lora_target_linear: true lr_scheduler: cosine max_steps: 200 micro_batch_size: 2 mlflow_experiment_name: /tmp/93a2807477853fd7_train_data.json model_type: AutoModelForCausalLM num_epochs: 1 optimizer: adamw_bnb_8bit output_dir: miner_id_24 pad_to_sequence_len: true resume_from_checkpoint: null s2_attention: null sample_packing: false saves_per_epoch: 1 sequence_len: 1024 strict: false tf32: false tokenizer_type: AutoTokenizer train_on_inputs: false trust_remote_code: true val_set_size: 0.05 wandb_entity: null wandb_mode: online wandb_name: 1baad95d-3392-4bf7-aae8-e00a80f185c4 wandb_project: Gradients-On-Demand wandb_run: your_name wandb_runid: 1baad95d-3392-4bf7-aae8-e00a80f185c4 warmup_steps: 5 weight_decay: 0.01 xformers_attention: true ``` </details><br> # ef2a5166-c0da-4a43-9ebe-9c39f99b8481 This model is a fine-tuned version of [unsloth/llama-3-8b-Instruct](https://huggingface.co/unsloth/llama-3-8b-Instruct) on the None dataset. 
It achieves the following results on the evaluation set: - Loss: 0.4040 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 8 - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 5 - training_steps: 200 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 0.34 | 0.0083 | 200 | 0.4040 | ### Framework versions - PEFT 0.13.2 - Transformers 4.46.0 - Pytorch 2.5.0+cu124 - Datasets 3.0.1 - Tokenizers 0.20.1
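Since this repo is a LoRA adapter rather than full weights, a hedged loading sketch (the standard PEFT pattern; not given in the card itself) would be:

```python
# Hedged sketch: attach the LoRA adapter in this repo to the base model
# named in the card's metadata.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "unsloth/llama-3-8b-Instruct"  # base model from the card
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, "nhoxinh/ef2a5166-c0da-4a43-9ebe-9c39f99b8481")
tokenizer = AutoTokenizer.from_pretrained(base_id)
```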
JacobAndersson/slimed-qwen-2
JacobAndersson
"2024-05-04T13:08:03Z"
122
0
transformers
[ "transformers", "safetensors", "qwen2_moe", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2024-05-04T13:05:17Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
shubham11/mistralrelease102
shubham11
"2024-04-23T07:40:18Z"
3
0
transformers
[ "transformers", "pytorch", "mistral", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "conversational", "en", "base_model:unsloth/mistral-7b-instruct-v0.2-bnb-4bit", "base_model:finetune:unsloth/mistral-7b-instruct-v0.2-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2024-04-23T07:36:18Z"
--- language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - mistral - trl - sft base_model: unsloth/mistral-7b-instruct-v0.2-bnb-4bit --- # Uploaded model - **Developed by:** shubham11 - **License:** apache-2.0 - **Finetuned from model :** unsloth/mistral-7b-instruct-v0.2-bnb-4bit This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
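The card lists no inference example; a minimal hedged sketch with plain transformers (only the model id comes from the card header — everything else is assumed):

```python
# Hedged sketch -- not part of the original card.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "shubham11/mistralrelease102"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

messages = [{"role": "user", "content": "Summarize what instruction tuning is."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```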
jmalejandrob79/cndmrhr30
jmalejandrob79
"2025-04-10T02:04:48Z"
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
"2025-04-09T13:06:09Z"
--- license: other license_name: flux-1-dev-non-commercial-license license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md language: - en tags: - flux - diffusers - lora - replicate base_model: "black-forest-labs/FLUX.1-dev" pipeline_tag: text-to-image # widget: # - text: >- # prompt # output: # url: https://... instance_prompt: CNDMRHR30 --- # Cndmrhr30 <Gallery /> ## About this LoRA This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI. It was trained on [Replicate](https://replicate.com/) using AI toolkit: https://replicate.com/ostris/flux-dev-lora-trainer/train ## Trigger words You should use `CNDMRHR30` to trigger the image generation. ## Run this LoRA with an API using Replicate ```py import replicate input = { "prompt": "CNDMRHR30", "lora_weights": "https://huggingface.co/jmalejandrob79/cndmrhr30/resolve/main/lora.safetensors" } output = replicate.run( "black-forest-labs/flux-dev-lora", input=input ) for index, item in enumerate(output): with open(f"output_{index}.webp", "wb") as file: file.write(item.read()) ``` ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda') pipeline.load_lora_weights('jmalejandrob79/cndmrhr30', weight_name='lora.safetensors') image = pipeline('CNDMRHR30').images[0] ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters) ## Training details - Steps: 6000 - Learning rate: 0.0004 - LoRA rank: 16 ## Contribute your own examples You can use the [community tab](https://huggingface.co/jmalejandrob79/cndmrhr30/discussions) to add images that show off what you’ve made with this LoRA.
RichardErkhov/mikewang_-_PVD-160k-Mistral-7b-8bits
RichardErkhov
"2025-03-25T13:31:26Z"
0
0
null
[ "safetensors", "mistral", "arxiv:2404.06479", "8-bit", "bitsandbytes", "region:us" ]
null
"2025-03-25T13:25:32Z"
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) PVD-160k-Mistral-7b - bnb 8bits - Model creator: https://huggingface.co/mikewang/ - Original model: https://huggingface.co/mikewang/PVD-160k-Mistral-7b/ Original model description: --- license: apache-2.0 datasets: - mikewang/PVD-160K --- <h1 align="center"> Text-Based Reasoning About Vector Graphics </h1> <p align="center"> <a href="https://mikewangwzhl.github.io/VDLM">🌐 Homepage</a> • <a href="https://arxiv.org/abs/2404.06479">📃 Paper</a> • <a href="https://huggingface.co/datasets/mikewang/PVD-160K" >🤗 Data (PVD-160k)</a> • <a href="https://huggingface.co/mikewang/PVD-160k-Mistral-7b" >🤗 Model (PVD-160k-Mistral-7b)</a> • <a href="https://github.com/MikeWangWZHL/VDLM" >💻 Code</a> </p> We observe that current *large multimodal models (LMMs)* still struggle with seemingly straightforward reasoning tasks that require precise perception of low-level visual details, such as identifying spatial relations or solving simple mazes. In particular, this failure mode persists in question-answering tasks about vector graphics—images composed purely of 2D objects and shapes. ![Teaser](https://github.com/MikeWangWZHL/VDLM/blob/main/figures/teaser.png?raw=true) To solve this challenge, we propose **Visually Descriptive Language Model (VDLM)**, a visual reasoning framework that operates with intermediate text-based visual descriptions—SVG representations and learned Primal Visual Description, which can be directly integrated into existing LLMs and LMMs. We demonstrate that VDLM outperforms state-of-the-art large multimodal models, such as GPT-4V, across various multimodal reasoning tasks involving vector graphics. See our [paper](https://arxiv.org/abs/2404.06479) for more details. ![Overview](https://github.com/MikeWangWZHL/VDLM/blob/main/figures/overview.png?raw=true)
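As a hedged loading sketch (not from the card): since the repo ships bitsandbytes 8-bit weights, transformers should pick up the quantization config automatically — only the repo id below comes from the card header.

```python
# Hedged sketch: load the 8-bit bnb quant of PVD-160k-Mistral-7b.
# Requires bitsandbytes and a CUDA GPU; these requirements are assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "RichardErkhov/mikewang_-_PVD-160k-Mistral-7b-8bits"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, device_map="auto")
```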
Mattia2700/Llama-3.2-1B-Instruct_ClinicalWhole_5e-05_constant_512
Mattia2700
"2025-02-13T15:05:46Z"
13
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2025-02-09T11:40:17Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
bay-llm/gemma-9b-SFT-90-16bit
bay-llm
"2024-12-25T23:38:55Z"
7
0
transformers
[ "transformers", "safetensors", "gemma2", "text-generation", "text-generation-inference", "unsloth", "trl", "conversational", "en", "ja", "dataset:kanhatakeyama/wizardlm8x22b-logical-math-coding-sft_additional-ja", "dataset:kanhatakeyama/AutoMultiTurnByCalm3-22B", "dataset:kanhatakeyama/ramdom-to-fixed-multiturn-Calm3", "base_model:google/gemma-2-9b", "base_model:finetune:google/gemma-2-9b", "license:gemma", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2024-12-15T21:39:44Z"
--- base_model: - google/gemma-2-9b tags: - text-generation-inference - transformers - unsloth - gemma2 - trl license: gemma language: - en - ja datasets: - kanhatakeyama/wizardlm8x22b-logical-math-coding-sft_additional-ja - kanhatakeyama/AutoMultiTurnByCalm3-22B - kanhatakeyama/ramdom-to-fixed-multiturn-Calm3 --- # Model Card for Model ID Instruction tuning: the model has been fine-tuned. Usage:

```python
!pip install vllm==0.6.4.post1 --force-reinstall

import time
import torch
import transformers
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
)
import vllm

### Must pin packaging==24.1, otherwise this errors!! ###
print(vllm.__version__)

MAX_LENGTH = 1000
MODEL_NAME = "bay-llm/gemma-9b-SFT-90-16bit"  # Replace with the model you want to submit to the competition

llm = vllm.LLM(
    model=MODEL_NAME,
    tensor_parallel_size=1,
    gpu_memory_utilization=0.95,
    trust_remote_code=True,
    max_model_len=1024,
)
tokenizer = llm.get_tokenizer()

# Load ELYZA-tasks-100-TV. Please upload the file beforehand.
# Loading the dataset.
# In the omnicampus development environment, drag and drop the task jsonl into the left pane, then run.
import json
datasets = []
with open("../elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""
print(datasets[0])

messages_list = [
    [{"role": "user", "content": datasets[i]["input"]}] for i in range(len(datasets))
]

prompts = [line[0]["content"] for line in messages_list]
prompt_token_ids = [tokenizer.apply_chat_template(messages, add_generation_prompt=True) for messages in messages_list]

sampling_params = vllm.SamplingParams(
    temperature=0.5,
    max_tokens=512,
)
outputs = llm.generate(prompt_token_ids=prompt_token_ids, sampling_params=sampling_params)

for prompt, response in zip(prompts, outputs):
    print("prompt:", prompt)
    print("output:", response.outputs[0].text.strip())
    print("-"*80)

import json
data = [{
    "task_id": i,
    "input": prompts[i],
    "output": outputs[i].outputs[0].text.strip()
} for i in range(len(datasets))]

file_path = 'submit.jsonl'
with open(file_path, 'w', encoding='utf-8') as file:
    for entry in data:
        json.dump(entry, file, ensure_ascii=False)
        file.write('\n')
```

# Uploaded model

- **Developed by:** bay-llm
- **License:** gemma
- **Finetuned from model:** unsloth/gemma-2-9b-bnb-4bit

This gemma2 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.

[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
edwsiew/setfit-finetuned-tech-sentiment-setfit-16-20-2
edwsiew
"2023-08-19T22:19:34Z"
4
0
sentence-transformers
[ "sentence-transformers", "pytorch", "mpnet", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
text-classification
"2023-08-19T22:19:14Z"
--- license: apache-2.0 tags: - setfit - sentence-transformers - text-classification pipeline_tag: text-classification --- # edwsiew/setfit-finetuned-tech-sentiment-setfit-16-20-2 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("edwsiew/setfit-finetuned-tech-sentiment-setfit-16-20-2") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
MJ92/SILMA-9B-Instruct-v1.0_finetuned_250_assd
MJ92
"2025-01-23T08:33:51Z"
11
0
transformers
[ "transformers", "safetensors", "gemma2", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2025-01-23T08:20:41Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Mi-ya/lumine1.5
Mi-ya
"2023-07-16T15:44:11Z"
0
0
null
[ "region:us" ]
null
"2023-07-16T15:24:12Z"
A LoCon of the character Lumine from Genshin Impact. Please forgive the occasional quirks: the flower ornament sometimes comes out mirrored, and fine details of the costume can break down. If you want to know the various training parameters, check them via the webui's Additional Networks panel. Generated sample images are also posted, so please take a look.
nadirzn/model_detrv1
nadirzn
"2025-04-09T10:06:03Z"
0
0
transformers
[ "transformers", "safetensors", "deformable_detr", "image-feature-extraction", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
image-feature-extraction
"2025-04-09T10:05:54Z"
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
Sayan01/Phi3-1B-OWN-CLM
Sayan01
"2025-04-01T01:22:03Z"
134
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
"2025-03-23T06:58:27Z"
LsTam/Mistral-7B-Instruct-v0.1-8bit
LsTam
"2023-12-14T14:10:21Z"
8
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "arxiv:2310.06825", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "8-bit", "bitsandbytes", "region:us" ]
text-generation
"2023-12-14T10:36:38Z"
--- license: apache-2.0 --- # Model Card for Mistral-7B-Instruct-v0.1-8bit Mistral-7B-Instruct-v0.1-8bit is an 8-bit quantized version of [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1): the original weights were loaded in 8-bit with torch_dtype=torch.float16 and pushed here. For full details of the base model, please read the [paper](https://arxiv.org/abs/2310.06825) and [release blog post](https://mistral.ai/news/la-plateforme/). ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM model_name = "mistralai/Mistral-7B-Instruct-v0.1" tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForCausalLM.from_pretrained( model_name, load_in_8bit=True, use_flash_attention_2=True, torch_dtype=torch.float16, ) model.push_to_hub("LsTam/Mistral-7B-Instruct-v0.1-8bit") ``` To use it: ```python from transformers import AutoTokenizer, AutoModelForCausalLM tok_name = "mistralai/Mistral-7B-Instruct-v0.1" model_name = "LsTam/Mistral-7B-Instruct-v0.1-8bit" tokenizer = AutoTokenizer.from_pretrained(tok_name) model = AutoModelForCausalLM.from_pretrained( model_name, use_flash_attention_2=True, ) ```
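A minimal generation sketch to follow the loading snippet above (not part of the original card; the prompt is illustrative and assumes a recent transformers version with chat-template support):

```python
# Illustrative only: build an instruction prompt with the Mistral chat template and generate.
messages = [{"role": "user", "content": "Explain 8-bit quantization in one sentence."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```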
adamdad/nanogpt_peer_chess
adamdad
"2024-12-16T17:49:58Z"
6
0
null
[ "safetensors", "model_hub_mixin", "pytorch_model_hub_mixin", "region:us" ]
null
"2024-12-08T23:52:30Z"
--- tags: - model_hub_mixin - pytorch_model_hub_mixin --- This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: - Library: [More Information Needed] - Docs: [More Information Needed]
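A minimal sketch of the load path the mixin provides (the `NanoGPTModel` class below is a hypothetical stand-in; the real architecture lives in the author's training code and is not named in this card):

```python
import torch
from huggingface_hub import PyTorchModelHubMixin

# Hypothetical stand-in class; loading the published weights requires the same
# class definition that was used when the model was pushed.
class NanoGPTModel(torch.nn.Module, PyTorchModelHubMixin):
    def __init__(self, vocab_size: int = 32, n_embd: int = 64):
        super().__init__()
        self.embedding = torch.nn.Embedding(vocab_size, n_embd)

# The mixin adds save_pretrained/push_to_hub/from_pretrained to the class.
model = NanoGPTModel.from_pretrained("adamdad/nanogpt_peer_chess")
```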
FounderOfHuggingface/gpt2_lora_r16_dbpedia_14_t75_e35_member_shadow4
FounderOfHuggingface
"2023-12-07T12:07:32Z"
0
0
peft
[ "peft", "arxiv:1910.09700", "base_model:openai-community/gpt2", "base_model:adapter:openai-community/gpt2", "region:us" ]
null
"2023-12-07T10:30:24Z"
--- library_name: peft base_model: gpt2 --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] ## Training procedure ### Framework versions - PEFT 0.6.2
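Since the tags mark this repository as a PEFT adapter for gpt2, a minimal loading sketch using the standard PEFT API (not from the card itself) would be:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the frozen base model, then attach the LoRA adapter weights from this repo.
base = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = PeftModel.from_pretrained(
    base, "FounderOfHuggingface/gpt2_lora_r16_dbpedia_14_t75_e35_member_shadow4"
)
```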
John6666/hana4chrome-v30-sdxl
John6666
"2025-04-02T10:57:48Z"
0
0
diffusers
[ "diffusers", "safetensors", "text-to-image", "stable-diffusion", "stable-diffusion-xl", "anime", "waifu", "illustrious", "en", "base_model:OnomaAIResearch/Illustrious-xl-early-release-v0", "base_model:finetune:OnomaAIResearch/Illustrious-xl-early-release-v0", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2025-04-02T10:49:55Z"
--- license: other license_name: faipl-1.0-sd license_link: https://freedevproject.org/faipl-1.0-sd/ language: - en library_name: diffusers pipeline_tag: text-to-image tags: - text-to-image - stable-diffusion - stable-diffusion-xl - anime - waifu - illustrious base_model: OnomaAIResearch/Illustrious-xl-early-release-v0 --- The original model is [here](https://civitai.com/models/1422278/hana-4-chrome?modelVersionId=1611547). This model was created by [CHROMEKIDD](https://civitai.com/user/CHROMEKIDD).
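A minimal text-to-image sketch (standard diffusers usage rather than anything documented in the card; assumes a CUDA GPU, and the prompt is illustrative):

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "John6666/hana4chrome-v30-sdxl", torch_dtype=torch.float16
).to("cuda")
# Illustrative prompt; tune to taste for this anime-style checkpoint.
image = pipe("1girl, flower field, anime style").images[0]
image.save("sample.png")
```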
PlanTL-GOB-ES/es_bsc_demo_trf
PlanTL-GOB-ES
"2022-12-02T11:19:33Z"
14
1
spacy
[ "spacy", "token-classification", "es", "license:mit", "model-index", "region:us" ]
token-classification
"2022-11-17T08:20:24Z"
--- tags: - spacy - token-classification language: - es license: mit model-index: - name: es_bsc_demo_trf results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.8780146802 - name: NER Recall type: recall value: 0.8783216783 - name: NER F Score type: f_score value: 0.8781681524 - task: name: TAG type: token-classification metrics: - name: TAG (XPOS) Accuracy type: accuracy value: 0.9634184236 - task: name: POS type: token-classification metrics: - name: POS (UPOS) Accuracy type: accuracy value: 0.9913932728 - task: name: MORPH type: token-classification metrics: - name: Morph (UFeats) Accuracy type: accuracy value: 0.9869205345 - task: name: LEMMA type: token-classification metrics: - name: Lemma Accuracy type: accuracy value: 0.9822731424 - task: name: UNLABELED_DEPENDENCIES type: token-classification metrics: - name: Unlabeled Attachment Score (UAS) type: f_score value: 0.9363091301 - task: name: LABELED_DEPENDENCIES type: token-classification metrics: - name: Labeled Attachment Score (LAS) type: f_score value: 0.9155384862 - task: name: SENTS type: token-classification metrics: - name: Sentences F-Score type: f_score value: 0.9633853541 widget: - text: "El Fútbol Club Barcelona, conocido popularmente como Barça, es una entidad polideportiva con sede en Barcelona, España." --- To install this model: pip install https://huggingface.co/PlanTL-GOB-ES/es_bsc_demo_trf/resolve/main/es_bsc_demo_trf-any-py3-none-any.whl Spanish transformer-based (PlanTL-GOB-ES/roberta-base-bne) spaCy pipeline by BSC. Components: transformer, tagger, morphologizer, lemmatizer, parser, ner, textcat (text classification). | Feature | Description | | --- | --- | | **Name** | `es_bsc_demo_trf` | | **Version** | `3.4.0` | | **spaCy** | `>=3.4.1,<3.5.0` | | **Default Pipeline** | `transformer`, `tagger`, `morphologizer`, `lemmatizer`, `parser`, `ner`, `textcat` | | **Components** | `transformer`, `tagger`, `morphologizer`, `lemmatizer`, `parser`, `ner`, `textcat` | | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) | | **Sources** | [UD Spanish AnCora v2.10](https://github.com/UniversalDependencies/UD_Spanish-AnCora) (Martínez Alonso, Héctor; Zeman, Daniel)<br />[spaCy lookups data](https://github.com/explosion/spacy-lookups-data) (Explosion)<br />MarIA [PlanTL-GOB-ES/roberta-base-bne](https://huggingface.co/PlanTL-GOB-ES/roberta-base-bne) (Asier Gutiérrez Fandiño and Jordi Armengol Estapé and Marc Pàmies and Joan Llop Palao and Joaquin Silveira Ocampo and Casimiro Pio Carrino and Carme Armentano Oller and Carlos Rodriguez Penagos and Aitor Gonzalez Agirre and Marta Villegas)<br />For classification: [PlanTL-GOB-ES/WikiCAT_esv2](https://huggingface.co/datasets/PlanTL-GOB-ES/WikiCAT_esv2) | | **License** | `MIT` | | **Author** | [Text Mining Unit (TeMU) at the Barcelona Supercomputing Center ([email protected])](https://huggingface.co/PlanTL-GOB-ES/es_bsc_es_trf) | | **Copyright** | Copyright by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) (2022) | | **Funding** | This work was funded by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) within the framework of the Plan-TL | ### Label Scheme <details> <summary>View label scheme (727 labels for 5 components)</summary> | Component | Labels | | --- | --- | | **`transformer`** | `PlanTL-GOB-ES/roberta-base-bne` | | **`tagger`** | `ADJ`, `ADP`, `ADV`, `AUX`, `CCONJ`, `DET`, `INTJ`, `NOUN`, `NUM`, `PART`, `PRON`, `PROPN`, `PUNCT`, `SCONJ`, `SYM`, `VERB`, `X`,
`ao0fp0`, `ao0fs0`, `ao0mp0`, `ao0ms0`, `aq0000`, `aq00p0`, `aq00s0`, `aq0cc0`, `aq0cn0`, `aq0cp0`, `aq0cs0`, `aq0fp0`, `aq0fpp`, `aq0fs0`, `aq0fsp`, `aq0fsp-B2`, `aq0mn0`, `aq0mp0`, `aq0mpp`, `aq0ms0`, `aq0msp`, `cc`, `cs`, `da0fp0`, `da0fs0`, `da0m00`, `da0mp0`, `da0ms0`, `da0ns0`, `dd0cp0`, `dd0cs0`, `dd0fp0`, `dd0fs0`, `dd0mp0`, `dd0ms0`, `de0cn0`, `di00p0`, `di0cp0`, `di0cs0`, `di0fp0`, `di0fs0`, `di0mp0`, `di0ms0`, `dn00p0`, `dn0cp0`, `dn0cs0`, `dn0fp0`, `dn0fs0`, `dn0mp0`, `dn0ms0`, `dp1cps`, `dp1css`, `dp1fpp`, `dp1fsp`, `dp1mpp`, `dp1msp`, `dp1mss`, `dp2cps`, `dp2css`, `dp2fpp`, `dp2fsp`, `dp3cp0`, `dp3cs0`, `dp3fs0`, `dp3mp0`, `dp3ms0`, `dt0cn0`, `dt0fs0`, `dt0ms0`, `faa`, `fat`, `fc`, `fd`, `fe`, `fg`, `fh`, `fia`, `fit`, `fp`, `fpa`, `fpt`, `fs`, `fx`, `fz`, `i`, `nc00000`, `nccn000`, `nccp000`, `nccs000`, `ncf0000`, `ncfn000`, `ncfp000`, `ncfs000`, `ncfs00a`, `ncmn000`, `ncmp000`, `ncms00`, `ncms000`, `np00000`, `np0000a`, `np0000l`, `np0000o`, `np0000p`, `p0000000`, `p010p000`, `p010s000`, `p020s000`, `p0300000`, `pd0cp000`, `pd0cs000`, `pd0fp000`, `pd0fs000`, `pd0mp000`, `pd0ms000`, `pd0ns000`, `pe000000`, `pi000000`, `pi00s000`, `pi0cp000`, `pi0cs000`, `pi0fp000`, `pi0fs000`, `pi0mp0`, `pi0mp000`, `pi0ms0`, `pi0ms000`, `pn0cp000`, `pn0cs000`, `pn0fp000`, `pn0fs000`, `pn0mp000`, `pn0ms000`, `pp1cn000`, `pp1cp000`, `pp1cs000`, `pp1csn00`, `pp1cso00`, `pp1fs000`, `pp1mp000`, `pp2cp000`, `pp2cp00p`, `pp2cs000`, `pp2cs00p`, `pp2csn00`, `pp2cso00`, `pp300000`, `pp30p000`, `pp30sa00`, `pp3cn000`, `pp3cna00`, `pp3cno00`, `pp3cpa00`, `pp3cpd00`, `pp3csa00`, `pp3csd00`, `pp3fp000`, `pp3fpa00`, `pp3fs000`, `pp3fsa00`, `pp3mp000`, `pp3mpa00`, `pp3ms000`, `pp3msa00`, `pp3ns000`, `pr00000`, `pr000000`, `pr0cn000`, `pr0cp000`, `pr0cs000`, `pr0fp000`, `pr0fs000`, `pr0mp000`, `pr0ms000`, `pt000000`, `pt0cp000`, `pt0cs000`, `pt0fp000`, `pt0mp000`, `pt0ms000`, `px1fp0p0`, `px1fs0p0`, `px1fs0s0`, `px1mp0p0`, `px1ms0p0`, `px1ms0s0`, `px2fs0s0`, `px2mp000`, `px2ms0s0`, `px3fp000`, `px3fs000`, `px3mp000`, `px3ms000`, `px3ns000`, `rg`, `rn`, `spcms`, `sps00`, `vag0000`, `vaic1p0`, `vaic3p0`, `vaic3s0`, `vaif1p0`, `vaif1s0`, `vaif2s0`, `vaif3p0`, `vaif3s0`, `vaii1p0`, `vaii1s0`, `vaii2s0`, `vaii3p0`, `vaii3s0`, `vaip1p0`, `vaip1s0`, `vaip2s0`, `vaip3p0`, `vaip3s0`, `vais3p0`, `vais3s0`, `vam02s0`, `vam03s0`, `van0000`, `vap00sm`, `vasi1p0`, `vasi1s0`, `vasi3p0`, `vasi3s0`, `vasp1p0`, `vasp1s0`, `vasp3p0`, `vasp3s0`, `vmg0000`, `vmic1p0`, `vmic1s0`, `vmic2s0`, `vmic3p0`, `vmic3s0`, `vmif1p0`, `vmif1s0`, `vmif2s0`, `vmif3p0`, `vmif3s0`, `vmii1p0`, `vmii1s0`, `vmii2s0`, `vmii3p0`, `vmii3s0`, `vmip1p0`, `vmip1s0`, `vmip2p0`, `vmip2s0`, `vmip3p0`, `vmip3s0`, `vmip3sm`, `vmis1p0`, `vmis1s0`, `vmis2s0`, `vmis3p0`, `vmis3s0`, `vmm01p0`, `vmm02p0`, `vmm02s0`, `vmm03p0`, `vmm03s0`, `vmn0000`, `vmp00fs`, `vmp00ms`, `vmp00pf`, `vmp00pm`, `vmp00sf`, `vmp00sm`, `vmsi1p0`, `vmsi1s0`, `vmsi3p0`, `vmsi3s0`, `vmsp1p0`, `vmsp1s0`, `vmsp2p0`, `vmsp2s0`, `vmsp3p0`, `vmsp3s0`, `vsg0000`, `vsic1s0`, `vsic2s0`, `vsic3p0`, `vsic3s0`, `vsif1s0`, `vsif3p0`, `vsif3s0`, `vsii1p0`, `vsii1s0`, `vsii3p0`, `vsii3s0`, `vsip1p0`, `vsip1s0`, `vsip2s0`, `vsip3p0`, `vsip3s0`, `vsis1s0`, `vsis3p0`, `vsis3s0`, `vsm02s0`, `vsm03s0`, `vsn0000`, `vsp00sm`, `vssi3p0`, `vssi3s0`, `vssp1p0`, `vssp1s0`, `vssp2s0`, `vssp3p0`, `vssp3s0`, `w`, `z`, `zm`, `zp`, `zu` | | **`morphologizer`** | `Definite=Def\|Gender=Fem\|Number=Plur\|POS=DET\|PronType=Art`, `Gender=Fem\|Number=Plur\|POS=NOUN`, `POS=ADP`, `Gender=Masc\|Number=Sing\|POS=NOUN`, 
`POS=CCONJ`, `POS=PROPN`, `Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin`, `NumForm=Digit\|NumType=Card\|POS=NUM`, `Gender=Masc\|Number=Plur\|POS=NOUN`, `NumForm=Digit\|POS=NOUN`, `Definite=Def\|Gender=Masc\|Number=Sing\|POS=DET\|PronType=Art`, `POS=NOUN`, `Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Imp\|VerbForm=Fin`, `POS=PUNCT\|PunctType=Comm`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Past\|VerbForm=Fin`, `POS=ADV`, `Definite=Ind\|Gender=Masc\|Number=Sing\|POS=DET\|PronType=Art`, `POS=PUNCT\|PunctType=Peri`, `Mood=Ind\|Number=Plur\|POS=AUX\|Person=3\|Tense=Imp\|VerbForm=Fin`, `Definite=Def\|Gender=Masc\|Number=Plur\|POS=DET\|PronType=Art`, `Case=Acc\|POS=PRON\|Person=3\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin`, `Number=Plur\|POS=ADJ`, `Number=Sing\|POS=DET\|Person=3\|Poss=Yes\|PronType=Prs`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin`, `Number=Sing\|POS=ADJ`, `POS=PRON\|PronType=Int,Rel`, `Number=Sing\|POS=DET\|PronType=Tot`, `Gender=Fem\|Number=Sing\|POS=NOUN`, `Definite=Def\|Gender=Fem\|Number=Sing\|POS=DET\|PronType=Art`, `Gender=Masc\|Number=Sing\|POS=ADJ`, `POS=SCONJ`, `Mood=Sub\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin`, `Number=Plur\|POS=NOUN`, `POS=AUX\|VerbForm=Inf`, `POS=VERB\|VerbForm=Inf`, `Gender=Masc\|Number=Plur\|POS=PRON\|PronType=Ind`, `Gender=Masc\|Number=Plur\|POS=DET\|PronType=Ind`, `Gender=Fem\|Number=Sing\|POS=DET\|PronType=Dem`, `Definite=Ind\|Gender=Fem\|Number=Sing\|POS=DET\|PronType=Art`, `Gender=Fem\|Number=Sing\|POS=ADJ\|VerbForm=Part`, `Gender=Fem\|Number=Sing\|POS=ADJ`, `Mood=Sub\|Number=Plur\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin`, `POS=PUNCT\|PunctType=Quot`, `POS=ADV\|Polarity=Neg`, `Mood=Ind\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin`, `POS=PRON\|Person=3\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `NumType=Card\|Number=Plur\|POS=NUM`, `Mood=Ind\|Number=Sing\|POS=AUX\|Person=3\|Tense=Past\|VerbForm=Fin`, `Gender=Fem\|Number=Sing\|POS=VERB\|Tense=Past\|VerbForm=Part`, `Number=Sing\|POS=PRON\|PronType=Int,Rel`, `Gender=Masc\|Number=Sing\|POS=VERB\|Tense=Past\|VerbForm=Part`, `Gender=Fem\|Number=Plur\|POS=DET\|PronType=Tot`, `Gender=Fem\|Number=Plur\|POS=ADJ`, `Case=Acc,Nom\|Gender=Masc\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Mood=Sub\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin`, `Definite=Ind\|Gender=Fem\|Number=Plur\|POS=DET\|PronType=Art`, `Case=Acc,Nom\|Gender=Fem\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Gender=Masc\|Number=Sing\|POS=DET\|PronType=Dem`, `Mood=Ind\|Number=Plur\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin`, `Case=Acc\|Definite=Def\|Gender=Masc\|Number=Sing\|POS=PRON\|Person=3\|PrepCase=Npr\|PronType=Prs`, `Mood=Ind\|Number=Plur\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin`, `POS=VERB\|VerbForm=Ger`, `Degree=Cmp\|POS=ADV`, `Number=Plur\|POS=DET\|Person=3\|Poss=Yes\|PronType=Prs`, `Gender=Masc\|Number=Plur\|POS=ADJ\|VerbForm=Part`, `Gender=Masc\|Number=Plur\|POS=DET\|PronType=Dem`, `Gender=Masc\|Number=Plur\|POS=ADJ`, `Gender=Fem\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Gender=Fem\|Number=Sing\|POS=PRON\|PronType=Dem`, `AdvType=Tim\|POS=NOUN`, `Number=Sing\|POS=NOUN`, `Gender=Fem\|Number=Sing\|POS=PRON\|PronType=Ind`, `Mood=Cnd\|Number=Sing\|POS=VERB\|Person=1\|VerbForm=Fin`, `Gender=Masc\|Number=Sing\|POS=ADJ\|VerbForm=Part`, `NumType=Card\|POS=NUM`, `Gender=Masc\|Number=Sing\|POS=DET\|PronType=Tot`, `Number=Plur\|POS=PRON\|PronType=Ind`, 
`Gender=Fem\|Number=Plur\|POS=ADJ\|VerbForm=Part`, `Gender=Masc\|Number=Sing\|POS=DET\|PronType=Ind`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Fut\|VerbForm=Fin`, `Mood=Ind\|Number=Sing\|POS=AUX\|Person=3\|Tense=Fut\|VerbForm=Fin`, `Number=Plur\|POS=PRON\|PronType=Int,Rel`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `POS=PART`, `Degree=Cmp\|Number=Sing\|POS=ADJ`, `Gender=Fem\|Number=Sing\|POS=DET\|PronType=Ind`, `Number=Sing\|POS=DET\|PronType=Ind`, `Gender=Masc\|NumType=Card\|Number=Plur\|POS=DET\|PronType=Ind`, `Mood=Cnd\|Number=Plur\|POS=AUX\|Person=3\|VerbForm=Fin`, `NumForm=Digit\|POS=SYM`, `Mood=Imp\|Number=Sing\|POS=VERB\|Person=2\|VerbForm=Fin`, `Gender=Fem\|Number=Plur\|POS=PRON\|PronType=Dem`, `Mood=Ind\|Number=Plur\|POS=VERB\|Person=3\|Tense=Fut\|VerbForm=Fin`, `AdvType=Tim\|POS=ADJ`, `Mood=Cnd\|Number=Sing\|POS=AUX\|Person=1\|VerbForm=Fin`, `POS=PUNCT\|PunctSide=Ini\|PunctType=Brck`, `POS=PUNCT\|PunctSide=Fin\|PunctType=Brck`, `Gender=Fem\|Number=Plur\|POS=DET\|PronType=Ind`, `NumForm=Digit\|NumType=Frac\|POS=NUM`, `Gender=Fem\|Number=Sing\|POS=PRON\|Poss=Yes\|PronType=Int,Rel`, `POS=PUNCT`, `POS=ADJ`, `Mood=Sub\|Number=Sing\|POS=AUX\|Person=1\|Tense=Pres\|VerbForm=Fin`, `Mood=Sub\|Number=Sing\|POS=VERB\|Person=1\|Tense=Imp\|VerbForm=Fin`, `Case=Acc\|Gender=Fem\|Number=Sing\|POS=PRON\|Person=3\|PrepCase=Npr\|PronType=Prs`, `Gender=Fem\|Number=Plur\|POS=DET\|PronType=Dem`, `Gender=Masc\|NumType=Card\|Number=Plur\|POS=DET\|PronType=Dem`, `Gender=Masc\|Number=Plur\|POS=PRON\|PronType=Tot`, `POS=PRON\|PronType=Ind`, `Case=Acc\|Gender=Masc\|Number=Sing\|POS=PRON\|Person=3\|PrepCase=Npr\|PronType=Prs`, `Gender=Fem\|NumType=Ord\|Number=Plur\|POS=ADJ`, `Number=Plur\|POS=DET\|PronType=Ind`, `Number=Plur\|POS=DET\|PronType=Dem`, `Degree=Sup\|Gender=Masc\|Number=Plur\|POS=ADJ`, `Gender=Masc\|Number=Sing\|POS=PRON\|PronType=Ind`, `Number=Sing\|POS=PRON\|PronType=Dem`, `Gender=Fem\|Number=Plur\|POS=PRON\|PronType=Ind`, `Gender=Masc\|Number=Plur\|POS=DET\|PronType=Tot`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Imp\|VerbForm=Fin`, `Mood=Ind\|Number=Plur\|POS=AUX\|Person=3\|Tense=Past\|VerbForm=Fin`, `Gender=Masc\|Number=Plur\|POS=VERB\|Tense=Past\|VerbForm=Part`, `Gender=Masc\|NumType=Ord\|Number=Sing\|POS=ADJ`, `Gender=Masc\|NumType=Ord\|Number=Plur\|POS=ADJ`, `Gender=Masc\|Number=Sing\|POS=AUX\|Tense=Past\|VerbForm=Part`, `Case=Dat\|POS=PRON\|Person=3\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Mood=Ind\|Number=Sing\|POS=AUX\|Person=1\|Tense=Pres\|VerbForm=Fin`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin`, `Degree=Cmp\|Number=Plur\|POS=ADJ`, `POS=AUX\|VerbForm=Ger`, `Gender=Fem\|POS=NOUN`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=1\|Tense=Imp\|VerbForm=Fin`, `Gender=Masc\|Number=Sing\|POS=PRON\|Poss=Yes\|PronType=Int,Rel`, `Mood=Ind\|Number=Sing\|POS=AUX\|Person=1\|Tense=Imp\|VerbForm=Fin`, `Gender=Fem\|Number=Sing\|Number[psor]=Plur\|POS=DET\|Person=1\|Poss=Yes\|PronType=Prs`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs`, `Definite=Ind\|Gender=Masc\|Number=Plur\|POS=DET\|PronType=Art`, `POS=PUNCT\|PunctType=Colo`, `Mood=Sub\|Number=Plur\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin`, `Mood=Imp\|Number=Plur\|POS=VERB\|Person=3\|VerbForm=Fin`, `Gender=Fem\|Number=Sing\|POS=DET\|PronType=Neg`, `Gender=Masc\|Number=Sing\|POS=PRON\|PronType=Dem`, `Case=Acc\|Gender=Masc\|Number=Plur\|POS=PRON\|Person=3\|PrepCase=Npr\|PronType=Prs`, 
`Case=Acc\|Gender=Fem\|Number=Plur\|POS=PRON\|Person=3\|PrepCase=Npr\|PronType=Prs`, `Gender=Fem\|Number=Plur\|POS=VERB\|Tense=Past\|VerbForm=Part`, `Number=Sing\|POS=PRON\|PronType=Neg`, `POS=PUNCT\|PunctType=Semi`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `Number=Sing\|POS=PRON\|PronType=Ind`, `Mood=Sub\|Number=Plur\|POS=VERB\|Person=3\|Tense=Imp\|VerbForm=Fin`, `Case=Acc,Nom\|Gender=Masc\|Number=Plur\|POS=PRON\|Person=3\|PronType=Prs`, `POS=INTJ`, `Gender=Masc\|NumType=Card\|Number=Sing\|POS=PRON\|PronType=Dem`, `Mood=Ind\|Number=Plur\|POS=AUX\|Person=3\|Tense=Fut\|VerbForm=Fin`, `Degree=Sup\|Gender=Masc\|Number=Sing\|POS=ADJ`, `Mood=Ind\|Number=Plur\|POS=AUX\|Person=1\|Tense=Pres\|VerbForm=Fin`, `Number=Plur\|POS=PRON\|Person=3\|Poss=Yes\|PronType=Prs`, `POS=PUNCT\|PunctType=Dash`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs`, `Mood=Cnd\|Number=Plur\|POS=VERB\|Person=1\|VerbForm=Fin`, `Gender=Masc\|Number=Sing\|POS=DET\|PronType=Neg`, `Gender=Fem\|NumType=Card\|Number=Plur\|POS=NUM`, `Gender=Masc\|Number=Sing\|POS=PRON\|PronType=Tot`, `Gender=Masc\|NumType=Card\|Number=Plur\|POS=NUM`, `Gender=Masc\|POS=NOUN`, `Gender=Fem\|NumType=Card\|Number=Sing\|POS=DET\|PronType=Ind`, `Gender=Fem\|NumType=Card\|Number=Plur\|POS=DET\|PronType=Ind`, `Mood=Ind\|Number=Sing\|POS=AUX\|Person=3\|Tense=Imp\|VerbForm=Fin`, `POS=NOUN\|VerbForm=Inf`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Mood=Ind\|Number=Plur\|POS=AUX\|Person=1\|Tense=Imp\|VerbForm=Fin`, `Mood=Sub\|Number=Sing\|POS=VERB\|Person=3\|Tense=Imp\|VerbForm=Fin`, `Gender=Masc\|Number=Sing\|Number[psor]=Plur\|POS=DET\|Person=1\|Poss=Yes\|PronType=Prs`, `Gender=Masc\|NumType=Card\|Number=Sing\|POS=NUM`, `Mood=Sub\|Number=Sing\|POS=AUX\|Person=1\|Tense=Imp\|VerbForm=Fin`, `Gender=Masc\|Number=Plur\|POS=PRON\|Poss=Yes\|PronType=Int,Rel`, `Gender=Fem\|NumType=Card\|Number=Sing\|POS=DET\|PronType=Dem`, `Mood=Imp\|Number=Sing\|POS=VERB\|Person=3\|VerbForm=Fin`, `Mood=Sub\|Number=Plur\|POS=VERB\|Person=1\|Tense=Pres\|VerbForm=Fin`, `Mood=Ind\|Number=Plur\|POS=VERB\|Person=1\|Tense=Fut\|VerbForm=Fin`, `Gender=Masc\|Number=Sing\|POS=PRON\|PronType=Neg`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=1\|PronType=Prs`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=1\|Tense=Past\|VerbForm=Fin`, `Mood=Ind\|Number=Plur\|POS=VERB\|Person=1\|Tense=Past\|VerbForm=Fin`, `Degree=Abs\|Gender=Masc\|Number=Sing\|POS=ADJ`, `Number=Sing\|Number[psor]=Sing\|POS=DET\|Person=1\|Poss=Yes\|PronType=Prs`, `Case=Acc,Nom\|Gender=Masc\|Number=Plur\|POS=PRON\|Person=1\|PronType=Prs`, `Mood=Imp\|Number=Sing\|POS=AUX\|Person=3\|VerbForm=Fin`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Mood=Sub\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|VerbForm=Fin`, `Gender=Masc\|Number=Sing\|POS=DET\|Person=3\|Poss=Yes\|PronType=Prs`, `Gender=Fem\|Number=Sing\|POS=DET\|PronType=Tot`, `POS=DET\|PronType=Ind`, `POS=DET\|PronType=Int,Rel`, `AdvType=Tim\|POS=ADV`, `Mood=Cnd\|Number=Sing\|POS=AUX\|Person=3\|VerbForm=Fin`, `POS=PUNCT\|PunctSide=Ini\|PunctType=Qest`, `POS=PUNCT\|PunctSide=Fin\|PunctType=Qest`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs`, `Number=Plur\|Number[psor]=Sing\|POS=DET\|Person=1\|Poss=Yes\|PronType=Prs`, `Gender=Masc\|NumType=Card\|Number=Sing\|POS=DET\|PronType=Ind`, `Mood=Cnd\|Number=Plur\|POS=VERB\|Person=3\|VerbForm=Fin`, 
`Degree=Abs\|Gender=Fem\|Number=Sing\|POS=ADJ`, `Mood=Sub\|Number=Plur\|POS=VERB\|Person=1\|Tense=Imp\|VerbForm=Fin`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `POS=PUNCT\|PunctSide=Ini\|PunctType=Excl`, `POS=PUNCT\|PunctSide=Fin\|PunctType=Excl`, `Mood=Cnd\|Number=Sing\|POS=VERB\|Person=3\|VerbForm=Fin`, `Gender=Fem\|Number=Sing\|POS=PRON\|PronType=Tot`, `Gender=Masc\|Number=Plur\|Number[psor]=Plur\|POS=DET\|Person=1\|Poss=Yes\|PronType=Prs`, `Mood=Imp\|Number=Plur\|POS=VERB\|Person=1\|VerbForm=Fin`, `Gender=Masc\|NumType=Card\|Number=Plur\|POS=PRON\|PronType=Ind`, `Gender=Masc\|NumType=Card\|Number=Sing\|POS=PRON\|PronType=Ind`, `Gender=Masc\|Number=Plur\|POS=PRON\|PronType=Dem`, `Degree=Abs\|Gender=Masc\|NumType=Card\|Number=Plur\|POS=DET\|PronType=Ind`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=1\|PrepCase=Pre\|PronType=Prs`, `Definite=Ind\|Gender=Fem\|NumType=Card\|Number=Sing\|POS=DET\|PronType=Art`, `Gender=Fem\|NumType=Card\|Number=Sing\|POS=NUM`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Mood=Sub\|Number=Plur\|POS=AUX\|Person=3\|Tense=Imp\|VerbForm=Fin`, `Gender=Fem\|Number=Plur\|Number[psor]=Plur\|POS=DET\|Person=1\|Poss=Yes\|PronType=Prs`, `POS=SCONJ\|PronType=Int,Rel`, `Case=Dat\|POS=PRON\|Person=3\|PrepCase=Npr\|PronType=Prs`, `Case=Acc\|POS=PRON\|Person=3\|PrepCase=Pre\|PronType=Prs\|Reflex=Yes`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin`, `NumType=Card\|Number=Sing\|POS=DET\|PronType=Ind`, `Mood=Ind\|Number=Plur\|POS=VERB\|Person=1\|Tense=Imp\|VerbForm=Fin`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=2\|PrepCase=Npr\|PronType=Prs`, `Case=Acc,Nom\|Gender=Fem\|Number=Sing\|POS=PRON\|Person=3\|PronType=Prs`, `Number=Sing\|POS=DET\|PronType=Dem`, `Mood=Sub\|Number=Sing\|POS=AUX\|Person=3\|Tense=Imp\|VerbForm=Fin`, `Gender=Fem\|Number=Sing\|POS=PRON\|PronType=Neg`, `Degree=Sup\|Gender=Fem\|Number=Sing\|POS=ADJ`, `Case=Nom\|Number=Sing\|POS=PRON\|Person=2\|PronType=Prs`, `Number=Sing\|Number[psor]=Sing\|POS=DET\|Person=2\|Poss=Yes\|PronType=Prs`, `Mood=Ind\|Number=Sing\|POS=AUX\|Person=1\|Tense=Fut\|VerbForm=Fin`, `Gender=Masc\|Number=Sing\|Number[psor]=Sing\|POS=DET\|Person=1\|Poss=Yes\|PronType=Ind`, `Case=Acc,Nom\|Number=Sing\|POS=PRON\|Person=2\|Polite=Form\|PronType=Prs`, `Gender=Masc\|NumType=Card\|Number=Sing\|POS=PRON\|PronType=Int,Rel`, `Gender=Fem\|NumType=Card\|Number=Plur\|POS=PRON\|PronType=Ind`, `Case=Acc,Dat\|POS=PRON\|Person=3\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Mood=Ind\|Number=Plur\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=2\|PrepCase=Npr\|PronType=Prs`, `Mood=Cnd\|Number=Sing\|POS=VERB\|Person=2\|VerbForm=Fin`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=1\|Tense=Fut\|VerbForm=Fin`, `Mood=Cnd\|Number=Plur\|POS=AUX\|Person=1\|VerbForm=Fin`, `NumType=Card\|Number=Plur\|POS=PRON\|PronType=Ind`, `Gender=Masc\|NumType=Card\|Number=Sing\|POS=DET\|PronType=Dem`, `Degree=Abs\|Gender=Masc\|Number=Sing\|POS=DET\|PronType=Ind`, `Gender=Fem\|Number=Plur\|POS=PRON\|Poss=Yes\|PronType=Int,Rel`, `Mood=Ind\|Number=Sing\|POS=AUX\|Person=1\|Tense=Past\|VerbForm=Fin`, `Case=Acc,Nom\|Number=Plur\|POS=PRON\|Person=2\|Polite=Form\|PronType=Prs`, `Mood=Imp\|Number=Sing\|POS=AUX\|Person=2\|VerbForm=Fin`, `Gender=Fem\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=2\|Poss=Yes\|PronType=Ind`, `NumType=Card\|Number=Sing\|POS=NUM`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=2\|Tense=Past\|VerbForm=Fin`, 
`Mood=Ind\|Number=Sing\|POS=AUX\|Person=2\|Tense=Imp\|VerbForm=Fin`, `Mood=Ind\|Number=Sing\|POS=AUX\|Person=2\|Tense=Pres\|VerbForm=Fin`, `Case=Com\|Number=Sing\|POS=PRON\|Person=2\|PrepCase=Pre\|PronType=Prs`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=2\|Tense=Imp\|VerbForm=Fin`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=2\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Number=Sing\|POS=PRON\|Person=2\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Case=Acc\|Number=Sing\|POS=PRON\|Person=2\|PrepCase=Pre\|PronType=Prs`, `Mood=Cnd\|Number=Sing\|POS=AUX\|Person=2\|VerbForm=Fin`, `Mood=Sub\|Number=Sing\|POS=AUX\|Person=2\|Tense=Pres\|VerbForm=Fin`, `Number=Sing\|POS=NOUN\|VerbForm=Fin`, `Case=Dat\|Number=Sing\|POS=PRON\|Person=2\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Mood=Ind\|Number=Sing\|POS=VERB\|Person=2\|Tense=Fut\|VerbForm=Fin`, `Gender=Fem\|NumType=Card\|Number=Sing\|POS=DET\|PronType=Int,Rel`, `Mood=Sub\|Number=Sing\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin`, `Mood=Ind\|Number=Sing\|POS=AUX\|Person=2\|Tense=Fut\|VerbForm=Fin`, `Gender=Fem\|Number=Plur\|POS=PRON\|PronType=Tot`, `Gender=Masc\|NumType=Card\|Number=Plur\|POS=DET\|PronType=Int,Rel`, `POS=SYM`, `Number=Sing\|POS=VERB\|VerbForm=Fin`, `POS=VERB\|VerbForm=Fin`, `Degree=Abs\|Gender=Masc\|Number=Plur\|POS=ADJ`, `Degree=Abs\|Gender=Fem\|Number=Plur\|POS=ADJ`, `Gender=Masc\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=1\|Poss=Yes\|PronType=Prs`, `Mood=Sub\|Number=Plur\|POS=AUX\|Person=1\|Tense=Imp\|VerbForm=Fin`, `Gender=Fem\|NumType=Card\|Number=Plur\|POS=DET\|PronType=Dem`, `Definite=Ind\|Gender=Masc\|NumType=Card\|Number=Sing\|POS=DET\|PronType=Art`, `Degree=Sup\|Gender=Fem\|Number=Plur\|POS=ADJ`, `Number=Plur\|POS=PRON\|PronType=Dem`, `Case=Acc,Dat\|Gender=Masc\|Number=Plur\|POS=PRON\|Person=2\|PrepCase=Npr\|PronType=Prs`, `Gender=Masc\|Number=Sing\|POS=AUX\|VerbForm=Fin`, `Mood=Ind\|Number=Plur\|POS=AUX\|Person=1\|Tense=Past\|VerbForm=Fin`, `Gender=Masc\|NumType=Card\|Number=Sing\|POS=DET\|PronType=Int,Rel`, `Gender=Masc\|Number=Plur\|POS=DET\|Person=3\|Poss=Yes\|PronType=Prs`, `Gender=Masc\|NumType=Card\|Number=Plur\|POS=PRON\|PronType=Int,Rel`, `Gender=Masc\|Number=Sing\|POS=PRON\|Person=3\|Poss=Yes\|PronType=Prs`, `Number=Plur\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Mood=Ind\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin`, `Mood=Ind\|Number=Plur\|POS=AUX\|Person=1\|Tense=Fut\|VerbForm=Fin`, `Number=Sing\|POS=PRON\|Person=3\|Poss=Yes\|PronType=Prs`, `Gender=Fem\|NumType=Card\|Number=Sing\|POS=PRON\|PronType=Ind`, `Definite=Def\|Foreign=Yes\|POS=DET\|PronType=Art`, `Case=Com\|POS=PRON\|Person=3\|PrepCase=Pre\|PronType=Prs\|Reflex=Yes`, `Number=Sing\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=2\|PrepCase=Npr\|PronType=Prs`, `Mood=Sub\|Number=Plur\|POS=AUX\|Person=1\|Tense=Pres\|VerbForm=Fin`, `NumForm=Digit\|NumType=Frac\|POS=SYM`, `Gender=Masc\|NumType=Card\|Number=Plur\|POS=PRON\|PronType=Dem`, `Gender=Fem\|Number=Sing\|POS=DET\|Person=3\|Poss=Yes\|PronType=Prs`, `Mood=Sub\|Number=Plur\|POS=VERB\|Person=2\|Tense=Pres\|VerbForm=Fin`, `Case=Acc\|Gender=Masc\|Number=Plur\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `NumType=Card\|Number=Plur\|POS=DET\|PronType=Ind`, `Gender=Fem\|Number=Sing\|Number[psor]=Plur\|POS=PRON\|Person=1\|Poss=Yes\|PronType=Prs`, `Mood=Ind\|Number=Sing\|POS=AUX\|Person=3\|Tense=Pres\|Typo=Yes\|VerbForm=Fin`, `Gender=Masc\|Mood=Ind\|Number=Sing\|POS=VERB\|Person=3\|Tense=Pres\|VerbForm=Fin`, 
`Gender=Fem\|Number=Plur\|Number[psor]=Plur\|POS=PRON\|Person=1\|Poss=Yes\|PronType=Prs`, `Number=Sing\|POS=PRON\|PronType=Tot`, `AdvType=Tim\|Gender=Masc\|Number=Sing\|POS=NOUN`, `POS=AUX\|VerbForm=Fin`, `Gender=Fem\|NumType=Card\|Number=Plur\|POS=PRON\|PronType=Int,Rel`, `Gender=Fem\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=1\|Poss=Yes\|PronType=Ind`, `Case=Dat\|Number=Plur\|POS=PRON\|Person=2\|PrepCase=Npr\|PronType=Prs\|Reflex=Yes`, `Gender=Fem\|Number=Sing\|Number[psor]=Plur\|POS=DET\|Person=2\|Poss=Yes\|PronType=Prs`, `Case=Acc\|Number=Plur\|POS=PRON\|Person=2\|PrepCase=Npr\|PronType=Prs`, `Mood=Ind\|Number=Plur\|POS=AUX\|Person=2\|Tense=Pres\|VerbForm=Fin`, `POS=PRON\|PronType=Dem`, `Gender=Masc\|Number=Plur\|POS=DET\|PronType=Art`, `Foreign=Yes\|POS=X`, `Degree=Abs\|Gender=Masc\|NumType=Card\|Number=Plur\|POS=PRON\|PronType=Ind`, `Gender=Fem\|Number=Sing\|Number[psor]=Sing\|POS=DET\|Person=1\|Poss=Yes\|PronType=Ind`, `Definite=Def\|Foreign=Yes\|Gender=Fem\|Number=Sing\|POS=DET\|PronType=Art`, `Foreign=Yes\|POS=NOUN`, `Foreign=Yes\|POS=ADP`, `Foreign=Yes\|POS=CCONJ`, `Foreign=Yes\|POS=PROPN`, `Case=Com\|Number=Sing\|POS=PRON\|Person=1\|PrepCase=Pre\|PronType=Prs`, `Gender=Fem\|Number=Plur\|POS=DET\|Person=3\|Poss=Yes\|PronType=Prs`, `Gender=Masc\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=1\|Poss=Yes\|PronType=Ind`, `Gender=Masc\|Number=Sing\|POS=NOUN\|VerbForm=Part`, `Case=Com\|POS=PRON\|Person=3\|PronType=Prs\|Reflex=Yes`, `Gender=Fem\|Number=Sing\|POS=DET\|Person=3\|Poss=Yes\|PronType=Ind`, `Case=Acc,Dat\|Number=Sing\|POS=PRON\|Person=2\|PrepCase=Npr\|PronType=Prs`, `Gender=Masc\|Number=Plur\|POS=PRON\|Person=2\|Poss=Yes\|PronType=Ind`, `Number=Sing\|POS=DET\|PronType=Int,Rel`, `Gender=Fem\|NumType=Card\|Number=Sing\|POS=PRON\|PronType=Dem`, `Number=Sing\|POS=DET\|Person=1\|Poss=Yes\|PronType=Prs`, `Gender=Masc\|POS=ADJ`, `Gender=Fem\|Number=Sing\|POS=PRON\|Person=3\|Poss=Yes\|PronType=Prs`, `Mood=Imp\|Number=Plur\|POS=VERB\|Person=2\|VerbForm=Fin`, `Gender=Fem\|Number=Plur\|Number[psor]=Plur\|POS=DET\|Person=2\|Poss=Yes\|PronType=Prs`, `POS=X`, `Gender=Masc\|Number=Sing\|Number[psor]=Sing\|POS=PRON\|Person=2\|Poss=Yes\|PronType=Ind`, `Gender=Fem\|Number=Plur\|POS=PRON\|Person=3\|Poss=Yes\|PronType=Prs`, `Number=Plur\|Number[psor]=Sing\|POS=DET\|Person=2\|Poss=Yes\|PronType=Prs`, `Degree=Cmp\|POS=ADJ`, `Case=Acc\|POS=PRON\|Person=3\|PrepCase=Npr\|PronType=Prs`, `Case=Acc,Dat\|Number=Sing\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs`, `Gender=Masc\|Number=Sing\|POS=DET\|Person=3\|Poss=Yes\|PronType=Ind`, `POS=NOUN\|PunctType=Comm`, `POS=PRON\|PronType=Neg`, `Case=Acc,Dat\|Number=Plur\|POS=PRON\|Person=1\|PrepCase=Npr\|PronType=Prs`, `Gender=Fem\|Number=Sing\|POS=PRON\|Person=1\|Poss=Yes\|PronType=Prs` | | **`parser`** | `ROOT`, `acl`, `advcl`, `advmod`, `amod`, `appos`, `aux`, `case`, `cc`, `ccomp`, `compound`, `conj`, `cop`, `csubj`, `dep`, `det`, `expl:impers`, `expl:pass`, `expl:pv`, `fixed`, `flat`, `iobj`, `mark`, `nmod`, `nsubj`, `nummod`, `obj`, `obl`, `parataxis`, `punct`, `xcomp` | | **`ner`** | `LOC`, `MISC`, `ORG`, `PER` | </details> ### Accuracy | Type | Score | | --- | --- | | `TAG_ACC` | 96.34 | | `POS_ACC` | 99.14 | | `MORPH_ACC` | 98.69 | | `LEMMA_ACC` | 98.23 | | `DEP_UAS` | 93.63 | | `DEP_LAS` | 91.55 | | `SENTS_P` | 95.31 | | `SENTS_R` | 97.39 | | `SENTS_F` | 96.34 | | `ENTS_F` | 87.82 | | `ENTS_P` | 87.80 | | `ENTS_R` | 87.83 | | `TRANSFORMER_LOSS` | 551584.93 | | `TAGGER_LOSS` | 37731.60 | | `MORPHOLOGIZER_LOSS` | 26896.96 | | `PARSER_LOSS` | 
476288.23 | | `NER_LOSS` | 8609.15 |
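A minimal usage sketch after installing the wheel above (the sentence reuses the card's widget example):

```python
import spacy

nlp = spacy.load("es_bsc_demo_trf")
doc = nlp(
    "El Fútbol Club Barcelona, conocido popularmente como Barça, "
    "es una entidad polideportiva con sede en Barcelona, España."
)
# Entities from the ner component; coarse POS tags from the morphologizer.
print([(ent.text, ent.label_) for ent in doc.ents])
print([(tok.text, tok.pos_) for tok in doc])
```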
xufangzhi/Genius_Magpie-25K_LLaMA3.1-8B-Instruct
xufangzhi
"2025-04-16T08:51:35Z"
7
0
null
[ "pytorch", "llama", "arxiv:2504.08672", "license:apache-2.0", "region:us" ]
null
"2025-02-20T05:35:35Z"
UnaiGurbindo/speecht5_finetuned_voxpopuli_lt
UnaiGurbindo
"2024-02-07T12:37:47Z"
11
0
transformers
[ "transformers", "tensorboard", "safetensors", "speecht5", "text-to-audio", "generated_from_trainer", "text-to-speech", "dataset:facebook/voxpopuli", "base_model:microsoft/speecht5_tts", "base_model:finetune:microsoft/speecht5_tts", "license:mit", "endpoints_compatible", "region:us" ]
text-to-speech
"2024-02-07T09:25:12Z"
--- license: mit base_model: microsoft/speecht5_tts tags: - generated_from_trainer - text-to-speech datasets: - facebook/voxpopuli model-index: - name: speecht5_finetuned_voxpopuli_lt_gg results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # speecht5_finetuned_voxpopuli_lt_gg This model is a fine-tuned version of [microsoft/speecht5_tts](https://huggingface.co/microsoft/speecht5_tts) on the facebook/voxpopuli dataset. It achieves the following results on the evaluation set: - Loss: 0.4952 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 1500 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 0.5286 | 51.95 | 500 | 0.5118 | | 0.4869 | 103.9 | 1000 | 0.4986 | | 0.481 | 155.84 | 1500 | 0.4952 | ### Framework versions - Transformers 4.35.2 - Pytorch 2.1.0+cu121 - Datasets 2.16.1 - Tokenizers 0.15.1
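A minimal inference sketch using the standard SpeechT5 API (not from the card; the zero speaker embedding is a placeholder, so substitute a real 512-dimensional x-vector for sensible audio):

```python
import torch
import soundfile as sf
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan

repo = "UnaiGurbindo/speecht5_finetuned_voxpopuli_lt"
processor = SpeechT5Processor.from_pretrained(repo)
model = SpeechT5ForTextToSpeech.from_pretrained(repo)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Labas rytas", return_tensors="pt")  # Lithuanian sample text
speaker_embeddings = torch.zeros(1, 512)  # placeholder x-vector
speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
sf.write("speech.wav", speech.numpy(), samplerate=16000)
```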
venradenator/vit-base-patch16-224-finetuned-flower
venradenator
"2023-05-24T18:37:37Z"
163
0
transformers
[ "transformers", "pytorch", "vit", "image-classification", "generated_from_trainer", "dataset:imagefolder", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
"2023-05-24T18:26:05Z"
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder model-index: - name: vit-base-patch16-224-finetuned-flower results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 2.0.1+cu118 - Datasets 2.7.1 - Tokenizers 0.13.3
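A minimal inference sketch (the image path is hypothetical):

```python
from transformers import pipeline

classifier = pipeline(
    "image-classification", model="venradenator/vit-base-patch16-224-finetuned-flower"
)
print(classifier("flower.jpg"))  # hypothetical local image file
```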
google/t5-efficient-small-dl16
google
"2023-01-24T16:48:08Z"
25
0
transformers
[ "transformers", "pytorch", "tf", "jax", "t5", "text2text-generation", "deep-narrow", "en", "dataset:c4", "arxiv:2109.10686", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
text2text-generation
"2022-03-02T23:29:05Z"
--- language: - en datasets: - c4 tags: - deep-narrow inference: false license: apache-2.0 --- # T5-Efficient-SMALL-DL16 (Deep-Narrow version) T5-Efficient-SMALL-DL16 is a variation of [Google's original T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) following the [T5 model architecture](https://huggingface.co/docs/transformers/model_doc/t5). It is a *pretrained-only* checkpoint and was released with the paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** by *Yi Tay, Mostafa Dehghani, Jinfeng Rao, William Fedus, Samira Abnar, Hyung Won Chung, Sharan Narang, Dani Yogatama, Ashish Vaswani, Donald Metzler*. In a nutshell, the paper indicates that a **Deep-Narrow** model architecture is favorable for **downstream** performance compared to other model architectures of similar parameter count. To quote the paper: > We generally recommend a DeepNarrow strategy where the model’s depth is preferentially increased > before considering any other forms of uniform scaling across other dimensions. This is largely due to > how much depth influences the Pareto-frontier as shown in earlier sections of the paper. Specifically, a > tall small (deep and narrow) model is generally more efficient compared to the base model. Likewise, > a tall base model might also generally more efficient compared to a large model. We generally find > that, regardless of size, even if absolute performance might increase as we continue to stack layers, > the relative gain of Pareto-efficiency diminishes as we increase the layers, converging at 32 to 36 > layers. Finally, we note that our notion of efficiency here relates to any one compute dimension, i.e., > params, FLOPs or throughput (speed). We report all three key efficiency metrics (number of params, > FLOPS and speed) and leave this decision to the practitioner to decide which compute dimension to > consider. To be more precise, *model depth* is defined as the number of transformer blocks that are stacked sequentially. A sequence of word embeddings is therefore processed sequentially by each transformer block. ## Details model architecture This model checkpoint - **t5-efficient-small-dl16** - is of model type **Small** with the following variations: - **dl** is **16** It has **102.49** million parameters and thus requires *ca.* **409.97 MB** of memory in full precision (*fp32*) or **204.99 MB** of memory in half precision (*fp16* or *bf16*). 
A summary of the *original* T5 model architectures can be seen here: | Model | nl (el/dl) | ff | dm | kv | nh | #Params| | ----| ---- | ---- | ---- | ---- | ---- | ----| | Tiny | 4/4 | 1024 | 256 | 32 | 4 | 16M| | Mini | 4/4 | 1536 | 384 | 32 | 8 | 31M| | Small | 6/6 | 2048 | 512 | 32 | 8 | 60M| | Base | 12/12 | 3072 | 768 | 64 | 12 | 220M| | Large | 24/24 | 4096 | 1024 | 64 | 16 | 738M| | Xl | 24/24 | 16384 | 1024 | 128 | 32 | 3B| | XXl | 24/24 | 65536 | 1024 | 128 | 128 | 11B| where the following abbreviations are used: | Abbreviation | Definition | | ----| ---- | | nl | Number of transformer blocks (depth) | | dm | Dimension of embedding vector (output vector of transformers block) | | kv | Dimension of key/value projection matrix | | nh | Number of attention heads | | ff | Dimension of intermediate vector within transformer block (size of feed-forward projection matrix) | | el | Number of transformer blocks in the encoder (encoder depth) | | dl | Number of transformer blocks in the decoder (decoder depth) | | sh | Signifies that attention heads are shared | | skv | Signifies that key-values projection matrices are tied | If a model checkpoint specifies no *el* or *dl*, then both the number of encoder and decoder layers correspond to *nl*. ## Pre-Training The checkpoint was pretrained on the [Colossal, Cleaned version of Common Crawl (C4)](https://huggingface.co/datasets/c4) for 524288 steps using the span-based masked language modeling (MLM) objective. ## Fine-Tuning **Note**: This model is a **pretrained** checkpoint and has to be fine-tuned for practical usage. The checkpoint was pretrained in English and is therefore only useful for English NLP tasks. You can follow one of the following examples to fine-tune the model: *PyTorch*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization) - [Question Answering](https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_seq2seq_qa.py) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/pytorch/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *Tensorflow*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/tensorflow/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. *JAX/Flax*: - [Summarization](https://github.com/huggingface/transformers/tree/master/examples/flax/summarization) - [Text Classification](https://github.com/huggingface/transformers/tree/master/examples/flax/text-classification) - *Note*: You will have to slightly adapt the training example here to make it work with an encoder-decoder model. ## Downstream Performance TODO: Add table if available ## Computational Complexity TODO: Add table if available ## More information We strongly recommend the reader go carefully through the original paper **[Scale Efficiently: Insights from Pre-training and Fine-tuning Transformers](https://arxiv.org/abs/2109.10686)** to get a more nuanced understanding of this model checkpoint.
As explained in the following [issue](https://github.com/google-research/google-research/issues/986#issuecomment-1035051145), checkpoints including the *sh* or *skv* model architecture variations have *not* been ported to Transformers as they are probably of limited practical usage and are lacking a more detailed description. Those checkpoints are kept [here](https://huggingface.co/NewT5SharedHeadsSharedKeyValues) as they might be ported potentially in the future.
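A minimal sketch for loading the checkpoint and sanity-checking the parameter count quoted above (the model still needs fine-tuning before practical use):

```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/t5-efficient-small-dl16")
model = T5ForConditionalGeneration.from_pretrained("google/t5-efficient-small-dl16")
# Should print roughly 102.49M, matching the figure in the card.
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.2f}M parameters")
```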
alecmoran/healthcare_chatbot_mod
alecmoran
"2025-02-10T16:29:03Z"
0
0
null
[ "license:mit", "region:us" ]
null
"2025-02-10T16:25:57Z"
---
license: mit
---

# Healthcare Chatbot (FLAN-T5)

## 📌 Overview

The Healthcare Chatbot is a medical question-answering AI powered by FLAN-T5, a fine-tuned language model. It can provide general guidance on medical topics, symptoms, and treatment suggestions based on a pre-trained dataset.

🚨 **Note**: This chatbot is for informational purposes only and should not be used as a substitute for professional medical advice. Always consult a doctor for health-related concerns.

## 📷 Screenshot

![image/png](https://cdn-uploads.huggingface.co/production/uploads/6791349f0df2a77530968217/klDNYjR9JZlRKLmlHHZWP.png)

## 🚀 How to Install & Run

### 🔹 Step 1: Download the Project

**Option 1: Clone from Hugging Face**

```
git clone https://huggingface.co/alecmoran/healthcare_chatbot
cd healthcare_chatbot
```

**Option 2: Download as a ZIP**

1. Go to the Hugging Face model page.
2. Click on "Download".
3. Extract the ZIP file.

### 🔹 Step 2: Download & Prepare the Model

The chatbot requires FLAN-T5 to be stored locally before running.

**For Windows Users 🖥️**

1. Open PowerShell in the project directory.
2. Run the following command to download the model:

   ```
   ./get_model.ps1
   ```

3. Once the model is downloaded, run the chatbot:

   ```
   python healthcare_chatbot.py
   ```

**For macOS/Linux Users 💻**

1. Open Terminal in the project directory.
2. Run the following command to download the model:

   ```
   git clone https://huggingface.co/google/flan-t5-small
   ```

3. Once the model is downloaded, run the chatbot:

   ```
   python3 healthcare_chatbot.py
   ```

## 💡 Features

- ✅ **Local Model Loading** - Runs FLAN-T5 from your system for faster response times.
- ✅ **Medical Q&A Dataset** - Includes common questions about symptoms and treatments.
- ✅ **Voice Input & Text-to-Speech** - Allows users to speak their questions & hear responses.
- ✅ **Streamlit UI** - Simple and interactive web-based interface.

## ⚠️ Disclaimer

This chatbot provides general medical information but is not a replacement for professional healthcare advice. Always consult a licensed physician for medical concerns.

## 📩 Contact & Support

For issues or improvements, open an issue on the Hugging Face repo.
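To illustrate what local model loading looks like, here is a minimal sketch of the chatbot's core question-answering step. The local path `./flan-t5-small` follows Step 2 above, but the function name and prompt handling are illustrative assumptions, not the repository's exact code:

```python
# A minimal sketch, assuming the model was cloned into ./flan-t5-small
# as in Step 2; loading from a local directory avoids network calls
# at inference time.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_DIR = "./flan-t5-small"
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_DIR)

def answer(question: str) -> str:
    # FLAN-T5 is instruction-tuned, so a plain natural-language
    # question works as the prompt.
    inputs = tokenizer(question, return_tensors="pt")
    output_ids = model.generate(**inputs, max_new_tokens=128)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

print(answer("What are common symptoms of dehydration?"))
```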
kadasterdst/querygenerator
kadasterdst
"2025-02-09T20:40:46Z"
150
3
transformers
[ "transformers", "tf", "safetensors", "t5", "text2text-generation", "generated_from_keras_callback", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text2text-generation
"2022-10-17T08:28:37Z"
---
library_name: transformers
tags:
- generated_from_keras_callback
model-index:
- name: querygenerator
  results: []
---

<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. -->

# querygenerator

This model was trained from scratch on an unknown dataset. No evaluation results were recorded.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- optimizer: None
- training_precision: float32

### Training results

More information needed

### Framework versions

- Transformers 4.48.3
- TensorFlow 2.14.1
- Datasets 2.14.6
- Tokenizers 0.21.0
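Since the card documents neither intended use nor prompt format, here is a minimal, hedged sketch of querying the checkpoint through the Transformers pipeline API; the example input is an illustrative assumption:

```python
# A minimal sketch of running this text2text-generation checkpoint;
# the input string is a placeholder, as the card does not document
# the expected prompt format.
from transformers import pipeline

generator = pipeline("text2text-generation", model="kadasterdst/querygenerator")
result = generator("Example input text", max_new_tokens=64)
print(result[0]["generated_text"])
```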
SolidSnacke/L3-8B-Lunaris-v1-i-GGUF
SolidSnacke
"2024-07-01T21:58:20Z"
14
0
transformers
[ "transformers", "gguf", "llama", "text-generation-inference", "text-generation", "en", "license:llama3", "endpoints_compatible", "region:us", "conversational" ]
text-generation
"2024-07-01T18:19:20Z"
---
license: llama3
language:
- en
library_name: transformers
pipeline_tag: text-generation
tags:
- llama
- text-generation-inference
---

Links to the original model and the quantization script:

- Sao10K/L3-8B-Lunaris-v1: https://huggingface.co/Sao10K/L3-8B-Lunaris-v1
- FantasiaFoundry/GGUF-Quantization-Script: https://huggingface.co/FantasiaFoundry/GGUF-Quantization-Script
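As a usage sketch, GGUF quantizations like these are typically run with llama.cpp or its Python bindings. The exact `.gguf` filename below is an illustrative assumption; check the repository's file list for the quantization you actually downloaded:

```python
# A minimal sketch using llama-cpp-python; the model_path filename is
# hypothetical and should be replaced with the downloaded .gguf file.
from llama_cpp import Llama

llm = Llama(
    model_path="./L3-8B-Lunaris-v1-Q4_K_M.gguf",  # assumed filename
    n_ctx=8192,  # Llama 3 context window
)

output = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Write a short scene set on a moonlit beach."}]
)
print(output["choices"][0]["message"]["content"])
```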
pabloOmega/ocr-entities-v3-not-bio
pabloOmega
"2025-04-15T03:52:44Z"
0
0
transformers
[ "transformers", "safetensors", "layoutlmv3", "token-classification", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
"2025-04-15T01:49:17Z"