{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[33mDEPRECATION: torchsde 0.2.5 has a non-standard dependency specifier numpy>=1.19.*; python_version >= \"3.7\". pip 23.3 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of torchsde or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n",
      "\u001b[0m"
     ]
    }
   ],
   "source": [
    "!pip install git+https://github.com/chiral-carbon/diffusers@advdiff -q"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/ada/miniconda3/envs/hf-diff/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import os, re\n",
    "\n",
    "from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card\n",
    "\n",
    "\n",
    "def save_model_card(\n",
    "    repo_id: str,\n",
    "    use_dora: bool,\n",
    "    images: list = None,\n",
    "    base_model: str = None,\n",
    "    train_text_encoder=False,\n",
    "    train_text_encoder_ti=False,\n",
    "    token_abstraction_dict=None,\n",
    "    instance_prompt=None,\n",
    "    validation_prompt=None,\n",
    "    repo_folder=None,\n",
    "    vae_path=None,\n",
    "):\n",
    "    lora = \"lora\" if not use_dora else \"dora\"\n",
    "\n",
    "    widget_dict = []\n",
    "    if images is not None:\n",
    "        for i, image in enumerate(images):\n",
    "            image.save(os.path.join(repo_folder, f\"image_{i}.png\"))\n",
    "            widget_dict.append(\n",
    "                {\"text\": validation_prompt if validation_prompt else \" \", \"output\": {\"url\": f\"image_{i}.png\"}}\n",
    "            )\n",
    "    else:\n",
    "        widget_dict.append(\n",
    "            {\"text\": instance_prompt}\n",
    "        )\n",
    "    embeddings_filename = f\"{repo_folder}_emb\"\n",
    "    instance_prompt_webui = re.sub(r\"<s\\d+>\", \"\", re.sub(r\"<s\\d+>\", embeddings_filename, instance_prompt, count=1))\n",
    "    ti_keys = \", \".join(f'\"{match}\"' for match in re.findall(r\"<s\\d+>\", instance_prompt))\n",
    "    if instance_prompt_webui != embeddings_filename:\n",
    "        instance_prompt_sentence = f\"For example, `{instance_prompt_webui}`\"\n",
    "    else:\n",
    "        instance_prompt_sentence = \"\"\n",
    "    trigger_str = f\"You should use {instance_prompt} to trigger the image generation.\"\n",
    "    diffusers_imports_pivotal = \"\"\n",
    "    diffusers_example_pivotal = \"\"\n",
    "    webui_example_pivotal = \"\"\n",
    "    if train_text_encoder_ti:\n",
    "        trigger_str = (\n",
    "            \"To trigger image generation of trained concept(or concepts) replace each concept identifier \"\n",
    "            \"in you prompt with the new inserted tokens:\\n\"\n",
    "        )\n",
    "        diffusers_imports_pivotal = \"\"\"from huggingface_hub import hf_hub_download\n",
    "from safetensors.torch import load_file\n",
    "        \"\"\"\n",
    "        diffusers_example_pivotal = f\"\"\"embedding_path = hf_hub_download(repo_id='{repo_id}', filename='{embeddings_filename}.safetensors', repo_type=\"model\")\n",
    "state_dict = load_file(embedding_path)\n",
    "pipeline.load_textual_inversion(state_dict[\"clip_l\"], token=[{ti_keys}], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer)\n",
    "        \"\"\"\n",
    "        webui_example_pivotal = f\"\"\"- *Embeddings*: download **[`{embeddings_filename}.safetensors` here 💾](/{repo_id}/blob/main/{embeddings_filename}.safetensors)**.\n",
    "    - Place it on it on your `embeddings` folder\n",
    "    - Use it by adding `{embeddings_filename}` to your prompt. {instance_prompt_sentence}\n",
    "    (you need both the LoRA and the embeddings as they were trained together for this LoRA)\n",
    "    \"\"\"\n",
    "        if token_abstraction_dict:\n",
    "            for key, value in token_abstraction_dict.items():\n",
    "                tokens = \"\".join(value)\n",
    "                trigger_str += f\"\"\"\n",
    "to trigger concept `{key}` → use `{tokens}` in your prompt \\n\n",
    "\"\"\"\n",
    "    model_description = f\"\"\"\n",
    "# SD1.5 LoRA DreamBooth - {repo_id}\n",
    "\n",
    "<Gallery />\n",
    "\n",
    "## Model description\n",
    "\n",
    "### These are {repo_id} LoRA adaption weights for {base_model}.\n",
    "\n",
    "## Download model\n",
    "\n",
    "### Use it with UIs such as AUTOMATIC1111, Comfy UI, SD.Next, Invoke\n",
    "\n",
    "- **LoRA**: download **[`{repo_folder}.safetensors` here 💾](/{repo_id}/blob/main/{repo_folder}.safetensors)**.\n",
    "    - Place it on your `models/Lora` folder.\n",
    "    - On AUTOMATIC1111, load the LoRA by adding `<lora:{repo_folder}:1>` to your prompt. On ComfyUI just [load it as a regular LoRA](https://comfyanonymous.github.io/ComfyUI_examples/lora/).\n",
    "{webui_example_pivotal}\n",
    "\n",
    "## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)\n",
    "\n",
    "```py\n",
    "from diffusers import AutoPipelineForText2Image\n",
    "import torch\n",
    "{diffusers_imports_pivotal}\n",
    "pipeline = AutoPipelineForText2Image.from_pretrained('runwayml/stable-diffusion-v1-5', torch_dtype=torch.float16).to('cuda')\n",
    "pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors')\n",
    "{diffusers_example_pivotal}\n",
    "image = pipeline('{validation_prompt if validation_prompt else instance_prompt}').images[0]\n",
    "```\n",
    "\n",
    "For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)\n",
    "\n",
    "## Trigger words\n",
    "\n",
    "{trigger_str}\n",
    "\n",
    "## Details\n",
    "All [Files & versions](/{repo_id}/tree/main).\n",
    "\n",
    "The weights were trained using [🧨 diffusers Advanced Dreambooth Training Script](https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py).\n",
    "\n",
    "LoRA for the text encoder was enabled. {train_text_encoder}.\n",
    "\n",
    "Pivotal tuning was enabled: {train_text_encoder_ti}.\n",
    "\n",
    "Special VAE used for training: {vae_path}.\n",
    "\n",
    "\"\"\"\n",
    "    model_card = load_or_create_model_card(\n",
    "        repo_id_or_path=repo_id,\n",
    "        from_training=True,\n",
    "        license=\"openrail++\",\n",
    "        base_model=base_model,\n",
    "        prompt=instance_prompt,\n",
    "        model_description=model_description,\n",
    "        inference=True,\n",
    "        widget=widget_dict,\n",
    "    )\n",
    "\n",
    "    tags = [\"text-to-image\", \n",
    "            \"diffusers\", \n",
    "            \"diffusers-training\",\n",
    "            lora,\n",
    "            \"template:sd-lora\"\n",
    "            \"stable-diffusion\", \n",
    "            \"stable-diffusion-diffusers\"]\n",
    "    model_card = populate_model_card(model_card, tags=tags)\n",
    "\n",
    "    model_card.save(os.path.join(repo_folder, \"README.md\"))"
   ]
  },
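  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Quick sanity check (not part of the training script): show how the\n",
    "# `<s\\d+>` regexes above rewrite a pivotal-tuning prompt into a webui-style\n",
    "# prompt. The prompt and filename here are illustrative assumptions.\n",
    "import re\n",
    "\n",
    "example_prompt = \"a photo of <s0><s1> dog\"  # hypothetical instance prompt\n",
    "example_filename = \"my_lora_emb\"  # hypothetical f\"{repo_folder}_emb\" value\n",
    "\n",
    "# Swap the first inserted token for the embeddings filename, drop the rest,\n",
    "# mirroring the logic in save_model_card.\n",
    "webui_prompt = re.sub(r\"<s\\d+>\", \"\", re.sub(r\"<s\\d+>\", example_filename, example_prompt, count=1))\n",
    "ti_keys = \", \".join(f'\"{m}\"' for m in re.findall(r\"<s\\d+>\", example_prompt))\n",
    "print(webui_prompt)  # a photo of my_lora_emb dog\n",
    "print(ti_keys)  # \"<s0>\", \"<s1>\""
   ]
  },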
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from diffusers.utils import load_image\n",
    "\n",
    "images = [\n",
    "    load_image(\"https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/A%20mushroom%20in%20%5BV%5D%20style.png\")\n",
    "    for _ in range(3)\n",
    "]\n",
    "\n",
    "save_model_card(\n",
    "    use_dora=False,\n",
    "    repo_id=\"abby101/test\",\n",
    "    images=images,\n",
    "    base_model=\"runwayml/stable-diffusion-v1-5\",\n",
    "    repo_folder=\".\",\n",
    "    instance_prompt=\"A mushroom in [V] style\",\n",
    ")"
   ]
  },
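  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A hedged sketch exercising the pivotal-tuning branch of save_model_card.\n",
    "# The inserted tokens <s0>/<s1>, the \"TOK\" abstraction, and the output\n",
    "# folder are illustrative assumptions, not values from a real training run;\n",
    "# a separate folder keeps the ./README.md written above untouched.\n",
    "import os\n",
    "\n",
    "os.makedirs(\"pivotal_test\", exist_ok=True)\n",
    "\n",
    "save_model_card(\n",
    "    use_dora=False,\n",
    "    repo_id=\"abby101/test\",\n",
    "    base_model=\"runwayml/stable-diffusion-v1-5\",\n",
    "    repo_folder=\"pivotal_test\",\n",
    "    train_text_encoder_ti=True,\n",
    "    token_abstraction_dict={\"TOK\": [\"<s0>\", \"<s1>\"]},\n",
    "    instance_prompt=\"a photo of <s0><s1> dog\",\n",
    ")"
   ]
  },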
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---\n",
      "license: openrail++\n",
      "library_name: diffusers\n",
      "tags:\n",
      "- text-to-image\n",
      "- diffusers\n",
      "- diffusers-training\n",
      "- lora\n",
      "- template:sd-lorastable-diffusion\n",
      "- stable-diffusion-diffusers\n",
      "base_model: runwayml/stable-diffusion-v1-5\n",
      "inference: true\n",
      "instance_prompt: A mushroom in [V] style\n",
      "widget:\n",
      "- text: ' '\n",
      "  output:\n",
      "    url: image_0.png\n",
      "- text: ' '\n",
      "  output:\n",
      "    url: image_1.png\n",
      "- text: ' '\n",
      "  output:\n",
      "    url: image_2.png\n",
      "---\n",
      "\n",
      "<!-- This model card has been generated automatically according to the information the training script had access to. You\n",
      "should probably proofread and complete it, then remove this comment. -->\n",
      "\n",
      "\n",
      "# SD1.5 LoRA DreamBooth - abby101/test\n",
      "\n",
      "<Gallery />\n",
      "\n",
      "## Model description\n",
      "\n",
      "### These are abby101/test LoRA adaption weights for runwayml/stable-diffusion-v1-5.\n",
      "\n",
      "## Download model\n",
      "\n",
      "### Use it with UIs such as AUTOMATIC1111, Comfy UI, SD.Next, Invoke\n",
      "\n",
      "- **LoRA**: download **[`..safetensors` here 💾](/abby101/test/blob/main/..safetensors)**.\n",
      "    - Place it on your `models/Lora` folder.\n",
      "    - On AUTOMATIC1111, load the LoRA by adding `<lora:.:1>` to your prompt. On ComfyUI just [load it as a regular LoRA](https://comfyanonymous.github.io/ComfyUI_examples/lora/).\n",
      "\n",
      "\n",
      "## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)\n",
      "\n",
      "```py\n",
      "from diffusers import AutoPipelineForText2Image\n",
      "import torch\n",
      "\n",
      "pipeline = AutoPipelineForText2Image.from_pretrained('runwayml/stable-diffusion-v1-5', torch_dtype=torch.float16).to('cuda')\n",
      "pipeline.load_lora_weights('abby101/test', weight_name='pytorch_lora_weights.safetensors')\n",
      "\n",
      "image = pipeline('A mushroom in [V] style').images[0]\n",
      "```\n",
      "\n",
      "For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)\n",
      "\n",
      "## Trigger words\n",
      "\n",
      "You should use A mushroom in [V] style to trigger the image generation.\n",
      "\n",
      "## Details\n",
      "All [Files & versions](/abby101/test/tree/main).\n",
      "\n",
      "The weights were trained using [🧨 diffusers Advanced Dreambooth Training Script](https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py).\n",
      "\n",
      "LoRA for the text encoder was enabled. False.\n",
      "\n",
      "Pivotal tuning was enabled: False.\n",
      "\n",
      "Special VAE used for training: None.\n",
      "\n",
      "\n",
      "\n",
      "## Intended uses & limitations\n",
      "\n",
      "#### How to use\n",
      "\n",
      "```python\n",
      "# TODO: add an example code snippet for running this diffusion pipeline\n",
      "```\n",
      "\n",
      "#### Limitations and bias\n",
      "\n",
      "[TODO: provide examples of latent issues and potential remediations]\n",
      "\n",
      "## Training details\n",
      "\n",
      "[TODO: describe the data used to train the model]"
     ]
    }
   ],
   "source": [
    "!cat README.md"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pydl",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}