## Install dependencies
```bash
pip install gradio
pip install git+https://github.com/dill-lab/PILS
```
## Run the demo
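The script below loads the inverter checkpoint (whose name indicates Llama-2-7b-chat is the wrapped target "embedder" model), runs a secret prompt through the target model to collect its hidden states, and launches a Gradio UI showing both the target model's response and the inverter's reconstruction of the prompt. A CUDA GPU is strongly recommended: the script falls back to CPU, but a 7B embedder will be slow there.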
```python
import gradio as gr
import torch
from pils.models import InversionFromHiddenStatesModel

# Load the pretrained inverter, which wraps both the inversion model
# and the target "embedder" LLM.
MODEL = InversionFromHiddenStatesModel.from_pretrained(
    "murtaza/pils-32-llama2-chat-7b"
)
MODEL.embedder_no_grad = True
MODEL.embedder.max_new_tokens = 64

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MODEL = MODEL.to(DEVICE)


def invert(user_prompt):
    # Format the secret prompt with the embedder's chat template.
    sys_prompt = ""
    inp = MODEL.embedder_tokenizer.apply_chat_template(
        conversation=[
            {"role": "system", "content": sys_prompt},
            {"role": "user", "content": user_prompt},
        ],
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    )
    inp = {f"embedder_{k}": v.to(DEVICE) for k, v in inp.items()}

    # Run the target model to get its hidden-state embeddings and the
    # tokens it generates in response to the prompt.
    output = MODEL.call_embedding_model(**inp)
    inp["frozen_embeddings"] = output["embeddings"]

    # Invert the embeddings back into a guess at the original prompt.
    with torch.inference_mode():
        out = MODEL.generate(inp, {"max_length": 64})
    inverted = MODEL.tokenizer.decode(out[0], skip_special_tokens=True)
    generated = MODEL.embedder_tokenizer.decode(
        output["chosen_tokens"][0].squeeze(), skip_special_tokens=True
    )
    return generated, inverted


demo = gr.Interface(
    fn=invert,
    inputs=gr.Textbox(label="Secret prompt"),
    outputs=[gr.Textbox(label="LLM output"), gr.Textbox(label="Inverter guess")],
)
demo.launch(share=True)
```
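`share=True` asks Gradio for a temporary public `*.gradio.live` link; omit it to serve on localhost only. To sanity-check the model from a script instead of the UI, you can also call `invert` directly, as in this minimal sketch (the example prompt is arbitrary):

```python
# Hypothetical quick check: run this in place of demo.launch(...).
generated, inverted = invert("Write a haiku about the ocean.")
print("LLM output:    ", generated)
print("Inverter guess:", inverted)
```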
## Citation
```bibtex
@misc{nazir2025betterlanguagemodelinversion,
  title={Better Language Model Inversion by Compactly Representing Next-Token Distributions},
  author={Murtaza Nazir and Matthew Finlayson and John X. Morris and Xiang Ren and Swabha Swayamdipta},
  year={2025},
  eprint={2506.17090},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2506.17090},
}
```
Repository for the paper *Better Language Model Inversion by Compactly Representing Next-Token Distributions*.