|
from transformers import GPT2LMHeadModel, GPT2Tokenizer |
|
|
|
@lru_cache(maxsize=1)
def _load_model_and_tokenizer():
    """Load the GPT-2 tokenizer and model once and cache them.

    The original implementation reloaded both on every call, which is
    expensive (disk/network + weight initialization). `lru_cache` makes
    repeated `generate_diary` calls reuse the same objects.
    """
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    return tokenizer, model


# Emotion -> diary prefix used to seed generation.  Unknown emotions fall
# back to the neutral prefix below.  (NOTE(review): these literals appear
# mojibake-encoded Korean; preserved byte-for-byte to keep behavior.)
_EMOTION_PREFIXES = {
    "happy": "์ค๋์ ๊ธฐ๋ถ์ด ์ข์์. ",
    "sad": "์ฌํ ๊ธฐ๋ถ์ด์์. ",
    "angry": "ํ๊ฐ ์น๋ฐ์ด ์ค๋ฅด๋ ๊ธฐ๋ถ์ด์์. ",
}
_DEFAULT_PREFIX = "์ค๋์ ๊ธฐ๋ถ์ด ์ด์ํด์. "


def generate_diary(emotion, num_samples=1, max_length=100, temperature=0.7):
    """Generate diary-style text seeded with an emotion-specific prefix.

    Args:
        emotion: One of "happy", "sad", "angry"; anything else uses a
            neutral default prefix.
        num_samples: Number of independent samples to return.
        max_length: Maximum total token length of each generated sequence.
        temperature: Sampling temperature (only meaningful with sampling).

    Returns:
        A list of `num_samples` decoded strings, each beginning with the
        chosen prefix.
    """
    tokenizer, model = _load_model_and_tokenizer()

    prefix = _EMOTION_PREFIXES.get(emotion, _DEFAULT_PREFIX)

    input_sequence = tokenizer.encode(prefix, return_tensors="pt")

    # do_sample=True is required: without it `generate` decodes greedily,
    # silently ignoring `temperature`, and `num_return_sequences > 1`
    # raises because greedy search yields a single deterministic sequence.
    output = model.generate(
        input_sequence,
        max_length=max_length,
        num_return_sequences=num_samples,
        do_sample=True,
        temperature=temperature,
        pad_token_id=tokenizer.eos_token_id,
    )

    return [tokenizer.decode(seq, skip_special_tokens=True) for seq in output]
|
|
|
def main():
    """Prompt for an emotion on stdin and print the generated diary entries."""
    # The original source had this string literal broken across two physical
    # lines — a raw newline inside a plain string is a SyntaxError; rejoined
    # here onto one line.  `.strip()` so stray whitespace doesn't defeat the
    # emotion lookup in generate_diary.
    emotion = input("์ค๋์ ๊ฐ์ ์ ์๋ ฅํ์ธ์ (happy, sad, angry ๋ฑ): ").strip()

    diary_entries = generate_diary(emotion)

    print("์ค๋์ ์ผ๊ธฐ:")
    for i, entry in enumerate(diary_entries, start=1):
        print(f"{i}. {entry}")


if __name__ == "__main__":
    main()
|
|