Commit 184b582 · Jeremy Live committed · 1 parent: ec5d137

Files changed (8):
  1. .dockerignore +26 -0
  2. .env.example +10 -0
  3. .gitignore +64 -0
  4. Dockerfile +29 -0
  5. README.md +113 -1
  6. app.py +107 -47
  7. modal_backend.py +124 -9
  8. requirements.txt +15 -3
.dockerignore ADDED
@@ -0,0 +1,26 @@
+ __pycache__
+ *.pyc
+ *.pyo
+ *.pyd
+ .Python
+ env/
+ venv/
+ ENV/
+ .env
+ .venv
+ .git
+ .gitignore
+ .DS_Store
+ *.swp
+ *.swo
+ .vscode/
+ .idea/
+ *.log
+ .cache/
+ .coverage
+ htmlcov/
+ *.egg-info/
+ build/
+ dist/
+ *.egg
+ *.mod

.env.example ADDED
@@ -0,0 +1,10 @@
+ # Hugging Face Token (required for model access)
+ HF_TOKEN=your_huggingface_token_here
+
+ # Modal Configuration (optional, for local development)
+ # MODAL_TOKEN_ID=your_modal_token_id
+ # MODAL_TOKEN_SECRET=your_modal_token_secret
+
+ # Cache Configuration (optional)
+ # HUGGINGFACE_HUB_CACHE=./cache
+ # TRANSFORMERS_CACHE=./cache
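
Note: app.py reads these values from the process environment. A minimal sketch of loading them from `.env` with python-dotenv (added to requirements.txt in this commit); the fallback message is illustrative:

```python
# Minimal sketch: load the variables from .env.example into the environment
# using python-dotenv, then read them the way app.py does.
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the current directory, if present

hf_token = os.environ.get("HF_TOKEN")
if not hf_token:
    # app.py logs a warning and falls back to local templates in this case
    print("HF_TOKEN not set; AI features disabled")
```
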
.gitignore ADDED
@@ -0,0 +1,64 @@
+ # Python
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ *.so
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+
+ # Virtual Environment
+ venv/
+ env/
+ ENV/
+ .env
+ .venv
+
+ # IDE
+ .idea/
+ .vscode/
+ *.swp
+ *.swo
+ *~
+
+ # Logs
+ *.log
+
+ # Local development
+ .DS_Store
+ .env
+ .cache/
+
+ # Model files
+ *.bin
+ *.gguf
+ *.safetensors
+
+ # Hugging Face
+ huggingface/
+
+ # Modal
+ .modal/
+
+ # System Files
+ Thumbs.db
+ ehthumbs.db
+ Desktop.ini
+ $RECYCLE.BIN/
+
+ # Project specific
+ images/
+ outputs/

Dockerfile ADDED
@@ -0,0 +1,29 @@
+ FROM python:3.10-slim
+
+ WORKDIR /app
+
+ # Install system dependencies
+ RUN apt-get update && apt-get install -y \
+     git \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Install Python dependencies
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the application code
+ COPY . .
+
+ # Set environment variables
+ ENV PYTHONUNBUFFERED=1
+ ENV HUGGINGFACE_HUB_CACHE=/app/cache
+ ENV TRANSFORMERS_CACHE=/app/cache
+
+ # Create cache directory
+ RUN mkdir -p /app/cache
+
+ # Expose the port the app runs on
+ EXPOSE 7860
+
+ # Command to run the application
+ CMD ["python", "app.py"]
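
Note: `EXPOSE 7860` only documents the port; for `docker run -p 7860:7860` to reach the server, the Gradio app must bind to all interfaces rather than the default localhost. A minimal sketch of the expected launch call (whether app.py already passes these arguments is an assumption):

```python
# Minimal sketch: a Gradio launch that matches the Dockerfile's EXPOSE 7860.
# server_name="0.0.0.0" binds to all interfaces so the mapped port is
# reachable from outside the container; this Interface is purely illustrative.
import gradio as gr

demo = gr.Interface(fn=lambda s: s.upper(), inputs="text", outputs="text")
demo.launch(server_name="0.0.0.0", server_port=7860)
```
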
README.md CHANGED
@@ -17,7 +17,119 @@ tags:
  - gradio
  ---

- # 🎵 AI Content Generator for Musicians 🎬
+ # 🚀 Content Creation Agent
+
+ An AI-powered content generation tool that creates engaging social media content for multiple platforms using advanced language models and image generation.
+
+ [![Try the App](https://img.shields.io/badge/🚀-Try%20Live%20Demo-blue?style=for-the-badge)](https://huggingface.co/spaces/developerjeremylive/ContentCreationAgent-etheroi)
+
+ ## ✨ Features
+
+ - 🤖 AI-powered content generation for LinkedIn, Instagram, TikTok, Twitter, and Facebook
+ - 🖼️ AI image generation using Stable Diffusion XL
+ - 🎯 Platform-optimized content tailored to each social network
+ - ⚡ Fast local templates as fallback
+ - 🎨 Customizable content based on business type and target audience
+
+ ## 🛠️ Setup
+
+ ### Prerequisites
+
+ - Python 3.10 or higher
+ - [Hugging Face Account](https://huggingface.co/join)
+ - [Modal Account](https://modal.com/)
+ - [Git](https://git-scm.com/)
+
+ ### Installation
+
+ 1. Clone the repository:
+ ```bash
+ git clone https://huggingface.co/spaces/developerjeremylive/ContentCreationAgent-etheroi
+ cd ContentCreationAgent-etheroi
+ ```
+
+ 2. Create and activate a virtual environment:
+ ```bash
+ python -m venv venv
+ source venv/bin/activate  # On Windows: .\venv\Scripts\activate
+ ```
+
+ 3. Install dependencies:
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+ ## 🔑 Environment Variables
+
+ Create a `.env` file in the project root with the following variables:
+
+ ```
+ # Required for Hugging Face Hub
+ HF_TOKEN=your_huggingface_token_here
+
+ # Optional: Modal configuration (for local development)
+ MODAL_TOKEN_ID=your_modal_token_id
+ MODAL_TOKEN_SECRET=your_modal_token_secret
+ ```
+
+ ## 🚀 Deployment
+
+ ### Local Development
+
+ 1. Make sure you have all the environment variables set in your `.env` file
+ 2. Run the application:
+ ```bash
+ python app.py
+ ```
+ 3. Open your browser to `http://localhost:7860`
+
+ ### Docker
+
+ 1. Build the Docker image:
+ ```bash
+ docker build -t content-creation-agent .
+ ```
+
+ 2. Run the container:
+ ```bash
+ docker run -p 7860:7860 --env-file .env content-creation-agent
+ ```
+
+ ### Hugging Face Spaces
+
+ 1. Push your code to a GitHub repository
+ 2. Create a new Space on Hugging Face
+ 3. Configure the following settings:
+    - Select "Docker" as the Space SDK
+    - Set the Dockerfile path to `Dockerfile`
+    - Add your `HF_TOKEN` as a secret in the Space settings
+ 4. Deploy your Space
+
+ ## 🤖 Technical Architecture
+
+ ### Core Stack
+ - **Frontend**: Gradio for interactive UI
+ - **Backend**: Python with Modal for serverless GPU inference
+ - **AI Models**:
+   - Text Generation: Llama 2 7B Chat (GGUF)
+   - Image Generation: Stable Diffusion XL
+ - **Deployment**: Docker container on Hugging Face Spaces
+
+ ## 🤝 Contributing
+
+ Contributions are welcome! Please feel free to submit a Pull Request.
+
+ ## 📄 License
+
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+ ## 🙏 Acknowledgments
+
+ - [Hugging Face](https://huggingface.co/) for the Transformers library and model hosting
+ - [Modal](https://modal.com/) for serverless GPU inference
+ - [Gradio](https://gradio.app/) for the beautiful UI components
+
+ ## 🎵 AI Content Generator for Musicians

  **An autonomous AI agent that transforms song lyrics into complete social media marketing campaigns across 6 platforms using Llama-2-7B and Modal Labs GPU infrastructure.**

app.py CHANGED
@@ -8,7 +8,7 @@ logger = logging.getLogger(__name__)

  # Modal setup with proper secret handling
  MODAL_AVAILABLE = False
- generate_content_with_llm = None
+ generate_content = None

  # Define the app name - must match your Modal app name in modal_backend.py
  MODAL_APP_NAME = "content-creation-agent"
@@ -18,22 +18,27 @@ try:
      from modal.exception import AuthError
      logger.info(f"Modal version: {modal.__version__}")

-     # Check for Modal tokens in environment
-     modal_token_id = os.environ.get("MODAL_TOKEN_ID")
-     modal_token_secret = os.environ.get("MODAL_TOKEN_SECRET")
+     # Check for Hugging Face token in environment
+     hf_token = os.environ.get("HF_TOKEN")

-     if not (modal_token_id and modal_token_secret):
-         logger.warning("⚠️ Modal tokens not found in environment variables. Running in local mode.")
-         logger.info("ℹ️ To enable Modal, set MODAL_TOKEN_ID and MODAL_TOKEN_SECRET in your environment")
+     if not hf_token:
+         logger.warning("⚠️ HF_TOKEN not found in environment variables. Running in local mode.")
+         logger.info("ℹ️ To enable AI features, set HF_TOKEN in your environment variables")
      else:
          try:
-             logger.info("🔍 Initializing Modal client...")
-             logger.info(f"Token ID: {modal_token_id[:4]}...{modal_token_id[-4:] if modal_token_id else ''}")
+             logger.info("🔍 Initializing Modal client with Hugging Face token...")

-             # Initialize the client with the token
+             # Initialize the client with the Hugging Face token
              try:
-                 client = modal.Client(token_id=modal_token_id, token_secret=modal_token_secret)
-                 logger.info("✅ Successfully authenticated with Modal")
+                 # Create a secret with the Hugging Face token
+                 hf_secret = modal.Secret.from_dict({"HUGGING_FACE_HUB_TOKEN": hf_token})
+
+                 # Initialize the client
+                 client = modal.Client()
+                 logger.info("✅ Successfully authenticated with Hugging Face")
+
+                 # Store the secret for later use
+                 client.secret = hf_secret

                  # List all functions to debug
                  logger.info("🔍 Looking for available functions...")
@@ -54,13 +59,14 @@ try:

                  # If health check passes, get the main function
                  try:
-                     generate_content_with_llm = modal.Function.lookup(MODAL_APP_NAME, "generate_content_with_llm")
+                     generate_content = modal.Function.lookup(MODAL_APP_NAME, "generate_content")
+                     generate_image = modal.Function.lookup(MODAL_APP_NAME, "generate_image")
                      MODAL_AVAILABLE = True
                      logger.info("🚀 Successfully connected to Modal AI service")
                  except Exception as e:
-                     logger.error(f"❌ Could not find generate_content_with_llm function: {str(e)}")
+                     logger.error(f"❌ Could not find generate_content or generate_image function: {str(e)}")
                      MODAL_AVAILABLE = False
-
+
              except Exception as e:
                  logger.error(f"❌ Health check failed: {str(e)}")
                  logger.info(f"ℹ️ Make sure your app name is correct: {MODAL_APP_NAME}")
@@ -213,69 +219,71 @@ As {brand_display}, we're excited to share our latest insights on {business_type
      {hashtags['facebook']}"""
  }

- def generate_all_content(business_type, target_audience, content_goal, brand_name, key_message, use_modal):
-     """Generate content with Modal toggle"""
+ def generate_all_content(business_type, target_audience, content_goal, brand_name, key_message, use_modal, generate_image_flag=False):
+     """Generate content for all platforms using Modal AI or local fallback"""
      if not all([business_type.strip(), target_audience.strip(), content_goal.strip(), key_message.strip()]):
-         error_msg = "❌ Please fill in all required fields"
-         return [""] * 5 + [error_msg]
+         return [""] * 5 + ["❌ Please fill in all required fields"] + [None]  # Add None for image

      start_time = time.time()
+     empty_outputs = [""] * 5
+     image_output = None

-     # Initial yield - empty outputs and loading status
-     empty_outputs = [""] * 5  # 5 platform outputs
-     if use_modal and MODAL_AVAILABLE and generate_content_with_llm is not None:
+     if use_modal and MODAL_AVAILABLE and generate_content is not None:
          status_msg = "⏳ Generating content with AI..."
      else:
          status_msg = "⏳ Generating content with local templates..."
-         use_modal = False  # Force local generation if Modal is not available
+         use_modal = False

-     logger.info(status_msg)
-     yield empty_outputs + [status_msg]  # This will be 6 values total (5 empty strings + 1 status message)
+     yield empty_outputs + [status_msg, None]  # Add None for image

      try:
-         if use_modal and MODAL_AVAILABLE and generate_content_with_llm is not None:
+         if use_modal and MODAL_AVAILABLE and generate_content is not None:
              try:
-                 logger.info("🔄 Calling Modal AI...")
-                 result = generate_content_with_llm.remote(
+                 # Call the Modal function with image generation flag
+                 result = generate_content.remote(
                      business_type,
                      target_audience,
                      content_goal,
                      brand_name,
-                     key_message
+                     key_message,
+                     generate_images=generate_image_flag
                  )

-                 # Validate and process the result
+                 # Check if we got a valid response
                  if isinstance(result, dict) and all(platform in result for platform in ['linkedin', 'instagram', 'tiktok', 'twitter', 'facebook']):
                      content = result
                      status_msg = f"✅ Generated with AI in {time.time() - start_time:.2f}s"
-                     logger.info("Successfully generated content with Modal AI")
+
+                     # Check for generated image
+                     if generate_image_flag and 'image_url' in result:
+                         image_output = result['image_url']
+                         status_msg += " (with image)"
                  else:
                      raise ValueError("Unexpected response format from Modal AI")

-             except Exception as modal_error:
-                 logger.warning(f"⚠️ AI generation failed: {str(modal_error)}")
+             except Exception as e:
+                 logger.error(f"Error generating content with Modal: {str(e)}")
                  content = generate_fallback_content(business_type, target_audience, content_goal, brand_name, key_message)
                  status_msg = f"⚠️ AI generation failed, using local templates (took {time.time() - start_time:.2f}s)"
          else:
-             # Use local generation
              content = generate_fallback_content(business_type, target_audience, content_goal, brand_name, key_message)
              status_msg = f"✅ Generated with local templates in {time.time() - start_time:.2f}s"
-
+
      except Exception as e:
-         logger.error(f"Unexpected error during content generation: {str(e)}")
+         logger.error(f"Unexpected error: {str(e)}")
          content = generate_fallback_content(business_type, target_audience, content_goal, brand_name, key_message)
          status_msg = f"❌ Error occurred, using local templates (took {time.time() - start_time:.2f}s)"

-     # Prepare platform outputs
-     platforms = ['linkedin', 'instagram', 'tiktok', 'twitter', 'facebook']
-     platform_outputs = [content.get(platform, f"Could not generate {platform} content") for platform in platforms]
+     # Prepare the outputs for all platforms
+     platform_outputs = [
+         content.get('linkedin', "Could not generate LinkedIn content"),
+         content.get('instagram', "Could not generate Instagram content"),
+         content.get('tiktok', "Could not generate TikTok content"),
+         content.get('twitter', "Could not generate Twitter content"),
+         content.get('facebook', "Could not generate Facebook content")
+     ]

-     # Log completion
-     logger.info(f"Content generation completed in {time.time() - start_time:.2f}s")
-
-     # Return only the platform outputs (5 values)
-     # The status message will be shown separately in the Gradio interface
-     return platform_outputs + [status_msg]  # This returns 6 values (5 platform outputs + 1 status message)
+     return platform_outputs + [status_msg, image_output]

  # Create Gradio interface
  with gr.Blocks(theme=gr.themes.Soft(), title="🚀 Content Creator Pro") as demo:
@@ -335,6 +343,14 @@ with gr.Blocks(theme=gr.themes.Soft(), title="🚀 Content Creator Pro") as demo
          info="Choose between AI-powered generation or local templates"
      )

+     # Image generation toggle (only visible when AI generation is selected)
+     with gr.Group(visible=MODAL_AVAILABLE) as image_group:
+         generate_image_toggle = gr.Checkbox(
+             label="🖼️ Generate AI Image",
+             value=False,
+             info="Check to generate an AI image based on your content"
+         )
+
      run_button = gr.Button("🚀 Generate Content", variant="primary", size="lg")

      gr.Markdown("## 📱 Generated Social Media Content")
@@ -350,11 +366,55 @@ with gr.Blocks(theme=gr.themes.Soft(), title="🚀 Content Creator Pro") as demo

      status = gr.Markdown("Ready to generate content!")

+     # Image output component
+     with gr.Row():
+         generated_image = gr.Image(
+             label="🖼️ Generated Image",
+             visible=False,
+             height=512
+         )
+
+     # Update image group visibility based on modal toggle
+     def update_image_group_visibility(use_modal):
+         return gr.Group(visible=use_modal and MODAL_AVAILABLE)
+
+     modal_toggle.change(
+         fn=update_image_group_visibility,
+         inputs=[modal_toggle],
+         outputs=[image_group]
+     )
+
      # Connect the function with proper outputs
      run_button.click(
          fn=generate_all_content,
-         inputs=[business_type, target_audience, content_goal, brand_name, key_message, modal_toggle],
-         outputs=[linkedin_output, instagram_output, tiktok_output, twitter_output, facebook_output, status]
+         inputs=[
+             business_type,
+             target_audience,
+             content_goal,
+             brand_name,
+             key_message,
+             modal_toggle,
+             generate_image_toggle
+         ],
+         outputs=[
+             linkedin_output,
+             instagram_output,
+             tiktok_output,
+             twitter_output,
+             facebook_output,
+             status,
+             generated_image
+         ]
+     )
+
+     # Show/hide image based on whether an image was generated
+     def update_image_visibility(image_data):
+         return gr.Image(visible=image_data is not None), image_data or None
+
+     generated_image.change(
+         fn=lambda x: (gr.Image(visible=x is not None), x or None),
+         inputs=[generated_image],
+         outputs=[generated_image, generated_image]
      )

      # Add examples
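
Note: `generate_all_content` is a generator: it `yield`s an interim status and then `return`s the final outputs. Gradio renders only yielded values, so the final list should also be yielded rather than returned. A minimal, self-contained sketch of that streaming pattern (component names here are illustrative, not taken from app.py):

```python
# Minimal sketch of the streaming pattern generate_all_content relies on:
# yield an interim "loading" state, then yield the final outputs.
# Gradio updates the UI on each yield; a value attached to a bare `return`
# inside a generator is not rendered.
import time

import gradio as gr

def handler(text):
    yield "", "⏳ Generating..."   # interim: empty output plus a status line
    time.sleep(1)                  # stand-in for the remote Modal call
    yield text.upper(), "✅ Done"  # final output plus final status

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    status = gr.Markdown("Ready")
    gr.Button("Run").click(fn=handler, inputs=[inp], outputs=[out, status])

demo.launch()
```
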
modal_backend.py CHANGED
@@ -1,22 +1,37 @@
  import os
+ import io
  import modal
- from typing import Dict
+ import requests
+ from typing import Dict, Optional
+ from PIL import Image

  # Define the Modal app
  app = modal.App("content-creation-agent")  # Note: on a single line :D

  # Define the Docker image with necessary dependencies
- llama_image = (
+ image = (
      modal.Image.debian_slim(python_version="3.11")
      .pip_install(
-         "llama-cpp-python==0.2.90",
+         "torch",
+         "transformers",
+         "diffusers",
+         "accelerate",
+         "safetensors",
+         "pillow",
          "huggingface_hub",
          "requests",
      )
-     # Remove any existing HUGGINGFACE_HUB environment variable
-     .env({"HUGGINGFACE_HUB_CACHE": "/root/cache/huggingface/hub"})
+     .env({
+         "HUGGINGFACE_HUB_CACHE": "/root/cache/huggingface/hub",
+         "TRANSFORMERS_CACHE": "/root/cache/huggingface/transformers",
+         "HF_HUB_DISABLE_PROGRESS_BARS": "1"
+     })
  )

+ # Model configuration
+ MODEL_NAME = "stabilityai/stable-diffusion-xl-base-1.0"
+ CACHE_DIR = "/root/cache/huggingface/hub"
+
  # Constants
  MODEL_DIR = "/model"
  MODEL_NAME = "TheBloke/Llama-2-7B-Chat-GGUF"
@@ -128,17 +143,117 @@ def generate_content_with_llm(business_type: str, target_audience: str, content_
      # Return empty strings for all platforms if there's an error
      return {platform: f"Error generating content for {platform}" for platform in platforms}

+ # Image generation function
+ @app.function(
+     image=image,
+     gpu="A10G",
+     secrets=[modal.Secret.from_name("huggingface-secret")],
+     timeout=600,
+     keep_warm=1
+ )
+ def generate_image(prompt: str, negative_prompt: str = "", num_images: int = 1) -> list:
+     """Generate images using Stable Diffusion XL"""
+     from diffusers import StableDiffusionXLPipeline
+     import torch
+
+     try:
+         # Load the model
+         pipe = StableDiffusionXLPipeline.from_pretrained(
+             MODEL_NAME,
+             torch_dtype=torch.float16,
+             use_safetensors=True,
+             variant="fp16",
+             cache_dir=CACHE_DIR
+         )
+
+         # Move to GPU if available
+         if torch.cuda.is_available():
+             pipe = pipe.to("cuda")
+
+         # Generate images
+         images = pipe(
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images,
+             num_inference_steps=30,
+             guidance_scale=7.5,
+         ).images
+
+         # Convert PIL images to base64
+         import base64
+         from io import BytesIO
+
+         result = []
+         for img in images:
+             buffered = BytesIO()
+             img.save(buffered, format="PNG")
+             img_str = base64.b64encode(buffered.getvalue()).decode()
+             result.append(f"data:image/png;base64,{img_str}")
+
+         return result
+
+     except Exception as e:
+         return [f"Error generating image: {str(e)}"]
+
  # Health check endpoint
  @app.function()
  def health_check() -> Dict[str, str]:
      return {"status": "ok", "message": "Service is healthy"}

+ # Combined function to generate both text and images
+ @app.function(
+     image=image,
+     gpu="A10G",
+     secrets=[modal.Secret.from_name("huggingface-secret")],
+     timeout=1800
+ )
+ def generate_content(
+     business_type: str,
+     target_audience: str,
+     content_goal: str,
+     brand_name: str,
+     key_message: str,
+     generate_images: bool = False
+ ) -> Dict[str, any]:
+     """
+     Generate social media content and optionally images
+     """
+     try:
+         # First generate text content
+         content = generate_content_with_llm.remote(
+             business_type, target_audience, content_goal, brand_name, key_message
+         )
+
+         # If image generation is requested
+         if generate_images:
+             # Generate a prompt for the image based on the content
+             image_prompt = (
+                 f"{content_goal} for {target_audience}. {key_message}. "
+                 f"Business type: {business_type}. Professional, high-quality image."
+             )
+
+             # Generate the image
+             image_urls = generate_image.remote(
+                 prompt=image_prompt,
+                 negative_prompt="low quality, blurry, text, watermark, logo",
+                 num_images=1
+             )
+
+             # Add image URL to the content
+             if image_urls and not image_urls[0].startswith("Error"):
+                 content["image_url"] = image_urls[0]
+
+         return content
+
+     except Exception as e:
+         return {"error": f"Failed to generate content: {str(e)}"}
+
  # Local testing (only runs when script is executed directly)
  if __name__ == "__main__":
      # Test health check
      print("Health check:", health_check.remote())

-     # Example usage
-     test_prompt = "Generate a short story about a robot learning to paint."
-     result = generate_content_with_llm.remote(test_prompt)
-     print("Generated content:", result.get("generated_text", "No content generated"))
+     # Test image generation
+     print("Testing image generation...")
+     test_image = generate_image.remote("A futuristic city at night, cyberpunk style")
+     print(f"Generated image: {test_image[0][:100]}..." if test_image else "Failed to generate image")
requirements.txt CHANGED
@@ -1,5 +1,17 @@
  modal>=1.0.0
- gradio
- transformers
- torch
+ gradio>=3.36.0
+ python-dotenv
+ requests
+ Pillow>=9.0.0
+ modal>=0.57.1
+ huggingface-hub>=0.16.4
+ transformers>=4.36.0
+ diffusers>=0.24.0
+ accelerate>=0.25.0
+ safetensors>=0.4.0
+ torch>=2.0.0
+ torchvision
+ torchaudio
+ sentencepiece
+ protobuf<=3.20.0
  hf_transfer
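
Note: a quick way to confirm the pinned stack resolves and imports together after `pip install -r requirements.txt`; the printed versions will vary with what pip resolves:

```python
# Sanity check: import the core dependencies from requirements.txt and
# print the resolved versions.
import diffusers
import gradio
import torch
import transformers

print("torch", torch.__version__)
print("transformers", transformers.__version__)
print("diffusers", diffusers.__version__)
print("gradio", gradio.__version__)
```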