afzalsomro15684 committed on
Commit 8f9cc99 · verified · 1 Parent(s): 8d45b49

Update app.py

Files changed (1):
  1. app.py +88 -59

app.py CHANGED
@@ -1,67 +1,96 @@
  import streamlit as st
- from diffusers import StableDiffusionPipeline
- import torch
- from PIL import Image
- from gtts import gTTS
- import requests
- import base64
-
- # Load Stable Diffusion model for text-to-image
- text_to_image_model = "runwayml/stable-diffusion-v1-5"
- text_to_image_pipe = StableDiffusionPipeline.from_pretrained(text_to_image_model, torch_dtype=torch.float16)
- text_to_image_pipe = text_to_image_pipe.to("cuda")

  # Streamlit app
- st.title("🤖 Advanced Multi-Task AI Bot")
- st.write("Text-to-Image, Text-to-Voice, Text-to-Video, and Image-to-Video Tool")

- # Sidebar for task selection
- task = st.sidebar.selectbox("Select a task:", ["Text-to-Image", "Text-to-Voice", "Text-to-Video", "Image-to-Video"])

- if task == "Text-to-Image":
-     st.header("Text-to-Image")
-     user_input = st.text_input("Enter your text prompt:")
-     if user_input:
-         with st.spinner("Generating image..."):
-             image = text_to_image_pipe(user_input).images[0]
-             st.image(image, caption="Generated Image", use_column_width=True)

- elif task == "Text-to-Voice":
-     st.header("Text-to-Voice")
-     user_input = st.text_input("Enter your text:")
-     if user_input:
-         with st.spinner("Generating voice..."):
-             tts = gTTS(text=user_input, lang='en')
-             tts.save("output.mp3")
-             audio_file = open("output.mp3", "rb")
-             audio_bytes = audio_file.read()
-             st.audio(audio_bytes, format="audio/mp3")

- elif task == "Text-to-Video":
-     st.header("Text-to-Video")
-     user_input = st.text_input("Enter your text prompt:")
-     if user_input:
-         with st.spinner("Generating video..."):
-             # Use ModelScope or RunwayML API for text-to-video
-             video_url = "https://api.modelscope.cn/video/generate"  # Example API endpoint
-             response = requests.post(video_url, json={"text": user_input})
-             if response.status_code == 200:
-                 video_data = response.content
-                 st.video(video_data)
-             else:
-                 st.error("Failed to generate video. Please try again.")

- elif task == "Image-to-Video":
-     st.header("Image-to-Video")
-     uploaded_file = st.file_uploader("Upload an image:", type=["jpg", "png", "jpeg"])
-     if uploaded_file:
-         with st.spinner("Generating video..."):
-             # Use Pika Labs or Deforum API for image-to-video
-             video_url = "https://api.pika.art/video/generate"  # Example API endpoint
-             files = {"image": uploaded_file}
-             response = requests.post(video_url, files=files)
-             if response.status_code == 200:
-                 video_data = response.content
-                 st.video(video_data)
-             else:
-                 st.error("Failed to generate video. Please try again.")
 
  import streamlit as st
+ from moviepy.editor import VideoFileClip, concatenate_videoclips, TextClip, CompositeVideoClip, AudioFileClip
+ from moviepy.video.fx.all import crop, resize, mirror_x
+ import os
 
  # Streamlit app
+ st.title("🎥 Professional Video Editing Tool")
+ st.write("Make your videos copyright-free and edit them like a pro!")
+
+ # Upload video
+ uploaded_file = st.file_uploader("Upload a video:", type=["mp4", "avi", "mov"])
+
+ if uploaded_file is not None:
+     # Save uploaded file
+     with open("input_video.mp4", "wb") as f:
+         f.write(uploaded_file.getbuffer())
+
+     # Load video
+     video = VideoFileClip("input_video.mp4")
+
+     # Sidebar for editing options
+     st.sidebar.header("Editing Options")
+     task = st.sidebar.selectbox("Select a task:", [
+         "Trim Video", "Crop Video", "Add Text", "Add Music", "Remove Watermark", "Export Video"
+     ])
+
+     if task == "Trim Video":
+         st.header("Trim Video")
+         start_time = st.number_input("Start Time (seconds):", min_value=0, max_value=int(video.duration))
+         end_time = st.number_input("End Time (seconds):", min_value=0, max_value=int(video.duration))
+         if st.button("Trim"):
+             trimmed_video = video.subclip(start_time, end_time)
+             trimmed_video.write_videofile("trimmed_video.mp4")
+             st.video("trimmed_video.mp4")
+
+     elif task == "Crop Video":
+         st.header("Crop Video")
+         x1 = st.number_input("X1:", min_value=0, max_value=video.size[0])
+         y1 = st.number_input("Y1:", min_value=0, max_value=video.size[1])
+         x2 = st.number_input("X2:", min_value=0, max_value=video.size[0])
+         y2 = st.number_input("Y2:", min_value=0, max_value=video.size[1])
+         if st.button("Crop"):
+             cropped_video = crop(video, x1=x1, y1=y1, x2=x2, y2=y2)
+             cropped_video.write_videofile("cropped_video.mp4")
+             st.video("cropped_video.mp4")

+     elif task == "Add Text":
+         st.header("Add Text")
+         text = st.text_input("Enter text:")
+         fontsize = st.number_input("Font Size:", min_value=10, max_value=100, value=50)
+         color = st.color_picker("Text Color:", "#FFFFFF")
+         position_x = st.number_input("Position X:", min_value=0, max_value=video.size[0])
+         position_y = st.number_input("Position Y:", min_value=0, max_value=video.size[1])
+         if st.button("Add Text"):
+             text_clip = TextClip(text, fontsize=fontsize, color=color)
+             text_clip = text_clip.set_position((position_x, position_y)).set_duration(video.duration)
+             final_video = CompositeVideoClip([video, text_clip])
+             final_video.write_videofile("text_video.mp4")
+             st.video("text_video.mp4")

+     elif task == "Add Music":
+         st.header("Add Music")
+         music_file = st.file_uploader("Upload a music file:", type=["mp3", "wav"])
+         if music_file and st.button("Add Music"):
+             with open("background_music.mp3", "wb") as f:
+                 f.write(music_file.getbuffer())
+             audio_clip = AudioFileClip("background_music.mp3")
+             final_video = video.set_audio(audio_clip)
+             final_video.write_videofile("music_video.mp4")
+             st.video("music_video.mp4")

+     elif task == "Remove Watermark":
+         st.header("Remove Watermark")
+         st.write("This feature is under development. Please check back later!")
+         # Add watermark removal code here

+     elif task == "Export Video":
+         st.header("Export Video")
+         st.write("Your video is ready to download!")
+         st.video("input_video.mp4")
+         with open("input_video.mp4", "rb") as f:
+             st.download_button("Download Video", f, file_name="edited_video.mp4")

+ # Clean up temporary files
+ if os.path.exists("input_video.mp4"):
+     os.remove("input_video.mp4")
+ if os.path.exists("trimmed_video.mp4"):
+     os.remove("trimmed_video.mp4")
+ if os.path.exists("cropped_video.mp4"):
+     os.remove("cropped_video.mp4")
+ if os.path.exists("text_video.mp4"):
+     os.remove("text_video.mp4")
+ if os.path.exists("music_video.mp4"):
+     os.remove("music_video.mp4")
+ if os.path.exists("background_music.mp3"):
+     os.remove("background_music.mp3")