Spaces · Running on CPU Upgrade
Commit d5e94b5 · small improvements
Parent(s): 179fbf1
- api_config.py +20 -20
- api_core.py +2 -3
- assets/config/aitube.yaml +9 -6
- assets/config/default.yaml +1 -1
- build/web/assets/AssetManifest.bin +1 -8
- build/web/assets/AssetManifest.bin.json +1 -1
- build/web/assets/AssetManifest.json +1 -1
- build/web/assets/assets/config/{aitube_high.yaml → aitube.yaml} +16 -13
- build/web/assets/assets/config/{aitube_low.yaml → custom.yaml} +4 -2
- build/web/assets/assets/config/default.yaml +2 -0
- build/web/assets/assets/config/private.yaml +0 -3
- build/web/assets/fonts/MaterialIcons-Regular.otf +0 -0
- build/web/flutter_bootstrap.js +1 -1
- build/web/flutter_service_worker.js +9 -10
- build/web/main.dart.js +0 -0
- lib/screens/settings_screen.dart +8 -1
- lib/screens/video_screen.dart +25 -2
- lib/widgets/ai_content_disclaimer.dart +3 -3
- lib/widgets/video_player_widget.dart +2 -1
api_config.py
CHANGED
@@ -90,32 +90,32 @@ CONFIG_FOR_STANDARD_HF_USERS = {
     "max_rendering_time_per_client_per_video_in_sec": 15 * 60,

     "min_num_inference_steps": 2,
-    "default_num_inference_steps":
-    "max_num_inference_steps":
+    "default_num_inference_steps": 8,
+    "max_num_inference_steps": 8,

     "min_num_frames": 9, # 8 + 1
     "default_num_frames": 81, # 8*10 + 1
     "max_num_frames": 81,

     "min_clip_duration_seconds": 1,
-    "default_clip_duration_seconds":
-    "max_clip_duration_seconds":
+    "default_clip_duration_seconds": 3,
+    "max_clip_duration_seconds": 3,

-    "min_clip_playback_speed": 0.
-    "default_clip_playback_speed": 0.
-    "max_clip_playback_speed": 0.
+    "min_clip_playback_speed": 0.7,
+    "default_clip_playback_speed": 0.7,
+    "max_clip_playback_speed": 0.7,

     "min_clip_framerate": 8,
     "default_clip_framerate": 25,
     "max_clip_framerate": 25,

     "min_clip_width": 544,
-    "default_clip_width": 768, # 640,
-    "max_clip_width": 768, # 640,
+    "default_clip_width": 1216, # 768, # 640,
+    "max_clip_width": 1216, # 768, # 640,

     "min_clip_height": 320,
-    "default_clip_height": 448, # 416,
-    "max_clip_height": 448, # 416,
+    "default_clip_height": 640, # 448, # 416,
+    "max_clip_height": 640, # 448, # 416,
 }

 # Hugging Face users with a Pro may enjoy an improved experience
@@ -123,20 +123,20 @@ CONFIG_FOR_PRO_HF_USERS = {
     "max_rendering_time_per_client_per_video_in_sec": 20 * 60,

     "min_num_inference_steps": 2,
-    "default_num_inference_steps":
-    "max_num_inference_steps":
+    "default_num_inference_steps": 8,
+    "max_num_inference_steps": 8,

     "min_num_frames": 9, # 8 + 1
     "default_num_frames": 81, # 8*10 + 1
     "max_num_frames": 81,

     "min_clip_duration_seconds": 1,
-    "default_clip_duration_seconds":
-    "max_clip_duration_seconds":
+    "default_clip_duration_seconds": 3,
+    "max_clip_duration_seconds": 3,

-    "min_clip_playback_speed": 0.
-    "default_clip_playback_speed": 0.
-    "max_clip_playback_speed": 0.
+    "min_clip_playback_speed": 0.7,
+    "default_clip_playback_speed": 0.7,
+    "max_clip_playback_speed": 0.7,

     "min_clip_framerate": 8,
     "default_clip_framerate": 25,
@@ -167,8 +167,8 @@ CONFIG_FOR_ADMIN_HF_USERS = {
     "max_clip_duration_seconds": 4,

     "min_clip_playback_speed": 0.7,
-    "default_clip_playback_speed": 0.
-    "max_clip_playback_speed": 0
+    "default_clip_playback_speed": 0.7,
+    "max_clip_playback_speed": 1.0,

     "min_clip_framerate": 8,
     "default_clip_framerate": 30,

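The three dictionaries above only expose min/default/max triples per setting; api_core.py (next file) reads them through get_config_value(user_role, key, options). A minimal, self-contained sketch of how such a triple can be resolved and clamped, using an assumed helper name and only the standard-tier width values actually shown in this diff; this is an illustration, not the project's implementation:

# Hypothetical helper: clamp a requested value into a role's [min, max] range.
# Only the key names and the standard-tier width values mirror api_config.py.
CONFIG_FOR_STANDARD_HF_USERS_EXCERPT = {
    "min_clip_width": 544,
    "default_clip_width": 1216,
    "max_clip_width": 1216,
}

def resolve_config_value(config: dict, key: str, options: dict) -> int:
    # Fall back to the default when the client did not request a value,
    # then clamp into the allowed range for this role tier.
    requested = options.get(key, config[f"default_{key}"])
    return max(config[f"min_{key}"], min(requested, config[f"max_{key}"]))

# A request for a 4096-pixel-wide clip is clamped down to the 1216 ceiling.
print(resolve_config_value(CONFIG_FOR_STANDARD_HF_USERS_EXCERPT, "clip_width", {"clip_width": 4096}))  # 1216
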
api_core.py
CHANGED
@@ -353,7 +353,7 @@ title: \"""

         # Check if the description still contains placeholder tags like <LOCATION>, <GENDER>, etc.
         if re.search(r'<[A-Z_]+>', description):
-            logger.warning(f"Description still contains placeholder tags: {description}")
+            #logger.warning(f"Description still contains placeholder tags: {description}")
             if current_attempt < max_attempts:
                 # Try again with a higher temperature
                 current_attempt += 1
@@ -615,8 +615,7 @@ Your caption:"""
         frame_rate = self.get_config_value(user_role, 'clip_framerate', options)

         # Log the user role and config values being used
-        logger.info(f"
-        logger.info(f"Using config values: width={width}, height={height}, num_frames={num_frames}, steps={num_inference_steps}, fps={frame_rate}")
+        logger.info(f"Using config values: width={width}, height={height}, num_frames={num_frames}, steps={num_inference_steps}, fps={frame_rate} | role: {user_role}")

         json_payload = {
             "inputs": {

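The only functional change here is that the placeholder warning is commented out while the retry path is kept. The detection itself hinges on the regex shown in the context lines; a tiny self-contained sketch of how that check behaves (the sample descriptions are invented for illustration):

import re

def has_placeholder_tags(description: str) -> bool:
    # Same pattern as the re.search(r'<[A-Z_]+>', description) check above:
    # any uppercase tag such as <LOCATION> or <GENDER> counts as unfilled.
    return re.search(r'<[A-Z_]+>', description) is not None

print(has_placeholder_tags("A chef cooks in <LOCATION> at dawn"))       # True
print(has_placeholder_tags("A chef cooks in a Lisbon kitchen at dawn")) # False
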
assets/config/aitube.yaml
CHANGED
@@ -7,21 +7,24 @@ render_queue:
   buffer_size: 3

   # how many requests for clips can be run in parallel
-  max_concurrent_generations:
+  max_concurrent_generations: 2

   # start playback as soon as we have 1 video over 3 (25%)
   minimum_buffer_percent_to_start_playback: 5

+# it's OK to use high values here,
+# because some of those values are limited by the backend config,
+# such as the resoltuion or number of frames
 video:
   # default negative prompt to filter harmful content
-  default_negative_prompt: "
+  default_negative_prompt: "pixelated, deformed, distorted, disfigured, blurry, text, watermark, low quality, gore, sex, blood, nudity, nude, porn, erotic"

   # transition time between each clip
   # the exit (older) clip will see its playback time reduced by this amount
   transition_buffer_duration_ms: 300

   # how long a generated clip should be, in Duration
-  original_clip_duration_seconds:
+  original_clip_duration_seconds: 3

   # The model works on resolutions that are divisible by 32
   # and number of frames that are divisible by 8 + 1 (e.g. 257).
@@ -34,7 +37,7 @@ video:
   # number of frames below 257.

   # number of inference steps
-  num_inference_steps:
+  num_inference_steps: 4

   guidance_scale: 1.0

@@ -42,8 +45,8 @@ video:
   # in frames per second (so an integer)
   original_clip_frame_rate: 25

-  original_clip_width:
-  original_clip_height:
+  original_clip_width: 1216 # 544
+  original_clip_height: 704 # 320

   # to do more with less, we slow down the videos (a 3s video will become a 4s video)
   # but if you are GPU rich feel feel to play them back at 100% of their speed!

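The comments in this file state the model's constraints (resolutions divisible by 32, frame counts of the form 8*k + 1) and the playback slow-down trick. A short sketch of that arithmetic, using only values that appear in this commit; the function names are made up for illustration:

def is_valid_resolution(width: int, height: int) -> bool:
    # "The model works on resolutions that are divisible by 32"
    return width % 32 == 0 and height % 32 == 0

def is_valid_frame_count(num_frames: int) -> bool:
    # frame counts are a multiple of 8, plus 1 (e.g. 81 = 8*10 + 1, or 257)
    return num_frames % 8 == 1

print(is_valid_resolution(1216, 704))  # True: 1216 = 38 * 32, 704 = 22 * 32
print(is_valid_frame_count(81))        # True: 8 * 10 + 1

# Slowing playback stretches each clip: at clip_playback_speed 0.7,
# a 3 s clip plays for about 4.3 s, which roughly matches the
# "a 3s video will become a 4s video" comment above.
print(3 / 0.7)  # ~4.29
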
assets/config/default.yaml
CHANGED
@@ -14,7 +14,7 @@ ui:

 video:
   # default negative prompt to filter harmful content
-  default_negative_prompt: "
+  default_negative_prompt: "pixelated, deformed, distorted, disfigured, blurry, text, watermark, low quality, gore, sex, blood, nudity, nude, porn, erotic"

   # transition time between each clip
   # the exit (older) clip will see its playback time reduced by this amount

build/web/assets/AssetManifest.bin
CHANGED
@@ -1,8 +1 @@
-
-assets/config/README.md
-assetassets/config/README.mdassets/config/aitube_high.yaml
-assetassets/config/aitube_high.yamlassets/config/aitube_low.yaml
-assetassets/config/aitube_low.yamlassets/config/default.yaml
-assetassets/config/default.yamlassets/config/private.yaml
-assetassets/config/private.yaml2packages/cupertino_icons/assets/CupertinoIcons.ttf
-asset2packages/cupertino_icons/assets/CupertinoIcons.ttf
+

build/web/assets/AssetManifest.bin.json
CHANGED
@@ -1 +1 @@
-"
+"DQUHF2Fzc2V0cy9jb25maWcvUkVBRE1FLm1kDAENAQcFYXNzZXQHF2Fzc2V0cy9jb25maWcvUkVBRE1FLm1kBxlhc3NldHMvY29uZmlnL2FpdHViZS55YW1sDAENAQcFYXNzZXQHGWFzc2V0cy9jb25maWcvYWl0dWJlLnlhbWwHGWFzc2V0cy9jb25maWcvY3VzdG9tLnlhbWwMAQ0BBwVhc3NldAcZYXNzZXRzL2NvbmZpZy9jdXN0b20ueWFtbAcaYXNzZXRzL2NvbmZpZy9kZWZhdWx0LnlhbWwMAQ0BBwVhc3NldAcaYXNzZXRzL2NvbmZpZy9kZWZhdWx0LnlhbWwHMnBhY2thZ2VzL2N1cGVydGlub19pY29ucy9hc3NldHMvQ3VwZXJ0aW5vSWNvbnMudHRmDAENAQcFYXNzZXQHMnBhY2thZ2VzL2N1cGVydGlub19pY29ucy9hc3NldHMvQ3VwZXJ0aW5vSWNvbnMudHRm"

build/web/assets/AssetManifest.json
CHANGED
@@ -1 +1 @@
-{"assets/config/README.md":["assets/config/README.md"],"assets/config/
+{"assets/config/README.md":["assets/config/README.md"],"assets/config/aitube.yaml":["assets/config/aitube.yaml"],"assets/config/custom.yaml":["assets/config/custom.yaml"],"assets/config/default.yaml":["assets/config/default.yaml"],"packages/cupertino_icons/assets/CupertinoIcons.ttf":["packages/cupertino_icons/assets/CupertinoIcons.ttf"]}

build/web/assets/assets/config/{aitube_high.yaml → aitube.yaml}
RENAMED
@@ -1,24 +1,30 @@
 ui:
   product_name: AiTube
-
+  showChatInVideoView: false
+
 render_queue:
   # how many clips should be stored in advance
-  buffer_size:
+  buffer_size: 3

   # how many requests for clips can be run in parallel
-  max_concurrent_generations:
+  max_concurrent_generations: 2

-  # start playback as soon as we have
-  minimum_buffer_percent_to_start_playback:
+  # start playback as soon as we have 1 video over 3 (25%)
+  minimum_buffer_percent_to_start_playback: 5

+# it's OK to use high values here,
+# because some of those values are limited by the backend config,
+# such as the resoltuion or number of frames
 video:
+  # default negative prompt to filter harmful content
+  default_negative_prompt: "pixelated, deformed, distorted, disfigured, blurry, text, watermark, low quality, gore, sex, blood, nudity, nude, porn, erotic"

   # transition time between each clip
   # the exit (older) clip will see its playback time reduced by this amount
   transition_buffer_duration_ms: 300

   # how long a generated clip should be, in Duration
-  original_clip_duration_seconds:
+  original_clip_duration_seconds: 3

   # The model works on resolutions that are divisible by 32
   # and number of frames that are divisible by 8 + 1 (e.g. 257).
@@ -31,20 +37,17 @@ video:
   # number of frames below 257.

   # number of inference steps
-  # this has a direct impact in performance obviously,
-  # you can try to go to low values like 12 or 14 on "safe bet" prompts,
-  # but if you need a more uncommon topic, you need to go to 18 steps or more
   num_inference_steps: 4

   guidance_scale: 1.0
-
+
   # original frame-rate of each clip (before we slow them down)
   # in frames per second (so an integer)
   original_clip_frame_rate: 25

-  original_clip_width:
-  original_clip_height:
+  original_clip_width: 1216 # 544
+  original_clip_height: 704 # 320

   # to do more with less, we slow down the videos (a 3s video will become a 4s video)
   # but if you are GPU rich feel feel to play them back at 100% of their speed!
-  clip_playback_speed:
+  clip_playback_speed: 0.7

build/web/assets/assets/config/{aitube_low.yaml → custom.yaml}
RENAMED
@@ -1,5 +1,5 @@
 ui:
-  product_name:
+  product_name: Custom
   showChatInVideoView: false

 render_queue:
@@ -13,6 +13,8 @@ render_queue:
   minimum_buffer_percent_to_start_playback: 5

 video:
+  # default negative prompt to filter harmful content
+  default_negative_prompt: "pixelated, deformed, distorted, disfigured, blurry, text, watermark, low quality, gore, sex, blood, nudity, nude, porn, erotic"

   # transition time between each clip
   # the exit (older) clip will see its playback time reduced by this amount
@@ -45,4 +47,4 @@ video:

   # to do more with less, we slow down the videos (a 3s video will become a 4s video)
   # but if you are GPU rich feel feel to play them back at 100% of their speed!
-  clip_playback_speed: 0.
+  clip_playback_speed: 0.65

build/web/assets/assets/config/default.yaml
CHANGED
@@ -13,6 +13,8 @@ ui:
   minimum_buffer_percent_to_start_playback: 25

 video:
+  # default negative prompt to filter harmful content
+  default_negative_prompt: "pixelated, deformed, distorted, disfigured, blurry, text, watermark, low quality, gore, sex, blood, nudity, nude, porn, erotic"

   # transition time between each clip
   # the exit (older) clip will see its playback time reduced by this amount

build/web/assets/assets/config/private.yaml
DELETED
@@ -1,3 +0,0 @@
-ui:
-  product_name: Private
-

build/web/assets/fonts/MaterialIcons-Regular.otf
CHANGED
Binary files a/build/web/assets/fonts/MaterialIcons-Regular.otf and b/build/web/assets/fonts/MaterialIcons-Regular.otf differ
|
build/web/flutter_bootstrap.js
CHANGED
@@ -39,6 +39,6 @@ _flutter.buildConfig = {"engineRevision":"382be0028d370607f76215a9be322e5514b263

 _flutter.loader.load({
   serviceWorkerSettings: {
-    serviceWorkerVersion: "
+    serviceWorkerVersion: "2953326891"
   }
 });

build/web/flutter_service_worker.js
CHANGED
@@ -3,11 +3,11 @@ const MANIFEST = 'flutter-app-manifest';
 const TEMP = 'flutter-temp-cache';
 const CACHE_NAME = 'flutter-app-cache';

-const RESOURCES = {"flutter_bootstrap.js": "
+const RESOURCES = {"flutter_bootstrap.js": "473f7db41bcf45ab1477c0d952890dcd",
 "version.json": "b5eaae4fc120710a3c35125322173615",
 "index.html": "f34c56fffc6b38f62412a5db2315dec8",
 "/": "f34c56fffc6b38f62412a5db2315dec8",
-"main.dart.js": "
+"main.dart.js": "e505129ff53a6863def8471d833dbb92",
 "flutter.js": "83d881c1dbb6d6bcd6b42e274605b69c",
 "favicon.png": "5dcef449791fa27946b3d35ad8803796",
 "icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
@@ -15,19 +15,18 @@ const RESOURCES = {"flutter_bootstrap.js": "a1978150e41f01842b52df7eb3c181aa",
 "icons/Icon-maskable-512.png": "301a7604d45b3e739efc881eb04896ea",
 "icons/Icon-512.png": "96e752610906ba2a93c65f8abe1645f1",
 "manifest.json": "7dc942a630334c1017089988a6ca07d4",
-"assets/AssetManifest.json": "
+"assets/AssetManifest.json": "51a53d0237971d07d6d88304c41bf6fb",
 "assets/NOTICES": "f0cfae681e209e19b2b144a9f062a96f",
 "assets/FontManifest.json": "dc3d03800ccca4601324923c0b1d6d57",
-"assets/AssetManifest.bin.json": "
+"assets/AssetManifest.bin.json": "f7a7f5619c8ca1575d7a5e0c6cd4d529",
 "assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "33b7d9392238c04c131b6ce224e13711",
 "assets/shaders/ink_sparkle.frag": "ecc85a2e95f5e9f53123dcaf8cb9b6ce",
-"assets/AssetManifest.bin": "
-"assets/fonts/MaterialIcons-Regular.otf": "
-"assets/assets/config/private.yaml": "97a9ec367206bea5dce64faf94b66332",
+"assets/AssetManifest.bin": "6c597105edcadb9c676bdc998c88545a",
+"assets/fonts/MaterialIcons-Regular.otf": "f1343247a767efee2447b3f63c9498b4",
 "assets/assets/config/README.md": "07a87720dd00dd1ca98c9d6884440e31",
-"assets/assets/config/
-"assets/assets/config/
-"assets/assets/config/
+"assets/assets/config/custom.yaml": "da18366f4b2f24a3bc911d86bd773a79",
+"assets/assets/config/aitube.yaml": "29ed15827ee8364e390a3b446535067a",
+"assets/assets/config/default.yaml": "ba11c9ae686f1317a29bce114f0f9fc9",
 "canvaskit/skwasm.js": "ea559890a088fe28b4ddf70e17e60052",
 "canvaskit/skwasm.js.symbols": "9fe690d47b904d72c7d020bd303adf16",
 "canvaskit/canvaskit.js.symbols": "27361387bc24144b46a745f1afe92b50",

build/web/main.dart.js
CHANGED
The diff for this file is too large to render.
|
lib/screens/settings_screen.dart
CHANGED
@@ -161,7 +161,14 @@ class _SettingsScreenState extends State<SettingsScreen> {
     final confirmed = await showDialog<bool>(
       context: context,
       builder: (context) => AlertDialog(
-        title: const Text(
+        title: const Text(
+          'Clear Cache',
+          style: TextStyle(
+            color: AiTubeColors.onBackground,
+            fontSize: 20,
+            fontWeight: FontWeight.bold,
+          ),
+        ),
         content: const Text(
           'Are you sure you want to clear all cached data? '
           'This will remove all saved search results and videos.',

lib/screens/video_screen.dart
CHANGED
@@ -1,6 +1,7 @@
 // lib/screens/video_screen.dart
 import 'dart:async';

+import 'package:aitube2/screens/home_screen.dart';
 import 'package:aitube2/widgets/chat_widget.dart';
 import 'package:aitube2/widgets/search_box.dart';
 import 'package:aitube2/widgets/web_utils.dart';
@@ -323,8 +324,28 @@ class _VideoScreenState extends State<VideoScreen> {
         final isWideScreen = constraints.maxWidth >= 900;

         return Scaffold(
-          appBar:
-
+          appBar: PreferredSize(
+            preferredSize: const Size.fromHeight(kToolbarHeight + 16),
+            child: Padding(
+              padding: const EdgeInsets.only(top: 16),
+              child: AppBar(
+                leading: IconButton(
+                  icon: Navigator.canPop(context)
+                      ? const Icon(Icons.arrow_back, color: AiTubeColors.onBackground)
+                      : const Icon(Icons.home, color: AiTubeColors.onBackground),
+                  onPressed: () {
+                    if (Navigator.canPop(context)) {
+                      Navigator.pop(context);
+                    } else {
+                      // Navigate to home screen if we can't go back
+                      Navigator.pushReplacement(
+                        context,
+                        MaterialPageRoute(builder: (context) => const HomeScreen()),
+                      );
+                    }
+                  },
+                ),
+                titleSpacing: 0,
                 title: Padding(
                   padding: const EdgeInsets.all(8),
                   child: SearchBox(
@@ -346,6 +367,8 @@ class _VideoScreenState extends State<VideoScreen> {
                     onPressed: _isConnected ? null : _initializeConnection,
                   ),
                 ],
+              ),
+            ),
           ),
           body: SafeArea(
             child: isWideScreen

lib/widgets/ai_content_disclaimer.dart
CHANGED
@@ -110,7 +110,7 @@ class AiContentDisclaimer extends StatelessWidget {
                 ),
               ],
             ),
-            const SizedBox(height:
+            const SizedBox(height: 20),
             Row(
               mainAxisAlignment: MainAxisAlignment.center,
               crossAxisAlignment: CrossAxisAlignment.baseline,
@@ -135,7 +135,7 @@ class AiContentDisclaimer extends StatelessWidget {
                 ),
               ],
             ),
-            const SizedBox(height:
+            const SizedBox(height: 20),
             Text(
               'artificial intelligence',
               style: GoogleFonts.arimo(
@@ -153,7 +153,7 @@ class AiContentDisclaimer extends StatelessWidget {
               ],
             ),
           ),
-          const SizedBox(height:
+          const SizedBox(height: 20),
           Text(
             'and may contain hallucinations or factual inaccuracies.',
             style: GoogleFonts.arimo(

lib/widgets/video_player_widget.dart
CHANGED
@@ -613,7 +613,8 @@ class _VideoPlayerWidgetState extends State<VideoPlayerWidget> with WidgetsBindi
   Widget _buildPlaceholder() {
     // Use our new AI Content Disclaimer widget as the placeholder
     if (widget.initialThumbnailUrl?.isEmpty ?? true) {
-
+      // Set isInteractive to true as we generate content on-the-fly
+      return const AiContentDisclaimer(isInteractive: true);
     }

     try {