Mohammed Foud
commited on
Commit
·
56afbd6
1
Parent(s):
63708bc
Add application file
Browse files- Dockerfile +34 -0
- a.py +239 -0
- app.py +205 -0
- d.cmd +3 -0
- har_and_cookies/auth_OpenaiChat.json +1 -0
- har_and_cookies/blackbox.json +1 -0
- output/research_paper_81f3292b.md +0 -0
- requirements.txt +6 -0
- templates/index.html +259 -0
Dockerfile
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile

FROM python:3.9-slim

# Install system dependencies (pandoc performs the md -> docx conversion)
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    pandoc \
    && rm -rf /var/lib/apt/lists/*

# Create and switch to a non-root user (required by HF Spaces)
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"
ENV PYTHONPATH=/app

# Set up working directory
WORKDIR /app

# Install Python dependencies.
# gunicorn is installed explicitly: the CMD below launches it, but
# requirements.txt does not list it, so without this the container fails.
COPY --chown=user requirements.txt .
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir --upgrade -r requirements.txt && \
    pip install --no-cache-dir --upgrade g4f[all] gunicorn

# Copy application code
COPY --chown=user . .

# Expose port (HF Spaces convention)
EXPOSE 7860

# Run the application
CMD ["gunicorn", "--bind", "0.0.0.0:7860", "--workers", "4", "--threads", "2", "app:app"]
|
a.py
ADDED
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import g4f
|
2 |
+
import os
|
3 |
+
import subprocess
|
4 |
+
from datetime import datetime
|
5 |
+
from tqdm import tqdm
|
6 |
+
from typing import List, Tuple
|
7 |
+
|
8 |
+
def get_available_models() -> List[str]:
    """Return the sorted list of model names known to g4f.

    Falls back to a small hard-coded list when the lookup fails.
    """
    fallback = ['gpt-4', 'gpt-3.5-turbo', 'llama2-70b', 'claude-2']
    try:
        # NOTE(review): _all_models is a private g4f attribute and may
        # disappear in a future release — confirm against the g4f version used.
        return sorted(g4f.models._all_models)
    except Exception as exc:
        print(f"Warning: Could not fetch models from g4f. Using default models. Error: {exc}")
        return fallback
|
21 |
+
|
22 |
+
def get_user_choice(prompt: str, options: List[str]) -> str:
    """Print a numbered menu and block until the user selects a valid entry."""
    print(prompt)
    for number, option in enumerate(options, start=1):
        print(f"{number}. {option}")
    while True:
        raw = input("Enter your choice: ")
        try:
            selected = int(raw)
        except ValueError:
            print("Please enter a valid number")
            continue
        if 1 <= selected <= len(options):
            return options[selected - 1]
        print(f"Please enter a number between 1 and {len(options)}")
|
35 |
+
|
36 |
+
def create_output_directory() -> None:
    """Create the ``output`` directory used for generated papers.

    Uses ``exist_ok=True`` so there is no race between the existence
    check and the mkdir (the original check-then-create could raise if
    another process created the directory in between).
    """
    os.makedirs("output", exist_ok=True)
|
40 |
+
|
41 |
+
def generate_filename() -> Tuple[str, str]:
    """Return (markdown_path, docx_path) stamped with the current time."""
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    base = f"output/research_paper_{stamp}"
    return f"{base}.md", f"{base}.docx"
|
47 |
+
|
48 |
+
def generate_index_content(model: str, research_subject: str, manual_chapters: List[str] = None) -> str:
    """Ask the model for a markdown table of contents for the paper.

    Args:
        model: g4f model identifier to query.
        research_subject: Topic of the research paper.
        manual_chapters: Optional fixed chapter titles to anchor the index.

    Returns:
        The model's reply as a string; a placeholder when the reply is empty.

    Raises:
        Exception: wrapping any underlying g4f failure (cause chained).
    """
    if manual_chapters:
        prompt = f"Generate a detailed index/table of contents for a research paper about {research_subject} with these chapters: " + \
                 ", ".join(manual_chapters) + ". Include section headings in markdown format."
    else:
        prompt = f"Generate a detailed index/table of contents for a research paper about {research_subject}. Include chapter titles and section headings in markdown format."

    # Keep the try body minimal: only the network call can meaningfully fail.
    try:
        response = g4f.ChatCompletion.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
        )
        # Ensure we get string output
        return str(response) if response else "[Empty response from model]"
    except Exception as e:
        # Chain the cause so the original traceback is not lost.
        raise Exception(f"Failed to generate index: {str(e)}") from e
|
65 |
+
|
66 |
+
def extract_chapters(index_content: str) -> List[str]:
    """Pull chapter-level ('## ') headings out of a markdown index.

    Standard front/back-matter headings (introduction, conclusion,
    references) are skipped; when nothing usable remains, a default
    three-chapter outline is returned instead.
    """
    excluded = ['introduction', 'conclusion', 'references']
    found = [
        stripped[3:].strip()
        for stripped in (line.strip() for line in index_content.split('\n'))
        if stripped.startswith('## ') and stripped[3:].strip().lower() not in excluded
    ]
    return found or ["Literature Review", "Methodology", "Results and Discussion"]
|
75 |
+
|
76 |
+
def generate_automatic_sections(model: str, research_subject: str) -> List[Tuple[str, str]]:
    """Build the full (title, prompt) section list from an AI-generated index."""
    try:
        with tqdm(total=1, desc="Generating Index") as progress:
            index_content = generate_index_content(model, research_subject)
            progress.update(1)

        sections: List[Tuple[str, str]] = [
            ("Index", index_content),
            ("Introduction", f"Write a comprehensive introduction for a research paper about {research_subject}. Include background information, research objectives, and significance of the study."),
        ]

        # One chapter entry per heading found in the generated index.
        for number, title in enumerate(extract_chapters(index_content), 1):
            sections.append(
                (f"Chapter {number}: {title}",
                 f"Write a detailed chapter about '{title}' for a research paper about {research_subject}. "
                 f"Provide comprehensive coverage of this aspect, including relevant theories, examples, and analysis.")
            )

        sections.append(
            ("Conclusion", f"Write a conclusion section for a research paper about {research_subject}. Summarize key findings, discuss implications, and suggest future research directions.")
        )
        return sections
    except Exception as exc:
        raise Exception(f"Failed to generate automatic structure: {str(exc)}")
|
104 |
+
|
105 |
+
def get_manual_sections(research_subject: str) -> List[Tuple[str, str]]:
    """Return the fixed fallback paper outline as (title, prompt) pairs."""
    outline = {
        "Index": "[Index will be generated first]",
        "Introduction": f"Write a comprehensive introduction for a research paper about {research_subject}.",
        "Chapter 1: Literature Review": f"Create a detailed literature review chapter about {research_subject}.",
        "Chapter 2: Methodology": f"Describe the research methodology for a study about {research_subject}.",
        "Chapter 3: Results and Discussion": f"Present hypothetical results and discussion for a research paper about {research_subject}. Analyze findings and compare with existing literature.",
        "Conclusion": f"Write a conclusion section for a research paper about {research_subject}.",
    }
    # dicts preserve insertion order, so this reproduces the fixed sequence.
    return list(outline.items())
|
115 |
+
|
116 |
+
def generate_section_content(model: str, prompt: str) -> str:
    """Send one prompt to the model and return its reply as text."""
    try:
        reply = g4f.ChatCompletion.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
        )
        return str(reply) if reply else "[Empty response from model]"
    except Exception as exc:
        raise Exception(f"Failed to generate section content: {str(exc)}")
|
126 |
+
|
127 |
+
def write_research_paper(md_filename: str, research_subject: str, sections: List[Tuple[str, str]], model: str) -> None:
    """Stream every section into md_filename, showing a progress bar.

    Prompts that already look like markdown (start with '#', e.g. the
    pre-generated index) are written verbatim; anything else is sent to
    the model first. A failing section is logged and replaced by a
    placeholder instead of aborting the whole paper.
    """
    with open(md_filename, "w", encoding="utf-8") as paper:
        paper.write(f"# Research Paper: {research_subject}\n\n")

        for section_title, prompt in tqdm(sections, desc="Generating Paper Sections"):
            try:
                if isinstance(prompt, str) and prompt.startswith("#"):
                    # Pre-generated content (like the index) — emit as-is.
                    paper.write(f"{prompt}\n\n")
                else:
                    body = generate_section_content(model, prompt)
                    paper.write(f"## {section_title}\n\n{body}\n\n")
            except Exception as exc:
                print(f"Error generating {section_title}: {str(exc)}")
                paper.write(f"## {section_title}\n\n[Error generating this section]\n\n")
|
146 |
+
|
147 |
+
def convert_to_word(md_filename: str, docx_filename: str) -> None:
    """Convert the markdown paper into a Word document using Pandoc.

    Builds a standalone .docx with a 3-level table of contents and applies
    reference.docx styling when that template exists in the working dir.

    Raises:
        Exception: when pandoc exits non-zero or is not installed
            (the underlying error is chained as the cause).
    """
    try:
        with tqdm(total=1, desc="Converting to Word") as pbar:
            # Build the command list (list form — no shell involved).
            command = [
                "pandoc", md_filename,
                "-o", docx_filename,
                "--standalone",          # Include header and footer
                "--table-of-contents",   # Add table of contents
                "--toc-depth=3"          # Include up to level 3 headings in TOC
            ]

            # Only add reference-doc if it exists
            if os.path.exists("reference.docx"):
                command.extend(["--reference-doc", "reference.docx"])

            # Run the command
            subprocess.run(command, check=True)
            pbar.update(1)
            print(f"Word document generated as {docx_filename}")
    except subprocess.CalledProcessError as e:
        # Chain the cause so pandoc's exit status survives in tracebacks.
        raise Exception(f"Error converting to Word document: {str(e)}") from e
    except FileNotFoundError as e:
        raise Exception("Pandoc not found. Please install Pandoc to convert to Word document.") from e
|
172 |
+
|
173 |
+
def _manual_sections_with_index(model: str, research_subject: str) -> List[Tuple[str, str]]:
    """Build the predefined outline and fill its Index placeholder in-place.

    On index-generation failure a placeholder marker is written instead,
    so the rest of the paper can still be produced.
    """
    sections = get_manual_sections(research_subject)
    # Titles of everything after the Index slot anchor the generated TOC.
    manual_chapters = [section[0] for section in sections[1:]]
    try:
        index_content = generate_index_content(model, research_subject, manual_chapters)
        sections[0] = ("Index", index_content)
    except Exception as e:
        print(f"Error generating index: {str(e)}")
        sections[0] = ("Index", "[Error generating index]")
    return sections


def main():
    """Interactive entry point: collect options, generate the paper, convert to Word.

    The manual-structure path (chosen explicitly or as the fallback when
    automatic generation fails) is factored into _manual_sections_with_index;
    the original duplicated that logic verbatim in both branches.
    """
    try:
        # Get configuration
        available_models = get_available_models()
        research_subject = input("Enter your research subject: ")
        selected_model = get_user_choice("\nSelect AI model:", available_models)

        structure_choice = get_user_choice(
            "\nSelect paper structure generation method:",
            ["Automatic (from AI-generated index)", "Manual (predefined structure)"]
        )

        # Setup files and directories
        create_output_directory()
        md_filename, docx_filename = generate_filename()

        # Generate sections
        if structure_choice.startswith("Automatic"):
            try:
                sections = generate_automatic_sections(selected_model, research_subject)
            except Exception as e:
                print(f"\nError: {str(e)}")
                print("Falling back to manual structure...")
                sections = _manual_sections_with_index(selected_model, research_subject)
        else:
            sections = _manual_sections_with_index(selected_model, research_subject)

        # Generate paper content
        write_research_paper(md_filename, research_subject, sections, selected_model)
        print(f"\nResearch paper saved as {md_filename}")

        # Convert to Word (best-effort: paper already exists on disk)
        try:
            convert_to_word(md_filename, docx_filename)
        except Exception as e:
            print(f"Warning: {str(e)}")

    except Exception as e:
        print(f"\nFatal error occurred: {str(e)}")
        exit(1)
|
231 |
+
|
232 |
+
if __name__ == "__main__":
    try:
        # NOTE(review): the module-level `import g4f` at the top of this file
        # would already raise ImportError before execution reaches this guard,
        # so this friendly message may be unreachable in practice — confirm.
        import g4f
        main()
    except ImportError:
        print("Error: g4f package not installed. Please install it with:")
        print("pip install -U g4f[all]")
        exit(1)
|
app.py
ADDED
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# app.py
|
2 |
+
from flask import Flask, render_template, request, jsonify, send_from_directory
|
3 |
+
import g4f
|
4 |
+
import os
|
5 |
+
import subprocess
|
6 |
+
from datetime import datetime
|
7 |
+
from typing import List, Tuple
|
8 |
+
import uuid
|
9 |
+
from werkzeug.utils import secure_filename
|
10 |
+
|
11 |
+
# Flask application object; renders templates/index.html for the UI.
app = Flask(__name__)
# Generated .md/.docx files are written to and downloaded from this directory.
app.config['UPLOAD_FOLDER'] = 'output'

# Initialize output directory
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
|
16 |
+
|
17 |
+
def get_available_models() -> List[str]:
    """Return sorted g4f model names, or a static fallback list on failure."""
    fallback = ['gpt-4', 'gpt-3.5-turbo', 'llama2-70b', 'claude-2']
    try:
        # NOTE(review): _all_models is private g4f API — may break on upgrade.
        return sorted(g4f.models._all_models)
    except Exception:
        return fallback
|
23 |
+
|
24 |
+
def generate_filename() -> Tuple[str, str]:
    """Return (md, docx) basenames sharing one random 8-character identifier."""
    token = str(uuid.uuid4())[:8]
    return f"research_paper_{token}.md", f"research_paper_{token}.docx"
|
30 |
+
|
31 |
+
def generate_index_content(model: str, research_subject: str, manual_chapters: List[str] = None) -> str:
    """Ask the model for a markdown table of contents for the paper.

    Args:
        model: g4f model identifier to query.
        research_subject: Topic of the research paper.
        manual_chapters: Optional fixed chapter titles to anchor the index.

    Returns:
        The model's reply as a string; a placeholder when the reply is empty.

    Raises:
        Exception: wrapping any underlying g4f failure (cause chained).
    """
    if manual_chapters:
        prompt = f"Generate a detailed index/table of contents for a research paper about {research_subject} with these chapters: " + \
                 ", ".join(manual_chapters) + ". Include section headings in markdown format."
    else:
        prompt = f"Generate a detailed index/table of contents for a research paper about {research_subject}. Include chapter titles and section headings in markdown format."

    # Keep the try body minimal: only the network call can meaningfully fail.
    try:
        response = g4f.ChatCompletion.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
        )
        return str(response) if response else "[Empty response from model]"
    except Exception as e:
        # Chain the cause so the original traceback is not lost.
        raise Exception(f"Failed to generate index: {str(e)}") from e
|
47 |
+
|
48 |
+
def extract_chapters(index_content: str) -> List[str]:
    """Pull chapter-level ('## ') headings out of a markdown index.

    Front/back-matter headings are skipped; when nothing usable remains,
    a default three-chapter outline is returned instead.
    """
    excluded = ['introduction', 'conclusion', 'references']
    found = [
        stripped[3:].strip()
        for stripped in (line.strip() for line in index_content.split('\n'))
        if stripped.startswith('## ') and stripped[3:].strip().lower() not in excluded
    ]
    return found or ["Literature Review", "Methodology", "Results and Discussion"]
|
57 |
+
|
58 |
+
def generate_automatic_sections(model: str, research_subject: str) -> List[Tuple[str, str]]:
    """Build the full (title, prompt) section list from an AI-generated index."""
    try:
        index_content = generate_index_content(model, research_subject)

        sections: List[Tuple[str, str]] = [
            ("Index", index_content),
            ("Introduction", f"Write a comprehensive introduction for a research paper about {research_subject}. Include background information, research objectives, and significance of the study."),
        ]

        # One chapter entry per heading found in the generated index.
        for number, title in enumerate(extract_chapters(index_content), 1):
            sections.append(
                (f"Chapter {number}: {title}",
                 f"Write a detailed chapter about '{title}' for a research paper about {research_subject}. "
                 f"Provide comprehensive coverage of this aspect, including relevant theories, examples, and analysis.")
            )

        sections.append(
            ("Conclusion", f"Write a conclusion section for a research paper about {research_subject}. Summarize key findings, discuss implications, and suggest future research directions.")
        )
        return sections
    except Exception as exc:
        raise Exception(f"Failed to generate automatic structure: {str(exc)}")
|
83 |
+
|
84 |
+
def get_manual_sections(research_subject: str) -> List[Tuple[str, str]]:
    """Return the fixed fallback paper outline as (title, prompt) pairs."""
    outline = {
        "Index": "[Index will be generated first]",
        "Introduction": f"Write a comprehensive introduction for a research paper about {research_subject}.",
        "Chapter 1: Literature Review": f"Create a detailed literature review chapter about {research_subject}.",
        "Chapter 2: Methodology": f"Describe the research methodology for a study about {research_subject}.",
        "Chapter 3: Results and Discussion": f"Present hypothetical results and discussion for a research paper about {research_subject}. Analyze findings and compare with existing literature.",
        "Conclusion": f"Write a conclusion section for a research paper about {research_subject}.",
    }
    # dicts preserve insertion order, so this reproduces the fixed sequence.
    return list(outline.items())
|
94 |
+
|
95 |
+
def generate_section_content(model: str, prompt: str) -> str:
    """Send one prompt to the model and return its reply as text."""
    try:
        reply = g4f.ChatCompletion.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
        )
        return str(reply) if reply else "[Empty response from model]"
    except Exception as exc:
        raise Exception(f"Failed to generate section content: {str(exc)}")
|
105 |
+
|
106 |
+
def write_research_paper(md_filename: str, research_subject: str, sections: List[Tuple[str, str]], model: str) -> None:
    """Write the title plus every section into UPLOAD_FOLDER/md_filename.

    Prompts that already look like markdown (start with '#') are written
    verbatim; anything else is sent to the model first. Per-section
    failures are recorded in the document instead of aborting the run.
    """
    full_path = os.path.join(app.config['UPLOAD_FOLDER'], md_filename)
    with open(full_path, "w", encoding="utf-8") as paper:
        paper.write(f"# Research Paper: {research_subject}\n\n")

        for section_title, prompt in sections:
            try:
                if isinstance(prompt, str) and prompt.startswith("#"):
                    paper.write(f"{prompt}\n\n")
                else:
                    body = generate_section_content(model, prompt)
                    paper.write(f"## {section_title}\n\n{body}\n\n")
            except Exception as exc:
                paper.write(f"## {section_title}\n\n[Error generating this section: {str(exc)}]\n\n")
|
122 |
+
|
123 |
+
def convert_to_word(md_filename: str, docx_filename: str) -> None:
    """Run pandoc to turn the markdown paper into a .docx with a TOC.

    Raises subprocess.CalledProcessError on a non-zero pandoc exit
    (check=True) and FileNotFoundError when pandoc is not installed.
    """
    folder = app.config['UPLOAD_FOLDER']
    command = [
        "pandoc", os.path.join(folder, md_filename),
        "-o", os.path.join(folder, docx_filename),
        "--standalone",
        "--table-of-contents",
        "--toc-depth=3",
    ]

    # Optional styling template shipped next to the app.
    if os.path.exists("reference.docx"):
        command.extend(["--reference-doc", "reference.docx"])

    subprocess.run(command, check=True)
|
140 |
+
|
141 |
+
@app.route('/')
def index():
    """Render the single-page UI with the model drop-down pre-populated."""
    return render_template('index.html', models=get_available_models())
|
145 |
+
|
146 |
+
def _manual_sections_with_index(model: str, research_subject: str) -> List[Tuple[str, str]]:
    """Predefined outline with its Index placeholder filled in by the model.

    Factored out because the original duplicated this logic verbatim in
    both branches of the /generate handler.
    """
    sections = get_manual_sections(research_subject)
    index_content = generate_index_content(model, research_subject, [s[0] for s in sections[1:]])
    sections[0] = ("Index", index_content)
    return sections


@app.route('/generate', methods=['POST'])
def generate():
    """JSON API: generate a paper, convert it to Word, report filenames.

    Expects {"subject": str, "model": str, "structure": "automatic"|"manual"}.
    Returns 400 when subject is missing, 500 on generation failure, and a
    'partial_success' payload when only the Word conversion fails.
    """
    data = request.json
    research_subject = data.get('subject', '').strip()
    selected_model = data.get('model', 'gpt-4')
    structure_type = data.get('structure', 'automatic')

    if not research_subject:
        return jsonify({'error': 'Research subject is required'}), 400

    try:
        # Generate filenames
        md_filename, docx_filename = generate_filename()

        # Generate sections based on structure type; the automatic path
        # falls back to the manual outline on any failure.
        if structure_type == 'automatic':
            try:
                sections = generate_automatic_sections(selected_model, research_subject)
            except Exception:
                sections = _manual_sections_with_index(selected_model, research_subject)
        else:
            sections = _manual_sections_with_index(selected_model, research_subject)

        # Generate the paper
        write_research_paper(md_filename, research_subject, sections, selected_model)

        # Convert to Word (best-effort: the markdown already exists)
        try:
            convert_to_word(md_filename, docx_filename)
        except Exception as e:
            return jsonify({
                'status': 'partial_success',
                'message': f'Paper generated but Word conversion failed: {str(e)}',
                'md_file': md_filename
            })

        return jsonify({
            'status': 'success',
            'docx_file': docx_filename,
            'md_file': md_filename
        })

    except Exception as e:
        return jsonify({'error': f'Failed to generate paper: {str(e)}'}), 500
|
194 |
+
|
195 |
+
@app.route('/download/<filename>')
def download(filename):
    """Serve a generated file from the output directory as an attachment.

    secure_filename strips path separators, preventing directory traversal.
    """
    return send_from_directory(
        app.config['UPLOAD_FOLDER'],
        secure_filename(filename),
        as_attachment=True,
    )
|
203 |
+
|
204 |
+
if __name__ == '__main__':
    # Development entry point only — the Docker image serves this app with
    # gunicorn instead of the built-in server.
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader; it must
    # never be exposed in production — confirm this path is dev-only.
    app.run(debug=True)
|
d.cmd
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
:: Convenience script: stage all changes, commit with a fixed message, push.
:: NOTE(review): the hard-coded commit message makes history uninformative;
:: consider passing the message as %1.
git add .
git commit -m "Add application file"
git push
|
har_and_cookies/auth_OpenaiChat.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"api_key": null, "cookies": {"_dd_s": "rum=0&expire=1744328288966&logs=1&id=a148f952-1aeb-404e-b346-222e9ceff98f&created=1744327388966", "oai-sc": "0gAAAAABn-FLgF7j4QQAAB46feL-KF_EgdrI7AC2CR-8BMKvd55AmgokL3NGl-MKTYlLJMw2ccFD1r6f7XGD9r-HbM0CYvns9juFmjubg6mbl4imHg7Q8vZee1TBrht73tL-8eAAHEa3bR4DiRGR4-Z3mG65OxfYXgXk_Y-dSr9L1rystTeLu2Ymlhuo_kEEnXn2lX9IWnNTnXJR9ll-L21leSmdC2YS1ulloBCFTz9krt9Oh_vcYBxI", "cf_clearance": "4tihdc7vg7IT29uwN7w2xFtVrS6ZL7.5pRJOzZzJCzk-1744327389-1.2.1.1-DUnJs2Rc._f3t9UeP8aL8P0qbfJjglZnJvzfauCXNps31hWNTSUpQDdtP4SSpwkKEhZLiMNkrgcvXue6XVDDKv6I9cGgQesp1jnRn3.yiy6Y0BTg.MkjhktYi8x43OvXARvKPBUVaf03YLelG9mtzA5.HupQo9odc2sLIhHl6AeEzIEe54EeGRsMhEgoY41fSW_tuzEAvLuxz.Z_Rv2d9GkM.GHlCDG0VVoxbCHFQLXakzN6bUrJueXW4MqIWyz6k3nSEWVnnq3fPMXPKdvZdkgtS6xVWXSA6t7XVX7VsARUTZet8V2VAZq5TK0xEm2IAzeGUPJRgBder.yeid__b2TdWn9GjAKWNo2byArrHwI", "__cflb": "0H28vzvP5FJafnkHxj4UT4q3DSGqRYN2PgVfGD42o1D", "_cfuvid": "2AXajlsJCkqL6C7RiZr8UEXoHlEgxO7fc2NM1EbV6fk-1744327386278-0.0.1.1-604800000", "__Host-next-auth.csrf-token": "a57f875f5a6c9e2417a3ec87fbb631857f8c20d277a9dadb11348ab992d47dd1%7C37e7bd65b00f393844395f1248a5e44e1e6a64046ad4e2fa28a8b0875430fbc6", "__cf_bm": "uwro6GZ2FmV9cyr8IA1AfEr7UpRpXiYuFtIyw3KKMTM-1744327386-1.0.1.1-YZPH_ZobjntjzOXG9s_gmr0KYQ1jwE39o10caQzkERtvW4qZv6PWU90Q.f3A8ME_u66VbxhogXtxyl9T84RWKBNsVaR2XMpm5cvPHP4nWFM", "oai-did": "ace63a11-36f3-4681-a00d-3a6e4ca41c0b", "__Secure-next-auth.callback-url": "https%3A%2F%2Fchatgpt.com"}, "headers": {"upgrade-insecure-requests": "1", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36", "sec-ch-ua": "\"Google Chrome\";v=\"135\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"135\"", "sec-ch-ua-mobile": "?0", "sec-ch-ua-platform": "\"Windows\"", "cookie": "_dd_s=rum=0&expire=1744328288966&logs=1&id=a148f952-1aeb-404e-b346-222e9ceff98f&created=1744327388966; 
oai-sc=0gAAAAABn-FLgF7j4QQAAB46feL-KF_EgdrI7AC2CR-8BMKvd55AmgokL3NGl-MKTYlLJMw2ccFD1r6f7XGD9r-HbM0CYvns9juFmjubg6mbl4imHg7Q8vZee1TBrht73tL-8eAAHEa3bR4DiRGR4-Z3mG65OxfYXgXk_Y-dSr9L1rystTeLu2Ymlhuo_kEEnXn2lX9IWnNTnXJR9ll-L21leSmdC2YS1ulloBCFTz9krt9Oh_vcYBxI; cf_clearance=4tihdc7vg7IT29uwN7w2xFtVrS6ZL7.5pRJOzZzJCzk-1744327389-1.2.1.1-DUnJs2Rc._f3t9UeP8aL8P0qbfJjglZnJvzfauCXNps31hWNTSUpQDdtP4SSpwkKEhZLiMNkrgcvXue6XVDDKv6I9cGgQesp1jnRn3.yiy6Y0BTg.MkjhktYi8x43OvXARvKPBUVaf03YLelG9mtzA5.HupQo9odc2sLIhHl6AeEzIEe54EeGRsMhEgoY41fSW_tuzEAvLuxz.Z_Rv2d9GkM.GHlCDG0VVoxbCHFQLXakzN6bUrJueXW4MqIWyz6k3nSEWVnnq3fPMXPKdvZdkgtS6xVWXSA6t7XVX7VsARUTZet8V2VAZq5TK0xEm2IAzeGUPJRgBder.yeid__b2TdWn9GjAKWNo2byArrHwI; __cflb=0H28vzvP5FJafnkHxj4UT4q3DSGqRYN2PgVfGD42o1D; _cfuvid=2AXajlsJCkqL6C7RiZr8UEXoHlEgxO7fc2NM1EbV6fk-1744327386278-0.0.1.1-604800000; __Host-next-auth.csrf-token=a57f875f5a6c9e2417a3ec87fbb631857f8c20d277a9dadb11348ab992d47dd1%7C37e7bd65b00f393844395f1248a5e44e1e6a64046ad4e2fa28a8b0875430fbc6; __cf_bm=uwro6GZ2FmV9cyr8IA1AfEr7UpRpXiYuFtIyw3KKMTM-1744327386-1.0.1.1-YZPH_ZobjntjzOXG9s_gmr0KYQ1jwE39o10caQzkERtvW4qZv6PWU90Q.f3A8ME_u66VbxhogXtxyl9T84RWKBNsVaR2XMpm5cvPHP4nWFM; oai-did=ace63a11-36f3-4681-a00d-3a6e4ca41c0b; __Secure-next-auth.callback-url=https%3A%2F%2Fchatgpt.com"}, "expires": null, "proof_token": [2134, "Fri Apr 11 2025 02:23:10 GMT+0300 (Arabian Standard Time)", 2248146944, 21, "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36", null, "prod-e900b9b3b43b17d1d2e0655bc3b78309ac4bd4c6", "en-US", "en-US,en", 179, "sendBeacon\u2212function sendBeacon() { [native code] }", "location", "webkitRequestFileSystem", 6295.699999999255, "5aa516a5-5969-420c-b633-a2e45fd581da", "", 4, 1744327384310.3], "turnstile_token": 
"ShodCB0AAQwCEXBqd28RHRoYDx0CDAwCEXB5b3lycHlveXJweW95cnB5b3lycHlveXJwBRMaHxMLGBYHBhoUCB0ADxsMAQcIFg4ABwkYDAoIFAwOCx8AHRoJE3VEeUZ/UncFER0aHwwdAgEMAhFmbVZZYHR5Yn5Ed1p5CXF8a3tQe3kJWnplY08TGh8TDRoWCwAaFBp+W21bdkQMBQwUEQUWGQsRCxpvTw4MGgIaBwQWHBoJE1lGV1ByeUFecGNbb3BicHZmQGpzeUlXfnd6aE9rdEx5fWEBWUwJS2ZgVldwdl9nfHVZU295RHxxanpiY3p/CHJyeW96cHMMT1pUDAUMFBEICwANEQsadlRfeX54XGppeWRgZl1fdmwDWXx/UFd/b3t2A2B+G2piZ2piWgNGXnZUX3l+H3JxYn4eYGZ3elpgWEVvdm5hQm94CVVpblltYnRudG9YRVl4UGlVYHt6cGRtaG9Rd1BNeXJafW15XGh7f3V5ckAabWt3enx5ZV18f21Ac28fclhiCFpzaVp+YmsCY15NCV9wbkJid3d9aGJkWQ10YFhZeWZUYWFrHlxbZm1CXFUADXRrXUlrfG4Dcm8fAWppbWx1e11ydmB2QW92bl9QYHhbUGIIWlpiZ2pjeVRWdG9pWFJvWXl3Zm1JdnJGeWV/dUZedlRfeX4fcnFifh5ga2cNG28DSVp0fUd9a0ANZmIJH2Jld2oZaWYAXHxqYVVsaHJ1YghKXntkcmNsAkFeeH9XeWBsCHJ1QE1zcWB5Z35UQnxmbldhbXxAYmJ+G3R7d25KbHYAbXdUAlBsaEhVaQkebWJ3anRgdUVaZglfUGxrfmpmVFp1ZF0NeWBhZ3l4fUtQbXgJZGJPEwURHRoYDR0GDgwCEWJQXm90VlNsfnJJemtpQH5wWUtVdAgXcGt0XHR/W2BzbFALf3lGSFFnfmhyZnRbenlbRnZmaWJkfGtAUGBqQW13VnFtfnJJc3lUaWBvRld5cE8eeXdjAHpvWHd1fAkGZ3xvcXVwX295d2MAem52RV18QH5ycEVtDhNF"}
|
har_and_cookies/blackbox.json
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"validated_value": "00f37b34-a166-4efb-bce5-1312d87f2f94"}
|
output/research_paper_81f3292b.md
ADDED
File without changes
|
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
flask
g4f
gunicorn
pypandoc
python-dotenv
werkzeug
uvicorn
|
templates/index.html
ADDED
@@ -0,0 +1,259 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!-- templates/index.html -->
|
2 |
+
<!DOCTYPE html>
|
3 |
+
<html lang="en">
|
4 |
+
<head>
|
5 |
+
<meta charset="UTF-8">
|
6 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
7 |
+
<title>Research Paper Generator</title>
|
8 |
+
<script src="https://cdn.tailwindcss.com"></script>
|
9 |
+
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
|
10 |
+
</head>
|
11 |
+
<body class="bg-gray-50 min-h-screen">
|
12 |
+
<div class="container mx-auto px-4 py-8">
|
13 |
+
<div class="max-w-3xl mx-auto bg-white rounded-xl shadow-md overflow-hidden p-6">
|
14 |
+
<div class="text-center mb-8">
|
15 |
+
<h1 class="text-3xl font-bold text-indigo-700 mb-2">
|
16 |
+
<i class="fas fa-file-alt mr-2"></i>Research Paper Generator
|
17 |
+
</h1>
|
18 |
+
<p class="text-gray-600">Generate complete research papers with AI</p>
|
19 |
+
</div>
|
20 |
+
|
21 |
+
<form id="paperForm" class="space-y-6">
|
22 |
+
<div>
|
23 |
+
<label for="subject" class="block text-sm font-medium text-gray-700 mb-1">
|
24 |
+
<i class="fas fa-book mr-2"></i>Research Subject
|
25 |
+
</label>
|
26 |
+
<input type="text" id="subject" name="subject" required
|
27 |
+
class="w-full px-4 py-2 border border-gray-300 rounded-md focus:ring-indigo-500 focus:border-indigo-500"
|
28 |
+
placeholder="Enter your research topic">
|
29 |
+
</div>
|
30 |
+
|
31 |
+
<div>
|
32 |
+
<label for="model" class="block text-sm font-medium text-gray-700 mb-1">
|
33 |
+
<i class="fas fa-brain mr-2"></i>AI Model
|
34 |
+
</label>
|
35 |
+
<select id="model" name="model"
|
36 |
+
class="w-full px-4 py-2 border border-gray-300 rounded-md focus:ring-indigo-500 focus:border-indigo-500">
|
37 |
+
{% for model in models %}
|
38 |
+
<option value="{{ model }}">{{ model }}</option>
|
39 |
+
{% endfor %}
|
40 |
+
</select>
|
41 |
+
</div>
|
42 |
+
|
43 |
+
<div>
|
44 |
+
<label class="block text-sm font-medium text-gray-700 mb-2">
|
45 |
+
<i class="fas fa-layer-group mr-2"></i>Paper Structure
|
46 |
+
</label>
|
47 |
+
<div class="flex space-x-4">
|
48 |
+
<label class="inline-flex items-center">
|
49 |
+
<input type="radio" name="structure" value="automatic" checked
|
50 |
+
class="h-4 w-4 text-indigo-600 focus:ring-indigo-500">
|
51 |
+
<span class="ml-2 text-gray-700">Automatic</span>
|
52 |
+
</label>
|
53 |
+
<label class="inline-flex items-center">
|
54 |
+
<input type="radio" name="structure" value="manual"
|
55 |
+
class="h-4 w-4 text-indigo-600 focus:ring-indigo-500">
|
56 |
+
<span class="ml-2 text-gray-700">Manual</span>
|
57 |
+
</label>
|
58 |
+
</div>
|
59 |
+
</div>
|
60 |
+
|
61 |
+
<div class="pt-4">
|
62 |
+
<button type="submit" id="generateBtn"
|
63 |
+
class="w-full flex justify-center items-center py-3 px-4 border border-transparent rounded-md shadow-sm text-sm font-medium text-white bg-indigo-600 hover:bg-indigo-700 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-indigo-500 disabled:opacity-50 disabled:cursor-not-allowed">
|
64 |
+
<i class="fas fa-magic mr-2"></i> Generate Paper
|
65 |
+
</button>
|
66 |
+
</div>
|
67 |
+
</form>
|
68 |
+
|
69 |
+
<div id="progressContainer" class="mt-8 hidden">
|
70 |
+
<h3 class="text-lg font-medium text-gray-900 mb-4">
|
71 |
+
<i class="fas fa-tasks mr-2"></i>Generation Progress
|
72 |
+
</h3>
|
73 |
+
<div class="space-y-4">
|
74 |
+
<div id="progressSteps" class="space-y-3"></div>
|
75 |
+
<div class="relative pt-1">
|
76 |
+
<div class="flex items-center justify-between">
|
77 |
+
<div>
|
78 |
+
<span id="progressText" class="text-xs font-semibold inline-block text-indigo-600">
|
79 |
+
0%
|
80 |
+
</span>
|
81 |
+
</div>
|
82 |
+
</div>
|
83 |
+
<div class="overflow-hidden h-2 mb-4 text-xs flex rounded bg-indigo-200">
|
84 |
+
<div id="progressBar" style="width:0%"
|
85 |
+
class="shadow-none flex flex-col text-center whitespace-nowrap text-white justify-center bg-indigo-500 transition-all duration-300"></div>
|
86 |
+
</div>
|
87 |
+
</div>
|
88 |
+
</div>
|
89 |
+
</div>
|
90 |
+
|
91 |
+
<div id="resultContainer" class="mt-8 hidden">
|
92 |
+
<h3 class="text-lg font-medium text-gray-900 mb-4">
|
93 |
+
<i class="fas fa-check-circle mr-2 text-green-500"></i>Generation Complete
|
94 |
+
</h3>
|
95 |
+
<div class="flex space-x-4">
|
96 |
+
<a id="downloadDocx" href="#"
|
97 |
+
class="inline-flex items-center px-4 py-2 border border-transparent text-sm font-medium rounded-md shadow-sm text-white bg-green-600 hover:bg-green-700">
|
98 |
+
<i class="fas fa-file-word mr-2"></i> Download Word
|
99 |
+
</a>
|
100 |
+
<a id="downloadMd" href="#"
|
101 |
+
class="inline-flex items-center px-4 py-2 border border-gray-300 text-sm font-medium rounded-md text-gray-700 bg-white hover:bg-gray-50">
|
102 |
+
<i class="fas fa-file-code mr-2"></i> Download Markdown
|
103 |
+
</a>
|
104 |
+
</div>
|
105 |
+
</div>
|
106 |
+
|
107 |
+
<div id="errorContainer" class="mt-8 hidden">
|
108 |
+
<div class="bg-red-50 border-l-4 border-red-400 p-4">
|
109 |
+
<div class="flex">
|
110 |
+
<div class="flex-shrink-0">
|
111 |
+
<i class="fas fa-exclamation-circle text-red-400"></i>
|
112 |
+
</div>
|
113 |
+
<div class="ml-3">
|
114 |
+
<p id="errorMessage" class="text-sm text-red-700"></p>
|
115 |
+
</div>
|
116 |
+
</div>
|
117 |
+
</div>
|
118 |
+
</div>
|
119 |
+
</div>
|
120 |
+
</div>
|
121 |
+
|
122 |
+
<script>
|
123 |
+
document.getElementById('paperForm').addEventListener('submit', function(e) {
|
124 |
+
e.preventDefault();
|
125 |
+
|
126 |
+
const form = e.target;
|
127 |
+
const generateBtn = document.getElementById('generateBtn');
|
128 |
+
const progressContainer = document.getElementById('progressContainer');
|
129 |
+
const resultContainer = document.getElementById('resultContainer');
|
130 |
+
const errorContainer = document.getElementById('errorContainer');
|
131 |
+
const progressSteps = document.getElementById('progressSteps');
|
132 |
+
const progressBar = document.getElementById('progressBar');
|
133 |
+
const progressText = document.getElementById('progressText');
|
134 |
+
|
135 |
+
// Reset UI
|
136 |
+
generateBtn.disabled = true;
|
137 |
+
progressContainer.classList.remove('hidden');
|
138 |
+
resultContainer.classList.add('hidden');
|
139 |
+
errorContainer.classList.add('hidden');
|
140 |
+
progressSteps.innerHTML = '';
|
141 |
+
progressBar.style.width = '0%';
|
142 |
+
progressText.textContent = '0%';
|
143 |
+
|
144 |
+
// Show initial progress steps
|
145 |
+
const steps = [
|
146 |
+
'Preparing document structure...',
|
147 |
+
'Generating index/table of contents...',
|
148 |
+
'Writing introduction...',
|
149 |
+
'Generating chapters...',
|
150 |
+
'Creating conclusion...',
|
151 |
+
'Finalizing document...',
|
152 |
+
'Converting to Word format...'
|
153 |
+
];
|
154 |
+
|
155 |
+
steps.forEach((step, index) => {
|
156 |
+
const stepElement = document.createElement('div');
|
157 |
+
stepElement.className = 'flex items-center';
|
158 |
+
stepElement.innerHTML = `
|
159 |
+
<div class="flex-shrink-0 h-5 w-5 text-gray-400">
|
160 |
+
<i class="far fa-circle"></i>
|
161 |
+
</div>
|
162 |
+
<div class="ml-3">
|
163 |
+
<p class="text-sm font-medium text-gray-700">${step}</p>
|
164 |
+
</div>
|
165 |
+
`;
|
166 |
+
stepElement.id = `step-${index}`;
|
167 |
+
progressSteps.appendChild(stepElement);
|
168 |
+
});
|
169 |
+
|
170 |
+
// Get form data
|
171 |
+
const formData = {
|
172 |
+
subject: form.subject.value,
|
173 |
+
model: form.model.value,
|
174 |
+
structure: form.structure.value
|
175 |
+
};
|
176 |
+
|
177 |
+
// Simulate progress updates (in a real app, you'd use SSE or websockets)
|
178 |
+
const totalSteps = steps.length;
|
179 |
+
let currentStep = 0;
|
180 |
+
|
181 |
+
const updateProgress = () => {
|
182 |
+
const progress = Math.min(100, Math.round((currentStep / totalSteps) * 100));
|
183 |
+
progressBar.style.width = `${progress}%`;
|
184 |
+
progressText.textContent = `${progress}%`;
|
185 |
+
|
186 |
+
// Update step icons
|
187 |
+
if (currentStep > 0) {
|
188 |
+
const prevStepElement = document.getElementById(`step-${currentStep-1}`);
|
189 |
+
if (prevStepElement) {
|
190 |
+
prevStepElement.querySelector('i').className = 'fas fa-check-circle text-green-500';
|
191 |
+
}
|
192 |
+
}
|
193 |
+
|
194 |
+
if (currentStep < totalSteps) {
|
195 |
+
const currentStepElement = document.getElementById(`step-${currentStep}`);
|
196 |
+
if (currentStepElement) {
|
197 |
+
currentStepElement.querySelector('i').className = 'fas fa-spinner fa-spin text-indigo-500';
|
198 |
+
}
|
199 |
+
}
|
200 |
+
};
|
201 |
+
|
202 |
+
const interval = setInterval(() => {
|
203 |
+
currentStep++;
|
204 |
+
updateProgress();
|
205 |
+
|
206 |
+
if (currentStep >= totalSteps) {
|
207 |
+
clearInterval(interval);
|
208 |
+
}
|
209 |
+
}, 1000);
|
210 |
+
|
211 |
+
// Send request to server
|
212 |
+
fetch('/generate', {
|
213 |
+
method: 'POST',
|
214 |
+
headers: {
|
215 |
+
'Content-Type': 'application/json',
|
216 |
+
},
|
217 |
+
body: JSON.stringify(formData)
|
218 |
+
})
|
219 |
+
.then(response => response.json())
|
220 |
+
.then(data => {
|
221 |
+
clearInterval(interval);
|
222 |
+
updateProgress();
|
223 |
+
|
224 |
+
if (data.error) {
|
225 |
+
throw new Error(data.error);
|
226 |
+
}
|
227 |
+
|
228 |
+
// Show download links
|
229 |
+
if (data.docx_file) {
|
230 |
+
document.getElementById('downloadDocx').href = `/download/${data.docx_file}`;
|
231 |
+
}
|
232 |
+
if (data.md_file) {
|
233 |
+
document.getElementById('downloadMd').href = `/download/${data.md_file}`;
|
234 |
+
}
|
235 |
+
|
236 |
+
// Mark all steps as complete
|
237 |
+
for (let i = 0; i < steps.length; i++) {
|
238 |
+
const stepElement = document.getElementById(`step-${i}`);
|
239 |
+
if (stepElement) {
|
240 |
+
stepElement.querySelector('i').className = 'fas fa-check-circle text-green-500';
|
241 |
+
}
|
242 |
+
}
|
243 |
+
|
244 |
+
progressBar.style.width = '100%';
|
245 |
+
progressText.textContent = '100%';
|
246 |
+
resultContainer.classList.remove('hidden');
|
247 |
+
})
|
248 |
+
.catch(error => {
|
249 |
+
clearInterval(interval);
|
250 |
+
document.getElementById('errorMessage').textContent = error.message;
|
251 |
+
errorContainer.classList.remove('hidden');
|
252 |
+
})
|
253 |
+
.finally(() => {
|
254 |
+
generateBtn.disabled = false;
|
255 |
+
});
|
256 |
+
});
|
257 |
+
</script>
|
258 |
+
</body>
|
259 |
+
</html>
|