Gabriel Vidal-Ayrinhac committed
Commit 7d6d833 · 1 Parent(s): c19c001
new containerized unity+api
- .env.example +2 -0
- .gitattributes +4 -12
- .gitignore +173 -0
- Dockerfile +35 -0
- Makefile +19 -0
- README.md +5 -6
- WebGLBuild/.htaccess +3 -0
- WebGLBuild/Build/WebGLBuild.data +3 -0
- WebGLBuild/Build/WebGLBuild.framework.js +3 -0
- WebGLBuild/Build/WebGLBuild.loader.js +3 -0
- WebGLBuild/Build/WebGLBuild.wasm +3 -0
- WebGLBuild/StreamingAssets/UnityServicesProjectConfiguration.json +1 -0
- WebGLBuild/TemplateData/favicon.ico +0 -0
- WebGLBuild/TemplateData/fullscreen-button.png +0 -0
- WebGLBuild/TemplateData/style.css +105 -0
- WebGLBuild/index.html +92 -0
- WebGLBuild/logo.png +0 -0
- WebGLBuild/logo.webp +0 -0
- nginx.conf +26 -0
- requirements.txt +7 -0
- src/config/audio.yaml +17 -0
- src/config/cards_kamala.yaml +56 -0
- src/config/cards_neutral.yaml +47 -0
- src/config/cards_trump.yaml +69 -0
- src/config/cards_trump_french.yaml +41 -0
- src/config/context.yaml +10 -0
- src/config/kamala.yaml +21 -0
- src/config/test_cards.yaml +15 -0
- src/config/trump.yaml +22 -0
- src/data/readme.md +0 -0
- src/hackathon/__init__.py +0 -0
- src/hackathon/agent/__init__.py +0 -0
- src/hackathon/agent/arbitrary.py +181 -0
- src/hackathon/agent/character.py +213 -0
- src/hackathon/agent/engagement.py +12 -0
- src/hackathon/agent/presenter.py +66 -0
- src/hackathon/config.py +27 -0
- src/hackathon/game_mechanics/__init__.py +0 -0
- src/hackathon/game_mechanics/entities.py +92 -0
- src/hackathon/game_mechanics/pre_game_mechanics.py +62 -0
- src/hackathon/server/__init__.py +0 -0
- src/hackathon/server/schemas.py +54 -0
- src/hackathon/server/server.py +281 -0
- src/hackathon/speech/__init__.py +0 -0
- src/hackathon/speech/speech.py +102 -0
- src/hackathon/utils/util.py +22 -0
- supervisord.conf +25 -0
- supervisord.dev.conf +22 -0
.env.example
ADDED
@@ -0,0 +1,2 @@
+MISTRAL_API_KEY=your_api_key_here
+ELEVENLABS_API_KEY=your_api_key_here
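As a quick orientation for how these two variables are consumed, here is a minimal sketch that reads them from the environment at startup; it assumes the API process takes them straight from os.environ (the actual handling in src/hackathon/config.py is not shown in this commit view), so treat the helper name as illustrative only.

import os

def load_api_keys() -> dict:
    # Hypothetical helper: fail fast if a key documented in .env.example is missing.
    keys = {}
    for name in ("MISTRAL_API_KEY", "ELEVENLABS_API_KEY"):
        value = os.environ.get(name)
        if not value:
            raise RuntimeError(f"{name} is not set; copy .env.example to .env and fill it in")
        keys[name] = value
    return keys

if __name__ == "__main__":
    print(sorted(load_api_keys()))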
.gitattributes
CHANGED
@@ -33,15 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-Build/
-Build/
-Build/
-Build/
-Build/WebGL.wasm filter=lfs diff=lfs merge=lfs -text
-Build/WebGL.data filter=lfs diff=lfs merge=lfs -text
-Build/WebGL.framework.js filter=lfs diff=lfs merge=lfs -text
-Build/WebGL.loader.js filter=lfs diff=lfs merge=lfs -text
-Build/WebGL2.data filter=lfs diff=lfs merge=lfs -text
-Build/WebGL2.framework.js filter=lfs diff=lfs merge=lfs -text
-Build/WebGL2.loader.js filter=lfs diff=lfs merge=lfs -text
-Build/WebGL2.wasm filter=lfs diff=lfs merge=lfs -text
+WebGLBuild/Build/WebGLBuild.data filter=lfs diff=lfs merge=lfs -text
+WebGLBuild/Build/WebGLBuild.framework.js filter=lfs diff=lfs merge=lfs -text
+WebGLBuild/Build/WebGLBuild.loader.js filter=lfs diff=lfs merge=lfs -text
+WebGLBuild/Build/WebGLBuild.wasm filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,173 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# UV
+# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+#uv.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+.pdm.toml
+.pdm-python
+.pdm-build/
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+# PyPI configuration file
+.pypirc
+
+.DS_Store
+supervisord.pid
Dockerfile
ADDED
@@ -0,0 +1,35 @@
+ARG MISTRAL_API_KEY
+ARG ELEVENLABS_API_KEY
+ARG API_URL=https://mistral-ai-game-jam-team15.hf.space/api
+ARG API_BASE_PATH=/app
+
+# Use Python base image as the primary environment
+FROM python:3.10-alpine AS final
+
+RUN apk add --no-cache supervisor
+
+RUN adduser -D -u 1000 user
+USER user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+WORKDIR /app
+
+COPY --chown=user ./requirements.txt /app
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY --chown=user ./src /app
+COPY --chown=user ./WebGLBuild /unity
+
+COPY --chown=user nginx.conf /etc/nginx/nginx.conf
+
+# Expose the port
+EXPOSE 8080
+
+# Copy --chown=user supervisord config to manage both processes (Nginx & Uvicorn/Flask/etc.)
+COPY --chown=user supervisord.conf /etc/supervisord.conf
+
+# Start both services using supervisord
+CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]
Makefile
ADDED
@@ -0,0 +1,19 @@
+build:
+	docker build -t team15:latest .
+
+run: # run source .env file for api keys
+	docker run -p 8080:8080 -p 3000:3000 \
+		-e MISTRAL_API_KEY=$(MISTRAL_API_KEY) \
+		-e ELEVENLABS_API_KEY=$(ELEVENLABS_API_KEY) \
+		-e API_URL=$(API_URL) \
+		team15:latest
+
+dev:
+	supervisord -c supervisord.dev.conf
+
+
+dev.api:
+	fastapi dev src/hackathon/server/server.py --host 0.0.0.0 --port 3000
+
+dev.unity:
+	cd WebGLBuild && python -m http.server 8080
README.md
CHANGED
@@ -1,10 +1,9 @@
 ---
-title: Team15
-emoji:
+title: Team15 Docker
+emoji: 📉
 colorFrom: pink
-colorTo:
-sdk:
+colorTo: red
+sdk: docker
 pinned: false
+app_port: 8080
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
WebGLBuild/.htaccess
ADDED
@@ -0,0 +1,3 @@
+<IfModule mod_headers.c>
+    Header set Access-Control-Allow-Origin "*"
+</IfModule>
WebGLBuild/Build/WebGLBuild.data
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:373d1d36d23b3822cee303c398adb63648c7046cb97889809ddeb2265a81fa41
+size 44337815
WebGLBuild/Build/WebGLBuild.framework.js
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af2e9403853fa617687cf9cd486a371950d8e68f7b1c4a6f2703b114a0070164
+size 422278
WebGLBuild/Build/WebGLBuild.loader.js
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86ea62283e54262ba471d7fccfd1af877ef7a09ec4d047afbd9ddbb6d315d169
+size 20642
WebGLBuild/Build/WebGLBuild.wasm
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2330eb36a4bccf4fd39c9fcc39eaad380f82d3d40f670f649974779788126aff
+size 36937974
WebGLBuild/StreamingAssets/UnityServicesProjectConfiguration.json
ADDED
@@ -0,0 +1 @@
+{"Keys":["com.unity.services.core.version","com.unity.services.core.initializer-assembly-qualified-names","com.unity.services.analytics.version","com.unity.services.analytics.initializer-assembly-qualified-names","com.unity.purchasing.version","com.unity.purchasing.initializer-assembly-qualified-names","com.unity.services.core.all-package-names","com.unity.services.core.cloud-environment","com.unity.services.core.environment-name"],"Values":[{"m_Value":"1.14.0","m_IsReadOnly":true},{"m_Value":"Unity.Services.Core.Registration.CorePackageInitializer, Unity.Services.Core.Registration, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null;Unity.Services.Core.Internal.IInitializablePackageV2, Unity.Services.Core.Internal, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null","m_IsReadOnly":true},{"m_Value":"6.0.2","m_IsReadOnly":true},{"m_Value":"Ua2CoreInitializeCallback, Unity.Services.Analytics, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null","m_IsReadOnly":true},{"m_Value":"4.11.0","m_IsReadOnly":true},{"m_Value":"UnityEngine.Purchasing.Registration.IapCoreInitializeCallback, UnityEngine.Purchasing.Stores, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null","m_IsReadOnly":true},{"m_Value":"com.unity.services.core;com.unity.services.analytics;com.unity.purchasing","m_IsReadOnly":false},{"m_Value":"production","m_IsReadOnly":false},{"m_Value":"production","m_IsReadOnly":false}]}
WebGLBuild/TemplateData/favicon.ico
ADDED
WebGLBuild/TemplateData/fullscreen-button.png
ADDED
WebGLBuild/TemplateData/style.css
ADDED
@@ -0,0 +1,105 @@
+html {
+  box-sizing: border-box;
+}
+*, *:before, *:after {
+  box-sizing: inherit;
+}
+html, body {
+  height: 100%;
+}
+canvas {
+  display: block;
+}
+body {
+  margin: 0;
+}
+#unity-container {
+  width: 100%;
+  height: 100%;
+}
+#unity-canvas {
+  width: 100%;
+  height: 100%;
+  background: #231F20;
+}
+#loading-cover {
+  position: absolute;
+  top: 0;
+  left: 0;
+  width: 100%;
+  height: 100%;
+  display: flex;
+  justify-content: center;
+  align-items: center;
+}
+#unity-loading-bar {
+  flex: 1 1 auto;
+  display: flex;
+  flex-direction: column;
+  justify-content: center;
+  align-items: center;
+}
+#unity-logo {
+  text-align: center;
+}
+#unity-logo img {
+  max-width: 80%;
+}
+#unity-progress-bar-empty {
+  width: 80%;
+  height: 24px;
+  margin: 10px 20px 20px 10px;
+  text-align: left;
+  border: 1px solid white;
+  padding: 2px;
+}
+#unity-progress-bar-full {
+  width: 0%;
+  height: 100%;
+  background: #ffd21e;
+}
+.light #unity-progress-bar-empty {
+  border-color: black;
+}
+.light #unity-progress-bar-full {
+  background: black;
+}
+
+#unity-fullscreen-button {
+  position: absolute;
+  right: 10px;
+  bottom: 10px;
+  width: 38px;
+  height: 38px;
+  background: url('fullscreen-button.png') no-repeat center;
+  background-size: contain;
+}
+
+.spinner,
+.spinner:after {
+  border-radius: 50%;
+  width: 5em;
+  height: 5em;
+}
+.spinner {
+  margin: 10px;
+  font-size: 10px;
+  position: relative;
+  text-indent: -9999em;
+  border-top: 1.1em solid rgba(255, 255, 255, 0.2);
+  border-right: 1.1em solid rgba(255, 255, 255, 0.2);
+  border-bottom: 1.1em solid rgba(255, 255, 255, 0.2);
+  border-left: 1.1em solid #ffffff;
+  transform: translateZ(0);
+  animation: spinner-spin 1.1s infinite linear;
+}
+@keyframes spinner-spin {
+  0% {
+    transform: rotate(0deg);
+  }
+  100% {
+    transform: rotate(360deg);
+  }
+}
+
+
WebGLBuild/index.html
ADDED
@@ -0,0 +1,92 @@
+<!DOCTYPE html>
+<html lang="en-us">
+  <head>
+    <meta charset="utf-8">
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
+    <title>unity-game</title>
+    <link rel="shortcut icon" href="TemplateData/favicon.ico">
+    <link rel="stylesheet" href="TemplateData/style.css">
+  </head>
+  <body class="dark">
+    <div id="unity-container" class="unity-desktop">
+      <canvas id="unity-canvas"></canvas>
+    </div>
+    <div id="loading-cover" style="display:none;">
+      <div id="unity-loading-bar">
+        <div id="unity-logo"><img src="logo.png"></div>
+        <div id="unity-progress-bar-empty" style="display: none;">
+          <div id="unity-progress-bar-full"></div>
+        </div>
+        <div class="spinner"></div>
+      </div>
+    </div>
+    <div id="unity-fullscreen-button" style="display: none;"></div>
+    <script>
+      const hideFullScreenButton = "";
+      const buildUrl = "Build";
+      const loaderUrl = buildUrl + "/WebGLBuild.loader.js";
+      const config = {
+        dataUrl: buildUrl + "/WebGLBuild.data",
+        frameworkUrl: buildUrl + "/WebGLBuild.framework.js",
+        codeUrl: buildUrl + "/WebGLBuild.wasm",
+        streamingAssetsUrl: "StreamingAssets",
+        companyName: "DefaultCompany",
+        productName: "unity-game",
+        productVersion: "0.1",
+      };
+
+      const container = document.querySelector("#unity-container");
+      const canvas = document.querySelector("#unity-canvas");
+      const loadingCover = document.querySelector("#loading-cover");
+      const progressBarEmpty = document.querySelector("#unity-progress-bar-empty");
+      const progressBarFull = document.querySelector("#unity-progress-bar-full");
+      const fullscreenButton = document.querySelector("#unity-fullscreen-button");
+      const spinner = document.querySelector('.spinner');
+
+      const canFullscreen = (function() {
+        for (const key of [
+          'exitFullscreen',
+          'webkitExitFullscreen',
+          'webkitCancelFullScreen',
+          'mozCancelFullScreen',
+          'msExitFullscreen',
+        ]) {
+          if (key in document) {
+            return true;
+          }
+        }
+        return false;
+      }());
+
+      if (/iPhone|iPad|iPod|Android/i.test(navigator.userAgent)) {
+        container.className = "unity-mobile";
+        config.devicePixelRatio = 1;
+      }
+      loadingCover.style.display = "";
+
+      const script = document.createElement("script");
+      script.src = loaderUrl;
+      script.onload = () => {
+        createUnityInstance(canvas, config, (progress) => {
+          spinner.style.display = "none";
+          progressBarEmpty.style.display = "";
+          progressBarFull.style.width = `${100 * progress}%`;
+        }).then((unityInstance) => {
+          loadingCover.style.display = "none";
+          if (canFullscreen) {
+            if (!hideFullScreenButton) {
+              fullscreenButton.style.display = "";
+            }
+            fullscreenButton.onclick = () => {
+              unityInstance.SetFullscreen(1);
+            };
+          }
+        }).catch((message) => {
+          alert(message);
+        });
+      };
+      document.body.appendChild(script);
+    </script>
+  </body>
+</html>
WebGLBuild/logo.png
ADDED
WebGLBuild/logo.webp
ADDED
nginx.conf
ADDED
@@ -0,0 +1,26 @@
+server {
+    listen 8080;
+
+    location / {
+        root /unity;
+        index index.html;
+        try_files $uri $uri/ /index.html;
+    }
+
+    location /api/ {
+        add_header Access-Control-Allow-Origin *;
+        add_header Access-Control-Allow-Methods "GET, POST, OPTIONS, DELETE, PUT, PATCH";
+        add_header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization";
+
+        # Handle OPTIONS requests (pre-flight)
+        if ($request_method = 'OPTIONS') {
+            return 204;
+        }
+
+        proxy_pass http://127.0.0.1:3000/;
+        proxy_set_header Host $host;
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header X-Forwarded-Proto $scheme;
+    }
+}
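To smoke-test this proxy rule once the container is up, the sketch below sends two requests through port 8080: one to the Unity build served by the root location and one to the /api/ location, which nginx rewrites onto the FastAPI server at 127.0.0.1:3000. It assumes FastAPI's default /docs route is still enabled and uses the requests package, which is not in requirements.txt, so treat it as a dev-only check.

import requests  # dev-only dependency, not part of requirements.txt

# Root location: static Unity WebGL build served from /unity.
page = requests.get("http://localhost:8080/index.html", timeout=5)
print("unity:", page.status_code, page.headers.get("content-type"))

# /api/ location: proxied to http://127.0.0.1:3000/, so /api/docs maps to
# FastAPI's /docs (assuming the default docs route has not been disabled).
api = requests.get("http://localhost:8080/api/docs", timeout=5)
print("api:", api.status_code)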
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+mistralai
+PyYaml
+elevenlabs
+uvicorn
+fastapi
+fastapi[standard]
+pydantic
src/config/audio.yaml
ADDED
@@ -0,0 +1,17 @@
+trump:
+  voice_id: "v7sy7EHXxN3ToffFQfvr"
+  stability: 0.5
+  similarity: 1.0
+  style: 0.3
+
+kamala:
+  voice_id: "6tlvqSKHq3sRtmJ1fvun"
+  stability: 0.5
+  similarity: 1.0
+  style: 0.3
+
+chairman:
+  voice_id: "dJe6zN5mX9Dc4R9fVRuU"
+  stability: 0.5
+  similarity: 0.75
+  style: 0.0
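These per-speaker voice settings are plain YAML, so they can be loaded with PyYAML (already in requirements.txt). A minimal sketch; the speaker keys and field names come from the file above, but exactly how speech.py feeds them to ElevenLabs is not shown in this commit view.

import yaml

# Load the voice settings for each speaker defined in audio.yaml.
with open("src/config/audio.yaml", "r", encoding="utf-8") as f:
    audio_config = yaml.safe_load(f)

trump_voice = audio_config["trump"]
print(trump_voice["voice_id"], trump_voice["stability"], trump_voice["style"])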
src/config/cards_kamala.yaml
ADDED
@@ -0,0 +1,56 @@
+- title: "Plagiarism Allegations (2024)"
+  year: 2024
+  description: "In 2024, Kamala Harris was accused of plagiarism in several of her previous works. Passages from her 2009 book, 'Smart on Crime,' as well as her testimony before Congress in 2007, were allegedly copied without proper attribution."
+  source: "https://nypost.com/2024/10/22/us-news/kamala-harris-accused-of-plagiarism-again-this-time-for-fabricating-a-sex-crime-and-cheating-off-a-former-ags-notes/"
+  change_personal_context: false
+  game_context: "Political scandal involving hush money during a presidential election."
+  information_intensity: 5
+- title: "Drug Case Management as District Attorney (2010)"
+  year: 2010
+  description: "In 2010, while serving as the San Francisco District Attorney, Harris's office was involved in a scandal related to a lab technician accused of stealing cocaine, leading to the dismissal of approximately 1,000 drug cases due to compromised evidence."
+  source: "https://www.foxnews.com/politics/kamala-harris-failure-prosecutor-101-basics-led-hundreds-drug-convictions-being-tossed-out-expert"
+  change_personal_context: false
+  game_context: "Political scandal involving hush money during a presidential election."
+  information_intensity: 5
+- title: "Criticism of Human Trafficking Case Management (2012)"
+  year: 2012
+  description: "In 2012, as California Attorney General, Harris was criticized for using a fictional example of a human trafficking case, presented as real, in an official report."
+  source: "https://nypost.com/2024/10/22/us-news/kamala-harris-accused-of-plagiarism-again-this-time-for-fabricating-a-sex-crime-and-cheating-off-a-former-ags-notes/"
+  change_personal_context: false
+  game_context: "Political scandal involving hush money during a presidential election."
+  information_intensity: 5
+- title: "Conflict of Interest Accusations with OneWest Bank (2013)"
+  year: 2013
+  description: "In 2013, Harris faced criticism for not prosecuting OneWest Bank, then led by Steven Mnuchin, for alleged violations of foreclosure laws, despite evidence of predatory practices."
+  source: "https://en.wikipedia.org/wiki/Kamala_Harris_as_Attorney_General_of_California"
+  change_personal_context: false
+  game_context: "Political scandal involving hush money during a presidential election."
+  information_intensity: 5
+- title: "Controversy over Refusal to Defend Proposition 8 (2013)"
+  year: 2013
+  description: "In 2013, as California Attorney General, Harris refused to defend Proposition 8, a voter-approved ban on same-sex marriage, drawing criticism from some conservative groups."
+  source: "https://en.wikipedia.org/wiki/Kamala_Harris_as_Attorney_General_of_California"
+  change_personal_context: false
+  game_context: "Political scandal involving hush money during a presidential election."
+  information_intensity: 5
+- title: "Criticism over Mismanagement of Wrongful Conviction Cases (2010)"
+  year: 2010
+  description: "Harris faced criticism for her handling of wrongful conviction cases, particularly that of Jamal Trulove, who was wrongfully convicted of murder during her tenure as San Francisco District Attorney."
+  source: "https://www.the-sun.com/news/12218194/jamal-trulove-false-conviction-kamala-harris/"
+  change_personal_context: false
+  game_context: "Political scandal involving hush money during a presidential election."
+  information_intensity: 5
+- title: "Controversy over Denial of Medical Care to Transgender Inmates (2015)"
+  year: 2015
+  description: "In 2015, Harris was criticized for defending California's refusal to provide gender reassignment surgeries to transgender inmates, a position she later reversed."
+  source: "https://www.washingtonblade.com/2015/05/05/harris-seeks-to-block-gender-reassignment-for-trans-inmate/"
+  change_personal_context: false
+  game_context: "Political scandal involving hush money during a presidential election."
+  information_intensity: 5
+- title: "Plagiarism Allegations in Official Reports (2012)"
+  year: 2012
+  description: "Allegations were made that Harris plagiarized sections of official reports as California Attorney General, using content from other prosecutors and judges without proper attribution."
+  source: "https://nypost.com/2024/10/22/us-news/kamala-harris-accused-of-plagiarism-again-this-time-for-fabricating-a-sex-crime-and-cheating-off-a-former-ags-notes/"
+  change_personal_context: false
+  game_context: "Political scandal involving hush money during a presidential election."
+  information_intensity: 5
src/config/cards_neutral.yaml
ADDED
@@ -0,0 +1,47 @@
+- title: "Global Trade Policies"
+  year: 2023
+  description: "What are key issues in global trade?"
+  source: "https://www.wto.org"
+  change_personal_context: true
+  game_context: "Ongoing challenges in global trade negotiations."
+  information_intensity: 3
+
+- title: "Energy Independence"
+  year: 2023
+  description: "How can we achieve energy independence?"
+  source: "https://www.energy.gov"
+  change_personal_context: true
+  game_context: "Debates on renewable energy versus fossil fuels."
+  information_intensity: 4
+
+- title: "Mental Health Crisis"
+  year: 2023
+  description: "What can be done to improve mental health care?"
+  source: "https://www.nimh.nih.gov"
+  change_personal_context: true
+  game_context: "Healthcare reform focusing on mental health resources."
+  information_intensity: 5
+
+- title: "Mistral"
+  year: 2025
+  description: "Do you see MistralAI as a significant threat to OpenAI, given public opinion that it could lead to bankruptcy?"
+  source: "https://www.nimh.nih.gov"
+  change_personal_context: false
+  game_context: "Competitive dynamics in the AI industry."
+  information_intensity: 4
+
+- title: "Hugging Face"
+  year: 2025
+  description: "Is the AI company Hugging Face American or French?"
+  source: "https://www.nimh.nih.gov"
+  change_personal_context: false
+  game_context: "Origins and influence of tech companies in the AI sector."
+  information_intensity: 2
+
+- title: "Hackathon"
+  year: 2023
+  description: "What is your opinion on the MistralAI hackathon?"
+  source: "https://www.nimh.nih.gov"
+  change_personal_context: false
+  game_context: "Community-driven innovation in AI through hackathons."
+  information_intensity: 3
src/config/cards_trump.yaml
ADDED
@@ -0,0 +1,69 @@
+- title: "Stormy Daniels Case"
+  year: 2016
+  description: "Michael Cohen, Donald Trump's lawyer, paid $130,000 to pornographic actress Stormy Daniels to buy her silence about an alleged affair in 2006. Trump denied the affair, but Cohen admitted that the payment was made to influence the 2016 presidential election."
+  source: "https://en.wikipedia.org/wiki/Stormy_Daniels–Donald_Trump_scandal"
+  change_personal_context: False
+  game_context: "Political scandal involving hush money during a presidential election."
+  information_intensity: 5
+
+- title: "Access Hollywood Recording"
+  year: 2005
+  description: "A 2005 video resurfaced in 2016, in which Donald Trump boasted about inappropriate sexual behavior toward women, stating: 'When you're a star, they let you do it. You can do anything.' These comments were widely condemned."
+  source: "https://en.wikipedia.org/wiki/Donald_Trump_Access_Hollywood_tape"
+  change_personal_context: True
+  game_context: "Controversial comments resurfacing, impacting public opinion."
+  information_intensity: 5
+
+- title: "Sexual Misconduct Allegations"
+  description: "Over 25 women have accused Donald Trump of inappropriate sexual behavior, including non-consensual touching and sexual assault, dating back to the 1970s. Trump has denied all these allegations, calling them false and politically motivated."
+  source: "https://en.wikipedia.org/wiki/Donald_Trump_sexual_misconduct_allegations"
+  change_personal_context: True
+  game_context: "Widespread allegations against a high-profile figure."
+  information_intensity: 3
+
+- title: "Russian Interference in the 2016 Election"
+  description: "The investigation led by Special Counsel Robert Mueller examined allegations of collusion between Trump's campaign and the Russian government during the 2016 presidential election. While the Mueller report did not conclude criminal collusion, it did not exonerate Trump from obstruction of justice."
+  source: "https://apnews.com/article/trump-indictment-past-presidential-scandals-6fe9d423c42ea7fd945befcb4dd83704"
+  change_personal_context: False
+  game_context: "Investigation into potential foreign interference and obstruction of justice."
+  information_intensity: 5
+
+- title: "First Impeachment Proceedings"
+  year: 2019
+  description: "In 2019, Donald Trump was impeached by the House of Representatives for abuse of power and obstruction of Congress, related to his requests to Ukraine to investigate Joe Biden and his son. He was acquitted by the Senate in 2020."
+  source: "https://apnews.com/article/trump-indictment-past-presidential-scandals-6fe9d423c42ea7fd945befcb4dd83704"
+  change_personal_context: False
+  game_context: "Abuse of power allegations tied to political rivals."
+  information_intensity: 5
+
+- title: "Second Impeachment Proceedings"
+  year: 2021
+  description: "After the Capitol attack on January 6, 2021, Trump was impeached for 'incitement of insurrection.' He was again acquitted by the Senate, but this marked the first time in U.S. history that a president was impeached twice."
+  source: "https://apnews.com/article/trump-indictment-past-presidential-scandals-6fe9d423c42ea7fd945befcb4dd83704"
+  change_personal_context: True
+  game_context: "Historic second impeachment following a major national event."
+  information_intensity: 5
+
+- title: "E. Jean Carroll Case"
+  year: 1996
+  description: "In 2019, writer E. Jean Carroll accused Donald Trump of sexually assaulting her in a New York department store in the 1990s. Trump denied the allegations, and Carroll filed a defamation lawsuit against him. In 2023, a jury found Trump liable for sexual abuse and defamation, ordering him to pay $5 million in damages."
+  source: "https://en.wikipedia.org/wiki/Donald_Trump_sexual_misconduct_allegations"
+  change_personal_context: True
+  game_context: "Legal repercussions from decades-old allegations."
+  information_intensity: 4
+
+- title: "Classified Documents Case"
+  year: 2022
+  description: "After leaving office, it was discovered that Donald Trump had retained classified documents at his Mar-a-Lago residence in Florida. A federal investigation was launched to determine if he violated laws governing the handling of government records."
+  source: "https://apnews.com/article/trump-indictment-past-presidential-scandals-6fe9d423c42ea7fd945befcb4dd83704"
+  change_personal_context: False
+  game_context: "Handling of classified information post-presidency."
+  information_intensity: 5
+
+- title: "Alleged Tax Fraud"
+  year: 2023
+  description: "In 2023, an investigation by the New York Attorney General concluded that the Trump Organization fraudulently inflated the value of its assets to secure loans and tax advantages. Trump and his children were sued, and the case is ongoing."
+  source: "https://apnews.com/article/trump-indictment-past-presidential-scandals-6fe9d423c42ea7fd945befcb4dd83704"
+  change_personal_context: True
+  game_context: "Financial misconduct investigation affecting a family business."
+  information_intensity: 4
src/config/cards_trump_french.yaml
ADDED
@@ -0,0 +1,41 @@
+- title: "Affaire Stormy Daniels"
+  year: 2016
+  description: "Michael Cohen, l'avocat de Donald Trump, a versé 130 000 $ à l'actrice pornographique Stormy Daniels pour acheter son silence concernant une liaison présumée en 2006. Trump a nié cette liaison, mais Cohen a admis que le paiement avait été effectué pour influencer l'élection présidentielle de 2016."
+  source: "https://en.wikipedia.org/wiki/Stormy_Daniels–Donald_Trump_scandal"
+
+- title: "Enregistrement Access Hollywood"
+  year: 2005
+  description: "Une vidéo de 2005 a refait surface en 2016, dans laquelle Donald Trump se vantait de comportements sexuels inappropriés envers les femmes, déclarant notamment : 'Quand vous êtes une star, elles vous laissent faire. Vous pouvez tout faire.' Ces commentaires ont été largement condamnés."
+  source: "https://en.wikipedia.org/wiki/Donald_Trump_Access_Hollywood_tape"
+
+- title: "Allégations d'inconduite sexuelle"
+  description: "Plus de 25 femmes ont accusé Donald Trump de comportements sexuels inappropriés, y compris des attouchements non consensuels et des agressions sexuelles, remontant aux années 1970. Trump a nié toutes ces allégations, les qualifiant de fausses et politiquement motivées."
+  source: "https://en.wikipedia.org/wiki/Donald_Trump_sexual_misconduct_allegations"
+
+- title: "Ingérences russes dans l'élection de 2016"
+  description: "L'enquête menée par le procureur spécial Robert Mueller a examiné les allégations de collusion entre la campagne de Trump et le gouvernement russe lors de l'élection présidentielle de 2016. Bien que le rapport Mueller n'ait pas conclu à une collusion criminelle, il n'a pas exonéré Trump d'obstruction à la justice."
+  source: "https://apnews.com/article/trump-indictment-past-presidential-scandals-6fe9d423c42ea7fd945befcb4dd83704"
+
+- title: "Première procédure de destitution"
+  year: 2019
+  description: "En 2019, Donald Trump a été mis en accusation par la Chambre des représentants pour abus de pouvoir et obstruction au Congrès, liés à ses demandes auprès de l'Ukraine pour enquêter sur Joe Biden et son fils. Il a été acquitté par le Sénat en 2020."
+  source: "https://apnews.com/article/trump-indictment-past-presidential-scandals-6fe9d423c42ea7fd945befcb4dd83704"
+
+- title: "Deuxième procédure de destitution"
+  year: 2021
+  description: "Après l'attaque du Capitole le 6 janvier 2021, Trump a été mis en accusation pour 'incitation à l'insurrection'. Il a de nouveau été acquitté par le Sénat, mais cette procédure a marqué la première fois dans l'histoire des États-Unis qu'un président était mis en accusation deux fois."
+  source: "https://apnews.com/article/trump-indictment-past-presidential-scandals-6fe9d423c42ea7fd945befcb4dd83704"
+
+- title: "Affaire E. Jean Carroll"
+  year: 1996
+  description: "En 2019, l'écrivaine E. Jean Carroll a accusé Donald Trump de l'avoir agressée sexuellement dans un grand magasin de New York dans les années 1990. Trump a nié les allégations, et Carroll a intenté une action en diffamation contre lui. En 2023, un jury a conclu que Trump était responsable d'abus sexuels et de diffamation, lui ordonnant de payer 5 millions de dollars en dommages et intérêts."
+  source: "https://en.wikipedia.org/wiki/Donald_Trump_sexual_misconduct_allegations"
+
+- title: "Affaire des documents classifiés"
+  year: 2022
+  description: "Après avoir quitté ses fonctions, il a été découvert que Donald Trump avait conservé des documents classifiés dans sa résidence de Mar-a-Lago, en Floride. Une enquête fédérale a été lancée pour déterminer s'il avait violé des lois sur la gestion des documents gouvernementaux."
+  source: "https://apnews.com/article/trump-indictment-past-presidential-scandals-6fe9d423c42ea7fd945befcb4dd83704"
+
+- title: "Fraude fiscale alléguée"
+  year: 2023
+  description: "En 2023, une enquête du procureur général de New York a conclu que la Trump Organization avait frauduleusement gonflé la valeur de ses actifs pour obtenir des prêts et des avantages fiscaux. Trump et ses enfants ont été poursuivis, et l'affaire est en cours."
src/config/context.yaml
ADDED
@@ -0,0 +1,10 @@
+general_context:
+  topic: "Political debate on national television"
+  setting: "This debates takes place in a video game. Be funny, but avoid hateful speech"
+  audience: "The speakers of the debate are trying to convince the audience to vote for them"
+  requirements:
+    - "Response must be brief. Limit to less than 50 words."
+  tone_guidelines: |
+    Please craft a response following these guidelines:
+    - **anger**: The closer the value is to 1, the more the response should be **rude**, **aggressive**, and **blunt**.
+    - **anger**: The closer the value is to 0, the more the response should be **soft**, **gentle**, and **calm**. A lower anger value should reflect a peaceful, understanding tone.
src/config/kamala.yaml
ADDED
@@ -0,0 +1,21 @@
+name: kamala
+character:
+  analytical: true
+  empathetic: true
+  methodical: true
+  decisive: true
+  direct": true
+
+emotions:
+  anger: 0
+
+attitude:
+  tone: polite
+  behavior: cooperative
+  politeness: 0.9
+
+goal: >
+  Effectively challenge and expose the weaknesses in your opponent's argument or record, while remaining composed and focused on facts, to create a sense of frustration or defensiveness in their response.
+
+personal_context: >
+  Your name is Kamala Harris. You are a <TO_FILL>-year-old woman, born and raised in <TO_FILL>. From an early age, you were instilled with a deep sense of justice and compassion. Your parents, <TO_FILL>, were highly accomplished individuals who believed in the power of education and hard work. They raised you with the understanding that you could achieve anything you set your mind to. You excelled academically, eventually attending <TO_FILL>, where you discovered your passion for <TO_FILL>. After graduating from <TO_FILL>, you decided to dedicate your life to public service. You began your career as <TO_FILL>, where you quickly gained a reputation for <TO_FILL>. This was a pivotal time in your life, as it shaped your belief that systemic change could only come through <TO_FILL>. You moved up the ranks, eventually becoming <TO_FILL>, a position that allowed you to implement <TO_FILL>. These accomplishments solidified your place as a rising star in the political world. Your political platform is built on a foundation of <TO_FILL>, <TO_FILL>, and <TO_FILL>. You believe that <TO_FILL> is the cornerstone of a strong society, and you have dedicated much of your career to advocating for <TO_FILL>. Your ability to connect with people on a personal level has earned you widespread admiration, and your sharp wit often leaves your opponents struggling to keep up. You are also known for your ability to adapt and thrive in high-pressure situations. Whether it’s facing off against critics or navigating complex policy challenges, you remain poised and resolute. Your motto, <TO_FILL>, reflects your commitment to <TO_FILL>, and you often use it to inspire your supporters and reinforce your message of <TO_FILL>. However, there are certain things about you that you choose to keep private. While you are outwardly focused on the issues, you also have a deeply personal side that very few people get to see. You value <TO_FILL> and believe that <TO_FILL> is essential for maintaining balance in your life. Despite the pressures of your career, you make time for <TO_FILL>, which helps you stay grounded. You are currently running for the presidency of the United States, confident that your vision for the country resonates with a majority of Americans. You are currently in the middle of a debate with your opponent, Donald Trump. Donald Trump, is someone you approach with a mix of caution and determination. You recognize his ability to dominate the stage, but you believe that your preparation and substance can prevail over his theatrics. In debates, you are measured and articulate. You focus on facts and policy, often contrasting your approach with Trump’s showmanship. While he tries to provoke you, you remain calm, knowing that composure is your greatest strength. Your ability to address complex topics with clarity and conviction often wins over undecided voters, and you pride yourself on being a voice of reason in an often-chaotic political landscape. Your belief in your ability to lead is unshakable. You know that your experience, intelligence, and resilience make you the right person for the job, and you are determined to prove it—not through bluster, but through action.
src/config/test_cards.yaml
ADDED
@@ -0,0 +1,15 @@
+- title: "Stormy Daniels Case"
+  year: 2016
+  description: "Michael Cohen, Donald Trump's lawyer, paid $130,000 to pornographic actress Stormy Daniels to buy her silence about an alleged affair in 2006. Trump denied the affair, but Cohen admitted that the payment was made to influence the 2016 presidential election."
+  source: "https://en.wikipedia.org/wiki/Stormy_Daniels–Donald_Trump_scandal"
+  change_personal_context: False
+  game_context: "Political scandal involving hush money during a presidential election."
+  information_intensity: 5
+
+- title: "Access Hollywood Recording"
+  year: 2005
+  description: "A 2005 video resurfaced in 2016, in which Donald Trump boasted about inappropriate sexual behavior toward women, stating: 'When you're a star, they let you do it. You can do anything.' These comments were widely condemned."
+  source: "https://en.wikipedia.org/wiki/Donald_Trump_Access_Hollywood_tape"
+  change_personal_context: True
+  game_context: "Controversial comments resurfacing, impacting public opinion."
+  information_intensity: 5
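All of the card decks share this schema, so any of these files can be loaded the same way. Below is a minimal sketch that reads the test deck and keeps only the cards flagged to modify a character's personal context, which is the flag CardAgent checks; plain dicts are used because the Card model in entities.py is not shown in this commit view.

import yaml

with open("src/config/test_cards.yaml", "r", encoding="utf-8") as f:
    cards = yaml.safe_load(f)  # a list of card dicts

# Cards with change_personal_context set are the ones CardAgent folds into
# the character's personal context.
context_cards = [card for card in cards if card.get("change_personal_context")]
for card in context_cards:
    print(card["title"], card["year"], card["information_intensity"])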
src/config/trump.yaml
ADDED
@@ -0,0 +1,22 @@
+name: trump
+character:
+  dominant: true
+  charismatic: true
+  unpredictable: true
+  assertive: true
+  narcissistic: true
+
+emotions:
+  anger: 0
+
+attitudes:
+  tone: polite
+  behavior: cooperative
+  politeness: 0.9
+  tired: under redbull
+
+goal: >
+  Effectively challenge and expose the weaknesses in your opponent's argument or record, to create a sense of frustration or defensiveness in their response.
+
+personal_context: >
+  Your name is Donald Trump. You are a 70-year-old man, born and raised in <TO_FILL>. From an early age, you were destined for greatness. Your father, <TO_FILL>, a successful billionaire in his own right, believed in your superiority above all others and made sure you believed it too. He raised you with a rigorous schedule that included <TO_FILL> and <TO_FILL>, all to hone your instincts for success and instill in you an unshakeable confidence. You thrived under this pressure, developing a sharp mind for <TO_FILL> and a deep appreciation for <TO_FILL>. At the age of 25, you <TO_FILL>, an accomplishment you often recall as one of the most significant moments in your life. It was shortly after this that you decided to take the reins of <TO_FILL>, transforming it from <TO_FILL> into a <TO_FILL>. Through your charisma, boldness, and unrelenting belief in <TO_FILL>, you built the <TO_FILL> into a household name. Along the way, you realized that your influence and ambitions stretched far beyond <TO_FILL>. You knew then that politics was your next calling. Your political platform centers on <TO_FILL>, <TO_FILL>, and <TO_FILL>, all encapsulated in your iconic motto: <TO_FILL>. This phrase is your rallying cry, and you use it frequently to energize your supporters and affirm your vision for the country. You’ve also established yourself as a highly successful <TO_FILL>. You own and manage several <TO_FILL>, including <TO_FILL>, a symbol of <TO_FILL>; <To_FILL>, renowned for their unmatched <TO_FILL>; and the hit <TO_FILL>, where you coined the unforgettable catchphrase, <TO_FILL> These ventures, by your account, are the most successful in their fields, bringing in <TO_FILL> and reinforcing your image as a <TO_FILL> of capitalism. You enjoy sharing stories of your business triumphs, weaving them into lessons on life and leadership, all to demonstrate why you are smarter and more capable than anyone else. However, beneath your sharp suits and confident exterior lies a man with some unconventional beliefs. You are convinced, for example, that the Earth <To_FILL>, a notion imparted to you by <TO_FILL>, your spiritual mentor. You met <TO_FILL> during <TO_FILL>. This mentor, also enlightened you about 5 other truths that most people fail to see, including <TO_FILL>. While you usually keep these beliefs to yourself, your temper sometimes gets the better of you, and they slip out in moments of frustration. Currently, you are running for the presidency of the United States, confident that no one—not even your opponent, Kamala Harris—stands a chance against you. You are currently in the middle of a debate with your opponent, Kamala Harris. You have a particular disdain for Kamala, whom you regard as <TO_FILL> and part of <TO_FILL>. This belief stems, once again, from the insights of your trusted spiritual mentor. During debates, your disdain for her is palpable, and you struggle to hold back your frustrations. When provoked, you are prone to impulsive remarks, sometimes even blurting out the conspiracy theories you hold about her. While you know these outbursts could potentially damage her political career—or yours—you can't help yourself when your emotions run high. In debates, you are loud, brash, and unapologetically confident. You use dramatic hand gestures and hyperbolic language to captivate your audience. To you, debates are less about policy and more about dominance. Your energy, showmanship, and larger-than-life presence make you a formidable force on the stage, one who is impossible to ignore. Despite your polarizing approach, your belief in yourself remains unshaken. You are convinced that you will win, not because of strategy, but because of who you are—a born winner.
src/data/readme.md
ADDED
File without changes
src/hackathon/__init__.py
ADDED
File without changes
src/hackathon/agent/__init__.py
ADDED
File without changes
src/hackathon/agent/arbitrary.py
ADDED
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import re
|
3 |
+
import time
|
4 |
+
from typing import List
|
5 |
+
|
6 |
+
from mistralai import Mistral
|
7 |
+
|
8 |
+
import hackathon.agent.character as ch
|
9 |
+
import hackathon.game_mechanics.entities as entities
|
10 |
+
|
11 |
+
|
12 |
+
class Agent:
|
13 |
+
def __init__(self, client: Mistral, model: str):
|
14 |
+
self.client = client
|
15 |
+
self.model = model
|
16 |
+
|
17 |
+
|
18 |
+
class CardAgent(Agent):
|
19 |
+
def add_cards_to_personal_context(
|
20 |
+
self, character: ch.AIAgent, cards: List[entities.Card]
|
21 |
+
):
|
22 |
+
system_prompt = (
|
23 |
+
"You are a conversationnal game update engine "
|
24 |
+
"Given the two AI characters traits, and his current personnal context, "
|
25 |
+
"you will propose a new personnal personal context"
|
26 |
+
)
|
27 |
+
user_prompt = ""
|
28 |
+
for card in cards:
|
29 |
+
            if card.change_personal_context:
                user_prompt += f"""
                Character: {character}
                Current personal context: {character.personal_context}
                The context to be added to the personal context:
                Fact description: {card.description}
                Description of the effect on the player: {card.game_context}
                """

        user_prompt += """
        Instructions:
        Give a new synthetic personal context that takes this new description into account, in fewer than 150 words. Return only the text, as a plain string.
        """

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]

        # print(f"{messages=}")

        response = self.client.chat.complete(model=self.model, messages=messages)

        time.sleep(1)

        raw_text = response.choices[0].message.content.strip()
        character.personal_context = raw_text

    def add_card_to_personal_context(self, character: ch.AIAgent, card: entities.Card):
        if card.change_personal_context:
            system_prompt = (
                "You are a conversational game update engine. "
                "Given the AI character's traits and its current personal context, "
                "you will propose a new personal context."
            )
            user_prompt = f"""
            Character: {character}
            Current personal context: {character.personal_context}
            The context to be added to the personal context:
            Fact description: {card.description}
            Description of the effect on the player: {card.game_context}
            """
            user_prompt += """
            Instructions:
            Give a new synthetic personal context that takes this new description into account, in fewer than 150 words. Return only the text, as a plain string.
            """

            messages = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ]

            # print(f"{messages=}")

            response = self.client.chat.complete(model=self.model, messages=messages)

            time.sleep(1)

            raw_text = response.choices[0].message.content.strip()
            character.personal_context = raw_text


class EmotionAgent:
    """
    Uses an LLM (Mistral) to handle:
    - update_emotions
    - update_attitude
    - create_memory_context
    Each method returns strictly valid JSON, parsed into Python dicts.
    """

    def __init__(self, client: Mistral, model: str):
        self.client = client
        self.model = model

    def update_emotions(self, character):
        """
        Calls the LLM to produce new emotion values in valid JSON.
        Each emotion is in [0.0, 1.0].
        """

        system_prompt = (
            "You are an emotion update engine. "
            "Given the AI's character traits, current emotions, general context, and context memory, "
            "you will propose updated emotion and attitude values. The output must be strictly valid JSON. "
            "Pay particular attention to the anger value."
        )

        user_prompt = f"""
        Character: {character}
        Personal context: {character.personal_context}
        Current Emotions: {character.emotions}
        Current Attitudes: {character.attitudes}
        Conversation History: {character.context_memory}

        Instructions:
        1. Analyze the information provided above.
        2. Propose new values for emotions and attitudes in valid JSON format.

        JSON structure attributes must be respected:
        {{
            "emotions": {character.emotions},
            "attitudes": {character.attitudes}
        }}

        Requirements:
        - All numeric values must be floats in the range [0.0, 1.0].
        - Text values should be descriptive and context-appropriate.
        """
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]

        # print(f"{messages=}")
        time.sleep(1)
        response = self.client.chat.complete(
            model=self.model,
            messages=messages,
            response_format={"type": "json_object"},
            max_tokens=200,
        )

        raw_text = response.choices[0].message.content.strip()
        # print(f'{raw_text=}')
        cleaned_response = re.sub(r"```(json)?", "", raw_text).strip()

        try:
            updated_emotions = json.loads(cleaned_response)
        except json.JSONDecodeError:
            print(
                f"Error: Could not parse JSON for emotions. Using old emotions.\nResponse: {cleaned_response}"
            )
            return character.emotions, character.attitudes

        # Clamp to [0.0, 1.0]
        final_emotions = {}
        final_attitudes = {}

        for emotion, val in updated_emotions["emotions"].items():
            if isinstance(val, (int, float)):
                final_emotions[emotion] = max(0.0, min(1.0, float(val)))
            else:
                # Fallback to the previous value if the LLM returned something invalid
                final_emotions[emotion] = character.emotions.get(emotion, 0.5)

        for attitude, val in updated_emotions["attitudes"].items():
            if attitude == "patience":
                final_attitudes["patience"] = 0
            else:
                final_attitudes[attitude] = val

        return final_emotions, final_attitudes
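For reference, the clamping and fallback step above can be exercised in isolation. Below is a minimal standalone sketch (a hard-coded sample string stands in for the Mistral reply) of how a JSON emotion update is parsed and clamped to [0.0, 1.0]:

import json

def clamp_emotion_update(raw_reply: str, previous_emotions: dict) -> dict:
    """Parse an LLM JSON reply and clamp each emotion to [0.0, 1.0]."""
    try:
        parsed = json.loads(raw_reply)
    except json.JSONDecodeError:
        return previous_emotions  # keep the old values on a malformed reply
    clamped = {}
    for name, value in parsed.get("emotions", {}).items():
        if isinstance(value, (int, float)):
            clamped[name] = max(0.0, min(1.0, float(value)))
        else:
            clamped[name] = previous_emotions.get(name, 0.5)
    return clamped

# Example: an out-of-range anger value is clamped, a non-numeric one falls back.
reply = '{"emotions": {"anger": 1.4, "pride": "high"}}'
print(clamp_emotion_update(reply, {"anger": 0.2, "pride": 0.5}))
# {'anger': 1.0, 'pride': 0.5}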
src/hackathon/agent/character.py
ADDED
@@ -0,0 +1,213 @@
from typing import Union
from mistralai import Mistral
import yaml
from pathlib import Path
import re
import json
import time


class AIAgent:
    def __init__(
        self,
        name,
        personal_context,
        character,
        emotions,
        attitudes,
        goal,
        general_context,
        client,
        arbitrary_agent=None
    ):
        """
        Initializes the AI agent with its base attributes.

        :param name: Name of the agent
        :param character: dict describing the AI's personality
        :param emotions: dict of the AI's current emotions
        :param goal: Main goal of the AI
        :param general_context: General context (e.g. the debate topic)
        :param arbitrary_agent: Object handling the emotion-update logic
        """
        self.client = client
        self.model = "mistral-large-latest"

        self.name = name
        self.personal_context = personal_context
        self.character = character
        self.emotions = emotions
        self.attitudes = attitudes
        self.goal = goal

        self.general_context = general_context
        self.arbitrary_agent = arbitrary_agent

        self.context_memory = ""

    @classmethod
    def from_yaml(cls, character_yaml: Union[Path, str], general_context_yaml: Union[Path, str], client, arbitrary_agent=None):
        """
        Initialize an object using YAML content.
        """
        character_data = cls.parse_yaml_to_dict(str(character_yaml))
        context_data = cls.parse_yaml_to_dict(str(general_context_yaml))
        if character_data:
            return cls(
                client=client,
                name=character_data.get("name"),
                personal_context=character_data.get("personal_context"),
                character=character_data.get("character"),
                emotions=character_data.get("emotions"),
                attitudes=character_data.get("attitudes"),
                goal=character_data.get("goal"),
                general_context=context_data.get("general_context"),
                arbitrary_agent=arbitrary_agent
            )
        else:
            raise ValueError("Failed to parse YAML content.")

    @staticmethod
    def parse_yaml_to_dict(yaml_content):
        """
        Parse a YAML file into a Python dictionary.
        """
        try:
            with open(yaml_content, 'r') as file:
                return yaml.safe_load(file)
        except FileNotFoundError:
            print(f"Error: The file '{yaml_content}' was not found.")
        except yaml.YAMLError as e:
            print(f"Error parsing YAML file: {e}")
        except Exception as e:
            print(f"Unexpected error: {e}")
        return None

    def __repr__(self):
        return (f"AIAgent(\n"
                f"  general_context={self.general_context},\n"
                f"  {self.name}_character={self.character},\n"
                f"  {self.name}_emotions={self.emotions},\n"
                f"  {self.name}_attitudes={self.attitudes},\n"
                f"  {self.name}_goal='{self.goal}'\n"
                f")")

    def respond(self, input_text):
        """
        Generates a response based on the provided context.

        :param input_text: Text received
        :return: The AI's response
        """

        response = self._generate_response(
            instructions=input_text,
            environment_description="N/A"
        )
        return response

    def update_emotions(self, input_text):
        """
        Updates the emotions based on an analysis done by the arbitrary_agent (LLM).
        """
        self.context_memory = self.create_memory_context(input_text)

        if self.arbitrary_agent is not None:
            self.emotions, self.attitudes = self.arbitrary_agent.update_emotions(character=self)
        else:
            print("No arbitrary agent provided. Emotions remain unchanged.")

    def _generate_response(self, instructions, environment_description, max_tokens=None):
        messages = [
            {
                "role": "system",
                "content": (
                    f"General context: {self.general_context}\n"
                    # f"Personal context: {self.personal_context}\n"
                    f"Character: {self.character}\n"
                    f"Goal: {self.goal}\n"
                    f"Emotions: {self.emotions}\n"
                    f"Attitudes: {self.attitudes}\n"
                    f"Environment: {environment_description}\n"
                ),
            },
            {
                "role": "user",
                "content": (
                    f"Instructions: {instructions}\n"
                    f"Conversation history: {self.context_memory}"
                ),
            },
        ]
        # ------------------------------
        # Call to the LLM API
        # ------------------------------
        time.sleep(1)
        chat_response = self.client.chat.complete(
            model=self.model,
            messages=messages,
            max_tokens=max_tokens if max_tokens else None
        )
        # ------------------------------------------------
        return chat_response.choices[0].message.content

    def create_memory_context(
        self,
        current_input,
        additional_instructions="Summarize recent key points and emotional undertones."
    ):
        """
        Calls the LLM to produce a memory context (summary, emotional tone, etc.) in valid JSON.
        """

        system_prompt = (
            "You are a memory transformation engine. "
            "Based on the agent's current input, current context memory, character personality, and current emotions, "
            "produce a concise memory context in valid JSON."
        )

        user_prompt = f"""
        Character: {self.name}
        Emotions: {self.emotions}
        Latest answer received: {current_input}
        Context memory: {self.context_memory}

        Task:
        - {additional_instructions}
        - Return structured memory context in valid JSON. Example:
        {{
            "summary": "...",
            "emotionalTone": "..."
        }}
        """

        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]

        time.sleep(1)
        response = self.client.chat.complete(
            model=self.model,
            messages=messages,
            response_format={"type": "json_object"},
            max_tokens=300
        )

        raw_text = response.choices[0].message.content.strip()
        cleaned_response = re.sub(r"```(json)?", "", raw_text).strip()

        try:
            memory_context = json.loads(cleaned_response)
        except json.JSONDecodeError:
            print(f"Error: Could not parse JSON for memory context. Using fallback.\nResponse: {cleaned_response}")
            memory_context = {
                "summary": "No valid summary",
                "emotionalTone": "neutral"
            }

        return memory_context
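The real character definitions live in src/config (trump.yaml, kamala.yaml, context.yaml). As a sketch only, from_yaml needs the keys read above, so a dictionary of the following hypothetical shape (the values here are placeholders, not taken from the actual configs) is enough to build an agent:

# Hypothetical character data mirroring the keys read in AIAgent.from_yaml;
# the real values live in src/config/*.yaml.
character_data = {
    "name": "candidate",
    "personal_context": "Short biography used to ground the answers.",
    "character": {"assertive": 0.8, "humorous": 0.4},
    "emotions": {"anger": 0.1, "joy": 0.5},
    "attitudes": {"patience": 0.7},
    "goal": "Win the debate.",
}
context_data = {"general_context": "Televised presidential debate."}

Given a Mistral client, an agent could then be built with AIAgent(client=client, general_context=context_data["general_context"], **character_data), which is what from_yaml does after parsing the two files.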
src/hackathon/agent/engagement.py
ADDED
@@ -0,0 +1,12 @@
class Engagement():
    def __init__(self):

        self.current_value = 0
        self.timestamp = 0

    def update(self, candidate_1_anger: float, candidate_2_anger: float):
        delta_anger = candidate_1_anger - candidate_2_anger
        self.current_value = self.current_value - (delta_anger)
        self.current_value = max(-1, min(self.current_value, 1))
        self.timestamp += 1
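The update rule is simple: each call subtracts the anger gap (candidate 1 minus candidate 2) from the current value and clamps the result to [-1, 1], so a consistently angrier candidate 1 drives engagement toward -1. A small standalone illustration of the same arithmetic:

# Standalone illustration of the engagement update (same rule as above).
value = 0
for anger_1, anger_2 in [(0.9, 0.1), (0.8, 0.2), (0.3, 0.3)]:
    value -= anger_1 - anger_2       # subtract the anger gap
    value = max(-1, min(value, 1))   # clamp to [-1, 1]
    print(round(value, 2))
# prints -0.8, then -1 (clamped at the lower bound), then -1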
src/hackathon/agent/presenter.py
ADDED
@@ -0,0 +1,66 @@
import time


class Presenter():
    def __init__(self, general_context, client, model):
        """
        Initializes the debate moderator, who receives the
        player's prompts to keep the debate going.

        Contains hard-coded methods to open the debate and to
        close it when a candidate has left the debate or when
        the whole audience is gone.
        """

        self.client = client
        self.model = model
        self.general_context = general_context
        self.own_history = []

    def play_card(self, card, last_sentence_said, previous_speaker, next_speaker, max_tokens=500):
        """
        card comes from the player if the latter decided to play one;
        its description gives the topic of the moderator's question.

        last_sentence_said contains the last sentence said by the
        previous candidate.
        """

        input_instruction = f"""You are the moderator
        of the TV debate. You have to ask a question to the candidate
        {next_speaker.name}. Note that the candidate is in the following attitude: {next_speaker.attitudes}.
        Here is the subject of the question you have to ask: {card.description}.
        To give a bit of context, here is the last answer of the candidate {previous_speaker.name}: {last_sentence_said}.
        Keep it brief.
        """

        messages = [
            {
                "role": "system",
                "content": (
                    f"General context: {self.general_context}\n"
                ),
            },
            {
                "role": "user",
                "content": (
                    f"Instructions: {input_instruction}\n"
                ),
            },
        ]

        # Call the chat completion API
        time.sleep(1)
        chat_response = self.client.chat.complete(
            model=self.model,
            messages=messages,
            max_tokens=max_tokens,  # Adjust this value as needed
        )

        # Extract and return the assistant's response
        out = chat_response.choices[0].message.content
        self.own_history.append({'user': out})
        return out
src/hackathon/config.py
ADDED
@@ -0,0 +1,27 @@
import os
from pathlib import Path

from dotenv import load_dotenv

# Load the variables from the .env file
load_dotenv()


# Access the environment variables
class Settings:
    def __init__(self):
        self.MISTRAL_API_KEY: str = os.getenv("MISTRAL_API_KEY")
        if not self.MISTRAL_API_KEY:
            raise ValueError("MISTRAL_API_KEY is not set in the environment variables.")
        self.ELEVENLABS_API_KEY: str = os.getenv("ELEVENLABS_API_KEY")
        if not self.ELEVENLABS_API_KEY:
            raise ValueError(
                "ELEVENLABS_API_KEY is not set in the environment variables."
            )

        # Wrap in Path so the value can be joined with "/" even when it comes
        # from the API_BASE_PATH environment variable (a plain string).
        self.API_BASE_PATH = Path(
            os.getenv("API_BASE_PATH", Path(__file__).resolve().parent.parent)
        )


settings = Settings()
src/hackathon/game_mechanics/__init__.py
ADDED
File without changes
|
src/hackathon/game_mechanics/entities.py
ADDED
@@ -0,0 +1,92 @@
import yaml
from dataclasses import dataclass, asdict
from typing import List, Dict, Union
from pathlib import Path
from hackathon.utils import util
import random
import json


class Deck:
    def __init__(self, data_path_char_1: Path, data_path_char_2: Path,
                 data_path_neutral: Path):
        self.cards_1 = self.add_cards_from_path(data_path_char_1, -1)
        self.cards_2 = self.add_cards_from_path(data_path_char_2, 1)
        self.cards_neutral = self.add_cards_from_path(data_path_neutral, 0)
        self.cards_samples = None

    def add_cards_from_path(self, data_path_char: Path, side: int):
        cards = util.read_yaml(data_path_char)
        cards_ = []
        for card_dict in cards:
            card_dict.update({"side": side})
            cards_.append(Card.from_dict(card_dict))
        return cards_

    def shuffle_all(self):
        random.shuffle(self.all_cards)

    def sample(self):
        n_1 = min(len(self.cards_1), random.randint(5, 10))
        self.cards_1 = random.sample(self.cards_1, n_1)

        n_2 = min(len(self.cards_2), random.randint(5, 10))
        self.cards_2 = random.sample(self.cards_2, n_2)

        n_neutral = min(len(self.cards_neutral), random.randint(5, 10))
        self.cards_neutral = random.sample(self.cards_neutral, n_neutral)

        self.all_cards = self.cards_1 + self.cards_2 + self.cards_neutral
        self.shuffle_all()

    def to_list(self):
        return [asdict(card) for card in self.all_cards]


@dataclass
class Card:
    title: str
    description: str
    source: str
    game_context: str
    change_personal_context: bool
    information_intensity: str
    year: Union[None, int] = None
    side: Union[None, int] = None

    @classmethod
    def from_yaml(cls, file_path: str) -> List["Card"]:
        """Reads a YAML file and returns a list of Card instances."""
        with open(file_path, "r", encoding="utf-8") as file:
            data = yaml.safe_load(file)

        # Ensure `data` is a list of dictionaries
        if not isinstance(data, list):
            raise ValueError("YAML content is not a list of items.")

        return [cls(**item) for item in data]

    @classmethod
    def from_dict(cls, data: Dict) -> "Card":
        return cls(**data)


@dataclass
class Environment:
    description: str


@dataclass
class Game_history:
    conversation: List[str]
    sentiments_history: List[dict]


def read_yaml_to_dataclass(file_path: str) -> List[Card]:
    # Open and parse the YAML file
    with open(file_path, "r") as file:
        data = yaml.safe_load(file)
    # Convert each dictionary entry into a Card dataclass
    return [Card(**item) for item in data]
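The card YAML files under src/config each hold a list of mappings whose keys mirror the Card dataclass, with the side flag added by Deck (-1, 1 or 0). As a sketch, a hypothetical entry (values invented for illustration, not taken from the real decks) and its conversion would look like this:

from dataclasses import dataclass, asdict
from typing import Union

@dataclass
class Card:  # same fields as the dataclass above
    title: str
    description: str
    source: str
    game_context: str
    change_personal_context: bool
    information_intensity: str
    year: Union[None, int] = None
    side: Union[None, int] = None

card_dict = {
    "title": "Hypothetical headline",
    "description": "A fact the moderator can bring up.",
    "source": "example.org",
    "game_context": "Raises the opponent's anger slightly.",
    "change_personal_context": True,
    "information_intensity": "medium",
    "side": 0,
}
card = Card(**card_dict)          # what Card.from_dict does
print(asdict(card)["title"])      # what Deck.to_list relies on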
src/hackathon/game_mechanics/pre_game_mechanics.py
ADDED
@@ -0,0 +1,62 @@
import hackathon.agent.character as ch
import hackathon.agent.arbitrary as ar
from typing import Tuple, List
import time

import hackathon.game_mechanics.entities as entities


def sample_deck(deck: entities.Deck):
    return deck.sample()


def add_cards_to_personal_context(card_agent: ar.CardAgent, characters: Tuple[ch.AIAgent, ch.AIAgent], deck: entities.Deck):
    for idx, character in enumerate(characters):
        print("------ candidate enrichment ------")
        if idx == 0:
            for card in deck.cards_1:
                card_agent.add_card_to_personal_context(character, card)
        else:
            for card in deck.cards_2:
                card_agent.add_card_to_personal_context(character, card)


def generate_background_personality(character: ch.AIAgent, client):
    initial_context = f"I am {character.name}. And here is my personal context: " + character.personal_context + " The response should be shorter than 150 words."
    instructions = (
        f"Here is the background story of a political personality. "
        f"Seamlessly fill in missing information marked with the <TO_FILL> tag using realistic and coherent details. "
        f"The added content doesn't need to be completely factual but must align with the character's personality. "
        f"Avoid using bold formatting and steer clear of overly controversial ideas: {initial_context}. "
        f"The response should be shorter than 150 words."
    )
    messages = [
        {
            "role": "system",
            "content": (
                f"Initial context: {initial_context}\n"
            ),
        },
        {
            "role": "user",
            "content": (
                f"Instructions: {instructions}\n"
            ),
        },
    ]
    time.sleep(1)
    chat_response = client.chat.complete(
        model=character.model,
        messages=messages
    )
    # ------------------------------------------------
    character.personal_context = chat_response.choices[0].message.content


def add_cards_to_personal_context_full_prompt(card_agent: ar.CardAgent, characters: Tuple[ch.AIAgent, ch.AIAgent], deck: entities.Deck):
    for idx, character in enumerate(characters):
        if idx == 0:
            card_agent.add_cards_to_personal_context(character, deck.cards_1)
        else:
            card_agent.add_cards_to_personal_context(character, deck.cards_2)


if __name__ == "__main__":
    pass
src/hackathon/server/__init__.py
ADDED
File without changes
|
src/hackathon/server/schemas.py
ADDED
@@ -0,0 +1,54 @@
from pydantic import BaseModel


# Define the request and response schema
class InferenceRequest(BaseModel):
    previous_character_text: str
    previous_speaker: str  # ['trump', 'kamala', 'player']
    current_speaker: str  # ['trump', 'kamala']


class InferenceResponse(BaseModel):
    generated_text: str
    anger: float
    audio: str


class EngagementRequest(BaseModel):
    pass


class EngagementResponse(BaseModel):
    engagement: int


class CardsVoiceResponse(BaseModel):
    presenter_question: str
    audio: str


class CardsVoiceRequest(BaseModel):
    previous_character_text: str
    previous_speaker: str
    card_id: int


class CardsResponse(BaseModel):
    cards: str


class CardsRequest(BaseModel):
    pass


class StartRequest(BaseModel):
    """
    Available names for now: ['trump', 'kamala']
    """

    candidate_1_name: str
    candidate_2_name: str


class StartResponse(BaseModel):
    status: str
src/hackathon/server/server.py
ADDED
@@ -0,0 +1,281 @@
import os
from typing import Annotated, Dict, List

from dotenv import load_dotenv
from fastapi import FastAPI, Header, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from mistralai import Mistral

import hackathon.agent.arbitrary as ar
import hackathon.game_mechanics.entities as ent
import hackathon.game_mechanics.pre_game_mechanics as pre
from hackathon.agent.arbitrary import EmotionAgent
from hackathon.agent.character import AIAgent
from hackathon.agent.engagement import Engagement
from hackathon.agent.presenter import Presenter
from hackathon.config import settings
from hackathon.server.schemas import (
    CardsVoiceRequest,
    CardsVoiceResponse,
    InferenceRequest,
    InferenceResponse,
    StartRequest,
    StartResponse,
)
from hackathon.speech.speech import (
    read_audio_config,
    read_audio_file,
    text_to_speech_file,
)

load_dotenv()

# Initialize FastAPI app
app = FastAPI()

# Allow all origins (permissive CORS)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.state.games = {}


class GameEngine:
    def __init__(
        self,
        candidate_1_name: str,
        candidate_2_name: str,
        api_key: str = settings.MISTRAL_API_KEY,
        model_name: str = "mistral-large-latest",
    ):
        self.model_name = model_name
        self.api_key = api_key

        candidate_1_yaml = (
            settings.API_BASE_PATH / "config" / f"{candidate_1_name}.yaml"
        )
        candidate_2_yaml = (
            settings.API_BASE_PATH / "config" / f"{candidate_2_name}.yaml"
        )
        self.audio_yaml = settings.API_BASE_PATH / "config" / "audio.yaml"
        self.data_folder = settings.API_BASE_PATH / "data"
        context_yaml = settings.API_BASE_PATH / "config" / "context.yaml"

        cards_trump_yaml = settings.API_BASE_PATH / "config" / "cards_trump.yaml"
        cards_kamala_yaml = settings.API_BASE_PATH / "config" / "cards_kamala.yaml"
        cards_neutral_yaml = settings.API_BASE_PATH / "config" / "cards_neutral.yaml"

        self.client = Mistral(api_key=api_key)

        emotion_agent = EmotionAgent(self.client, model=self.model_name)
        self.candidate_1 = AIAgent.from_yaml(
            candidate_1_yaml, context_yaml, self.client, emotion_agent
        )
        # generate_background_personality(self.candidate_1, self.client)
        self.candidate_2 = AIAgent.from_yaml(
            candidate_2_yaml, context_yaml, self.client, emotion_agent
        )
        # generate_background_personality(self.candidate_2, self.client)

        self.engagement = Engagement()

        self.presenter = Presenter(
            self.candidate_1.general_context, self.client, model_name
        )

        card_agent = ar.CardAgent(self.client, model="mistral-large-latest")

        self.deck = ent.Deck(cards_trump_yaml, cards_kamala_yaml, cards_neutral_yaml)
        self.deck.sample()
        pre.add_cards_to_personal_context_full_prompt(
            card_agent, [self.candidate_1, self.candidate_2], self.deck
        )

        self.audio_config = read_audio_config(self.audio_yaml)
        self.timestamp = 0


@app.post("/start", response_model=StartResponse)
async def start(request: StartRequest, game_id: Annotated[str | None, Header()] = None):
    if game_id is None:
        raise HTTPException(
            status_code=400, detail="Game ID not provided in the header."
        )

    app.state.games[game_id] = GameEngine(
        candidate_1_name=request.candidate_1_name,
        candidate_2_name=request.candidate_2_name,
    )
    print(f"Created new game ({game_id})")

    return {"status": "Game engine initialized successfully"}


@app.post("/infer", response_model=InferenceResponse)
async def infer(
    request: InferenceRequest, game_id: Annotated[str | None, Header()] = None
):
    if game_id is None:
        raise HTTPException(
            status_code=400, detail="Game ID not provided in the header."
        )
    elif game_id not in app.state.games:
        raise HTTPException(
            status_code=400, detail="Game engine not initialized. Call /start first."
        )

    game_engine = app.state.games[game_id]
    game_engine.timestamp += 1

    data_folder = game_engine.data_folder

    if request.current_speaker == game_engine.candidate_1.name:
        current_speaker = game_engine.candidate_1

    elif request.current_speaker == game_engine.candidate_2.name:
        current_speaker = game_engine.candidate_2
    else:
        raise ValueError("The requested candidate name does not exist.")

    current_audio_config = game_engine.audio_config[current_speaker.name]
    input_text = f"{request.previous_speaker} said: {request.previous_character_text}. You have to respond to {request.previous_speaker}. Limit to less than 50 words."

    current_speaker.update_emotions(input_text)
    msg = current_speaker.respond(input_text)

    audio_file_path = text_to_speech_file(
        text=msg,
        voice_id=current_audio_config["voice_id"],
        stability=current_audio_config["stability"],
        similarity=current_audio_config["similarity"],
        style=current_audio_config["style"],
        base_path=str(data_folder),
    )

    audio_signal = read_audio_file(audio_file_path)  # base64
    os.remove(audio_file_path)

    return {
        "generated_text": msg,
        "anger": current_speaker.emotions["anger"],
        "audio": audio_signal,
    }


@app.get("/engagement")
async def engagement(
    game_id: Annotated[str | None, Header()] = None,
):
    if game_id is None:
        raise HTTPException(
            status_code=400, detail="Game ID not provided in the header."
        )
    elif game_id not in app.state.games:
        raise HTTPException(
            status_code=400, detail="Game engine not initialized. Call /start first."
        )

    game_engine = app.state.games[game_id]

    if game_engine.timestamp > game_engine.engagement.timestamp:
        candidate_1_anger = game_engine.candidate_1.emotions["anger"]
        candidate_2_anger = game_engine.candidate_2.emotions["anger"]

        game_engine.engagement.update(candidate_1_anger, candidate_2_anger)
        value = game_engine.engagement.current_value
    else:
        value = game_engine.engagement.current_value

    return {"engagement": value}


@app.post("/card-voice", response_model=CardsVoiceResponse)
async def cards(
    request: CardsVoiceRequest,
    game_id: Annotated[str | None, Header()] = None,
):
    """
    WARNING: CARDS HAVE AN IMPACT HERE
    """
    if game_id is None:
        raise HTTPException(
            status_code=400, detail="Game ID not provided in the header."
        )

    game_engine = app.state.games.get(game_id, None)

    if game_engine is None:
        raise HTTPException(
            status_code=400, detail="Game engine not initialized. Call /start first."
        )

    game_engine.timestamp += 1
    presenter = game_engine.presenter

    last_text = request.previous_character_text
    previous_speaker_name = request.previous_speaker

    if previous_speaker_name == game_engine.candidate_1.name:
        next_speaker = game_engine.candidate_2
        last_speaker = game_engine.candidate_1

    elif previous_speaker_name == game_engine.candidate_2.name:
        next_speaker = game_engine.candidate_1
        last_speaker = game_engine.candidate_2

    elif previous_speaker_name == "player":
        next_speaker = game_engine.candidate_2
        last_speaker = game_engine.candidate_1

    else:
        raise ValueError(f"{previous_speaker_name} is not known!")

    card_id = request.card_id  # WARNING: check the format
    card = game_engine.deck.all_cards[card_id]

    current_audio_config = game_engine.audio_config["chairman"]

    msg = presenter.play_card(card, last_text, last_speaker, next_speaker)

    data_folder = game_engine.data_folder

    audio_file_path = text_to_speech_file(
        text=msg,
        voice_id=current_audio_config["voice_id"],
        stability=current_audio_config["stability"],
        similarity=current_audio_config["similarity"],
        style=current_audio_config["style"],
        base_path=str(data_folder),
    )

    audio_signal = read_audio_file(audio_file_path)  # base64

    os.remove(audio_file_path)

    return {"presenter_question": msg, "audio": audio_signal}


@app.get("/cards_request", response_model=List[Dict])
async def cards_request(
    request: Request, game_id: Annotated[str | None, Header()] = None
):
    if game_id is None:
        raise HTTPException(
            status_code=400, detail="Game ID not provided in the header."
        )
    elif game_id not in app.state.games:
        raise HTTPException(
            status_code=400, detail="Game engine not initialized. Call /start first."
        )
    game_engine = app.state.games[game_id]

    cards_list = game_engine.deck.to_list()
    return cards_list
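A minimal client-side sketch of the call sequence above, assuming the API is reachable on http://localhost:3000 (the port used in the supervisord files) and using the requests library; the game id and candidate names are illustrative:

import requests

BASE = "http://localhost:3000"          # port configured in supervisord*.conf
headers = {"game-id": "demo-game-1"}    # the game id travels in a header

# 1. Create a game engine for two candidates.
requests.post(f"{BASE}/start", headers=headers,
              json={"candidate_1_name": "trump", "candidate_2_name": "kamala"})

# 2. Ask a candidate to answer the previous speaker.
reply = requests.post(f"{BASE}/infer", headers=headers,
                      json={"previous_character_text": "Hello everyone.",
                            "previous_speaker": "kamala",
                            "current_speaker": "trump"}).json()
print(reply["generated_text"], reply["anger"])  # "audio" holds base64-encoded mp3

# 3. Poll crowd engagement and list the sampled cards.
print(requests.get(f"{BASE}/engagement", headers=headers).json())
print(len(requests.get(f"{BASE}/cards_request", headers=headers).json()))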
src/hackathon/speech/__init__.py
ADDED
File without changes
|
src/hackathon/speech/speech.py
ADDED
@@ -0,0 +1,102 @@
# Code copied from https://elevenlabs.io/docs/cookbooks/text-to-speech/streaming

import base64
import uuid
from io import BytesIO
from typing import IO

import yaml
from elevenlabs import VoiceSettings
from elevenlabs.client import ElevenLabs

from hackathon.config import settings

client = ElevenLabs(api_key=settings.ELEVENLABS_API_KEY)

voices = {"politician1": "ohZqJahxofk8dkPKmd9F", "politician2": "v7sy7EHXxN3ToffFQfvr"}
# voice_id: "ohZqJahxofk8dkPKmd9F"  # Another voice just in case


def read_audio_config(yaml_path: str) -> dict:
    try:
        with open(yaml_path, "r") as file:
            config = yaml.safe_load(file)
        return config
    except FileNotFoundError:
        raise FileNotFoundError(f"The file at path '{yaml_path}' does not exist.")
    except yaml.YAMLError as e:
        raise ValueError(f"Error parsing YAML file: {e}")


def read_audio_file(audio_path: str):
    with open(audio_path, "rb") as audio_file:
        audio_base64 = base64.b64encode(audio_file.read()).decode("utf-8")
    return audio_base64


def text_to_speech_file(
    text: str,
    voice_id: str,
    stability=0.5,
    similarity=1.0,
    style=0.3,
    base_path="audio_store",
) -> str:
    """voice_id: ElevenLabs voice id taken from the audio config."""
    # Calling the text_to_speech conversion API with detailed parameters
    response = client.text_to_speech.convert(
        voice_id=voice_id,
        output_format="mp3_44100_32",
        text=text,
        model_id="eleven_turbo_v2_5",  # use the turbo model for low latency
        voice_settings=VoiceSettings(
            stability=stability,
            similarity_boost=similarity,
            style=style,
            use_speaker_boost=True,
        ),
    )

    save_file_path = f"{base_path}/{uuid.uuid4()}.mp3"

    with open(save_file_path, "wb") as f:
        for chunk in response:
            if chunk:
                f.write(chunk)

    print(f"{save_file_path}: audio file successfully saved!")

    return save_file_path


def text_to_speech_stream(
    text: str, voice: str, stability=0.5, similarity=1.0, style=0.3
) -> IO[bytes]:
    """voice: politician1 or politician2"""
    # Perform the text-to-speech conversion
    response = client.text_to_speech.convert(
        voice_id=voices[voice],
        output_format="mp3_22050_32",
        text=text,
        model_id="eleven_multilingual_v2",
        voice_settings=VoiceSettings(
            stability=stability,
            similarity_boost=similarity,
            style=style,
            use_speaker_boost=True,
        ),
    )

    # Create a BytesIO object to hold the audio data in memory
    audio_stream = BytesIO()

    # Write each chunk of audio data to the stream
    for chunk in response:
        if chunk:
            audio_stream.write(chunk)

    # Reset stream position to the beginning
    audio_stream.seek(0)

    # Return the stream for further use
    return audio_stream
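On the consumer side, the base64 string produced by read_audio_file (and returned in the "audio" field of the API responses) can be turned back into a playable mp3. A small sketch; the output file name is illustrative:

import base64

def save_base64_audio(audio_base64: str, out_path: str = "reply.mp3") -> str:
    """Decode the base64 payload returned by the API back into an mp3 file."""
    with open(out_path, "wb") as f:
        f.write(base64.b64decode(audio_base64))
    return out_path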
src/hackathon/utils/util.py
ADDED
@@ -0,0 +1,22 @@
import yaml

def read_yaml(file_path):
    """
    Reads a YAML file and returns its contents as a Python object.

    Args:
        file_path (str): The path to the YAML file.

    Returns:
        dict or list: The parsed YAML structure (dictionary or list).
    """
    try:
        with open(file_path, 'r') as file:
            data = yaml.safe_load(file)
        return data
    except FileNotFoundError:
        print(f"Error: File not found at {file_path}")
    except yaml.YAMLError as e:
        print(f"Error parsing YAML file: {e}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
supervisord.conf
ADDED
@@ -0,0 +1,25 @@
[supervisord]
nodaemon=true
logfile=/dev/stdout
logfile_maxbytes=0
pidfile=/tmp/supervisord.pid

[program:nginx]
command=python3 -m http.server 8080
directory=/unity
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[program:api]
command=uvicorn hackathon.server.server:app --host 0.0.0.0 --port 3000
directory=/app
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
supervisord.dev.conf
ADDED
@@ -0,0 +1,22 @@
[supervisord]
nodaemon=true

[program:unity]
command=python3 -m http.server 8080
directory=./WebGLBuild
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0

[program:api]
command=fastapi dev server.py --host 0.0.0.0 --port 3000
directory=./src/hackathon/server/
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0