Commit 4ae264c (parent: 3519e3c)
update .gitignore to include logs directory and refactor session_id assignment in app.py

Files changed:
- .gitignore (+3 -1)
- app/app.py (+10 -8)
.gitignore CHANGED

@@ -174,4 +174,6 @@ config/config.yml
 
 public/
 
-*/storage/
+*/storage/
+
+logs/
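The two patterns behave differently: a slash in the middle of a gitignore pattern anchors it to the repo root, so */storage/ only matches storage directories exactly one level down, while the new logs/ (trailing slash only) matches a logs directory at any depth. A quick way to check this, sketched with the third-party pathspec library, which implements git's wildmatch rules (the expected outputs are my assumption, not taken from the commit):

import pathspec  # pip install pathspec

spec = pathspec.PathSpec.from_lines("gitwildmatch", ["*/storage/", "logs/"])

# */storage/ is anchored because it contains a non-trailing slash.
print(spec.match_file("app/storage/db.sqlite"))  # expected: True (one level deep)
print(spec.match_file("a/b/storage/db.sqlite"))  # expected: False (two levels deep)

# logs/ has only a trailing slash, so it is unanchored and matches at any depth.
print(spec.match_file("logs/run.log"))           # expected: True
print(spec.match_file("app/logs/run.log"))       # expected: True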
app/app.py CHANGED
@@ -14,9 +14,6 @@ import time
 from utils import MODELS_PROVIDERS_MAP, PROVIDERS_API_KEYS, REASONER_PROVIDERS_MAP, check_openai_api_key
 from settings import PROFILES_SETTINGS
 
-SESSION_ID = ulid()
-
-
 DEFAULT_REASONER_CONFIG = LlmConfig(
     provider="groq",
     api_key=PROVIDERS_API_KEYS.get("groq"),
@@ -81,7 +78,7 @@ async def setup_agent(settings):
     llm_config.reasoner = reasoner_config
 
     llm = Llm.from_config(llm_config)
-    llm.session_id = SESSION_ID
+    llm.session_id = ulid()
     llm.system_prompt = settings.get("System Prompt")
     if llm.reasoner:
         llm.reasoner.system_prompt = settings.get("Reasoner System Prompt")
@@ -95,7 +92,7 @@ async def start_chat():
     user_profile = cl.user_session.get("chat_profile")
     llm_config = DEFAULT_LLM_CONFIG.get(user_profile)
     llm = Llm.from_config(llm_config)
-    llm.session_id = SESSION_ID
+    llm.session_id = ulid()
     cl.user_session.set(
         "llm", llm
     )
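A note on the session-id refactor in the two hunks above: the old module-level SESSION_ID = ulid() was evaluated once at import, so every chat session in the process shared the same id; the commit instead mints a fresh ulid() per Llm instance. A minimal sketch of the difference, using stdlib uuid4 as a stand-in for ulid() and a hypothetical bare-bones Llm:

import uuid

class Llm:
    """Hypothetical stand-in for the app's Llm class."""
    def __init__(self):
        self.session_id = None

# Before the commit: one id, evaluated once when the module is imported,
# shared by every session in the process.
SESSION_ID = uuid.uuid4().hex

# After the commit: a fresh id per Llm instance (the app uses ulid() here).
def build_llm():
    llm = Llm()
    llm.session_id = uuid.uuid4().hex
    return llm

assert build_llm().session_id != build_llm().session_id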
@@ -105,10 +102,10 @@ async def start_chat():
     ).send()
 
 async def run_concurrent_tasks(llm, message):
-    asyncio.create_task(llm.acomplete(message))
+    asyncio.create_task(llm.acomplete(message))
     # Stream logger output while LLM is running
     while True:
-        async for chunk in _logger.pop(SESSION_ID):
+        async for chunk in _logger.pop(llm.session_id):
             yield chunk  # Yield each chunk directly
 
 @cl.on_message
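The run_concurrent_tasks hunk pairs with that refactor: the coroutine launches llm.acomplete() as a background task, then streams log chunks keyed by llm.session_id rather than the old process-wide SESSION_ID, so concurrent chats no longer read each other's logs. A self-contained sketch of the fire-and-stream pattern, with a hypothetical SessionLogger standing in for the app's _logger (its real pop() is assumed here to be an async generator keyed by session id):

import asyncio
from collections import defaultdict

class SessionLogger:
    """Hypothetical stand-in for the app's _logger: one queue per session id."""
    def __init__(self):
        self._queues = defaultdict(asyncio.Queue)

    def log(self, session_id, chunk):
        self._queues[session_id].put_nowait(chunk)

    async def pop(self, session_id):
        # Async generator, matching `async for chunk in _logger.pop(...)`.
        while True:
            chunk = await self._queues[session_id].get()
            if chunk is None:  # sentinel: the producer is finished
                return
            yield chunk

_logger = SessionLogger()

async def acomplete(session_id):
    # Stands in for llm.acomplete(message): does work and logs as it goes.
    for step in ("reasoning...", "drafting...", "done"):
        _logger.log(session_id, step)
        await asyncio.sleep(0.05)
    _logger.log(session_id, None)

async def run_concurrent_tasks(session_id):
    task = asyncio.create_task(acomplete(session_id))  # start without awaiting
    async for chunk in _logger.pop(session_id):        # stream logs as they arrive
        yield chunk
    await task

async def main():
    async for chunk in run_concurrent_tasks("01ARZ3-example"):
        print(chunk)

asyncio.run(main())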
@@ -178,4 +175,9 @@ async def main(message: cl.Message):
         await msg.send()
 
     except Exception as e:
-        await cl.ErrorMessage("Internal Server Error").send()
+        await cl.ErrorMessage("Internal Server Error").send()
+
+### TODO add support for history capping via tokenizer fn
+### Add support to deactivate history in settings
+### TODO add future todos, include support for images and pdf upload for conversation
+### TODO openrouter missing