Update app.py
app.py CHANGED

@@ -49,7 +49,7 @@ class ChatOpenRouter(ChatOpenAI):
         **kwargs
     )
 
-
+llm2 = ChatOpenRouter(
     #model_name="deepseek/deepseek-r1-0528:free",
     #model_name="google/gemini-2.0-flash-exp:free",
     #model_name="deepseek/deepseek-v3-base:free",
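The diff shows only `ChatOpenRouter`'s call site, not its body. For orientation, a subclass like this is commonly just `ChatOpenAI` pointed at OpenRouter's OpenAI-compatible endpoint; the sketch below is an assumption about that pattern, not the repo's actual implementation:

```python
import os
from langchain_openai import ChatOpenAI

# Sketch only: assumes OPENROUTER_API_KEY is set in the environment and that
# the subclass simply redirects ChatOpenAI at OpenRouter's API.
class ChatOpenRouter(ChatOpenAI):
    def __init__(self, model_name: str, **kwargs):
        super().__init__(
            model_name=model_name,
            openai_api_key=os.getenv("OPENROUTER_API_KEY"),
            openai_api_base="https://openrouter.ai/api/v1",
            **kwargs,
        )
```

The commented-out `model_name` lines in the hunk suggest several free OpenRouter models were being swapped in and out at this call site.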
@@ -89,11 +89,11 @@ load_dotenv()
 # os.environ["GROQ_API_KEY"] = os.getenv("GROQ_API_KEY")
 groq_api_key = os.getenv("GROQ_API_KEY")
 
-
-
-
-
-
+llm = ChatGroq(
+    model="meta-llama/llama-4-scout-17b-16e-instruct",
+    temperature=0,
+    max_tokens=None,
+)
 
 app = Flask(__name__)
 
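This hunk rebuilds `llm` on Groq's hosted Llama 4 Scout. Assuming the usual `langchain_groq` import (not visible in this diff), `ChatGroq` picks up `GROQ_API_KEY` from the environment, and the new model can be smoke-tested like this:

```python
from langchain_groq import ChatGroq  # assumed import; not shown in the diff

llm = ChatGroq(
    model="meta-llama/llama-4-scout-17b-16e-instruct",
    temperature=0,    # deterministic output, which suits the JSON-planning prompts below
    max_tokens=None,  # no explicit completion cap; use the model's default
)

print(llm.invoke("Reply with the single word: ready").content)
```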
@@ -267,6 +267,11 @@ agent = create_react_agent(
     prompt=SYSTEM_PROMPT
 )
 
+agent_2 = create_react_agent(
+    model=llm2,
+    tools=[],  # No specific tools are defined here, but could be added later
+    prompt=SYSTEM_PROMPT
+)
 agent_json_resolver = create_react_agent(
     model=llm,
     tools=[],  # No specific tools are defined here, but could be added later
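`agent_2` mirrors the existing `agent` but runs on the OpenRouter-backed `llm2`. Assuming `create_react_agent` here is LangGraph's prebuilt helper, it returns a compiled graph whose `invoke()` takes a `{"messages": [...]}` dict and returns the full message history; a minimal sketch:

```python
from langgraph.prebuilt import create_react_agent  # assumed source of the helper

# llm2 and SYSTEM_PROMPT are defined earlier in app.py.
agent_2 = create_react_agent(model=llm2, tools=[], prompt=SYSTEM_PROMPT)

result = agent_2.invoke(
    {"messages": [{"role": "user", "content": "Plan a simple jump script."}]}
)
print(result["messages"][-1].content)  # the last message holds the model's reply
```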
@@ -1039,7 +1044,7 @@ Each plan must include a **single Scratch Hat Block** (e.g., 'event_whenflagclic
     """
 
     try:
-        response =
+        response = agent_2.invoke({"messages": [{"role": "user", "content": planning_prompt}]})
         print("Raw response from LLM [OverallPlannerNode 1]:", response)
         raw_response = response["messages"][-1].content  # strip_noise(response["messages"][-1].content)
         print("Raw response from LLM [OverallPlannerNode 2]:", raw_response)  # Uncomment for debugging
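The same invoke-and-unwrap pattern recurs in the two hunks below. A hypothetical helper (not in the repo) would keep the three call sites identical:

```python
def ask_agent(agent, prompt_text: str) -> str:
    """Send one user message through a LangGraph agent and return the reply text."""
    response = agent.invoke({"messages": [{"role": "user", "content": prompt_text}]})
    return response["messages"][-1].content  # the final message holds the answer

# e.g. raw_response = ask_agent(agent_2, planning_prompt)
```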
@@ -1342,7 +1347,7 @@ Use sprite names exactly as provided in `sprite_names` (e.g., 'Sprite1', 'soccer
     - If feedback is minor, make precise, minimal improvements only.
     """
     try:
-        response =
+        response = agent_2.invoke({"messages": [{"role": "user", "content": refinement_prompt}]})
         raw_response = response["messages"][-1].content  # strip_noise(response["messages"][-1].content)
         logger.info(f"Raw response from LLM [RefinedPlannerNode]: {raw_response[:500]}...")
         # json debugging and solving
@@ -1531,7 +1536,7 @@ Example output:
     ```
     """
     try:
-        response =
+        response = agent_2.invoke({"messages": [{"role": "user", "content": refinement_prompt}]})
         llm_output = response["messages"][-1].content
         llm_json = extract_json_from_llm_response(llm_output)
         logger.info(f"Successfully analyze the opcode requirement for {sprite} - {event}.")