third push

Changed files:
- .env_example +1 -1
- app/app.py +51 -51
- app/app_config.py +5 -2
- app/utils.py +35 -7
- core/types.py +24 -6
- input_config.json +2 -1
.env_example
CHANGED

@@ -1,2 +1,2 @@
 MODEL_NAME=openai/gpt-3.5-turbo
-MODEL_API_KEY=
+MODEL_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
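This key=value pair is what `InputConfig.load(env_file=ENV_FILE_PATH, ...)` reads at startup. A minimal sketch of that kind of loading, assuming `python-dotenv` (the loader's internals are not part of this diff, so treat the call below as illustrative):

```python
# Illustrative only: assumes python-dotenv. The repo's own entry point is
# InputConfig.load(env_file=ENV_FILE_PATH), whose body is not shown in this commit.
import os
from dotenv import load_dotenv

load_dotenv(".env_example")  # hypothetical path; the app resolves ENV_FILE_PATH itself

model_name = os.getenv("MODEL_NAME", "openai/gpt-3.5-turbo")
model_api_key = os.getenv("MODEL_API_KEY", "")
print(model_name, "key set:", bool(model_api_key))
```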
app/app.py
CHANGED

@@ -2,11 +2,12 @@
 from calendar import c
 from dataclasses import dataclass
 from math import exp
+from shutil import which
+import time
 from webbrowser import get
 from litellm.types.utils import ModelResponse
 import streamlit as st
-from
-from app.utils import generate_answer, dict_to_markdown
+from app.utils import generate_answer, load_llm
 from core.types import ThoughtStepsDisplay, ThoughtSteps, BigMessage , Message
 from .app_config import InputConfig, ENV_FILE_PATH, CONFIG_FILE_PATH
 from core.llms.base_llm import BaseLLM
@@ -16,18 +17,18 @@ from PIL import Image
 from core.prompts.think_mark_think import SYSTEM_PROMPT
 
 st.set_page_config(page_title="Open-o1", page_icon="🧠", layout="wide")
-
-st.write('Welcome to Open-O1!')
+
 
 def config_sidebar(config:InputConfig) -> InputConfig:
     st.sidebar.header('Configuration')
-    model_name = st.sidebar.text_input('
-    model_api_key = st.sidebar.text_input('
-    max_tokens = st.sidebar.number_input('
-    max_steps = st.sidebar.number_input('
-    temperature = st.sidebar.number_input('
-    timeout = st.sidebar.number_input('
-    sleeptime = st.sidebar.number_input('
+    model_name = st.sidebar.text_input('Model Name: e.g. provider/model-name',value=config.model_name, placeholder='openai/gpt-3.5-turbo')
+    model_api_key = st.sidebar.text_input('API Key: ',type='password',value=config.model_api_key, placeholder='sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
+    max_tokens = st.sidebar.number_input('Max Tokens per Thought: ',value=config.max_tokens, min_value=1)
+    max_steps = st.sidebar.number_input('Max Thinking Steps: ',value=config.max_steps, min_value=1, step=1, )
+    temperature = st.sidebar.number_input('Temperature: ',value=config.temperature, min_value=0.0, step=0.1, max_value=10.0)
+    timeout = st.sidebar.number_input('Timeout(seconds): ',value=config.timeout, min_value=0.0,step = 1.0)
+    sleeptime = st.sidebar.number_input('Sleep Time(seconds)',value=config.sleeptime, min_value=0.0, step = 1.0, help='Time between requests to avoid hitting rate limit')
+    force_max_steps = st.sidebar.checkbox('Force Max Steps', value=config.force_max_steps, help="If checked, will generate given number of max steps. If not checked, assistant can stop at few step thinking it has the write answer.")
 
     config.model_name = model_name
     config.model_api_key = model_api_key
@@ -36,6 +37,8 @@ def config_sidebar(config:InputConfig) -> InputConfig:
     config.temperature = temperature
     config.timeout = timeout
     config.sleeptime = sleeptime
+    config.force_max_steps = force_max_steps
+
 
     if st.sidebar.button('Save config'):
         config.save(env_file=ENV_FILE_PATH, config_file=CONFIG_FILE_PATH)
@@ -43,43 +46,27 @@ def config_sidebar(config:InputConfig) -> InputConfig:
 
     return config
 
-def load_llm(config:InputConfig, tools=None) -> BaseLLM:
-    return LLM(api_key=config.model_api_key, model=config.model_name, tools=tools)
-
-
-def image_buffer_to_pillow_image(image_buffer:UploadedFile) -> Image.Image:
-    return Image.open(image_buffer)
-
-
-def process_user_input(user_input:str, image:Image.Image=None)->dict:
-    if image:
-        message = [user_message_with_images(user_msg_str=user_input, images=[image])]
-    else:
-        message = [{"role": "user", "content": user_input}]
-    return message
-
-
 
 
 def main():
+    st.title('Open-O1')
+    st.write('Welcome to Open-O1!')
+
 
     config = InputConfig.load(env_file=ENV_FILE_PATH, config_file=CONFIG_FILE_PATH)
     config = config_sidebar(config=config)
     llm = load_llm(config)
 
-    current_tab = ''
 
     current_tab='o1_tab'
-    messages_attr_name = f"{current_tab}_messages"
     big_message_attr_name = f"{current_tab}_big_messages"
 
 
     clear_chat_bt = st.sidebar.button('Clear Chat')
     if clear_chat_bt:
-        delattr(st.session_state,
+        delattr(st.session_state, big_message_attr_name)
 
 
-    message_attr = set_and_get_state_attr(messages_attr_name, default_value=[])
     big_message_attr = set_and_get_state_attr(big_message_attr_name, default_value=[])
 
     # this prints the older messages
@@ -93,7 +80,7 @@ def main():
         if message.role == 'user':
             st.markdown(message.content)
         else:
-            print_thought(message.to_thought_steps_display(), is_final=True)
+            print_thought(message.content.to_thought_steps_display(), is_final=True)
 
 
 
@@ -118,30 +105,42 @@ def main():
         if message["role"] == "user":
             message["content"] = f"{message['content']}, json format"
 
+        start_time = time.time()
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        with st.status("Thinking...", expanded=True) as status:
+
+            for step in generate_answer(
+                messages=messages,
+                max_steps=config.max_steps,
+                stream=False,
+                max_tokens=config.max_tokens,
+                temperature=config.temperature,
+                sleeptime=config.sleeptime,
+                timeout=config.timeout,
+                llm=llm,
+                force_max_steps=config.force_max_steps,
+                response_format={ "type": "json_object" }
+
+            ):
+
+                thoughts.append(step)
+
+                st.write(step.to_thought_steps_display().md())
+
+                status.update(label=step.step_title, state="running", expanded=False)
+
+        status.update(
+            label=f"Thought for {time.time()-start_time:.2f} seconds", state="complete", expanded=False
+        )
 
         last_step = thoughts.pop()
-
+        print_thought(last_step.to_thought_steps_display(), is_final=True)
+
+        big_message_attr.append(BigMessage(
            role="assistant",
            content=last_step,
            thoughts=thoughts
        ))
-        # st.markdown(dict_to_markdown(step.model_dump()))
 
 
 
@@ -155,8 +154,9 @@ def print_thought(thought:ThoughtStepsDisplay, is_final:bool=False):
     if is_final:
         st.markdown(thought.md())
     else:
-        st.markdown(f'\n```json\n{thought.model_dump_json()}\n```\n', unsafe_allow_html=True)
-
+        # st.markdown(f'\n```json\n{thought.model_dump_json()}\n```\n', unsafe_allow_html=True)
+        with st.expander(f'{thought.step_title}'):
+            st.markdown(thought.md())
 
 
 
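The core of this change is streaming thoughts out of `generate_answer` (now a generator, see app/utils.py below) into a `st.status` container, updating the label per step and stamping the elapsed time on completion. A self-contained sketch of that pattern, with a `fake_steps` generator standing in for the real LLM-backed one:

```python
# Standalone sketch of the st.status streaming pattern used in main().
# fake_steps is a stand-in; the app iterates generate_answer(...) instead.
import time
import streamlit as st

def fake_steps(n: int):
    for i in range(n):
        time.sleep(0.2)  # stands in for one LLM round-trip
        yield f"step {i + 1}"

start_time = time.time()
with st.status("Thinking...", expanded=True) as status:
    for step in fake_steps(3):
        st.write(step)  # the app writes step.to_thought_steps_display().md()
        status.update(label=step, state="running", expanded=False)

status.update(
    label=f"Thought for {time.time()-start_time:.2f} seconds",
    state="complete",
    expanded=False,
)
```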
app/app_config.py
CHANGED

@@ -24,6 +24,7 @@ class InputConfig:
     temperature: float = 0.2
     timeout: float = 30.0
     sleeptime: float = 0.0
+    force_max_steps: bool = True
 
     @classmethod
     def load(cls, env_file=ENV_FILE_PATH, config_file=CONFIG_FILE_PATH):
@@ -46,7 +47,8 @@ class InputConfig:
             max_steps=config_dict.get('max_steps', cls.max_steps),
             temperature=config_dict.get('temperature', cls.temperature),
             timeout=config_dict.get('timeout', cls.timeout),
-            sleeptime=config_dict.get('sleeptime', cls.sleeptime)
+            sleeptime=config_dict.get('sleeptime', cls.sleeptime),
+            force_max_steps=config_dict.get('force_max_steps', cls.force_max_steps)
         )
 
     def save(self, env_file=ENV_FILE_PATH, config_file=CONFIG_FILE_PATH):
@@ -74,7 +76,8 @@ class InputConfig:
             'max_steps': self.max_steps,
             'temperature': self.temperature,
             'timeout': self.timeout,
-            'sleeptime': self.sleeptime
+            'sleeptime': self.sleeptime,
+            'force_max_steps': self.force_max_steps
         }
         with open(config_file, 'w') as f:
             json.dump(config_dict, f, indent=4)
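Reading new keys through `config_dict.get(key, cls.default)` is what keeps config files written before this commit loadable: a file with no `force_max_steps` entry falls back to the class default instead of raising. A small sketch of that merge behavior:

```python
# Sketch of the load() fallback: an old config dict without the new key
# silently picks up the class-level default (True here, as in the diff).
config_dict = {"temperature": 0.2, "timeout": 30.0, "sleeptime": 2.0}

class InputConfigDefaults:  # stand-in for the repo's InputConfig class attributes
    force_max_steps: bool = True

force_max_steps = config_dict.get("force_max_steps", InputConfigDefaults.force_max_steps)
print(force_max_steps)  # True -- the default, since the old file lacks the key
```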
app/utils.py
CHANGED

@@ -11,10 +11,16 @@ import os
 import time
 from core.utils import parse_with_fallback
 from termcolor import colored
+from app.app_config import InputConfig
+from core.llms.litellm_llm import LLM
+from core.llms.utils import user_message_with_images
+from PIL import Image
+from streamlit.runtime.uploaded_file_manager import UploadedFile
 
 
 
-
+
+def generate_answer(messages: list[dict], max_steps: int = 20, llm: BaseLLM = None, sleeptime: float = 0.0, force_max_steps: bool = False, **kwargs):
     thoughts = []
 
     for i in range(max_steps):
@@ -24,13 +30,16 @@ def generate_answer(messages: list[dict], max_steps: int = 20, llm: BaseLLM = No
 
         print(colored(f"{i+1} - {response}", 'yellow'))
 
-
-
-
-
+        thoughts.append(thought)
+        messages.append({"role": "assistant", "content": thought.model_dump_json()})
+        yield thought
+
+        if thought.is_final_answer and not thought.next_step and not force_max_steps:
+            break
+
+        messages.append({"role": "user", "content": REVIEW_PROMPT})
 
-
-        time.sleep(sleeptime)
+        time.sleep(sleeptime)
 
     # Get the final answer after all thoughts are processed
     messages += [{"role": "user", "content": FINAL_ANSWER_PROMPT}]
@@ -66,3 +75,22 @@ def dict_to_markdown(d:dict) -> str:
         md += f"{value}\n"
     return md
 
+
+
+
+def load_llm(config:InputConfig, tools=None) -> BaseLLM:
+    return LLM(api_key=config.model_api_key, model=config.model_name, tools=tools)
+
+
+def image_buffer_to_pillow_image(image_buffer:UploadedFile) -> Image.Image:
+    return Image.open(image_buffer)
+
+
+def process_user_input(user_input:str, image:Image.Image=None)->dict:
+    if image:
+        message = [user_message_with_images(user_msg_str=user_input, images=[image])]
+    else:
+        message = [{"role": "user", "content": user_input}]
+    return message
+
+
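`generate_answer` now yields each thought as it is parsed, appends it back into the running message list, and stops early only when the model marks a final answer and `force_max_steps` is off. A runnable sketch of that control flow, with a plain dataclass standing in for the repo's `ThoughtSteps`:

```python
# Sketch of the new generator control flow in generate_answer.
# Thought is a stand-in for the repo's parsed ThoughtSteps model.
from dataclasses import dataclass

@dataclass
class Thought:
    text: str
    is_final_answer: bool = False
    next_step: bool = True

def generate_steps(max_steps: int, force_max_steps: bool = False):
    for i in range(max_steps):
        # in the real code this is an LLM call followed by parse_with_fallback
        thought = Thought(f"thought {i + 1}", is_final_answer=(i == 1), next_step=False)
        yield thought
        if thought.is_final_answer and not thought.next_step and not force_max_steps:
            break  # model says it is done and we are not forcing max steps

print([t.text for t in generate_steps(5)])                        # stops after step 2
print([t.text for t in generate_steps(5, force_max_steps=True)])  # runs all 5 steps
```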
core/types.py
CHANGED

@@ -30,15 +30,15 @@ class ThoughtStepsDisplay(BaseModel):
 
 
     def md(self):
-        return
-        {self.step_title}
-
+        return f'''
+#### {self.step_title}
+
 {self.thought}
-
+
 {self.answer}
-
+
 {self.critic}
-        '''
+        '''
 
 
 
@@ -56,3 +56,21 @@ class BigMessage(BaseModel):
 class Message(BaseModel):
     role:str
     content:str
+
+class InputConfig(BaseModel):
+    prompt: str = Field(..., description="prompt to use")
+    model: str = Field(..., description="model to use")
+    max_tokens: int = Field(..., description="max tokens to use")
+    temperature: float = Field(..., description="temperature to use")
+    top_p: float = Field(..., description="top p to use")
+    n: int = Field(..., description="number of responses to generate")
+    stream: bool = Field(..., description="whether to stream the response")
+    stop: list[str] | None = Field(..., description="stop sequences")
+    force_max_steps: bool = Field(False, description="force max steps")
+
+    def to_dict(self):
+        return self.model_dump()
+
+    def to_json(self):
+        return json.dumps(self.to_dict())
+
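Before this commit `md()` began with a bare `return`, so it returned `None` and the template lines below it were dead code; the fix rebuilds the f-string with the step title as a level-4 markdown heading. A sketch mirroring the fields the diff shows (`step_title`, `thought`, `answer`, `critic`):

```python
# Minimal mirror of the fixed md(): an f-string over the display model's fields.
from pydantic import BaseModel

class ThoughtStepsDisplay(BaseModel):
    step_title: str
    thought: str
    answer: str
    critic: str

    def md(self) -> str:
        return f'''
#### {self.step_title}

{self.thought}

{self.answer}

{self.critic}
'''

print(ThoughtStepsDisplay(step_title="Step 1", thought="t", answer="a", critic="c").md())
```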
input_config.json
CHANGED

@@ -3,5 +3,6 @@
     "max_steps": 3,
     "temperature": 0.2,
     "timeout": 30.0,
-    "sleeptime": 2.0
+    "sleeptime": 2.0,
+    "force_max_steps": true
 }
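The new key is appended after `sleeptime` (with the comma moved accordingly), keeping the file valid JSON; `force_max_steps` then arrives as a Python bool through the standard loader:

```python
# Sketch: round-tripping the updated config file; assumes it sits at
# "input_config.json" in the working directory (the app uses CONFIG_FILE_PATH).
import json

with open("input_config.json") as f:
    cfg = json.load(f)

print(type(cfg["force_max_steps"]), cfg["force_max_steps"])  # <class 'bool'> True
```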