huangzhii committed
Commit 36c43cc · 1 Parent(s): 4c6af64
minor bugs fixed
Files changed: examples/code_editor_scripts.py (+18 -21)

examples/code_editor_scripts.py CHANGED
@@ -19,19 +19,28 @@ class CodeEditor:
 
 
     def load_layout(self):
+        # Initialize session state for problem description and other fields if not already set
+        if 'problem' not in st.session_state:
+            st.session_state.problem = self.data["default_problem_description"]
+        if 'loss_system_prompt' not in st.session_state:
+            st.session_state.loss_system_prompt = self.data["default_loss_system_prompt"]
+        if 'instruction' not in st.session_state:
+            st.session_state.instruction = self.data["instruction"]
+
         col1, col2 = st.columns([1, 1])
         with col1:
-
+            st.session_state.problem = st.text_area("Problem description:", st.session_state.problem, height=300)
         with col2:
-
-
+            st.session_state.loss_system_prompt = st.text_area("Loss system prompt:", st.session_state.loss_system_prompt, height=150)
+            st.session_state.instruction = st.text_area("Instruction for formatted LLM call:", st.session_state.instruction, height=100)
 
-
+        # Assume the code content also needs to be persistent
+        if 'code_content' not in st.session_state:
             st.session_state.code_content = self.data["default_initial_solution"]
 
         def update_code_content(value):
             st.session_state.code_content = value
-
+            print(f"Code updated: {st.session_state.code_content}")
 
         col1, col2 = st.columns(2)
         with col1:
@@ -54,18 +63,6 @@ class CodeEditor:
         )
 
 
-        # format_string = f"{instruction}\nProblem: {problem}\nCurrent Code: {st.session_state.code_content}"
-        # mui.Typography(format_string)
-
-        # mui.Typography("Final Snippet vs. Current Solution:", sx={"fontSize": "20px", "fontWeight": "bold"})
-        # editor.MonacoDiff(
-        #     original=self.data["default_target_solution"],
-        #     modified=st.session_state.code_content,
-        #     height=300,
-        # )
-
-
-
     def _run(self):
         # Code is the variable of interest we want to optimize -- so requires_grad=True
         solution = st.session_state.code_content
@@ -74,7 +71,7 @@
                            role_description="code instance to optimize")
 
         # We are not interested in optimizing the problem -- so requires_grad=False
-        problem = tg.Variable(
+        problem = tg.Variable(st.session_state.problem,
                               requires_grad=False,
                               role_description="the coding problem")
 
@@ -82,13 +79,13 @@
         optimizer = tg.TGD(parameters=[code])
 
 
-        instruction =
+        instruction = st.session_state.instruction
         llm_engine = self.llm_engine
-        loss_system_prompt =
+        loss_system_prompt = st.session_state.loss_system_prompt
         loss_system_prompt = tg.Variable(loss_system_prompt, requires_grad=False, role_description="system prompt to the loss function")
 
         format_string = "{instruction}\nProblem: {{problem}}\nCurrent Code: {{code}}"
-        format_string = format_string.format(instruction=
+        format_string = format_string.format(instruction=st.session_state.instruction)
 
         fields = {"problem": None, "code": None}
         formatted_llm_call = tg.autograd.FormattedLLMCall(engine=self.llm_engine,
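
The bug this commit addresses in load_layout is the usual Streamlit one: widget values are lost on every rerun unless they are mirrored in st.session_state, and _run was reading fields that had never been assigned. Below is a minimal, self-contained sketch of the seed-once / read-back pattern the commit adopts; the default strings are placeholders, not the app's self.data values.

import streamlit as st

# Placeholder defaults; the app reads these from self.data instead.
DEFAULTS = {
    "problem": "Write a function that returns the n-th Fibonacci number.",
    "loss_system_prompt": "You are a careful code reviewer.",
    "instruction": "Evaluate the current code against the problem statement.",
}

# Seed each field exactly once so user edits survive Streamlit reruns.
for key, value in DEFAULTS.items():
    if key not in st.session_state:
        st.session_state[key] = value

col1, col2 = st.columns([1, 1])
with col1:
    # Writing the widget value back keeps session state as the single source of truth.
    st.session_state.problem = st.text_area("Problem description:", st.session_state.problem, height=300)
with col2:
    st.session_state.loss_system_prompt = st.text_area("Loss system prompt:", st.session_state.loss_system_prompt, height=150)
    st.session_state.instruction = st.text_area("Instruction for formatted LLM call:", st.session_state.instruction, height=100)

st.write(st.session_state)

Run it with streamlit run app.py; because each default is written only when its key is missing, edits made in the text areas persist across the reruns that every Streamlit interaction triggers, which is why _run can later read st.session_state.problem and friends safely.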
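The _run changes then feed those fields into TextGrad: the editor content becomes the only trainable tg.Variable, the problem statement stays frozen, and a FormattedLLMCall plays the role of the loss. The sketch below shows roughly how the pieces connect outside of Streamlit, following the TextGrad solution-optimization tutorial; the engine name, example strings, and the response_role_description argument are assumptions rather than details confirmed by this diff.

import textgrad as tg

# Assumes an OpenAI key in the environment; the app passes self.llm_engine instead.
llm_engine = tg.get_engine("gpt-4o")

# The code under edit is the only trainable variable.
code = tg.Variable("def fib(n):\n    return n",
                   requires_grad=True,
                   role_description="code instance to optimize")
problem = tg.Variable("Return the n-th Fibonacci number.",
                      requires_grad=False,
                      role_description="the coding problem")
optimizer = tg.TGD(parameters=[code])

loss_system_prompt = tg.Variable("You are a careful code reviewer.",
                                 requires_grad=False,
                                 role_description="system prompt to the loss function")

# Same format string as the app: {instruction} is filled now, {problem}/{code} per call.
format_string = "{instruction}\nProblem: {{problem}}\nCurrent Code: {{code}}"
format_string = format_string.format(instruction="Point out bugs in the current code.")

fields = {"problem": None, "code": None}
formatted_llm_call = tg.autograd.FormattedLLMCall(engine=llm_engine,
                                                  format_string=format_string,
                                                  fields=fields,
                                                  system_prompt=loss_system_prompt)

# One optimization step: evaluate, backpropagate the textual feedback, update the code.
loss = formatted_llm_call(inputs={"problem": problem, "code": code},
                          response_role_description="evaluation of the code solution")
loss.backward()
optimizer.step()
print(code.value)

In the app the loop runs over the session-state values instead of these literals, and the updated code.value is what gets written back into the editor.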