Update app.py
app.py CHANGED
@@ -1,5 +1,3 @@
-# app.py (FINAL CORRECTED VERSION)
-
 from typing import Optional, Dict, List, Tuple
 import gradio as gr
 
@@ -22,6 +20,8 @@ from deploy import send_to_sandbox, load_project_from_url
 History = List[Tuple[str, str]]
 
 # Core generation function
+# This function signature correctly includes 'hf_token'. Gradio will automatically
+# provide this value if the user is logged in.
 def generation_code(
     query: Optional[str],
     image: Optional[gr.Image],
@@ -33,7 +33,7 @@ def generation_code(
     enable_search: bool,
     language: str,
     provider: str,
-    hf_token: str
+    hf_token: str
 ) -> Tuple[str, History, str, List[Dict[str, str]]]:
     # Initialize inputs
     if query is None:
@@ -60,7 +60,7 @@ def generation_code(
     final_query = enhance_query_with_search(query, enable_search)
     messages.append({'role': 'user', 'content': final_query})
 
-    # Model inference
+    # Model inference - using the provided user token for billing
     client = get_inference_client(_current_model_name, provider, user_token=hf_token)
     resp = client.chat.completions.create(
         model=_current_model_name,
@@ -97,7 +97,6 @@ with gr.Blocks(theme=gr.themes.Base(), title="AnyCoder - AI Code Generator") as
 
     with gr.Sidebar():
         gr.Markdown("## AnyCoder AI")
-        # ... (rest of sidebar UI is correct) ...
         url_in = gr.Textbox(label="Load HF Space URL", placeholder="https://huggingface.co/spaces/user/project")
         load_btn = gr.Button("Import Project")
         load_status = gr.Markdown(visible=False)
@@ -119,6 +118,7 @@ with gr.Blocks(theme=gr.themes.Base(), title="AnyCoder - AI Code Generator") as
     with gr.Tab("Preview"):
         preview_out = gr.HTML(label="Live Preview")
     with gr.Tab("History"):
+        # FIX: The chatbot now correctly uses the modern 'messages' format.
         chat_out = gr.Chatbot(label="History", type="messages")
 
     # Events
@@ -138,11 +138,13 @@ with gr.Blocks(theme=gr.themes.Base(), title="AnyCoder - AI Code Generator") as
 
     gen_btn.click(
         fn=generation_code,
+        # FIX: The 'inputs' list now only contains Gradio components.
+        # The 'hf_token' is passed automatically by Gradio because it's
+        # in the function's signature.
         inputs=[
             prompt_in, image_in, file_in, url_site,
             setting_state, history_state, model_state,
-            search_chk, language_dd, gr.State('auto')
-            "hf_token"  # ### FIX #1: This line fixes the "Expected 11 arguments, received 10" warning.
+            search_chk, language_dd, gr.State('auto')
         ],
         outputs=[code_out, history_state, preview_out, chat_out]
    )
@@ -150,5 +152,5 @@
     clr_btn.click(fn=lambda: ([], [], "", []), outputs=[history_state, chat_out, preview_out, code_out])
 
 if __name__ == '__main__':
-    #
+    # FIX: The launch() method is now correct, without any extra arguments.
     demo.queue().launch()
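A note on the `hf_token` wiring described in the comments above: the standard way for a Gradio app to receive the signed-in visitor's Hugging Face token without listing it in `inputs` is a parameter annotated as `gr.OAuthToken`, together with a `gr.LoginButton` on a Space that has OAuth enabled. The sketch below shows that pattern; the component and function names are illustrative and not taken from app.py.

```python
# Minimal sketch of Gradio's automatic token injection.
# Assumes a Space with OAuth enabled (hf_oauth: true in the README metadata);
# all names below are illustrative, not taken from app.py.
from typing import Optional

import gradio as gr


def generate(prompt: str, oauth_token: Optional[gr.OAuthToken] = None) -> str:
    # Gradio fills 'oauth_token' itself because of the gr.OAuthToken annotation;
    # it is deliberately NOT listed in the click event's inputs below.
    token = oauth_token.token if oauth_token is not None else None
    return "token available" if token else "not logged in"


with gr.Blocks() as demo:
    gr.LoginButton()  # lets the visitor sign in with their Hugging Face account
    prompt_in = gr.Textbox(label="Prompt")
    result_out = gr.Textbox(label="Result")
    gr.Button("Generate").click(fn=generate, inputs=[prompt_in], outputs=[result_out])

if __name__ == "__main__":
    demo.queue().launch()
```

If `hf_token` stays annotated as a plain `str`, it instead needs to come from a component or `gr.State` listed in `inputs`.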
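`get_inference_client(...)` is the app's own helper (imported earlier in app.py), so its body is not part of this diff. As a purely hypothetical sketch, a helper like that could be built on `huggingface_hub.InferenceClient`, which exposes the OpenAI-style `chat.completions.create` call used above; this assumes a recent `huggingface_hub` release with Inference Providers support, and the model id below is only an example.

```python
# Hypothetical sketch; the real get_inference_client() in app.py is not shown in this diff.
# Assumes a recent huggingface_hub (provider routing plus the OpenAI-compatible alias).
import os
from typing import Optional

from huggingface_hub import InferenceClient


def get_inference_client(model_name: str, provider: str, user_token: Optional[str] = None) -> InferenceClient:
    # model_name is accepted to mirror the call site in app.py; the model itself
    # is passed per request below, as in the diff's chat.completions.create() call.
    # Prefer the signed-in user's token so usage is billed to their account;
    # otherwise fall back to the Space's own HF_TOKEN secret, if configured.
    token = user_token or os.environ.get("HF_TOKEN")
    return InferenceClient(provider=provider, token=token)


if __name__ == "__main__":
    # Example model id; a valid token in HF_TOKEN is required to actually run this.
    model = "Qwen/Qwen2.5-Coder-32B-Instruct"
    client = get_inference_client(model, "auto")
    resp = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "Say hello"}],
        max_tokens=32,
    )
    print(resp.choices[0].message.content)
```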
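On the `type="messages"` change in the History tab: with that setting `gr.Chatbot` expects a flat list of `{"role": ..., "content": ...}` dicts, while the `History = List[Tuple[str, str]]` alias earlier in the file describes the legacy `(user, assistant)` tuple pairs. The snippet below only illustrates the two shapes; the conversion helper is hypothetical and not part of app.py.

```python
# Illustration of the two chat-history shapes; the helper is hypothetical, not from app.py.
from typing import Dict, List, Tuple

legacy_history: List[Tuple[str, str]] = [
    ("Build a snake game", "Here is the HTML/JS code ..."),
]

messages_history: List[Dict[str, str]] = [
    {"role": "user", "content": "Build a snake game"},
    {"role": "assistant", "content": "Here is the HTML/JS code ..."},
]


def tuples_to_messages(history: List[Tuple[str, str]]) -> List[Dict[str, str]]:
    """Convert legacy (user, assistant) pairs to the 'messages' format."""
    out: List[Dict[str, str]] = []
    for user_msg, assistant_msg in history:
        out.append({"role": "user", "content": user_msg})
        out.append({"role": "assistant", "content": assistant_msg})
    return out


assert tuples_to_messages(legacy_history) == messages_history
```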