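# Gradio playground app for BigCode: prompts are sent to a remote text
# generation endpoint and the streamed tokens are rendered incrementally.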
import json
import os
import shutil

import gradio as gr
from huggingface_hub import Repository
from text_generation import Client

from share_btn import community_icon_html, loading_icon_html, share_js, share_btn_css

HF_TOKEN = os.environ.get("HF_TOKEN", None)
API_URL = os.environ.get("API_URL")
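# API_URL must point to the inference endpoint used by the Client below;
# HF_TOKEN is optional and only needed if that endpoint requires authentication
# (see the commented-out Authorization header).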
theme = gr.themes.Monochrome(
    primary_hue="indigo",
    secondary_hue="blue",
    neutral_hue="slate",
    radius_size=gr.themes.sizes.radius_sm,
    font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
)
# Streaming client for the text-generation-inference endpoint.
client = Client(
    API_URL,
    # headers={"Authorization": f"Bearer {HF_TOKEN}"},
)
def generate(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0, do_save=True):
    # Clamp temperature away from zero to avoid degenerate sampling settings.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        truncate=999,
        seed=42,
        stop_sequences=["</s>"],
    )

    # Stream tokens from the endpoint and yield the accumulated text so the
    # Gradio output updates as generation progresses.
    stream = client.generate_stream(
        prompt,
        **generate_kwargs,
    )

    output = ""
    for response in stream:
        output += response.token.text
        yield output
    return output
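# Illustrative usage of the streaming generator (prompt and settings are
# examples only):
#   for partial in generate("def fibonacci(n):", temperature=0.2):
#       print(partial)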
examples = [
    "def hello_world():",
]


def process_example(args):
    # Exhaust the generator and return only the final text for the example cache.
    x = ""
    for x in generate(args):
        pass
    return x
css = ".generating {visibility: hidden}" + share_btn_css
with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
    with gr.Column():
        gr.Markdown(
            """ # BigCode - Playground
            """
        )
        with gr.Row():
            with gr.Column(scale=3):
                instruction = gr.Textbox(placeholder="Enter your prompt here", label="Prompt", elem_id="q-input")
                with gr.Box():
                    gr.Markdown("**Response**")
                    output = gr.Markdown(elem_id="q-output")
                submit = gr.Button("Generate", variant="primary")
                gr.Examples(
                    examples=examples,
                    inputs=[instruction],
                    cache_examples=False,
                    fn=process_example,
                    outputs=[output],
                )
            with gr.Column(scale=1):
                temperature = gr.Slider(
                    label="Temperature",
                    value=0.9,
                    minimum=0.0,
                    maximum=2.0,
                    step=0.1,
                    interactive=True,
                    info="Higher values produce more diverse outputs",
                )
                max_new_tokens = gr.Slider(
                    label="Max new tokens",
                    value=256,
                    minimum=0,
                    maximum=512,
                    step=4,
                    interactive=True,
                    info="The maximum number of new tokens to generate",
                )
                top_p = gr.Slider(
                    label="Top-p (nucleus sampling)",
                    value=0.90,
                    minimum=0.0,
                    maximum=1.0,
                    step=0.05,
                    interactive=True,
                    info="Higher values sample more low-probability tokens",
                )
                repetition_penalty = gr.Slider(
                    label="Repetition penalty",
                    value=1.2,
                    minimum=1.0,
                    maximum=2.0,
                    step=0.05,
                    interactive=True,
                    info="Penalize repeated tokens",
                )

    # Both the Generate button and pressing Enter in the prompt box stream
    # generate() output into the response Markdown component.
    submit.click(generate, inputs=[instruction, temperature, max_new_tokens, top_p, repetition_penalty], outputs=[output])
    instruction.submit(generate, inputs=[instruction, temperature, max_new_tokens, top_p, repetition_penalty], outputs=[output])

demo.queue(concurrency_count=16).launch(debug=True)
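# Illustrative local run (the endpoint URL is an example, not a real deployment):
#   API_URL=http://localhost:8080 python app.py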