# NOTE(review): the three lines below were Hugging Face Space page residue
# ("Spaces:" status banner, "Sleeping" twice) captured by the scrape — they are
# not part of the notebook source.
# Notebook preamble: import the framework, record the generating marimo
# version, and build the app object that the cells below attach to.
import marimo

__generated_with = "0.9.14"  # marimo version that produced this notebook
app = marimo.App(width="medium")
def __(mo):
    # Notebook title rendered as markdown.
    mo.md(r"""# Chat is an abstraction""")
    return
def __(
    add_generation_prompt,
    mo,
    model_id,
    multiselect,
    output2,
    system_message,
    user_message,
):
    # Two-column layout: input widgets on the left, the rendered chat
    # template ("what the LLM sees") on the right.
    controls = mo.vstack(
        [model_id, system_message, user_message, add_generation_prompt, multiselect]
    )
    preview = mo.vstack(
        [mo.md("**What the LLM sees:**"), output2],
        align="start",
        justify="start",
    )
    mo.hstack([controls, preview], gap=2, widths=[1, 2])
    return
def __(mo):
    # Checkbox controlling whether the template appends the
    # assistant-generation prompt.
    add_generation_prompt = mo.ui.checkbox(label="Add generation prompt:")
    return (add_generation_prompt,)
def __(mo):
    # One-line text input holding the Hugging Face model id to load.
    model_id = mo.ui.text_area("Qwen/Qwen2.5-0.5B-Instruct", label="Model", rows=1)
    return (model_id,)
def __(model_id):
    # Fetch the tokenizer for the chosen model; re-runs whenever the
    # model id widget changes.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(model_id.value)
    return AutoTokenizer, tokenizer
def __():
    import marimo as mo

    # Free-text inputs for the system and user turns of the conversation.
    system_message = mo.ui.text_area(
        "You are a helpful assistant.",
        rows=1,
        debounce=False,
        label="System message:",
    )
    user_message = mo.ui.text_area("", rows=2, debounce=False, label="User message:")
    return mo, system_message, user_message
def __(system_message, user_message):
    # Assemble the chat history in system-then-user order, skipping any
    # turn whose text box was left empty.
    messages = [
        {"role": role, "content": widget.value}
        for role, widget in (("system", system_message), ("user", user_message))
        if widget.value != ""
    ]
    return (messages,)
def __(add_generation_prompt, messages, mo, tokenizer, tools):
    # Raw (repr'd) view of the rendered chat template; stays blank until
    # at least one message exists.
    if not messages:
        output = ""
    else:
        rendered = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=add_generation_prompt.value,
            tools=tools,
            tokenize=False,
        )
        output = mo.md(repr(rendered))
    return (output,)
def __(add_generation_prompt, messages, mo, tokenizer, tools):
    # Display-friendly markdown view of the rendered chat template.
    # Newlines are doubled so markdown preserves line breaks, and '#' is
    # escaped so template text is not rendered as markdown headings.
    if messages:
        # apply_chat_template's `tools` parameter defaults to None, so the
        # original's two near-duplicate branches (pass tools / omit tools)
        # collapse into a single call with `tools or None`.
        rendered = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=add_generation_prompt.value,
            tools=tools if tools else None,
            tokenize=False,
        )
        # Fix: the original wrote "\#", an invalid escape sequence that
        # raises SyntaxWarning on Python 3.12+ (a future error). "\\#"
        # produces the identical runtime string (backslash + '#').
        output2 = mo.md(rendered.replace("\n", "\n\n").replace("#", "\\#"))
    else:
        output2 = ""
    return (output2,)
def __():
    import datetime

    def current_time():
        """Get the current local time as a string."""
        # Fix: `datetime` here is the *module*; the original
        # `datetime.now()` raised AttributeError. The classmethod lives on
        # the datetime class: datetime.datetime.now().
        return str(datetime.datetime.now())

    return current_time, datetime
def __(current_time, multiply, multiselect):
    # Map selection names to their callables, then collect whichever tools
    # the user ticked (insertion order keeps current_time before multiply,
    # matching the original if-chain).
    available = {"current_time": current_time, "multiply": multiply}
    tools = [fn for name, fn in available.items() if name in multiselect.value]
    return (tools,)
def __(mo):
    # Multi-select listing the tool functions that can be exposed to the
    # chat template.
    multiselect = mo.ui.multiselect(
        options=["current_time", "multiply"], label="Provide some tools:"
    )
    return (multiselect,)
def __():
    # NOTE: the docstring below is kept verbatim — transformers parses it
    # (Google style) to build the tool's JSON schema, so its wording is
    # part of the notebook's behavior, not just documentation.
    def multiply(a: float, b: float):
        """
        A function that multiplies two numbers
        Args:
            a: The first number to multiply
            b: The second number to multiply
        """
        return a * b

    return (multiply,)
def __():
    # Scratch cell: uncomment the line below to inspect the raw Jinja
    # chat-template string attached to the tokenizer.
    # tokenizer.chat_template
    return