from models import ModelChain, get_data, get_text
import gradio as gr
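
# `models` is this project's own module (not shown here). Based on how it is
# used below, its interface is assumed to look roughly like:
#     class ModelChain:
#         def generate_response(self, model_id, name, user_input, system_prompt) -> str: ...
#     def get_data(): ...   # returns a DataFrame-like object
#     def get_text(): ...   # returns a string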

GPT_4 = "deepseek/deepseek-chat-v3-0324:free" #12
PHI_4 = "microsoft/phi-4" #2
GEMMA_3_27B = "google/gemma-3-27b-it:free" #4
QWEN_32B = "qwen/qwq-32b:free" #8
QWEN_25 = "qwen/qwen2.5-vl-72b-instruct:free" #10 #error
DEEPSEEK_R1 = "deepseek/deepseek-r1:free" #11
DEEPSEEK_R1_ZERO = "deepseek/deepseek-r1-zero:free" #13
META_LLAMA_MODEL = "meta-llama/Llama-3.3-70B-Instruct:free" #14
MISTRAL_SMALL_MODEL = "mistralai/mistral-small-3.1-24b-instruct:free" #15
MISTRAL_NEMO = "mistralai/mistral-nemo:free" #16

CONCISE_ENGLISH_PROMPT = "Answer in short and precise English sentences."

def get_model(title, dev, model, name, user_input, system_prompt):
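    """Route a user query to the selected model and return its response.

    `title` and `dev` receive the values of the two gr.Markdown header
    components that Gradio passes in as the first inputs; they are unused.
    """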
    # Special commands that bypass the model entirely
    if user_input.lower() == "data":
        return get_data()

    if user_input.lower() == "text":
        return get_text()

    if not name:
        return "Enter Your Name!"

    if not model:
        return "Select an AI Model!"

    chain = ModelChain()
    # Append the concise-English instruction to any custom system prompt
    prompt = (system_prompt + " " + CONCISE_ENGLISH_PROMPT).strip()

    # Map the UI label to the corresponding model identifier
    model_map = {
        "ChatGPT": GPT_4,
        "Phi-4": PHI_4,
        "Gemma-3": GEMMA_3_27B,
        "QwQ-32B": QWEN_32B,
        "Qwen2.5": QWEN_25,
        "DeepSeek-R1": DEEPSEEK_R1,
        "DeepSeek-R1-Zero": DEEPSEEK_R1_ZERO,
        "Llama-3.3": META_LLAMA_MODEL,
        "Mistral-Small": MISTRAL_SMALL_MODEL,
        "Mistral-Nemo": MISTRAL_NEMO,
    }
    model_id = model_map.get(model)
    if model_id is None:
        return "Invalid Model Name: " + model
    return chain.generate_response(model_id, name, user_input, prompt)

def main():
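    """Build and launch the Gradio interface."""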
    view = gr.Interface(
        fn=get_model,
        inputs=[
            # Static headers; their values arrive in get_model as the
            # unused `title` and `dev` arguments
            gr.Markdown("# Switch AI"),
            gr.Markdown("### by Kalash"),
            gr.Radio(
                [
                    "ChatGPT",
                    "Phi-4",
                    "Gemma-3",
                    "QwQ-32B",
                    "Qwen2.5",
                    "DeepSeek-R1",
                    "DeepSeek-R1-Zero",
                    "Llama-3.3",
                    "Mistral-Small",
                    "Mistral-Nemo",
                ],
                label="Choose AI Model", value="ChatGPT"),
            gr.Textbox(label="Your Name", placeholder="Enter Your Name"),
            gr.Textbox(label="Your Query", placeholder="Enter Your Question"),
            gr.Textbox(label="System Prompt", placeholder="Enter Custom System Prompt (Optional)"),
        ],
        outputs=[gr.Textbox(label="AI Response", lines=25)],
        flagging_mode="never",
    ).launch(share=True)
    # ).launch(share=False, server_port=54321)

if __name__ == '__main__':
    main()