Spestly committed on
Commit
34d0739
·
verified ·
1 Parent(s): f074928

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -90
app.py CHANGED
@@ -1,139 +1,110 @@
1
  import streamlit as st
2
  import cohere
 
3
 
4
- st.set_page_config(page_title="Cohere Chat Interface", layout="wide")
5
 
6
- MODEL_PFPS = {
7
- "command-a-03-2025": "/media/pfp/cohere-pfp.png",
8
- "command-r7b-12-2024": "/media/pfp/cohere-pfp.png",
9
- "command-r-plus-04-2024": "/media/pfp/cohere-pfp.png",
10
- "command-r-plus": "/media/pfp/cohere-pfp.png",
11
- "command-r-08-2024": "/media/pfp/cohere-pfp.png",
12
- "command-r-03-2024": "/media/pfp/cohere-pfp.png",
13
- "command-r": "/media/pfp/cohere-pfp.png",
14
- "command": "/media/pfp/cohere-pfp.png",
15
- "command-nightly": "/media/pfp/cohere-pfp.png",
16
- "command-light": "/media/pfp/cohere-pfp.png",
17
- "command-light-nightly": "/media/pfp/cohere-pfp.png"
18
- }
19
 
20
- USER_PFP = "https://example.com/user-default.png"
 
 
21
 
22
- MODEL_INFO = {
23
  "command-a-03-2025": {
24
- "description": "Command A is our most performant model to date, excelling at tool use, agents, retrieval augmented generation (RAG), and multilingual use cases. Command A has a context length of 256K, only requires two GPUs to run, and has 150% higher throughput compared to Command R+ 08-2024.",
25
- "context_window": "256K tokens",
26
- "output_tokens": "8K tokens"
27
  },
28
  "command-r7b-12-2024": {
29
- "description": "command-r7b-12-2024 is a small, fast update delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning and multiple steps.",
30
- "context_window": "128K tokens",
31
- "output_tokens": "4K tokens"
32
  },
33
  "command-r-plus-04-2024": {
34
- "description": "Command R+ is an instruction-following conversational model that performs language tasks at a higher quality, more reliably, and with a longer context than previous models. It is best suited for complex RAG workflows and multi-step tool use.",
35
- "context_window": "128K tokens",
36
- "output_tokens": "4K tokens"
37
  },
38
  "command-r-plus": {
39
- "description": "command-r-plus is an alias for command-r-plus-04-2024, so if you use command-r-plus in the API, that's the model you're pointing to.",
40
- "context_window": "128K tokens",
41
- "output_tokens": "4K tokens"
42
  },
43
  "command-r-08-2024": {
44
- "description": "command-r-08-2024 is an update of the Command R model, delivered in August 2024.",
45
- "context_window": "128K tokens",
46
- "output_tokens": "4K tokens"
47
  },
48
  "command-r-03-2024": {
49
- "description": "Command R is an instruction-following conversational model that performs language tasks at a higher quality, more reliably, and with a longer context than previous models. It can be used for complex workflows like code generation, retrieval augmented generation (RAG), tool use, and agents.",
50
- "context_window": "128K tokens",
51
- "output_tokens": "4K tokens"
52
  },
53
  "command-r": {
54
- "description": "command-r is an alias for command-r-03-2024, so if you use command-r in the API, that's the model you're pointing to.",
55
- "context_window": "128K tokens",
56
- "output_tokens": "4K tokens"
57
  },
58
  "command": {
59
- "description": "An instruction-following conversational model that performs language tasks with high quality, more reliably and with a longer context than our base generative models.",
60
- "context_window": "4K tokens",
61
- "output_tokens": "4K tokens"
62
  },
63
  "command-nightly": {
64
- "description": "Nightly version of command - experimental and unstable. Not recommended for production use.",
65
- "context_window": "128K tokens",
66
- "output_tokens": "4K tokens"
67
  },
68
  "command-light": {
69
- "description": "Smaller, faster version of command with similar capabilities.",
70
- "context_window": "4K tokens",
71
- "output_tokens": "4K tokens"
72
  },
73
  "command-light-nightly": {
74
- "description": "Nightly version of command-light - experimental and unstable. Not for production use.",
75
- "context_window": "128K tokens",
76
- "output_tokens": "4K tokens"
77
  }
78
  }
79
 
80
  with st.sidebar:
81
- st.title("Configuration")
82
  api_key = st.text_input("Cohere API Key", type="password")
83
-
84
- selected_model = st.selectbox(
85
- "Select Model",
86
- options=list(MODEL_INFO.keys()),
87
- format_func=lambda x: x.upper()
88
- )
89
-
90
  st.divider()
91
- st.subheader("Model Details")
92
- st.image(MODEL_PFPS[selected_model], width=80)
93
- st.markdown(f"**{selected_model}**")
94
- st.markdown(MODEL_INFO[selected_model]["description"])
95
- st.markdown(f"**Context Window:** {MODEL_INFO[selected_model]['context_window']}")
96
- st.markdown(f"**Max Output:** {MODEL_INFO[selected_model]['output_tokens']}")
97
 
98
- st.title(f"Chat with {selected_model.upper()}")
99
- st.image(MODEL_PFPS[selected_model], width=50)
100
 
101
  if "messages" not in st.session_state:
102
  st.session_state.messages = []
103
 
104
- for message in st.session_state.messages:
105
- avatar = USER_PFP if message["role"] == "user" else MODEL_PFPS[selected_model]
106
- with st.chat_message(message["role"], avatar=avatar):
107
- st.markdown(message["content"])
108
 
109
- if prompt := st.chat_input("Type your message..."):
110
  if not api_key:
111
- st.error("API key required - enter in sidebar")
112
  st.stop()
113
 
114
  st.session_state.messages.append({"role": "user", "content": prompt})
115
  with st.chat_message("user", avatar=USER_PFP):
116
  st.markdown(prompt)
117
-
118
  try:
119
  co = cohere.ClientV2(api_key)
120
-
121
- with st.chat_message("assistant", avatar=MODEL_PFPS[selected_model]):
122
- response = co.chat(
123
- model=selected_model,
124
- messages=st.session_state.messages
125
- )
126
-
127
- if hasattr(response, 'text'):
128
- full_response = response.text
129
- else:
130
- full_response = "Error: Unexpected API response format"
131
-
132
- st.markdown(full_response)
133
-
134
- st.session_state.messages.append({"role": "assistant", "content": full_response})
135
 
136
- except cohere.CohereError as e:
137
- st.error(f"Cohere API Error: {str(e)}")
138
  except Exception as e:
139
- st.error(f"General Error: {str(e)}")
 
import streamlit as st
import cohere
import os

st.set_page_config(page_title="Cohere Chat", layout="wide")

# Avatar images shown next to chat messages.
AI_PFP = "media/pfps/cohere-pfp.png"
USER_PFP = "media/pfps/user-pfp.jpg"

# Fail fast with a clear message if the avatar assets are missing;
# st.chat_message would otherwise error later, less helpfully.
if not os.path.exists(AI_PFP) or not os.path.exists(USER_PFP):
    st.error("Missing profile pictures in media/pfps directory")
    st.stop()

# Static catalogue of selectable Cohere models and the metadata
# rendered in the sidebar for the chosen one.
model_info = {
    "command-a-03-2025": {
        "description": "Command A is our most performant model to date, excelling at tool use, agents, retrieval augmented generation (RAG), and multilingual use cases.",
        "context": "256K",
        "output": "8K"
    },
    "command-r7b-12-2024": {
        "description": "Small, fast update excelling at RAG, tool use, and complex reasoning tasks.",
        "context": "128K",
        "output": "4K"
    },
    "command-r-plus-04-2024": {
        "description": "Instruction-following model for complex RAG workflows and multi-step tool use.",
        "context": "128K",
        "output": "4K"
    },
    "command-r-plus": {
        "description": "Alias for command-r-plus-04-2024.",
        "context": "128K",
        "output": "4K"
    },
    "command-r-08-2024": {
        "description": "Updated Command R model from August 2024.",
        "context": "128K",
        "output": "4K"
    },
    "command-r-03-2024": {
        "description": "Instruction-following model for code generation, RAG, and agents.",
        "context": "128K",
        "output": "4K"
    },
    "command-r": {
        "description": "Alias for command-r-03-2024.",
        "context": "128K",
        "output": "4K"
    },
    "command": {
        "description": "Conversational model with long context capabilities.",
        "context": "4K",
        "output": "4K"
    },
    "command-nightly": {
        "description": "Experimental nightly build (not for production).",
        "context": "128K",
        "output": "4K"
    },
    "command-light": {
        "description": "Faster lightweight version of command.",
        "context": "4K",
        "output": "4K"
    },
    "command-light-nightly": {
        "description": "Experimental nightly build of command-light.",
        "context": "128K",
        "output": "4K"
    }
}

# Sidebar: API key entry, model picker, and info card for the selection.
with st.sidebar:
    st.title("Settings")
    api_key = st.text_input("Cohere API Key", type="password")
    selected_model = st.selectbox("Model", options=list(model_info.keys()))
    st.divider()
    st.image(AI_PFP, width=60)
    st.subheader(selected_model)
    st.markdown(model_info[selected_model]["description"])
    st.caption(f"Context: {model_info[selected_model]['context']}")
    st.caption(f"Output: {model_info[selected_model]['output']}")

st.title(f"Chat - {selected_model}")

# Conversation history persists across Streamlit reruns in session_state.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far on every rerun.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"], avatar=USER_PFP if msg["role"] == "user" else AI_PFP):
        st.markdown(msg["content"])

if prompt := st.chat_input("Message..."):
    if not api_key:
        st.error("API key required")
        st.stop()

    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user", avatar=USER_PFP):
        st.markdown(prompt)

    try:
        co = cohere.ClientV2(api_key)
        with st.chat_message("assistant", avatar=AI_PFP):
            response = co.chat(model=selected_model, messages=st.session_state.messages)
            # BUG FIX: ClientV2's chat() returns a ChatResponse whose text is
            # nested at response.message.content[0].text; the old
            # hasattr(response, 'text') check always failed, so every reply
            # showed "Error processing response". Fall back to the legacy
            # .text attribute for older SDK response shapes.
            try:
                reply = response.message.content[0].text
            except (AttributeError, IndexError, TypeError):
                reply = getattr(response, "text", "Error processing response")
            st.markdown(reply)
            st.session_state.messages.append({"role": "assistant", "content": reply})
    except Exception as e:
        st.error(f"Error: {str(e)}")