Update app's progress display
Commit: Update app's progress display

app.py (CHANGED)
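Summary: this change replaces the manually advanced st.progress bar in set_up_chat_ui() with an st.status container. The bar created before prompt validation is removed, and the LLM call, JSON cleanup, and slide-deck generation now run inside one status block whose label is updated as each stage completes. The commented-out token-counting and message-history code is dropped along the way.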
@@ -156,8 +156,6 @@ def set_up_chat_ui():
         placeholder=APP_TEXT['chat_placeholder'],
         max_chars=GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH
     ):
-
-        progress_bar_pptx = st.progress(0, 'Preparing to run...')
         if not text_helper.is_valid_prompt(prompt):
             st.error(
                 'Not enough information provided!'
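Note on the APIs involved: st.progress() returns a bar element that the caller must advance by hand at fixed checkpoints, whereas st.status() (available in newer Streamlit releases) is a context manager whose label, state, and expansion can be updated as work proceeds. A minimal sketch of the before/after pattern, with time.sleep() standing in for the app's real work (illustrative only, not part of this commit):

    import time

    import streamlit as st

    # Old pattern (what this commit removes): advance a bar manually.
    bar = st.progress(0, 'Preparing to run...')
    time.sleep(1)  # stand-in for the LLM call
    bar.progress(50, 'Analyzing response...')
    time.sleep(1)  # stand-in for building the slide deck
    bar.progress(100, text='Done!')

    # New pattern (what this commit adopts): one status container owns the step.
    with st.status('Calling LLM...', expanded=False) as status:
        time.sleep(1)  # stand-in for the LLM call
        status.update(label='Creating the slide deck...', state='running')
        time.sleep(1)  # stand-in for building the slide deck
        status.update(label='Done!', state='complete', expanded=True)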
@@ -190,64 +188,59 @@ def set_up_chat_ui():
             }
         )
 
-        …
-        st.chat_message('ai').code(response, language='json')
-
-        history.add_user_message(prompt)
-        history.add_ai_message(response)
-
-        # if GlobalConfig.COUNT_TOKENS:
-        #     tokenizer = _get_tokenizer()
-        #     tokens_count_in = len(tokenizer.tokenize(formatted_template))
-        #     tokens_count_out = len(tokenizer.tokenize(response))
-        #     logger.debug(
-        #         'Tokens count:: input: %d, output: %d',
-        #         tokens_count_in, tokens_count_out
-        #     )
-
-        # _display_messages_history(view_messages)
-
-        # The content has been generated as JSON
-        # There maybe trailing ``` at the end of the response -- remove them
-        # To be careful: ``` may be part of the content as well when code is generated
-        progress_bar_pptx.progress(50, 'Analyzing response...')
-        response_cleaned = text_helper.get_clean_json(response)
-
-        logger.info(
-            'Cleaned JSON response:: original length: %d | cleaned length: %d',
-            len(response), len(response_cleaned)
-        )
-        logger.debug('Cleaned JSON: %s', response_cleaned)
-
-        generate_slide_deck(response_cleaned)
-        progress_bar_pptx.progress(100, text='Done!')
-
+        with st.status(
+            'Calling LLM...will retry if connection times out...',
+            expanded=False
+        ) as status:
+            response: dict = llm_helper.hf_api_query({
+                'inputs': formatted_template,
+                'parameters': {
+                    'temperature': GlobalConfig.LLM_MODEL_TEMPERATURE,
+                    'min_length': GlobalConfig.LLM_MODEL_MIN_OUTPUT_LENGTH,
+                    'max_length': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
+                    'max_new_tokens': GlobalConfig.LLM_MODEL_MAX_OUTPUT_LENGTH,
+                    'num_return_sequences': 1,
+                    'return_full_text': False,
+                    # "repetition_penalty": 0.0001
+                },
+                'options': {
+                    'wait_for_model': True,
+                    'use_cache': True
+                }
+            })
+
+            if len(response) > 0 and 'generated_text' in response[0]:
+                response: str = response[0]['generated_text'].strip()
+
+            st.chat_message('ai').code(response, language='json')
+
+            history.add_user_message(prompt)
+            history.add_ai_message(response)
+
+            # The content has been generated as JSON
+            # There maybe trailing ``` at the end of the response -- remove them
+            # To be careful: ``` may be part of the content as well when code is generated
+            response_cleaned = text_helper.get_clean_json(response)
+
+            logger.info(
+                'Cleaned JSON response:: original length: %d | cleaned length: %d',
+                len(response), len(response_cleaned)
+            )
+            logger.debug('Cleaned JSON: %s', response_cleaned)
+
+            # Now create the PPT file
+            status.update(
+                label='Searching photos and creating the slide deck...give it a moment...',
+                state='running',
+                expanded=False
+            )
+            generate_slide_deck(response_cleaned)
+            status.update(label='Done!', state='complete', expanded=True)
+
+            logger.info(
+                '#messages in history / 2: %d',
+                len(st.session_state[CHAT_MESSAGES]) / 2
+            )
 
 
 def generate_slide_deck(json_str: str):
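Note on the response handling retained from the old code: for text-generation models, the Hugging Face Inference API returns a JSON list of generation dicts, e.g. [{'generated_text': '...'}], which is why the code indexes response[0] even though the variable is annotated dict. A sketch of that unwrapping step with closer typing (extract_generated_text is a hypothetical name; the app inlines this logic):

    from typing import Any

    def extract_generated_text(response: list[dict[str, Any]]) -> str:
        # The API returns a list of candidate generations; the app requests
        # num_return_sequences=1, so only the first entry is used.
        if response and 'generated_text' in response[0]:
            return response[0]['generated_text'].strip()
        return ''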