Update app.py

app.py CHANGED
@@ -206,7 +206,10 @@ def train_model():
     return "\n\n".join(metric_str_all), [os.path.join("./VRM-Emotions", file) for file in os.listdir("./VRM-Emotions")]

 async def train_model_async(progress=gr.Progress(track_tqdm=True)):
+    try:
+        return await asyncio.to_thread(train_model)
+    except Exception as e:
+        raise gr.Error(e)

 DEFAULT_SYSTEM_PROMPT = "あなたは誠実で優秀な日本人のアシスタントです。特に指示が無い場合は、常に日本語で回答してください。"
 memory = [{"role": "system", "content": DEFAULT_SYSTEM_PROMPT}]
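The hunk above wraps the blocking train_model() call so it runs in a worker thread via asyncio.to_thread and re-raises any failure as gr.Error, which Gradio reports in the UI instead of failing silently. A minimal self-contained sketch of that pattern, assuming only gradio and the standard library; slow_task and the Blocks wiring are illustrative, not taken from app.py:

import asyncio
import time

import gradio as gr

def slow_task() -> str:
    # Stand-in for a blocking call such as train_model().
    time.sleep(2)
    return "done"

async def run_task() -> str:
    try:
        # Off-load the blocking work to a thread so the event loop stays responsive.
        return await asyncio.to_thread(slow_task)
    except Exception as e:
        # gr.Error surfaces the message as an error toast in the Gradio UI.
        raise gr.Error(str(e))

with gr.Blocks() as demo:
    button = gr.Button("Run")
    result = gr.Textbox(label="Result")
    button.click(run_task, outputs=result)

if __name__ == "__main__":
    demo.launch()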
@@ -217,69 +220,75 @@ def reset_memory():
     return None

 def init():
+    try:
+        global llamaTokenizer, llamaModel
+        llamaTokenizer = AutoTokenizer.from_pretrained("elyza/Llama-3-ELYZA-JP-8B", trust_remote_code=True)
+        llamaModel = AutoModelForCausalLM.from_pretrained(
+            "elyza/Llama-3-ELYZA-JP-8B",
+            torch_dtype="auto",
+            device_map="auto",
+        )
+        llamaModel.eval()

+        return [gr.Button(visible=False), gr.Textbox(visible=True), gr.Button(visible=True)]
+    except Exception as e:
+        raise gr.Error(e)

 async def chat(message):
+    try:
+        async with Translator() as translator:
+            translated_input = await translator.translate(message, dest="ja")
+            jp_input = translated_input.text
+            output_language = translated_input.src

+            global memory

+            memory.append({"role": "user", "content": jp_input})

+            prompt = llamaTokenizer.apply_chat_template(
+                memory,
+                tokenize=False,
+                add_generation_prompt=True
+            )

+            token_ids = llamaTokenizer.encode(
+                prompt, add_special_tokens=False, return_tensors="pt"
             )

+            with torch.no_grad():
+                output_ids = llamaModel.generate(
+                    token_ids.to(llamaModel.device),
+                    max_new_tokens=1024,
+                    do_sample=True,
+                    temperature=0.6,
+                    top_p=0.9,
+                )

+            output = llamaTokenizer.decode(
+                output_ids.tolist()[0][token_ids.size(1):], skip_special_tokens=True
+            )

+            memory.append({"role": "assistant", "content": output})

+            translated_output = await translator.translate(output, dest=output_language)

+            if os.path.exists(f"./VRM-Emotions/model.safetensors"):
+                newTokenizer = AutoTokenizer.from_pretrained("./VRM-Emotions", trust_remote_code=True)
+                newConfig = AutoConfig.from_pretrained("./VRM-Emotions")
+                newModel = AutoModelForSequenceClassification.from_pretrained("./VRM-Emotions", config=newConfig)

+                sentence = newTokenizer(output, return_tensors="pt", padding=True, truncation=True, max_length=512)
+                with torch.no_grad():
+                    outputs = newModel(**sentence)
+                logits = outputs.logits
+                predicted = torch.argmax(logits, dim=-1).item()
+                label = newModel.config.id2label[predicted]
+
+                return "[" + label + "] " + translated_output.text
+            else:
+                return translated_output.text
+    except Exception as e:
+        raise gr.Error(e)

 with gr.Blocks() as demo:
     with gr.Tab("Prepare Dataset"):
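The second hunk rewrites chat() to translate the incoming message to Japanese with the async Translator context manager, generate a reply with the ELYZA Llama-3 model, translate it back to the source language, and, once a fine-tuned ./VRM-Emotions checkpoint exists, prefix the reply with an emotion label. For reference, a standalone sketch of that labelling step; the checkpoint path mirrors the diff, while the example sentence is an illustrative assumption:

import torch
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

checkpoint = "./VRM-Emotions"  # fine-tuned sequence-classification checkpoint, as in the diff

tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
config = AutoConfig.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, config=config)
model.eval()

text = "今日はとても楽しかったです。"  # illustrative input sentence
inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)

with torch.no_grad():
    logits = model(**inputs).logits

predicted = torch.argmax(logits, dim=-1).item()
print(model.config.id2label[predicted])  # label map comes from the fine-tuned config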