Update app.py
Browse files
Replace large model with small model for initial tasks.
app.py
CHANGED
|
@@ -405,6 +405,38 @@ def writing_task(prompt: str) -> str:
|
|
| 405 |
|
| 406 |
return content
|
| 407 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 408 |
|
| 409 |
def smol_lm_jd_process(job_description, system_prompt, max_new_tokens=512):
|
| 410 |
prompt = f"""<|im_start|>system
|
|
@@ -419,7 +451,8 @@ def smol_lm_jd_process(job_description, system_prompt, max_new_tokens=512):
|
|
| 419 |
# start_idx = response.find("<|im_start|>assistant")
|
| 420 |
# end_idx = response.find("<|im_end|>", start_idx)
|
| 421 |
# response = response[start_idx + len("<|im_start|>assistant\n"):end_idx].strip()
|
| 422 |
-
response = writing_task(prompt)
|
|
|
|
| 423 |
return response
|
| 424 |
|
| 425 |
def process_job_description(company_name, company_url, job_description, resume):
|
|
|
|
| 405 |
|
| 406 |
return content
|
| 407 |
|
| 408 |
def smol_writing_task(prompt: str) -> str:
    """Run *prompt* against SmolLM3-3B via the HF Inference API and return
    the answer with the chain-of-thought preamble stripped.

    Parameters
    ----------
    prompt : str
        Fully formatted chat prompt for the model.

    Returns
    -------
    str
        Everything after the first ``</think>`` marker in the raw response,
        or ``"No data found."`` when the response contains no such marker.
    """
    # NOTE(review): assumes `getenv` (os) and `InferenceClient`
    # (huggingface_hub) are imported at module level — confirm against the
    # file header, which is outside this view.
    api_token = getenv("HF_TOKEN")

    client = InferenceClient(
        provider="hf-inference",
        api_key=api_token,
    )

    completion = client.chat.completions.create(
        model="HuggingFaceTB/SmolLM3-3B",
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
    )

    raw_content = completion.choices[0].message["content"]
    print(f"Raw content: {raw_content}")

    # Split only on the FIRST marker: the previous
    # "".join(raw_content.split("</think>")[1:]) silently deleted any later
    # "</think>" occurrences from the returned text. maxsplit=1 keeps the
    # tail byte-for-byte. (Also dropped: an unused `think` local.)
    parts = raw_content.split("</think>", 1)
    if len(parts) > 1:
        content = parts[1]
    else:
        # No reasoning marker in the response — nothing tagged as the answer.
        content = "No data found."

    return content
|
| 440 |
|
| 441 |
def smol_lm_jd_process(job_description, system_prompt, max_new_tokens=512):
|
| 442 |
prompt = f"""<|im_start|>system
|
|
|
|
| 451 |
# start_idx = response.find("<|im_start|>assistant")
|
| 452 |
# end_idx = response.find("<|im_end|>", start_idx)
|
| 453 |
# response = response[start_idx + len("<|im_start|>assistant\n"):end_idx].strip()
|
| 454 |
+
# response = writing_task(prompt)
|
| 455 |
+
response = smol_writing_task(prompt)
|
| 456 |
return response
|
| 457 |
|
| 458 |
def process_job_description(company_name, company_url, job_description, resume):
|