from fastapi import FastAPI
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
app = FastAPI()
# Load model and tokenizer without defining cache_dir
tokenizer = AutoTokenizer.from_pretrained("unsloth/Llama-3.2-1B-Instruct")
model = AutoModelForCausalLM.from_pretrained("unsloth/Llama-3.2-1B-Instruct").to("cpu")
@app.get("/")
def home():
    return {"message": "FastAPI running with Llama-3.2-1B-Instruct"}
@app.post("/generate")
def generate_text(prompt: str):
inputs = tokenizer(prompt, return_tensors="pt").to("cpu")
output = model.generate(**inputs, max_length=300)
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
return {"generated_text": generated_text}