from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Model checkpoint used for answer generation.
# NOTE: pkshatech/GLuCoSE-base-ja is a Japanese sentence-embedding (encoder-only)
# model, so loading it with AutoModelForSeq2SeqLM will fail; substitute an
# encoder-decoder generation checkpoint (e.g. a Japanese T5 variant) for
# generate() to work.
model_name = "pkshatech/GLuCoSE-base-ja"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)


def generate_answer(question):
    # Tokenize the question into model inputs (PyTorch tensors).
    inputs = tokenizer(question, return_tensors="pt")
    # Generate an answer sequence with the model's default decoding settings.
    outputs = model.generate(**inputs)
    # Decode the generated token IDs back into a plain-text answer.
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer
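# Minimal local sanity check, assuming the loaded model actually supports text
# generation; the question string below is purely illustrative:
#   print(generate_answer("What is retrieval-augmented generation?"))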
from flask import Flask, request, jsonify

app = Flask(__name__)


@app.route('/', methods=['POST'])
def rag():
    # NOTE: no retrieval step is wired in yet; the question goes straight to the model.
    # Parse the JSON request body and pull out the question text.
    data = request.get_json(silent=True)
    question = data.get('question') if data else None
    if not question:
        # Reject requests that do not carry a question.
        return jsonify({'error': 'question is required'}), 400
    # Generate an answer and return it as JSON.
    answer = generate_answer(question)
    return jsonify({'answer': answer})


if __name__ == '__main__':
    # debug=True is for local development only; disable it in production.
    app.run(debug=True, host='0.0.0.0', port=5000)
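# Example request once the server is running locally (a sketch; adjust the host
# and port to your deployment):
#   curl -X POST http://localhost:5000/ \
#        -H "Content-Type: application/json" \
#        -d '{"question": "..."}'
# The endpoint responds with JSON of the form {"answer": "..."}.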