import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from PyPDF2 import PdfReader
import torch
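
# Load the tokenizer and model once at start-up; device_map="auto" places the
# weights on a GPU automatically when one is available.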
model_name = "ibm-granite/granite-3.0-8b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
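
# Extracted PDF text, shared between the upload handler and the chat handler.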
pdf_context = ""
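
# Gradio callback: read every page of the uploaded PDF into pdf_context.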
def upload_pdf(file):
    global pdf_context
    if file is None:
        return "No file uploaded."
    reader = PdfReader(file)
    pdf_context = ""
    for page in reader.pages:
        # extract_text() can come back empty or None for image-only pages,
        # so guard the concatenation.
        pdf_context += (page.extract_text() or "") + "\n"
    return "PDF uploaded and text extracted successfully!"
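
# Gradio chat callback: rebuild the full conversation, prepend the PDF text as
# a system prompt, and generate the next assistant turn.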
def chat(message, history):
    # Ground the model on the extracted PDF text via the system prompt.
    messages = [{"role": "system", "content": f"You are a helpful assistant. Answer based on this context: {pdf_context}"}]
    # Replay prior turns; with Gradio's default tuple format, history arrives
    # as a list of (user, assistant) pairs.
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    # do_sample=True is required for temperature to have any effect; without it
    # generate() falls back to greedy decoding and ignores the setting.
    outputs = model.generate(**inputs, max_new_tokens=300, do_sample=True, temperature=0.7)
    # Decode only the newly generated tokens, not the echoed prompt.
    response = tokenizer.decode(outputs[0][len(inputs["input_ids"][0]):], skip_special_tokens=True)
    return response
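
# Assemble the UI: a file-upload row, a status box, and the chat widget.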
with gr.Blocks() as demo:
    gr.Markdown("# Basic PDF Q&A Chat with IBM Granite")

    with gr.Row():
        pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"])
        upload_btn = gr.Button("Upload PDF")

    status = gr.Textbox(label="Status")

    chat_interface = gr.ChatInterface(
        fn=chat,
        title="Ask questions about the PDF"
    )

    upload_btn.click(upload_pdf, inputs=pdf_input, outputs=status)

demo.launch()