import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from PyPDF2 import PdfReader
import torch

# Load the IBM Granite model (swap in a smaller variant such as
# ibm-granite/granite-3.0-2b-instruct if GPU memory is limited)
model_name = "ibm-granite/granite-3.0-8b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, device_map="auto", torch_dtype=torch.bfloat16
)
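# Rough sizing note (an estimate, not measured here): the 8B model needs on the
# order of 16 GB of accelerator memory in bfloat16; device_map="auto" relies on
# accelerate to place weights on the available devices, offloading to CPU if needed.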

# Global variable for PDF context
pdf_context = ""

def upload_pdf(file):
    global pdf_context
    if file is None:
        return "No file uploaded."
    reader = PdfReader(file)
    pdf_context = ""
    for page in reader.pages:
        # extract_text() returns None for pages with no extractable text
        pdf_context += (page.extract_text() or "") + "\n"
    return "PDF uploaded and text extracted successfully!"

def chat(message, history):
    # Build the prompt from a system message (containing the extracted PDF text),
    # the previous turns, and the new user message. The full PDF text is resent on
    # every turn, so very long documents may exceed the model's context window.
    messages = [{"role": "system", "content": f"You are a helpful assistant. Answer based on this context: {pdf_context}"}]
    for user_msg, assistant_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Apply the chat template and generate a reply
    input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=300, do_sample=True, temperature=0.7)
    # Decode only the newly generated tokens, skipping the prompt
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    return response

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Basic PDF Q&A Chat with IBM Granite")
    
    with gr.Row():
        pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"])
        upload_btn = gr.Button("Upload PDF")
    
    status = gr.Textbox(label="Status")
    
    chat_interface = gr.ChatInterface(
        fn=chat,
        title="Ask questions about the PDF"
    )
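    # Assuming Gradio's default tuple-style chat history, chat() receives prior
    # turns as (user, assistant) pairs, which is what its history loop iterates over.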
    
    upload_btn.click(upload_pdf, inputs=pdf_input, outputs=status)

demo.launch()
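
# Assumed environment (not pinned by this file):
#   pip install gradio transformers torch PyPDF2 accelerate
# accelerate is required for device_map="auto" when loading the model.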