nttwt1597 committed on
Commit cb69176 · verified · 1 Parent(s): 598633b

Update app.py

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -22,7 +22,7 @@ import pandas as pd
 
 import gradio as gr
 
-model_name = "meta-llama/Meta-Llama-3.1-8B-Instruct"
+model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
 
 tokenizer = AutoTokenizer.from_pretrained(
     model_name,
@@ -44,8 +44,8 @@ quantization_config = BitsAndBytesConfig(
 
 # Get the model
 llm = HuggingFaceLLM(
-    model_name="meta-llama/Meta-Llama-3.1-8B-Instruct",
-    tokenizer_name="meta-llama/Meta-Llama-3.1-8B-Instruct",
+    model_name="meta-llama/Meta-Llama-3-8B-Instruct",
+    tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
     model_kwargs={
         "token": token_r,
         "quantization_config": quantization_config
@@ -248,9 +248,9 @@ demo = gr.Interface(
     inputs=prompt_box,
     outputs=output_box,
     # allow_flagging='auto',
-    # allow_flagging="manual",
-    # flagging_options=["appropriate","inappropriate","incorrect",],
-    # flagging_callback=hf_writer,
+    allow_flagging="manual",
+    flagging_options=["appropriate","inappropriate","incorrect",],
+    flagging_callback=hf_writer,
    # live=True
 )
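
For context on the first two hunks: the commit switches the model id from Meta-Llama-3.1-8B-Instruct to Meta-Llama-3-8B-Instruct in both the tokenizer and the LlamaIndex HuggingFaceLLM wrapper. The surrounding code is not shown in the diff, so the following is only a minimal sketch of how those pieces typically fit together; the import path, the 4-bit BitsAndBytesConfig values, and the token_r placeholder are assumptions, not values taken from app.py.

import torch
from transformers import AutoTokenizer, BitsAndBytesConfig
# Assumed import path for llama-index >= 0.10; older releases expose
# HuggingFaceLLM from llama_index.llms instead.
from llama_index.llms.huggingface import HuggingFaceLLM

model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
token_r = "hf_..."  # placeholder for the Hugging Face read token used in app.py

# Hypothetical 4-bit quantization config; the actual values are not in this diff.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

# Tokenizer and LLM both point at the same repo id, as in the updated hunks.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=token_r)

llm = HuggingFaceLLM(
    model_name=model_name,
    tokenizer_name=model_name,
    model_kwargs={
        "token": token_r,
        "quantization_config": quantization_config,
    },
)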
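
The third hunk uncomments Gradio's manual flagging instead of leaving it disabled. hf_writer is defined elsewhere in app.py and does not appear in this diff; a common choice is gr.HuggingFaceDatasetSaver, which appends flagged samples to a Hugging Face dataset repo. A hedged sketch of that wiring (Gradio 3/4 API, with a hypothetical dataset name and a stand-in prediction function):

import gradio as gr

# Assumption: hf_writer is a HuggingFaceDatasetSaver; the real app.py may define it differently.
hf_writer = gr.HuggingFaceDatasetSaver(
    hf_token="hf_...",                 # placeholder write token
    dataset_name="flagged-responses",  # hypothetical dataset repo name
)

prompt_box = gr.Textbox(label="Prompt")
output_box = gr.Textbox(label="Response")

demo = gr.Interface(
    fn=lambda prompt: prompt,          # stand-in for the app's generation function
    inputs=prompt_box,
    outputs=output_box,
    allow_flagging="manual",
    flagging_options=["appropriate", "inappropriate", "incorrect"],
    flagging_callback=hf_writer,
)

if __name__ == "__main__":
    demo.launch()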