burtenshaw (HF Staff) committed
Commit e40a429 · verified · 1 Parent(s): bb132a3

Update app.py

Files changed (1): app.py (+12 −3)
app.py CHANGED
@@ -20,12 +20,19 @@ tokenizer, model = load_model()
 
 @spaces.GPU
 def generate(prompt, history):
-    conversation = [
+
+    if len(history) > 0:
+        messages = history + [
         {"role": "user", "content": prompt},
     ]
+    else:
+        messages = [
+            {"role": "user", "content": prompt},
+        ]
 
+    print(history)
     inputs = tokenizer.apply_chat_template(
-        conversation,
+        messages,
         add_generation_prompt=True,
         tokenize=True,
         return_tensors="pt",
@@ -39,7 +46,9 @@ def generate(prompt, history):
     )
 
     generated_tokens = outputs[0, inputs.input_ids.shape[1]:]
-    return tokenizer.decode(generated_tokens, skip_special_tokens=True)
+    output = tokenizer.decode(generated_tokens, skip_special_tokens=True)
+
+    return output
 
 
 demo = gr.ChatInterface(fn=generate, type="messages", examples=["hello", "hola", "merhaba"], title="NanoChat")
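
For context, a minimal sketch of how generate() could read after this commit. The model.generate(...) call and its arguments sit between the two hunks and are not shown, so that call, the return_dict=True flag, the device move, and max_new_tokens below are assumptions rather than the Space's actual code; the one-line messages construction is an equivalent simplification of the commit's if/else (Gradio passes history as [] on the first turn), and the debug print(history) is dropped.

# Sketch of app.py's generate() after this commit; assumptions are noted inline.
import gradio as gr
import spaces

tokenizer, model = load_model()  # load_model() is defined earlier in app.py, outside the diff

@spaces.GPU
def generate(prompt, history):
    # With type="messages", Gradio supplies history as a list of
    # {"role": ..., "content": ...} dicts ([] on the first turn), so the
    # commit's if/else collapses to a single concatenation:
    messages = history + [{"role": "user", "content": prompt}]

    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_tensors="pt",
        return_dict=True,  # assumption: implied by inputs.input_ids below, but outside the hunks
    ).to(model.device)  # assumption: inputs must sit on the GPU the decorator allocates

    # Assumption: the real sampling arguments live between the two hunks.
    outputs = model.generate(**inputs, max_new_tokens=256)

    # Drop the prompt tokens and decode only the newly generated ones.
    generated_tokens = outputs[0, inputs.input_ids.shape[1]:]
    return tokenizer.decode(generated_tokens, skip_special_tokens=True)

demo = gr.ChatInterface(fn=generate, type="messages", examples=["hello", "hola", "merhaba"], title="NanoChat")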