Guy24 committed on
Commit f1707a6 · 1 Parent(s): c023ca3

Fix SSR mode error and improve token handling for HF Spaces deployment

Files changed (2)
  1. app.py +12 -4
  2. requirements.txt +3 -1
app.py CHANGED
@@ -2,7 +2,7 @@ import os
 from huggingface_hub import login
 
 # run once at startup
-if "HF_TOKEN" in os.environ:
+if "HF_TOKEN" in os.environ and os.environ["HF_TOKEN"]:
     login(token=os.environ["HF_TOKEN"])
 
 # app.py
@@ -10,9 +10,17 @@ import os; os.environ.setdefault('HF_HOME', '/data/hf-cache')
 os.environ.setdefault('HF_HUB_ENABLE_HF_TRANSFER', '1')
 
 from huggingface_hub import login
-login(os.getenv("HF_TOKEN", ""))
-
-from spaces import GPU
+hf_token = os.getenv("HF_TOKEN", "")
+if hf_token:
+    login(token=hf_token)
+
+try:
+    from spaces import GPU
+except ImportError:
+    # For local testing, create a no-op decorator
+    def GPU(f):
+        return f
+
 import torch
 from exceptiongroup import catch
 from transformers import AutoTokenizer, AutoModelForCausalLM
@@ -321,4 +329,4 @@ with gr.Blocks(theme="soft") as demo:
     )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(ssr_mode=False)
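
Taken together, the app.py hunks make startup work both on Spaces and locally: login() only runs with a non-empty token, the spaces import degrades to a no-op, and launch() disables Gradio 5's server-side rendering, which appears to be the "SSR mode error" named in the commit title. A sketch of the resulting startup block follows; the @GPU usage and the generate() stub are hypothetical illustrations, since the decorated function sits outside this diff:

import os

os.environ.setdefault('HF_HOME', '/data/hf-cache')
os.environ.setdefault('HF_HUB_ENABLE_HF_TRANSFER', '1')

from huggingface_hub import login

# Log in only when a non-empty HF_TOKEN is set; an empty or missing
# token no longer raises at import time.
hf_token = os.getenv("HF_TOKEN", "")
if hf_token:
    login(token=hf_token)

try:
    from spaces import GPU  # provided on HF Spaces (ZeroGPU)
except ImportError:
    # For local testing, create a no-op decorator
    def GPU(f):
        return f

@GPU  # requests a GPU on Spaces; plain pass-through locally
def generate(prompt: str) -> str:  # hypothetical stub
    ...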
requirements.txt CHANGED
@@ -2,4 +2,6 @@ torch
 transformers
 pandas
 tqdm
-scikit-learn
+scikit-learn
+gradio>=5.0.0
+huggingface_hub
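
The requirements additions back the code changes: huggingface_hub supplies login(), and gradio>=5.0.0 is presumably pinned because ssr_mode is a Gradio 5 launch parameter. A quick sanity check that the installed Gradio supports it (a sketch, assuming the requirements above are installed):

import inspect

import gradio

# True on Gradio 5.x, where Blocks.launch() accepts an ssr_mode flag
print("ssr_mode" in inspect.signature(gradio.Blocks.launch).parameters)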