Spaces:
Sleeping
Sleeping
edbeeching
committed on
Commit
·
002e03d
1
Parent(s):
0fb1b95
adds pro options
Browse files
app.py
CHANGED
|
@@ -103,11 +103,15 @@ def validate_request(request: GenerationRequest, oauth_token: Optional[Union[gr.
|
|
| 103 |
|
| 104 |
|
| 105 |
# Check user tier and apply appropriate limits
|
| 106 |
-
|
|
|
|
| 107 |
max_samples = MAX_SAMPLES_PRO if is_pro else MAX_SAMPLES_FREE
|
| 108 |
|
| 109 |
if request.num_output_examples > max_samples:
|
| 110 |
-
|
|
|
|
|
|
|
|
|
|
| 111 |
raise Exception(f"Requested number of output examples {request.num_output_examples} exceeds the max limit of {max_samples} for {user_tier} users.")
|
| 112 |
|
| 113 |
# check the prompt column exists in the dataset
|
|
@@ -203,10 +207,16 @@ def add_request_to_db(request: GenerationRequest):
|
|
| 203 |
|
| 204 |
def main():
|
| 205 |
with gr.Blocks(title="Synthetic Data Generation") as demo:
|
| 206 |
-
gr.HTML("<h3 style='text-align:center'>Generate synthetic data with AI models.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 207 |
|
| 208 |
pro_message = gr.Markdown(visible=False)
|
| 209 |
-
main_interface = gr.Column(visible=
|
| 210 |
|
| 211 |
# Store the current oauth token for use in submit_request
|
| 212 |
current_oauth_token = gr.State(None)
|
|
@@ -264,8 +274,8 @@ def main():
|
|
| 264 |
# model_token = gr.Textbox(label="Model Token (Optional)", type="password", placeholder="Your HF token with read/write access to the model...")
|
| 265 |
with gr.Group():
|
| 266 |
gr.Markdown("## Dataset information")
|
| 267 |
-
# Dynamic user limit info
|
| 268 |
-
user_limit_info = gr.Markdown(value="", visible=True)
|
| 269 |
with gr.Row():
|
| 270 |
with gr.Column():
|
| 271 |
input_dataset_name = gr.Textbox(label="Input Dataset Name", placeholder="e.g., simplescaling/s1K-1.1")
|
|
@@ -273,7 +283,7 @@ def main():
|
|
| 273 |
|
| 274 |
with gr.Column():
|
| 275 |
output_dataset_name = gr.Textbox(label="Output Dataset Name", placeholder="e.g., my-generated-dataset, must be unique. Will be created under the org 'synthetic-data-universe'")
|
| 276 |
-
num_output_samples = gr.Slider(label="Number of samples, leave as '0' for all", value=0, minimum=0, maximum=
|
| 277 |
|
| 278 |
with gr.Accordion("Advanced Options", open=False):
|
| 279 |
with gr.Row():
|
|
@@ -361,7 +371,7 @@ def main():
|
|
| 361 |
|
| 362 |
def update_user_limits(oauth_token):
|
| 363 |
if oauth_token is None:
|
| 364 |
-
return ""
|
| 365 |
|
| 366 |
is_pro = verify_pro_status(oauth_token)
|
| 367 |
if is_pro:
|
|
@@ -370,14 +380,25 @@ def main():
|
|
| 370 |
return "🤗 **Free User**: You can generate up to 100 samples per request. [Upgrade to PRO](http://huggingface.co/subscribe/pro?source=synthetic-data-universe) for 10,000 samples."
|
| 371 |
|
| 372 |
def control_access(profile: Optional[gr.OAuthProfile] = None, oauth_token: Optional[gr.OAuthToken] = None):
|
| 373 |
-
|
| 374 |
-
# Allow all users but show different messaging, and store the token
|
| 375 |
limit_msg = update_user_limits(oauth_token)
|
| 376 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 377 |
|
| 378 |
-
login_button = gr.LoginButton() # this is required or AUTH will not work
|
| 379 |
|
| 380 |
-
|
|
|
|
|
|
|
|
|
|
| 381 |
demo.queue(max_size=None, default_concurrency_limit=None).launch(show_error=True)
|
| 382 |
|
| 383 |
if __name__ == "__main__":
|
|
|
|
| 103 |
|
| 104 |
|
| 105 |
# Check user tier and apply appropriate limits
|
| 106 |
+
# Anonymous users (oauth_token is None) are treated as free tier
|
| 107 |
+
is_pro = verify_pro_status(oauth_token) if oauth_token else False
|
| 108 |
max_samples = MAX_SAMPLES_PRO if is_pro else MAX_SAMPLES_FREE
|
| 109 |
|
| 110 |
if request.num_output_examples > max_samples:
|
| 111 |
+
if oauth_token is None:
|
| 112 |
+
user_tier = "anonymous"
|
| 113 |
+
else:
|
| 114 |
+
user_tier = "PRO/Enterprise" if is_pro else "free"
|
| 115 |
raise Exception(f"Requested number of output examples {request.num_output_examples} exceeds the max limit of {max_samples} for {user_tier} users.")
|
| 116 |
|
| 117 |
# check the prompt column exists in the dataset
|
|
|
|
| 207 |
|
| 208 |
def main():
|
| 209 |
with gr.Blocks(title="Synthetic Data Generation") as demo:
|
| 210 |
+
gr.HTML("<h3 style='text-align:center'>Generate synthetic data with AI models. Free to use! Sign in for PRO benefits (10k samples vs 100). <a href='http://huggingface.co/subscribe/pro?source=synthetic-data-universe' target='_blank'>Upgrade to PRO</a></h3>", elem_id="sub_title")
|
| 211 |
+
|
| 212 |
+
# Add sign-in button at the top
|
| 213 |
+
with gr.Row():
|
| 214 |
+
gr.Markdown("") # Empty space for alignment
|
| 215 |
+
login_button = gr.LoginButton(value="🔒 Sign in for PRO benefits", size="sm")
|
| 216 |
+
gr.Markdown("") # Empty space for alignment
|
| 217 |
|
| 218 |
pro_message = gr.Markdown(visible=False)
|
| 219 |
+
main_interface = gr.Column(visible=True)
|
| 220 |
|
| 221 |
# Store the current oauth token for use in submit_request
|
| 222 |
current_oauth_token = gr.State(None)
|
|
|
|
| 274 |
# model_token = gr.Textbox(label="Model Token (Optional)", type="password", placeholder="Your HF token with read/write access to the model...")
|
| 275 |
with gr.Group():
|
| 276 |
gr.Markdown("## Dataset information")
|
| 277 |
+
# Dynamic user limit info - default to anonymous user
|
| 278 |
+
user_limit_info = gr.Markdown(value="🤗 **Anonymous User**: You can generate up to 100 samples per request.", visible=True)
|
| 279 |
with gr.Row():
|
| 280 |
with gr.Column():
|
| 281 |
input_dataset_name = gr.Textbox(label="Input Dataset Name", placeholder="e.g., simplescaling/s1K-1.1")
|
|
|
|
| 283 |
|
| 284 |
with gr.Column():
|
| 285 |
output_dataset_name = gr.Textbox(label="Output Dataset Name", placeholder="e.g., my-generated-dataset, must be unique. Will be created under the org 'synthetic-data-universe'")
|
| 286 |
+
num_output_samples = gr.Slider(label="Number of samples, leave as '0' for all", value=0, minimum=0, maximum=MAX_SAMPLES_FREE, step=1)
|
| 287 |
|
| 288 |
with gr.Accordion("Advanced Options", open=False):
|
| 289 |
with gr.Row():
|
|
|
|
| 371 |
|
| 372 |
def update_user_limits(oauth_token):
|
| 373 |
if oauth_token is None:
|
| 374 |
+
return "🤗 **Anonymous User**: You can generate up to 100 samples per request. [Sign in](javascript:void(0)) for PRO benefits (10,000 samples)."
|
| 375 |
|
| 376 |
is_pro = verify_pro_status(oauth_token)
|
| 377 |
if is_pro:
|
|
|
|
| 380 |
return "🤗 **Free User**: You can generate up to 100 samples per request. [Upgrade to PRO](http://huggingface.co/subscribe/pro?source=synthetic-data-universe) for 10,000 samples."
|
| 381 |
|
| 382 |
def control_access(profile: Optional[gr.OAuthProfile] = None, oauth_token: Optional[gr.OAuthToken] = None):
|
| 383 |
+
# Always show the interface, whether user is logged in or not
|
|
|
|
| 384 |
limit_msg = update_user_limits(oauth_token)
|
| 385 |
+
|
| 386 |
+
# Update slider maximum based on user tier
|
| 387 |
+
if oauth_token is None:
|
| 388 |
+
max_samples = MAX_SAMPLES_FREE
|
| 389 |
+
else:
|
| 390 |
+
is_pro = verify_pro_status(oauth_token)
|
| 391 |
+
max_samples = MAX_SAMPLES_PRO if is_pro else MAX_SAMPLES_FREE
|
| 392 |
+
|
| 393 |
+
slider_update = gr.update(maximum=max_samples)
|
| 394 |
+
|
| 395 |
+
return gr.update(visible=True), gr.update(visible=False), oauth_token, limit_msg, slider_update
|
| 396 |
|
|
|
|
| 397 |
|
| 398 |
+
# Handle login state changes - LoginButton automatically handles auth state changes
|
| 399 |
+
# The demo.load will handle both initial load and auth changes
|
| 400 |
+
|
| 401 |
+
demo.load(control_access, inputs=None, outputs=[main_interface, pro_message, current_oauth_token, user_limit_info, num_output_samples])
|
| 402 |
demo.queue(max_size=None, default_concurrency_limit=None).launch(show_error=True)
|
| 403 |
|
| 404 |
if __name__ == "__main__":
|