whitphx HF Staff committed on
Commit
f76cf5a
·
1 Parent(s): 6da0494
Files changed (1) hide show
  1. client/src/index.ts +4 -9
client/src/index.ts CHANGED
@@ -5,6 +5,7 @@ import { hideBin } from "yargs/helpers";
5
  import { table } from "table";
6
  import prompts from "prompts";
7
  import { searchModels, formatModel } from "./hf-api.js";
 
8
  import type { ModelEntry } from "@huggingface/hub";
9
 
10
  const SERVER_URL = process.env.BENCH_SERVER_URL || "http://localhost:7860";
@@ -101,7 +102,7 @@ yargs(hideBin(process.argv))
101
  })
102
  .positional("task", {
103
  describe: "Task to perform (e.g., feature-extraction, fill-mask)",
104
- type: "string",
105
  demandOption: true,
106
  })
107
  .option("platform", {
@@ -274,7 +275,7 @@ yargs(hideBin(process.argv))
274
  return yargs
275
  .positional("task", {
276
  describe: "Task type (e.g., feature-extraction, text-classification, fill-mask)",
277
- type: "string",
278
  demandOption: true,
279
  })
280
  .positional("query", {
@@ -285,11 +286,6 @@ yargs(hideBin(process.argv))
285
  describe: "Maximum number of models to benchmark",
286
  type: "number",
287
  })
288
- .option("sort", {
289
- describe: "Sort models by",
290
- choices: ["downloads", "likes", "lastModified", "trending"] as const,
291
- default: "downloads" as const,
292
- })
293
  .option("platform", {
294
  describe: "Platform(s) to run on (can specify multiple)",
295
  type: "array",
@@ -336,10 +332,9 @@ yargs(hideBin(process.argv))
336
  console.log(`Searching for ${argv.task} models${argv.query ? ` matching "${argv.query}"` : ""}...\n`);
337
 
338
  const models = await searchModels({
339
- task: argv.task,
340
  search: argv.query,
341
  limit: argv.limit,
342
- sort: argv.sort,
343
  });
344
 
345
  if (models.length === 0) {
 
5
  import { table } from "table";
6
  import prompts from "prompts";
7
  import { searchModels, formatModel } from "./hf-api.js";
8
+ import { PIPELINE_DATA } from "@huggingface/tasks";
9
  import type { ModelEntry } from "@huggingface/hub";
10
 
11
  const SERVER_URL = process.env.BENCH_SERVER_URL || "http://localhost:7860";
 
102
  })
103
  .positional("task", {
104
  describe: "Task to perform (e.g., feature-extraction, fill-mask)",
105
+ choices: Object.keys(PIPELINE_DATA),
106
  demandOption: true,
107
  })
108
  .option("platform", {
 
275
  return yargs
276
  .positional("task", {
277
  describe: "Task type (e.g., feature-extraction, text-classification, fill-mask)",
278
+ choices: Object.keys(PIPELINE_DATA),
279
  demandOption: true,
280
  })
281
  .positional("query", {
 
286
  describe: "Maximum number of models to benchmark",
287
  type: "number",
288
  })
 
 
 
 
 
289
  .option("platform", {
290
  describe: "Platform(s) to run on (can specify multiple)",
291
  type: "array",
 
332
  console.log(`Searching for ${argv.task} models${argv.query ? ` matching "${argv.query}"` : ""}...\n`);
333
 
334
  const models = await searchModels({
335
+ task: argv.task as keyof typeof PIPELINE_DATA,
336
  search: argv.query,
337
  limit: argv.limit,
 
338
  });
339
 
340
  if (models.length === 0) {