Rm advanced options for config
src/lib/components/InferencePlayground/InferencePlaygroundGenerationConfig.svelte
CHANGED
@@ -3,7 +3,6 @@
 
 	import {
 		GENERATION_CONFIG_KEYS,
-		GENERATION_CONFIG_KEYS_ADVANCED,
 		GENERATION_CONFIG_SETTINGS,
 	} from "./generationConfigSettings";
 
@@ -44,41 +43,6 @@
 	</div>
 {/each}
 
-<details>
-	<summary>Advanced Options</summary>
-	<div class="mt-4 flex flex-col gap-y-5">
-		{#each GENERATION_CONFIG_KEYS_ADVANCED as key}
-			{@const settings = GENERATION_CONFIG_SETTINGS[key]}
-			<div>
-				<div class="flex items-center justify-between">
-					<label for="temperature-range" class="mb-2 block text-sm font-medium text-gray-900 dark:text-white"
-						>{settings.label}</label
-					>
-					<input
-						type="number"
-						class="w-18 rounded border bg-transparent px-1 py-0.5 text-right text-sm dark:border-gray-700"
-						min={settings.min}
-						max={settings.max}
-						step={settings.step}
-						value={conversation.config[key] ?? settings.default}
-						on:input={e => (conversation.config[key] = Number(e.currentTarget.value))}
-					/>
-				</div>
-				<input
-					id="temperature-range"
-					type="range"
-					min={settings.min}
-					max={settings.max}
-					step={settings.step}
-					value={conversation.config[key] ?? settings.default}
-					on:input={e => (conversation.config[key] = Number(e.currentTarget.value))}
-					class="h-2 w-full cursor-pointer appearance-none rounded-lg bg-gray-200 accent-black dark:bg-gray-700 dark:accent-blue-500"
-				/>
-			</div>
-		{/each}
-	</div>
-</details>
-
 <div class="mt-2">
 	<label class="flex cursor-pointer items-center justify-between">
 		<input type="checkbox" bind:checked={conversation.streaming} class="peer sr-only" />
src/lib/components/InferencePlayground/generationConfigSettings.ts
CHANGED
@@ -32,9 +32,7 @@ export const GENERATION_CONFIG_SETTINGS: Record<string, GenerationKeySettings> =
 
 export type GenerationConfigKey = keyof typeof GENERATION_CONFIG_SETTINGS;
 
-export const GENERATION_CONFIG_KEYS: GenerationConfigKey[] = ["temperature", "max_tokens"];
-
-export const GENERATION_CONFIG_KEYS_ADVANCED: GenerationConfigKey[] = ["top_p"];
+export const GENERATION_CONFIG_KEYS: GenerationConfigKey[] = ["temperature", "max_tokens", "top_p"];
 
 export type GenerationConfig = Record<GenerationConfigKey, number>;
 
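With top_p folded into GENERATION_CONFIG_KEYS, the playground renders it with the same slider loop as temperature and max_tokens, so the separate advanced key list is no longer needed. A minimal sketch of how the two exports relate, assuming the GenerationKeySettings fields the component reads (label, min, max, step, default); the numeric values below are illustrative placeholders, not the repo's actual settings:

// Sketch only: how the merged key list lines up with the settings record.
// Field names come from how the Svelte component reads them (settings.label,
// settings.min, settings.max, settings.step, settings.default); the numbers
// here are placeholder values, not the real ones from the repo.
interface GenerationKeySettings {
	label: string;
	min: number;
	max: number;
	step: number;
	default: number;
}

const GENERATION_CONFIG_SETTINGS: Record<string, GenerationKeySettings> = {
	temperature: { label: "Temperature", min: 0, max: 2, step: 0.1, default: 1 },
	max_tokens: { label: "Max tokens", min: 1, max: 4096, step: 1, default: 512 },
	top_p: { label: "Top-P", min: 0, max: 1, step: 0.01, default: 0.9 },
};

type GenerationConfigKey = keyof typeof GENERATION_CONFIG_SETTINGS;

// One list now drives the whole config UI; top_p no longer needs a separate
// "Advanced Options" section in the component.
const GENERATION_CONFIG_KEYS: GenerationConfigKey[] = ["temperature", "max_tokens", "top_p"];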
src/routes/+page.server.ts
CHANGED
@@ -23,7 +23,7 @@ export const load: PageServerLoad = async ({ fetch }) => {
 		},
 	});
 	if (!res.ok) {
-		console.error(
+		console.error(`Error fetching tokenizer file for ${model.id}`, res.status, res.statusText);
 		return null; // Ignore failed requests by returning null
 	}
 	const tokenizerConfig = await res.json();
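For context, the expanded log line sits inside the load function's tokenizer-config fetch. A minimal sketch of that flow follows; the helper name and URL shape are assumptions for illustration, and only the res.ok check, the error log, the "return null" comment, and the res.json() call come from the diff above:

// Sketch only: error handling around the tokenizer config fetch.
// fetchTokenizerConfig and the URL below are hypothetical, not the repo's code.
async function fetchTokenizerConfig(
	fetch: typeof globalThis.fetch,
	model: { id: string }
): Promise<unknown | null> {
	const res = await fetch(`https://huggingface.co/${model.id}/raw/main/tokenizer_config.json`);
	if (!res.ok) {
		console.error(`Error fetching tokenizer file for ${model.id}`, res.status, res.statusText);
		return null; // Ignore failed requests by returning null
	}
	const tokenizerConfig = await res.json();
	return tokenizerConfig;
}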