Spaces:
Runtime error
Thomas G. Lopes committed
Commit · 33620bc · 1 Parent(s): 9b31207
better disabled
src/routes/canvas/chat-node.svelte (CHANGED)
@@ -4,7 +4,7 @@
 	import { images } from "$lib/state/images.svelte.js";
 	import { models } from "$lib/state/models.svelte";
 	import { token } from "$lib/state/token.svelte";
-	import type
+	import { PipelineTag, type Model } from "$lib/types.js";
 	import { AsyncQueue } from "$lib/utils/queue.js";
 	import { InferenceClient } from "@huggingface/inference";
 	import type { ChatCompletionInputMessage } from "@huggingface/tasks";

@@ -81,23 +81,42 @@
 		try {
 			const client = new InferenceClient(token.value);

-			const messages: ChatCompletionInputMessage[] =
-			[… 16 further removed lines are not shown in the rendered diff]
+			const messages: ChatCompletionInputMessage[] = await Promise.all(
+				history.flatMap(async n => {
+					const res: ChatCompletionInputMessage[] = [];
+					if (n.data.query) {
+						let content: string | Array<{ type: string; text?: string; image_url?: { url: string } }> = n.data.query;
+
+						// If node has images, convert to multimodal format
+						if (n.data.imageIds && n.data.imageIds.length > 0) {
+							const urls = await Promise.all(n.data.imageIds.map(k => images.get(k)));
+							content = [
+								{
+									type: "text",
+									text: n.data.query,
+								},
+								...n.data.imageIds.map((_imgKey, i) => ({
+									type: "image_url",
+									image_url: { url: urls[i] as string },
+								})),
+							];
+						}
+
+						res.push({
+							role: "user",
+							content,
+						});
+					}
+					if (n.data.response) {
+						res.push({
+							role: "assistant",
+							content: n.data.response,
+						});
+					}
+
+					return res;
+				}),
+			).then(arr => arr.flat());

 			const stream = client.chatCompletionStream(
 				{

@@ -273,10 +292,14 @@
 		return { x: x + 150, y: y + 150 };
 	}

+	const currentModel = $derived(models.all.find(m => m.id === data.modelId));
+	const supportsImgUpload = $derived(currentModel?.pipeline_tag === PipelineTag.ImageTextToText);
+
 	const fileQueue = new AsyncQueue();
 	const fileUpload = new FileUpload({
 		accept: "image/*",
 		multiple: true,
+		disabled: () => !supportsImgUpload,
 		async onAccept(file) {
 			fileQueue.add(async () => {
 				const key = await images.upload(file);

@@ -289,7 +312,6 @@
 				if (fileQueue.queue.length <= 1) fileUpload.clear();
 			});
 		},
-		// TODO: disable on models that don't support img upload
 	});
 </script>

@@ -336,8 +358,12 @@
 	<div class="flex items-center gap-2">
 		<button
 			type="button"
-			class="flex items-center gap-1.5 rounded-md px-2 py-1 text-xs text-gray-600 transition-colors
-			[… removed line not shown in the rendered diff]
+			class="flex items-center gap-1.5 rounded-md px-2 py-1 text-xs text-gray-600 transition-colors"
+			class:opacity-50={!supportsImgUpload}
+			class:cursor-not-allowed={!supportsImgUpload}
+			class:hover:bg-gray-200={supportsImgUpload}
+			class:hover:text-gray-900={supportsImgUpload}
+			title={supportsImgUpload ? "Attach file" : "Model doesn't support images"}
 			{...fileUpload.trigger}
 		>
 			<IconAttachment class="h-4 w-4" />
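
For reference, the user-message shape that the new `messages` block produces looks roughly like the minimal TypeScript sketch below. This is not code from the commit; `toUserMessage`, `query`, and `imageUrls` are hypothetical stand-ins for `n.data.query` and the URLs resolved via `images.get(...)`.

// Sketch only, not part of the commit: mirrors the shape pushed into `res` above.
// Hypothetical names: toUserMessage, query, imageUrls.
function toUserMessage(query: string, imageUrls: string[]) {
	// A text-only turn keeps content as a plain string; a turn with attached
	// images switches to the multimodal array form: one "text" part plus one
	// "image_url" part per image.
	const content =
		imageUrls.length === 0
			? query
			: [
					{ type: "text", text: query },
					...imageUrls.map(url => ({ type: "image_url", image_url: { url } })),
				];
	return { role: "user", content };
}

Assistant turns remain plain strings (`content: n.data.response`), and new image uploads are blocked via `disabled: () => !supportsImgUpload` whenever the selected model's `pipeline_tag` is not `PipelineTag.ImageTextToText`, which is what the button's title text ("Model doesn't support images") reflects.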