New model dropped
- app/api/ask/route.ts +39 -18
- assets/deepseek.svg +1 -0
- assets/kimi.svg +1 -0
- assets/qwen.svg +1 -0
- components/editor/ask-ai/fake-ask.tsx +75 -0
- components/editor/ask-ai/index.tsx +12 -13
- components/editor/ask-ai/loading.tsx +11 -2
- components/editor/ask-ai/settings.tsx +10 -1
- components/my-projects/index.tsx +0 -1
- components/not-logged/not-logged.tsx +2 -4
- lib/max-tokens.ts +95 -0
- lib/providers.ts +18 -9
- lib/templates.ts +0 -0
app/api/ask/route.ts
CHANGED

@@ -18,6 +18,7 @@ import {
   UPDATE_PAGE_END,
   PROMPT_FOR_PROJECT_NAME,
 } from "@/lib/prompts";
+import { calculateMaxTokens, estimateInputTokens, getProviderSpecificConfig } from "@/lib/max-tokens";
 import MY_TOKEN_KEY from "@/lib/get-cookie-name";
 import { Page } from "@/types";
 import { createRepo, RepoDesignation, uploadFiles } from "@huggingface/hub";
@@ -25,6 +26,7 @@ import { isAuthenticated } from "@/lib/auth";
 import { getBestProvider } from "@/lib/best-provider";
 import { rewritePrompt } from "@/lib/rewrite-prompt";
 import { COLORS } from "@/lib/utils";
+import { templates } from "@/lib/templates";
 
 const ipAddresses = new Map();
 
@@ -122,6 +124,21 @@ export async function POST(request: NextRequest) {
   // let completeResponse = "";
   try {
     const client = new InferenceClient(token);
+
+    // Calculate dynamic max_tokens based on provider and input size
+    const systemPrompt = INITIAL_SYSTEM_PROMPT + (enhancedSettings.isActive ? `
+Here are some examples of designs that you can inspire from:
+${templates.map((template) => `- ${template}`).join("\n")}
+IMPORTANT: Use the templates as inspiration, but do not copy them exactly.
+Try to create a unique design, based on the templates, but not exactly like them, mostly depending on the user's prompt. These are just examples, do not copy them exactly.
+` : "");
+
+    const userPrompt = `${rewrittenPrompt}${redesignMarkdown ? `\n\nHere is my current design as a markdown:\n\n${redesignMarkdown}\n\nNow, please create a new design based on this markdown. Use the images in the markdown.` : ""}`;
+
+    const estimatedInputTokens = estimateInputTokens(systemPrompt, userPrompt);
+    const dynamicMaxTokens = calculateMaxTokens(selectedProvider, estimatedInputTokens, true);
+    const providerConfig = getProviderSpecificConfig(selectedProvider, dynamicMaxTokens);
+
     const chatCompletion = client.chatCompletionStream(
       {
         model: selectedModel.value,
@@ -129,14 +146,14 @@ export async function POST(request: NextRequest) {
         messages: [
           {
             role: "system",
-            content: …
+            content: systemPrompt,
           },
           {
             role: "user",
-            content: …
+            content: userPrompt
           },
         ],
-        …
+        ...providerConfig,
       },
       billTo ? { billTo } : {}
     );
@@ -297,6 +314,21 @@ export async function PUT(request: NextRequest) {
   const selectedProvider = await getBestProvider(selectedModel.value, provider)
 
   try {
+    // Calculate dynamic max_tokens for PUT request
+    const systemPrompt = FOLLOW_UP_SYSTEM_PROMPT + (isNew ? PROMPT_FOR_PROJECT_NAME : "");
+    const userContext = previousPrompts
+      ? `Also here are the previous prompts:\n\n${previousPrompts.map((p: string) => `- ${p}`).join("\n")}`
+      : "You are modifying the HTML file based on the user's request.";
+    const assistantContext = `${
+      selectedElementHtml
+        ? `\n\nYou have to update ONLY the following element, NOTHING ELSE: \n\n\`\`\`html\n${selectedElementHtml}\n\`\`\` Could be in multiple pages, if so, update all the pages.`
+        : ""
+    }. Current pages: ${pages?.map((p: Page) => `- ${p.path} \n${p.html}`).join("\n")}. ${files?.length > 0 ? `Current images: ${files?.map((f: string) => `- ${f}`).join("\n")}.` : ""}`;
+
+    const estimatedInputTokens = estimateInputTokens(systemPrompt, prompt, userContext + assistantContext);
+    const dynamicMaxTokens = calculateMaxTokens(selectedProvider, estimatedInputTokens, false);
+    const providerConfig = getProviderSpecificConfig(selectedProvider, dynamicMaxTokens);
+
     const response = await client.chatCompletion(
       {
         model: selectedModel.value,
@@ -304,33 +336,22 @@ export async function PUT(request: NextRequest) {
         messages: [
           {
             role: "system",
-            content: …
+            content: systemPrompt,
           },
           {
             role: "user",
-            content: …
-              ? `Also here are the previous prompts:\n\n${previousPrompts.map((p: string) => `- ${p}`).join("\n")}`
-              : "You are modifying the HTML file based on the user's request.",
+            content: userContext,
           },
           {
             role: "assistant",
-            …
-            content: `${
-              selectedElementHtml
-                ? `\n\nYou have to update ONLY the following element, NOTHING ELSE: \n\n\`\`\`html\n${selectedElementHtml}\n\`\`\` Could be in multiple pages, if so, update all the pages.`
-                : ""
-            }. Current pages: ${pages?.map((p: Page) => `- ${p.path} \n${p.html}`).join("\n")}. ${files?.length > 0 ? `Current images: ${files?.map((f: string) => `- ${f}`).join("\n")}.` : ""}`,
+            content: assistantContext,
          },
          {
            role: "user",
            content: prompt,
          },
        ],
-        ...(…
-          ? {
-              max_tokens: 65_536,
-            }
-          : {}),
+        ...providerConfig,
      },
      billTo ? { billTo } : {}
    );
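Taken together, these hunks replace the old fixed `max_tokens` values with a budget computed per request. A minimal sketch of the resulting flow, assuming a provider object that carries `provider` and `context_length` the way `lib/max-tokens.ts` reads it (the concrete values below are hypothetical stand-ins, not code from the commit):

```ts
import {
  calculateMaxTokens,
  estimateInputTokens,
  getProviderSpecificConfig,
} from "@/lib/max-tokens";

// Hypothetical stand-ins; in the route these come from getBestProvider()
// and the prompt-building code above.
const selectedProvider = { provider: "novita", context_length: 131_072 };
const systemPrompt = "You are an expert web developer...";
const userPrompt = "Build a landing page for a bakery.";

const inputTokens = estimateInputTokens(systemPrompt, userPrompt);
const maxTokens = calculateMaxTokens(selectedProvider, inputTokens, true); // true = streaming POST
const providerConfig = getProviderSpecificConfig(selectedProvider, maxTokens);

// providerConfig is { max_tokens: n } for most providers and {} for SambaNova,
// so spreading it simply omits the cap where it should not be sent.
const body = {
  model: "deepseek-ai/DeepSeek-V3.2-Exp",
  messages: [
    { role: "system", content: systemPrompt },
    { role: "user", content: userPrompt },
  ],
  ...providerConfig,
};
```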
assets/deepseek.svg
ADDED

assets/kimi.svg
ADDED

assets/qwen.svg
ADDED
components/editor/ask-ai/fake-ask.tsx
ADDED

@@ -0,0 +1,75 @@
+import { useState } from "react";
+import { useLocalStorage } from "react-use";
+import { ArrowUp } from "lucide-react";
+import { useRouter } from "next/navigation";
+
+import { Button } from "@/components/ui/button";
+import { useLoginModal } from "@/components/contexts/login-context";
+import { PromptBuilder } from "./prompt-builder";
+import { EnhancedSettings } from "@/types";
+import { Settings } from "./settings";
+
+export const FakeAskAi = () => {
+  const router = useRouter();
+  const [prompt, setPrompt] = useState("");
+  const [openProvider, setOpenProvider] = useState(false);
+  const [enhancedSettings, setEnhancedSettings, removeEnhancedSettings] =
+    useLocalStorage<EnhancedSettings>("deepsite-enhancedSettings", {
+      isActive: true,
+      primaryColor: undefined,
+      secondaryColor: undefined,
+      theme: undefined,
+    });
+  const [, setPromptStorage] = useLocalStorage("prompt", "");
+
+  const callAi = async () => {
+    setPromptStorage(prompt);
+    router.push("/projects/new");
+  };
+
+  // todo: redirect to login + set prompt in storage, then redirect to projects/new + set the prompt in the state
+
+  return (
+    <div className="p-3 w-full max-w-xl mx-auto">
+      <div className="relative bg-neutral-800 border border-neutral-700 rounded-2xl ring-[4px] focus-within:ring-neutral-500/30 focus-within:border-neutral-600 ring-transparent z-20 w-full group">
+        <div className="w-full relative flex items-center justify-between">
+          <textarea
+            className="w-full bg-transparent text-sm outline-none text-white placeholder:text-neutral-400 p-4 resize-none"
+            placeholder="Ask DeepSite anything..."
+            value={prompt}
+            onChange={(e) => setPrompt(e.target.value)}
+            onKeyDown={(e) => {
+              if (e.key === "Enter" && !e.shiftKey) {
+                callAi();
+              }
+            }}
+          />
+        </div>
+        <div className="flex items-center justify-between gap-2 px-4 pb-3 mt-2">
+          <div className="flex-1 flex items-center justify-start gap-1.5 flex-wrap">
+            <PromptBuilder
+              enhancedSettings={enhancedSettings!}
+              setEnhancedSettings={setEnhancedSettings}
+            />
+            <Settings
+              open={openProvider}
+              isFollowUp={false}
+              error=""
+              onClose={setOpenProvider}
+            />
+          </div>
+          <div className="flex items-center justify-end gap-2">
+            <Button
+              size="iconXs"
+              variant="outline"
+              className="!rounded-md"
+              onClick={() => callAi()}
+            >
+              <ArrowUp className="size-4" />
+            </Button>
+          </div>
+        </div>
+      </div>
+    </div>
+  );
+};
components/editor/ask-ai/index.tsx
CHANGED

@@ -1,20 +1,11 @@
-import { …
+import { useRef, useState } from "react";
 import classNames from "classnames";
-import {
-  …
-  ChevronDown,
-  CircleStop,
-  Pause,
-  Plus,
-  Square,
-  StopCircle,
-} from "lucide-react";
-import { useLocalStorage, useUpdateEffect } from "react-use";
+import { ArrowUp, ChevronDown, CircleStop } from "lucide-react";
+import { useLocalStorage, useUpdateEffect, useMount } from "react-use";
 import { toast } from "sonner";
 
 import { useAi } from "@/hooks/useAi";
 import { useEditor } from "@/hooks/useEditor";
-import { isTheSameHtml } from "@/lib/compare-html-diff";
 import { EnhancedSettings, Project } from "@/types";
 import { SelectedFiles } from "@/components/editor/ask-ai/selected-files";
 import { SelectedHtmlElement } from "@/components/editor/ask-ai/selected-html-element";
@@ -28,7 +19,6 @@ import { useUser } from "@/hooks/useUser";
 import { useLoginModal } from "@/components/contexts/login-context";
 import { Settings } from "./settings";
 import { useProModal } from "@/components/contexts/pro-context";
-import { MODELS } from "@/lib/providers";
 import { MAX_FREE_PROJECTS } from "@/lib/utils";
 
 export const AskAi = ({
@@ -71,6 +61,7 @@ export const AskAi = ({
     secondaryColor: undefined,
     theme: undefined,
   });
+  const [promptStorage, , removePromptStorage] = useLocalStorage("prompt", "");
 
   const [isFollowUp, setIsFollowUp] = useState(true);
   const [prompt, setPrompt] = useState("");
@@ -83,8 +74,16 @@ export const AskAi = ({
     setOpenThink(true);
   };
 
+  useMount(() => {
+    if (promptStorage && promptStorage.trim() !== "") {
+      setPrompt(promptStorage);
+      callAi();
+    }
+  });
+
   const callAi = async (redesignMarkdown?: string) => {
     if (!user) return openLoginModal();
+    removePromptStorage();
     if (!user.isPro && projects.length >= MAX_FREE_PROJECTS)
       return openProModal([]);
     if (isAiWorking) return;
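The prompt handoff between the landing page and the editor spans two files, so it is easy to miss: `FakeAskAi` stashes the visitor's prompt in local storage and redirects to `/projects/new`, and `AskAi` replays it on mount. A condensed plain-TypeScript model of that contract (the real code uses the `useLocalStorage`/`useMount` hooks shown in the diffs; react-use JSON-serializes stored values, hence the stringify/parse pair):

```ts
const PROMPT_KEY = "prompt"; // the storage key both components use above

// FakeAskAi side: stash the prompt, then navigate to /projects/new.
export function stashPrompt(storage: Storage, prompt: string): void {
  storage.setItem(PROMPT_KEY, JSON.stringify(prompt));
}

// AskAi side: read the stashed prompt on mount; callAi() removes the key,
// so a stashed prompt is only replayed once per handoff.
export function takeStashedPrompt(storage: Storage): string | null {
  const raw = storage.getItem(PROMPT_KEY);
  if (!raw) return null;
  const prompt = JSON.parse(raw) as string;
  return prompt.trim() !== "" ? prompt : null;
}
```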
components/editor/ask-ai/loading.tsx
CHANGED

@@ -1,5 +1,6 @@
+"use client";
 import Loading from "@/components/loading";
-import { useState } from "react";
+import { useState, useEffect } from "react";
 import { useInterval } from "react-use";
 
 const TEXTS = [
@@ -23,8 +24,16 @@ export const AiLoading = ({
   className?: string;
 }) => {
   const [selectedText, setSelectedText] = useState(
-    text ?? TEXTS[…
+    text ?? TEXTS[0] // Start with first text to avoid hydration issues
   );
+
+  // Set random text on client-side only to avoid hydration mismatch
+  useEffect(() => {
+    if (!text) {
+      setSelectedText(TEXTS[Math.floor(Math.random() * TEXTS.length)]);
+    }
+  }, [text]);
+
   useInterval(() => {
     if (!text) {
       if (selectedText === TEXTS[TEXTS.length - 1]) {
components/editor/ask-ai/settings.tsx
CHANGED

@@ -76,7 +76,16 @@ export function Settings({
           disabled={globalAiLoading}
           size="xs"
         >
-          <Brain className="size-3.5" />
+          {/* <Brain className="size-3.5" /> */}
+          {selectedModel?.logo && (
+            <Image
+              src={selectedModel?.logo}
+              alt={selectedModel.label}
+              className={`size-3.5 ${open ? "" : "filter invert"}`}
+              width={20}
+              height={20}
+            />
+          )}
           <span className="truncate max-w-[120px]">
             {isMounted
               ? selectedModel?.label?.split(" ").join("-").toLowerCase()
components/my-projects/index.tsx
CHANGED

@@ -13,7 +13,6 @@ import { Button } from "@/components/ui/button";
 import { useProModal } from "@/components/contexts/pro-context";
 import { api } from "@/lib/api";
 import { NotLogged } from "../not-logged/not-logged";
-import { LoadProject } from "./load-project";
 
 export function MyProjects() {
   const { user, projects, setProjects } = useUser();
components/not-logged/not-logged.tsx
CHANGED

@@ -4,6 +4,7 @@ import { useUser } from "@/hooks/useUser";
 import { Button } from "@/components/ui/button";
 import { AnimatedBlobs } from "../animated-blobs";
 import { AnimatedText } from "../animated-text";
+import { FakeAskAi } from "../editor/ask-ai/fake-ask";
 
 export const NotLogged = () => {
   const { openLoginWindow } = useUser();
@@ -21,10 +22,7 @@ export const NotLogged = () => {
           Access the most simple and powerful AI Vibe Code Editor to create your
           next project.
         </p>
-        <Button …>
-          Log In to Continue
-        </Button>
-        <div className="mt-14 max-w-2xl w-full mx-auto">{/* <AskAi /> */}</div>
+        <FakeAskAi />
         <AnimatedBlobs />
       </header>
       <div id="features" className="min-h-screen py-20 px-6 relative">
lib/max-tokens.ts
ADDED

@@ -0,0 +1,95 @@
+/**
+ * Calculate optimal max_tokens based on provider capabilities and input size
+ *
+ * @param selectedProvider - The selected provider object from getBestProvider
+ * @param inputTokens - Estimated input tokens (prompt + system message + context)
+ * @param isStreaming - Whether this is a streaming request (affects buffer)
+ * @returns Optimal max_tokens value
+ */
+export function calculateMaxTokens(
+  selectedProvider: any,
+  inputTokens: number = 0,
+  isStreaming: boolean = false
+): number {
+  if (!selectedProvider?.context_length) {
+    // Fallback for unknown providers - use conservative default
+    return 4096;
+  }
+
+  const contextLength = selectedProvider.context_length;
+
+  // Reserve buffer for safety and potential tokenization differences
+  const safetyBuffer = isStreaming ? 1000 : 500;
+
+  // Calculate available tokens for output
+  const availableTokens = contextLength - inputTokens - safetyBuffer;
+
+  // Define reasonable max output limits based on use case
+  const useCase = {
+    // For HTML generation, we typically need substantial output
+    htmlGeneration: Math.min(32_000, availableTokens),
+    // For code editing, moderate output is usually sufficient
+    codeEditing: Math.min(16_000, availableTokens),
+    // Conservative fallback
+    default: Math.min(8_000, availableTokens)
+  };
+
+  // Choose based on available tokens and provider capabilities
+  let targetTokens: number;
+
+  if (availableTokens >= 32_000) {
+    targetTokens = useCase.htmlGeneration;
+  } else if (availableTokens >= 16_000) {
+    targetTokens = useCase.codeEditing;
+  } else {
+    targetTokens = useCase.default;
+  }
+
+  // Ensure we don't go below minimum viable output
+  const minimumViableOutput = 2048;
+  if (targetTokens < minimumViableOutput) {
+    // If we can't provide minimum viable output, try with minimal buffer
+    const minimalBuffer = 200;
+    targetTokens = Math.max(
+      minimumViableOutput,
+      contextLength - inputTokens - minimalBuffer
+    );
+  }
+
+  // Final safety check - never exceed context length
+  return Math.min(targetTokens, contextLength - inputTokens - 100);
+}
+
+/**
+ * Estimate input tokens for a request (rough estimation)
+ *
+ * @param systemPrompt - System prompt content
+ * @param userPrompt - User prompt content
+ * @param additionalContext - Additional context (templates, pages, etc.)
+ * @returns Estimated token count
+ */
+export function estimateInputTokens(
+  systemPrompt: string = "",
+  userPrompt: string = "",
+  additionalContext: string = ""
+): number {
+  // Rough estimation: ~4 characters per token for English text
+  // This is conservative - actual tokenization may vary
+  const totalChars = systemPrompt.length + userPrompt.length + additionalContext.length;
+  return Math.ceil(totalChars / 3.5); // Slightly more conservative than 4 chars/token
+}
+
+/**
+ * Get max_tokens configuration for specific providers with special handling
+ */
+export function getProviderSpecificConfig(selectedProvider: any, baseMaxTokens: number) {
+  const providerName = selectedProvider?.provider;
+
+  switch (providerName) {
+    case "sambanova":
+      // SambaNova has specific limitations - don't set max_tokens
+      return {};
+    default:
+      return { max_tokens: baseMaxTokens };
+  }
+}
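A quick worked example of the budgeting bands above, using a hypothetical 131k-context provider (the `context_length` field comes from whatever `getBestProvider` returns):

```ts
import { calculateMaxTokens, estimateInputTokens } from "@/lib/max-tokens";

const provider = { provider: "novita", context_length: 131_072 };

// 131_072 - 100_000 input - 1_000 streaming buffer = 30_072 available.
// That lands in the 16k-32k band, so the codeEditing limit applies:
// min(16_000, 30_072) = 16_000.
calculateMaxTokens(provider, 100_000, true); // => 16_000

// A small input leaves plenty of room, so the htmlGeneration cap wins:
// min(32_000, 131_072 - 2_000 - 1_000) = 32_000.
calculateMaxTokens(provider, 2_000, true); // => 32_000

// No context_length on the provider -> conservative 4096 fallback.
calculateMaxTokens({ provider: "unknown" }, 100_000, true); // => 4096

// estimateInputTokens is a ~3.5 chars/token heuristic:
estimateInputTokens("a".repeat(7_000)); // => 2_000
```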
lib/providers.ts
CHANGED

@@ -1,37 +1,34 @@
+import DeepSeekLogo from "@/assets/deepseek.svg";
+import QwenLogo from "@/assets/qwen.svg";
+import KimiLogo from "@/assets/kimi.svg";
+
 export const PROVIDERS = {
   "fireworks-ai": {
     name: "Fireworks AI",
-    max_tokens: 131_000,
     id: "fireworks-ai",
   },
   nebius: {
     name: "Nebius AI Studio",
-    max_tokens: 131_000,
     id: "nebius",
   },
   sambanova: {
     name: "SambaNova",
-    max_tokens: 32_000,
     id: "sambanova",
   },
   novita: {
     name: "NovitaAI",
-    max_tokens: 16_000,
     id: "novita",
   },
   hyperbolic: {
     name: "Hyperbolic",
-    max_tokens: 131_000,
     id: "hyperbolic",
   },
   together: {
     name: "Together AI",
-    max_tokens: 128_000,
     id: "together",
   },
   groq: {
     name: "Groq",
-    max_tokens: 16_384,
     id: "groq",
   },
 };
@@ -42,6 +39,7 @@ export const MODELS = [
     label: "DeepSeek V3 O324",
     providers: ["fireworks-ai", "nebius", "sambanova", "novita", "hyperbolic"],
     autoProvider: "novita",
+    logo: DeepSeekLogo,
   },
   // {
   //   value: "deepseek-ai/DeepSeek-R1-0528",
@@ -62,31 +60,42 @@ export const MODELS = [
     label: "Qwen3 Coder 480B A35B Instruct",
     providers: ["novita", "hyperbolic"],
     autoProvider: "novita",
+    logo: QwenLogo,
   },
   {
     value: "moonshotai/Kimi-K2-Instruct",
     label: "Kimi K2 Instruct",
     providers: ["together", "novita", "groq"],
     autoProvider: "groq",
+    logo: KimiLogo,
   },
   {
     value: "deepseek-ai/DeepSeek-V3.1",
     label: "DeepSeek V3.1",
     providers: ["fireworks-ai", "novita"],
     autoProvider: "fireworks-ai",
+    logo: DeepSeekLogo,
   },
   {
     value: "moonshotai/Kimi-K2-Instruct-0905",
     label: "Kimi K2 Instruct 0905",
     providers: ["together", "groq", "novita"],
-    …
-    …
+    autoProvider: "groq",
+    logo: KimiLogo,
   },
   {
     value: "deepseek-ai/DeepSeek-V3.1-Terminus",
     label: "DeepSeek V3.1 Terminus",
     providers: ["novita"],
     autoProvider: "novita",
+    logo: DeepSeekLogo,
+  },
+  {
+    value: "deepseek-ai/DeepSeek-V3.2-Exp",
+    label: "DeepSeek V3.2 Exp",
+    providers: ["novita"],
+    autoProvider: "novita",
+    logo: DeepSeekLogo,
     isNew: true,
   }
 ];
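For reference, the model entries now share this shape. `lib/providers.ts` does not declare an explicit type, so the interface below is an inferred documentation sketch, not code from the commit:

```ts
interface ModelEntry {
  value: string;        // HF model id, e.g. "deepseek-ai/DeepSeek-V3.2-Exp"
  label: string;        // display name used in the settings dropdown
  providers: string[];  // inference providers that can serve the model
  autoProvider: string; // provider used when selection is left on auto
  logo?: string;        // imported SVG asset (actual type depends on the Next.js asset loader)
  isNew?: boolean;      // marks the model as newly added in the UI
}
```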
lib/templates.ts
ADDED

The diff for this file is too large to render. See raw diff.