messiasads committed on
Commit
472b022
·
1 Parent(s): d62d36c

fix: settings for localhost calls

Browse files
app/api/ask-ai/route.ts CHANGED
@@ -3,7 +3,6 @@ import type { NextRequest } from "next/server";
3
  import { NextResponse } from "next/server";
4
  import OpenAI from "openai";
5
 
6
- import { MODELS } from "@/lib/providers";
7
  import {
8
  DIVIDER,
9
  FOLLOW_UP_SYSTEM_PROMPT,
@@ -14,7 +13,7 @@ import {
14
 
15
  export async function POST(request: NextRequest) {
16
  const body = await request.json();
17
- const { prompt, model, redesignMarkdown, html, apiKey, customModel, baseUrl } = body;
18
 
19
  const openai = new OpenAI({
20
  apiKey: apiKey || process.env.OPENAI_API_KEY || "",
@@ -28,23 +27,11 @@ export async function POST(request: NextRequest) {
28
  );
29
  }
30
 
31
- const selectedModel = MODELS.find(
32
- (m) => m.value === model || m.label === model
33
- );
34
- if (!selectedModel) {
35
- return NextResponse.json(
36
- { ok: false, error: "Invalid model selected" },
37
- { status: 400 }
38
- );
39
- }
40
-
41
  try {
42
- // Create a stream response
43
  const encoder = new TextEncoder();
44
  const stream = new TransformStream();
45
  const writer = stream.writable.getWriter();
46
 
47
- // Start the response
48
  const response = new NextResponse(stream.readable, {
49
  headers: {
50
  "Content-Type": "text/plain; charset=utf-8",
@@ -57,12 +44,10 @@ export async function POST(request: NextRequest) {
57
  let completeResponse = "";
58
  try {
59
  const chatCompletion = await openai.chat.completions.create({
60
- model: customModel || selectedModel.value,
 
61
  messages: [
62
- {
63
- role: "system",
64
- content: INITIAL_SYSTEM_PROMPT,
65
- },
66
  {
67
  role: "user",
68
  content: redesignMarkdown
@@ -72,16 +57,19 @@ export async function POST(request: NextRequest) {
72
  : prompt,
73
  },
74
  ],
75
- stream: true,
76
  });
77
 
78
  for await (const chunk of chatCompletion) {
79
  const content = chunk.choices[0]?.delta?.content || "";
80
- await writer.write(encoder.encode(content));
81
  completeResponse += content;
82
- if (completeResponse.includes("</html>")) {
83
- break;
84
- }
 
 
 
 
85
  }
86
  } catch (error: any) {
87
  await writer.write(
@@ -95,7 +83,7 @@ export async function POST(request: NextRequest) {
95
  )
96
  );
97
  } finally {
98
- await writer?.close();
99
  }
100
  })();
101
 
@@ -114,10 +102,18 @@ export async function POST(request: NextRequest) {
114
 
115
  export async function PUT(request: NextRequest) {
116
  const body = await request.json();
117
- const { prompt, html, previousPrompt, selectedElementHtml, apiKey, model, baseUrl, customModel } = body;
 
 
 
 
 
 
 
 
118
 
119
  const openai = new OpenAI({
120
- apiKey: apiKey || process.env.OPENAI_API_KEY,
121
  baseURL: baseUrl || process.env.OPENAI_BASE_URL,
122
  });
123
 
@@ -128,116 +124,95 @@ export async function PUT(request: NextRequest) {
128
  );
129
  }
130
 
131
- const selectedModel = MODELS.find(
132
- (m) => m.value === model || m.label === model
133
- );
134
- if (!selectedModel) {
135
- return NextResponse.json(
136
- { ok: false, error: "Invalid model selected" },
137
- { status: 400 }
138
- );
139
- }
140
-
141
  try {
142
  const response = await openai.chat.completions.create({
143
- model: customModel || selectedModel.value,
144
  messages: [
145
- {
146
- role: "system",
147
- content: FOLLOW_UP_SYSTEM_PROMPT,
148
- },
149
  {
150
  role: "user",
151
- content: previousPrompt
152
- ? previousPrompt
153
- : "You are modifying the HTML file based on the user's request.",
154
  },
155
  {
156
  role: "assistant",
157
- content: `The current code is: \n\`\`\`html\n${html}\n\`\`\` ${selectedElementHtml
158
- ? `\n\nYou have to update ONLY the following element, NOTHING ELSE: \n\n\`\`\`html\n${selectedElementHtml}\n\`\`\``
159
- : ""}
160
- `,
161
- },
162
- {
163
- role: "user",
164
- content: prompt,
165
  },
 
166
  ],
167
  });
168
 
169
- const chunk = response.choices[0]?.message?.content;
170
- if (!chunk) {
171
  return NextResponse.json(
172
- { ok: false, message: "No content returned from the model" },
173
  { status: 400 }
174
  );
175
  }
176
 
177
- if (chunk) {
178
- const updatedLines: number[][] = [];
179
- let newHtml = html;
180
- let position = 0;
181
- let moreBlocks = true;
182
-
183
- while (moreBlocks) {
184
- const searchStartIndex = chunk.indexOf(SEARCH_START, position);
185
- if (searchStartIndex === -1) {
186
- moreBlocks = false;
187
- continue;
188
- }
189
 
190
- const dividerIndex = chunk.indexOf(DIVIDER, searchStartIndex);
191
- if (dividerIndex === -1) {
192
- moreBlocks = false;
193
- continue;
194
- }
195
 
196
- const replaceEndIndex = chunk.indexOf(REPLACE_END, dividerIndex);
197
- if (replaceEndIndex === -1) {
198
- moreBlocks = false;
199
- continue;
200
- }
201
 
202
- const searchBlock = chunk.substring(
203
- searchStartIndex + SEARCH_START.length,
204
- dividerIndex
205
- );
206
- const replaceBlock = chunk.substring(
207
- dividerIndex + DIVIDER.length,
208
- replaceEndIndex
209
- );
210
 
211
- if (searchBlock.trim() === "") {
212
- newHtml = `${replaceBlock}\n${newHtml}`;
213
- updatedLines.push([1, replaceBlock.split("\n").length]);
214
- } else {
215
- const blockPosition = newHtml.indexOf(searchBlock);
216
- if (blockPosition !== -1) {
217
- const beforeText = newHtml.substring(0, blockPosition);
218
- const startLineNumber = beforeText.split("\n").length;
219
- const replaceLines = replaceBlock.split("\n").length;
220
- const endLineNumber = startLineNumber + replaceLines - 1;
221
-
222
- updatedLines.push([startLineNumber, endLineNumber]);
223
- newHtml = newHtml.replace(searchBlock, replaceBlock);
224
- }
225
  }
226
-
227
- position = replaceEndIndex + REPLACE_END.length;
228
  }
229
 
230
- return NextResponse.json({
231
- ok: true,
232
- html: newHtml,
233
- updatedLines,
234
- });
235
- } else {
236
- return NextResponse.json(
237
- { ok: false, message: "No content returned from the model" },
238
- { status: 400 }
239
- );
240
  }
 
 
 
 
 
 
241
  } catch (error: any) {
242
  return NextResponse.json(
243
  {
@@ -248,4 +223,4 @@ export async function PUT(request: NextRequest) {
248
  { status: 500 }
249
  );
250
  }
251
- }
 
3
  import { NextResponse } from "next/server";
4
  import OpenAI from "openai";
5
 
 
6
  import {
7
  DIVIDER,
8
  FOLLOW_UP_SYSTEM_PROMPT,
 
13
 
14
  export async function POST(request: NextRequest) {
15
  const body = await request.json();
16
+ const { prompt, model, redesignMarkdown, html, apiKey, baseUrl } = body;
17
 
18
  const openai = new OpenAI({
19
  apiKey: apiKey || process.env.OPENAI_API_KEY || "",
 
27
  );
28
  }
29
 
 
 
 
 
 
 
 
 
 
 
30
  try {
 
31
  const encoder = new TextEncoder();
32
  const stream = new TransformStream();
33
  const writer = stream.writable.getWriter();
34
 
 
35
  const response = new NextResponse(stream.readable, {
36
  headers: {
37
  "Content-Type": "text/plain; charset=utf-8",
 
44
  let completeResponse = "";
45
  try {
46
  const chatCompletion = await openai.chat.completions.create({
47
+ model,
48
+ stream: true,
49
  messages: [
50
+ { role: "system", content: INITIAL_SYSTEM_PROMPT },
 
 
 
51
  {
52
  role: "user",
53
  content: redesignMarkdown
 
57
  : prompt,
58
  },
59
  ],
 
60
  });
61
 
62
  for await (const chunk of chatCompletion) {
63
  const content = chunk.choices[0]?.delta?.content || "";
64
+ if (!content) continue;
65
  completeResponse += content;
66
+ await writer.write(encoder.encode(content));
67
+ }
68
+
69
+ if (!completeResponse.trim()) {
70
+ await writer.write(
71
+ encoder.encode("\n[ERROR] Model returned empty response.\n")
72
+ );
73
  }
74
  } catch (error: any) {
75
  await writer.write(
 
83
  )
84
  );
85
  } finally {
86
+ await writer.close();
87
  }
88
  })();
89
 
 
102
 
103
  export async function PUT(request: NextRequest) {
104
  const body = await request.json();
105
+ const {
106
+ prompt,
107
+ html,
108
+ previousPrompt,
109
+ selectedElementHtml,
110
+ apiKey,
111
+ model,
112
+ baseUrl,
113
+ } = body;
114
 
115
  const openai = new OpenAI({
116
+ apiKey: apiKey || process.env.OPENAI_API_KEY || "",
117
  baseURL: baseUrl || process.env.OPENAI_BASE_URL,
118
  });
119
 
 
124
  );
125
  }
126
 
 
 
 
 
 
 
 
 
 
 
127
  try {
128
  const response = await openai.chat.completions.create({
129
+ model,
130
  messages: [
131
+ { role: "system", content: FOLLOW_UP_SYSTEM_PROMPT },
 
 
 
132
  {
133
  role: "user",
134
+ content:
135
+ previousPrompt ||
136
+ "You are modifying the HTML file based on the user's request.",
137
  },
138
  {
139
  role: "assistant",
140
+ content: `The current code is: \n\`\`\`html\n${html}\n\`\`\` ${
141
+ selectedElementHtml
142
+ ? `\n\nYou have to update ONLY the following element, NOTHING ELSE: \n\n\`\`\`html\n${selectedElementHtml}\n\`\`\``
143
+ : ""
144
+ }`,
 
 
 
145
  },
146
+ { role: "user", content: prompt },
147
  ],
148
  });
149
 
150
+ const chunk = response.choices[0]?.message?.content || "";
151
+ if (!chunk.trim()) {
152
  return NextResponse.json(
153
+ { ok: false, message: "Model returned empty response" },
154
  { status: 400 }
155
  );
156
  }
157
 
158
+ // aplica os blocos de modificação SEARCH_START...DIVIDER...REPLACE_END
159
+ let newHtml = html;
160
+ const updatedLines: number[][] = [];
161
+ let position = 0;
162
+ let moreBlocks = true;
163
+
164
+ while (moreBlocks) {
165
+ const searchStartIndex = chunk.indexOf(SEARCH_START, position);
166
+ if (searchStartIndex === -1) {
167
+ moreBlocks = false;
168
+ continue;
169
+ }
170
 
171
+ const dividerIndex = chunk.indexOf(DIVIDER, searchStartIndex);
172
+ if (dividerIndex === -1) {
173
+ moreBlocks = false;
174
+ continue;
175
+ }
176
 
177
+ const replaceEndIndex = chunk.indexOf(REPLACE_END, dividerIndex);
178
+ if (replaceEndIndex === -1) {
179
+ moreBlocks = false;
180
+ continue;
181
+ }
182
 
183
+ const searchBlock = chunk.substring(
184
+ searchStartIndex + SEARCH_START.length,
185
+ dividerIndex
186
+ );
187
+ const replaceBlock = chunk.substring(
188
+ dividerIndex + DIVIDER.length,
189
+ replaceEndIndex
190
+ );
191
 
192
+ if (searchBlock.trim() === "") {
193
+ newHtml = `${replaceBlock}\n${newHtml}`;
194
+ updatedLines.push([1, replaceBlock.split("\n").length]);
195
+ } else {
196
+ const blockPosition = newHtml.indexOf(searchBlock);
197
+ if (blockPosition !== -1) {
198
+ const beforeText = newHtml.substring(0, blockPosition);
199
+ const startLineNumber = beforeText.split("\n").length;
200
+ const replaceLines = replaceBlock.split("\n").length;
201
+ const endLineNumber = startLineNumber + replaceLines - 1;
202
+
203
+ updatedLines.push([startLineNumber, endLineNumber]);
204
+ newHtml = newHtml.replace(searchBlock, replaceBlock);
 
205
  }
 
 
206
  }
207
 
208
+ position = replaceEndIndex + REPLACE_END.length;
 
 
 
 
 
 
 
 
 
209
  }
210
+
211
+ return NextResponse.json({
212
+ ok: true,
213
+ html: newHtml,
214
+ updatedLines,
215
+ });
216
  } catch (error: any) {
217
  return NextResponse.json(
218
  {
 
223
  { status: 500 }
224
  );
225
  }
226
+ }
components/editor/ask-ai/index.tsx CHANGED
@@ -54,12 +54,6 @@ export function AskAI({
54
  const [hasAsked, setHasAsked] = useState(false);
55
  const [previousPrompt, setPreviousPrompt] = useState("");
56
  const [provider, setProvider] = useLocalStorage("provider", "auto");
57
- const [model, setModel] = useLocalStorage(
58
- "model",
59
- typeof window !== "undefined"
60
- ? localStorage.getItem("openai_model") || MODELS[0].value
61
- : MODELS[0].value
62
- );
63
  const [openProvider, setOpenProvider] = useState(false);
64
  const [providerError, setProviderError] = useState("");
65
  const [think, setThink] = useState<string | undefined>(undefined);
@@ -68,6 +62,11 @@ export function AskAI({
68
  const [controller, setController] = useState<AbortController | null>(null);
69
  const [isFollowUp, setIsFollowUp] = useState(true);
70
 
 
 
 
 
 
71
  const callAi = async (redesignMarkdown?: string) => {
72
  if (isAiWorking) return;
73
  if (!redesignMarkdown && !prompt.trim()) return;
@@ -91,7 +90,7 @@ export function AskAI({
91
  : "";
92
  const apiKey = localStorage.getItem("openai_api_key");
93
  const baseUrl = localStorage.getItem("openai_base_url");
94
- const customModel = localStorage.getItem("openai_model");
95
  const request = await fetch("/api/ask-ai", {
96
  method: "PUT",
97
  body: JSON.stringify({
@@ -103,7 +102,6 @@ export function AskAI({
103
  selectedElementHtml,
104
  apiKey,
105
  baseUrl,
106
- customModel,
107
  }),
108
  headers: {
109
  "Content-Type": "application/json",
@@ -128,7 +126,7 @@ export function AskAI({
128
  } else {
129
  const apiKey = localStorage.getItem("openai_api_key");
130
  const baseUrl = localStorage.getItem("openai_base_url");
131
- const customModel = localStorage.getItem("openai_model");
132
  const request = await fetch("/api/ask-ai", {
133
  method: "POST",
134
  body: JSON.stringify({
@@ -139,7 +137,6 @@ export function AskAI({
139
  redesignMarkdown,
140
  apiKey,
141
  baseUrl,
142
- customModel,
143
  }),
144
  headers: {
145
  "Content-Type": "application/json",
@@ -188,7 +185,6 @@ export function AskAI({
188
  setPrompt("");
189
  setisAiWorking(false);
190
  setHasAsked(true);
191
- setModel(MODELS[0].value);
192
  if (audio.current) audio.current.play();
193
 
194
  // Now we have the complete HTML including </html>, so set it to be sure
@@ -418,9 +414,11 @@ export function AskAI({
418
  <div className="flex items-center justify-end gap-2">
419
  <Settings
420
  provider={provider as string}
421
- model={model as string}
422
  onChange={setProvider}
423
- onModelChange={setModel}
 
 
424
  open={openProvider}
425
  error={providerError}
426
  isFollowUp={!isSameHtml && isFollowUp}
 
54
  const [hasAsked, setHasAsked] = useState(false);
55
  const [previousPrompt, setPreviousPrompt] = useState("");
56
  const [provider, setProvider] = useLocalStorage("provider", "auto");
 
 
 
 
 
 
57
  const [openProvider, setOpenProvider] = useState(false);
58
  const [providerError, setProviderError] = useState("");
59
  const [think, setThink] = useState<string | undefined>(undefined);
 
62
  const [controller, setController] = useState<AbortController | null>(null);
63
  const [isFollowUp, setIsFollowUp] = useState(true);
64
 
65
+ const getModel = () =>
66
+ typeof window !== "undefined"
67
+ ? localStorage.getItem("openai_model") || "gpt-4o-mini"
68
+ : "gpt-4o-mini";
69
+
70
  const callAi = async (redesignMarkdown?: string) => {
71
  if (isAiWorking) return;
72
  if (!redesignMarkdown && !prompt.trim()) return;
 
90
  : "";
91
  const apiKey = localStorage.getItem("openai_api_key");
92
  const baseUrl = localStorage.getItem("openai_base_url");
93
+ const model = getModel();
94
  const request = await fetch("/api/ask-ai", {
95
  method: "PUT",
96
  body: JSON.stringify({
 
102
  selectedElementHtml,
103
  apiKey,
104
  baseUrl,
 
105
  }),
106
  headers: {
107
  "Content-Type": "application/json",
 
126
  } else {
127
  const apiKey = localStorage.getItem("openai_api_key");
128
  const baseUrl = localStorage.getItem("openai_base_url");
129
+ const model = getModel();
130
  const request = await fetch("/api/ask-ai", {
131
  method: "POST",
132
  body: JSON.stringify({
 
137
  redesignMarkdown,
138
  apiKey,
139
  baseUrl,
 
140
  }),
141
  headers: {
142
  "Content-Type": "application/json",
 
185
  setPrompt("");
186
  setisAiWorking(false);
187
  setHasAsked(true);
 
188
  if (audio.current) audio.current.play();
189
 
190
  // Now we have the complete HTML including </html>, so set it to be sure
 
414
  <div className="flex items-center justify-end gap-2">
415
  <Settings
416
  provider={provider as string}
417
+ model={getModel()}
418
  onChange={setProvider}
419
+ onModelChange={(newModel: string) => {
420
+ localStorage.setItem("openai_model", newModel);
421
+ }}
422
  open={openProvider}
423
  error={providerError}
424
  isFollowUp={!isSameHtml && isFollowUp}