<!DOCTYPE html>
<html class="dark" lang="en">
<head>
  <meta charset="utf-8" />
  <meta content="width=device-width, initial-scale=1.0" name="viewport" />
  <title>NeuralNomadAI - HuggingFace Demo (index.html)</title>

  <!-- Google Fonts -->
  <link href="https://fonts.googleapis.com" rel="preconnect" />
  <link crossorigin href="https://fonts.gstatic.com" rel="preconnect" />
  <link href="https://fonts.googleapis.com/css2?family=Space+Grotesk:wght@400;500;700&display=swap" rel="stylesheet" />

  <!-- Tailwind CDN (keeps the original layout & utilities) -->
  <script src="https://cdn.tailwindcss.com?plugins=forms,container-queries"></script>

  <!-- Local CSS for small customizations -->
  <link rel="stylesheet" href="style.css" />
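  <!-- NOTE (assumption): style.css is expected to supply the small custom classes this page
       toggles, e.g. .page-content/.active for section switching plus .nav-link, .result-box
       and .btn-spinner; the Tailwind CDN above covers the utility classes. -->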
</head>

<body class="bg-gray-50 dark:bg-gray-900 font-display text-gray-900 dark:text-gray-100">
  <div class="flex min-h-screen w-full flex-col">
    <!-- Header -->
    <header class="sticky top-0 z-50 border-b border-gray-200/50 dark:border-gray-700/50 bg-white/80 dark:bg-gray-900/80 backdrop-blur-sm">
      <div class="mx-auto flex max-w-7xl items-center justify-between px-4 py-3">
        <a href="#home" data-page="home" class="flex items-center gap-4 cursor-pointer">
          <svg class="h-8 w-8 text-sky-600" fill="none" viewBox="0 0 48 48" xmlns="http://www.w3.org/2000/svg"><path d="M39.5563 34.1455V13.8546C39.5563 15.708 36.8773 17.3437 32.7927 18.3189C30.2914 18.916 27.263 19.2655 24 19.2655C20.737 19.2655 17.7086 18.916 15.2073 18.3189C11.1227 17.3437 8.44365 15.708 8.44365 13.8546V34.1455C8.44365 35.9988 11.1227 37.6346 15.2073 38.6098C17.7086 39.2069 20.737 39.5564 24 39.5564C27.263 39.5564 30.2914 39.2069 32.7927 38.6098C36.8773 37.6346 39.5563 35.9988 39.5563 34.1455Z" fill="currentColor"></path></svg>
          <h1 class="text-lg font-bold">NeuralNomadAI</h1>
        </a>

        <nav class="hidden md:flex items-center gap-6">
          <a href="#home" data-page="home" class="nav-link text-sm font-medium">Home</a>
          <a href="#services" data-page="services" class="nav-link text-sm font-medium">Services</a>
          <a href="#portfolio" data-page="portfolio" class="nav-link text-sm font-medium">Portfolio</a>
          <a href="#contact" data-page="contact" class="nav-link text-sm font-medium">Contact</a>
        </nav>

        <div class="flex items-center gap-4">
          <a href="#contact" data-page="contact" class="rounded-lg bg-sky-600 px-4 py-2 text-sm font-semibold text-white">Get Started</a>
        </div>
      </div>
    </header>

    <main class="flex-grow">
      <!-- HOME (brief) -->
      <section id="home" class="page-content active">
        <div class="mx-auto max-w-7xl px-4 py-12">
          <div class="text-center">
            <h2 class="text-3xl font-bold">AI Prompt & Image Playground — Hugging Face</h2>
            <p class="mt-3 text-gray-600 dark:text-gray-300">This demo calls the Hugging Face Inference API directly from the browser. Before production, move the API token behind a backend proxy instead of shipping it to the client.</p>
          </div>

          <!-- Prompt Ideator -->
          <div class="mt-10 max-w-xl mx-auto">
            <h3 class="text-xl font-semibold mb-3">✨ AI Prompt Ideator</h3>
            <div class="flex gap-3">
              <input id="prompt-input" type="text" placeholder="e.g., a cat astronaut on Mars" aria-label="Prompt input"
                class="w-full rounded-lg border border-gray-300 p-3 bg-white dark:bg-gray-800" />
              <button id="generate-prompt-btn" type="button" class="rounded-lg bg-sky-600 px-4 py-2 text-white font-semibold">
                <span class="btn-text">Generate Prompt</span>
                <span class="btn-spinner hidden ml-2"></span>
              </button>
            </div>
            <div id="prompt-result" class="mt-4 result-box hidden" role="status" aria-live="polite"></div>
          </div>

          <!-- Image Playground -->
          <div class="mt-12 max-w-xl mx-auto">
            <h3 class="text-xl font-semibold mb-3">✨ AI Image Playground</h3>
            <div class="flex gap-3">
              <input id="image-prompt-input" type="text" placeholder="e.g., futuristic city at sunset" aria-label="Image prompt input"
                class="w-full rounded-lg border border-gray-300 p-3 bg-white dark:bg-gray-800" />
              <button id="generate-image-btn" type="button" class="rounded-lg bg-sky-600 px-4 py-2 text-white font-semibold">
                <span class="btn-text">Generate Image</span>
                <span class="btn-spinner hidden ml-2"></span>
              </button>
            </div>

            <div id="image-loading" class="mt-6 hidden text-sm text-gray-600 dark:text-gray-300">
              <div class="flex items-center gap-3">
                <svg class="animate-spin h-6 w-6 text-sky-600" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24">
                  <circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4"></circle>
                  <path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4z"></path>
                </svg>
                <span>Generating image — this may take 10–30s depending on the model.</span>
              </div>
            </div>

            <div id="image-result" class="mt-6 result-box" role="status" aria-live="polite"></div>
          </div>

        </div>
      </section>

      <!-- SERVICES (placeholder) -->
      <section id="services" class="page-content">
        <div class="mx-auto max-w-7xl px-4 py-12">
          <h2 class="text-2xl font-bold">Services</h2>
          <p class="mt-3 text-gray-600 dark:text-gray-300">Sample content...</p>
        </div>
      </section>

      <!-- PORTFOLIO (placeholder) -->
      <section id="portfolio" class="page-content">
        <div class="mx-auto max-w-7xl px-4 py-12">
          <h2 class="text-2xl font-bold">Portfolio</h2>
          <p class="mt-3 text-gray-600 dark:text-gray-300">Sample content...</p>
        </div>
      </section>

      <!-- CONTACT (placeholder) -->
      <section id="contact" class="page-content">
        <div class="mx-auto max-w-7xl px-4 py-12">
          <h2 class="text-2xl font-bold">Contact</h2>
          <p class="mt-3 text-gray-600 dark:text-gray-300">Sample content...</p>
        </div>
      </section>
    </main>

    <footer class="border-t border-gray-200/50 dark:border-gray-700/50 bg-white/80 dark:bg-gray-900/80">
      <div class="mx-auto max-w-7xl px-4 py-8 text-center text-sm text-gray-600 dark:text-gray-300">
        © 2024 NeuralNomadAI — Hugging Face demo. Remember to proxy your API key on the server.
      </div>
    </footer>
  </div>

  <!-- JS: huggingface API integration + UI handling -->
  <script>
    // -------------------------
    // IMPORTANT (security):
    // Do NOT put real HF tokens in client-side code for production.
    // For quick local testing you can paste a short-lived token here,
    // but the recommended pattern is:
    //   - Create a small backend endpoint (e.g. /api/hf/generate)
    //   - The backend attaches your HF token to the request and forwards it to HF
    // -------------------------

    // If you still want to test client-side (ONLY FOR LOCAL TESTING), put a token here:
    // const HF_TOKEN = 'HF_API_TOKEN_HERE';
    // Otherwise set HF_TOKEN to empty and call a backend endpoint instead.
    const HF_TOKEN = ''; // <-- keep this empty in production and use the backend proxy instead.

    // Backend proxy settings (recommended)
    // If you implement a backend, set PROXY_BASE to your server endpoint:
    // e.g. const PROXY_BASE = '/api/hf'; and implement endpoints /api/hf/text and /api/hf/image
    const PROXY_BASE = ''; // e.g. '/api/hf'  (leave empty to call HF directly)
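
    // Example of the recommended backend proxy (server-side sketch, NOT part of this page):
    // a minimal sketch assuming a separate Node 18+ service with Express installed. The route
    // paths mirror PROXY_BASE + '/text' and '/image' as used below; the port and the HF_TOKEN
    // environment variable are illustrative assumptions.
    /*
    const express = require('express');
    const app = express();
    app.use(express.json());

    const HF_API = 'https://api-inference.huggingface.co/models/';
    const SERVER_HF_TOKEN = process.env.HF_TOKEN; // the token stays on the server

    // POST /api/hf/text  body: { model, payload } -> forwards payload to HF, returns HF's JSON
    app.post('/api/hf/text', async (req, res) => {
      const { model, payload } = req.body;
      const upstream = await fetch(HF_API + model, {
        method: 'POST',
        headers: { Authorization: `Bearer ${SERVER_HF_TOKEN}`, 'Content-Type': 'application/json' },
        body: JSON.stringify(payload),
      });
      res.status(upstream.status).type('application/json').send(await upstream.text());
    });

    // POST /api/hf/image  body: { model, prompt } -> forwards the prompt, relays image bytes (or JSON) back
    app.post('/api/hf/image', async (req, res) => {
      const { model, prompt } = req.body;
      const upstream = await fetch(HF_API + model, {
        method: 'POST',
        headers: { Authorization: `Bearer ${SERVER_HF_TOKEN}`, 'Content-Type': 'application/json' },
        body: JSON.stringify({ inputs: prompt }),
      });
      const contentType = upstream.headers.get('content-type') || 'application/json';
      res.status(upstream.status).type(contentType).send(Buffer.from(await upstream.arrayBuffer()));
    });

    app.listen(3000);
    */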

    // Model choices (you can change these)
    // Text generation: use a text generation model (make sure to check the model's capabilities)
    const HF_TEXT_MODEL = 'gpt2'; // small plain-completion example; it won't follow instructions, so swap in an instruction-tuned model for real usage
    // Image generation: choose a model that returns image binary, e.g., "stabilityai/stable-diffusion-2"
    const HF_IMAGE_MODEL = 'stabilityai/stable-diffusion-2';

    // Utilities
    const escapeHtml = (unsafe) => {
      if (unsafe === null || unsafe === undefined) return '';
      return String(unsafe)
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;')
        .replace(/"/g, '&quot;')
        .replace(/'/g, '&#039;');
    };

    const formatSafeHtml = (text) => {
      let out = escapeHtml(text);
      out = out.replace(/\*\*(.+?)\*\*/g, (m, p1) => `<strong>${p1}</strong>`);
      out = out.replace(/\r\n|\r|\n/g, '<br>');
      return out;
    };
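    // Illustrative example:
    //   formatSafeHtml('**Idea:** a <cat>\non Mars')
    //   -> '<strong>Idea:</strong> a &lt;cat&gt;<br>on Mars'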

    // DOM
    document.addEventListener('DOMContentLoaded', () => {
      const promptInput = document.getElementById('prompt-input');
      const generatePromptBtn = document.getElementById('generate-prompt-btn');
      const promptResult = document.getElementById('prompt-result');

      const imagePromptInput = document.getElementById('image-prompt-input');
      const generateImageBtn = document.getElementById('generate-image-btn');
      const imageLoading = document.getElementById('image-loading');
      const imageResult = document.getElementById('image-result');

      // Small helpers
      const setButtonLoading = (button, loading = true, loadingText = 'Generating...') => {
        if (!button) return;
        const btnTextNode = button.querySelector('.btn-text');
        const spinner = button.querySelector('.btn-spinner');
        button.disabled = loading;
        button.classList.toggle('opacity-60', loading);
        button.setAttribute('aria-busy', String(loading));
        if (btnTextNode && loading) btnTextNode.textContent = loadingText;
        if (btnTextNode && !loading) {
          if (button.id === 'generate-prompt-btn') btnTextNode.textContent = 'Generate Prompt';
          if (button.id === 'generate-image-btn') btnTextNode.textContent = 'Generate Image';
        }
        if (spinner) spinner.classList.toggle('hidden', !loading);
      };

      const showError = (el, message) => {
        if (!el) return;
        el.classList.remove('hidden');
        el.innerHTML = `<div class="p-3 rounded bg-red-50 text-red-700 dark:bg-red-900/30">${escapeHtml(message)}</div>`;
      };

      const showHtml = (el, html) => {
        if (!el) return;
        el.classList.remove('hidden');
        el.innerHTML = html;
      };

      // Generic function to call Hugging Face Inference API for text
      // If PROXY_BASE is set, the code will call PROXY_BASE + '/text' instead.
      // The backend should forward the request to HF with the Authorization header.
      async function hfTextGenerate(prompt) {
        if (!prompt) throw new Error('Empty prompt');
        const payload = { inputs: prompt, options: { wait_for_model: true } };

        if (PROXY_BASE) {
          // call your backend proxy (recommended)
          const resp = await fetch(`${PROXY_BASE}/text`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ model: HF_TEXT_MODEL, payload }),
          });
          if (!resp.ok) throw new Error(`Proxy error: ${resp.status} ${await resp.text()}`);
          return resp.json();
        }

        // Direct client-side call to HF Inference
        if (!HF_TOKEN) throw new Error('No HF token set (use a backend proxy in production)');

        const url = `https://api-inference.huggingface.co/models/${HF_TEXT_MODEL}`; // namespaced model ids contain '/', which should stay a literal slash in the path
        const resp = await fetch(url, {
          method: 'POST',
          headers: {
            Authorization: `Bearer ${HF_TOKEN}`,
            'Content-Type': 'application/json',
          },
          body: JSON.stringify(payload),
        });

        if (!resp.ok) {
          const txt = await resp.text().catch(() => '');
          throw new Error(`Hugging Face API error: ${resp.status} ${txt}`);
        }

        return resp.json();
      }

      // Generic function to call Hugging Face Inference API for images
      // Many HF image models return binary image data (image/png). We handle binary responses and JSON fallbacks.
      async function hfImageGenerate(prompt) {
        if (!prompt) throw new Error('Empty prompt');

        if (PROXY_BASE) {
          const resp = await fetch(`${PROXY_BASE}/image`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ model: HF_IMAGE_MODEL, prompt }),
          });
          if (!resp.ok) throw new Error(`Proxy error: ${resp.status} ${await resp.text()}`);
          // Proxy might forward binary image or JSON with base64; try to detect
          const contentType = resp.headers.get('content-type') || '';
          if (contentType.startsWith('image/')) {
            const blob = await resp.blob();
            return { blob };
          } else {
            return resp.json();
          }
        }

        // Direct client-side call to HF Inference (NOT RECOMMENDED)
        if (!HF_TOKEN) throw new Error('No HF token set (use a backend proxy in production)');

        const url = `https://api-inference.huggingface.co/models/${HF_IMAGE_MODEL}`; // namespaced model ids contain '/', which should stay a literal slash in the path
        const resp = await fetch(url, {
          method: 'POST',
          headers: {
            Authorization: `Bearer ${HF_TOKEN}`,
            'Content-Type': 'application/json',
          },
          // For some image models HF expects {"inputs":"<prompt>"} or a specialized payload
          body: JSON.stringify({ inputs: prompt }),
        });

        if (!resp.ok) {
          const txt = await resp.text().catch(() => '');
          throw new Error(`Hugging Face image error: ${resp.status} ${txt}`);
        }

        const contentType = resp.headers.get('content-type') || '';
        if (contentType.startsWith('image/')) {
          const blob = await resp.blob();
          return { blob };
        } else {
          // JSON response (could contain base64 or URLs); parse it
          const json = await resp.json().catch(() => null);
          return json;
        }
      }

      // Handlers
      const handlePromptGeneration = async () => {
        const input = (promptInput.value || '').trim();
        if (!input) {
          showError(promptResult, 'Please enter a short idea to expand into a prompt.');
          return;
        }
        promptResult.classList.add('hidden');
        setButtonLoading(generatePromptBtn, true, 'Generating...');
        try {
          const systemPrompt = `You are a senior prompt engineer. Expand the user's short idea into a detailed, artistic prompt suitable for image generation. The idea: "${input}"`;
          // Use HF text generation
          const resp = await hfTextGenerate(systemPrompt);
          // Response shapes vary: HF text models often return an array like [{generated_text: "..."}]
          let text = null;
          if (Array.isArray(resp) && resp.length > 0 && resp[0].generated_text) {
            text = resp[0].generated_text;
          } else if (resp && resp.generated_text) {
            text = resp.generated_text;
          } else if (typeof resp === 'string') {
            text = resp;
          } else if (Array.isArray(resp) && typeof resp[0] === 'string') {
            text = resp[0];
          } else if (resp && resp[0] && resp[0].text) {
            text = resp[0].text;
          }

          if (!text) {
            showError(promptResult, 'No text returned by the model (unexpected response shape). Check console for raw output.');
            console.debug('HF text response (raw):', resp);
          } else {
            const safe = formatSafeHtml(text);
            showHtml(promptResult, safe);
          }
        } catch (err) {
          console.error('Prompt generation error:', err);
          showError(promptResult, `Error: ${err.message || err}`);
        } finally {
          setButtonLoading(generatePromptBtn, false);
        }
      };

      const handleImageGeneration = async () => {
        const prompt = (imagePromptInput.value || '').trim();
        if (!prompt) {
          showError(imageResult, 'Please enter an image prompt to generate an image.');
          return;
        }
        imageResult.innerHTML = '';
        imageLoading.classList.remove('hidden');
        setButtonLoading(generateImageBtn, true, 'Generating...');
        try {
          const resp = await hfImageGenerate(prompt);

          // If the backend or HF returned a binary blob
          if (resp && resp.blob) {
            const url = URL.createObjectURL(resp.blob);
            const img = document.createElement('img');
            img.src = url;
            img.alt = `AI generated: ${prompt}`;
            img.loading = 'lazy';
            img.decoding = 'async';
            img.className = 'rounded-lg shadow-lg mx-auto';
            // Release the object URL once the image has loaded to avoid leaking memory
            img.onload = () => URL.revokeObjectURL(url);
            imageResult.appendChild(img);
            return;
          }

          // HF may return JSON containing base64 or an array of output URLs.
          // Common shapes:
          //  - [{generated_text: "..."}] (unlikely for images)
          //  - { error: "..." }
          //  - [{image_base64: "..."}, ...] or { images: ["data:image/png;..."] }
          //  - [{ "generated_image": "<base64>" }]
          // We'll try a few possibilities defensively:

          // If it's an array with objects containing 'image' or 'base64' fields
          if (Array.isArray(resp)) {
            // try to find a base64 string
            const obj = resp.find(r => (r.image || r.base64 || r.image_base64 || r.generated_image));
            const val = obj && (obj.image || obj.base64 || obj.image_base64 || obj.generated_image);
            if (val && typeof val === 'string') {
              // if it already begins with data:, use directly; else assume base64 png
              const src = val.startsWith('data:') ? val : `data:image/png;base64,${val}`;
              const img = document.createElement('img');
              img.src = src;
              img.alt = `AI generated: ${prompt}`;
              img.loading = 'lazy';
              img.decoding = 'async';
              img.className = 'rounded-lg shadow-lg mx-auto';
              imageResult.appendChild(img);
              return;
            }
          }

          // If resp.images is an array of data URLs or URLs
          if (resp && (resp.images || resp.output)) {
            const arr = resp.images || resp.output;
            if (Array.isArray(arr) && arr.length > 0) {
              const imageUrl = arr[0];
              const img = document.createElement('img');
              img.src = imageUrl;
              img.alt = `AI generated: ${prompt}`;
              img.loading = 'lazy';
              img.decoding = 'async';
              img.className = 'rounded-lg shadow-lg mx-auto';
              imageResult.appendChild(img);
              return;
            }
          }

          // If resp contains a base64 string in top-level 'generated_image' or 'b64_json'
          const b64 = resp && (resp.generated_image || resp.b64_json || resp.image_base64 || resp.base64);
          if (b64 && typeof b64 === 'string') {
            const src = b64.startsWith('data:') ? b64 : `data:image/png;base64,${b64}`;
            const img = document.createElement('img');
            img.src = src;
            img.alt = `AI generated: ${prompt}`;
            img.loading = 'lazy';
            img.decoding = 'async';
            img.className = 'rounded-lg shadow-lg mx-auto';
            imageResult.appendChild(img);
            return;
          }

          // As last resort, show the raw JSON for debugging
          showHtml(imageResult, `<pre class="p-3 bg-white/80 dark:bg-gray-800/70 rounded">${escapeHtml(JSON.stringify(resp, null, 2))}</pre>`);
          console.debug('HF image response (raw):', resp);
        } catch (err) {
          console.error('Image generation error:', err);
          showError(imageResult, `Error: ${err.message || err}`);
        } finally {
          imageLoading.classList.add('hidden');
          setButtonLoading(generateImageBtn, false);
        }
      };

      // Attach listeners
      generatePromptBtn.addEventListener('click', handlePromptGeneration);
      generateImageBtn.addEventListener('click', handleImageGeneration);

      // Navigation (simple)
      const navLinks = document.querySelectorAll('[data-page]');
      const pages = document.querySelectorAll('.page-content');
      const showPage = (id) => {
        pages.forEach(p => p.classList.remove('active'));
        const el = document.getElementById(id);
        if (el) el.classList.add('active');
      };
      navLinks.forEach(link => {
        link.addEventListener('click', (e) => {
          e.preventDefault();
          const page = link.getAttribute('data-page');
          if (page) {
            history.pushState({ page }, '', `#${page}`);
            showPage(page);
          }
        });
      });
      window.addEventListener('popstate', (ev) => {
        const page = (ev.state && ev.state.page) || (location.hash && location.hash.substring(1)) || 'home';
        showPage(page);
      });
    });
  </script>
</body>
</html>