diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..d600b6c76dd93f7b2472160d42b2797cae50c8e5
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,25 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
+
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000000000000000000000000000000000000..a78447ebf932f1bb3a5b124b472bea8b3a86f80f
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,7 @@
+[*]
+charset = utf-8
+insert_final_newline = true
+end_of_line = lf
+indent_style = space
+indent_size = 2
+max_line_length = 80
\ No newline at end of file
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000000000000000000000000000000000000..41d8a7ebb4319bbd89c16be09fb5936359e14ef4
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,33 @@
+# A comma-separated list of access keys. Example: `ACCESS_KEYS="ABC123,JUD71F,HUWE3"`. Leave blank for unrestricted access.
+ACCESS_KEYS=""
+
+# The timeout in hours for access key validation. Set to 0 to require validation on every page load.
+ACCESS_KEY_TIMEOUT_HOURS="24"
+
+# The default model ID for WebLLM with F16 shaders.
+WEBLLM_DEFAULT_F16_MODEL_ID="Qwen3-0.6B-q4f16_1-MLC"
+
+# The default model ID for WebLLM with F32 shaders.
+WEBLLM_DEFAULT_F32_MODEL_ID="Qwen3-0.6B-q4f32_1-MLC"
+
+# The default model ID for Wllama.
+WLLAMA_DEFAULT_MODEL_ID="qwen-3-0.6b"
+
+# The base URL for the internal OpenAI-compatible API. Example: `INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL="https://api.openai.com/v1"`. Leave blank to disable the internal OpenAI-compatible API.
+INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL=""
+
+# The access key for the internal OpenAI-compatible API.
+INTERNAL_OPENAI_COMPATIBLE_API_KEY=""
+
+# The model for the internal OpenAI-compatible API.
+INTERNAL_OPENAI_COMPATIBLE_API_MODEL=""
+
+# The name of the internal OpenAI-compatible API, displayed in the UI.
+INTERNAL_OPENAI_COMPATIBLE_API_NAME="Internal API"
+
+# The type of inference to use by default. The possible values are:
+# "browser" -> In the browser (Private)
+# "openai" -> Remote Server (API)
+# "horde" -> AI Horde (Pre-configured)
+# "internal" -> $INTERNAL_OPENAI_COMPATIBLE_API_NAME
+DEFAULT_INFERENCE_TYPE="browser"
diff --git a/.github/hf-space-config.yml b/.github/hf-space-config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6d9d5b00160bb6ad09b8a92ab74ab6cd0a360b3b
--- /dev/null
+++ b/.github/hf-space-config.yml
@@ -0,0 +1,11 @@
+title: MiniSearch
+emoji: 👌🔍
+colorFrom: yellow
+colorTo: yellow
+sdk: docker
+short_description: Minimalist web-searching app with browser-based AI assistant
+pinned: true
+custom_headers:
+ cross-origin-embedder-policy: require-corp
+ cross-origin-opener-policy: same-origin
+ cross-origin-resource-policy: cross-origin
diff --git a/.github/workflows/ai-review.yml b/.github/workflows/ai-review.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8024aea2b679c2894f7da786c3d9be23f6c16867
--- /dev/null
+++ b/.github/workflows/ai-review.yml
@@ -0,0 +1,138 @@
+name: Review Pull Request with AI
+
+on:
+ pull_request:
+ types: [opened, synchronize, reopened]
+ branches: ["main"]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ ai-review:
+ if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-ai-review') }}
+ continue-on-error: true
+ runs-on: ubuntu-latest
+ name: AI Review
+ permissions:
+ pull-requests: write
+ contents: read
+ timeout-minutes: 30
+ steps:
+ - name: Checkout Repository
+ uses: actions/checkout@v5
+
+ - name: Create temporary directory
+ run: mkdir -p /tmp/pr_review
+
+ - name: Process PR description
+ id: process_pr
+ run: |
+ PR_BODY_ESCAPED=$(cat << 'EOF'
+ ${{ github.event.pull_request.body }}
+ EOF
+ )
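+          # Strip Markdown links from the PR body, keeping only the link text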
+ PROCESSED_BODY=$(echo "$PR_BODY_ESCAPED" | sed -E 's/\[(.*?)\]\(.*?\)/\1/g')
+ echo "$PROCESSED_BODY" > /tmp/pr_review/processed_body.txt
+
+ - name: Fetch branches and output the diff
+ run: |
+ git fetch origin main:main
+ git fetch origin pull/${{ github.event.pull_request.number }}/head:pr-branch
+ git diff main..pr-branch > /tmp/pr_review/diff.txt
+
+ - name: Prepare review request
+ id: prepare_request
+ run: |
+ PR_TITLE=$(echo "${{ github.event.pull_request.title }}" | sed 's/[()]/\\&/g')
+ DIFF_CONTENT=$(cat /tmp/pr_review/diff.txt)
+ PROCESSED_BODY=$(cat /tmp/pr_review/processed_body.txt)
+
+ jq -n \
+ --arg model "${{ vars.OPENAI_COMPATIBLE_API_MODEL }}" \
+ --arg http_referer "${{ github.event.repository.html_url }}" \
+ --arg title "${{ github.event.repository.name }}" \
+ --arg system "You are an experienced developer reviewing a Pull Request. You focus only on what matters and provide concise, actionable feedback.
+
+ Review Context:
+ Repository Name: \"${{ github.event.repository.name }}\"
+ Repository Description: \"${{ github.event.repository.description }}\"
+ Branch: \"${{ github.event.pull_request.head.ref }}\"
+ PR Title: \"$PR_TITLE\"
+
+ Guidelines:
+ 1. Only comment on issues that:
+ - Could cause bugs or security issues
+ - Significantly impact performance
+ - Make the code harder to maintain
+ - Violate critical best practices
+
+ 2. For each issue:
+ - Point to the specific line/file
+ - Explain why it's a problem
+ - Suggest a concrete fix
+
+ 3. Praise exceptional solutions briefly, only if truly innovative
+
+ 4. Skip commenting on:
+ - Minor style issues
+ - Obvious changes
+ - Working code that could be marginally improved
+ - Things that are just personal preference
+
+ Remember:
+ Less is more. If the code is good and working, just say so, with a short message." \
+ --arg user "This is the description of the pull request:
+ \`\`\`markdown
+ $PROCESSED_BODY
+ \`\`\`
+
+ And here is the diff of the changes, for you to review:
+ \`\`\`diff
+ $DIFF_CONTENT
+ \`\`\`" \
+ '{
+ "model": $model,
+ "messages": [
+ {"role": "system", "content": $system},
+ {"role": "user", "content": $user}
+ ],
+ "temperature": 0.7,
+ "top_p": 0.9
+ }' > /tmp/pr_review/request.json
+ - name: Get AI Review
+ id: ai_review
+ run: |
+ RESPONSE=$(curl -s ${{ vars.OPENAI_COMPATIBLE_API_BASE_URL }}/chat/completions \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer ${{ secrets.OPENAI_COMPATIBLE_API_KEY }}" \
+ -d @/tmp/pr_review/request.json)
+
+ # Check for errors in the response
+ if echo "$RESPONSE" | jq -e '.object == "error"' > /dev/null; then
+ echo "Error from API:" >&2
+ ERROR_MSG=$(echo "$RESPONSE" | jq -r '.message.detail[0].msg // .message')
+ echo "$ERROR_MSG" >&2
+ exit 1
+ fi
+
+ echo "### Review" > /tmp/pr_review/response.txt
+ echo "" >> /tmp/pr_review/response.txt
+ echo "$RESPONSE" | jq -r '.choices[0].message.content' >> /tmp/pr_review/response.txt
+
+ - name: Find Comment
+ uses: peter-evans/find-comment@v4
+ id: find_comment
+ with:
+ issue-number: ${{ github.event.pull_request.number }}
+ comment-author: "github-actions[bot]"
+ body-includes: "### Review"
+
+ - name: Post or Update PR Review
+ uses: peter-evans/create-or-update-comment@v5
+ with:
+ comment-id: ${{ steps.find_comment.outputs.comment-id }}
+ issue-number: ${{ github.event.pull_request.number }}
+ body-path: /tmp/pr_review/response.txt
+ edit-mode: replace
diff --git a/.github/workflows/deploy-to-hugging-face.yml b/.github/workflows/deploy-to-hugging-face.yml
new file mode 100644
index 0000000000000000000000000000000000000000..277b8981cf32b2e3c564a82842913e50e39030cb
--- /dev/null
+++ b/.github/workflows/deploy-to-hugging-face.yml
@@ -0,0 +1,18 @@
+name: Deploy to Hugging Face
+
+on:
+ workflow_dispatch:
+
+jobs:
+ sync-to-hf:
+ name: Sync to Hugging Face Spaces
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v5
+ - uses: JacobLinCool/huggingface-sync@v1
+ with:
+ github: ${{ secrets.GITHUB_TOKEN }}
+ user: ${{ vars.HF_SPACE_OWNER }}
+ space: ${{ vars.HF_SPACE_NAME }}
+ token: ${{ secrets.HF_TOKEN }}
+ configuration: ".github/hf-space-config.yml"
diff --git a/.github/workflows/on-pull-request-to-main.yml b/.github/workflows/on-pull-request-to-main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6eae98e615c1c1f2c899a9a5f1d785dd3883ff62
--- /dev/null
+++ b/.github/workflows/on-pull-request-to-main.yml
@@ -0,0 +1,9 @@
+name: On Pull Request To Main
+on:
+ pull_request:
+ types: [opened, synchronize, reopened]
+ branches: ["main"]
+jobs:
+ test-lint-ping:
+ if: ${{ !contains(github.event.pull_request.labels.*.name, 'skip-test-lint-ping') }}
+ uses: ./.github/workflows/reusable-test-lint-ping.yml
diff --git a/.github/workflows/on-push-to-main.yml b/.github/workflows/on-push-to-main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8ce693215c4351bab8b54ccac302345e1202ba03
--- /dev/null
+++ b/.github/workflows/on-push-to-main.yml
@@ -0,0 +1,7 @@
+name: On Push To Main
+on:
+ push:
+ branches: ["main"]
+jobs:
+ test-lint-ping:
+ uses: ./.github/workflows/reusable-test-lint-ping.yml
diff --git a/.github/workflows/publish-docker-image.yml b/.github/workflows/publish-docker-image.yml
new file mode 100644
index 0000000000000000000000000000000000000000..eccd087fc5660d5013c093e3403526e5b0c2274c
--- /dev/null
+++ b/.github/workflows/publish-docker-image.yml
@@ -0,0 +1,39 @@
+name: Publish Docker Image
+
+on:
+ workflow_dispatch:
+
+jobs:
+ build-and-push-image:
+ name: Publish Docker Image to GitHub Packages
+ runs-on: ubuntu-latest
+ env:
+ REGISTRY: ghcr.io
+ IMAGE_NAME: ${{ github.repository }}
+ permissions:
+ contents: read
+ packages: write
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v5
+ - name: Log in to the Container registry
+ uses: docker/login-action@v3
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Extract metadata (tags, labels) for Docker
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ - name: Build and push Docker Image
+ uses: docker/build-push-action@v6
+ with:
+ context: .
+ push: true
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ platforms: linux/amd64,linux/arm64
diff --git a/.github/workflows/reusable-test-lint-ping.yml b/.github/workflows/reusable-test-lint-ping.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ca60f0e3122ade4f34208e2c449b53767e9bc69d
--- /dev/null
+++ b/.github/workflows/reusable-test-lint-ping.yml
@@ -0,0 +1,26 @@
+on:
+ workflow_call:
+jobs:
+ check-code-quality:
+ name: Check Code Quality
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v5
+ - uses: actions/setup-node@v6
+ with:
+ node-version: "lts/*"
+ cache: "npm"
+ - run: npm ci --ignore-scripts
+ - run: npm test
+ - run: npm run lint
+ check-docker-container:
+ needs: [check-code-quality]
+ name: Check Docker Container
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v5
+ - run: docker compose -f docker-compose.production.yml up -d
+ - name: Check if main page is available
+ run: until curl -s -o /dev/null -w "%{http_code}" localhost:7860 | grep 200; do sleep 1; done
+ timeout-minutes: 1
+ - run: docker compose -f docker-compose.production.yml down
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f1b26f1ea73cad18af0078381a02bbc532714a0a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+node_modules
+.DS_Store
+/client/dist
+/server/models
+.vscode
+/vite-build-stats.html
+.env
diff --git a/.husky/pre-commit b/.husky/pre-commit
new file mode 100644
index 0000000000000000000000000000000000000000..2312dc587f61186ccf0d627d678d851b9eef7b82
--- /dev/null
+++ b/.husky/pre-commit
@@ -0,0 +1 @@
+npx lint-staged
diff --git a/.npmrc b/.npmrc
new file mode 100644
index 0000000000000000000000000000000000000000..80bcbed90c4f2b3d895d5086dc775e1bd8b32b43
--- /dev/null
+++ b/.npmrc
@@ -0,0 +1 @@
+legacy-peer-deps = true
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..9cede968af905f548a6faee9fc175750fe371a10
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,96 @@
+FROM node:lts AS llama-builder
+
+ARG LLAMA_CPP_RELEASE_TAG="b6604"
+
+RUN apt-get update && apt-get install -y \
+ build-essential \
+ cmake \
+ ccache \
+ git \
+ curl
+
+RUN cd /tmp && \
+ git clone https://github.com/ggerganov/llama.cpp.git && \
+ cd llama.cpp && \
+ git checkout $LLAMA_CPP_RELEASE_TAG && \
+ cmake -B build -DGGML_NATIVE=OFF -DLLAMA_CURL=OFF && \
+ cmake --build build --config Release -j --target llama-server && \
+ mkdir -p /usr/local/lib/llama && \
+ find build -type f \( -name "libllama.so" -o -name "libmtmd.so" -o -name "libggml.so" -o -name "libggml-base.so" -o -name "libggml-cpu.so" \) -exec cp {} /usr/local/lib/llama/ \;
+
+FROM node:lts
+
+ENV PORT=7860
+EXPOSE $PORT
+
+ARG USERNAME=node
+ARG HOME_DIR=/home/${USERNAME}
+ARG APP_DIR=${HOME_DIR}/app
+
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends \
+ python3 \
+ python3-venv && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN mkdir -p /usr/local/searxng /etc/searxng && \
+ chown -R ${USERNAME}:${USERNAME} /usr/local/searxng /etc/searxng && \
+ chmod 755 /etc/searxng
+
+WORKDIR /usr/local/searxng
+RUN python3 -m venv searxng-venv && \
+ chown -R ${USERNAME}:${USERNAME} /usr/local/searxng/searxng-venv && \
+ /usr/local/searxng/searxng-venv/bin/pip install --upgrade pip && \
+ /usr/local/searxng/searxng-venv/bin/pip install wheel setuptools pyyaml lxml
+
+RUN git clone https://github.com/searxng/searxng.git /usr/local/searxng/searxng-src && \
+ chown -R ${USERNAME}:${USERNAME} /usr/local/searxng/searxng-src
+
+ARG SEARXNG_SETTINGS_PATH="/etc/searxng/settings.yml"
+
+WORKDIR /usr/local/searxng/searxng-src
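+# Configure SearXNG: randomize its secret key, switch the results format to JSON, and install it into the venv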
+RUN cp searx/settings.yml $SEARXNG_SETTINGS_PATH && \
+ chown ${USERNAME}:${USERNAME} $SEARXNG_SETTINGS_PATH && \
+ chmod 644 $SEARXNG_SETTINGS_PATH && \
+ sed -i 's/ultrasecretkey/'$(openssl rand -hex 32)'/g' $SEARXNG_SETTINGS_PATH && \
+ sed -i 's/- html/- json/' $SEARXNG_SETTINGS_PATH && \
+ /usr/local/searxng/searxng-venv/bin/pip install -r requirements.txt && \
+ /usr/local/searxng/searxng-venv/bin/pip install --no-build-isolation -e .
+
+COPY --from=llama-builder /tmp/llama.cpp/build/bin/llama-server /usr/local/bin/
+COPY --from=llama-builder /usr/local/lib/llama/* /usr/local/lib/
+RUN ldconfig /usr/local/lib
+
+USER ${USERNAME}
+
+WORKDIR ${APP_DIR}
+
+ARG ACCESS_KEYS
+ARG ACCESS_KEY_TIMEOUT_HOURS
+ARG WEBLLM_DEFAULT_F16_MODEL_ID
+ARG WEBLLM_DEFAULT_F32_MODEL_ID
+ARG WLLAMA_DEFAULT_MODEL_ID
+ARG INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL
+ARG INTERNAL_OPENAI_COMPATIBLE_API_KEY
+ARG INTERNAL_OPENAI_COMPATIBLE_API_MODEL
+ARG INTERNAL_OPENAI_COMPATIBLE_API_NAME
+ARG DEFAULT_INFERENCE_TYPE
+ARG HOST
+ARG HMR_PORT
+ARG ALLOWED_HOSTS
+
+COPY --chown=${USERNAME}:${USERNAME} ./package.json ./package-lock.json ./.npmrc ./
+
+RUN npm ci
+
+COPY --chown=${USERNAME}:${USERNAME} . .
+
+RUN git config --global --add safe.directory ${APP_DIR} && \
+ npm run build
+
+HEALTHCHECK --interval=5m CMD curl -f http://localhost:7860/status || exit 1
+
+ENTRYPOINT [ "/bin/sh", "-c" ]
+
+CMD ["(cd /usr/local/searxng/searxng-src && /usr/local/searxng/searxng-venv/bin/python -m searx.webapp > /dev/null 2>&1) & npm start -- --host"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..49ccc1a8283e7b7fdbcc8f53c3ebc4dce16f620a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,127 @@
+---
+title: MiniSearch
+emoji: 👌🔍
+colorFrom: yellow
+colorTo: yellow
+sdk: docker
+short_description: Minimalist web-searching app with browser-based AI assistant
+pinned: true
+custom_headers:
+ cross-origin-embedder-policy: require-corp
+ cross-origin-opener-policy: same-origin
+ cross-origin-resource-policy: cross-origin
+---
+
+# MiniSearch
+
+A minimalist web-searching app with an AI assistant that runs directly from your browser.
+
+Live demo: https://felladrin-minisearch.hf.space
+
+## Screenshot
+
+
+
+## Features
+
+- **Privacy-focused**: [No tracking, no ads, no data collection](https://docs.searxng.org/own-instance.html#how-does-searxng-protect-privacy)
+- **Easy to use**: Minimalist yet intuitive interface for all users
+- **Cross-platform**: Models run inside the browser, both on desktop and mobile
+- **Integrated**: Search from the browser address bar by setting it as the default search engine
+- **Efficient**: Models are loaded and cached only when needed
+- **Customizable**: Tweakable settings for search results and text generation
+- **Open-source**: [The code is available for inspection and contribution at GitHub](https://github.com/felladrin/MiniSearch)
+
+## Prerequisites
+
+- [Docker](https://docs.docker.com/get-docker/)
+
+## Getting started
+
+Here are the easiest ways to get started with MiniSearch. Pick the one that suits you best.
+
+**Option 1** - Use [MiniSearch's Docker Image](https://github.com/felladrin/MiniSearch/pkgs/container/minisearch) by running in your terminal:
+
+```bash
+docker run -p 7860:7860 ghcr.io/felladrin/minisearch:main
+```
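+
+The same image accepts the environment variables documented in `.env.example`, passed with Docker's `-e` flag. A minimal sketch, assuming you want to protect your instance with an access key:
+
+```bash
+docker run -p 7860:7860 -e ACCESS_KEYS="PepperoniPizza" ghcr.io/felladrin/minisearch:main
+```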
+
+**Option 2** - Add MiniSearch's Docker Image to your existing Docker Compose file:
+
+```yaml
+services:
+ minisearch:
+ image: ghcr.io/felladrin/minisearch:main
+ ports:
+ - "7860:7860"
+```
+
+**Option 3** - Build from source by [downloading the repository files](https://github.com/felladrin/MiniSearch/archive/refs/heads/main.zip) and running:
+
+```bash
+docker compose -f docker-compose.production.yml up --build
+```
+
+Once the container is running, open http://localhost:7860 in your browser and start searching!
+
+## Frequently asked questions ([DeepWiki](https://deepwiki.com/felladrin/MiniSearch))
+
+<details>
+  <summary>How do I search via the browser's address bar?</summary>
+  <p>
+    You can set MiniSearch as your browser's address-bar search engine using the
+    pattern <code>http://localhost:7860/?q=%s</code>, in which your search term
+    replaces <code>%s</code>.
+  </p>
+</details>
+
+<details>
+  <summary>How do I search via Raycast?</summary>
+  <p>
+    You can add this Quicklink to Raycast, so typing your query will open
+    MiniSearch with the search results. You can also edit it to point to your
+    own domain.
+  </p>
+</details>
+
+<details>
+  <summary>Can I use custom models via OpenAI-Compatible API?</summary>
+  <p>
+    Yes! For this, open the Menu and change the "AI Processing Location" to
+    <code>Remote server (API)</code>. Then configure the Base URL, and
+    optionally set an API Key and a Model to use.
+  </p>
+</details>
+
+<details>
+  <summary>How do I restrict the access to my MiniSearch instance via password?</summary>
+  <p>
+    Create a <code>.env</code> file and set a value for <code>ACCESS_KEYS</code>.
+    Then restart the MiniSearch docker container.
+  </p>
+  <p>
+    For example, if you want to set the password to
+    <code>PepperoniPizza</code>, then this is what you should add to your
+    <code>.env</code>:<br />
+    <code>ACCESS_KEYS="PepperoniPizza"</code>
+  </p>
+  <p>
+    You can find more examples in the <code>.env.example</code> file.
+  </p>
+</details>
+
+<details>
+  <summary>I want to serve MiniSearch to other users, allowing them to use my own OpenAI-Compatible API key, but without revealing it to them. Is it possible?</summary>
+  <p>Yes! In MiniSearch, we call this text-generation feature "Internal OpenAI-Compatible API". To use it:</p>
+  <ol>
+    <li>
+      Set up your OpenAI-Compatible API endpoint by configuring the following
+      environment variables in your <code>.env</code> file (a sketch of such a
+      file follows this list):
+      <ul>
+        <li><code>INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL</code>: The base URL for your API</li>
+        <li><code>INTERNAL_OPENAI_COMPATIBLE_API_KEY</code>: Your API access key</li>
+        <li><code>INTERNAL_OPENAI_COMPATIBLE_API_MODEL</code>: The model to use</li>
+        <li><code>INTERNAL_OPENAI_COMPATIBLE_API_NAME</code>: The name to display in the UI</li>
+      </ul>
+    </li>
+    <li>Restart the MiniSearch server.</li>
+    <li>In the MiniSearch menu, select the new option (named as per your <code>INTERNAL_OPENAI_COMPATIBLE_API_NAME</code> setting) from the "AI Processing Location" dropdown.</li>
+  </ol>
+</details>
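+
+As a sketch with placeholder values only, such a `.env` could look like:
+
+```bash
+INTERNAL_OPENAI_COMPATIBLE_API_BASE_URL="https://api.openai.com/v1"
+INTERNAL_OPENAI_COMPATIBLE_API_KEY="sk-..."
+INTERNAL_OPENAI_COMPATIBLE_API_MODEL="gpt-4o-mini"
+INTERNAL_OPENAI_COMPATIBLE_API_NAME="Our Internal API"
+```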
+
+<details>
+  <summary>How can I contribute to the development of this tool?</summary>
+  <p>Fork this repository and clone it. Then, start the development server by running the following command:</p>
+  <p><code>docker compose up</code></p>
+  <p>Make your changes, push them to your fork, and open a pull request! All contributions are welcome!</p>
+</details>
+
diff --git a/biome.json b/biome.json
new file mode 100644
index 0000000000000000000000000000000000000000..b5afc2926ca9cd15a6face957349605f13f8a6a5
--- /dev/null
+++ b/biome.json
@@ -0,0 +1,34 @@
+{
+ "$schema": "https://biomejs.dev/schemas/latest/schema.json",
+ "vcs": {
+ "enabled": false,
+ "clientKind": "git",
+ "useIgnoreFile": false
+ },
+ "files": {
+ "ignoreUnknown": false
+ },
+ "formatter": {
+ "enabled": true,
+ "indentStyle": "space"
+ },
+ "linter": {
+ "enabled": true,
+ "rules": {
+ "recommended": true
+ }
+ },
+ "javascript": {
+ "formatter": {
+ "quoteStyle": "double"
+ }
+ },
+ "assist": {
+ "enabled": true,
+ "actions": {
+ "source": {
+ "organizeImports": "on"
+ }
+ }
+ }
+}
diff --git a/client/components/AiResponse/AiModelDownloadAllowanceContent.tsx b/client/components/AiResponse/AiModelDownloadAllowanceContent.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..227cec2d8152fb4ddcb9a4e91e4c8e3034b0feba
--- /dev/null
+++ b/client/components/AiResponse/AiModelDownloadAllowanceContent.tsx
@@ -0,0 +1,62 @@
+import { Alert, Button, Group, Text } from "@mantine/core";
+import { IconCheck, IconInfoCircle, IconX } from "@tabler/icons-react";
+import { usePubSub } from "create-pubsub/react";
+import { useState } from "react";
+import { addLogEntry } from "../../modules/logEntries";
+import { settingsPubSub } from "../../modules/pubSub";
+
+export default function AiModelDownloadAllowanceContent() {
+ const [settings, setSettings] = usePubSub(settingsPubSub);
+ const [hasDeniedDownload, setDeniedDownload] = useState(false);
+
+ const handleAccept = () => {
+ setSettings({
+ ...settings,
+ allowAiModelDownload: true,
+ });
+ addLogEntry("User allowed the AI model download");
+ };
+
+ const handleDecline = () => {
+ setDeniedDownload(true);
+ addLogEntry("User denied the AI model download");
+ };
+
+  return hasDeniedDownload ? null : (
+    <Alert
+      variant="light"
+      color="blue"
+      title="Allow AI model download?"
+      icon={<IconInfoCircle />}
+    >
+      <Text size="sm">
+        To obtain AI responses, a language model needs to be downloaded to your
+        browser. Enabling this option lets the app store it and load it
+        instantly on subsequent uses.
+      </Text>
+      <Text size="sm">
+        Please note that the download size ranges from 100 MB to 4 GB, depending
+        on the model you select in the Menu, so it's best to avoid using mobile
+        data for this.
+      </Text>
+      <Group justify="flex-end" mt="md">
+        <Button
+          variant="subtle"
+          color="gray"
+          leftSection={<IconX size={16} />}
+          onClick={handleDecline}
+          size="xs"
+        >
+          Not now
+        </Button>
+        <Button
+          leftSection={<IconCheck size={16} />}
+          onClick={handleAccept}
+          size="xs"
+        >
+          Allow download
+        </Button>
+      </Group>
+    </Alert>
+  );
+}
diff --git a/client/components/AiResponse/AiResponseContent.tsx b/client/components/AiResponse/AiResponseContent.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..4905d471908774c431464e3047f89ce2c0218252
--- /dev/null
+++ b/client/components/AiResponse/AiResponseContent.tsx
@@ -0,0 +1,219 @@
+import {
+ ActionIcon,
+ Alert,
+ Badge,
+ Box,
+ Card,
+ Group,
+ ScrollArea,
+ Text,
+ Tooltip,
+} from "@mantine/core";
+import {
+ IconArrowsMaximize,
+ IconArrowsMinimize,
+ IconHandStop,
+ IconInfoCircle,
+ IconRefresh,
+ IconVolume2,
+} from "@tabler/icons-react";
+import type { PublishFunction } from "create-pubsub";
+import { usePubSub } from "create-pubsub/react";
+import { type ReactNode, useMemo, useState } from "react";
+import { addLogEntry } from "../../modules/logEntries";
+import { settingsPubSub } from "../../modules/pubSub";
+import { searchAndRespond } from "../../modules/textGeneration";
+import CopyIconButton from "./CopyIconButton";
+import FormattedMarkdown from "./FormattedMarkdown";
+
+export default function AiResponseContent({
+ textGenerationState,
+ response,
+ setTextGenerationState,
+}: {
+ textGenerationState: string;
+ response: string;
+ setTextGenerationState: PublishFunction<
+ | "failed"
+ | "awaitingSearchResults"
+ | "preparingToGenerate"
+ | "idle"
+ | "loadingModel"
+ | "generating"
+ | "interrupted"
+ | "completed"
+ >;
+}) {
+ const [settings, setSettings] = usePubSub(settingsPubSub);
+ const [isSpeaking, setIsSpeaking] = useState(false);
+
+ const ConditionalScrollArea = useMemo(
+ () =>
+ ({ children }: { children: ReactNode }) => {
+        return settings.enableAiResponseScrolling ? (
+          <ScrollArea.Autosize mah={300} type="auto" offsetScrollbars>
+            {children}
+          </ScrollArea.Autosize>
+        ) : (
+          <Box>{children}</Box>
+        );
+ },
+ [settings.enableAiResponseScrolling],
+ );
+
+ function speakResponse(text: string) {
+ if (isSpeaking) {
+ self.speechSynthesis.cancel();
+ setIsSpeaking(false);
+ return;
+ }
+
+ const prepareTextForSpeech = (textToClean: string) => {
+ const withoutReasoning = textToClean.replace(
+ new RegExp(
+ `${settings.reasoningStartMarker}[\\s\\S]*?${settings.reasoningEndMarker}`,
+ "g",
+ ),
+ "",
+ );
+ const withoutLinks = withoutReasoning.replace(
+ /\[([^\]]+)\]\([^)]+\)/g,
+ "($1)",
+ );
+ const withoutMarkdown = withoutLinks.replace(/[#*`_~[\]]/g, "");
+ return withoutMarkdown.trim();
+ };
+
+ const utterance = new SpeechSynthesisUtterance(prepareTextForSpeech(text));
+
+ const voices = self.speechSynthesis.getVoices();
+
+ if (voices.length > 0 && settings.selectedVoiceId) {
+ const voice = voices.find(
+ (voice) => voice.voiceURI === settings.selectedVoiceId,
+ );
+
+ if (voice) {
+ utterance.voice = voice;
+ utterance.lang = voice.lang;
+ }
+ }
+
+ utterance.onerror = () => {
+ addLogEntry("Failed to speak response");
+ setIsSpeaking(false);
+ };
+
+ utterance.onend = () => setIsSpeaking(false);
+
+ setIsSpeaking(true);
+ self.speechSynthesis.speak(utterance);
+ }
+
+  return (
+    <Card withBorder shadow="sm" radius="md">
+      <Card.Section withBorder inheritPadding py="xs">
+        <Group justify="space-between">
+          <Group gap="xs" align="center">
+            <Text fw={500}>
+              {textGenerationState === "generating"
+                ? "Generating AI Response..."
+                : "AI Response"}
+            </Text>
+            {textGenerationState === "interrupted" && (
+              <Badge variant="light" color="yellow" size="xs">
+                Interrupted
+              </Badge>
+            )}
+          </Group>
+          <Group gap="xs" align="center">
+            {textGenerationState === "generating" ? (
+              <Tooltip label="Interrupt generation">
+                <ActionIcon
+                  onClick={() => setTextGenerationState("interrupted")}
+                  variant="subtle"
+                  color="gray"
+                >
+                  <IconHandStop size={16} />
+                </ActionIcon>
+              </Tooltip>
+            ) : (
+              <Tooltip label="Regenerate response">
+                <ActionIcon
+                  onClick={() => searchAndRespond()}
+                  variant="subtle"
+                  color="gray"
+                >
+                  <IconRefresh size={16} />
+                </ActionIcon>
+              </Tooltip>
+            )}
+            <Tooltip label={isSpeaking ? "Stop speaking" : "Speak response"}>
+              <ActionIcon
+                onClick={() => speakResponse(response)}
+                variant="subtle"
+                color={isSpeaking ? "blue" : "gray"}
+              >
+                <IconVolume2 size={16} />
+              </ActionIcon>
+            </Tooltip>
+            {settings.enableAiResponseScrolling ? (
+              <Tooltip label="Maximize response">
+                <ActionIcon
+                  onClick={() => {
+                    setSettings({
+                      ...settings,
+                      enableAiResponseScrolling: false,
+                    });
+                  }}
+                  variant="subtle"
+                  color="gray"
+                >
+                  <IconArrowsMaximize size={16} />
+                </ActionIcon>
+              </Tooltip>
+            ) : (
+              <Tooltip label="Minimize response">
+                <ActionIcon
+                  onClick={() => {
+                    setSettings({
+                      ...settings,
+                      enableAiResponseScrolling: true,
+                    });
+                  }}
+                  variant="subtle"
+                  color="gray"
+                >
+                  <IconArrowsMinimize size={16} />
+                </ActionIcon>
+              </Tooltip>
+            )}
+            <CopyIconButton value={response} tooltipLabel="Copy response" />
+          </Group>
+        </Group>
+      </Card.Section>
+      <Card.Section withBorder>
+        <ConditionalScrollArea>
+          <FormattedMarkdown>{response}</FormattedMarkdown>
+        </ConditionalScrollArea>
+        {textGenerationState === "failed" && (
+          <Alert variant="light" color="red" icon={<IconInfoCircle />}>
+            Could not generate response. Please try refreshing the page.
+          </Alert>
+        )}
+      </Card.Section>
+    </Card>
+  );
+}
diff --git a/client/components/AiResponse/AiResponseSection.tsx b/client/components/AiResponse/AiResponseSection.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..20c8bb37d0c8633f35644d8aa0d1e833d369292a
--- /dev/null
+++ b/client/components/AiResponse/AiResponseSection.tsx
@@ -0,0 +1,101 @@
+import { CodeHighlightAdapterProvider } from "@mantine/code-highlight";
+import { usePubSub } from "create-pubsub/react";
+import { useMemo } from "react";
+import {
+ chatMessagesPubSub,
+ isRestoringFromHistoryPubSub,
+ modelLoadingProgressPubSub,
+ modelSizeInMegabytesPubSub,
+ queryPubSub,
+ responsePubSub,
+ settingsPubSub,
+ textGenerationStatePubSub,
+} from "../../modules/pubSub";
+import { shikiAdapter } from "../../modules/shiki";
+import "@mantine/code-highlight/styles.css";
+import AiModelDownloadAllowanceContent from "./AiModelDownloadAllowanceContent";
+import AiResponseContent from "./AiResponseContent";
+import ChatInterface from "./ChatInterface";
+import LoadingModelContent from "./LoadingModelContent";
+import PreparingContent from "./PreparingContent";
+
+export default function AiResponseSection() {
+ const [query] = usePubSub(queryPubSub);
+ const [response] = usePubSub(responsePubSub);
+ const [textGenerationState, setTextGenerationState] = usePubSub(
+ textGenerationStatePubSub,
+ );
+ const [modelLoadingProgress] = usePubSub(modelLoadingProgressPubSub);
+ const [settings] = usePubSub(settingsPubSub);
+ const [modelSizeInMegabytes] = usePubSub(modelSizeInMegabytesPubSub);
+ const [chatMessages] = usePubSub(chatMessagesPubSub);
+ const [isRestoringFromHistory] = usePubSub(isRestoringFromHistoryPubSub);
+
+ return useMemo(() => {
+ if (!settings.enableAiResponse || textGenerationState === "idle") {
+ return null;
+ }
+
+ const generatingStates = [
+ "generating",
+ "interrupted",
+ "completed",
+ "failed",
+ ];
+ if (generatingStates.includes(textGenerationState)) {
+      return (
+        <CodeHighlightAdapterProvider adapter={shikiAdapter}>
+          <AiResponseContent
+            textGenerationState={textGenerationState}
+            response={response}
+            setTextGenerationState={setTextGenerationState}
+          />
+          {textGenerationState === "completed" && (
+            <ChatInterface
+              initialQuery={query}
+              initialResponse={response}
+              initialMessages={
+                chatMessages.length > 0 ? chatMessages : undefined
+              }
+              suppressInitialFollowUp={isRestoringFromHistory}
+            />
+          )}
+        </CodeHighlightAdapterProvider>
+      );
+    }
+
+    if (textGenerationState === "loadingModel") {
+      return (
+        <LoadingModelContent
+          modelLoadingProgress={modelLoadingProgress}
+          modelSizeInMegabytes={modelSizeInMegabytes}
+        />
+      );
+    }
+
+    if (textGenerationState === "preparingToGenerate") {
+      return <PreparingContent textGenerationState={textGenerationState} />;
+    }
+
+    if (textGenerationState === "awaitingSearchResults") {
+      return <PreparingContent textGenerationState={textGenerationState} />;
+    }
+
+    if (textGenerationState === "awaitingModelDownloadAllowance") {
+      return <AiModelDownloadAllowanceContent />;
+    }
+
+ return null;
+ }, [
+ settings.enableAiResponse,
+ textGenerationState,
+ response,
+ query,
+ chatMessages,
+ modelLoadingProgress,
+ modelSizeInMegabytes,
+ setTextGenerationState,
+ isRestoringFromHistory,
+ ]);
+}
diff --git a/client/components/AiResponse/ChatHeader.tsx b/client/components/AiResponse/ChatHeader.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..4f1564976554881ed71921438afc5d8c5b1a631a
--- /dev/null
+++ b/client/components/AiResponse/ChatHeader.tsx
@@ -0,0 +1,33 @@
+import { Group, Text } from "@mantine/core";
+import type { ChatMessage } from "gpt-tokenizer/GptEncoding";
+import CopyIconButton from "./CopyIconButton";
+
+interface ChatHeaderProps {
+ messages: ChatMessage[];
+}
+
+function ChatHeader({ messages }: ChatHeaderProps) {
+ const getChatContent = () => {
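+    // The first user/assistant pair is rendered by AiResponseContent, so copy only the follow-up exchange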
+ return messages
+ .slice(2)
+ .map(
+ (msg, index) =>
+ `${index + 1}. ${msg.role?.toUpperCase()}\n\n${msg.content}`,
+ )
+ .join("\n\n");
+ };
+
+  return (
+    <Group justify="space-between">
+      <Text size="sm" fw={500}>
+        Follow-up questions
+      </Text>
+      {messages.length > 2 && (
+        <CopyIconButton
+          value={getChatContent()}
+          tooltipLabel="Copy conversation"
+        />
+      )}
+    </Group>
+  );
+}
+
+export default ChatHeader;
diff --git a/client/components/AiResponse/ChatInputArea.tsx b/client/components/AiResponse/ChatInputArea.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..0ded2a8a46ddcd1b9f0fe89e5dae0a27a2d65887
--- /dev/null
+++ b/client/components/AiResponse/ChatInputArea.tsx
@@ -0,0 +1,106 @@
+import { Button, Group, Textarea } from "@mantine/core";
+import { IconSend } from "@tabler/icons-react";
+import { usePubSub } from "create-pubsub/react";
+import {
+ chatGenerationStatePubSub,
+ chatInputPubSub,
+ followUpQuestionPubSub,
+ isRestoringFromHistoryPubSub,
+ suppressNextFollowUpPubSub,
+} from "../../modules/pubSub";
+
+interface ChatInputAreaProps {
+ onKeyDown: (event: React.KeyboardEvent) => void;
+ handleSend: (textToSend?: string) => void;
+}
+
+function ChatInputArea({ onKeyDown, handleSend }: ChatInputAreaProps) {
+ const [input, setInput] = usePubSub(chatInputPubSub);
+ const [generationState] = usePubSub(chatGenerationStatePubSub);
+ const [followUpQuestion] = usePubSub(followUpQuestionPubSub);
+ const [isRestoringFromHistory] = usePubSub(isRestoringFromHistoryPubSub);
+ const [suppressNextFollowUp] = usePubSub(suppressNextFollowUpPubSub);
+
+ const isGenerating =
+ generationState.isGeneratingResponse &&
+ !generationState.isGeneratingFollowUpQuestion;
+
+ const defaultPlaceholder = "Anything else you would like to know?";
+ const placeholder =
+ isRestoringFromHistory || suppressNextFollowUp
+ ? defaultPlaceholder
+ : followUpQuestion || defaultPlaceholder;
+
+ const onChange = (event: React.ChangeEvent) => {
+ setInput(event.target.value);
+ };
+ const handleKeyDownWithPlaceholder = (
+ event: React.KeyboardEvent,
+ ) => {
+ if (
+ input.trim() === "" &&
+ followUpQuestion &&
+ !isRestoringFromHistory &&
+ !suppressNextFollowUp
+ ) {
+ if (event.key === "Enter" && !event.shiftKey) {
+ event.preventDefault();
+ handleSend(followUpQuestion);
+ return;
+ }
+ }
+
+ onKeyDown(event);
+ };
+
+ const handleSendWithPlaceholder = () => {
+ if (
+ input.trim() === "" &&
+ followUpQuestion &&
+ !isRestoringFromHistory &&
+ !suppressNextFollowUp
+ ) {
+ handleSend(followUpQuestion);
+ } else {
+ handleSend();
+ }
+ };
+
+  return (
+    <Group align="flex-end" gap="xs">
+      <Textarea
+        value={input}
+        onChange={onChange}
+        onKeyDown={handleKeyDownWithPlaceholder}
+        placeholder={placeholder}
+        autosize
+        maxRows={6}
+        style={{ flexGrow: 1 }}
+      />
+      <Button onClick={handleSendWithPlaceholder} loading={isGenerating}>
+        <IconSend size={16} />
+      </Button>
+    </Group>
+  );
+}
+
+export default ChatInputArea;
diff --git a/client/components/AiResponse/ChatInterface.tsx b/client/components/AiResponse/ChatInterface.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..8977d75716a43f5a7e0d85f74cfa75dca15b7516
--- /dev/null
+++ b/client/components/AiResponse/ChatInterface.tsx
@@ -0,0 +1,434 @@
+import { Card, Stack } from "@mantine/core";
+import { usePubSub } from "create-pubsub/react";
+import type { ChatMessage } from "gpt-tokenizer/GptEncoding";
+import {
+ type KeyboardEvent,
+ useCallback,
+ useEffect,
+ useRef,
+ useState,
+} from "react";
+import throttle from "throttleit";
+import { generateFollowUpQuestion } from "../../modules/followUpQuestions";
+import {
+ getCurrentSearchRunId,
+ saveChatMessageForQuery,
+ updateSearchResults,
+} from "../../modules/history";
+import { handleEnterKeyDown } from "../../modules/keyboard";
+import { addLogEntry } from "../../modules/logEntries";
+import {
+ chatGenerationStatePubSub,
+ chatInputPubSub,
+ followUpQuestionPubSub,
+ imageSearchResultsPubSub,
+ queryPubSub,
+ settingsPubSub,
+ suppressNextFollowUpPubSub,
+ textSearchResultsPubSub,
+ updateImageSearchResults,
+ updateTextSearchResults,
+} from "../../modules/pubSub";
+import { generateRelatedSearchQuery } from "../../modules/relatedSearchQuery";
+import { searchImages, searchText } from "../../modules/search";
+import { generateChatResponse } from "../../modules/textGeneration";
+import ChatHeader from "./ChatHeader";
+import ChatInputArea from "./ChatInputArea";
+import MessageList from "./MessageList";
+
+interface ChatInterfaceProps {
+ initialQuery?: string;
+ initialResponse?: string;
+ initialMessages?: ChatMessage[];
+ suppressInitialFollowUp?: boolean;
+}
+
+export default function ChatInterface({
+ initialQuery,
+ initialResponse,
+ initialMessages,
+ suppressInitialFollowUp,
+}: ChatInterfaceProps) {
+ const initialMessagesArray =
+ initialMessages &&
+ initialMessages.length > 0 &&
+ initialQuery &&
+ initialResponse
+ ? [
+ { role: "user" as const, content: initialQuery },
+ { role: "assistant" as const, content: initialResponse },
+ ...initialMessages,
+ ]
+ : initialMessages || [];
+
+ const [messages, setMessages] = useState(initialMessagesArray);
+ const [input, setInput] = usePubSub(chatInputPubSub);
+ const [generationState, setGenerationState] = usePubSub(
+ chatGenerationStatePubSub,
+ );
+ const [, setFollowUpQuestion] = usePubSub(followUpQuestionPubSub);
+ const [textSearchResults] = usePubSub(textSearchResultsPubSub);
+ const [imageSearchResults] = usePubSub(imageSearchResultsPubSub);
+ const [currentQuery] = usePubSub(queryPubSub);
+ const [suppressNextFollowUp] = usePubSub(suppressNextFollowUpPubSub);
+ const [previousFollowUpQuestions, setPreviousFollowUpQuestions] = useState<
+ string[]
+ >([]);
+ const [settings] = usePubSub(settingsPubSub);
+ const [streamedResponse, setStreamedResponse] = useState("");
+ const hasInitialized = useRef(false);
+ const prevInitialMessagesRef = useRef(undefined);
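+  // Throttle streamed-response re-renders to roughly 12 updates per second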
+ const updateStreamedResponse = useCallback(
+ throttle((response: string) => {
+ setStreamedResponse(response);
+ }, 1000 / 12),
+ [],
+ );
+
+ const regenerateFollowUpQuestion = useCallback(
+ async (currentQuery: string, currentResponse: string) => {
+ if (suppressNextFollowUp) return;
+ if (!currentResponse || !currentQuery.trim()) return;
+
+ try {
+ setGenerationState({
+ isGeneratingResponse: false,
+ isGeneratingFollowUpQuestion: true,
+ });
+
+ const newQuestion = await generateFollowUpQuestion({
+ topic: currentQuery,
+ currentContent: currentResponse,
+ previousQuestions: previousFollowUpQuestions,
+ });
+
+ setPreviousFollowUpQuestions((prev) =>
+ [...prev, newQuestion].slice(-5),
+ );
+ setFollowUpQuestion(newQuestion);
+ setGenerationState({
+ isGeneratingResponse: false,
+ isGeneratingFollowUpQuestion: false,
+ });
+ } catch (_) {
+ setFollowUpQuestion("");
+ setGenerationState({
+ isGeneratingResponse: false,
+ isGeneratingFollowUpQuestion: false,
+ });
+ }
+ },
+ [
+ setFollowUpQuestion,
+ setGenerationState,
+ previousFollowUpQuestions,
+ suppressNextFollowUp,
+ ],
+ );
+
+ useEffect(() => {
+ const messagesChanged =
+ !prevInitialMessagesRef.current ||
+ JSON.stringify(prevInitialMessagesRef.current) !==
+ JSON.stringify(initialMessages);
+
+ if (!messagesChanged) return;
+
+ prevInitialMessagesRef.current = initialMessages;
+
+ const newInitialMessagesArray =
+ initialMessages &&
+ initialMessages.length > 0 &&
+ initialQuery &&
+ initialResponse
+ ? [
+ { role: "user" as const, content: initialQuery },
+ { role: "assistant" as const, content: initialResponse },
+ ...initialMessages,
+ ]
+ : initialMessages || [];
+
+ if (newInitialMessagesArray.length > 0) {
+ setMessages(newInitialMessagesArray);
+ } else if (initialQuery && initialResponse) {
+ setMessages([
+ { role: "user", content: initialQuery },
+ { role: "assistant", content: initialResponse },
+ ]);
+ }
+ }, [initialQuery, initialResponse, initialMessages]);
+
+ useEffect(() => {
+ if (suppressNextFollowUp) {
+ hasInitialized.current = true;
+ return;
+ }
+ if (suppressInitialFollowUp) return;
+ if (hasInitialized.current) return;
+
+ if (initialMessages && initialMessages.length > 0) {
+ const lastAssistant = messages
+ .filter((m) => m.role === "assistant")
+ .pop();
+ const lastUser = messages.filter((m) => m.role === "user").pop();
+ if (lastUser && lastAssistant) {
+ regenerateFollowUpQuestion(lastUser.content, lastAssistant.content);
+ hasInitialized.current = true;
+ }
+ } else if (messages.length >= 2 && initialQuery && initialResponse) {
+ regenerateFollowUpQuestion(initialQuery, initialResponse);
+ hasInitialized.current = true;
+ }
+ }, [
+ initialQuery,
+ initialResponse,
+ initialMessages,
+ messages,
+ regenerateFollowUpQuestion,
+ suppressInitialFollowUp,
+ suppressNextFollowUp,
+ ]);
+
+ useEffect(() => {
+ return () => {
+ setFollowUpQuestion("");
+ setPreviousFollowUpQuestions([]);
+ };
+ }, [setFollowUpQuestion]);
+
+ const handleEditMessage = useCallback(
+ (absoluteIndex: number) => {
+ const target = messages[absoluteIndex];
+ if (!target || target.role !== "user") return;
+ setInput(target.content);
+ setMessages(messages.slice(0, absoluteIndex));
+ setFollowUpQuestion("");
+ },
+ [messages, setInput, setFollowUpQuestion],
+ );
+
+ const handleRegenerateResponse = useCallback(async () => {
+ if (
+ generationState.isGeneratingResponse ||
+ messages.length < 3 ||
+ messages[messages.length - 1].role !== "assistant"
+ )
+ return;
+
+ const history = messages.slice(0, -1);
+ const lastUser = history[history.length - 1];
+
+ setMessages(history);
+ setGenerationState({ ...generationState, isGeneratingResponse: true });
+ setFollowUpQuestion("");
+ setStreamedResponse("");
+
+ try {
+ const finalResponse = await generateChatResponse(
+ history,
+ updateStreamedResponse,
+ );
+
+ setMessages((prev) => [
+ ...prev,
+ { role: "assistant", content: finalResponse },
+ ]);
+
+ addLogEntry("AI response re-generated");
+
+ if (lastUser?.role === "user") {
+ await regenerateFollowUpQuestion(lastUser.content, finalResponse);
+ }
+ } catch (error) {
+ addLogEntry(`Error re-generating response: ${error}`);
+ } finally {
+ setGenerationState({ ...generationState, isGeneratingResponse: false });
+ }
+ }, [
+ generationState,
+ messages,
+ regenerateFollowUpQuestion,
+ setFollowUpQuestion,
+ setGenerationState,
+ updateStreamedResponse,
+ ]);
+
+ const handleSend = useCallback(
+ async (textToSend?: string) => {
+ const currentInput = textToSend ?? input;
+ if (currentInput.trim() === "" || generationState.isGeneratingResponse)
+ return;
+
+ const userMessage: ChatMessage = { role: "user", content: currentInput };
+ const newMessages: ChatMessage[] = [...messages, userMessage];
+
+ setMessages(newMessages);
+ if (!textToSend) setInput("");
+ setGenerationState({
+ ...generationState,
+ isGeneratingResponse: true,
+ });
+ setFollowUpQuestion("");
+ setStreamedResponse("");
+
+ try {
+ const relatedQuery = await generateRelatedSearchQuery([...newMessages]);
+ const searchQuery = relatedQuery || currentInput;
+
+ if (settings.enableTextSearch) {
+ const freshResults = await searchText(
+ searchQuery,
+ settings.searchResultsLimit,
+ );
+
+ if (freshResults.length > 0) {
+ const existingUrls = new Set(
+ textSearchResults.map(([, , url]) => url),
+ );
+
+ const uniqueFreshResults = freshResults.filter(
+ ([, , url]) => !existingUrls.has(url),
+ );
+
+ if (uniqueFreshResults.length > 0) {
+ const updatedResults = [
+ ...textSearchResults,
+ ...uniqueFreshResults,
+ ];
+ updateTextSearchResults(updatedResults);
+
+ updateSearchResults(getCurrentSearchRunId(), {
+ textResults: {
+ type: "text",
+ items: updatedResults.map(([title, snippet, url]) => ({
+ title,
+ url,
+ snippet,
+ })),
+ },
+ });
+ }
+ }
+ }
+
+ if (settings.enableImageSearch) {
+ searchImages(searchQuery, settings.searchResultsLimit)
+ .then((imageResults) => {
+ if (imageResults.length > 0) {
+ const existingUrls = new Set(
+ imageSearchResults.map(([, url]) => url),
+ );
+
+ const uniqueFreshResults = imageResults.filter(
+ ([, url]) => !existingUrls.has(url),
+ );
+
+ if (uniqueFreshResults.length > 0) {
+ const updatedImageResults = [
+ ...uniqueFreshResults,
+ ...imageSearchResults,
+ ];
+ updateImageSearchResults(updatedImageResults);
+
+ updateSearchResults(getCurrentSearchRunId(), {
+ imageResults: {
+ type: "image",
+ items: updatedImageResults.map(
+ ([title, url, thumbnailUrl, sourceUrl]) => ({
+ title,
+ url,
+ thumbnailUrl,
+ sourceUrl,
+ }),
+ ),
+ },
+ });
+ }
+ }
+ })
+ .catch((error) => {
+ addLogEntry(`Error in follow-up image search: ${error}`);
+ });
+ }
+ } catch (error) {
+ addLogEntry(`Error in follow-up search: ${error}`);
+ }
+
+ try {
+ const finalResponse = await generateChatResponse(
+ newMessages,
+ updateStreamedResponse,
+ );
+
+ setMessages((prevMessages) => [
+ ...prevMessages,
+ { role: "assistant", content: finalResponse },
+ ]);
+
+ addLogEntry("AI response completed");
+
+ await saveChatMessageForQuery(currentQuery, "user", currentInput);
+ await saveChatMessageForQuery(currentQuery, "assistant", finalResponse);
+
+ await regenerateFollowUpQuestion(currentInput, finalResponse);
+ } catch (error) {
+ addLogEntry(`Error in chat response: ${error}`);
+ setMessages((prevMessages) => [
+ ...prevMessages,
+ {
+ role: "assistant",
+ content:
+ "Sorry, I encountered an error while generating a response.",
+ },
+ ]);
+ } finally {
+ setGenerationState({
+ ...generationState,
+ isGeneratingResponse: false,
+ });
+ }
+ },
+ [
+ generationState,
+ messages,
+ settings,
+ input,
+ regenerateFollowUpQuestion,
+ setFollowUpQuestion,
+ setGenerationState,
+ setInput,
+ updateStreamedResponse,
+ currentQuery,
+ textSearchResults,
+ imageSearchResults,
+ ],
+ );
+
+ const handleKeyDown = useCallback(
+ (event: KeyboardEvent) => {
+ handleEnterKeyDown(event, settings, handleSend);
+ },
+ [settings, handleSend],
+ );
+
+  // While a reply is streaming, show it as a provisional assistant message
+  const displayedMessages = generationState.isGeneratingResponse
+    ? [...messages, { role: "assistant" as const, content: streamedResponse }]
+    : messages;
+
+  return (
+    <Card withBorder shadow="sm" radius="md" padding="md">
+      <Stack gap="md">
+        <ChatHeader messages={messages} />
+        <MessageList
+          messages={displayedMessages}
+          onEditMessage={handleEditMessage}
+          onRegenerate={handleRegenerateResponse}
+          isGenerating={generationState.isGeneratingResponse}
+        />
+        <ChatInputArea onKeyDown={handleKeyDown} handleSend={handleSend} />
+      </Stack>
+    </Card>
+  );
+ );
+}
diff --git a/client/components/AiResponse/CopyIconButton.tsx b/client/components/AiResponse/CopyIconButton.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..af8d82dfe54056adf9761d7c2d74d7fae37b348b
--- /dev/null
+++ b/client/components/AiResponse/CopyIconButton.tsx
@@ -0,0 +1,32 @@
+import { ActionIcon, CopyButton, Tooltip } from "@mantine/core";
+import { IconCheck, IconCopy } from "@tabler/icons-react";
+
+interface CopyIconButtonProps {
+ value: string;
+ tooltipLabel?: string;
+}
+
+export default function CopyIconButton({
+ value,
+ tooltipLabel = "Copy",
+}: CopyIconButtonProps) {
+  return (
+    <CopyButton value={value} timeout={2000}>
+      {({ copied, copy }) => (
+        <Tooltip label={copied ? "Copied" : tooltipLabel} withArrow>
+          <ActionIcon
+            color={copied ? "teal" : "gray"}
+            variant="subtle"
+            onClick={copy}
+          >
+            {copied ? <IconCheck size={16} /> : <IconCopy size={16} />}
+          </ActionIcon>
+        </Tooltip>
+      )}
+    </CopyButton>
+  );
+}
diff --git a/client/components/AiResponse/EnableAiResponsePrompt.tsx b/client/components/AiResponse/EnableAiResponsePrompt.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..07fad1766668effd48db99fb0280a4fbd7eaf794
--- /dev/null
+++ b/client/components/AiResponse/EnableAiResponsePrompt.tsx
@@ -0,0 +1,85 @@
+import {
+ ActionIcon,
+ Alert,
+ Button,
+ Grid,
+ Group,
+ Popover,
+ Stack,
+ Text,
+} from "@mantine/core";
+import { IconCheck, IconInfoCircle, IconX } from "@tabler/icons-react";
+
+interface EnableAiResponsePromptProps {
+ onAccept: () => void;
+ onDecline: () => void;
+}
+
+export default function EnableAiResponsePrompt({
+ onAccept,
+ onDecline,
+}: EnableAiResponsePromptProps) {
+  const helpContent = (
+    <Stack gap="xs">
+      <Text size="sm">
+        MiniSearch is a web-searching app with an integrated AI assistant.
+      </Text>
+      <Text size="sm">
+        With AI Responses enabled, it will generate summaries and answer
+        questions based on search results.
+      </Text>
+      <Text size="sm">
+        If disabled, it will function as a classic web search tool.
+      </Text>
+      <Text size="sm">
+        You can toggle this feature at any time through the Menu.
+      </Text>
+    </Stack>
+  );
+
+  return (
+    <Alert variant="light" color="blue">
+      <Grid align="center">
+        <Grid.Col span="auto">
+          <Group gap="xs">
+            <Text size="sm" fw={500}>
+              Enable AI Responses?
+            </Text>
+            <Popover width={300} position="bottom" withArrow shadow="md">
+              <Popover.Target>
+                <ActionIcon variant="subtle" color="gray" size="sm">
+                  <IconInfoCircle size={16} />
+                </ActionIcon>
+              </Popover.Target>
+              <Popover.Dropdown>{helpContent}</Popover.Dropdown>
+            </Popover>
+          </Group>
+        </Grid.Col>
+        <Grid.Col span="content">
+          <Group gap="xs">
+            <Button
+              variant="subtle"
+              color="gray"
+              leftSection={<IconX size={16} />}
+              onClick={onDecline}
+              size="xs"
+            >
+              No, thanks
+            </Button>
+            <Button
+              leftSection={<IconCheck size={16} />}
+              onClick={onAccept}
+              size="xs"
+            >
+              Yes, please
+            </Button>
+          </Group>
+        </Grid.Col>
+      </Grid>
+    </Alert>
+  );
+}
diff --git a/client/components/AiResponse/ExpandableLink.tsx b/client/components/AiResponse/ExpandableLink.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..187b87fbd284f6c7e0cb21466d213cffe6fcac5a
--- /dev/null
+++ b/client/components/AiResponse/ExpandableLink.tsx
@@ -0,0 +1,123 @@
+import type { MantineTheme } from "@mantine/core";
+import { Button } from "@mantine/core";
+import React from "react";
+
+interface ExpandableLinkProps {
+ href: string;
+ children: React.ReactNode;
+}
+
+export default function ExpandableLink({
+ href,
+ children,
+}: ExpandableLinkProps) {
+ const childContent = children?.toString() || "";
+ const firstChar = childContent.charAt(0);
+ const [isExpanded, setIsExpanded] = React.useState(true);
+ const timerRef = React.useRef(null);
+
+ React.useEffect(() => {
+ timerRef.current = window.setTimeout(() => {
+ setIsExpanded(false);
+ timerRef.current = null;
+ }, 3000);
+
+ return () => {
+ if (timerRef.current) {
+ clearTimeout(timerRef.current);
+ }
+ };
+ }, []);
+
+ const handleMouseEnter = () => {
+ if (timerRef.current) {
+ clearTimeout(timerRef.current);
+ timerRef.current = null;
+ }
+ setIsExpanded(true);
+ };
+
+ const handleMouseLeave = () => {
+ timerRef.current = window.setTimeout(() => {
+ setIsExpanded(false);
+ timerRef.current = null;
+ }, 3000);
+ };
+
+ const fullTextRef = React.useRef(null);
+ const [fullTextWidth, setFullTextWidth] = React.useState(0);
+
+ React.useEffect(() => {
+ const measureText = () => {
+ if (fullTextRef.current) {
+ setFullTextWidth(fullTextRef.current.scrollWidth);
+ }
+ };
+
+ measureText();
+
+ window.addEventListener("resize", measureText);
+ return () => {
+ window.removeEventListener("resize", measureText);
+ };
+ }, []);
+
+  return (
+    <Button
+      component="a"
+      href={href}
+      target="_blank"
+      rel="noopener noreferrer"
+      variant="light"
+      size="compact-xs"
+      style={(theme: MantineTheme) => ({
+        textDecoration: "none",
+        transform: "translateY(-2px)",
+        overflow: "hidden",
+        position: "relative",
+        width: isExpanded ? `${fullTextWidth + theme.spacing.md}px` : "2em",
+        transition: "width 0.3s ease-in-out",
+        textAlign: "center",
+      })}
+      onMouseEnter={handleMouseEnter}
+      onMouseLeave={handleMouseLeave}
+      onFocus={handleMouseEnter}
+      onBlur={handleMouseLeave}
+    >
+      <span style={{ visibility: isExpanded ? "hidden" : "visible" }}>
+        {firstChar}
+      </span>
+      <span
+        ref={fullTextRef}
+        style={{
+          position: "absolute",
+          whiteSpace: "nowrap",
+          visibility: isExpanded ? "visible" : "hidden",
+        }}
+      >
+        {children}
+      </span>
+    </Button>
+  );
+}
diff --git a/client/components/AiResponse/FormattedMarkdown.tsx b/client/components/AiResponse/FormattedMarkdown.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..47c13489c5850832d585b90878eeab2005d689a4
--- /dev/null
+++ b/client/components/AiResponse/FormattedMarkdown.tsx
@@ -0,0 +1,41 @@
+import { TypographyStylesProvider } from "@mantine/core";
+import { useReasoningContent } from "./hooks/useReasoningContent";
+import MarkdownRenderer from "./MarkdownRenderer";
+import ReasoningSection from "./ReasoningSection";
+
+interface FormattedMarkdownProps {
+ children: string;
+ className?: string;
+ enableCopy?: boolean;
+}
+
+export default function FormattedMarkdown({
+ children,
+ className = "",
+ enableCopy = true,
+}: FormattedMarkdownProps) {
+ const { reasoningContent, mainContent, isGenerating } =
+ useReasoningContent(children);
+
+ if (!children) {
+ return null;
+ }
+
+  return (
+    <TypographyStylesProvider className={className}>
+      {reasoningContent && (
+        <ReasoningSection
+          content={reasoningContent}
+          isGenerating={isGenerating}
+        />
+      )}
+      {!isGenerating && mainContent && (
+        <MarkdownRenderer content={mainContent} enableCopy={enableCopy} />
+      )}
+    </TypographyStylesProvider>
+  );
+}
diff --git a/client/components/AiResponse/LoadingModelContent.tsx b/client/components/AiResponse/LoadingModelContent.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..509a3f847a2427f5647c508a72a68f69a48da3bc
--- /dev/null
+++ b/client/components/AiResponse/LoadingModelContent.tsx
@@ -0,0 +1,40 @@
+import { Card, Group, Progress, Stack, Text } from "@mantine/core";
+
+export default function LoadingModelContent({
+ modelLoadingProgress,
+ modelSizeInMegabytes,
+}: {
+ modelLoadingProgress: number;
+ modelSizeInMegabytes: number;
+}) {
+ const isLoadingStarting = modelLoadingProgress === 0;
+ const isLoadingComplete = modelLoadingProgress === 100;
+ const percent =
+ isLoadingComplete || isLoadingStarting ? 100 : modelLoadingProgress;
+ const strokeColor = percent === 100 ? "#52c41a" : "#3385ff";
+ const downloadedSize = (modelSizeInMegabytes * modelLoadingProgress) / 100;
+ const sizeText = `${downloadedSize.toFixed(0)} MB / ${modelSizeInMegabytes.toFixed(0)} MB`;
+
+  return (
+    <Card withBorder shadow="sm" radius="md" padding="md">
+      <Stack gap="xs">
+        <Text size="lg" fw={500}>
+          Loading AI...
+        </Text>
+        <Progress
+          value={percent}
+          color={strokeColor}
+          animated={!isLoadingComplete}
+        />
+        {!isLoadingStarting && (
+          <Group justify="space-between">
+            <Text size="xs" c="dimmed">
+              {sizeText}
+            </Text>
+            <Text size="xs" c="dimmed">
+              {percent.toFixed(1)}%
+            </Text>
+          </Group>
+        )}
+      </Stack>
+    </Card>
+  );
+}
diff --git a/client/components/AiResponse/MarkdownRenderer.tsx b/client/components/AiResponse/MarkdownRenderer.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..b0ec794c66ce188ac28a8c019aa37ac7231e5871
--- /dev/null
+++ b/client/components/AiResponse/MarkdownRenderer.tsx
@@ -0,0 +1,104 @@
+import { CodeHighlight } from "@mantine/code-highlight";
+import { Blockquote, Box, Code, Divider, Text } from "@mantine/core";
+import React from "react";
+import { ErrorBoundary } from "react-error-boundary";
+import Markdown from "react-markdown";
+import rehypeExternalLinks from "rehype-external-links";
+import remarkGfm from "remark-gfm";
+import ExpandableLink from "./ExpandableLink";
+
+interface MarkdownRendererProps {
+ content: string;
+ enableCopy?: boolean;
+ className?: string;
+}
+
+export default function MarkdownRenderer({
+ content,
+ enableCopy = true,
+ className = "",
+}: MarkdownRendererProps) {
+ if (!content) {
+ return null;
+ }
+
+ const unwrapParagraphs = (children: React.ReactNode) => {
+ return React.Children.map(children, (child) => {
+ if (React.isValidElement(child) && child.type === "p") {
+ return (child.props as { children: React.ReactNode }).children;
+ }
+ return child;
+ });
+ };
+
+  return (
+    <Box className={className}>
+      <ErrorBoundary
+        fallback={<Text c="dimmed">Failed to render Markdown content.</Text>}
+      >
+        <Markdown
+          remarkPlugins={[remarkGfm]}
+          rehypePlugins={[
+            [rehypeExternalLinks, { target: "_blank", rel: ["noopener"] }],
+          ]}
+          components={{
+            a(props) {
+              const { href, children } = props;
+              return (
+                <ExpandableLink href={href ?? ""}>{children}</ExpandableLink>
+              );
+            },
+            li(props) {
+              const { children } = props;
+              return <Text component="li">{unwrapParagraphs(children)}</Text>;
+            },
+            hr() {
+              return <Divider my="md" />;
+            },
+            pre(props) {
+              return <>{props.children}</>;
+            },
+            blockquote(props) {
+              const { children } = props;
+              return (
+                <Blockquote p="sm" my="md">
+                  {unwrapParagraphs(children)}
+                </Blockquote>
+              );
+            },
+            code(props) {
+              const { children, className, node } = props;
+              const codeContent = children?.toString().replace(/\n$/, "") ?? "";
+              let language = "text";
+
+              if (className) {
+                const languageMatch = /language-(\w+)/.exec(className);
+                if (languageMatch) language = languageMatch[1];
+              }
+
+              if (
+                language === "text" &&
+                node?.position?.end.line === node?.position?.start.line
+              ) {
+                return <Code>{codeContent}</Code>;
+              }
+
+              return (
+                <ErrorBoundary fallback={<Code block>{codeContent}</Code>}>
+                  <CodeHighlight code={codeContent} language={language} />
+                </ErrorBoundary>
+              );
+            },
+          }}
+        >
+          {content}
+        </Markdown>
+      </ErrorBoundary>
+    </Box>
+  );
+}
diff --git a/client/components/AiResponse/MessageList.tsx b/client/components/AiResponse/MessageList.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..0324fd98f5cef3248e5f5a137e1309ee22f04830
--- /dev/null
+++ b/client/components/AiResponse/MessageList.tsx
@@ -0,0 +1,126 @@
+import { ActionIcon, Group, Paper, Stack, Tooltip } from "@mantine/core";
+import { IconPencil, IconRefresh } from "@tabler/icons-react";
+import type { ChatMessage } from "gpt-tokenizer/GptEncoding";
+import { memo } from "react";
+import FormattedMarkdown from "./FormattedMarkdown";
+
+interface MessageListProps {
+ messages: ChatMessage[];
+ onEditMessage: (absoluteIndex: number) => void;
+ onRegenerate: () => void;
+ isGenerating: boolean;
+}
+
+interface MessageProps {
+ message: ChatMessage;
+ index: number;
+ absoluteIndex: number;
+ isLastAssistant: boolean;
+ isGenerating: boolean;
+ onEditMessage: (absoluteIndex: number) => void;
+ onRegenerate: () => void;
+}
+
+const Message = memo(function Message({
+ message,
+ index,
+ absoluteIndex,
+ isLastAssistant,
+ isGenerating,
+ onEditMessage,
+ onRegenerate,
+}: MessageProps) {
+ const canEdit = message.role === "user";
+ const canRegenerate = isLastAssistant && message.role === "assistant";
+ const iconSize = 16;
+ const iconVariant: "subtle" = "subtle";
+
+  return (
+    <Group gap="xs" align="flex-start" wrap="nowrap">
+      {canEdit && (
+        <Tooltip label="Edit message">
+          <ActionIcon
+            variant={iconVariant}
+            color="gray"
+            disabled={isGenerating}
+            onClick={() => onEditMessage(absoluteIndex)}
+          >
+            <IconPencil size={iconSize} />
+          </ActionIcon>
+        </Tooltip>
+      )}
+      <Paper shadow="none" radius="lg" p="sm" style={{ flex: 1 }}>
+        <FormattedMarkdown>{message.content}</FormattedMarkdown>
+      </Paper>
+      {canRegenerate && (
+        <Tooltip label="Regenerate response">
+          <ActionIcon
+            variant={iconVariant}
+            color="gray"
+            disabled={isGenerating}
+            onClick={() => onRegenerate()}
+          >
+            <IconRefresh size={iconSize} />
+          </ActionIcon>
+        </Tooltip>
+      )}
+    </Group>
+  );
+});
+
+const MessageList = memo(function MessageList({
+ messages,
+ onEditMessage,
+ onRegenerate,
+ isGenerating,
+}: MessageListProps) {
+ if (messages.length <= 2) return null;
+
+  return (
+    <Stack gap="md">
+      {messages
+        .slice(2)
+        .filter((message) => message.content.length > 0)
+        .map((message, index) => {
+          const absoluteIndex = index + 2;
+          const isLastAssistant =
+            absoluteIndex === messages.length - 1 &&
+            message.role === "assistant";
+          return (
+            <Message
+              key={`${message.role}-${absoluteIndex}`}
+              message={message}
+              index={index}
+              absoluteIndex={absoluteIndex}
+              isLastAssistant={isLastAssistant}
+              isGenerating={isGenerating}
+              onEditMessage={onEditMessage}
+              onRegenerate={onRegenerate}
+            />
+          );
+        })}
+    </Stack>
+  );
+});
+
+export default MessageList;
diff --git a/client/components/AiResponse/PreparingContent.tsx b/client/components/AiResponse/PreparingContent.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..b6989c1a9fdab0440ac10adfbd56738c3975324a
--- /dev/null
+++ b/client/components/AiResponse/PreparingContent.tsx
@@ -0,0 +1,33 @@
+import { Card, Skeleton, Stack, Text } from "@mantine/core";
+
+export default function PreparingContent({
+ textGenerationState,
+}: {
+ textGenerationState: string;
+}) {
+ const getStateText = () => {
+ if (textGenerationState === "awaitingSearchResults") {
+ return "Awaiting search results...";
+ }
+ if (textGenerationState === "preparingToGenerate") {
+ return "Preparing AI response...";
+ }
+ return null;
+ };
+
+  return (
+    <Card withBorder shadow="sm" radius="md" padding="md">
+      <Text size="lg" fw={500}>
+        {getStateText()}
+      </Text>
+      <Stack gap="xs" mt="md">
+        <Skeleton height={8} radius="xl" />
+        <Skeleton height={8} radius="xl" />
+        <Skeleton height={8} width="70%" radius="xl" />
+      </Stack>
+    </Card>
+  );
+}
diff --git a/client/components/AiResponse/ReasoningSection.tsx b/client/components/AiResponse/ReasoningSection.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..7d27a6e7d1ffbb0d1c09cfb15a107cdb23ec9292
--- /dev/null
+++ b/client/components/AiResponse/ReasoningSection.tsx
@@ -0,0 +1,71 @@
+import {
+ Box,
+ Collapse,
+ Flex,
+ Group,
+ Loader,
+ Text,
+ UnstyledButton,
+} from "@mantine/core";
+import { IconChevronDown, IconChevronRight } from "@tabler/icons-react";
+import { useState } from "react";
+import MarkdownRenderer from "./MarkdownRenderer";
+
+interface ReasoningSectionProps {
+ content: string;
+ isGenerating?: boolean;
+}
+
+export default function ReasoningSection({
+ content,
+ isGenerating = false,
+}: ReasoningSectionProps) {
+ const [isOpen, setIsOpen] = useState(false);
+
+  return (
+    <Box mb="md">
+      <UnstyledButton
+        onClick={() => setIsOpen(!isOpen)}
+        style={(theme) => ({
+          width: "100%",
+          padding: theme.spacing.xs,
+          borderStartStartRadius: theme.radius.md,
+          borderStartEndRadius: theme.radius.md,
+          borderEndEndRadius: !isOpen ? theme.radius.md : 0,
+          borderEndStartRadius: !isOpen ? theme.radius.md : 0,
+          backgroundColor: theme.colors.dark[8],
+          "&:hover": {
+            backgroundColor: theme.colors.dark[5],
+          },
+          cursor: isOpen ? "zoom-out" : "zoom-in",
+        })}
+      >
+        <Group gap="xs">
+          {isOpen ? (
+            <IconChevronDown size={16} />
+          ) : (
+            <IconChevronRight size={16} />
+          )}
+          <Flex align="center" gap="xs">
+            <Text size="sm" c="dimmed">
+              {isGenerating ? "Thinking" : "Thought Process"}
+            </Text>
+            {isGenerating && <Loader size="xs" color="gray" />}
+          </Flex>
+        </Group>
+      </UnstyledButton>
+      <Collapse in={isOpen}>
+        <Box
+          style={(theme) => ({
+            backgroundColor: theme.colors.dark[8],
+            padding: theme.spacing.sm,
+            borderBottomLeftRadius: theme.radius.md,
+            borderBottomRightRadius: theme.radius.md,
+          })}
+        >
+          <MarkdownRenderer content={content} enableCopy={false} />
+        </Box>
+      </Collapse>
+    </Box>
+  );
+}
diff --git a/client/components/AiResponse/WebLlmModelSelect.tsx b/client/components/AiResponse/WebLlmModelSelect.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..a967fef6e08b5c761c813adb57ae88a392c7df35
--- /dev/null
+++ b/client/components/AiResponse/WebLlmModelSelect.tsx
@@ -0,0 +1,81 @@
+import { type ComboboxItem, Select } from "@mantine/core";
+import { prebuiltAppConfig } from "@mlc-ai/web-llm";
+import { useCallback, useEffect, useState } from "react";
+import { isF16Supported } from "../../modules/webGpu";
+
+export default function WebLlmModelSelect({
+ value,
+ onChange,
+}: {
+ value: string;
+ onChange: (value: string) => void;
+}) {
+ const [webGpuModels] = useState(() => {
+ const models = prebuiltAppConfig.model_list
+ .filter((model) => {
+ const isSmall = isSmallModel(model);
+ const suffix = getModelSuffix(isF16Supported, isSmall);
+ return model.model_id.endsWith(suffix);
+ })
+ .sort((a, b) => (a.vram_required_MB ?? 0) - (b.vram_required_MB ?? 0))
+ .map((model) => {
+ const modelSizeInMegabytes =
+ Math.round(model.vram_required_MB ?? 0) || "N/A";
+ const isSmall = isSmallModel(model);
+ const suffix = getModelSuffix(isF16Supported, isSmall);
+ const modelName = model.model_id.replace(suffix, "");
+
+ return {
+ label: `${modelSizeInMegabytes} MB • ${modelName}`,
+ value: model.model_id,
+ };
+ });
+
+ return models;
+ });
+
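+  // If the persisted model ID is no longer available, fall back to the
+  // first (smallest) model in the list.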
+ useEffect(() => {
+ const isCurrentModelValid = webGpuModels.some(
+ (model) => model.value === value,
+ );
+
+ if (!isCurrentModelValid && webGpuModels.length > 0) {
+ onChange(webGpuModels[0].value);
+ }
+ }, [onChange, webGpuModels, value]);
+
+ const handleChange = useCallback(
+ (value: string | null) => {
+ if (value) onChange(value);
+ },
+ [onChange],
+ );
+
+  return (
+    <Select
+      value={value}
+      onChange={handleChange}
+      label="AI Model"
+      description="Select the model to use for AI responses."
+      data={webGpuModels}
+      allowDeselect={false}
+      searchable
+    />
+  );
+}
+
+type ModelConfig = (typeof prebuiltAppConfig.model_list)[number];
+
+const smallModels = ["SmolLM2-135M", "SmolLM2-360M"] as const;
+
+function isSmallModel(model: ModelConfig) {
+ return smallModels.some((smallModel) =>
+ model.model_id.startsWith(smallModel),
+ );
+}
+
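+// MLC model IDs encode quantization in their suffix: small models ship
+// unquantized ("q0"), larger ones 4-bit ("q4"), each as an f16 or f32 variant.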
+function getModelSuffix(isF16: boolean, isSmall: boolean) {
+ if (isSmall) return isF16 ? "-q0f16-MLC" : "-q0f32-MLC";
+
+ return isF16 ? "-q4f16_1-MLC" : "-q4f32_1-MLC";
+}
diff --git a/client/components/AiResponse/WllamaModelSelect.tsx b/client/components/AiResponse/WllamaModelSelect.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..6a63fcaa664cc28ddc563f7b068acdd7ecb16d7f
--- /dev/null
+++ b/client/components/AiResponse/WllamaModelSelect.tsx
@@ -0,0 +1,42 @@
+import { type ComboboxItem, Select } from "@mantine/core";
+import { useEffect, useState } from "react";
+import { wllamaModels } from "../../modules/wllama";
+
+export default function WllamaModelSelect({
+ value,
+ onChange,
+}: {
+ value: string;
+ onChange: (value: string) => void;
+}) {
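+  // Build the option list once, ordered by download size (ascending).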
+  const [wllamaModelOptions] = useState<ComboboxItem[]>(
+ Object.entries(wllamaModels)
+ .sort(([, a], [, b]) => a.fileSizeInMegabytes - b.fileSizeInMegabytes)
+ .map(([value, { label, fileSizeInMegabytes }]) => ({
+ label: `${fileSizeInMegabytes} MB • ${label}`,
+ value,
+ })),
+ );
+
+ useEffect(() => {
+ const isCurrentModelValid = wllamaModelOptions.some(
+ (model) => model.value === value,
+ );
+
+ if (!isCurrentModelValid && wllamaModelOptions.length > 0) {
+ onChange(wllamaModelOptions[0].value);
+ }
+ }, [onChange, wllamaModelOptions, value]);
+
+  return (
+    <Select
+      value={value}
+      onChange={(value) => value && onChange(value)}
+      label="AI Model"
+      description="Select the model to use for AI responses."
+      data={wllamaModelOptions}
+      allowDeselect={false}
+      searchable
+    />
+  );
+}
diff --git a/client/components/AiResponse/hooks/useReasoningContent.ts b/client/components/AiResponse/hooks/useReasoningContent.ts
new file mode 100644
index 0000000000000000000000000000000000000000..4511ee1b57e2fd52ecc1422bc34c4173db2b6378
--- /dev/null
+++ b/client/components/AiResponse/hooks/useReasoningContent.ts
@@ -0,0 +1,42 @@
+import { usePubSub } from "create-pubsub/react";
+import { useCallback } from "react";
+import { settingsPubSub } from "../../../modules/pubSub";
+
+export function useReasoningContent(text: string) {
+ const [settings] = usePubSub(settingsPubSub);
+
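+  // Splits raw model output into a reasoning block (delimited by the
+  // configurable start/end markers) and the main answer. While the end marker
+  // hasn't streamed in yet, the whole tail counts as reasoning.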
+ const extractReasoningAndMainContent = useCallback(
+ (text: string, startMarker: string, endMarker: string) => {
+ if (!text)
+ return { reasoningContent: "", mainContent: "", isGenerating: false };
+
+ if (!text.trim().startsWith(startMarker))
+ return { reasoningContent: "", mainContent: text, isGenerating: false };
+
+ const endIndex = text.indexOf(endMarker);
+
+ if (endIndex === -1) {
+ return {
+ reasoningContent: text.slice(startMarker.length),
+ mainContent: "",
+ isGenerating: true,
+ };
+ }
+
+ return {
+ reasoningContent: text.slice(startMarker.length, endIndex),
+ mainContent: text.slice(endIndex + endMarker.length),
+ isGenerating: false,
+ };
+ },
+ [],
+ );
+
+ const result = extractReasoningAndMainContent(
+ text,
+ settings.reasoningStartMarker,
+ settings.reasoningEndMarker,
+ );
+
+ return result;
+}
diff --git a/client/components/Analytics/SearchStats.tsx b/client/components/Analytics/SearchStats.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..0da17831c6e2fa014786a47f858a393fd7c59f2f
--- /dev/null
+++ b/client/components/Analytics/SearchStats.tsx
@@ -0,0 +1,317 @@
+import {
+ Badge,
+ Card,
+ Center,
+ Group,
+ Progress,
+ SimpleGrid,
+ Stack,
+ Text,
+ ThemeIcon,
+ Title,
+} from "@mantine/core";
+import { IconSearch } from "@tabler/icons-react";
+import { useMemo } from "react";
+import { useSearchHistory } from "../../hooks/useSearchHistory";
+import type { SearchEntry } from "../../modules/history";
+import { formatRelativeTime } from "../../modules/stringFormatters";
+
+interface SearchStatsProps {
+ period?: "today" | "week" | "month" | "all";
+ compact?: boolean;
+}
+
+interface StatsData {
+ totalSearches: number;
+ avgPerDay: number;
+ mostActiveHour: number;
+ topSources: { source: string; count: number; percentage: number }[];
+ recentActivity: SearchEntry[];
+ searchTrends: { date: string; count: number }[];
+}
+
+export default function SearchStats({
+ period = "week",
+ compact = false,
+}: SearchStatsProps) {
+ const { recentSearches, isLoading } = useSearchHistory({ limit: 1000 });
+
+ const stats = useMemo((): StatsData => {
+ if (!recentSearches.length) {
+ return {
+ totalSearches: 0,
+ avgPerDay: 0,
+ mostActiveHour: 0,
+ topSources: [],
+ recentActivity: [],
+ searchTrends: [],
+ };
+ }
+
+ const now = new Date();
+ const filterDate = new Date();
+
+ switch (period) {
+ case "today":
+ filterDate.setHours(0, 0, 0, 0);
+ break;
+ case "week":
+ filterDate.setDate(now.getDate() - 7);
+ break;
+ case "month":
+ filterDate.setDate(now.getDate() - 30);
+ break;
+ default:
+ filterDate.setFullYear(2000);
+ }
+
+ const filteredSearches = recentSearches.filter(
+ (search) => search.timestamp >= filterDate.getTime(),
+ );
+
+ const totalSearches = filteredSearches.length;
+
+ const daysDiff = Math.max(
+ 1,
+ Math.ceil((now.getTime() - filterDate.getTime()) / (1000 * 60 * 60 * 24)),
+ );
+ const avgPerDay = Math.round(totalSearches / daysDiff);
+
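+    // Build a 24-bucket histogram of search timestamps to find the busiest hour.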
+ const hourCounts = new Array(24).fill(0);
+ filteredSearches.forEach((search) => {
+ const hour = new Date(search.timestamp).getHours();
+ hourCounts[hour]++;
+ });
+ const mostActiveHour = hourCounts.indexOf(Math.max(...hourCounts));
+
+ const sourceCounts = filteredSearches.reduce(
+ (acc, search) => {
+ if (compact && search.source.toLowerCase() === "user") {
+ return acc;
+ }
+ acc[search.source] = (acc[search.source] || 0) + 1;
+ return acc;
+ },
+      {} as Record<string, number>,
+ );
+
+ const sourcesTotal = Object.values(sourceCounts).reduce(
+ (sum, n) => sum + n,
+ 0,
+ );
+ const topSources = Object.entries(sourceCounts)
+ .map(([source, count]) => ({
+ source: source.charAt(0).toUpperCase() + source.slice(1),
+ count,
+ percentage:
+ sourcesTotal > 0 ? Math.round((count / sourcesTotal) * 100) : 0,
+ }))
+ .sort((a, b) => b.count - a.count);
+
+ const recentActivity = filteredSearches
+ .sort((a, b) => b.timestamp - a.timestamp)
+ .slice(0, 10);
+
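+    // Group searches by calendar day (UTC, via toISOString) for the trend series.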
+    const trends = new Map<string, number>();
+ filteredSearches.forEach((search) => {
+ const date = new Date(search.timestamp).toISOString().split("T")[0];
+ trends.set(date, (trends.get(date) || 0) + 1);
+ });
+
+ const searchTrends = Array.from(trends.entries())
+ .map(([date, count]) => ({ date, count }))
+ .sort((a, b) => a.date.localeCompare(b.date));
+
+ return {
+ totalSearches,
+ avgPerDay,
+ mostActiveHour,
+ topSources,
+ recentActivity,
+ searchTrends,
+ };
+ }, [recentSearches, period, compact]);
+
+ const getSourceColor = (source: string) => {
+ const colors = {
+ User: "blue",
+ Followup: "green",
+ Suggestion: "orange",
+ };
+ return colors[source as keyof typeof colors] || "gray";
+ };
+
+ const formatHour = (hour: number) => {
+ const period = hour >= 12 ? "PM" : "AM";
+ const displayHour = hour === 0 ? 12 : hour > 12 ? hour - 12 : hour;
+ return `${displayHour}:00 ${period}`;
+ };
+
+ if (isLoading) {
+    return (
+      <Card withBorder padding="lg">
+        <Center>
+          <Text size="sm" c="dimmed">
+            Loading analytics...
+          </Text>
+        </Center>
+      </Card>
+    );
+ }
+
+ if (stats.totalSearches === 0) {
+    return (
+      <Card withBorder padding="lg">
+        <Center>
+          <Stack align="center" gap="xs">
+            <ThemeIcon size="xl" variant="light" color="gray">
+              <IconSearch />
+            </ThemeIcon>
+            <Text fw={500}>No search data available</Text>
+            <Text size="sm" c="dimmed">
+              Start searching to see analytics
+            </Text>
+          </Stack>
+        </Center>
+      </Card>
+    );
+ }
+
+ function MetricCard({
+ title,
+ value,
+ }: {
+ title: string;
+ value: string | number;
+ }) {
+    return (
+      <Card withBorder padding="md">
+        <Stack gap={4}>
+          <Text size="xs" c="dimmed" tt="uppercase">
+            {title}
+          </Text>
+          <Text fw={700} size="lg">
+            {value}
+          </Text>
+        </Stack>
+      </Card>
+    );
+  }
+
+  return (
+    <Stack gap="md">
+      <SimpleGrid cols={{ base: 2, sm: 4 }}>
+        <MetricCard title="Total Searches" value={stats.totalSearches} />
+        <MetricCard title="Avg. per Day" value={stats.avgPerDay} />
+        <MetricCard
+          title="Most Active Hour"
+          value={formatHour(stats.mostActiveHour)}
+        />
+        <MetricCard
+          title="Last Search"
+          value={
+            stats.recentActivity.length > 0
+              ? formatRelativeTime(stats.recentActivity[0].timestamp)
+              : "Never"
+          }
+        />
+      </SimpleGrid>
+
+      {!(compact && stats.topSources.length === 0) && (
+        <Card withBorder padding="md">
+          <Stack gap="sm">
+            <Title order={5}>Search Sources</Title>
+            {stats.topSources.length > 0 ? (
+              <Stack gap="xs">
+                {stats.topSources.map((source) => (
+                  <Stack key={source.source} gap={4}>
+                    <Group justify="space-between">
+                      <Badge
+                        color={getSourceColor(source.source)}
+                        variant="light"
+                      >
+                        {source.source}
+                      </Badge>
+                      <Text size="xs" c="dimmed">
+                        {source.count} searches
+                      </Text>
+                    </Group>
+                    <Group gap="xs" wrap="nowrap">
+                      <Progress value={source.percentage} flex={1} />
+                      <Text size="xs" c="dimmed" w={40} ta="right">
+                        {source.percentage}%
+                      </Text>
+                    </Group>
+                  </Stack>
+                ))}
+              </Stack>
+            ) : (
+              <Text size="sm" c="dimmed">
+                No data available
+              </Text>
+            )}
+          </Stack>
+        </Card>
+      )}
+
+      {stats.searchTrends.length > 1 && (
+        <Card withBorder padding="md">
+          <Stack gap="sm">
+            <Title order={5}>Search Trends</Title>
+            <Text size="xs" c="dimmed">
+              Daily search activity over the selected period
+            </Text>
+            <Stack gap="xs">
+              {stats.searchTrends.slice(-7).map((trend) => {
+                const maxCount = Math.max(
+                  ...stats.searchTrends.map((t) => t.count),
+                );
+                const percentage =
+                  maxCount > 0 ? (trend.count / maxCount) * 100 : 0;
+
+                return (
+                  <Group key={trend.date} gap="xs" wrap="nowrap">
+                    <Text size="xs" c="dimmed" w={48}>
+                      {new Date(trend.date).toLocaleDateString("en", {
+                        month: "short",
+                        day: "numeric",
+                      })}
+                    </Text>
+                    <Progress value={percentage} flex={1} />
+                    <Text size="xs" w={32} ta="right">
+                      {trend.count}
+                    </Text>
+                  </Group>
+                );
+              })}
+            </Stack>
+          </Stack>
+        </Card>
+      )}
+    </Stack>
+  );
+}
diff --git a/client/components/App/App.tsx b/client/components/App/App.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..94bd6a2a44a266b8da6701f40d090f271711dce0
--- /dev/null
+++ b/client/components/App/App.tsx
@@ -0,0 +1,100 @@
+import { MantineProvider } from "@mantine/core";
+import { Route, Switch } from "wouter";
+import "@mantine/core/styles.css";
+import { Notifications } from "@mantine/notifications";
+import { usePubSub } from "create-pubsub/react";
+import { lazy, useEffect, useState } from "react";
+import { addLogEntry } from "../../modules/logEntries";
+import { settingsPubSub } from "../../modules/pubSub";
+import { defaultSettings } from "../../modules/settings";
+import "@mantine/notifications/styles.css";
+import { verifyStoredAccessKey } from "../../modules/accessKey";
+import MainPage from "../Pages/Main/MainPage";
+
+const AccessPage = lazy(() => import("../Pages/AccessPage"));
+
+export function App() {
+ useInitializeSettings();
+ const { hasValidatedAccessKey, isCheckingStoredKey, setValidatedAccessKey } =
+ useAccessKeyValidation();
+
+ if (isCheckingStoredKey) {
+ return null;
+ }
+
+  return (
+    <MantineProvider defaultColorScheme="dark">
+      <Notifications />
+      <Switch>
+        <Route path="/">
+          {VITE_ACCESS_KEYS_ENABLED && !hasValidatedAccessKey ? (
+            <AccessPage onAccessKeyValid={() => setValidatedAccessKey(true)} />
+          ) : (
+            <MainPage />
+          )}
+        </Route>
+      </Switch>
+    </MantineProvider>
+  );
+}
+
+/**
+ * A custom React hook that initializes the application settings.
+ *
+ * @returns The initialized settings object.
+ *
+ * @remarks
+ * This hook uses the `usePubSub` hook to access and update the settings state.
+ * It initializes the settings by merging the default settings with any existing settings.
+ * The initialization is performed once when the component mounts.
+ */
+function useInitializeSettings() {
+ const [settings, setSettings] = usePubSub(settingsPubSub);
+ const [state, setState] = useState({
+ settingsInitialized: false,
+ });
+
+ useEffect(() => {
+ if (state.settingsInitialized) return;
+
+ setSettings({ ...defaultSettings, ...settings });
+
+ setState({ settingsInitialized: true });
+
+ addLogEntry("Settings initialized");
+ }, [settings, setSettings, state.settingsInitialized]);
+
+ return settings;
+}
+
+/**
+ * A custom React hook that validates the stored access key on mount.
+ *
+ * @returns An object containing the validation state and loading state
+ */
+function useAccessKeyValidation() {
+ const [state, setState] = useState({
+ hasValidatedAccessKey: false,
+ isCheckingStoredKey: true,
+ });
+
+ useEffect(() => {
+ async function checkStoredAccessKey() {
+ if (VITE_ACCESS_KEYS_ENABLED) {
+ const isValid = await verifyStoredAccessKey();
+ if (isValid)
+ setState((prev) => ({ ...prev, hasValidatedAccessKey: true }));
+ }
+ setState((prev) => ({ ...prev, isCheckingStoredKey: false }));
+ }
+
+ checkStoredAccessKey();
+ }, []);
+
+ return {
+ hasValidatedAccessKey: state.hasValidatedAccessKey,
+ isCheckingStoredKey: state.isCheckingStoredKey,
+ setValidatedAccessKey: (value: boolean) =>
+ setState((prev) => ({ ...prev, hasValidatedAccessKey: value })),
+ };
+}
diff --git a/client/components/Logs/LogsModal.tsx b/client/components/Logs/LogsModal.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..35c0abcb0a0f2835fc05d96b2bc23ba0e70439e1
--- /dev/null
+++ b/client/components/Logs/LogsModal.tsx
@@ -0,0 +1,136 @@
+import {
+ Alert,
+ Button,
+ Center,
+ CloseButton,
+ Group,
+ Modal,
+ Pagination,
+ Table,
+ TextInput,
+ Tooltip,
+} from "@mantine/core";
+import { IconInfoCircle, IconSearch } from "@tabler/icons-react";
+import { usePubSub } from "create-pubsub/react";
+import { useCallback, useEffect, useMemo, useState } from "react";
+import { logEntriesPubSub } from "../../modules/logEntries";
+
+export default function LogsModal({
+ opened,
+ onClose,
+}: {
+ opened: boolean;
+ onClose: () => void;
+}) {
+ const [logEntries] = usePubSub(logEntriesPubSub);
+
+ const [page, setPage] = useState(1);
+ const [filterText, setFilterText] = useState("");
+
+ const logEntriesPerPage = 5;
+
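+  // Case-insensitive substring filter over log messages.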
+ const filteredLogEntries = useMemo(() => {
+ if (!filterText) return logEntries;
+ const lowerCaseFilter = filterText.toLowerCase();
+ return logEntries.filter((entry) =>
+ entry.message.toLowerCase().includes(lowerCaseFilter),
+ );
+ }, [logEntries, filterText]);
+
+ const logEntriesFromCurrentPage = useMemo(
+ () =>
+ filteredLogEntries.slice(
+ (page - 1) * logEntriesPerPage,
+ page * logEntriesPerPage,
+ ),
+ [filteredLogEntries, page],
+ );
+
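+  // Jump back to the first page whenever the filter changes; `void filterText`
+  // only marks the dependency as intentionally observed.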
+ useEffect(() => {
+ void filterText;
+ setPage(1);
+ }, [filterText]);
+
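+  // Serialize all log entries and trigger a client-side download through a
+  // temporary object URL.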
+ const downloadLogsAsJson = useCallback(() => {
+ const jsonString = JSON.stringify(logEntries, null, 2);
+ const blob = new Blob([jsonString], { type: "application/json" });
+ const url = URL.createObjectURL(blob);
+ const link = document.createElement("a");
+ link.href = url;
+ link.download = "logs.json";
+ document.body.appendChild(link);
+ link.click();
+ document.body.removeChild(link);
+ URL.revokeObjectURL(url);
+ }, [logEntries]);
+
+  return (
+    <Modal opened={opened} onClose={onClose} size="xl" title="Logs">
+      <Alert variant="light" color="blue" icon={<IconInfoCircle />} mb="md">
+        <Group justify="space-between" align="center" wrap="nowrap">
+          <Text size="sm">
+            This information is stored solely in your browser for personal use.
+            It isn't sent automatically and is retained for debugging purposes
+            should you need to report a bug.
+          </Text>
+          <Button onClick={downloadLogsAsJson} size="xs">
+            Download Logs
+          </Button>
+        </Group>
+      </Alert>
+
+      <TextInput
+        mb="md"
+        leftSection={<IconSearch size={16} />}
+        value={filterText}
+        onChange={(event) => setFilterText(event.currentTarget.value)}
+        rightSection={
+          filterText ? (
+            <Tooltip label="Clear filter">
+              <CloseButton
+                size="sm"
+                onClick={() => setFilterText("")}
+                aria-label="Clear filter"
+              />
+            </Tooltip>
+          ) : null
+        }
+      />
+      <Table striped highlightOnHover>
+        <Table.Thead>
+          <Table.Tr>
+            <Table.Th>Time</Table.Th>
+            <Table.Th>Message</Table.Th>
+          </Table.Tr>
+        </Table.Thead>
+        <Table.Tbody>
+          {logEntriesFromCurrentPage.map((entry, index) => (
+            <Table.Tr key={`${entry.timestamp}-${index}`}>
+              <Table.Td>
+                {new Date(entry.timestamp).toLocaleTimeString()}
+              </Table.Td>
+              <Table.Td>{entry.message}</Table.Td>
+            </Table.Tr>
+          ))}
+        </Table.Tbody>
+      </Table>
+      <Center mt="md">
+        <Pagination
+          total={Math.ceil(filteredLogEntries.length / logEntriesPerPage)}
+          value={page}
+          onChange={setPage}
+        />
+      </Center>
+    </Modal>
+ );
+}
diff --git a/client/components/Logs/ShowLogsButton.tsx b/client/components/Logs/ShowLogsButton.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..9851a61191b8063322497fb3c588570e8240957c
--- /dev/null
+++ b/client/components/Logs/ShowLogsButton.tsx
@@ -0,0 +1,42 @@
+import { Button, Center, Loader, Stack, Text } from "@mantine/core";
+import { lazy, Suspense, useState } from "react";
+import { addLogEntry } from "../../modules/logEntries";
+
+const LogsModal = lazy(() => import("./LogsModal"));
+
+export default function ShowLogsButton() {
+ const [isLogsModalOpen, setLogsModalOpen] = useState(false);
+
+ const handleShowLogsButtonClick = () => {
+ addLogEntry("User opened the logs modal");
+ setLogsModalOpen(true);
+ };
+
+ const handleCloseLogsButtonClick = () => {
+ addLogEntry("User closed the logs modal");
+ setLogsModalOpen(false);
+ };
+
+  return (
+    <Suspense
+      fallback={
+        <Center>
+          <Loader size="sm" />
+        </Center>
+      }
+    >
+      <Stack gap="xs" align="center">
+        <Button size="xs" variant="default" onClick={handleShowLogsButtonClick}>
+          Show logs
+        </Button>
+        <Text size="xs" c="dimmed">
+          View session logs for debugging.
+        </Text>
+      </Stack>
+      {isLogsModalOpen && (
+        <LogsModal
+          opened={isLogsModalOpen}
+          onClose={handleCloseLogsButtonClick}
+        />
+      )}
+    </Suspense>
+  );
+}
diff --git a/client/components/Pages/AccessPage.tsx b/client/components/Pages/AccessPage.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..a15ac688054a8613e964998eef195d202307ed0c
--- /dev/null
+++ b/client/components/Pages/AccessPage.tsx
@@ -0,0 +1,70 @@
+import { Button, Container, Stack, TextInput, Title } from "@mantine/core";
+import { type FormEvent, useState } from "react";
+import { validateAccessKey } from "../../modules/accessKey";
+import { addLogEntry } from "../../modules/logEntries";
+
+interface AccessPageState {
+ accessKey: string;
+ error: string;
+}
+
+export default function AccessPage({
+ onAccessKeyValid,
+}: {
+ onAccessKeyValid: () => void;
+}) {
+  const [state, setState] = useState<AccessPageState>({
+ accessKey: "",
+ error: "",
+ });
+
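+  // Ask the accessKey module to validate the entered key; on success the
+  // parent swaps in the main app.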
+ const handleSubmit = async (formEvent: FormEvent) => {
+ formEvent.preventDefault();
+ setState((prev) => ({ ...prev, error: "" }));
+ try {
+ const isValid = await validateAccessKey(state.accessKey);
+ if (isValid) {
+ addLogEntry("Valid access key entered");
+ onAccessKeyValid();
+ } else {
+ setState((prev) => ({ ...prev, error: "Invalid access key" }));
+ addLogEntry("Invalid access key attempt");
+ }
+ } catch (error) {
+ setState((prev) => ({ ...prev, error: "Error validating access key" }));
+ addLogEntry(`Error validating access key: ${error}`);
+ }
+ };
+
+  return (
+    <Container size="xs">
+      <Stack p="lg" mih="100vh" justify="center">
+        <Title order={2} ta="center">
+          Access Restricted
+        </Title>
+        <form onSubmit={handleSubmit}>
+          <Stack gap="xs">
+            <TextInput
+              value={state.accessKey}
+              onChange={({ target }) =>
+                setState((prev) => ({ ...prev, accessKey: target.value }))
+              }
+              placeholder="Enter your access key to continue"
+              required
+              autoFocus
+              error={state.error}
+            />
+            <Button size="xs" type="submit">
+              Submit
+            </Button>
+          </Stack>
+        </form>
+      </Stack>
+    </Container>
+  );
+}
diff --git a/client/components/Pages/Main/MainPage.tsx b/client/components/Pages/Main/MainPage.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..69b207fb20050ef9fb4eae286c08909e2d1d04f6
--- /dev/null
+++ b/client/components/Pages/Main/MainPage.tsx
@@ -0,0 +1,81 @@
+import { Container, Stack } from "@mantine/core";
+import { usePubSub } from "create-pubsub/react";
+import { lazy, Suspense } from "react";
+import {
+ imageSearchStatePubSub,
+ queryPubSub,
+ settingsPubSub,
+ textGenerationStatePubSub,
+ textSearchStatePubSub,
+} from "../../../modules/pubSub";
+import { searchAndRespond } from "../../../modules/textGeneration";
+import SearchForm from "../../Search/Form/SearchForm";
+import MenuButton from "./Menu/MenuButton";
+
+const AiResponseSection = lazy(
+ () => import("../../AiResponse/AiResponseSection"),
+);
+const SearchResultsSection = lazy(
+ () => import("../../Search/Results/SearchResultsSection"),
+);
+const EnableAiResponsePrompt = lazy(
+ () => import("../../AiResponse/EnableAiResponsePrompt"),
+);
+
+export default function MainPage() {
+ const [query, updateQuery] = usePubSub(queryPubSub);
+ const [textSearchState] = usePubSub(textSearchStatePubSub);
+ const [imageSearchState] = usePubSub(imageSearchStatePubSub);
+ const [textGenerationState] = usePubSub(textGenerationStatePubSub);
+ const [settings, setSettings] = usePubSub(settingsPubSub);
+
+ const isQueryEmpty = query.length === 0;
+
+  return (
+    <Container>
+      <Stack>
+        <SearchForm
+          query={query}
+          updateQuery={updateQuery}
+          additionalButtons={<MenuButton />}
+        />
+        {!isQueryEmpty && (
+          <>
+            {settings.showEnableAiResponsePrompt && (
+              <Suspense>
+                <EnableAiResponsePrompt
+                  onAccept={() => {
+                    setSettings({
+                      ...settings,
+                      showEnableAiResponsePrompt: false,
+                      enableAiResponse: true,
+                    });
+                    searchAndRespond();
+                  }}
+                  onDecline={() => {
+                    setSettings({
+                      ...settings,
+                      showEnableAiResponsePrompt: false,
+                      enableAiResponse: false,
+                    });
+                  }}
+                />
+              </Suspense>
+            )}
+            {!settings.showEnableAiResponsePrompt &&
+              textGenerationState !== "idle" && (
+                <Suspense>
+                  <AiResponseSection />
+                </Suspense>
+              )}
+            {(textSearchState !== "idle" || imageSearchState !== "idle") && (
+              <Suspense>
+                <SearchResultsSection />
+              </Suspense>
+            )}
+          </>
+        )}
+      </Stack>
+    </Container>
+  );
+}
diff --git a/client/components/Pages/Main/Menu/AISettings/AISettingsForm.tsx b/client/components/Pages/Main/Menu/AISettings/AISettingsForm.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..7f55ed227e55a7cb90f4733e5d14fd405cfff9ef
--- /dev/null
+++ b/client/components/Pages/Main/Menu/AISettings/AISettingsForm.tsx
@@ -0,0 +1,208 @@
+import { Select, Slider, Stack, Switch, Text, TextInput } from "@mantine/core";
+import { useForm } from "@mantine/form";
+import { usePubSub } from "create-pubsub/react";
+import { useMemo } from "react";
+import { settingsPubSub } from "../../../../../modules/pubSub";
+import {
+ defaultSettings,
+ inferenceTypes,
+} from "../../../../../modules/settings";
+import { isWebGPUAvailable } from "../../../../../modules/webGpu";
+import { AIParameterSlider } from "./components/AIParameterSlider";
+import { BrowserSettings } from "./components/BrowserSettings";
+import { HordeSettings } from "./components/HordeSettings";
+import { OpenAISettings } from "./components/OpenAISettings";
+import { SystemPromptInput } from "./components/SystemPromptInput";
+import { useHordeModels } from "./hooks/useHordeModels";
+import { useHordeUserInfo } from "./hooks/useHordeUserInfo";
+import { useOpenAiModels } from "./hooks/useOpenAiModels";
+
+export default function AISettingsForm() {
+ const [settings, setSettings] = usePubSub(settingsPubSub);
+ const { openAiModels, useTextInput } = useOpenAiModels(settings);
+ const hordeModels = useHordeModels(settings);
+ const hordeUserInfo = useHordeUserInfo(settings);
+
+ const form = useForm({
+ initialValues: settings,
+ onValuesChange: setSettings,
+ });
+
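+  // Min-P is only exposed for backends that honor it: Wllama (browser without
+  // WebGPU) and AI Horde.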
+ const inferenceTypeSupportsMinP =
+ (form.values.inferenceType === "browser" &&
+ (!isWebGPUAvailable || !form.values.enableWebGpu)) ||
+ form.values.inferenceType === "horde";
+
+ const penaltySliderMarks = useMemo(
+ () => [
+ { value: -2.0, label: "-2.0" },
+ { value: 0.0, label: "0" },
+ { value: 2.0, label: "2.0" },
+ ],
+ [],
+ );
+
+ const probabilitySliderMarks = useMemo(
+ () =>
+ Array.from({ length: 3 }, (_, index) => ({
+ value: index / 2,
+ label: (index / 2).toString(),
+ })),
+ [],
+ );
+
+ const searchResultsToConsiderSliderMarks = useMemo(
+ () =>
+ Array.from({ length: 7 }, (_, index) => ({
+ value: index,
+ label: index.toString(),
+ })),
+ [],
+ );
+
+ const temperatureSliderMarks = useMemo(
+ () => [
+ { value: 0, label: "0" },
+ { value: 1, label: "1" },
+ { value: 2, label: "2" },
+ ],
+ [],
+ );
+
+  return (
+    <Stack gap="md">
+      <Switch
+        label="AI Response"
+        {...form.getInputProps("enableAiResponse", { type: "checkbox" })}
+      />
+
+      {form.values.enableAiResponse && (
+        <>
+          <Stack gap="xs">
+            <Text size="sm">Search results to consider</Text>
+            <Text size="xs" c="dimmed">
+              Determines the number of search results to consider when
+              generating AI responses. A higher value may enhance accuracy, but
+              it will also increase response time.
+            </Text>
+            <Slider
+              min={0}
+              max={6}
+              step={1}
+              marks={searchResultsToConsiderSliderMarks}
+              {...form.getInputProps("searchResultsToConsider")}
+            />
+          </Stack>
+
+          <Select
+            label="AI Processing Location"
+            data={inferenceTypes}
+            allowDeselect={false}
+            {...form.getInputProps("inferenceType")}
+          />
+
+          {form.values.inferenceType === "openai" && (
+            <OpenAISettings
+              form={form}
+              openAiModels={openAiModels}
+              useTextInput={useTextInput}
+            />
+          )}
+
+          {form.values.inferenceType === "horde" && (
+            <HordeSettings
+              form={form}
+              hordeUserInfo={hordeUserInfo}
+              hordeModels={hordeModels}
+            />
+          )}
+
+          {form.values.inferenceType === "browser" && (
+            <BrowserSettings
+              form={form}
+              isWebGPUAvailable={isWebGPUAvailable}
+            />
+          )}
+
+          <SystemPromptInput form={form} />
+
+          <AIParameterSlider
+            label="Temperature"
+            description="Controls the randomness of the responses."
+            defaultValue={defaultSettings.temperature}
+            min={0}
+            max={2}
+            step={0.01}
+            marks={temperatureSliderMarks}
+            {...form.getInputProps("temperature")}
+          />
+
+          <AIParameterSlider
+            label="Top P"
+            description="Limits sampling to tokens within the top cumulative probability."
+            defaultValue={defaultSettings.topP}
+            min={0}
+            max={1}
+            step={0.01}
+            marks={probabilitySliderMarks}
+            {...form.getInputProps("topP")}
+          />
+
+          {inferenceTypeSupportsMinP && (
+            <AIParameterSlider
+              label="Min P"
+              description="Discards tokens whose probability falls below this fraction of the top token's."
+              defaultValue={defaultSettings.minP}
+              min={0}
+              max={1}
+              step={0.01}
+              marks={probabilitySliderMarks}
+              {...form.getInputProps("minP")}
+            />
+          )}
+
+          <AIParameterSlider
+            label="Frequency Penalty"
+            description="Penalizes tokens proportionally to how often they have already appeared."
+            defaultValue={defaultSettings.frequencyPenalty}
+            min={-2}
+            max={2}
+            step={0.01}
+            marks={penaltySliderMarks}
+            {...form.getInputProps("frequencyPenalty")}
+          />
+
+          <AIParameterSlider
+            label="Presence Penalty"
+            description="Penalizes tokens that have already appeared at least once."
+            defaultValue={defaultSettings.presencePenalty}
+            min={-2}
+            max={2}
+            step={0.01}
+            marks={penaltySliderMarks}
+            {...form.getInputProps("presencePenalty")}
+          />
+
+          <Stack gap="xs">
+            <Text size="sm">Reasoning Section Parsing</Text>
+            <Text size="xs" c="dimmed">
+              Configure how the AI's reasoning section is parsed in the
+              response.
+            </Text>
+            <TextInput
+              label="Start Marker"
+              {...form.getInputProps("reasoningStartMarker")}
+            />
+            <TextInput
+              label="End Marker"
+              {...form.getInputProps("reasoningEndMarker")}
+            />
+          </Stack>
+        </>
+      )}
+    </Stack>
+  );
+}
diff --git a/client/components/Pages/Main/Menu/AISettings/components/AIParameterSlider.tsx b/client/components/Pages/Main/Menu/AISettings/components/AIParameterSlider.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..fd9240e9f48dfbfc94c7fb9197b9b56d6d720629
--- /dev/null
+++ b/client/components/Pages/Main/Menu/AISettings/components/AIParameterSlider.tsx
@@ -0,0 +1,17 @@
+import { Slider, Stack, Text } from "@mantine/core";
+import type { AIParameterSliderProps } from "../types";
+
+export const AIParameterSlider = ({
+ label,
+ description,
+ defaultValue,
+ ...props
+}: AIParameterSliderProps) => (
+
+ {label}
+
+ {description} Defaults to {defaultValue}.
+
+
+
+);
diff --git a/client/components/Pages/Main/Menu/AISettings/components/BrowserSettings.tsx b/client/components/Pages/Main/Menu/AISettings/components/BrowserSettings.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..fe907fff6f344ea36c04249eab7674f486907c3e
--- /dev/null
+++ b/client/components/Pages/Main/Menu/AISettings/components/BrowserSettings.tsx
@@ -0,0 +1,60 @@
+import { NumberInput, Skeleton, Switch } from "@mantine/core";
+import type { UseFormReturnType } from "@mantine/form";
+import { lazy, Suspense } from "react";
+import type { defaultSettings } from "../../../../../../modules/settings";
+
+const WebLlmModelSelect = lazy(
+ () => import("../../../../../../components/AiResponse/WebLlmModelSelect"),
+);
+const WllamaModelSelect = lazy(
+ () => import("../../../../../../components/AiResponse/WllamaModelSelect"),
+);
+
+interface BrowserSettingsProps {
+ form: UseFormReturnType;
+ isWebGPUAvailable: boolean;
+}
+
+export const BrowserSettings = ({
+ form,
+ isWebGPUAvailable,
+}: BrowserSettingsProps) => (
+  <>
+    {isWebGPUAvailable && (
+      <Switch
+        label="WebGPU"
+        {...form.getInputProps("enableWebGpu", { type: "checkbox" })}
+      />
+    )}
+
+    {isWebGPUAvailable && form.values.enableWebGpu ? (
+      <Suspense fallback={<Skeleton height={50} />}>
+        <WebLlmModelSelect
+          value={form.values.webLlmModelId}
+          onChange={(value) =>
+            form.setFieldValue("webLlmModelId", value)
+          }
+        />
+      </Suspense>
+    ) : (
+      <>
+        <Suspense fallback={<Skeleton height={50} />}>
+          <WllamaModelSelect
+            value={form.values.wllamaModelId}
+            onChange={(value) =>
+              form.setFieldValue("wllamaModelId", value)
+            }
+          />
+        </Suspense>
+        <NumberInput
+          label="CPU threads to use"
+          min={1}
+          {...form.getInputProps("cpuThreads")}
+        />
+      </>
+    )}
+  </>
+);
diff --git a/client/components/Pages/Main/Menu/AISettings/components/HordeSettings.tsx b/client/components/Pages/Main/Menu/AISettings/components/HordeSettings.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..6e3f466ec9dad5cb010c309e7461080c6f37a22a
--- /dev/null
+++ b/client/components/Pages/Main/Menu/AISettings/components/HordeSettings.tsx
@@ -0,0 +1,44 @@
+import { Select, TextInput } from "@mantine/core";
+import type { UseFormReturnType } from "@mantine/form";
+import type { defaultSettings } from "../../../../../../modules/settings";
+import { aiHordeDefaultApiKey } from "../../../../../../modules/textGenerationWithHorde";
+import type { HordeUserInfo, ModelOption } from "../types";
+
+interface HordeSettingsProps {
+ form: UseFormReturnType;
+ hordeUserInfo: HordeUserInfo | null;
+ hordeModels: ModelOption[];
+}
+
+export const HordeSettings = ({
+ form,
+ hordeUserInfo,
+ hordeModels,
+}: HordeSettingsProps) => (
+  <>
+    <TextInput
+      {...form.getInputProps("hordeApiKey")}
+      label="AI Horde API Key"
+      type="password"
+    />
+    {form.values.hordeApiKey.length > 0 &&
+      form.values.hordeApiKey !== aiHordeDefaultApiKey && (
+        <TextInput
+          label="Account"
+          value={hordeUserInfo ? hordeUserInfo.username : "Validating key..."}
+          readOnly
+        />
+      )}
+    <Select
+      {...form.getInputProps("hordeModel")}
+      label="AI Model"
+      data={hordeModels}
+      searchable
+    />
+  </>
+);
diff --git a/client/components/Pages/Main/Menu/AISettings/components/OpenAISettings.tsx b/client/components/Pages/Main/Menu/AISettings/components/OpenAISettings.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..0e31c1259499495f345ef208243475887851aaae
--- /dev/null
+++ b/client/components/Pages/Main/Menu/AISettings/components/OpenAISettings.tsx
@@ -0,0 +1,58 @@
+import { Group, Select, Text, TextInput } from "@mantine/core";
+import type { UseFormReturnType } from "@mantine/form";
+import { IconInfoCircle } from "@tabler/icons-react";
+import type { defaultSettings } from "../../../../../../modules/settings";
+import type { ModelOption } from "../types";
+
+interface OpenAISettingsProps {
+ form: UseFormReturnType;
+ openAiModels: ModelOption[];
+ useTextInput: boolean;
+}
+
+export const OpenAISettings = ({
+ form,
+ openAiModels,
+ useTextInput,
+}: OpenAISettingsProps) => (
+  <>
+    <TextInput
+      {...form.getInputProps("openAiApiBaseUrl")}
+      label="API Base URL"
+    />
+    <Group gap="xs" align="center" wrap="nowrap">
+      <IconInfoCircle size={16} />
+      <Text size="xs" c="dimmed">
+        You may need to add{" "}
+        {`${self.location.protocol}//${self.location.hostname}`} to the
+        list of allowed network origins in your API server settings.
+      </Text>
+    </Group>
+    <TextInput
+      {...form.getInputProps("openAiApiKey")}
+      label="API Key"
+      type="password"
+    />
+    {useTextInput ? (
+      <TextInput {...form.getInputProps("openAiApiModel")} label="API Model" />
+    ) : (
+      <Select
+        {...form.getInputProps("openAiApiModel")}
+        label="API Model"
+        data={openAiModels}
+        allowDeselect={false}
+        searchable
+      />
+    )}
+  </>
+);
diff --git a/client/components/Pages/Main/Menu/AISettings/components/SystemPromptInput.tsx b/client/components/Pages/Main/Menu/AISettings/components/SystemPromptInput.tsx
new file mode 100644
index 0000000000000000000000000000000000000000..9f7421726be55ca70dec5b5670d9c6a89e0b16f1
--- /dev/null
+++ b/client/components/Pages/Main/Menu/AISettings/components/SystemPromptInput.tsx
@@ -0,0 +1,102 @@
+import { Text, Textarea } from "@mantine/core";
+import type { UseFormReturnType } from "@mantine/form";
+import { defaultSettings } from "../../../../../../modules/settings";
+
+interface SystemPromptInputProps {
+ form: UseFormReturnType;
+}
+
+export const SystemPromptInput = ({ form }: SystemPromptInputProps) => {
+ const isUsingCustomInstructions =
+ form.values.systemPrompt !== defaultSettings.systemPrompt;
+
+ const handleRestoreDefaultInstructions = () => {
+ form.setFieldValue("systemPrompt", defaultSettings.systemPrompt);
+ };
+
+ return (
+