diff --git a/app.py b/app.py index 8f02899c88f356235a697fad5ba93ba3a4e966ff..c8f11ead815df8ffd95b0c21a404f71a35650c46 100644 --- a/app.py +++ b/app.py @@ -1,443 +1,873 @@ import os import subprocess import signal -os.environ["GRADIO_ANALYTICS_ENABLED"] = "False" -import gradio as gr import tempfile +from pathlib import Path +from textwrap import dedent +from typing import Optional, Tuple, List, Union +from dataclasses import dataclass, field +os.environ["GRADIO_ANALYTICS_ENABLED"] = "False" + +import gradio as gr from huggingface_hub import HfApi, ModelCard, whoami from gradio_huggingfacehub_search import HuggingfaceHubSearch -from pathlib import Path -from textwrap import dedent from apscheduler.schedulers.background import BackgroundScheduler -# used for restarting the space -HF_TOKEN = os.environ.get("HF_TOKEN") -CONVERSION_SCRIPT = "./llama.cpp/convert_hf_to_gguf.py" - -# escape HTML for logging -def escape(s: str) -> str: - s = s.replace("&", "&amp;") # Must be done first! - s = s.replace("<", "&lt;") - s = s.replace(">", "&gt;") - s = s.replace('"', "&quot;") - s = s.replace("\n", "<br/>
") - return s - -def generate_importance_matrix(model_path: str, train_data_path: str, output_path: str): - imatrix_command = [ - "./llama.cpp/llama-imatrix", - "-m", model_path, - "-f", train_data_path, - "-ngl", "99", - "--output-frequency", "10", - "-o", output_path, - ] - - if not os.path.isfile(model_path): - raise Exception(f"Model file not found: {model_path}") - - print("Running imatrix command...") - process = subprocess.Popen(imatrix_command, shell=False) - - try: - process.wait(timeout=60) # added wait - except subprocess.TimeoutExpired: - print("Imatrix computation timed out. Sending SIGINT to allow graceful termination...") - process.send_signal(signal.SIGINT) +@dataclass +class QuantizationConfig: + """Configuration for model quantization.""" + method: str + use_imatrix: bool = False + imatrix_method: str = "IQ4_NL" + quant_embedding: bool = False + embedding_tensor_method: str = "Q8_0" + leave_output: bool = False + quant_output: bool = False + output_tensor_method: str = "Q8_0" + # Generated values - These will be set during processing + fp16_model: str = field(default="", init=False) + quantized_gguf: str = field(default="", init=False) + imatrix_file: str = field(default="", init=False) + + +@dataclass +class SplitConfig: + """Configuration for model splitting.""" + enabled: bool = False + max_tensors: int = 256 + max_size: Optional[str] = None + + +@dataclass +class OutputConfig: + """Configuration for output settings.""" + private_repo: bool = False + repo_name: str = "" + filename: str = "" + + +@dataclass +class ModelProcessingConfig: + """Configuration for the entire model processing pipeline.""" + token: str + model_id: str + model_name: str + outdir: str + quant_config: QuantizationConfig + split_config: SplitConfig + output_config: OutputConfig + # Generated values - These will be set during processing + new_repo_url: str = field(default="", init=False) + new_repo_id: str = field(default="", init=False) + + +class GGUFConverterError(Exception): + """Custom exception for GGUF conversion errors.""" + pass + + +class HuggingFaceModelProcessor: + """Handles the processing of Hugging Face models to GGUF format.""" + + ERROR_LOGIN = "You must be logged in to use GGUF-my-repo." 
+ DOWNLOAD_FOLDER = "./downloads" + OUTPUT_FOLDER = "./outputs" + CALIBRATION_FILE = "calibration_data_v5_rc.txt" + + QUANTIZE_TIMEOUT=86400 + HF_TO_GGUF_TIMEOUT=3600 + IMATRIX_TIMEOUT=86400 + SPLIT_TIMEOUT=3600 + KILL_TIMEOUT=5 + + def __init__(self): + self.SPACE_ID = os.environ.get("SPACE_ID", "") + self.SPACE_URL = f"https://{self.SPACE_ID.replace('/', '-')}.hf.space/" if self.SPACE_ID else "http://localhost:7860/" + self.HF_TOKEN = os.environ.get("HF_TOKEN") + self.RUN_LOCALLY = os.environ.get("RUN_LOCALLY") + + # Create necessary folders + self._create_folder(self.DOWNLOAD_FOLDER) + self._create_folder(self.OUTPUT_FOLDER) + + def _create_folder(self, folder_name: str) -> str: + """Create a folder if it doesn't exist.""" + if not os.path.exists(folder_name): + print(f"Creating folder: {folder_name}") + os.makedirs(folder_name) + return folder_name + + def _validate_token(self, oauth_token: Optional[gr.OAuthToken]) -> str: + """Validate the OAuth token and return the token string.""" + if oauth_token is None or oauth_token.token is None: + raise GGUFConverterError(self.ERROR_LOGIN) + + try: + whoami(oauth_token.token) + return oauth_token.token + except Exception as e: + raise GGUFConverterError(self.ERROR_LOGIN) + + def _escape_html(self, s: str) -> str: + """Escape HTML characters for safe display.""" + replacements = [ + ("&", "&"), + ("<", "<"), + (">", ">"), + ('"', """), + ("\n", "
") + ] + for old, new in replacements: + s = s.replace(old, new) + return s + + def _get_model_creator(self, model_id: str) -> str: + """Extract model creator from model ID.""" + return model_id.split('/')[0] + + def _get_model_name(self, model_id: str) -> str: + """Extract model name from model ID.""" + return model_id.split('/')[-1] + + def _upload_file(self, processing_config: ModelProcessingConfig, path_or_fileobj: str, path_in_repo: str) -> None: + """Upload a file to Hugging Face repository.""" + if self.RUN_LOCALLY == "1": + print("Skipping upload...") + return + + api = HfApi(token=processing_config.token) + api.upload_file( + path_or_fileobj=path_or_fileobj, + path_in_repo=path_in_repo, + repo_id=processing_config.new_repo_id, + ) + + def _generate_importance_matrix(self, quant_config: QuantizationConfig) -> None: + """Generate importance matrix for quantization.""" + if not os.path.isfile(quant_config.fp16_model): + raise GGUFConverterError(f"Model file not found: {quant_config.fp16_model}") + + train_data_path = self.CALIBRATION_FILE + if not os.path.isfile(train_data_path): + raise GGUFConverterError(f"Training data file not found: {train_data_path}") + + print(f"Training data file path: {train_data_path}") + print("Running imatrix command...") + + imatrix_command = [ + "llama-imatrix", + "-m", quant_config.fp16_model, + "-f", train_data_path, + "-ngl", "99", + "--output-frequency", "10", + "-o", quant_config.imatrix_file, + ] + + process = subprocess.Popen(imatrix_command, shell=False, stderr=subprocess.STDOUT) + try: + process.wait(timeout=self.IMATRIX_TIMEOUT) + except subprocess.TimeoutExpired: + print("Imatrix computation timed out. Sending SIGINT to allow graceful termination...") + process.send_signal(signal.SIGINT) + try: + process.wait(timeout=self.KILL_TIMEOUT) + except subprocess.TimeoutExpired: + print("Imatrix proc still didn't term. Forcefully terminating process...") + process.kill() + raise GGUFConverterError("Error generating imatrix: Operation timed out.") + + if process.returncode != 0: + raise GGUFConverterError(f"Error generating imatrix: code={process.returncode}.") + + print(f"Importance matrix generation completed: {os.path.abspath(quant_config.imatrix_file)}") + + def _split_and_upload_model(self, processing_config: ModelProcessingConfig) -> None: + """Split large model files and upload shards.""" + quant_config = processing_config.quant_config + split_config = processing_config.split_config + + print(f"Model path: {quant_config.quantized_gguf}") + print(f"Output dir: {processing_config.outdir}") + + split_cmd = ["llama-gguf-split", "--split"] + + if split_config.max_size: + split_cmd.extend(["--split-max-size", split_config.max_size]) + else: + split_cmd.extend(["--split-max-tensors", str(split_config.max_tensors)]) + + model_path_prefix = '.'.join(quant_config.quantized_gguf.split('.')[:-1]) + split_cmd.extend([quant_config.quantized_gguf, model_path_prefix]) + + print(f"Split command: {split_cmd}") + process = subprocess.Popen(split_cmd, shell=False, stderr=subprocess.STDOUT) try: - process.wait(timeout=5) # grace period + process.wait(timeout=self.SPLIT_TIMEOUT) except subprocess.TimeoutExpired: - print("Imatrix proc still didn't term. 
Forecfully terming process...") - process.kill() - - print("Importance matrix generation completed.") - -def split_upload_model(model_path: str, outdir: str, repo_id: str, oauth_token: gr.OAuthToken | None, split_max_tensors=256, split_max_size=None): - print(f"Model path: {model_path}") - print(f"Output dir: {outdir}") - - if oauth_token is None or oauth_token.token is None: - raise ValueError("You have to be logged in.") - - split_cmd = [ - "./llama.cpp/llama-gguf-split", - "--split", - ] - if split_max_size: - split_cmd.append("--split-max-size") - split_cmd.append(split_max_size) - else: - split_cmd.append("--split-max-tensors") - split_cmd.append(str(split_max_tensors)) - - # args for output - model_path_prefix = '.'.join(model_path.split('.')[:-1]) # remove the file extension - split_cmd.append(model_path) - split_cmd.append(model_path_prefix) - - print(f"Split command: {split_cmd}") - - result = subprocess.run(split_cmd, shell=False, capture_output=True, text=True) - print(f"Split command stdout: {result.stdout}") - print(f"Split command stderr: {result.stderr}") - - if result.returncode != 0: - stderr_str = result.stderr.decode("utf-8") - raise Exception(f"Error splitting the model: {stderr_str}") - print("Model split successfully!") - - # remove the original model file if needed - if os.path.exists(model_path): - os.remove(model_path) - - model_file_prefix = model_path_prefix.split('/')[-1] - print(f"Model file name prefix: {model_file_prefix}") - sharded_model_files = [f for f in os.listdir(outdir) if f.startswith(model_file_prefix) and f.endswith(".gguf")] - if sharded_model_files: + print("Splitting timed out. Sending SIGINT to allow graceful termination...") + process.send_signal(signal.SIGINT) + try: + process.wait(timeout=self.KILL_TIMEOUT) + except subprocess.TimeoutExpired: + print("Splitting timed out. 
Killing process...") + process.kill() + raise GGUFConverterError("Error splitting the model: Operation timed out.") + + if process.returncode != 0: + raise GGUFConverterError(f"Error splitting the model: code={process.returncode}") + + print("Model split successfully!") + + # Remove original model file + if os.path.exists(quant_config.quantized_gguf): + os.remove(quant_config.quantized_gguf) + + model_file_prefix = model_path_prefix.split('/')[-1] + print(f"Model file name prefix: {model_file_prefix}") + + sharded_model_files = [ + f for f in os.listdir(processing_config.outdir) + if f.startswith(model_file_prefix) and f.endswith(".gguf") + ] + + if not sharded_model_files: + raise GGUFConverterError("No sharded files found.") + print(f"Sharded model files: {sharded_model_files}") - api = HfApi(token=oauth_token.token) + for file in sharded_model_files: - file_path = os.path.join(outdir, file) - print(f"Uploading file: {file_path}") + file_path = os.path.join(processing_config.outdir, file) try: - api.upload_file( - path_or_fileobj=file_path, - path_in_repo=file, - repo_id=repo_id, - ) + print(f"Uploading file: {file_path}") + self._upload_file(processing_config, file_path, file) except Exception as e: - raise Exception(f"Error uploading file {file_path}: {e}") - else: - raise Exception("No sharded files found.") - - print("Sharded model has been uploaded successfully!") - -def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_repo, train_data_file, split_model, split_max_tensors, split_max_size, oauth_token: gr.OAuthToken | None): - if oauth_token is None or oauth_token.token is None: - raise gr.Error("You must be logged in to use GGUF-my-repo") - - # validate the oauth token - try: - whoami(oauth_token.token) - except Exception as e: - raise gr.Error("You must be logged in to use GGUF-my-repo") - - model_name = model_id.split('/')[-1] - - try: - api = HfApi(token=oauth_token.token) - - dl_pattern = ["*.md", "*.json", "*.model"] - - pattern = ( - "*.safetensors" - if any( - file.path.endswith(".safetensors") - for file in api.list_repo_tree( - repo_id=model_id, - recursive=True, + raise GGUFConverterError(f"Error uploading file {file_path}: {e}") + + print("Sharded model has been uploaded successfully!") + + def _download_base_model(self, processing_config: ModelProcessingConfig) -> str: + """Download and convert Hugging Face model to GGUF FP16 format.""" + print(f"Downloading model {processing_config.model_name}") + + if os.path.exists(processing_config.quant_config.fp16_model): + print("Skipping fp16 conversion...") + print(f"Converted model path: {os.path.abspath(processing_config.quant_config.fp16_model)}") + return processing_config.quant_config.fp16_model + + with tempfile.TemporaryDirectory(dir=self.DOWNLOAD_FOLDER) as tmpdir: + local_dir = f"{Path(tmpdir)}/{processing_config.model_name}" + print(f"Local directory: {os.path.abspath(local_dir)}") + + # Download model + api = HfApi(token=processing_config.token) + pattern = ( + "*.safetensors" + if any( + file.path.endswith(".safetensors") + for file in api.list_repo_tree( + repo_id=processing_config.model_id, + recursive=True, + ) ) + else "*.bin" ) - else "*.bin" + dl_pattern = ["*.md", "*.json", "*.model"] + dl_pattern += [pattern] + api.snapshot_download(repo_id=processing_config.model_id, local_dir=local_dir, allow_patterns=dl_pattern) + print("Model downloaded successfully!") + print(f"Model directory contents: {os.listdir(local_dir)}") + + config_dir = os.path.join(local_dir, "config.json") + 
adapter_config_dir = os.path.join(local_dir, "adapter_config.json") + if os.path.exists(adapter_config_dir) and not os.path.exists(config_dir): + raise GGUFConverterError( + 'adapter_config.json is present.<br/><br/>
If you are converting a LoRA adapter to GGUF, ' + 'please use GGUF-my-lora.' + ) + + # Convert HF to GGUF + print(f"Converting to GGUF FP16: {os.path.abspath(processing_config.quant_config.fp16_model)}") + convert_command = [ + "python3", "/app/convert_hf_to_gguf.py", local_dir, + "--outtype", "f16", "--outfile", processing_config.quant_config.fp16_model + ] + process = subprocess.Popen(convert_command, shell=False, stderr=subprocess.STDOUT) + try: + process.wait(timeout=self.HF_TO_GGUF_TIMEOUT) + except subprocess.TimeoutExpired: + print("Conversion timed out. Sending SIGINT to allow graceful termination...") + process.send_signal(signal.SIGINT) + try: + process.wait(timeout=self.KILL_TIMEOUT) + except subprocess.TimeoutExpired: + print("Conversion timed out. Killing process...") + process.kill() + raise GGUFConverterError("Error converting to fp16: Operation timed out.") + + if process.returncode != 0: + raise GGUFConverterError(f"Error converting to fp16: code={process.returncode}") + + print("Model converted to fp16 successfully!") + print(f"Converted model path: {os.path.abspath(processing_config.quant_config.fp16_model)}") + return processing_config.quant_config.fp16_model + + def _quantize_model(self, quant_config: QuantizationConfig) -> str: + """Quantize the GGUF model.""" + quantize_cmd = ["llama-quantize"] + + if quant_config.quant_embedding: + quantize_cmd.extend(["--token-embedding-type", quant_config.embedding_tensor_method]) + + if quant_config.leave_output: + quantize_cmd.append("--leave-output-tensor") + else: + if quant_config.quant_output: + quantize_cmd.extend(["--output-tensor-type", quant_config.output_tensor_method]) + + # Set imatrix file path if needed + if quant_config.use_imatrix: + self._generate_importance_matrix(quant_config) + quantize_cmd.extend(["--imatrix", quant_config.imatrix_file]) + else: + print("Not using imatrix quantization.") + + quantize_cmd.append(quant_config.fp16_model) + quantize_cmd.append(quant_config.quantized_gguf) + + if quant_config.use_imatrix: + quantize_cmd.append(quant_config.imatrix_method) + else: + quantize_cmd.append(quant_config.method) + + print(f"Quantizing model with {quantize_cmd}") + + # Use Popen for quantization + process = subprocess.Popen(quantize_cmd, shell=False, stderr=subprocess.STDOUT) + try: + process.wait(timeout=self.QUANTIZE_TIMEOUT) + except subprocess.TimeoutExpired: + print("Quantization timed out. Sending SIGINT to allow graceful termination...") + process.send_signal(signal.SIGINT) + try: + process.wait(timeout=self.KILL_TIMEOUT) + except subprocess.TimeoutExpired: + print("Quantization timed out. 
Killing process...") + process.kill() + raise GGUFConverterError("Error quantizing: Operation timed out.") + + if process.returncode != 0: + raise GGUFConverterError(f"Error quantizing: code={process.returncode}") + + print(f"Quantized successfully with {quant_config.imatrix_method if quant_config.use_imatrix else quant_config.method} option!") + print(f"Quantized model path: {os.path.abspath(quant_config.quantized_gguf)}") + return quant_config.quantized_gguf + + def _create_empty_repo(self, processing_config: ModelProcessingConfig): + api = HfApi(token=processing_config.token) + new_repo_url = api.create_repo( + repo_id=processing_config.output_config.repo_name, + exist_ok=True, + private=processing_config.output_config.private_repo ) + processing_config.new_repo_url = new_repo_url.url + processing_config.new_repo_id = new_repo_url.repo_id + print("Repo created successfully!", processing_config.new_repo_url) - dl_pattern += [pattern] - - if not os.path.exists("downloads"): - os.makedirs("downloads") - - if not os.path.exists("outputs"): - os.makedirs("outputs") - - with tempfile.TemporaryDirectory(dir="outputs") as outdir: - fp16 = str(Path(outdir)/f"{model_name}.fp16.gguf") - - with tempfile.TemporaryDirectory(dir="downloads") as tmpdir: - # Keep the model name as the dirname so the model name metadata is populated correctly - local_dir = Path(tmpdir)/model_name - print(local_dir) - api.snapshot_download(repo_id=model_id, local_dir=local_dir, local_dir_use_symlinks=False, allow_patterns=dl_pattern) - print("Model downloaded successfully!") - print(f"Current working directory: {os.getcwd()}") - print(f"Model directory contents: {os.listdir(local_dir)}") - - config_dir = local_dir/"config.json" - adapter_config_dir = local_dir/"adapter_config.json" - if os.path.exists(adapter_config_dir) and not os.path.exists(config_dir): - raise Exception('adapter_config.json is present.

If you are converting a LoRA adapter to GGUF, please use GGUF-my-lora.') - - result = subprocess.run([ - "python", CONVERSION_SCRIPT, local_dir, "--outtype", "f16", "--outfile", fp16 - ], shell=False, capture_output=True) - print(result) - if result.returncode != 0: - stderr_str = result.stderr.decode("utf-8") - raise Exception(f"Error converting to fp16: {stderr_str}") - print("Model converted to fp16 successfully!") - print(f"Converted model path: {fp16}") - - imatrix_path = Path(outdir)/"imatrix.dat" - - if use_imatrix: - if train_data_file: - train_data_path = train_data_file.name - else: - train_data_path = "llama.cpp/groups_merged.txt" #fallback calibration dataset - - print(f"Training data file path: {train_data_path}") - - if not os.path.isfile(train_data_path): - raise Exception(f"Training data file not found: {train_data_path}") - - generate_importance_matrix(fp16, train_data_path, imatrix_path) - else: - print("Not using imatrix quantization.") - - # Quantize the model - quantized_gguf_name = f"{model_name.lower()}-{imatrix_q_method.lower()}-imat.gguf" if use_imatrix else f"{model_name.lower()}-{q_method.lower()}.gguf" - quantized_gguf_path = str(Path(outdir)/quantized_gguf_name) - if use_imatrix: - quantise_ggml = [ - "./llama.cpp/llama-quantize", - "--imatrix", imatrix_path, fp16, quantized_gguf_path, imatrix_q_method - ] - else: - quantise_ggml = [ - "./llama.cpp/llama-quantize", - fp16, quantized_gguf_path, q_method - ] - result = subprocess.run(quantise_ggml, shell=False, capture_output=True) - if result.returncode != 0: - stderr_str = result.stderr.decode("utf-8") - raise Exception(f"Error quantizing: {stderr_str}") - print(f"Quantized successfully with {imatrix_q_method if use_imatrix else q_method} option!") - print(f"Quantized model path: {quantized_gguf_path}") - - # Create empty repo - username = whoami(oauth_token.token)["name"] - new_repo_url = api.create_repo(repo_id=f"{username}/{model_name}-{imatrix_q_method if use_imatrix else q_method}-GGUF", exist_ok=True, private=private_repo) - new_repo_id = new_repo_url.repo_id - print("Repo created successfully!", new_repo_url) + return new_repo_url + + def _generate_readme(self, processing_config: ModelProcessingConfig) -> str: + """Generate README.md for the quantized model.""" + creator = self._get_model_creator(processing_config.model_id) + username = whoami(processing_config.token)["name"] + try: + card = ModelCard.load(processing_config.model_id, token=processing_config.token) + except: + card = ModelCard("") + + if card.data.tags is None: + card.data.tags = [] + card.data.tags.extend(["llama-cpp", "gguf-my-repo"]) + card.data.base_model = processing_config.model_id + + card.text = dedent( + f""" +# {processing_config.model_name} +**Model creator:** [{creator}](https://huggingface.co/{creator})
+**Original model**: [{processing_config.model_id}](https://huggingface.co/{processing_config.model_id})<br/>
+**GGUF quantization:** provided by [{username}](https://huggingface.co/{username}) using `llama.cpp`<br/>
+## Special thanks +🙏 Special thanks to [Georgi Gerganov](https://github.com/ggerganov) and the whole team working on [llama.cpp](https://github.com/ggerganov/llama.cpp/) for making all of this possible. +## Use with Ollama +```bash +ollama run "hf.co/{processing_config.new_repo_id}:" +``` +## Use with LM Studio +```bash +lms load "{processing_config.new_repo_id}" +``` +## Use with llama.cpp CLI +```bash +llama-cli --hf-repo "{processing_config.new_repo_id}" --hf-file "{processing_config.output_config.filename}" -p "The meaning to life and the universe is" +``` +## Use with llama.cpp Server: +```bash +llama-server --hf-repo "{processing_config.new_repo_id}" --hf-file "{processing_config.output_config.filename}" -c 4096 +``` + """ + ) + + readme_path = f"{processing_config.outdir}/README.md" + card.save(readme_path) + return readme_path + + def process_model(self, processing_config: ModelProcessingConfig) -> Tuple[str, str]: + """Main method to process a model through the entire pipeline.""" + quant_config = processing_config.quant_config + split_config = processing_config.split_config + output_config = processing_config.output_config + + print(f"Current working directory: {os.path.abspath(os.getcwd())}") + + # Download and convert base model + self._download_base_model(processing_config) + + # Quantize the model + self._quantize_model(quant_config) + + # Create empty repo + self._create_empty_repo(processing_config) + + # Upload model + if split_config.enabled: + print(f"Splitting quantized model: {os.path.abspath(quant_config.quantized_gguf)}") + self._split_and_upload_model(processing_config) + else: + try: + print(f"Uploading quantized model: {os.path.abspath(quant_config.quantized_gguf)}") + self._upload_file(processing_config, quant_config.quantized_gguf, output_config.filename) + except Exception as e: + raise GGUFConverterError(f"Error uploading quantized model: {e}") + + # Upload imatrix if it exists + if quant_config.use_imatrix and os.path.isfile(quant_config.imatrix_file): try: - card = ModelCard.load(model_id, token=oauth_token.token) - except: - card = ModelCard("") - if card.data.tags is None: - card.data.tags = [] - card.data.tags.append("llama-cpp") - card.data.tags.append("gguf-my-repo") - card.data.base_model = model_id - card.text = dedent( - f""" - # {new_repo_id} - This model was converted to GGUF format from [`{model_id}`](https://huggingface.co/{model_id}) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. - Refer to the [original model card](https://huggingface.co/{model_id}) for more details on the model. - - ## Use with llama.cpp - Install llama.cpp through brew (works on Mac and Linux) - - ```bash - brew install llama.cpp - - ``` - Invoke the llama.cpp server or the CLI. - - ### CLI: - ```bash - llama-cli --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -p "The meaning to life and the universe is" - ``` - - ### Server: - ```bash - llama-server --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -c 2048 - ``` - - Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. - - Step 1: Clone llama.cpp from GitHub. - ``` - git clone https://github.com/ggerganov/llama.cpp - ``` - - Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). 
- ``` - cd llama.cpp && LLAMA_CURL=1 make - ``` - - Step 3: Run inference through the main binary. - ``` - ./llama-cli --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -p "The meaning to life and the universe is" - ``` - or - ``` - ./llama-server --hf-repo {new_repo_id} --hf-file {quantized_gguf_name} -c 2048 - ``` - """ + print(f"Uploading imatrix.dat: {os.path.abspath(quant_config.imatrix_file)}") + self._upload_file(processing_config, quant_config.imatrix_file, f"{processing_config.model_name}-imatrix.gguf") + except Exception as e: + raise GGUFConverterError(f"Error uploading imatrix.dat: {e}") + + # Upload README.md + readme_path = self._generate_readme(processing_config) + self._upload_file(processing_config, readme_path, "README.md") + + print(f"Uploaded successfully with {quant_config.imatrix_method if quant_config.use_imatrix else quant_config.method} option!") + + +class GGUFConverterUI: + """Gradio UI for the GGUF Converter.""" + + def __init__(self): + self.processor = HuggingFaceModelProcessor() + self.css = """/* Custom CSS to allow scrolling */ + .gradio-container {overflow-y: auto;} + """ + + # Initialize components + self._initialize_components() + self._setup_interface() + + def _initialize_components(self): + """Initialize all UI components.""" + ##### + # Base model section + ##### + self.model_id = HuggingfaceHubSearch( + label="Hub Model ID", + placeholder="Search for model id on Huggingface", + search_type="model", + ) + + ##### + # Quantization section + ##### + self.use_imatrix = gr.Checkbox( + value=False, + label="Use Imatrix Quantization", + info="Use importance matrix for quantization." + ) + self.q_method = gr.Dropdown( + choices=["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0", "F16", "BF16"], + label="Quantization Method", + info="GGML quantization type", + value="Q4_K_M", + filterable=False, + visible=True + ) + self.imatrix_q_method = gr.Dropdown( + choices=["IQ3_M", "IQ3_XXS", "Q4_K_M", "Q4_K_S", "IQ4_NL", "IQ4_XS", "Q5_K_M", "Q5_K_S"], + label="Imatrix Quantization Method", + info="GGML imatrix quants type", + value="IQ4_NL", + filterable=False, + visible=False + ) + self.train_data_file = gr.File( + label="Training Data File", + file_types=[".txt"], + visible=False + ) + + ##### + # Advanced Options section + ##### + self.split_model = gr.Checkbox( + value=False, + label="Split Model", + info="Shard the model using gguf-split." + ) + self.split_max_tensors = gr.Number( + value=256, + label="Max Tensors per File", + info="Maximum number of tensors per file when splitting model.", + visible=False + ) + self.split_max_size = gr.Textbox( + label="Max File Size", + info="Maximum file size when splitting model (--split-max-size). May leave empty to use the default. Accepted suffixes: M, G. 
Example: 256M, 5G", + visible=False + ) + self.leave_output = gr.Checkbox( + value=False, + label="Leave output tensor", + info="Leaves output.weight un(re)quantized" + ) + self.quant_embedding = gr.Checkbox( + value=False, + label="Quant embeddings tensor", + info="Quantize embeddings tensor separately" + ) + self.embedding_tensor_method = gr.Dropdown( + choices=["Q2_K", "Q3_K", "Q4_K", "Q5_K", "Q6_K", "Q8_0"], + label="Embeddings Quantization Method", + info="use a specific quant type for the token embeddings tensor", + value="Q8_0", + filterable=False, + visible=False + ) + self.quant_output = gr.Checkbox( + value=False, + label="Quant output tensor", + info="Quantize output tensor separately" + ) + self.output_tensor_method = gr.Dropdown( + choices=["Q2_K", "Q3_K", "Q4_K", "Q5_K", "Q6_K", "Q8_0"], + label="Output Quantization Method", + info="use a specific quant type for the output.weight tensor", + value="Q8_0", + filterable=False, + visible=False + ) + + ##### + # Output Settings section + ##### + self.private_repo = gr.Checkbox( + value=False, + label="Private Repo", + info="Create a private repo under your username." + ) + self.repo_name = gr.Textbox( + label="Output Repository Name", + info="Set your repository name", + max_lines=1 + ) + self.gguf_name = gr.Textbox( + label="Output File Name", + info="Set output file name", + max_lines=1 + ) + + ##### + # Buttons section + ##### + self.clear_btn = gr.ClearButton( + value="Clear", + variant="secondary", + components=[ + self.model_id, + self.q_method, + self.use_imatrix, + self.imatrix_q_method, + self.private_repo, + self.train_data_file, + self.leave_output, + self.quant_embedding, + self.embedding_tensor_method, + self.quant_output, + self.output_tensor_method, + self.split_model, + self.split_max_tensors, + self.split_max_size, + self.repo_name, + self.gguf_name, + ] + ) + self.submit_btn = gr.Button( + value="Submit", + variant="primary" + ) + + ##### + # Outputs section + ##### + self.output_label = gr.Markdown(label="output") + self.output_image = gr.Image( + show_label=False, + show_download_button=False, + interactive=False + ) + + @staticmethod + def _update_output_repo(model_id: str, oauth_token: Optional[gr.OAuthToken]) -> str: + """Update output repository name based on model and user.""" + if oauth_token is None or not oauth_token.token: + return "" + if not model_id: + return "" + try: + username = whoami(oauth_token.token)["name"] + model_name = model_id.split('/')[-1] + return f"{username}/{model_name}-GGUF" + except: + return "" + + @staticmethod + def _update_output_filename(model_id: str, use_imatrix: bool, q_method: str, imatrix_q_method: str) -> str: + """Update output filename based on model and quantization settings.""" + if not model_id: + return "" + model_name = model_id.split('/')[-1] + if use_imatrix: + return f"{model_name}-{imatrix_q_method.upper()}-imat.gguf" + return f"{model_name}-{q_method.upper()}.gguf" + + def _setup_interface(self): + """Set up the Gradio interface.""" + with gr.Blocks(css=self.css) as self.demo: + ##### + # Layout + ##### + gr.Markdown(HuggingFaceModelProcessor.ERROR_LOGIN) + gr.LoginButton(min_width=250) + gr.HTML("

<center><h1>Create your own GGUF Quants!</h1></center>") + gr.Markdown(f"The space takes an HF repo as an input, quantizes it and creates a Public repo containing the selected quant under your HF user namespace.<br/>
Use via {self.processor.SPACE_URL}") + + with gr.Row(): + with gr.Column() as inputs: + gr.Markdown("### Model Configuration") + self.model_id.render() + with gr.Column(): + self.use_imatrix.render() + self.q_method.render() + self.imatrix_q_method.render() + self.train_data_file.render() + gr.Markdown("### Advanced Options") + self.quant_embedding.render() + self.embedding_tensor_method.render() + self.leave_output.render() + self.quant_output.render() + self.output_tensor_method.render() + self.split_model.render() + with gr.Row() as split_options: + self.split_max_tensors.render() + self.split_max_size.render() + gr.Markdown("### Output Settings") + gr.Markdown("You can customize settings for your GGUF repo.") + self.private_repo.render() + with gr.Row(): + self.repo_name.render() + self.gguf_name.render() + # Buttons + with gr.Row() as buttons: + self.clear_btn.render() + self.submit_btn.render() + with gr.Column() as outputs: + self.output_label.render() + self.output_image.render() + + ##### + # Event handlers + ##### + self.submit_btn.click( + fn=self._process_model_wrapper, + inputs=[ + self.model_id, + self.q_method, + self.use_imatrix, + self.imatrix_q_method, + self.private_repo, + self.train_data_file, + self.repo_name, + self.gguf_name, + self.quant_embedding, + self.embedding_tensor_method, + self.leave_output, + self.quant_output, + self.output_tensor_method, + self.split_model, + self.split_max_tensors, + self.split_max_size + ], + outputs=[ + self.output_label, + self.output_image, + ], ) - readme_path = Path(outdir)/"README.md" - card.save(readme_path) - if split_model: - split_upload_model(str(quantized_gguf_path), outdir, new_repo_id, oauth_token, split_max_tensors, split_max_size) - else: - try: - print(f"Uploading quantized model: {quantized_gguf_path}") - api.upload_file( - path_or_fileobj=quantized_gguf_path, - path_in_repo=quantized_gguf_name, - repo_id=new_repo_id, - ) - except Exception as e: - raise Exception(f"Error uploading quantized model: {e}") - - if os.path.isfile(imatrix_path): - try: - print(f"Uploading imatrix.dat: {imatrix_path}") - api.upload_file( - path_or_fileobj=imatrix_path, - path_in_repo="imatrix.dat", - repo_id=new_repo_id, - ) - except Exception as e: - raise Exception(f"Error uploading imatrix.dat: {e}") + ##### + # OnChange handlers + ##### + self.use_imatrix.change( + fn=lambda use_imatrix: [gr.update(visible=not use_imatrix), gr.update(visible=use_imatrix), gr.update(visible=use_imatrix)], + inputs=self.use_imatrix, + outputs=[self.q_method, self.imatrix_q_method, self.train_data_file] + ) + self.split_model.change( + fn=lambda split_model: [gr.update(visible=split_model), gr.update(visible=split_model)], + inputs=self.split_model, + outputs=[self.split_max_tensors, self.split_max_size] + ) + self.quant_embedding.change( + fn=lambda quant_embedding: gr.update(visible=quant_embedding), + inputs=self.quant_embedding, + outputs=[self.embedding_tensor_method] + ) + self.leave_output.change( + fn=lambda leave_output, quant_output: [gr.update(visible=not leave_output), gr.update(visible=not leave_output and quant_output)], + inputs=[self.leave_output, self.leave_output], + outputs=[self.quant_output, self.output_tensor_method] + ) + self.quant_output.change( + fn=lambda quant_output: [gr.update(visible=not quant_output), gr.update(visible=quant_output)], + inputs=self.quant_output, + outputs=[self.leave_output, self.output_tensor_method] + ) + self.model_id.change( + fn=self._update_output_repo, + inputs=[self.model_id], + 
outputs=[self.repo_name] + ) + self.model_id.change( + fn=self._update_output_filename, + inputs=[self.model_id, self.use_imatrix, self.q_method, self.imatrix_q_method], + outputs=[self.gguf_name] + ) + self.use_imatrix.change( + fn=self._update_output_filename, + inputs=[self.model_id, self.use_imatrix, self.q_method, self.imatrix_q_method], + outputs=[self.gguf_name] + ) + self.q_method.change( + fn=self._update_output_filename, + inputs=[self.model_id, self.use_imatrix, self.q_method, self.imatrix_q_method], + outputs=[self.gguf_name] + ) + self.imatrix_q_method.change( + fn=self._update_output_filename, + inputs=[self.model_id, self.use_imatrix, self.q_method, self.imatrix_q_method], + outputs=[self.gguf_name] + ) - api.upload_file( - path_or_fileobj=readme_path, - path_in_repo="README.md", - repo_id=new_repo_id, + def _process_model_wrapper(self, model_id: str, q_method: str, use_imatrix: bool, + imatrix_q_method: str, private_repo: bool, train_data_file, + repo_name: str, gguf_name: str, quant_embedding: bool, + embedding_tensor_method: str, leave_output: bool, + quant_output: bool, output_tensor_method: str, + split_model: bool, split_max_tensors, split_max_size: str, oauth_token: Optional[gr.OAuthToken]) -> Tuple[str, str]: + """Wrapper for the process_model method to handle the conversion using ModelProcessingConfig.""" + try: + # Validate token and get token string + token = self.processor._validate_token(oauth_token) + + # Create configuration objects + quant_config = QuantizationConfig( + method=q_method, + use_imatrix=use_imatrix, + imatrix_method=imatrix_q_method, + quant_embedding=quant_embedding, + embedding_tensor_method=embedding_tensor_method, + leave_output=leave_output, + quant_output=quant_output, + output_tensor_method=output_tensor_method ) - print(f"Uploaded successfully with {imatrix_q_method if use_imatrix else q_method} option!") - # end of the TemporaryDirectory(dir="outputs") block; temporary outputs are deleted here + split_config = SplitConfig( + enabled=split_model, + max_tensors=split_max_tensors if isinstance(split_max_tensors, int) else 256, + max_size=split_max_size + ) - return ( - f'

<h1>✅ DONE</h1><br/>Find your repo here: <a href="{new_repo_url}" target="_blank">{new_repo_id}</a>', - "llama.png", - ) - except Exception as e: - return (f'

<h1>❌ ERROR</h1><br/>{escape(str(e))}
', "error.png") - - -css="""/* Custom CSS to allow scrolling */ -.gradio-container {overflow-y: auto;} -""" -model_id = HuggingfaceHubSearch( - label="Hub Model ID", - placeholder="Search for model id on Huggingface", - search_type="model", -) - -q_method = gr.Dropdown( - ["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0"], - label="Quantization Method", - info="GGML quantization type", - value="Q4_K_M", - filterable=False, - visible=True -) - -imatrix_q_method = gr.Dropdown( - ["IQ3_M", "IQ3_XXS", "Q4_K_M", "Q4_K_S", "IQ4_NL", "IQ4_XS", "Q5_K_M", "Q5_K_S"], - label="Imatrix Quantization Method", - info="GGML imatrix quants type", - value="IQ4_NL", - filterable=False, - visible=False -) - -use_imatrix = gr.Checkbox( - value=False, - label="Use Imatrix Quantization", - info="Use importance matrix for quantization." -) - -private_repo = gr.Checkbox( - value=False, - label="Private Repo", - info="Create a private repo under your username." -) - -train_data_file = gr.File( - label="Training Data File", - file_types=["txt"], - visible=False -) - -split_model = gr.Checkbox( - value=False, - label="Split Model", - info="Shard the model using gguf-split." -) - -split_max_tensors = gr.Number( - value=256, - label="Max Tensors per File", - info="Maximum number of tensors per file when splitting model.", - visible=False -) - -split_max_size = gr.Textbox( - label="Max File Size", - info="Maximum file size when splitting model (--split-max-size). May leave empty to use the default. Accepted suffixes: M, G. Example: 256M, 5G", - visible=False -) - -iface = gr.Interface( - fn=process_model, - inputs=[ - model_id, - q_method, - use_imatrix, - imatrix_q_method, - private_repo, - train_data_file, - split_model, - split_max_tensors, - split_max_size, - ], - outputs=[ - gr.Markdown(label="output"), - gr.Image(show_label=False), - ], - title="Create your own GGUF Quants, blazingly fast ⚡!", - description="The space takes an HF repo as an input, quantizes it and creates a Public repo containing the selected quant under your HF user namespace.", - api_name=False - ) - -# Create Gradio interface -with gr.Blocks(css=css) as demo: - gr.Markdown("You must be logged in to use GGUF-my-repo.") - gr.LoginButton(min_width=250) - - iface.render() - - def update_split_visibility(split_model): - return gr.update(visible=split_model), gr.update(visible=split_model) - - split_model.change( - fn=update_split_visibility, - inputs=split_model, - outputs=[split_max_tensors, split_max_size] - ) - - def update_visibility(use_imatrix): - return gr.update(visible=not use_imatrix), gr.update(visible=use_imatrix), gr.update(visible=use_imatrix) - - use_imatrix.change( - fn=update_visibility, - inputs=use_imatrix, - outputs=[q_method, imatrix_q_method, train_data_file] - ) - -def restart_space(): - HfApi().restart_space(repo_id="ggml-org/gguf-my-repo", token=HF_TOKEN, factory_reboot=True) - -scheduler = BackgroundScheduler() -scheduler.add_job(restart_space, "interval", seconds=21600) -scheduler.start() - -# Launch the interface -demo.queue(default_concurrency_limit=1, max_size=5).launch(debug=True, show_api=False) \ No newline at end of file + output_config = OutputConfig( + private_repo=private_repo, + repo_name=repo_name, + filename=gguf_name + ) + + model_name = self.processor._get_model_name(model_id) + + with tempfile.TemporaryDirectory(dir=self.processor.OUTPUT_FOLDER) as outDirObj: + outdir = ( + self.processor._create_folder(os.path.join(self.processor.OUTPUT_FOLDER, 
model_name)) + if self.processor.RUN_LOCALLY == "1" + else Path(outDirObj) + ) + + quant_config.fp16_model = f"{outdir}/{model_name}-fp16.gguf" + quant_config.imatrix_file = f"{outdir}/{model_name}-imatrix.gguf" + quant_config.quantized_gguf = f"{outdir}/{gguf_name}" + + processing_config = ModelProcessingConfig( + token=token, + model_id=model_id, + model_name=model_name, + outdir=outdir, + quant_config=quant_config, + split_config=split_config, + output_config=output_config + ) + + # Call the processor's main method with the config object + self.processor.process_model(processing_config) + + return ( + f'

<h1>✅ DONE</h1><br/>Find your repo here: <a href="{processing_config.new_repo_url}" target="_blank">{processing_config.new_repo_id}</a>', + "llama.png", + ) + + except Exception as e: + print(f"Error processing model: {e}") + return (f'

<h1>❌ ERROR</h1><br/>{self.processor._escape_html(str(e))}
', "error.png") + + + def launch(self): + """Launch the Gradio interface.""" + # Set up space restart scheduler + def restart_space(): + HfApi().restart_space(repo_id=self.processor.SPACE_ID, token=self.processor.HF_TOKEN, factory_reboot=True) + + scheduler = BackgroundScheduler() + scheduler.add_job(restart_space, "interval", seconds=21600) + scheduler.start() + + # Launch the interface + self.demo.queue(default_concurrency_limit=1, max_size=5).launch(debug=True, show_api=False) + + +# Main execution +if __name__ == "__main__": + ui = GGUFConverterUI() + ui.launch() diff --git a/calibration_data_v5_rc.txt b/calibration_data_v5_rc.txt new file mode 100644 index 0000000000000000000000000000000000000000..aba17fe897c00fae02a18d26068aa453dee09e50 --- /dev/null +++ b/calibration_data_v5_rc.txt @@ -0,0 +1,4802 @@ +=========== +; A072257: a(n) = ((6*n-17)*4^n - 1)/3. +; -6,-15,-27,21,597,4437,25941,136533,677205,3233109,15029589,68506965,307582293,1364546901,5995058517,26127717717,113100805461,486762960213,2084490794325,8887718991189,37749899220309,159795689903445,674367131702613,2838206015165781,11915774014084437,49914895870022997,208666782734832981,870695927958295893,3626898899909039445,15084056351939581269,62642068416972019029,259791645704742851925,1076060070966390510933,4451814236455238456661,18397552756179659478357,75951394266153460520277,313250310030353132508501,1290780171984369691743573,5314236415389307413812565,21861408571364544242603349,89863485924687435319825749,369125350255666774676952405,1515187027250335232298407253,6215490613912013463556019541,25480932475290743991673640277,104399609979733736516492809557,427501960233217988265164232021,1749621922190004121857428903253,7156944013788545162616803513685,29261601355268295351215565657429,119581706621529640207855669040469,488468031287944396043396301804885,1994436944359080925021479709791573,8140007054265537063477496849454421,33209065324379001707476299438970197,135432409726783420644170445920490837,552114232624203337833742776340403541 + +mov $1,-2 +lpb $0 + sub $0,1 + add $1,$0 + mul $1,4 +lpe +add $0,2 +sub $1,$0 +sub $1,14 +div $1,2 +mul $1,3 +add $1,21 +mov $0,$1 + + .byte $01 ; Unknown purpose + + .byte OBJ_TOAD, $0D, $04 + .byte $FF ; Terminator + + +=========== +// +// Copyright (c) 2015-2018 The NRDI developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef NRDI_CONCURRENTQUEUE_H +#define NRDI_CONCURRENTQUEUE_H + + +#include +#include +#include + +template +class concurrentqueue +{ +private: + std::mutex mutex; + std::condition_variable condition; + std::deque queue; + +public: + void push(T const& value) { + { + std::unique_lock lock(this->mutex); + queue.push_front(value); + } + this->condition.notify_one(); + } + T pop() { + std::unique_lock lock(this->mutex); + this->condition.wait(lock, [=]{ return !this->queue.empty(); }); + T rc(std::move(this->queue.back())); + this->queue.pop_back(); + return rc; + } + + T popNotWait(){ + std::unique_lock lock(this->mutex); + T rc(std::move(this->queue.back())); + this->queue.pop_back(); + return rc; + } + + bool hasElements(){ + std::unique_lock lock(this->mutex); + return !queue.empty(); + } +}; + +#endif //NRDI_CONCURRENTQUEUE_H + +// +// RYJViewController.h +// RYJToolKit +// +// Created by developRen on 11/30/2020. +// Copyright (c) 2020 developRen. All rights reserved. 
+// + +@import UIKit; + +@interface RYJViewController : UIViewController + +@end + + +=========== +;;; vnvni.el --- Quail package for Vietnamese by VNI method + +;; Copyright (C) 2001-2015 Free Software Foundation, Inc. + +;; Author: Werner Lemberg +;; Nguyen Thai Ngoc Duy +;; Keywords: multilingual, input method, Vietnamese + +;; This file is part of GNU Emacs. + +;; GNU Emacs is free software: you can redistribute it and/or modify +;; it under the terms of the GNU General Public License as published by +;; the Free Software Foundation, either version 3 of the License, or +;; (at your option) any later version. + +;; GNU Emacs is distributed in the hope that it will be useful, +;; but WITHOUT ANY WARRANTY; without even the implied warranty of +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +;; GNU General Public License for more details. + +;; You should have received a copy of the GNU General Public License +;; along with GNU Emacs. If not, see . + +;;; Commentary: + +;; There are two commonly-used input methods for Vietnamese: Telex +;; (implemented in vntelex.el) and VNI (implemented in this file, +;; which was based on vntelex.el). + +;;; Code: + +(require 'quail) + + +(quail-define-package + "vietnamese-vni" ; NAME + "Vietnamese" ; LANGUAGE + "VV" ; TITLE + t ; GUIDANCE + "Vietnamese VNI input method + +Diacritics: + + effect postfix examples + ------------------------------ + circumflex 6 a6 -> â + breve 8 a8 -> ă + horn 7 o7 -> ơ + + acute 1 a1 -> á + grave 2 a2 -> à + hook above 3 a3 -> ả + tilde 4 a4 -> ã + dot below 5 a5 -> ạ + + d bar 9 d9 -> đ + +Combinations: + + A82 -> Ằ, o74 -> ỡ, etc. + +Doubling the postfix (but not in combinations) separates the letter +and postfix: E66 -> E6, a55 -> a5, etc. +" ; DOCSTRING + nil ; TRANSLATION-KEYS + t ; FORGET-LAST-SELECTION + nil ; DETERMINISTIC + nil ; KBD-TRANSLATE + nil ; SHOW-LAYOUT + nil ; CREATE-DECODE-MAP + nil ; MAXIMUM-SHORTEST + nil ; OVERLAY-PLIST + nil ; UPDATE-TRANSLATION-FUNCTION + nil ; CONVERSION-KEYS + t) ; SIMPLE + +(quail-define-rules + ("a2" ?à) ; LATIN SMALL LETTER A WITH GRAVE + ("A2" ?À) ; LATIN CAPITAL LETTER A WITH GRAVE + ("a1" ?á) ; LATIN SMALL LETTER A WITH ACUTE + ("A1" ?Á) ; LATIN CAPITAL LETTER A WITH ACUTE + ("a6" ?â) ; LATIN SMALL LETTER A WITH CIRCUMFLEX + ("A6" ?Â) ; LATIN CAPITAL LETTER A WITH CIRCUMFLEX + ("a4" ?ã) ; LATIN SMALL LETTER A WITH TILDE + ("A4" ?Ã) ; LATIN CAPITAL LETTER A WITH TILDE + ("e2" ?è) ; LATIN SMALL LETTER E WITH GRAVE + ("E2" ?È) ; LATIN CAPITAL LETTER E WITH GRAVE + ("e1" ?é) ; LATIN SMALL LETTER E WITH ACUTE + ("E1" ?É) ; LATIN CAPITAL LETTER E WITH ACUTE + ("e6" ?ê) ; LATIN SMALL LETTER E WITH CIRCUMFLEX + ("E6" ?Ê) ; LATIN CAPITAL LETTER E WITH CIRCUMFLEX + ("i2" ?ì) ; LATIN SMALL LETTER I WITH GRAVE + ("I2" ?Ì) ; LATIN CAPITAL LETTER I WITH GRAVE + ("i1" ?í) ; LATIN SMALL LETTER I WITH ACUTE + ("I1" ?Í) ; LATIN CAPITAL LETTER I WITH ACUTE + ("o2" ?ò) ; LATIN SMALL LETTER O WITH GRAVE + ("O2" ?Ò) ; LATIN CAPITAL LETTER O WITH GRAVE + ("o1" ?ó) ; LATIN SMALL LETTER O WITH ACUTE + ("O1" ?Ó) ; LATIN CAPITAL LETTER O WITH ACUTE + ("o6" ?ô) ; LATIN SMALL LETTER O WITH CIRCUMFLEX + ("O6" ?Ô) ; LATIN CAPITAL LETTER O WITH CIRCUMFLEX + ("o4" ?õ) ; LATIN SMALL LETTER O WITH TILDE + ("O4" ?Õ) ; LATIN CAPITAL LETTER O WITH TILDE + ("u2" ?ù) ; LATIN SMALL LETTER U WITH GRAVE + ("U2" ?Ù) ; LATIN CAPITAL LETTER U WITH GRAVE + ("u1" ?ú) ; LATIN SMALL LETTER U WITH ACUTE + ("U1" ?Ú) ; LATIN CAPITAL LETTER U WITH ACUTE + ("y1" ?ý) ; LATIN SMALL LETTER Y WITH ACUTE + ("Y1" 
?Ý) ; LATIN CAPITAL LETTER Y WITH ACUTE + ("a8" ?ă) ; LATIN SMALL LETTER A WITH BREVE + ("A8" ?Ă) ; LATIN CAPITAL LETTER A WITH BREVE + ("i4" ?ĩ) ; LATIN SMALL LETTER I WITH TILDE + ("I4" ?Ĩ) ; LATIN CAPITAL LETTER I WITH TILDE + ("u4" ?ũ) ; LATIN SMALL LETTER U WITH TILDE + ("U4" ?Ũ) ; LATIN CAPITAL LETTER U WITH TILDE + ("o7" ?ơ) ; LATIN SMALL LETTER O WITH HORN + ("O7" ?Ơ) ; LATIN CAPITAL LETTER O WITH HORN + ("u7" ?ư) ; LATIN SMALL LETTER U WITH HORN + ("U7" ?Ư) ; LATIN CAPITAL LETTER U WITH HORN + ("a5" ?ạ) ; LATIN SMALL LETTER A WITH DOT BELOW + ("A5" ?Ạ) ; LATIN CAPITAL LETTER A WITH DOT BELOW + ("a3" ?ả) ; LATIN SMALL LETTER A WITH HOOK ABOVE + ("A3" ?Ả) ; LATIN CAPITAL LETTER A WITH HOOK ABOVE + ("a61" ?ấ) ; LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE + ("A61" ?Ấ) ; LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE + ("a62" ?ầ) ; LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE + ("A62" ?Ầ) ; LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE + ("a63" ?ẩ) ; LATIN SMALL LETTER A WITH CIRCUMFLEX AND HO6K ABOVE + ("A63" ?Ẩ) ; LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HO6K ABOVE + ("a64" ?ẫ) ; LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE + ("A64" ?Ẫ) ; LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE + ("a65" ?ậ) ; LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW + ("A65" ?Ậ) ; LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW + ("a81" ?ắ) ; LATIN SMALL LETTER A WITH BREVE AND ACUTE + ("A81" ?Ắ) ; LATIN CAPITAL LETTER A WITH BREVE AND ACUTE + ("a82" ?ằ) ; LATIN SMALL LETTER A WITH BREVE AND GRAVE + ("A82" ?Ằ) ; LATIN CAPITAL LETTER A WITH BREVE AND GRAVE + ("a83" ?ẳ) ; LATIN SMALL LETTER A WITH BREVE AND HO6K ABOVE + ("A83" ?Ẳ) ; LATIN CAPITAL LETTER A WITH BREVE AND HO6K ABOVE + ("a84" ?ẵ) ; LATIN SMALL LETTER A WITH BREVE AND TILDE + ("A84" ?Ẵ) ; LATIN CAPITAL LETTER A WITH BREVE AND TILDE + ("a85" ?ặ) ; LATIN SMALL LETTER A WITH BREVE AND DOT BELOW + ("A85" ?Ặ) ; LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW + ("e5" ?ẹ) ; LATIN SMALL LETTER E WITH DOT BELOW + ("E5" ?Ẹ) ; LATIN CAPITAL LETTER E WITH DOT BELOW + ("e3" ?ẻ) ; LATIN SMALL LETTER E WITH HO6K ABOVE + ("E3" ?Ẻ) ; LATIN CAPITAL LETTER E WITH HO6K ABOVE + ("e4" ?ẽ) ; LATIN SMALL LETTER E WITH TILDE + ("E4" ?Ẽ) ; LATIN CAPITAL LETTER E WITH TILDE + ("e61" ?ế) ; LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE + ("E61" ?Ế) ; LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE + ("e62" ?ề) ; LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE + ("E62" ?Ề) ; LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE + ("e63" ?ể) ; LATIN SMALL LETTER E WITH CIRCUMFLEX AND HO6K ABOVE + ("E63" ?Ể) ; LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HO6K ABOVE + ("e64" ?ễ) ; LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE + ("E64" ?Ễ) ; LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE + ("e65" ?ệ) ; LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW + ("E65" ?Ệ) ; LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW + ("i3" ?ỉ) ; LATIN SMALL LETTER I WITH HO6K ABOVE + ("I3" ?Ỉ) ; LATIN CAPITAL LETTER I WITH HO6K ABOVE + ("i5" ?ị) ; LATIN SMALL LETTER I WITH DOT BELOW + ("I5" ?Ị) ; LATIN CAPITAL LETTER I WITH DOT BELOW + ("o5" ?ọ) ; LATIN SMALL LETTER O WITH DOT BELOW + ("O5" ?Ọ) ; LATIN CAPITAL LETTER O WITH DOT BELOW + ("o3" ?ỏ) ; LATIN SMALL LETTER O WITH HO6K ABOVE + ("O3" ?Ỏ) ; LATIN CAPITAL LETTER O WITH HO6K ABOVE + ("o61" ?ố) ; LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE + ("O61" ?Ố) ; LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE + ("o62" ?ồ) ; LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE + ("O62" ?Ồ) ; LATIN CAPITAL LETTER O WITH 
CIRCUMFLEX AND GRAVE + ("o63" ?ổ) ; LATIN SMALL LETTER O WITH CIRCUMFLEX AND HO6K ABOVE + ("O63" ?Ổ) ; LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HO6K ABOVE + ("o64" ?ỗ) ; LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE + ("O64" ?Ỗ) ; LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE + ("o65" ?ộ) ; LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELO7 + ("O65" ?Ộ) ; LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELO7 + ("o71" ?ớ) ; LATIN SMALL LETTER O WITH HORN AND ACUTE + ("O71" ?Ớ) ; LATIN CAPITAL LETTER O WITH HORN AND ACUTE + ("o72" ?ờ) ; LATIN SMALL LETTER O WITH HORN AND GRAVE + ("O72" ?Ờ) ; LATIN CAPITAL LETTER O WITH HORN AND GRAVE + ("o73" ?ở) ; LATIN SMALL LETTER O WITH HORN AND HO6K ABOVE + ("O73" ?Ở) ; LATIN CAPITAL LETTER O WITH HORN AND HO6K ABOVE + ("o74" ?ỡ) ; LATIN SMALL LETTER O WITH HORN AND TILDE + ("O74" ?Ỡ) ; LATIN CAPITAL LETTER O WITH HORN AND TILDE + ("o75" ?ợ) ; LATIN SMALL LETTER O WITH HORN AND DOT BELO7 + ("O75" ?Ợ) ; LATIN CAPITAL LETTER O WITH HORN AND DOT BELO7 + ("u5" ?ụ) ; LATIN SMALL LETTER U WITH DOT BELO7 + ("U5" ?Ụ) ; LATIN CAPITAL LETTER U WITH DOT BELO7 + ("u3" ?ủ) ; LATIN SMALL LETTER U WITH HO6K ABOVE + ("U3" ?Ủ) ; LATIN CAPITAL LETTER U WITH HO6K ABOVE + ("u71" ?ứ) ; LATIN SMALL LETTER U WITH HORN AND ACUTE + ("U71" ?Ứ) ; LATIN CAPITAL LETTER U WITH HORN AND ACUTE + ("u72" ?ừ) ; LATIN SMALL LETTER U WITH HORN AND GRAVE + ("U72" ?Ừ) ; LATIN CAPITAL LETTER U WITH HORN AND GRAVE + ("u73" ?ử) ; LATIN SMALL LETTER U WITH HORN AND HO6K ABOVE + ("U73" ?Ử) ; LATIN CAPITAL LETTER U WITH HORN AND HO6K ABOVE + ("u74" ?ữ) ; LATIN SMALL LETTER U WITH HORN AND TILDE + ("U74" ?Ữ) ; LATIN CAPITAL LETTER U WITH HORN AND TILDE + ("u75" ?ự) ; LATIN SMALL LETTER U WITH HORN AND DOT BELO7 + ("U75" ?Ự) ; LATIN CAPITAL LETTER U WITH HORN AND DOT BELO7 + ("y2" ?ỳ) ; LATIN SMALL LETTER Y WITH GRAVE + ("Y2" ?Ỳ) ; LATIN CAPITAL LETTER Y WITH GRAVE + ("y5" ?ỵ) ; LATIN SMALL LETTER Y WITH DOT BELO7 + ("Y5" ?Ỵ) ; LATIN CAPITAL LETTER Y WITH DOT BELO7 + ("y3" ?ỷ) ; LATIN SMALL LETTER Y WITH HO6K ABOVE + ("Y3" ?Ỷ) ; LATIN CAPITAL LETTER Y WITH HO6K ABOVE + ("y4" ?ỹ) ; LATIN SMALL LETTER Y WITH TILDE + ("Y4" ?Ỹ) ; LATIN CAPITAL LETTER Y WITH TILDE + ("d9" ?đ) ; LATIN SMALL LETTER D WITH STROKE + ("D9" ?Đ) ; LATIN CAPITAL LETTER D WITH STROKE +;("$$" ?₫) ; U+20AB DONG SIGN (#### check) + + ("a22" ["a22"]) + ("A22" ["A2"]) + ("a11" ["a1"]) + ("A11" ["A1"]) + ("a66"' ["a6"]) + ("A66"' ["A6"]) + ("a44" ["a4"]) + ("A44" ["A4"]) + ("e22" ["e2"]) + ("E22" ["E2"]) + ("e11" ["e1"]) + ("E11" ["E1"]) + ("e66" ["e6"]) + ("E66" ["E6"]) + ("i22" ["i2"]) + ("I22" ["I2"]) + ("i11" ["i1"]) + ("I11" ["I1"]) + ("o22" ["o2"]) + ("O22" ["O2"]) + ("o11" ["o1"]) + ("O11" ["O1"]) + ("o66" ["o6"]) + ("O66" ["O6"]) + ("o44" ["o4"]) + ("O44" ["O4"]) + ("u22" ["u2"]) + ("U22" ["U2"]) + ("u11" ["u1"]) + ("U11" ["U1"]) + ("y11" ["y1"]) + ("Y11" ["Y1"]) + ("a88" ["a8"]) + ("A88" ["A8"]) + ("i44" ["i4"]) + ("I44" ["I4"]) + ("u44" ["u4"]) + ("U44" ["u4"]) + ("o77" ["o7"]) + ("O77" ["O7"]) + ("u77" ["u7"]) + ("U77" ["U7"]) + ("a55" ["a5"]) + ("A55" ["A5"]) + ("a33" ["a3"]) + ("A33" ["A3"]) + ("e55" ["e5"]) + ("E55" ["E5"]) + ("e33" ["e3"]) + ("E33" ["E3"]) + ("e44" ["e4"]) + ("E44" ["E4"]) + ("i33" ["i3"]) + ("I33" ["I3"]) + ("i55" ["i5"]) + ("I55" ["I5"]) + ("o55" ["o5"]) + ("O55" ["O5"]) + ("o33" ["o3"]) + ("O33" ["O3"]) + ("u55" ["u5"]) + ("U55" ["U5"]) + ("u33" ["u3"]) + ("U33" ["U3"]) + ("y22" ["y2"]) + ("Y22" ["Y2"]) + ("y55" ["y5"]) + ("Y55" ["Y5"]) + ("y33" ["y3"]) + ("Y33" ["Y3"]) + ("y44" ["y4"]) + ("Y44" 
["Y4"]) + ("d9" ["d9"]) + ("D99" ["D9"]) +;("$$$" ["$$"]) + + ;; escape from composition + ("\\1" ?1) + ("\\2" ?2) + ("\\3" ?3) + ("\\4" ?4) + ("\\5" ?5) + ("\\6" ?6) + ("\\7" ?7) + ("\\8" ?8) + ("\\9" ?9) + ("\\\\" ?\\)) ; literal backslash + + +;; Local Variables: +;; coding: utf-8 +;; End: + +(custom-set-variables + ;; custom-set-variables was added by Custom. + ;; If you edit it by hand, you could mess it up, so be careful. + ;; Your init file should contain only one such instance. + ;; If there is more than one, they won't work right. + '(company-auto-commit nil) + '(company-dabbrev-downcase nil) + '(company-eclim-executable nil) + '(company-minimum-prefix-length 1) + '(company-show-numbers t) + '(company-tooltip-idle-delay 0.08) + '(custom-safe-themes + '("d3a406c5905923546d8a3ad0164a266deaf451856eca5f21b36594ffcb08413a" "c59857e3e950131e0c17c65711f1812d20a54b829115b7c522672ae6ba0864cc" default)) + '(ecb-auto-activate t) + '(ecb-auto-expand-tag-tree 'all) + '(ecb-auto-update-methods-after-save t) + '(ecb-layout-name "left9") + '(ecb-layout-window-sizes + '(("left9" + (ecb-methods-buffer-name 0.12871287128712872 . 0.9833333333333333)))) + '(ecb-options-version "2.50") + '(ecb-tip-of-the-day nil) + '(ecb-toggle-layout-sequence '("left9")) + '(eclim-executable "/Applications/Eclipse.app/Contents/Eclipse/eclim") + '(eclimd-executable "/Applications/Eclipse.app/Contents/Eclipse/eclimd") + '(font-lock-global-modes '(not speedbar-mode)) + '(global-company-mode t) + '(imenu-list-minor-mode nil) + '(imenu-list-position 'left) + '(imenu-list-size 0.1) + '(lsp-auto-execute-action nil) + '(lsp-headerline-breadcrumb-enable t) + '(lsp-idle-delay 0.1) + '(lsp-ui-doc-position 'bottom t) + '(org-emphasis-alist + '(("*" bold) + ("/" italic) + ("_" underline) + ("=" org-verbatim verbatim) + ("~" org-code verbatim) + ("+" + (:strike-through t)))) + '(org-trello-current-prefix-keybinding "C-c o") + '(package-selected-packages + '(json-mode yang-mode dashboard markdown-toc use-package hide-mode-line lsp-treemacs lsp-ui helm-ls-git which-key quickrun helm-company company company-box multiple-cursors org-download rainbow-delimiters smartparens undo-tree hungry-delete yasnippet-snippets monokai-theme move-text goto-line-preview window-numbering helm-gtags helm-c-yasnippet helm-ag column-enforce-mode bug-hunter)) + '(rainbow-identifiers-choose-face-function 'ostnm/rainbow-identifiers-predefined-choose-face) + '(yas-inhibit-overlay-modification-protection t)) +(custom-set-faces + ;; custom-set-faces was added by Custom. + ;; If you edit it by hand, you could mess it up, so be careful. + ;; Your init file should contain only one such instance. + ;; If there is more than one, they won't work right. + '(font-lock-function-name-face ((t (:foreground "#A6E22E" :weight bold :height 1.3)))) + '(imenu-list-entry-face-0 ((t (:inherit imenu-list-entry-face :foreground "light green")))) + '(imenu-list-entry-face-1 ((t (:inherit imenu-list-entry-face :foreground "gold"))))) + + +=========== +# -*- tcl -*- +# +# $Id: idx.html,v 1.7 2005/09/28 04:51:19 andreas_kupries Exp $ +# +# Engine to convert a docidx document into HTML. +# +# Copyright (c) 2003 Andreas Kupries +# Freely redistributable. +# +###################################################################### + +dt_source _idx_common.tcl +dt_source _html.tcl + +###################################################################### +# Conversion specification. +# +# One-pass processing. 
rename idx_postprocess {}
rename fmt_postprocess idx_postprocess

proc fmt_plain_text {text} {return {}}

################################################################
## Backend for HTML markup

global firstkey ; set firstkey 1
global even     ; set even     1
global reflist  ; set reflist  [list]
global cnt      ; set cnt      0
global kwid
array set kwid {}

proc fmt_index_begin {label title} {
    set hdr ""
    append hdr "[markup <html><head>]\n"
    append hdr "[markup <title>] $label [markup </title>]\n"

    # Engine parameter - insert 'meta'
    if {[set meta [Get meta]] != {}} {append hdr [markup $meta]\n}

    # Engine parameter - load predefined keyword anchors.
    if {[llength [set ki [Get kwid]]]} {
        global kwid
        array set kwid $ki
    }

    append hdr "[markup </head>]\n"
    append hdr [ht_comment [c_provenance]]\n
    append hdr [ht_comment "CVS: \$Id\$ $label"]\n
    append hdr \n
    append hdr [markup <body>]\n

    # Engine parameter - insert 'header'
    if {[set header [Get header]] != {}} {append hdr [markup $header]\n}

    if {($label != {}) && ($title != {})} {
        append hdr "[markup <h1>] $label -- $title [markup </h1>]\n"
    } elseif {$label != {}} {
        append hdr "[markup <h1>] $label [markup </h1>]\n"
    } elseif {$title != {}} {
        append hdr "[markup <h1>] $title [markup </h1>]\n"
    }
    append hdr [markup "<table>"]\n
    return $hdr
}

proc fmt_index_end {} {
    set text [FlushReferences]
    append text [tag/ table]\n

    # Engine parameter - insert 'footer'
    set footer [Get footer]
    if {$footer != {}} {set footer \n[markup $footer]\n}

    return $text[tag hr]${footer}[tag/ body][tag/ html]\n
}

proc fmt_key {text} {
    global firstkey even reflist cnt kwid

    set res [FlushReferences]
    set firstkey 0

    if {$even} {
        append res [markup "<tr class=\"even\">"]\n
    } else {
        append res [markup "<tr class=\"odd\">"]\n
    }
    set even [expr {1-$even}]

    if {[info exists kwid($text)]} {
        set anchor $kwid($text)
    } else {
        set anchor key$cnt
        incr cnt
    }

    append res "    [markup {<td>}]"
    append res "[markup "<a name=\"$anchor\">"] ${text} [markup </a>][tag/ td]\n"
    append res "    [markup {<td>}]\n"
    return $res
}

proc FlushReferences {} {
    global firstkey reflist

    set res ""
    if {!$firstkey} {
        set lines [list]
        foreach {ref label} $reflist {
            lappend lines "\t[markup "<a href=\"$ref\">"] ${label} [tag/ a]"
        }
        append res "[join $lines ,\n]\n [tag /td]\n[tag/ tr]\n"
    }
    set reflist [list]
    return $res
}

proc fmt_manpage {file label} {global reflist ; lappend reflist [dt_fmap $file] $label ; return}
proc fmt_url     {url label}  {global reflist ; lappend reflist $url $label ; return}
proc fmt_comment {text}       {ht_comment $text}

################################################################

global __var
array set __var {
    meta   {}
    header {}
    footer {}
    kwid   {}
}
proc Get {varname} {global __var ; return $__var($varname)}
proc idx_listvariables {} {global __var ; return [array names __var]}
proc idx_varset {varname text} {
    global __var
    if {![info exists __var($varname)]} {return -code error "Unknown engine variable \"$varname\""}
    set __var($varname) $text
    return
}

################################################################

===========
PlainTextEntityProcessor (Solr 6.4.2 API)
org.apache.solr.handler.dataimport

Class PlainTextEntityProcessor

public class PlainTextEntityProcessor
extends EntityProcessorBase

An implementation of EntityProcessor which reads data from a URL or file and gives out a row which contains one String value. The name of the field is 'plainText'.

Since:
    solr 1.4

Constructor Detail

PlainTextEntityProcessor

public PlainTextEntityProcessor()

Method Detail

init

public void init(Context context)

Description copied from class: EntityProcessor
This method is called when it starts processing an entity. When it comes back to the entity it is called again. So it can reset anything at that point. For a rootmost entity this is called only once for an ingestion. For sub-entities, this is called once for each row from its parent entity.

Overrides:
    init in class EntityProcessorBase
Parameters:
    context - The current context

nextRow

public Map<String,Object> nextRow()

Description copied from class: EntityProcessorBase
For a simple implementation, this is the only method that the sub-class should implement. This is intended to stream rows one-by-one. Return null to signal end of rows.

Overrides:
    nextRow in class EntityProcessorBase
Returns:
    a row where the key is the name of the field and value can be any Object or a Collection of objects. Return null to signal end of rows.

Copyright © 2000-2017 Apache Software Foundation. All Rights Reserved.
===========
var searchData=
[
  ['in_5frange_194',['in_range',['../class_threshold_transform.html#a874992f571166f58cd7ef73856d30509',1,'ThresholdTransform']]],
  ['ina219_195',['INA219',['../class_i_n_a219.html',1,'INA219'],['../class_i_n_a219.html#a3bdfe18ddb1ca53b53f48ac8672a94d0',1,'INA219::ina219()'],['../class_i_n_a219_value.html#afbc6bf7ce37f6c243d0f944bfa9cfef3',1,'INA219Value::ina219()'],['../class_i_n_a219.html#adc0300a6f6fa99ee85786a354614e343',1,'INA219::INA219()']]],
  ['ina219_2ecpp_196',['ina219.cpp',['../ina219_8cpp.html',1,'']]],
  ['ina219_2eh_197',['ina219.h',['../ina219_8h.html',1,'']]],
  ['ina219cal_5ft_198',['INA219CAL_t',['../ina219_8h.html#af0f21d23ba16939156ae369e49fc59d0',1,'ina219.h']]],
  ['ina219valtype_199',['INA219ValType',['../ina219_8h.html#a80681f57fd3d14d390d830a2ea845654',1,'ina219.h']]],
  ['ina219value_200',['INA219Value',['../class_i_n_a219_value.html',1,'INA219Value'],['../class_i_n_a219_value.html#a929d4abc76063b697a4836da30d1d5c6',1,'INA219Value::INA219Value()'],['../ina219_8h.html#aab1b0e7a44b1751e455f2f0716da1794',1,'INA219value(): ina219.h']]],
  ['index_2eh_201',['index.h',['../index_8h.html',1,'']]],
  ['initfilter_202',['initFilter',['../class_sensor_n_x_p___f_x_o_s8700___f_x_a_s21002.html#a68de3c26ea9715291dcaee9d1260c149',1,'SensorNXP_FXOS8700_FXAS21002']]],
  ['input_203',['input',['../class_curve_interpolator_1_1_sample.html#a43dd7cc1fd950a03a6674056194e935b',1,'CurveInterpolator::Sample']]],
  ['input_5fbuffer_5flength_204',['INPUT_BUFFER_LENGTH',['../nmea__parser_8h.html#a6088978b66925a9ff44bbdf302f00726',1,'nmea_parser.h']]],
  ['integerconsumer_205',['IntegerConsumer',['../valueconsumer_8h.html#a45a844f7269f830a1f708f965627248c',1,'valueconsumer.h']]],
  ['integerproducer_206',['IntegerProducer',['../valueproducer_8h.html#ab74ca4be7de22a21f73128ea38a4cbbb',1,'valueproducer.h']]],
  ['integersensor_207',['IntegerSensor',['../class_integer_sensor.html',1,'IntegerSensor'],['../class_integer_sensor.html#a2214ecc518fbbb314be8ca987aa0e730',1,'IntegerSensor::IntegerSensor()']]],
  ['integerthreshold_208',['IntegerThreshold',['../class_integer_threshold.html',1,'IntegerThreshold'],['../class_integer_threshold.html#a63bb646505b2c51b0d57b0799d062d86',1,'IntegerThreshold::IntegerThreshold()']]],
  ['integertransform_209',['IntegerTransform',['../transform_8h.html#ae9c0b5d70ecdf35d4bd3a0a4bd4e0d17',1,'transform.h']]],
  ['integrator_210',['Integrator',['../class_integrator.html',1,'Integrator'],['../class_integrator.html#a787de1dd45b1ced5607e6209f0e01dd0',1,'Integrator::Integrator()']]],
  ['integrator_2ecpp_211',['integrator.cpp',['../integrator_8cpp.html',1,'']]],
  ['integrator_2eh_212',['integrator.h',['../integrator_8h.html',1,'']]],
  ['interrupt_5ftype_213',['interrupt_type',['../class_digital_input.html#aa4e30fc0836ce2d0934b9eb5d507f116',1,'DigitalInput']]],
  ['ip_5faddress_214',['IP_ADDRESS',['../sensesp__app_8h.html#abeacf3e22e82961fc23c43cad734ebbdad623eb60dd773b245fe13ee928fd47c8',1,'sensesp_app.h']]],
  ['ipaddrdev_215',['IPAddrDev',['../class_i_p_addr_dev.html',1,'IPAddrDev'],['../class_i_p_addr_dev.html#ae5d89601ff69fb635850bbaacdd61664',1,'IPAddrDev::IPAddrDev()']]],
  ['is_5fconnected_216',['is_connected',['../class_w_s_client.html#a6ae43b80f511194b43b0dff2763d68b1',1,'WSClient']]],
  ['issignalkconnected_217',['isSignalKConnected',['../class_sens_e_s_p_app.html#adee3f53bc75ae6d5d73a8707dd75600e',1,'SensESPApp']]],
  ['iswificonnected_218',['isWifiConnected',['../class_sens_e_s_p_app.html#a15f6e0569533b417e9724540d758c884',1,'SensESPApp']]]
];

===========
/**
 * @fileoverview added by tsickle
 * @suppress {checkTypes,extraRequire,missingOverride,missingReturn,unusedPrivateMembers,uselessCode} checked by tsc
 */
import { decimalFormatted } from './../services/utilities';
/** @type {?} */
export const sumTotalsDollarColoredBoldFormatter = (/**
 * @param {?} totals
 * @param {?} columnDef
 * @param {?=} grid
 * @return {?}
 */
(totals, columnDef, grid) => {
    /** @type {?} */
    const field = columnDef.field || '';
    /** @type {?} */
    const val = totals.sum && totals.sum[field];
    /** @type {?} */
    const prefix = (columnDef.params && columnDef.params.groupFormatterPrefix) ? columnDef.params.groupFormatterPrefix : '';
    /** @type {?} */
    const suffix = (columnDef.params && columnDef.params.groupFormatterSuffix) ? columnDef.params.groupFormatterSuffix : '';
    if (isNaN(+val)) {
        return '';
    }
    else if (val >= 0) {
        return `<span style="color:green; font-weight: bold;">${prefix + '$' + decimalFormatted(val, 2, 2) + suffix}</span>`;
    }
    else {
        return `<span style="color:red; font-weight: bold;">${prefix + '$' + decimalFormatted(val, 2, 2) + suffix}</span>`;
    }
});
===========
Example of Image with WLS Domain
================================
This Dockerfile extends the Oracle WebLogic image by applying a PSU patch.

# How to build and run
First make sure you have built **oracle/weblogic:12.2.1.1-developer**.

Then download the file [p24286152_122110_Generic.zip](http://support.oracle.com) and place it next to this README.

To build, run:

    $ docker build -t 12211-psu24286152 .

To start the Admin Server, run:

    $ docker run -p 7001:7001 12211-psu24286152

When you run the container, a patched WebLogic Server 12.2.1.1 empty domain is created. At startup of the container, a random password will be generated for the administration of the domain. You can find this password in the output line:

`Oracle WebLogic Server auto generated Admin password:`

Go to your browser and open the Administration Console at:

    http://localhost:7001/console

Extend this patched image to create a domain image and start WebLogic Servers running in containers.

# Copyright
Copyright (c) 2018 Oracle and/or its affiliates. All rights reserved.

---
title: About authorization in .NET microservices and web applications
description: Security in .NET microservices and web applications - Get an overview of the main authorization options in ASP.NET Core applications - role-based and policy-based.
author: mjrousos
ms.date: 01/30/2020
---
# About authorization in .NET microservices and web applications

After authentication, ASP.NET Core web APIs need to authorize access. This process allows a service to make APIs available to some authenticated users, but not to all. [Authorization](/aspnet/core/security/authorization/introduction) can be done based on users' roles or based on custom policy, which might include inspecting claims or other heuristics.

Restricting access to an ASP.NET Core MVC route is as easy as applying an Authorize attribute to the action method (or to the controller's class, if all of the controller's actions require authorization), as shown in the following example:

```csharp
public class AccountController : Controller
{
    public ActionResult Login()
    {
    }

    [Authorize]
    public ActionResult Logout()
    {
    }
}
```

By default, adding an Authorize attribute without parameters will limit access to authenticated users for that controller or action. To further restrict an API to be available for only specific users, the attribute can be expanded to specify required roles or policies that users must satisfy.

## Implement role-based authorization

ASP.NET Core Identity has a built-in concept of roles. In addition to users, ASP.NET Core Identity stores information about the different roles used by the application and keeps track of which users are assigned to which roles. These assignments can be changed programmatically with the `RoleManager` type, which updates roles in persisted storage, and the `UserManager` type, which can grant or revoke roles from users.
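As an illustrative sketch (not from this article), such assignments might be made roughly as follows; the `RoleSeeder` class, the `IdentityUser`/`IdentityRole` store types, and the "Administrator" role name are assumptions for the example:

```csharp
using System.Threading.Tasks;
using Microsoft.AspNetCore.Identity;

// Hypothetical helper: grants a user the "Administrator" role,
// creating the role in the persisted store if it doesn't exist yet.
public class RoleSeeder
{
    private readonly RoleManager<IdentityRole> _roleManager;
    private readonly UserManager<IdentityUser> _userManager;

    public RoleSeeder(RoleManager<IdentityRole> roleManager,
                      UserManager<IdentityUser> userManager)
    {
        _roleManager = roleManager;
        _userManager = userManager;
    }

    public async Task GrantAdministratorAsync(IdentityUser user)
    {
        // RoleManager updates roles in the persisted store.
        if (!await _roleManager.RoleExistsAsync("Administrator"))
        {
            await _roleManager.CreateAsync(new IdentityRole("Administrator"));
        }

        // UserManager grants (or, with RemoveFromRoleAsync, revokes) roles.
        await _userManager.AddToRoleAsync(user, "Administrator");
    }
}
```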
If you're authenticating with JWT bearer tokens, the ASP.NET Core JWT bearer authentication middleware will populate a user's roles based on the role claims found in the token. To limit access to an MVC action or controller to users in specific roles, you can include a Roles parameter in the Authorize annotation (attribute), as shown in the following code fragment:

```csharp
[Authorize(Roles = "Administrator, PowerUser")]
public class ControlPanelController : Controller
{
    public ActionResult SetTime()
    {
    }

    [Authorize(Roles = "Administrator")]
    public ActionResult ShutDown()
    {
    }
}
```

In this example, only users in the Administrator or PowerUser roles can access APIs in the ControlPanel controller (such as executing the SetTime action). The ShutDown API is further restricted to allow access only to users in the Administrator role.

To require that a user be in multiple roles, you use multiple Authorize attributes, as shown in the following example:

```csharp
[Authorize(Roles = "Administrator, PowerUser")]
[Authorize(Roles = "RemoteEmployee")]
[Authorize(Policy = "CustomPolicy")]
public ActionResult API1()
{
}
```

In this example, to call API1, a user must:

- Be in the Administrator *or* PowerUser role, *and*

- Be in the RemoteEmployee role, *and*

- Satisfy the CustomPolicy authorization handler.

## Implement policy-based authorization

Custom authorization rules can also be written using [authorization policies](https://docs.asp.net/en/latest/security/authorization/policies.html). This section provides an overview. For more information, see the [ASP.NET Authorization Workshop](https://github.com/blowdart/AspNetAuthorizationWorkshop).

Custom authorization policies are registered in the Startup.ConfigureServices method using the services.AddAuthorization method. This method takes a delegate that configures an AuthorizationOptions argument.

```csharp
services.AddAuthorization(options =>
{
    options.AddPolicy("AdministratorsOnly", policy =>
        policy.RequireRole("Administrator"));

    options.AddPolicy("EmployeesOnly", policy =>
        policy.RequireClaim("EmployeeNumber"));

    options.AddPolicy("Over21", policy =>
        policy.Requirements.Add(new MinimumAgeRequirement(21)));
});
```

As shown in the example, policies can be associated with different types of requirements. After the policies are registered, they can be applied to an action or controller by passing the policy's name as the Policy argument of the Authorize attribute (for example, `[Authorize(Policy="EmployeesOnly")]`). Policies can have multiple requirements, not just one (as shown in these examples).

In the previous example, the first AddPolicy call is just an alternative way of authorizing by role. If `[Authorize(Policy="AdministratorsOnly")]` is applied to an API, only users in the Administrator role will be able to access it.

The second AddPolicy call demonstrates an easy way to require that a particular claim be present for the user. The RequireClaim method also optionally takes expected values for the claim. If values are specified, the requirement is met only if the user has both a claim of the correct type and one of the specified values. If you're using the JWT bearer authentication middleware, all JWT properties will be available as user claims.

The most interesting policy shown here is the third `AddPolicy` call, because it uses a custom authorization requirement. By using custom authorization requirements, you can have a great deal of control over how authorization is performed. For this to work, you must implement these types (see the sketch after this list):

- A requirement type that derives from IAuthorizationRequirement and that contains fields specifying the details of the requirement. In the example, this is an age field for the sample `MinimumAgeRequirement` type.

- A handler that implements `AuthorizationHandler<TRequirement>`, where TRequirement is the type of requirement that the handler can satisfy. The handler must implement the HandleRequirementAsync method, which checks whether a specified context that contains information about the user satisfies the requirement.

If the user meets the requirement, a call to `context.Succeed` will indicate that the user is authorized. If there are multiple ways that a user might satisfy an authorization requirement, multiple handlers can be created.

In addition to registering custom policy requirements with `AddPolicy` calls, you also need to register custom requirement handlers via dependency injection (`services.AddTransient()`).
An example of a custom authorization requirement and handler for checking a user's age (based on a `DateOfBirth` claim) is available in the ASP.NET Core [authorization documentation](https://docs.asp.net/en/latest/security/authorization/policies.html).

## Additional resources

- **ASP.NET Core Authentication** \
  [https://docs.microsoft.com/aspnet/core/security/authentication/identity](/aspnet/core/security/authentication/identity)

- **ASP.NET Core Authorization** \
  [https://docs.microsoft.com/aspnet/core/security/authorization/introduction](/aspnet/core/security/authorization/introduction)

- **Role-based authorization** \
  [https://docs.microsoft.com/aspnet/core/security/authorization/roles](/aspnet/core/security/authorization/roles)

- **Custom policy-based authorization** \
  [https://docs.microsoft.com/aspnet/core/security/authorization/policies](/aspnet/core/security/authorization/policies)

>[!div class="step-by-step"]
>[Previous](index.md)
>[Next](developer-app-secrets-storage.md)

===========
"Rules for running Rollup under Bazel"

load("@build_bazel_rules_nodejs//:providers.bzl", "JSEcmaScriptModuleInfo", "NodeContextInfo", "NpmPackageInfo", "node_modules_aspect", "run_node")
load("@build_bazel_rules_nodejs//internal/linker:link_node_modules.bzl", "module_mappings_aspect")

_DOC = """Runs the Rollup.js CLI under Bazel.

See https://rollupjs.org/guide/en/#command-line-reference

Typical example:

```python
load("@npm_bazel_rollup//:index.bzl", "rollup_bundle")

rollup_bundle(
    name = "bundle",
    srcs = ["dependency.js"],
    entry_point = "input.js",
    config_file = "rollup.config.js",
)
```

Note that the command-line options set by Bazel override what appears in the rollup config file.
This means that typically a single `rollup.config.js` can contain settings for your whole repo,
and multiple `rollup_bundle` rules can share the configuration.

Thus, setting options that Bazel controls will have no effect, e.g.

```javascript
module.exports = {
    output: { file: 'this_is_ignored.js' },
}
```

You must determine ahead of time whether Rollup needs to produce a directory output.
This is the case if you have dynamic imports which cause code-splitting, or if you
provide multiple entry points. Use the `output_dir` attribute to specify that you want a
directory output.
Rollup's CLI has the same behavior, forcing you to pick `--output.file` or `--output.dir`.

To get multiple output formats, wrap the rule with a macro or list comprehension, e.g.

```python
[
    rollup_bundle(
        name = "bundle.%s" % format,
        entry_point = "foo.js",
        format = format,
    )
    for format in [
        "cjs",
        "umd",
    ]
]
```

This will produce one output per requested format.
"""

_ROLLUP_ATTRS = {
    "srcs": attr.label_list(
        doc = """Non-entry point JavaScript source files from the workspace.

You must not repeat file(s) passed to entry_point/entry_points.
""",
        # Don't try to constrain the filenames, could be json, svg, whatever
        allow_files = True,
    ),
    "args": attr.string_list(
        doc = """Command line arguments to pass to rollup. Can be used to override config file settings.

These arguments are passed on the command line before all arguments that are always added by the
rule, such as `--output.dir` or `--output.file`, `--format`, `--config` and `--preserveSymlinks`, and
also those that are optionally added by the rule, such as `--sourcemap`.

See the rollup CLI docs https://rollupjs.org/guide/en/#command-line-flags for the complete list of supported arguments.""",
        default = [],
    ),
    "config_file": attr.label(
        doc = """A rollup.config.js file

Passed to the --config option.
See https://rollupjs.org/guide/en/#configuration-files

If not set, a default basic Rollup config is used.
""",
        allow_single_file = True,
        default = "@npm_bazel_rollup//:rollup.config.js",
    ),
    "entry_point": attr.label(
        doc = """The bundle's entry point (e.g. your main.js or app.js or index.js).

This is just a shortcut for the `entry_points` attribute with a single output chunk named the same as the rule.

For example, these are equivalent:

```python
rollup_bundle(
    name = "bundle",
    entry_point = "index.js",
)
```

```python
rollup_bundle(
    name = "bundle",
    entry_points = {
        "index.js": "bundle"
    }
)
```

If `rollup_bundle` is used on a `ts_library`, the `rollup_bundle` rule handles selecting the correct outputs from `ts_library`.
In this case, `entry_point` can be specified as the `.ts` file and `rollup_bundle` will handle the mapping to the `.mjs` output file.

For example:

```python
ts_library(
    name = "foo",
    srcs = [
        "foo.ts",
        "index.ts",
    ],
)

rollup_bundle(
    name = "bundle",
    deps = [ "foo" ],
    entry_point = "index.ts",
)
```
""",
        allow_single_file = True,
    ),
    "entry_points": attr.label_keyed_string_dict(
        doc = """The bundle's entry points (e.g. your main.js or app.js or index.js).

Passed to the [`--input` option](https://github.com/rollup/rollup/blob/master/docs/999-big-list-of-options.md#input) in Rollup.

Keys in this dictionary are labels pointing to .js entry point files.
Values are the name to be given to the corresponding output chunk.

Either this attribute or `entry_point` must be specified, but not both.
""",
        allow_files = True,
    ),
    "format": attr.string(
        doc = """Specifies the format of the generated bundle.
One of the following:

- `amd`: Asynchronous Module Definition, used with module loaders like RequireJS
- `cjs`: CommonJS, suitable for Node and other bundlers
- `esm`: Keep the bundle as an ES module file, suitable for other bundlers and inclusion as a `<script type=module>`