	Upload 3 files
- app.py +211 -198
- exception.py +50 -0
- logger.py +21 -0
    	
app.py
CHANGED

@@ -1,198 +1,211 @@
[Removed: the previous 198-line version of app.py. Only its opening lines are legible in this view: the same "# Importing required libraries" comment, the warnings filter, the json and subprocess imports, and the truncated llama_cpp / llama_cpp_agent import block that also opens the new version. The rest of the removed file is truncated and not recoverable here.]
[Added: the new 211-line app.py]

# Importing required libraries
import warnings
warnings.filterwarnings("ignore")

import json
import subprocess
import sys
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent
from llama_cpp_agent import MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download
from typing import List, Tuple
from logger import logging
from exception import CustomExceptionHandling


# Download gguf model files
llm = None
llm_model = None

hf_hub_download(
    repo_id="bartowski/Qwen2.5-Coder-1.5B-Instruct-GGUF",
    filename="Qwen2.5-Coder-1.5B-Instruct-Q6_K.gguf",
    local_dir="./models",
)
hf_hub_download(
    repo_id="bartowski/Qwen2.5-Coder-3B-Instruct-GGUF",
    filename="Qwen2.5-Coder-3B-Instruct-Q5_K_S.gguf",
    local_dir="./models",
)

# Set the title and description
title = "Qwen-Coder Llama.cpp"
description = """Qwen2.5-Coder, a six-model family of LLMs, boasts enhanced code generation, reasoning, and debugging. Trained on 5.5 trillion tokens, its 32B parameter model rivals GPT-4o, offering versatile capabilities for coding and broader applications."""


def respond(
    message: str,
    history: List[Tuple[str, str]],
    model: str,
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    top_k: int,
    repeat_penalty: float,
):
    """
    Respond to a message using the Qwen2.5-Coder model via Llama.cpp.

    Args:
        - message (str): The message to respond to.
        - history (List[Tuple[str, str]]): The chat history.
        - model (str): The model to use.
        - system_message (str): The system message to use.
        - max_tokens (int): The maximum number of tokens to generate.
        - temperature (float): The temperature of the model.
        - top_p (float): The top-p of the model.
        - top_k (int): The top-k of the model.
        - repeat_penalty (float): The repetition penalty of the model.

    Returns:
        str: The response to the message.
    """
    try:
        # Load the global variables
        global llm
        global llm_model

        # Load the model
        if llm is None or llm_model != model:
            llm = Llama(
                model_path=f"models/{model}",
                flash_attn=False,
                n_gpu_layers=0,
                n_batch=32,
                n_ctx=8192,
            )
            llm_model = model
        provider = LlamaCppPythonProvider(llm)

        # Create the agent
        agent = LlamaCppAgent(
            provider,
            system_prompt=f"{system_message}",
            predefined_messages_formatter_type=MessagesFormatterType.CHATML,
            debug_output=True,
        )

        # Set the settings like temperature, top-k, top-p, max tokens, etc.
        settings = provider.get_provider_default_settings()
        settings.temperature = temperature
        settings.top_k = top_k
        settings.top_p = top_p
        settings.max_tokens = max_tokens
        settings.repeat_penalty = repeat_penalty
        settings.stream = True

        messages = BasicChatHistory()

        # Add the chat history
        for msn in history:
            user = {"role": Roles.user, "content": msn[0]}
            assistant = {"role": Roles.assistant, "content": msn[1]}
            messages.add_message(user)
            messages.add_message(assistant)

        # Get the response stream
        stream = agent.get_chat_response(
            message,
            llm_sampling_settings=settings,
            chat_history=messages,
            returns_streaming_generator=True,
            print_output=False,
        )

        # Log the success
        logging.info("Response stream generated successfully")

        # Generate the response
        outputs = ""
        for output in stream:
            outputs += output
            yield outputs

    # Handle exceptions that may occur during the process
    except Exception as e:
        # Custom exception handling
        raise CustomExceptionHandling(e, sys) from e


# Create a chat interface
demo = gr.ChatInterface(
    respond,
    additional_inputs_accordion=gr.Accordion(
        label="⚙️ Parameters", open=False, render=False
    ),
    additional_inputs=[
        gr.Dropdown(
            choices=[
                "Qwen2.5-Coder-1.5B-Instruct-Q6_K.gguf",
                "Qwen2.5-Coder-3B-Instruct-Q5_K_S.gguf",
            ],
            value="Qwen2.5-Coder-1.5B-Instruct-Q6_K.gguf",
            label="Model",
            info="Select the AI model to use for chat",
        ),
        gr.Textbox(
            value="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
            label="System Prompt",
            info="Define the AI assistant's personality and behavior",
            lines=2,
        ),
        gr.Slider(
            minimum=512,
            maximum=4096,
            value=2048,
            step=512,
            label="Max Tokens",
            info="Maximum length of response (higher = longer replies)",
        ),
        gr.Slider(
            minimum=0.1,
            maximum=2.0,
            value=0.7,
            step=0.1,
            label="Temperature",
            info="Creativity level (higher = more creative, lower = more focused)",
        ),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p",
            info="Nucleus sampling threshold",
        ),
        gr.Slider(
            minimum=1,
            maximum=100,
            value=40,
            step=1,
            label="Top-k",
            info="Limit vocabulary choices to top K tokens",
        ),
        gr.Slider(
            minimum=1.0,
            maximum=2.0,
            value=1.1,
            step=0.1,
            label="Repetition Penalty",
            info="Penalize repeated words (higher = less repetition)",
        ),
    ],
    theme="Ocean",
    submit_btn="Send",
    stop_btn="Stop",
    title=title,
    description=description,
    chatbot=gr.Chatbot(scale=1, show_copy_button=True),
    flagging_mode="never",
)


# Launch the chat interface
if __name__ == "__main__":
    demo.launch(debug=False)
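Since respond() is an ordinary Python generator, it can be exercised outside the Gradio UI. The following is a minimal smoke-test sketch, not part of this commit; it assumes the GGUF files above are already in ./models and that llama-cpp-python and llama-cpp-agent are installed, and the prompt is only an example.

# Smoke-test sketch (illustrative, not part of the commit).
# Assumes the GGUF files are already downloaded into ./models and the
# llama-cpp-python / llama-cpp-agent packages are installed.
from app import respond

stream = respond(
    message="Write a Python function that reverses a string.",  # example prompt
    history=[],  # no previous chat turns
    model="Qwen2.5-Coder-1.5B-Instruct-Q6_K.gguf",
    system_message="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
    top_k=40,
    repeat_penalty=1.1,
)

# respond() yields the accumulated text after each streamed chunk,
# so the last yielded value is the full reply.
final = ""
for partial in stream:
    final = partial
print(final)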
    	
exception.py
ADDED

@@ -0,0 +1,50 @@

"""
This module defines a custom exception handling class and a function to get error message with details of the error.
"""

# Standard Library
import sys

# Local imports
from logger import logging


# Function Definition to get error message with details of the error (file name and line number) when an error occurs in the program
def get_error_message(error, error_detail: sys):
    """
    Get error message with details of the error.

    Args:
        - error (Exception): The error that occurred.
        - error_detail (sys): The details of the error.

    Returns:
        str: A string containing the error message along with the file name and line number where the error occurred.
    """
    _, _, exc_tb = error_detail.exc_info()

    # Get error details
    file_name = exc_tb.tb_frame.f_code.co_filename
    return "Error occured in python script name [{0}] line number [{1}] error message[{2}]".format(
        file_name, exc_tb.tb_lineno, str(error)
    )


# Custom Exception Handling Class Definition
class CustomExceptionHandling(Exception):
    """
    Custom Exception Handling:
        This class defines a custom exception that can be raised when an error occurs in the program.
        It takes an error message and an error detail as input and returns a formatted error message when the exception is raised.
    """

    # Constructor
    def __init__(self, error_message, error_detail: sys):
        """Initialize the exception"""
        super().__init__(error_message)

        self.error_message = get_error_message(error_message, error_detail=error_detail)

    def __str__(self):
        """String representation of the exception"""
        return self.error_message
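app.py raises this class from its except block (raise CustomExceptionHandling(e, sys) from e). A standalone usage sketch, assuming only that exception.py and logger.py are importable:

# Usage sketch (illustrative, not part of the commit).
import sys

from exception import CustomExceptionHandling

try:
    1 / 0
except Exception as e:
    # sys.exc_info() is read inside get_error_message() to recover the
    # traceback, so the formatted message names this file and line number.
    wrapped = CustomExceptionHandling(e, sys)
    print(wrapped)  # file name, line number, and "division by zero"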
    	
logger.py
ADDED

@@ -0,0 +1,21 @@

# Importing the required modules
import os
import logging
from datetime import datetime

# Creating a log file with the current date and time as the name of the file
LOG_FILE = f"{datetime.now().strftime('%m_%d_%Y_%H_%M_%S')}.log"

# Creating a logs folder if it does not exist
logs_path = os.path.join(os.getcwd(), "logs", LOG_FILE)
os.makedirs(logs_path, exist_ok=True)

# Setting the log file path and the log level
LOG_FILE_PATH = os.path.join(logs_path, LOG_FILE)

# Configuring the logger
logging.basicConfig(
    filename=LOG_FILE_PATH,
    format="[ %(asctime)s ] %(lineno)d %(name)s - %(levelname)s - %(message)s",
    level=logging.INFO,
)
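Importing this module configures Python's root logger as a side effect, which is why app.py and exception.py can simply do from logger import logging and call logging.info(...). A usage sketch (not part of the commit):

# Usage sketch (illustrative, not part of the commit).
from logger import logging  # the import runs logging.basicConfig(...) above

logging.info("Response stream generated successfully")

Note on the layout as committed: logs_path already ends with LOG_FILE, so os.makedirs creates a directory named after the log file and LOG_FILE_PATH points inside it (logs/<timestamp>.log/<timestamp>.log). If a flat logs/ directory is preferred, the makedirs call would target os.path.join(os.getcwd(), "logs") instead.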
