devjas1 committed
Commit b3a4795 · Parent(s): eae02ee
(FEAT)[Refactor commit message generation]: enhance the generate_commit_message function to utilize configuration settings and improve error handling.
src/generator.py +24 -3
src/generator.py CHANGED
@@ -22,9 +22,30 @@ def load_phi2(model_path: str, context_window: int = 2048):
 
 
 # Generation Function
-def generate_commit_message(
-
-
+def generate_commit_message(prompt: str, config: dict, max_tokens: int = None):
+    """
+    Generate a commit message using the Phi-2 model.
+
+    Args:
+        prompt (str): The input prompt for generation
+        config (dict): Configuration dictionary containing model settings
+        max_tokens (int, optional): Maximum tokens to generate
+
+    Returns:
+        str: Generated commit message
+    """
+    if max_tokens is None:
+        max_tokens = config.get("generator", {}).get("max_tokens", 512)
+
+    model_path = config["generator"]["model_path"]
+    n_ctx = config.get("generator", {}).get("n_ctx", 2048)
+
+    try:
+        llm = Llama(model_path=model_path, n_ctx=n_ctx, verbose=False)
+        response = llm(prompt, max_tokens=max_tokens)
+        return response["choices"][0]["text"].strip()
+    except Exception as e:
+        raise RuntimeError(f"Failed to generate with Phi-2: {e}") from e
 
 
 # Fallback Logic
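For reference, a minimal sketch of how the refactored function could be called. The config keys (generator.model_path, generator.n_ctx, generator.max_tokens) come from the diff above; the import path, model path, and prompt text are hypothetical, and the sketch assumes src/generator.py imports Llama from llama_cpp (the import is not shown in this hunk).

from src.generator import generate_commit_message  # hypothetical import path

config = {
    "generator": {
        "model_path": "models/phi-2.Q4_K_M.gguf",  # hypothetical local GGUF path
        "n_ctx": 2048,       # context window; matches the default in the diff
        "max_tokens": 512,   # used when max_tokens is not passed explicitly
    }
}

prompt = "Write a one-line commit message for the following staged diff:\n..."

# max_tokens falls back to config["generator"]["max_tokens"] when omitted
message = generate_commit_message(prompt, config)
print(message)

Note that the diff constructs a new Llama instance inside generate_commit_message, so the model is reloaded on every call; callers generating many messages may prefer to reuse the load_phi2 helper visible in the hunk header instead.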