rdune71 committed on
Commit
9cf1bcf
·
1 Parent(s): da339ca

Improve Ollama diagnostic script with model generation test and better feedback

Browse files
Files changed (1) hide show
  1. diagnose_ollama.py +87 -3
diagnose_ollama.py CHANGED
@@ -1,6 +1,6 @@
1
  #!/usr/bin/env python3
2
  """
3
- Diagnostic script to test Ollama connectivity
4
  """
5
 
6
  import requests
@@ -11,6 +11,7 @@ def test_ollama_connectivity():
11
 
12
  print("=== Ollama Connectivity Diagnostic ===")
13
  print(f"Configured Ollama Host: {config.ollama_host}")
 
14
  print()
15
 
16
  # Headers to skip ngrok browser warning
@@ -37,6 +38,18 @@ def test_ollama_connectivity():
37
  print(f" Available Models: {len(models)}")
38
  for model in models:
39
  print(f" - {model.get('name', 'Unknown model')}")
 
 
 
 
 
 
 
 
 
 
 
 
40
  except Exception as e:
41
  print(f" Error parsing response: {e}")
42
  print(f" Response text: {response.text[:200]}...")
@@ -46,15 +59,74 @@ def test_ollama_connectivity():
46
 
47
  except requests.exceptions.Timeout:
48
  print(" ✗ Request timed out (took more than 10 seconds)")
 
49
  except requests.exceptions.ConnectionError as e:
50
  print(f" ✗ Connection error: {e}")
 
51
  except Exception as e:
52
  print(f" ✗ Unexpected error: {e}")
53
 
54
  print()
55
 
56
- # Test 2: Check localhost as fallback
57
- print("Test 2: Checking localhost fallback (if different from configured host)...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  if config.ollama_host != "http://localhost:11434":
59
  try:
60
  local_response = requests.get(
@@ -65,6 +137,12 @@ def test_ollama_connectivity():
65
  print(f" Local Status Code: {local_response.status_code}")
66
  if local_response.status_code == 200:
67
  print(" ✓ Successfully connected to localhost Ollama")
 
 
 
 
 
 
68
  else:
69
  print(f" ✗ Local connection failed with status {local_response.status_code}")
70
  except Exception as e:
@@ -74,6 +152,12 @@ def test_ollama_connectivity():
74
 
75
  print()
76
  print("=== Diagnostic Complete ===")
 
 
 
 
 
 
77
 
78
  if __name__ == "__main__":
79
  test_ollama_connectivity()
 
1
  #!/usr/bin/env python3
2
  """
3
+ Diagnostic script to test Ollama connectivity and model generation
4
  """
5
 
6
  import requests
 
11
 
12
  print("=== Ollama Connectivity Diagnostic ===")
13
  print(f"Configured Ollama Host: {config.ollama_host}")
14
+ print(f"Configured Model: {config.local_model_name}")
15
  print()
16
 
17
  # Headers to skip ngrok browser warning
 
38
  print(f" Available Models: {len(models)}")
39
  for model in models:
40
  print(f" - {model.get('name', 'Unknown model')}")
41
+
42
+ # Check if our configured model is available
43
+ model_names = [m.get('name') for m in models]
44
+ if config.local_model_name in model_names:
45
+ print(f" ✓ Configured model '{config.local_model_name}' is available")
46
+ else:
47
+ print(f" ⚠ Configured model '{config.local_model_name}' not found")
48
+ if models:
49
+ print(f" Available models: {', '.join(model_names)}")
50
+ else:
51
+ print(f" No models found. Try running: ollama pull {config.local_model_name}")
52
+
53
  except Exception as e:
54
  print(f" Error parsing response: {e}")
55
  print(f" Response text: {response.text[:200]}...")
 
59
 
60
  except requests.exceptions.Timeout:
61
  print(" ✗ Request timed out (took more than 10 seconds)")
62
+ print(" This may indicate network issues or an unresponsive Ollama server")
63
  except requests.exceptions.ConnectionError as e:
64
  print(f" ✗ Connection error: {e}")
65
+ print(" This may indicate that the Ollama server is not running or the URL is incorrect")
66
  except Exception as e:
67
  print(f" ✗ Unexpected error: {e}")
68
 
69
  print()
70
 
71
+ # Test 2: Test model generation if we can connect
72
+ print("Test 2: Testing model generation...")
73
+ try:
74
+ # First verify we can connect
75
+ response = requests.get(
76
+ f"{config.ollama_host}/api/tags",
77
+ headers=headers,
78
+ timeout=10
79
+ )
80
+
81
+ if response.status_code == 200:
82
+ data = response.json()
83
+ models = data.get("models", [])
84
+ model_names = [m.get('name') for m in models]
85
+
86
+ if config.local_model_name in model_names:
87
+ print(f" Testing generation with model: {config.local_model_name}")
88
+
89
+ # Test generation
90
+ generate_payload = {
91
+ "model": config.local_model_name,
92
+ "prompt": "Hello, please respond with just the word 'Success' in uppercase.",
93
+ "stream": False
94
+ }
95
+
96
+ generate_response = requests.post(
97
+ f"{config.ollama_host}/api/generate",
98
+ json=generate_payload,
99
+ headers=headers,
100
+ timeout=30
101
+ )
102
+
103
+ if generate_response.status_code == 200:
104
+ generate_data = generate_response.json()
105
+ response_text = generate_data.get("response", "").strip()
106
+ print(f" ✓ Model generation successful")
107
+ print(f" Response: {response_text}")
108
+ else:
109
+ print(f" ✗ Model generation failed with status {generate_response.status_code}")
110
+ print(f" Response: {generate_response.text[:200]}...")
111
+ else:
112
+ print(f" ⚠ Skipping generation test - model '{config.local_model_name}' not available")
113
+ if not models:
114
+ print(f" No models found. Try running: ollama pull {config.local_model_name}")
115
+ else:
116
+ print(f" ⚠ Skipping generation test - cannot connect to Ollama host")
117
+
118
+ except requests.exceptions.Timeout:
119
+ print(" ✗ Generation test timed out (took more than 30 seconds)")
120
+ print(" This may indicate the model is still loading or the server is under heavy load")
121
+ except requests.exceptions.ConnectionError as e:
122
+ print(f" ✗ Generation test connection error: {e}")
123
+ except Exception as e:
124
+ print(f" ✗ Generation test error: {e}")
125
+
126
+ print()
127
+
128
+ # Test 3: Check localhost as fallback
129
+ print("Test 3: Checking localhost fallback (if different from configured host)...")
130
  if config.ollama_host != "http://localhost:11434":
131
  try:
132
  local_response = requests.get(
 
137
  print(f" Local Status Code: {local_response.status_code}")
138
  if local_response.status_code == 200:
139
  print(" ✓ Successfully connected to localhost Ollama")
140
+ try:
141
+ local_data = local_response.json()
142
+ local_models = local_data.get("models", [])
143
+ print(f" Local Available Models: {len(local_models)}")
144
+ except Exception as e:
145
+ print(f" Error parsing local response: {e}")
146
  else:
147
  print(f" ✗ Local connection failed with status {local_response.status_code}")
148
  except Exception as e:
 
152
 
153
  print()
154
  print("=== Diagnostic Complete ===")
155
+ print()
156
+ print("Troubleshooting Tips:")
157
+ print("1. If connection fails, verify your Ollama server is running: ollama serve")
158
+ print("2. If no models found, pull a model: ollama pull mistral")
159
+ print("3. If ngrok issues, verify your tunnel is active and URL is correct")
160
+ print("4. If timeout issues, check firewall settings and network connectivity")
161
 
162
  if __name__ == "__main__":
163
  test_ollama_connectivity()