-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgithub.py
More file actions
322 lines (268 loc) Β· 12.4 KB
/
github.py
File metadata and controls
322 lines (268 loc) Β· 12.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
# -*- coding: utf-8 -*-
"""
GitHub Knowledge Score Calculator
This script fetches data for a given GitHub user using the GitHub API,
calculates a knowledge score based on various metrics, and saves the
results to an Excel file.
Prerequisites:
- Python 3.x
- pandas library (`pip install pandas`)
- requests library (`pip install requests`)
- openpyxl library (`pip install openpyxl`) - Required by pandas for Excel writing
GitHub API Token:
- You need a GitHub Personal Access Token (PAT) for authenticated requests
to avoid strict rate limits.
- Generate a token here: https://github.com/settings/tokens
- Grant the token 'public_repo' and 'read:user' scopes.
- **Security Best Practice:** Store your token securely, e.g., as an
environment variable named 'GITHUB_TOKEN'. Do not hardcode it in the script.
"""
import requests
import pandas as pd
import os
import json
from datetime import datetime, timedelta
from dotenv import load_dotenv
load_dotenv()
# --- Configuration ---
# Define maximum values for normalization (scaling to 0-100).
# These are subjective and can be adjusted based on expected ranges.
# normalize() divides a raw metric by its cap here; values at or above the
# cap score 100.
NORMALIZATION_CAPS = {
    'repo_count': 100,
    'avg_star_count': 500,
    'avg_fork_count': 100,
    'language_diversity': 20,
    'pr_count_others': 50,
    'commit_activity_proxy': 1000, # Based on recent push events count
    'issue_engagement': 50,
    'followers': 1000,
    'gists_count': 50,
}
# Weights for the final score calculation (as provided by the user)
# These sum to 1.0, so the weighted final score stays on a 0-100 scale.
WEIGHTS = {
    'repo_count': 0.15,
    'avg_star_count': 0.20,
    'avg_fork_count': 0.10,
    'language_diversity': 0.10,
    'pr_count_others': 0.15,
    'commit_activity_proxy': 0.10,
    'issue_engagement': 0.10,
    'followers': 0.05,
    'gists_count': 0.05,
}
# Output file name
# NOTE(review): OUTPUT_EXCEL_FILE is not written anywhere in this chunk —
# confirm it is used by code outside this view.
OUTPUT_EXCEL_FILE = 'github_knowledge_scores.xlsx'
RAW_DATA_FILE = 'github_user_data.json' # Optional: File to store raw API responses
# --- Helper Functions ---
def get_github_token():
    """Read the GitHub personal access token from the environment.

    Returns:
        str | None: The value of the GITHUB_TOKEN environment variable, or
        None when it is unset (a rate-limit warning is printed in that case).
    """
    token = os.environ.get('GITHUB_TOKEN')
    if token:
        return token
    # Unauthenticated requests still work, but GitHub rate-limits them hard.
    print("Warning: GITHUB_TOKEN environment variable not set.")
    print("API requests will be unauthenticated and heavily rate-limited.")
    print("Generate a token: https://github.com/settings/tokens")
    print("Set it as an environment variable: export GITHUB_TOKEN='your_token_here'")
    # Optionally, prompt the user if not found (less secure)
    # token = input("Enter your GitHub Personal Access Token: ")
    return token
def make_github_request(url, token, params=None):
    """GET a GitHub API URL and return the decoded JSON body.

    Sends the v3 Accept header, adds token authentication when *token* is
    truthy, and uses a 20-second timeout.

    Returns:
        The parsed JSON response, or None on any request or decoding failure
        (diagnostics are printed rather than raised).
    """
    request_headers = {'Accept': 'application/vnd.github.v3+json'}
    if token:
        request_headers['Authorization'] = f'token {token}'
    try:
        resp = requests.get(url, headers=request_headers, params=params, timeout=20)
        resp.raise_for_status()  # 4xx/5xx -> HTTPError, handled below
        return resp.json()
    except requests.exceptions.RequestException as err:
        print(f"Error fetching data from {url}: {err}")
        failed = getattr(err, 'response', None)
        if failed is not None:
            print(f"Response status code: {failed.status_code}")
            print(f"Response content: {failed.text}")
        return None
    except json.JSONDecodeError:
        # raise_for_status() passed, so resp is bound here; the body simply
        # was not valid JSON.
        print(f"Error decoding JSON response from {url}")
        print(f"Response content: {resp.text}")
        return None
def normalize(value, metric_name):
    """Scale *value* to 0-100 using the cap configured for *metric_name*.

    Unknown metrics fall back to a cap of 1, and a configured cap of 0 is
    treated as 1, so division by zero cannot occur. Results are clamped
    to a maximum of 100.
    """
    cap = NORMALIZATION_CAPS.get(metric_name, 1)
    if cap == 0:
        cap = 1
    scaled = (value / cap) * 100
    return min(scaled, 100)
def save_raw_data(username, data):
    """Persist raw API responses for *username* into the shared JSON file.

    Entries for other users already in RAW_DATA_FILE are preserved; this
    user's entry is added or replaced, then the whole file is rewritten.
    Read and write errors are reported on stdout, never raised.
    """
    stored = {}
    # Merge with whatever is already on disk, if readable.
    if os.path.exists(RAW_DATA_FILE):
        try:
            with open(RAW_DATA_FILE, 'r', encoding='utf-8') as fh:
                stored = json.load(fh)
        except (json.JSONDecodeError, FileNotFoundError):
            print(f"Warning: Could not read existing raw data file {RAW_DATA_FILE}. Starting fresh.")
            stored = {}
    stored[username] = data
    try:
        with open(RAW_DATA_FILE, 'w', encoding='utf-8') as fh:
            json.dump(stored, fh, indent=4)
        print(f"Raw data saved to {RAW_DATA_FILE}")
    except IOError as e:
        print(f"Error saving raw data to {RAW_DATA_FILE}: {e}")
# --- Data Fetching Functions ---
def get_user_profile(username, token):
    """Fetch the public profile record for *username* from the GitHub API.

    Returns the parsed JSON profile, or None if the request failed.
    """
    print(f"Fetching profile for {username}...")
    endpoint = f"https://api.github.com/users/{username}"
    return make_github_request(endpoint, token)
def get_user_repos(username, token):
    """Fetch the user's owned public repositories (first page only).

    Requests up to 100 repos sorted by last update. Pagination is not
    handled, so users with more than 100 repositories are truncated.
    """
    print(f"Fetching repositories for {username}...")
    endpoint = f"https://api.github.com/users/{username}/repos"
    query = {'per_page': 100, 'type': 'owner', 'sort': 'updated'}
    return make_github_request(endpoint, token, params=query)
def get_commit_activity_proxy(username, token):
    """Estimate recent commit activity from the user's public events.

    Fetches up to 100 recent public events and sums the 'size' field
    (commits per push) of every PushEvent dated within the last 365 days.
    Returns 0 when the event fetch fails or yields nothing.
    """
    print(f"Fetching recent events (commit proxy) for {username}...")
    endpoint = f"https://api.github.com/users/{username}/events/public"
    events = make_github_request(endpoint, token, params={'per_page': 100})
    if not events:
        return 0
    cutoff = datetime.now() - timedelta(days=365)
    push_count = 0
    total_commits = 0
    for event in events:
        stamp = datetime.strptime(event['created_at'], "%Y-%m-%dT%H:%M:%SZ")
        if stamp < cutoff:
            continue  # skip events older than a year
        if event['type'] != 'PushEvent':
            continue
        push_count += 1
        payload = event.get('payload', {})
        if 'size' in payload:
            total_commits += payload['size']  # 'size' = commits in this push
    print(f"Found {total_commits} commits in {push_count} recent public push events.")
    return total_commits
def search_github(query, token):
    """Run a GitHub issue/PR search and return the total match count.

    Only the first page (100 items) is requested, but the returned
    'total_count' field reflects all server-side matches. Returns 0 when
    the request fails.
    """
    print(f"Searching GitHub: {query}...")
    endpoint = "https://api.github.com/search/issues"
    search_params = {'q': query, 'per_page': 100, 'sort': 'updated', 'order': 'desc'}
    response = make_github_request(endpoint, token, params=search_params)
    if not response:
        return 0
    return response.get('total_count', 0)
def get_prs_to_others(username, token):
    """Count pull requests the user authored in repositories owned by others."""
    # '-user:{username}' excludes the user's own repositories from the search.
    return search_github(f"is:pr author:{username} -user:{username}", token)
def get_issue_engagement(username, token):
    """Count issues the user opened in repositories owned by others.

    A simplified engagement proxy; comment activity (commenter:{username})
    could be added for a richer measure.
    """
    search_query = f"is:issue author:{username} -user:{username}"
    return search_github(search_query, token)
# --- Main Calculation Logic ---
def calculate_knowledge_score(username, token):
    """Fetches all data and calculates the knowledge score.

    Args:
        username: GitHub login to score.
        token: Personal access token, or None for unauthenticated requests.

    Returns:
        dict | None: Raw metrics merged with their 'normalized_*' values and
        the final 'knowledge_score' (0-100), or None if the profile fetch
        failed. Also persists the raw API responses via save_raw_data().
    """
    user_data = get_user_profile(username, token)
    if not user_data:
        return None # Failed to get basic user data
    repos_data = get_user_repos(username, token)
    # Note: commit_activity, pr_count_others, issue_engagement might return 0
    # if the API calls fail or if there's no activity.
    commit_activity = get_commit_activity_proxy(username, token)
    pr_count_others = get_prs_to_others(username, token)
    issue_engagement = get_issue_engagement(username, token)
    # --- Extract Metrics ---
    # Keys here must match WEIGHTS/NORMALIZATION_CAPS for the scoring loop.
    metrics = {
        'username': username,
        'repo_count': user_data.get('public_repos', 0),
        'followers': user_data.get('followers', 0),
        'gists_count': user_data.get('public_gists', 0),
        'avg_star_count': 0,
        'avg_fork_count': 0,
        'language_diversity': 0,
        'commit_activity_proxy': commit_activity,
        'pr_count_others': pr_count_others,
        'issue_engagement': issue_engagement,
        'knowledge_score': 0, # Initialize score
        'last_updated': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    }
    raw_fetched_data = {
        'profile': user_data,
        'repos': repos_data,
        'commit_proxy_count': commit_activity,
        'prs_others_count': pr_count_others,
        'issues_others_count': issue_engagement
    }
    save_raw_data(username, raw_fetched_data) # Optionally save raw data
    # --- Calculate Repo-Based Metrics ---
    if repos_data and isinstance(repos_data, list) and len(repos_data) > 0:
        total_stars = sum(repo.get('stargazers_count', 0) for repo in repos_data)
        total_forks = sum(repo.get('forks_count', 0) for repo in repos_data)
        languages = set()
        for repo in repos_data:
            if repo.get('language'): # Primary language
                languages.add(repo['language'])
            # Could also fetch languages endpoint per repo for more detail, but adds many API calls
            # repo_lang_url = repo.get('languages_url')
            # if repo_lang_url:
            #     repo_langs = make_github_request(repo_lang_url, token)
            #     if repo_langs:
            #         languages.update(repo_langs.keys())
        num_repos = len(repos_data)
        metrics['avg_star_count'] = total_stars / num_repos if num_repos > 0 else 0
        metrics['avg_fork_count'] = total_forks / num_repos if num_repos > 0 else 0
        metrics['language_diversity'] = len(languages)
    else:
        print(f"Warning: No repository data found or accessible for {username}.")
        # Ensure repo-dependent metrics are 0 if no repos
        metrics['repo_count'] = 0 # Override profile count if repo list is empty/inaccessible
        metrics['avg_star_count'] = 0
        metrics['avg_fork_count'] = 0
        metrics['language_diversity'] = 0
    # --- Normalize Metrics and Calculate Score ---
    # Each metric is scaled to 0-100, weighted, and summed into the final score.
    final_score = 0
    normalized_metrics = {}
    print("\n--- Calculated Metrics ---")
    for key, weight in WEIGHTS.items():
        raw_value = metrics.get(key, 0)
        normalized_value = normalize(raw_value, key)
        final_score += normalized_value * weight
        normalized_metrics[f'normalized_{key}'] = round(normalized_value, 2)
        print(f"{key.replace('_', ' ').title()}: {raw_value} (Normalized: {normalized_metrics[f'normalized_{key}']:.2f})")
    metrics['knowledge_score'] = round(final_score, 2)
    print(f"\nFinal Knowledge Score for {username}: {metrics['knowledge_score']:.2f}")
    # Combine raw and normalized metrics for output
    output_data = {**metrics, **normalized_metrics}
    return output_data
# --- Main Execution ---
def get_github_user_score_from_url(github_url):
    """
    Given a GitHub profile URL, fetches the user's knowledge score data.

    Args:
        github_url (str): The GitHub profile URL (e.g., 'https://github.com/username').

    Returns:
        dict: Knowledge score data or None if failed.

    Raises:
        ValueError: If the URL does not start with 'https://github.com/' or
        contains no username.
    """
    # Extract username from URL
    if not github_url.startswith("https://github.com/"):
        raise ValueError("Invalid GitHub URL format. URL must start with 'https://github.com/'")
    # Take only the first path segment so URLs such as
    # 'https://github.com/user/repo' or trailing slashes still yield 'user'.
    path = github_url[len("https://github.com/"):]
    username = path.strip("/").split("/")[0]
    if not username:
        # e.g. 'https://github.com/' — previously this fell through and
        # queried the API with an empty username.
        raise ValueError("No username found in the GitHub URL.")
    # Fetch GitHub token
    token = get_github_token()
    # Calculate knowledge score
    return calculate_knowledge_score(username, token)